+ ./ya make -T --test-size=small --test-size=medium --stat --test-threads 52 --link-threads 12 -DUSE_EAT_MY_DATA --add-peerdirs-tests all --build release --sanitize=address -DDEBUGINFO_LINES_ONLY --stat -DCONSISTENT_DEBUG --no-dir-outputs --test-failure-code 0 --build-all --cache-size 2TB --force-build-depends -A --bazel-remote-store --bazel-remote-base-uri http://cachesrv.internal:8081 --bazel-remote-username cache_user --bazel-remote-password-file /tmp/tmp.JC0P60lvkD --bazel-remote-put --dist-cache-max-file-size=209715200 -A --retest ydb --build-custom-json=/home/runner/actions_runner/_work/ydb/ydb/graph.json --custom-context=/home/runner/actions_runner/_work/ydb/ydb/context.json --log-file /home/runner/actions_runner/_work/ydb/ydb/tmp/results/ya_log.txt --evlog-file /home/runner/actions_runner/_work/ydb/ydb/tmp/results/try_1/ya_evlog.jsonl --build-results-report /home/runner/actions_runner/_work/ydb/ydb/tmp/results/try_1/report.json --output /home/runner/actions_runner/_work/ydb/ydb/tmp/out
Output root is subdirectory of Arcadia root, this may cause non-idempotent build
Configuring local and dist store caches
Configuration done. Preparing for execution
|33.3%| CLEANING SYMRES
[... several hundred successful {BAZEL_DOWNLOAD} progress entries ([AR]/[CC]/[TS]/[TA]/[LD] cache hits) elided; FAILED nodes, PREPARE steps, and configuration entries are kept below in their original order ...]
| 0.1%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/mediator/ut/unittest
| 1.3%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/kqp/rm_service/ut/ydb-core-kqp-rm_service-ut
| 1.3%| [TA] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/schemeshard/ut_sysview/test-results/unittest/{meta.json ... results_accumulator.log}
| 2.3%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/functional/serializable/py3test
| 3.2%| [TA] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/kqp/workload_service/ut/test-results/unittest/{meta.json ... results_accumulator.log}
| 3.7%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/stress/scheme_board/pile_promotion/pile_promotion_workload
| 3.8%| [TS] {BAZEL_DOWNLOAD, FAILED} ydb/tests/library/ut/py3test
| 3.5%| [TS] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/sequenceshard/public/ut/unittest
| 3.7%| [TA] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/public/sdk/cpp/src/client/federated_topic/ut/test-results/unittest/{meta.json ... results_accumulator.log}
| 3.7%| [TS] {BAZEL_DOWNLOAD, FAILED} ydb/library/yql/providers/s3/provider/ut/unittest
| 4.3%| [TA] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/replication/service/ut_topic_reader/test-results/unittest/{meta.json ... results_accumulator.log}
| 4.5%| [TA] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/functional/sqs/messaging/test-results/py3test/{meta.json ... results_accumulator.log}
| 4.5%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/memory_controller/ut/unittest
| 4.6%| [TA] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/blobstorage/groupinfo/ut/test-results/unittest/{meta.json ... results_accumulator.log}
| 4.6%| [TA] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/kqp/ut/arrow/test-results/unittest/{meta.json ... results_accumulator.log}
| 4.7%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/stress/topic_kafka/workload/libpy3stress-topic_kafka-workload.global.a
| 5.0%| [TA] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/grpc_services/grpc_request_check_actor_ut/test-results/unittest/{meta.json ... results_accumulator.log}
| 5.0%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/persqueue/pqtablet/partition/mlp/ut/unittest
| 5.1%| [TA] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/schemeshard/ut_login/test-results/unittest/{meta.json ... results_accumulator.log}
| 5.7%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/stress/kv/tests/py3test
| 5.8%| [TA] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/functional/sqs/with_quotas/test-results/py3test/{meta.json ... results_accumulator.log}
| 6.3%| [TS] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/long_tx_service/public/ut/unittest
|13.9%| PREPARE $(CLANG_FORMAT-3815817643)
|14.0%| PREPARE $(YMAKE_PYTHON3-212672652) - 10.43 MB
|15.1%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/kqp/ut/arrow/unittest
|15.1%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/schemeshard/ut_login/unittest
|15.3%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/grpc_services/grpc_request_check_actor_ut/unittest
|15.3%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/replication/service/ut_topic_reader/unittest
|15.5%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/replication/service/ut_topic_reader/unittest
|15.5%| PREPARE $(YMAKE_PYTHON3)
|15.8%| PREPARE $(LLD_ROOT-3107549726) - 25.18 MB
|15.9%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/functional/sqs/messaging/py3test
|15.9%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/replication/service/ut_topic_reader/unittest
|16.1%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/functional/sqs/messaging/py3test
|16.0%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/schemeshard/ut_login/unittest
|16.2%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/replication/service/ut_topic_reader/unittest
|16.2%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/public/sdk/cpp/src/client/federated_topic/ut/unittest
|16.3%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/replication/service/ut_topic_reader/unittest
|16.4%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/functional/sqs/messaging/py3test
|16.6%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/replication/service/ut_topic_reader/unittest
|17.0%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/functional/sqs/messaging/py3test
|16.8%| PREPARE $(TEST_TOOL_HOST-sbr:10669990147) - 25.18 MB
|16.9%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/schemeshard/ut_login/unittest
|16.9%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/schemeshard/ut_login/unittest
|17.0%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/schemeshard/ut_login/unittest
|17.1%| PREPARE $(FLAKE8_PY3-3596799299) - 3.26 MB
|17.1%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/public/sdk/cpp/src/client/federated_topic/ut/unittest
|17.2%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/public/sdk/cpp/src/client/federated_topic/ut/unittest
|17.4%| PREPARE $(PYTHON) - 50.50 MB
|17.4%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/public/sdk/cpp/src/client/federated_topic/ut/unittest
|17.7%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/functional/sqs/messaging/py3test
|17.8%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/functional/sqs/messaging/py3test
|17.9%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/replication/service/ut_topic_reader/unittest
|17.9%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/replication/service/ut_topic_reader/unittest
|18.1%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/functional/sqs/messaging/py3test
|18.9%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/functional/sqs/messaging/py3test
|19.3%| PREPARE $(VCS)
|20.0%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/functional/sqs/messaging/py3test
|20.1%| [CP] {default-linux-x86_64, release, asan} $(B)/common_test.context
|20.5%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/stress/kv/workload_kv
|21.4%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/schemeshard/ut_sysview/unittest
|21.6%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/functional/sqs/with_quotas/py3test
|22.1%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/schemeshard/ut_sysview/unittest
|22.2%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/kqp/workload_service/ut/unittest
|24.8%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/stress/node_broker/node_broker
|25.6%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/library/actors/core/ut_fat/ydb-library-actors-core-ut_fat
|25.7%| PREPARE $(RUFF-3583455953) - 9.14 MB
|25.9%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/tools/nemesis/ut/ydb-tests-tools-nemesis-ut
|26.0%| PREPARE $(BLACK-1616416287) - 7.45 MB
|27.1%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/yql/essentials/minikql/comp_nodes/llvm16/libminikql-comp_nodes-llvm16.a
|28.7%| PREPARE $(CLANG-1922233694) - 218.12 MB
|31.5%| PREPARE $(CLANG16-1380963495) - 294.56 MB
|31.8%| PREPARE $(CLANG18-1866954364) - 318.78 MB
|33.4%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/datashard/select/ydb-tests-datashard-select
|33.7%| PREPARE $(CLANG-2403293607) - 285.73 MB
|34.5%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/functional/query_cache/ydb-tests-functional-query_cache
|34.6%| PREPARE $(WITH_JDK-sbr:9470949154) - 176.18 MB
|34.8%| PREPARE $(CLANG20-882982600)
|35.9%| PREPARE $(JDK17-2548586558)
|36.1%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/driver_lib/version/ut/ydb-core-driver_lib-version-ut
|36.2%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/functional/serializable/ydb-tests-functional-serializable
|36.8%| PREPARE $(WITH_JDK17-sbr:9470949154)
|37.1%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/datashard/parametrized_queries/ydb-tests-datashard-parametrized_queries
|37.6%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/persqueue/common/ut/ydb-core-persqueue-common-ut
|37.6%| PREPARE $(JDK_DEFAULT-2548586558)
|38.4%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/scheme/ut/ydb-core-scheme-ut
|40.1%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/viewer/libydb-core-viewer.a
|40.2%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/base/ut_auth/ydb-core-base-ut_auth
|40.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/viewer/json_handlers_viewer.cpp
[... progress stream continues with further successful {BAZEL_DOWNLOAD} entries; the captured log breaks off mid-entry at |45.2%| [AR] {BAZEL_DOWNLOAD} ...]
$(B)/ydb/core/tx/columnshard/blobs_action/libtx-columnshard-blobs_action.a |45.3%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/testlib/audit_helpers/libcore-testlib-audit_helpers.a |45.3%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/blobs_action/transaction/libcolumnshard-blobs_action-transaction.a |45.2%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/lib/fq/libpublic-lib-fq.a |45.2%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/protobuf/builtin_proto/protos_from_protobuf/libpy3protobuf-builtin_proto-protos_from_protobuf.global.a |45.2%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/bg_tasks/transactions/libcolumnshard-bg_tasks-transactions.a |45.3%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/security/token_manager/libcore-security-token_manager.a |45.3%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/blobs_action/abstract/libcolumnshard-blobs_action-abstract.a |45.3%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/security/sasl/libcore-security-sasl.a |45.4%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/public_http/libydb-core-public_http.global.a |45.4%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/lib/experimental/libpublic-lib-experimental.a |45.4%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/persqueue/public/codecs/libpersqueue-public-codecs.a |45.2%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/blobs_reader/libtx-columnshard-blobs_reader.a |45.2%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/dq/opt/libyql-dq-opt.a |45.3%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/persqueue/public/counters/libpersqueue-public-counters.a |45.3%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/apache/arrow/liblibs-apache-arrow.a |45.4%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/persqueue/public/fetcher/libpersqueue-public-fetcher.a |45.5%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/persqueue/public/describer/libpersqueue-public-describer.a |45.5%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/sdk/cpp/src/client/query/libsrc-client-query.a |45.5%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/blobs_action/bs/libcolumnshard-blobs_action-bs.a |45.6%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/blobs_action/tier/libcolumnshard-blobs_action-tier.a |45.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/persqueue/public/cluster_tracker/libpersqueue-public-cluster_tracker.a |45.5%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/kqp/proxy_service/script_executions_utils/libkqp-proxy_service-script_executions_utils.a |45.6%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/persqueue/pqtablet/readproxy/libpersqueue-pqtablet-readproxy.a |45.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/sys_view/common/libcore-sys_view-common.a |45.8%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/providers/s3/provider/libproviders-s3-provider.a |45.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/sys_view/query_stats/libcore-sys_view-query_stats.a |45.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/metering/libydb-core-metering.a |45.8%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/lib/ydb_cli/common/libcommon.a |45.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/persqueue/libydb-core-persqueue.a |45.8%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/memory_controller/libydb-core-memory_controller.a |45.8%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/security/libydb-core-security.a |45.8%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/log_backend/libydb-core-log_backend.a |45.9%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/persqueue/pqtablet/partition/mirrorer/libpqtablet-partition-mirrorer.a |46.0%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/grpc/draft/libapi-grpc-draft.a |45.9%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/kqp/gateway/behaviour/view/libgateway-behaviour-view.global.a |46.0%| [AR] {BAZEL_DOWNLOAD} 
$(B)/ydb/core/kqp/workload_service/libcore-kqp-workload_service.a |46.0%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/grpc/libapi-grpc.a |46.0%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/sdk/cpp/src/client/datastreams/libsrc-client-datastreams.a |46.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/kqp/workload_service/tables/libkqp-workload_service-tables.a |46.2%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/kqp/run_script_actor/libcore-kqp-run_script_actor.a |46.2%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/kqp/gateway/behaviour/tablestore/operations/libbehaviour-tablestore-operations.a |46.2%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/kqp/gateway/behaviour/tablestore/libgateway-behaviour-tablestore.global.a |46.2%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/kqp/gateway/behaviour/table/libgateway-behaviour-table.global.a |46.2%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/kqp/rm_service/libcore-kqp-rm_service.a |46.3%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/kqp/gateway/behaviour/view/libgateway-behaviour-view.a |46.3%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/persqueue/pqtablet/partition/mlp/libpqtablet-partition-mlp.a |46.4%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/kqp/node_service/libcore-kqp-node_service.a |46.2%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/kqp/gateway/behaviour/tablestore/libgateway-behaviour-tablestore.a |46.2%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/kqp/gateway/behaviour/streaming_query/libgateway-behaviour-streaming_query.global.a |46.3%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/kqp/gateway/behaviour/streaming_query/common/libbehaviour-streaming_query-common.a |46.5%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/kqp/gateway/behaviour/resource_pool_classifier/libgateway-behaviour-resource_pool_classifier.global.a |46.5%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/kqp/gateway/behaviour/tablestore/operations/libbehaviour-tablestore-operations.global.a |46.4%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/kqp/gateway/behaviour/resource_pool/libgateway-behaviour-resource_pool.global.a |46.5%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/kqp/expr_nodes/libcore-kqp-expr_nodes.a |46.5%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/sdk/cpp/src/client/draft/libsrc-client-draft.a |46.5%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/kqp/executer_actor/shards_resolver/libkqp-executer_actor-shards_resolver.a |46.5%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/test_tablet/libydb-core-test_tablet.a |46.6%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/io_formats/ydb_dump/libcore-io_formats-ydb_dump.a |46.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/grpc_streaming/libydb-core-grpc_streaming.a |46.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/io_formats/arrow/scheme/libio_formats-arrow-scheme.a |46.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/io_formats/cell_maker/libcore-io_formats-cell_maker.a |46.8%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/jaeger_tracing/libydb-core-jaeger_tracing.a |46.8%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/grpc_services/cancelation/libcore-grpc_services-cancelation.a |46.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/graph/shard/protos/libgraph-shard-protos.a |46.8%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/kesus/proxy/libcore-kesus-proxy.a |46.8%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/kqp/federated_query/libcore-kqp-federated_query.a |46.9%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/rate_limiter/utils/liblibs-rate_limiter-utils.a |46.9%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/kqp/query_compiler/libcore-kqp-query_compiler.a |46.8%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/graph/protos/libcore-graph-protos.a |46.8%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/grpc_services/tablet/libcore-grpc_services-tablet.a |46.9%| [AR] {BAZEL_DOWNLOAD} 
$(B)/ydb/core/persqueue/pqrb/libcore-persqueue-pqrb.a |47.0%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/row_dispatcher/format_handler/liblibs-row_dispatcher-format_handler.a |46.8%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/row_dispatcher/format_handler/common/librow_dispatcher-format_handler-common.a |46.9%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/kqp/gateway/behaviour/resource_pool_classifier/libgateway-behaviour-resource_pool_classifier.a |47.0%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/row_dispatcher/common/liblibs-row_dispatcher-common.a |47.0%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/result_formatter/libfq-libs-result_formatter.a |47.0%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/row_dispatcher/format_handler/filters/librow_dispatcher-format_handler-filters.a |47.0%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/kqp/gateway/behaviour/streaming_query/libgateway-behaviour-streaming_query.a |47.2%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/quota_manager/events/liblibs-quota_manager-events.a |47.0%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/control_plane_storage/proto/liblibs-control_plane_storage-proto.a |47.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/graph/service/libcore-graph-service.a |47.2%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/control_plane_storage/events/liblibs-control_plane_storage-events.a |47.3%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/control_plane_proxy/utils/liblibs-control_plane_proxy-utils.a |47.3%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/ydb/libfq-libs-ydb.a |47.3%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/read_rule/libfq-libs-read_rule.a |47.2%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/config/libfq-libs-config.a |47.3%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/graph/shard/libcore-graph-shard.a |47.3%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/formats/arrow/program/libformats-arrow-program.global.a |47.4%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/checkpointing/events/liblibs-checkpointing-events.a |47.3%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/kqp/host/libcore-kqp-host.a |47.3%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/health_check/libydb-core-health_check.a |47.4%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/checkpoint_storage/proto/liblibs-checkpoint_storage-proto.a |47.5%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/kqp/gateway/libcore-kqp-gateway.a |47.5%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/formats/arrow/printer/libformats-arrow-printer.a |47.5%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/formats/arrow/hash/libformats-arrow-hash.a |47.6%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/compute/common/liblibs-compute-common.a |47.6%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/formats/arrow/accessor/composite/liblibrary-formats-arrow-accessor-composite.a |47.6%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/checkpointing/libfq-libs-checkpointing.a |47.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/formats/arrow/accessor/sparsed/libarrow-accessor-sparsed.global.a |47.8%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/docapi/libydb-core-docapi.a |47.6%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/formats/arrow/reader/libformats-arrow-reader.a |47.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/debug/libydb-core-debug.a |47.8%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/protos/libfq-libs-protos.a |47.8%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/formats/arrow/accessor/sub_columns/libarrow-accessor-sub_columns.global.a |47.8%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/keyvalue/libydb-core-keyvalue.a |47.8%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/counters_info/libydb-core-counters_info.a |47.9%| [AR] {BAZEL_DOWNLOAD} 
$(B)/ydb/core/control/libydb-core-control.a |47.8%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/config/protos/libcore-config-protos.a |47.9%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/control/lib/libcore-control-lib.a |47.9%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/config/validation/libcore-config-validation.a |48.0%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/discovery/libydb-core-discovery.a |48.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/erasure/libydb-core-erasure.a |48.0%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/blobstorage/vdisk/protos/libblobstorage-vdisk-protos.a |48.0%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/blobstorage/vdisk/metadata/libblobstorage-vdisk-metadata.a |48.0%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/blobstorage/vdisk/libcore-blobstorage-vdisk.a |48.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/checkpoint_storage/libfq-libs-checkpoint_storage.a |48.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/control_plane_storage/internal/liblibs-control_plane_storage-internal.a |48.2%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/blobstorage/crypto/chacha_512/libblobstorage-crypto-chacha_512.a |48.2%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/blobstorage/vdisk/ingress/libblobstorage-vdisk-ingress.a |48.2%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/invoke_builtins/llvm16/libminikql-invoke_builtins-llvm16.a |48.2%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/ytalloc/api/libcpp-ytalloc-api.a |48.3%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/engine/minikql/libcore-engine-minikql.a |48.3%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/blobstorage/common/libcore-blobstorage-common.a |48.3%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/blobstorage/crypto/libcore-blobstorage-crypto.a |48.3%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/formats/arrow/libcore-formats-arrow.a |48.3%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/engine/libydb-core-engine.a |48.3%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/backup/common/proto/libbackup-common-proto.a |48.3%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/yt/misc/libcpp-yt-misc.a |48.4%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/yt/malloc/libcpp-yt-malloc.a |48.4%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/config/init/libcore-config-init.a |48.2%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/kesus/tablet/libcore-kesus-tablet.a |48.3%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/backup/common/libcore-backup-common.a |48.3%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/audit/heartbeat_actor/libcore-audit-heartbeat_actor.a |48.3%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/yt/logging/plain_text_formatter/libyt-logging-plain_text_formatter.a |48.3%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/formats/arrow/dictionary/libformats-arrow-dictionary.a |48.4%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/time_provider/liblibrary-cpp-time_provider.a |48.4%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/yt/threading/libcpp-yt-threading.a |48.5%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/yt/logging/libcpp-yt-logging.a |48.5%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/blobstorage/vdisk/localrecovery/libblobstorage-vdisk-localrecovery.a |48.6%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/yt/global/libcpp-yt-global.a |48.6%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/yt/exception/libcpp-yt-exception.a |48.6%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/blobstorage/bridge/syncer/libblobstorage-bridge-syncer.a |48.5%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/yt/cpu_clock/libcpp-yt-cpu_clock.a |48.6%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/yt/backtrace/libcpp-yt-backtrace.a |48.6%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/yson/json/libcpp-yson-json.a |48.6%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/blobstorage/vdisk/defrag/libblobstorage-vdisk-defrag.a |48.7%| 
[AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/unified_agent_client/liblibrary-cpp-unified_agent_client.global.a |48.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/blobstorage/vdisk/huge/libblobstorage-vdisk-huge.a |48.7%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/string_utils/url/libcpp-string_utils-url.a |48.6%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/threading/equeue/libcpp-threading-equeue.a |48.6%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/blobstorage/bridge/proxy/libblobstorage-bridge-proxy.a |48.7%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/threading/atomic/libcpp-threading-atomic.a |48.7%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/threading/cancellation/libcpp-threading-cancellation.a |48.8%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/control_plane_proxy/libfq-libs-control_plane_proxy.a |48.8%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/threading/blocking_queue/libcpp-threading-blocking_queue.a |48.8%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/terminate_handler/liblibrary-cpp-terminate_handler.global.a |48.8%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/yt/error/libcpp-yt-error.a |48.9%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/terminate_handler/liblibrary-cpp-terminate_handler.a |48.7%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/tdigest/liblibrary-cpp-tdigest.a |48.7%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/string_utils/ztstrbuf/libcpp-string_utils-ztstrbuf.a |48.8%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/unified_agent_client/liblibrary-cpp-unified_agent_client.a |48.7%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/string_utils/scan/libcpp-string_utils-scan.a |48.8%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/string_utils/parse_size/libcpp-string_utils-parse_size.a |48.8%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/threading/cron/libcpp-threading-cron.a |48.8%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/string_utils/base32/libcpp-string_utils-base32.a |48.9%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/streams/zstd/libcpp-streams-zstd.a |48.9%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/streams/zc_memory_input/libcpp-streams-zc_memory_input.a |49.0%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/blobstorage/vdisk/query/libblobstorage-vdisk-query.a |48.9%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/sliding_window/liblibrary-cpp-sliding_window.a |48.9%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/testing/unittest/libcpp-testing-unittest.a |49.0%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/dwarf_backtrace/liblibrary-cpp-dwarf_backtrace.a |49.0%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/openssl/init/libcpp-openssl-init.global.a |49.0%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/string_utils/base64/libcpp-string_utils-base64.a |49.1%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/openssl/holders/libcpp-openssl-holders.a |49.1%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/openssl/crypto/libcpp-openssl-crypto.a |49.0%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/formats/arrow/program/libformats-arrow-program.a |49.1%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/digest/old_crc/libcpp-digest-old_crc.a |49.1%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/streams/lzma/libcpp-streams-lzma.a |49.2%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/sighandler/liblibrary-cpp-sighandler.a |48.9%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/skiff/liblibrary-cpp-skiff.a |49.0%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/streams/bzip2/libcpp-streams-bzip2.a |49.0%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/kqp/opt/physical/libkqp-opt-physical.a |49.1%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/digest/argonish/libcpp-digest-argonish.a |49.1%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/testing/common/libcpp-testing-common.a |49.2%| [AR] {BAZEL_DOWNLOAD} 
$(B)/library/cpp/streams/brotli/libcpp-streams-brotli.a |49.3%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/digest/murmur/libcpp-digest-murmur.a |49.3%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/dns/liblibrary-cpp-dns.a |49.2%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/dot_product/liblibrary-cpp-dot_product.a |49.2%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/string_utils/indent_text/libcpp-string_utils-indent_text.a |49.2%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/disjoint_sets/liblibrary-cpp-disjoint_sets.a |49.2%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/blobstorage/vdisk/hullop/libblobstorage-vdisk-hullop.a |49.3%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/string_utils/csv/libcpp-string_utils-csv.a |49.4%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/kqp/opt/logical/libkqp-opt-logical.a |49.4%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/string_utils/relaxed_escaper/libcpp-string_utils-relaxed_escaper.a |49.5%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/string_utils/quote/libcpp-string_utils-quote.a |49.5%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/testing/gmock_in_unittest/libcpp-testing-gmock_in_unittest.global.a |49.5%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/kqp/opt/rbo/libkqp-opt-rbo.a |49.5%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/scheme/liblibrary-cpp-scheme.a |49.4%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/on_disk/chunks/libcpp-on_disk-chunks.a |49.4%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/testing/gmock_in_unittest/libcpp-testing-gmock_in_unittest.a |49.4%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/http_proxy/libydb-core-http_proxy.a |49.4%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/sse/liblibrary-cpp-sse.a |49.6%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/cpuid_check/liblibrary-cpp-cpuid_check.global.a |49.7%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/testing/gtest_extensions/libcpp-testing-gtest_extensions.a |49.5%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/coroutine/listener/libcpp-coroutine-listener.a |49.5%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/testing/hook/libcpp-testing-hook.a |49.6%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/blockcodecs/codecs/brotli/libblockcodecs-codecs-brotli.global.a |49.7%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/timezone_conversion/liblibrary-cpp-timezone_conversion.a |49.7%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/threading/poor_man_openmp/libcpp-threading-poor_man_openmp.a |49.8%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/threading/thread_local/libcpp-threading-thread_local.a |49.8%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/threading/queue/libcpp-threading-queue.a |49.8%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/containers/comptrie/libcpp-containers-comptrie.a |49.8%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/threading/hot_swap/libcpp-threading-hot_swap.a |49.7%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/threading/future/libcpp-threading-future.a |49.7%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/threading/light_rw_lock/libcpp-threading-light_rw_lock.a |49.9%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/cache/liblibrary-cpp-cache.a |49.9%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/cgiparam/liblibrary-cpp-cgiparam.a |50.0%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/threading/skip_list/libcpp-threading-skip_list.a |50.0%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/threading/task_scheduler/libcpp-threading-task_scheduler.a |50.0%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/string_utils/levenshtein_diff/libcpp-string_utils-levenshtein_diff.a |50.0%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/blockcodecs/codecs/lzma/libblockcodecs-codecs-lzma.global.a |50.0%| [AR] {BAZEL_DOWNLOAD} 
$(B)/library/cpp/blockcodecs/codecs/snappy/libblockcodecs-codecs-snappy.global.a |49.8%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/blockcodecs/codecs/zlib/libblockcodecs-codecs-zlib.global.a |50.0%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/containers/atomizer/libcpp-containers-atomizer.a |50.1%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/restricted/patched/replxx/librestricted-patched-replxx.a |50.2%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/testing/unittest_main/libcpp-testing-unittest_main.a |50.2%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/charset/liblibrary-cpp-charset.a |50.2%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/kqp/provider/libcore-kqp-provider.a |50.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/control_plane_storage/libfq-libs-control_plane_storage.a |50.1%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/unicode/set/libcpp-unicode-set.a |50.1%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/type_info/tz/libcpp-type_info-tz.a |50.1%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/xml/init/libcpp-xml-init.a |50.2%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/xml/document/libcpp-xml-document.a |50.3%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/kqp/opt/libcore-kqp-opt.a |50.3%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/unicode/normalization/libcpp-unicode-normalization.a |50.4%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/restricted/boost/chrono/librestricted-boost-chrono.a |50.1%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/type_info/liblibrary-cpp-type_info.a |50.2%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/uri/liblibrary-cpp-uri.a |50.3%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/yt/assert/libcpp-yt-assert.a |50.3%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/restricted/aws/aws-c-cal/librestricted-aws-aws-c-cal.a |50.3%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/yt/backtrace/cursors/libunwind/libbacktrace-cursors-libunwind.a |50.4%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/unified_agent_client/proto/libcpp-unified_agent_client-proto.a |50.4%| [AR] {BAZEL_DOWNLOAD} $(B)/tools/enum_parser/enum_serialization_runtime/libtools-enum_parser-enum_serialization_runtime.a |50.5%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/restricted/googletest/googletest/librestricted-googletest-googletest.a |50.5%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/t1ha/libcontrib-libs-t1ha.a |50.3%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/yson/node/libcpp-yson-node.a |50.4%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/yson_pull/libyson_pull.a |50.5%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/zlib/libcontrib-libs-zlib.a |50.5%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/audit/audit_config/libcore-audit-audit_config.a |50.6%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/yson/liblibrary-cpp-yson.a |50.6%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/yt/system/libcpp-yt-system.a |50.6%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/yt/string/libcpp-yt-string.a |50.6%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/actorlib_impl/libydb-core-actorlib_impl.a |50.7%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/restricted/aws/aws-crt-cpp/librestricted-aws-aws-crt-cpp.a |50.7%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/yt/yson/libcpp-yt-yson.a |50.7%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/yt/memory/libcpp-yt-memory.a |50.7%| [AR] {BAZEL_DOWNLOAD} $(B)/util/charset/libutil-charset.a |50.7%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/yt/yson_string/libcpp-yt-yson_string.a |50.7%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/library/yql/providers/dq/local_gateway/libproviders-dq-local_gateway.a |50.7%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/restricted/boost/locale/librestricted-boost-locale.a |50.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/apps/version/libversion_definition.a |50.7%| [AR] 
{BAZEL_DOWNLOAD} $(B)/util/draft/libutil-draft.a |50.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/kafka_proxy/libydb-core-kafka_proxy.a |50.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/audit/libydb-core-audit.a |50.7%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/ngtcp2/libcontrib-libs-ngtcp2.a |50.7%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/openldap/libcontrib-libs-openldap.a |50.8%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/llvm16/lib/MC/MCDisassembler/liblib-MC-MCDisassembler.a |50.8%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/blobstorage/pdisk/libcore-blobstorage-pdisk.a |50.7%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/llvm16/lib/IRReader/libllvm16-lib-IRReader.a |50.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/base/generated/libcore-base-generated.a |50.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/blobstorage/vdisk/hulldb/generic/libvdisk-hulldb-generic.a |50.7%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/snowball/libcontrib-libs-snowball.a |50.6%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/llvm16/lib/MC/MCParser/liblib-MC-MCParser.a |50.6%| [AR] {BAZEL_DOWNLOAD} $(B)/util/libyutil.a |50.6%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/restricted/abseil-cpp/libcontrib-restricted-abseil-cpp.a |50.6%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/backup/regexp/libcore-backup-regexp.a |50.6%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/backup/controller/libcore-backup-controller.a |50.6%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/blobstorage/dsproxy/libcore-blobstorage-dsproxy.a |50.6%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/llvm16/lib/ProfileData/libllvm16-lib-ProfileData.a |50.1%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/client/server/libcore-client-server.a |50.2%| [BI] {default-linux-x86_64, release, asan} $(B)/library/cpp/build_info/buildinfo_data.h |50.5%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/linuxvdso/original/liblibs-linuxvdso-original.a |50.6%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/blobstorage/vdisk/common/libblobstorage-vdisk-common.a |50.6%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/blobstorage/base/libcore-blobstorage-base.a |50.6%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/driver_lib/run/librun.a |50.5%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/libidn/static/liblibs-libidn-static.a |50.5%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/blobstorage/pdisk/metadata/libblobstorage-pdisk-metadata.a |50.5%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/llvm16/lib/Transforms/Utils/liblib-Transforms-Utils.a |50.5%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/blobstorage/other/libcore-blobstorage-other.a |50.3%| [CP] {default-linux-x86_64, release, asan} $(B)/library/cpp/sanitizer/plugin/sanitizer.py.pyplugin |50.5%| [CF] {default-linux-x86_64, release, asan} $(B)/library/cpp/build_info/build_info.cpp |50.6%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/protos/libapi-protos.a |50.6%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/llvm16/lib/Transforms/Instrumentation/liblib-Transforms-Instrumentation.a |50.6%| [CF] {default-linux-x86_64, release, asan} $(B)/library/cpp/build_info/sandbox.cpp |50.6%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/fq/libs/compute/common/ut/ydb-core-fq-libs-compute-common-ut |50.5%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/llvm16/lib/Transforms/InstCombine/liblib-Transforms-InstCombine.a |50.5%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/library/yql/providers/dq/service/libproviders-dq-service.a |50.6%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/blobstorage/dsproxy/mock/libblobstorage-dsproxy-mock.a |50.6%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/blobstorage/vdisk/hulldb/fresh/libvdisk-hulldb-fresh.a |50.6%| [AR] {BAZEL_DOWNLOAD} 
$(B)/contrib/libs/farmhash/arch/sse41/libfarmhash-arch-sse41.a |50.7%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/fmt/libcontrib-libs-fmt.a |50.6%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/blob_depot/agent/libcore-blob_depot-agent.a |50.6%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/blobstorage/groupinfo/libcore-blobstorage-groupinfo.a |50.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/library/yql/providers/dq/local_gateway/yql_dq_gateway_local.cpp |50.6%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/blobstorage/backpressure/libcore-blobstorage-backpressure.a |50.6%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/base64/plain32/liblibs-base64-plain32.a |50.6%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/cctz/libcontrib-libs-cctz.a |50.7%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/base64/ssse3/liblibs-base64-ssse3.a |50.7%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/brotli/c/dec/libbrotli-c-dec.a |50.7%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/llvm16/lib/Object/libllvm16-lib-Object.a |50.6%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/base64/neon64/liblibs-base64-neon64.a |50.6%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/brotli/c/common/libbrotli-c-common.a |50.6%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/base64/plain64/liblibs-base64-plain64.a |50.6%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/blobstorage/vdisk/hulldb/bulksst_add/libvdisk-hulldb-bulksst_add.a |50.6%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/c-ares/libcontrib-libs-c-ares.a |50.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/blobstorage/vdisk/hulldb/cache_block/libvdisk-hulldb-cache_block.a |50.7%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/base64/avx2/liblibs-base64-avx2.a |50.5%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/fastlz/libcontrib-libs-fastlz.a |50.6%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/clang20-rt/lib/asan_cxx/libclang_rt.asan_cxx-x86_64.a |50.6%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/brotli/c/enc/libbrotli-c-enc.a |50.6%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/blobstorage/vdisk/hulldb/barriers/libvdisk-hulldb-barriers.a |50.6%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/blobstorage/vdisk/hullop/hullcompdelete/libvdisk-hullop-hullcompdelete.a |50.6%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/cms/console/validators/libcms-console-validators.a |50.6%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/deprecated/yajl/libcontrib-deprecated-yajl.a |50.6%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/openssl/libcontrib-libs-openssl.a |50.7%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/flatbuffers/libcontrib-libs-flatbuffers.a |50.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/blobstorage/vdisk/anubis_osiris/libblobstorage-vdisk-anubis_osiris.a |50.7%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/antlr3_cpp_runtime/libcontrib-libs-antlr3_cpp_runtime.a |50.7%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/cxxsupp/libcxxabi-parts/liblibs-cxxsupp-libcxxabi-parts.a |50.6%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/blobstorage/vdisk/balance/libblobstorage-vdisk-balance.a |50.7%| [AR] {BAZEL_DOWNLOAD} $(B)/certs/libcerts.global.a |50.7%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/double-conversion/libcontrib-libs-double-conversion.a |50.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/blobstorage/vdisk/synclog/libblobstorage-vdisk-synclog.a |50.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/base/libydb-core-base.a |50.7%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/farmhash/libcontrib-libs-farmhash.a |50.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/client/scheme_cache_lib/libcore-client-scheme_cache_lib.a |50.6%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/backtrace/libcontrib-libs-backtrace.a |50.6%| [AR] {BAZEL_DOWNLOAD} 
$(B)/ydb/core/blobstorage/vdisk/synclog/phantom_flag_storage/libvdisk-synclog-phantom_flag_storage.a |50.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/blockstore/core/libcore-blockstore-core.a |50.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/blobstorage/vdisk/hulldb/compstrat/libvdisk-hulldb-compstrat.a |50.7%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/apache/orc-format/liblibs-apache-orc-format.a |50.7%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/grpc/third_party/address_sorting/libgrpc-third_party-address_sorting.a |50.6%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/hyperscan/runtime_avx2/liblibs-hyperscan-runtime_avx2.a |50.6%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/client/minikql_compile/libcore-client-minikql_compile.a |50.6%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/formats/arrow/accessor/common/liblibrary-formats-arrow-accessor-common.a |50.6%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/filestore/core/libcore-filestore-core.a |50.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/blobstorage/lwtrace_probes/libcore-blobstorage-lwtrace_probes.a |50.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/blobstorage/vdisk/hulldb/recovery/libvdisk-hulldb-recovery.a |50.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/backup/impl/libcore-backup-impl.a |50.7%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/farmhash/arch/sse42_aesni/libfarmhash-arch-sse42_aesni.a |50.7%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/farmhash/arch/sse42/libfarmhash-arch-sse42.a |50.6%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/client/metadata/libcore-client-metadata.a |50.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/control/lib/base/libcontrol-lib-base.a |50.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/blobstorage/incrhuge/libcore-blobstorage-incrhuge.a |50.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/change_exchange/libydb-core-change_exchange.a |50.7%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/antlr4_cpp_runtime/libcontrib-libs-antlr4_cpp_runtime.a |50.7%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/hdr_histogram/libcontrib-libs-hdr_histogram.a |50.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/blobstorage/vdisk/repl/libblobstorage-vdisk-repl.a |50.6%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/cms/console/util/libcms-console-util.a |50.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/blobstorage/vdisk/hulldb/base/libvdisk-hulldb-base.a |50.7%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/ibdrv/libcontrib-libs-ibdrv.a |50.7%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/libc_compat/libcontrib-libs-libc_compat.a |50.7%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/libbz2/libcontrib-libs-libbz2.a |50.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/driver_lib/base_utils/libbase_utils.a |50.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/external_sources/object_storage/libcore-external_sources-object_storage.a |50.7%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/apache/avro/liblibs-apache-avro.a |50.8%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/blobstorage/vdisk/scrub/libblobstorage-vdisk-scrub.a |50.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/external_sources/object_storage/inference/libexternal_sources-object_storage-inference.a |50.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/control/lib/generated/libcontrol-lib-generated.a |50.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/formats/arrow/accessor/dictionary/libarrow-accessor-dictionary.a |50.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/formats/arrow/accessor/abstract/libarrow-accessor-abstract.a |50.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/external_sources/libydb-core-external_sources.a |50.8%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/llvm16/lib/Transforms/IPO/liblib-Transforms-IPO.a |50.8%| [CC] {BAZEL_DOWNLOAD} 
$(S)/ydb/core/client/server/msgbus_server_types.cpp |50.8%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/formats/arrow/accessor/sparsed/libarrow-accessor-sparsed.a |50.7%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/hyperscan/runtime_core2/liblibs-hyperscan-runtime_core2.a |50.7%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/libaio/static/liblibs-libaio-static.a |50.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/client/server/msgbus_server_node_registration.cpp |50.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/driver_lib/cli_config_base/libcore-driver_lib-cli_config_base.a |50.8%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/clang20-rt/lib/asan/libclang_rt.asan-x86_64.a |50.8%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/schemeshard/olap/operations/alter/common/liboperations-alter-common.a |50.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/client/server/msgbus_server_tx_request.cpp |50.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/client/server/msgbus_server_test_shard_request.cpp |50.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/cms/libydb-core-cms.global.a |50.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/client/server/http_ping.cpp |50.8%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/liblibs-aws-sdk-cpp-aws-cpp-sdk-core.a |50.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/library/yql/providers/dq/service/interconnect_helpers.cpp |50.8%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/apache/orc/liblibs-apache-orc.a |50.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/client/server/msgbus_blobstorage_config.cpp |50.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/client/server/grpc_proxy_status.cpp |50.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/client/server/ic_nodes_cache_service.cpp |50.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/client/server/msgbus_server_cms.cpp |50.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/client/server/msgbus_server.cpp |50.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/driver_lib/run/config_helpers.cpp |50.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/client/server/msgbus_server_hive_create_tablet.cpp |50.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/driver_lib/run/auto_config_initializer.cpp |50.8%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/driver_lib/cli_base/libcli_base.a |50.8%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/llvm16/lib/CodeGen/SelectionDAG/liblib-CodeGen-SelectionDAG.a |50.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/driver_lib/run/main.cpp |50.8%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/blobstorage/vdisk/syncer/libblobstorage-vdisk-syncer.a |50.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/driver_lib/run/service_initializer.cpp |50.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/driver_lib/run/factories.cpp |50.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/driver_lib/cli_utils/libcli_utils.a |50.7%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/llvm16/lib/Transforms/Scalar/liblib-Transforms-Scalar.a |50.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/driver_lib/run/config_parser.cpp |50.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/providers/dq/service/grpc_session.cpp |50.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/client/server/msgbus_http_server.cpp |50.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/library/yql/providers/dq/service/service_node.cpp |50.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/client/server/msgbus_server_configdummy.cpp |50.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/client/server/msgbus_server_tablet_state.cpp |50.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/client/server/msgbus_server_fill_node.cpp |50.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/client/server/msgbus_server_drain_node.cpp |50.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/client/server/msgbus_server_console.cpp |50.8%| [CC] 
{BAZEL_DOWNLOAD} $(S)/ydb/core/client/server/msgbus_server_tablet_counters.cpp |50.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/client/server/msgbus_server_ic_debug.cpp |50.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/driver_lib/run/config.cpp |50.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/client/server/msgbus_server_resolve_node.cpp |50.8%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/formats/arrow/accessor/composite_serial/libarrow-accessor-composite_serial.a |50.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/client/server/msgbus_server_scheme_initroot.cpp |50.9%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/formats/arrow/common/libformats-arrow-common.a |50.9%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/formats/arrow/accessor/plain/libarrow-accessor-plain.global.a |50.9%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/formats/arrow/accessor/plain/libarrow-accessor-plain.a |50.8%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/formats/arrow/switch/libformats-arrow-switch.a |50.8%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/formats/arrow/transformer/libformats-arrow-transformer.a |50.8%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/formats/arrow/splitter/libformats-arrow-splitter.a |50.8%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/formats/arrow/serializer/libformats-arrow-serializer.a |50.8%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/checkpoint_storage/events/liblibs-checkpoint_storage-events.a |50.9%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/formats/arrow/serializer/libformats-arrow-serializer.global.a |50.9%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/formats/arrow/save_load/libformats-arrow-save_load.a |50.9%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/audit/events/liblibs-audit-events.a |50.8%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/audit/libfq-libs-audit.a |50.8%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/driver_lib/version/libversion.a |50.8%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/checkpointing_common/libfq-libs-checkpointing_common.a |50.9%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/formats/libydb-core-formats.a |50.9%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tablet_flat/test/libs/table/libtest-libs-table.a |50.9%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/formats/arrow/rows/libformats-arrow-rows.a |50.9%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/blobstorage/nodewarden/libcore-blobstorage-nodewarden.a |50.8%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/control_plane_proxy/events/liblibs-control_plane_proxy-events.a |50.9%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/common/libfq-libs-common.a |50.9%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/hmac/libfq-libs-hmac.a |50.9%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/events/libfq-libs-events.a |50.9%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/cloud_audit/libfq-libs-cloud_audit.a |50.8%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/udfs/common/clickhouse/client/libclickhouse_client_udf.global.a |50.8%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/liblibs-aws-sdk-cpp-aws-cpp-sdk-s3.a |50.8%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/compute/ydb/synchronization_service/libcompute-ydb-synchronization_service.a |50.9%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/health/libfq-libs-health.a |50.9%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/control_plane_config/libfq-libs-control_plane_config.a |50.9%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/db_schema/libfq-libs-db_schema.a |50.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/providers/dq/service/grpc_service.cpp |50.9%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/grpc/libfq-libs-grpc.a |50.8%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/mock/libfq-libs-mock.a |50.8%| [AR] 
{BAZEL_DOWNLOAD} $(B)/ydb/core/blob_depot/libydb-core-blob_depot.a |50.8%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/formats/arrow/accessor/sub_columns/libarrow-accessor-sub_columns.a |50.8%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/blobstorage/vdisk/skeleton/libblobstorage-vdisk-skeleton.a |50.8%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/gateway/libfq-libs-gateway.a |50.9%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/logs/libfq-libs-logs.a |50.9%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/graph_params/proto/liblibs-graph_params-proto.a |50.9%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/db_id_async_resolver_impl/libfq-libs-db_id_async_resolver_impl.a |50.9%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/compute/ydb/control_plane/libcompute-ydb-control_plane.a |50.9%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/metrics/libfq-libs-metrics.a |50.9%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/rate_limiter/events/liblibs-rate_limiter-events.a |50.9%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/digest/argonish/internal/proxies/sse2/libinternal-proxies-sse2.a |51.0%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/init/libfq-libs-init.a |51.0%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/rate_limiter/quoter_service/liblibs-rate_limiter-quoter_service.a |50.9%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/quota_manager/proto/liblibs-quota_manager-proto.a |50.9%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/test_connection/events/liblibs-test_connection-events.a |50.9%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/tasks_packer/libfq-libs-tasks_packer.a |50.9%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/rate_limiter/control_plane_service/liblibs-rate_limiter-control_plane_service.a |51.0%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/row_dispatcher/protos/liblibs-row_dispatcher-protos.a |51.0%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/cms/libydb-core-cms.a |50.8%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/signer/libfq-libs-signer.a |50.9%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/control_plane_proxy/actors/liblibs-control_plane_proxy-actors.a |50.9%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/shared_resources/interface/liblibs-shared_resources-interface.a |50.9%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/shared_resources/libfq-libs-shared_resources.a |50.9%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/row_dispatcher/purecalc_compilation/liblibs-row_dispatcher-purecalc_compilation.a |50.9%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/quota_manager/libfq-libs-quota_manager.a |50.9%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/row_dispatcher/events/liblibs-row_dispatcher-events.a |50.9%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/compute/ydb/liblibs-compute-ydb.a |51.0%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/grpc_services/cancelation/protos/libgrpc_services-cancelation-protos.a |51.0%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/private_client/libfq-libs-private_client.a |51.0%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/test_connection/libfq-libs-test_connection.a |51.0%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/grpc_caching/libydb-core-grpc_caching.a |51.0%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/row_dispatcher/format_handler/parsers/librow_dispatcher-format_handler-parsers.a |50.9%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/actors/libfq-libs-actors.a |50.9%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/kqp/common/shutdown/libkqp-common-shutdown.a |51.0%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/grpc_services/counters/libcore-grpc_services-counters.a |51.0%| [AR] {BAZEL_DOWNLOAD} 
$(B)/ydb/core/kqp/common/buffer/libkqp-common-buffer.a |51.0%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/kqp/common/compilation/libkqp-common-compilation.a |51.0%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/kqp/common/events/libkqp-common-events.a |51.0%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/kqp/compile_service/helpers/libkqp-compile_service-helpers.a |51.0%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/kqp/common/result_set_format/libkqp-common-result_set_format.a |50.9%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/keyvalue/protos/libcore-keyvalue-protos.a |50.9%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/kqp/common/simple/libkqp-common-simple.a |50.9%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/kqp/gateway/behaviour/external_data_source/libgateway-behaviour-external_data_source.global.a |50.9%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/row_dispatcher/purecalc_no_pg_wrapper/liblibs-row_dispatcher-purecalc_no_pg_wrapper.a |50.9%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/kqp/gateway/behaviour/resource_pool/libgateway-behaviour-resource_pool.a |51.0%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/kqp/gateway/actors/libkqp-gateway-actors.a |51.0%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/kqp/counters/libcore-kqp-counters.a |50.8%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/kqp/libydb-core-kqp.global.a |50.9%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/kqp/gateway/behaviour/external_data_source/libgateway-behaviour-external_data_source.a |51.0%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/row_dispatcher/libfq-libs-row_dispatcher.a |51.0%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/kqp/finalize_script_service/libcore-kqp-finalize_script_service.a |51.0%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/kqp/gateway/local_rpc/libkqp-gateway-local_rpc.a |51.0%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/kqp/proxy_service/proto/libkqp-proxy_service-proto.a |51.0%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/kqp/gateway/utils/libkqp-gateway-utils.a |51.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/kqp/workload_service/common/libkqp-workload_service-common.a |51.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/kqp/common/libcore-kqp-common.a |51.0%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/kqp/compile_service/libcore-kqp-compile_service.a |51.0%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/kqp/topics/libcore-kqp-topics.a |51.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/persqueue/events/libcore-persqueue-events.a |51.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/mon/audit/libcore-mon-audit.a |51.0%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/kqp/query_data/libcore-kqp-query_data.a |51.0%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/persqueue/common/proxy/libpersqueue-common-proxy.a |51.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/persqueue/common/libcore-persqueue-common.a |51.0%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/mind/address_classification/libcore-mind-address_classification.a |51.0%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/persqueue/pqtablet/common/libpersqueue-pqtablet-common.a |51.0%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/testlib/basics/libcore-testlib-basics.a |51.0%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/kqp/opt/peephole/libkqp-opt-peephole.a |51.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/nbs/libcore-protos-nbs.a |51.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/persqueue/pqtablet/cache/libpersqueue-pqtablet-cache.a |51.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/persqueue/public/write_meta/libpersqueue-public-write_meta.a |51.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/mon/libydb-core-mon.a |51.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/kqp/workload_service/actors/libkqp-workload_service-actors.a |51.1%| [AR] {BAZEL_DOWNLOAD} 
$(B)/ydb/core/mon_alloc/libydb-core-mon_alloc.a |51.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/local_pgwire/libydb-core-local_pgwire.a |51.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/public_http/libydb-core-public_http.a |51.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/persqueue/public/partition_key_range/libpersqueue-public-partition_key_range.a |51.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/persqueue/pqtablet/quota/libpersqueue-pqtablet-quota.a |51.2%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/persqueue/public/list_topics/libpersqueue-public-list_topics.a |51.2%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/pgproxy/protos/libcore-pgproxy-protos.a |51.2%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/out/libcore-protos-out.a |50.8%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/persqueue/public/libcore-persqueue-public.a |50.8%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/cms/console/libcore-cms-console.a |50.8%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/persqueue/public/partition_index_generator/libpersqueue-public-partition_index_generator.a |50.9%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/schemeshard/libcore-protos-schemeshard.a |50.9%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/persqueue/public/mlp/libpersqueue-public-mlp.a |50.9%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/quoter/public/libcore-quoter-public.a |50.9%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/raw_socket/libydb-core-raw_socket.a |51.0%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/persqueue/pqtablet/blob/libpersqueue-pqtablet-blob.a |51.0%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/kqp/compute_actor/libcore-kqp-compute_actor.a |51.0%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/scheme/protos/libcore-scheme-protos.a |51.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/public_http/protos/libcore-public_http-protos.a |50.9%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/resource_pools/libydb-core-resource_pools.a |51.0%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/protobuf/builtin_proto/protos_from_protoc/libpy3protobuf-builtin_proto-protos_from_protoc.global.a |51.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/scheme/libydb-core-scheme.a |51.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/security/certificate_check/libcore-security-certificate_check.a |51.2%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/persqueue/writer/libcore-persqueue-writer.a |51.2%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/sys_view/libydb-core-sys_view.a |51.3%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/security/ldap_auth_provider/libcore-security-ldap_auth_provider.a |51.3%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/scheme_types/libydb-core-scheme_types.a |51.0%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/sys_view/compile_cache/libcore-sys_view-compile_cache.a |51.0%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/kqp/proxy_service/libcore-kqp-proxy_service.a |51.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/statistics/database/libcore-statistics-database.a |51.2%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/grpc_services/libydb-core-grpc_services.a |51.2%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/quoter/libydb-core-quoter.a |51.2%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/schemeshard/olap/bg_tasks/protos/libolap-bg_tasks-protos.a |51.2%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/sys_view/pg_tables/libcore-sys_view-pg_tables.a |50.5%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/pgproxy/libydb-core-pgproxy.a |50.5%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/yt/yql/providers/yt/provider/libproviders-yt-provider.a |50.6%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/sys_view/nodes/libcore-sys_view-nodes.a |50.8%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/kqp/session_actor/libcore-kqp-session_actor.a |50.1%| [AR] {BAZEL_DOWNLOAD} 
$(B)/ydb/core/statistics/service/libcore-statistics-service.a |50.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/sys_view/sessions/libcore-sys_view-sessions.a |50.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/testlib/actors/libcore-testlib-actors.a |50.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/sys_view/resource_pools/libcore-sys_view-resource_pools.a |50.2%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/yt/yt/client/libyt-yt-client.a |50.2%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/sys_view/partition_stats/libcore-sys_view-partition_stats.a |49.9%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/sys_view/resource_pool_classifiers/libcore-sys_view-resource_pool_classifiers.a |49.9%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/persqueue/pqtablet/libcore-persqueue-pqtablet.a |49.9%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/schemeshard/olap/common/libschemeshard-olap-common.a |50.0%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tablet_flat/protos/libcore-tablet_flat-protos.a |50.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/bg_tasks/protos/libcolumnshard-bg_tasks-protos.a |50.2%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/sys_view/tablets/libcore-sys_view-tablets.a |50.3%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/backup/import/protos/libbackup-import-protos.a |50.3%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/sys_view/streaming_queries/libcore-sys_view-streaming_queries.a |48.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/backup/import/libcolumnshard-backup-import.global.a |48.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/client/server/msgbus_server_pq_metarequest.cpp |48.8%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/sys_view/service/libcore-sys_view-service.a |48.8%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/sys_view/show_create/libcore-sys_view-show_create.a |48.9%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/sys_view/processor/libcore-sys_view-processor.a |48.9%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/balance_coverage/libcore-tx-balance_coverage.a |48.9%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tracing/libydb-core-tracing.a |48.9%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/backup/async_jobs/libcolumnshard-backup-async_jobs.a |49.0%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/backup/import/libcolumnshard-backup-import.a |49.0%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/yt/yt/core/libyt-yt-core.a |48.9%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/sys_view/auth/libcore-sys_view-auth.a |48.9%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/sys_view/storage/libcore-sys_view-storage.a |49.0%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/engines/storage/optimizer/lcbuckets/constructor/liboptimizer-lcbuckets-constructor.global.a |49.0%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/expat/libcontrib-libs-expat.a |49.0%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/load_test/libydb-core-load_test.a |49.0%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/schemeshard/olap/indexes/libschemeshard-olap-indexes.a |49.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/schemeshard/olap/layout/libschemeshard-olap-layout.a |49.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/bg_tasks/events/libcolumnshard-bg_tasks-events.a |49.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/client/server/msgbus_server_pq_metacache.cpp |49.0%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/backup/iscan/libcolumnshard-backup-iscan.a |49.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/sdk/cpp/src/client/impl/internal/retry/libimpl-internal-retry.a |49.2%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/services/metadata/abstract/libservices-metadata-abstract.a |49.2%| [AR] {BAZEL_DOWNLOAD} 
$(B)/ydb/public/sdk/cpp/src/client/topic/common/libclient-topic-common.a |49.2%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/bg_tasks/abstract/libcolumnshard-bg_tasks-abstract.a |49.2%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/bg_tasks/manager/libcolumnshard-bg_tasks-manager.a |49.1%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/hyperscan/libcontrib-libs-hyperscan.a |49.2%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/transfer/libydb-core-transfer.a |49.2%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/cxxsupp/libcxx/liblibs-cxxsupp-libcxx.a |49.2%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/yql/providers/yt/lib/yt_download/libyt-lib-yt_download.a |49.2%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/sdk/cpp/src/client/types/credentials/libclient-types-credentials.a |49.3%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/yql/providers/yt/lib/url_mapper/libyt-lib-url_mapper.a |49.3%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/statistics/aggregator/libcore-statistics-aggregator.a |49.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/client/server/grpc_server.cpp |49.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/client/server/msgbus_server_persqueue.cpp |49.4%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/sdk/cpp/src/client/types/credentials/login/libtypes-credentials-login.a |49.5%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/yql/providers/yt/mkql_dq/libproviders-yt-mkql_dq.a |49.5%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/sdk/cpp/src/client/types/credentials/oauth2_token_exchange/libtypes-credentials-oauth2_token_exchange.a |49.5%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/yql/providers/yt/lib/yt_token_resolver/libyt-lib-yt_token_resolver.a |49.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/driver_lib/run/run.cpp |49.5%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/yql/providers/yt/proto/libproviders-yt-proto.a |49.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/testlib/basics/appdata.cpp |49.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/testlib/basics/helpers.cpp |49.6%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/yql/providers/yt/opt/libproviders-yt-opt.a |49.6%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/yt/build/libyt-yt-build.a |49.6%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/yt/client/query_tracker_client/libyt-client-query_tracker_client.a |49.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/persqueue/pqtablet/partition/libpersqueue-pqtablet-partition.a |49.8%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/yql/providers/ytflow/integration/proto/libytflow-integration-proto.a |49.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/blobstorage/pdisk/mock/libblobstorage-pdisk-mock.a |49.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/kqp/runtime/libcore-kqp-runtime.a |49.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/mind/libydb-core-mind.a |49.8%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/yt/core/misc/isa_crc64/libisa-l_crc_yt_patch.a |49.8%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/schemeshard/olap/columns/libschemeshard-olap-columns.a |49.8%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/yt/library/erasure/libyt-library-erasure.a |49.9%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/yt/library/auth/libyt-library-auth.a |49.9%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/yt/library/signals/libyt-library-signals.a |50.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/testlib/basics/services.cpp |50.0%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/yt/library/re2/libyt-library-re2.a |49.9%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/yt/library/quantile_digest/libyt-library-quantile_digest.a |50.0%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/yt/library/profiling/resource_tracker/liblibrary-profiling-resource_tracker.global.a |50.0%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/restricted/llhttp/libcontrib-restricted-llhttp.a |50.1%| [CC] {BAZEL_DOWNLOAD, FAILED} 
$(S)/ydb/core/client/server/msgbus_server_scheme_request.cpp |50.1%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/yt/library/ytprof/api/liblibrary-ytprof-api.a |50.1%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/yt/library/tracing/libyt-library-tracing.a |50.0%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/ftxui/libcontrib-libs-ftxui.a |50.0%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/yt/library/undumpable/libyt-library-undumpable.a |50.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/data_accessor/local_db/libcolumnshard-data_accessor-local_db.a |50.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/column_fetching/libtx-columnshard-column_fetching.a |50.2%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/kqp/executer_actor/libcore-kqp-executer_actor.a |50.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/client/server/msgbus_server_proxy.cpp |50.2%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/data_accessor/libtx-columnshard-data_accessor.a |50.2%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/yt/library/profiling/resource_tracker/liblibrary-profiling-resource_tracker.a |50.3%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/common/protos/libcolumnshard-common-protos.a |50.3%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/data_accessor/local_db/libcolumnshard-data_accessor-local_db.global.a |50.4%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/data_locks/manager/libcolumnshard-data_locks-manager.a |50.3%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yql/providers/yt/provider/phy_opt/yql_yt_phy_opt.cpp |50.4%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yql/providers/yt/provider/yql_yt_forwarding_gateway.cpp |50.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/client/server/msgbus_server_pq_read_session_info.cpp |50.5%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yql/providers/yt/provider/phy_opt/yql_yt_phy_opt_create.cpp |50.4%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/common/libtx-columnshard-common.a |50.4%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/yt_proto/yt/core/libyt_proto-yt-core.a |50.5%| PREPARE $(OS_SDK_ROOT-sbr:243881345) - 5.86 MB |50.5%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yql/providers/yt/provider/yql_yt_ytflow_optimize.cpp |50.5%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yql/providers/yt/provider/yql_yt_ytflow_integration.cpp |50.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/driver_lib/run/kikimr_services_initializers.cpp |50.4%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/tablet_client/helpers.cpp |50.4%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yql/providers/yt/provider/phy_opt/yql_yt_phy_opt_field_subset.cpp |50.5%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/datashard/ut_common/libtx-datashard-ut_common.a |50.5%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/data_reader/libtx-columnshard-data_reader.a |50.5%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yql/providers/yt/provider/yql_yt_wide_flow.cpp |50.5%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yql/providers/yt/provider/phy_opt/yql_yt_phy_opt_weak_fields.cpp |50.6%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/sdk/cpp/src/client/table/impl/libclient-table-impl.a |50.6%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yql/providers/yt/provider/phy_opt/yql_yt_phy_opt_join.cpp |50.6%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/actions/invoker_detail.cpp |50.7%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/table_client/public.cpp |50.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/persqueue/ut/common/libpersqueue-ut-common.a |50.7%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/bus/tcp/packet.cpp |50.6%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/table_client/wire_protocol.cpp |50.6%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/table_client/schemaless_dynamic_table_writer.cpp |50.6%| [AR] {BAZEL_DOWNLOAD} 
$(B)/ydb/core/kqp/ut/common/libkqp-ut-common.a |50.6%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/api/client.cpp |50.6%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/table_client/pipe.cpp |50.7%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/tablet_client/config.cpp |50.7%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/bus/tcp/local_bypass.cpp |50.7%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yql/providers/yt/provider/phy_opt/yql_yt_phy_opt_content.cpp |50.7%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/concurrency/async_rw_lock.cpp |50.7%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yql/providers/yt/provider/phy_opt/yql_yt_phy_opt_merge.cpp |50.8%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/ytree/yson_struct_update.cpp |50.8%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/concurrency/async_semaphore.cpp |50.8%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/misc/relaxed_mpsc_queue.cpp |50.8%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/concurrency/async_stream_pipe.cpp |50.9%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yql/providers/yt/provider/phy_opt/yql_yt_phy_opt_fuse.cpp |50.7%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/yt/core/http/libyt-core-http.a |50.8%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/concurrency/async_stream_helpers.cpp |50.8%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/yt/core/libyt-yt-core.global.a |50.8%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/table_client/schema_serialization_helpers.cpp |50.8%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yql/providers/yt/provider/phy_opt/yql_yt_phy_opt_misc.cpp |50.8%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/ytree/yson_struct_detail.cpp |50.9%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/concurrency/config.cpp |50.9%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yql/providers/yt/provider/phy_opt/yql_yt_phy_opt_partition.cpp |50.9%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/testlib/libydb-core-testlib.a |51.0%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/ytree/serialize.cpp |51.0%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/ytree/interned_attributes.cpp |51.0%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/yson/tokenizer.cpp |51.0%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/yson/string_merger.cpp |51.0%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/yson/token.cpp |50.9%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/yson/null_consumer.cpp |51.0%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/yson/token_writer.cpp |51.0%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yql/providers/yt/provider/phy_opt/yql_yt_phy_opt_sort.cpp |51.0%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yql/providers/yt/provider/phy_opt/yql_yt_phy_opt_write.cpp |51.0%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/rpc/request_queue_provider.cpp |51.0%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/rpc/public.cpp |51.1%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/tracing/public.cpp |51.1%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yql/providers/yt/provider/yql_yt_dq_hybrid.cpp |51.1%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/threading/thread.cpp |51.1%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/ytree/ypath_service.cpp |51.1%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yql/providers/yt/provider/yql_yt_table.cpp |51.2%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/phoenix/type_def.cpp |51.1%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/net/public.cpp |51.1%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/rpc/caching_channel_factory.cpp |51.1%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/ytree/virtual.cpp |51.1%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/misc/serialize_dump.cpp |51.2%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/phoenix/load.cpp |51.2%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/rpc/bus/server.cpp |51.2%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tablet/libydb-core-tablet.a |51.2%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/net/config.cpp |51.3%| [LD] {BAZEL_DOWNLOAD, FAILED} 
$(B)/ydb/core/blobstorage/ut_blobstorage/ut_phantom_blobs/blobstorage-ut_blobstorage-ut_phantom_blobs |51.3%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/misc/pattern_formatter.cpp |51.3%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/misc/linear_probe.cpp |51.3%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/misc/slab_allocator.cpp |51.4%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/phoenix/schemas.cpp |51.3%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/rpc/bus/channel.cpp |51.4%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/json/json_parser.cpp |51.4%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/logging/appendable_compressed_file.cpp |51.4%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/sdk/cpp/src/client/topic/impl/libclient-topic-impl.a |51.5%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/json/json_writer.cpp |51.4%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/json/json_callbacks.cpp |51.4%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/json/helpers.cpp |51.5%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/dns/dns_resolver.cpp |51.6%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/dns/config.cpp |51.6%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/json/config.cpp |51.6%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/crypto/crypto.cpp |51.5%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/dns/ares_dns_resolver.cpp |51.6%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/concurrency/suspendable_action_queue.cpp |51.6%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/concurrency/profiling_helpers.cpp |51.6%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/concurrency/system_invokers.cpp |51.6%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/crypto/config.cpp |51.6%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/concurrency/thread_pool.cpp |51.6%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/concurrency/pollable_detail.cpp |51.7%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/concurrency/propagating_storage.cpp |51.7%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/concurrency/periodic_yielder.cpp |51.7%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/concurrency/two_level_fair_share_thread_pool.cpp |51.8%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/concurrency/periodic_executor.cpp |51.7%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/concurrency/lease_manager.cpp |51.7%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/concurrency/notify_manager.cpp |51.8%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/concurrency/nonblocking_batcher.cpp |51.8%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/blobstorage/ut_blobstorage/ut_restart_pdisk/blobstorage-ut_blobstorage-ut_restart_pdisk |51.8%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/concurrency/fls.cpp |51.9%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/concurrency/invoker_alarm.cpp |51.8%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/concurrency/invoker_queue.cpp |51.8%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/googleapis-common-protos/libcontrib-libs-googleapis-common-protos.a |51.9%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/concurrency/fiber_manager.cpp |51.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/testlib/basics/runtime.cpp |52.0%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/compression/brotli.cpp |52.0%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/mind/hive/libcore-mind-hive.a |52.0%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/kqp/opt/physical/effects/libopt-physical-effects.a |52.1%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/concurrency/fair_share_thread_pool.cpp |52.0%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/concurrency/fiber_scheduler_thread.cpp |52.1%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/concurrency/async_barrier.cpp |52.1%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/concurrency/delayed_executor.cpp |52.2%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/compression/zstd.cpp |52.2%| [CC] {BAZEL_DOWNLOAD} 
$(S)/yt/yt/core/concurrency/async_looper.cpp |52.2%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/compression/zlib.cpp |52.2%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/compression/stream.cpp |52.2%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/compression/public.cpp |52.3%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/icu/libcontrib-libs-icu.a |52.3%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/concurrency/action_queue.cpp |52.3%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/concurrency/fair_throttler.cpp |52.2%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/compression/bzip2.cpp |52.3%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/concurrency/fiber.cpp |52.3%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/table_client/composite_compare.cpp |52.3%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/compression/codec.cpp |52.3%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/signature/validator.cpp |52.3%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/actions/future.cpp |52.3%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/concurrency/execution_stack.cpp |52.3%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/actions/current_invoker.cpp |52.3%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/security_client/access_control.cpp |52.3%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/table_client/merge_table_schemas.cpp |52.3%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/table_client/comparator.cpp |52.3%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/bus/tcp/server.cpp |52.3%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/table_client/key_bound_compressor.cpp |52.3%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/table_client/columnar_statistics.cpp |52.3%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/table_client/column_rename_descriptor.cpp |52.4%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/object_client/public.cpp |52.4%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/table_client/chunk_stripe_statistics.cpp |52.3%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/scheduler/operation_cache.cpp |52.3%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/table_client/adapters.cpp |52.3%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/actions/codicil_guarded_invoker.cpp |52.3%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/table_client/blob_reader.cpp |52.4%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/table_client/check_schema_compatibility.cpp |52.4%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/signature/provided.cpp |52.4%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/signature/generator.cpp |52.4%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/scheduler/operation_id_or_alias.cpp |52.4%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/scheduler/spec_patch.cpp |52.4%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/node_tracker_client/helpers.cpp |52.4%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/queue_client/queue_rowset.cpp |52.4%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/job_tracker_client/helpers.cpp |52.4%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/mind/bscontroller/libcore-mind-bscontroller.a |52.4%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/yt_proto/yt/client/libyt_proto-yt-client.a |52.4%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tablet_flat/libydb-core-tablet_flat.a |52.4%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/chunk_client/data_statistics.cpp |52.4%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/complex_types/check_type_compatibility.cpp |52.4%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/object_client/helpers.cpp |52.4%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/node_tracker_client/public.cpp |52.4%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/chunk_client/ready_event_reader_base.cpp |52.4%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/misc/workload.cpp |52.5%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/misc/config.cpp |52.5%| [CC] {BAZEL_DOWNLOAD} 
$(S)/yt/yt/client/chunk_client/helpers.cpp |52.4%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/node_tracker_client/node_directory.cpp |52.5%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/chunk_client/public.cpp |52.5%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/chunk_client/read_limit.cpp |52.5%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/chaos_client/helpers.cpp |52.5%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/chaos_client/replication_card.cpp |52.5%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/kafka/requests.cpp |52.5%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/api/rpc_proxy/row_batch_writer.cpp |52.5%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/chaos_client/config.cpp |52.5%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/chunk_client/config.cpp |52.5%| [CC] {default-linux-x86_64, release, asan} $(S)/library/cpp/svnversion/svn_interface.c |52.5%| [CC] {default-linux-x86_64, release, asan} $(B)/library/cpp/build_info/build_info.cpp |52.5%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/api/security_client.cpp |52.5%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/chaos_client/replication_card_serialization.cpp |52.5%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/api/shuffle_client.cpp |52.5%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/bundle_controller_client/bundle_controller_settings.cpp |52.6%| [CC] {default-linux-x86_64, release, asan} $(S)/library/cpp/build_info/build_info_static.cpp |52.6%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/api/rpc_proxy/row_stream.cpp |52.5%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/api/public.cpp |52.5%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/api/private.cpp |52.5%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/api/options.cpp |52.5%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/bundle_controller_client/bundle_controller_client.cpp |52.5%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/api/etc_client.cpp |52.5%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/api/journal_client.cpp |52.5%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/api/rpc_proxy/row_batch_reader.cpp |52.6%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/api/rpc_proxy/address_helpers.cpp |52.6%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/api/rpc_proxy/connection.cpp |52.6%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/api/rpc_proxy/journal_reader.cpp |52.6%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/api/rowset.cpp |52.6%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/api/rpc_proxy/table_mount_cache.cpp |52.6%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/api/query_tracker_client.cpp |52.6%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/api/rpc_proxy/chaos_lease.cpp |52.6%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/api/delegating_transaction.cpp |52.6%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/api/distributed_file_client.cpp |52.7%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/api/rpc_proxy/transaction.cpp |52.6%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/api/client_cache.cpp |52.6%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/api/chaos_lease_base.cpp |52.6%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/api/distributed_table_client.cpp |52.6%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/api/internal_client.cpp |52.6%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/api/rpc_proxy/file_reader.cpp |52.6%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/api/rpc_proxy/target_cluster_injecting_channel.cpp |52.6%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/api/rpc_proxy/public.cpp |52.6%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/api/dynamic_table_transaction_mixin.cpp |52.7%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/api/distributed_file_session.cpp |52.7%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/api/rpc_proxy/table_writer.cpp |52.7%| [CC] {BAZEL_DOWNLOAD} 
$(S)/yt/yt/client/api/distributed_table_session.cpp |52.7%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/complex_types/merge_complex_types.cpp |52.7%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/api/rpc_proxy/file_writer.cpp |52.7%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/api/rpc_proxy/journal_writer.cpp |52.7%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/api/client_common.cpp |52.7%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/api/rpc_proxy/wire_row_stream.cpp |52.7%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/job_tracker_client/public.cpp |52.8%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/api/table_partition_reader.cpp |52.6%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/chunk_client/chunk_replica.cpp |52.6%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/api/rpc_proxy/table_reader.cpp |52.7%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/complex_types/check_yson_token.cpp |52.7%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/misc/method_helpers.cpp |52.7%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/api/skynet.cpp |52.7%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/complex_types/time_text.cpp |52.7%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/api/transaction.cpp |52.7%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/complex_types/infinite_entity.cpp |52.7%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/cypress_client/public.cpp |52.7%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/complex_types/uuid_text.cpp |52.7%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/api/persistent_queue.cpp |52.8%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/journal_client/public.cpp |52.8%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/api/rpc_proxy/timestamp_provider.cpp |52.8%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/misc/io_tags.cpp |52.8%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/hive/timestamp_map.cpp |52.8%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/kafka/packet.cpp |52.8%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/complex_types/yson_format_conversion.cpp |52.8%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/file_client/config.cpp |52.8%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/journal_client/config.cpp |52.8%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/api/table_client.cpp |52.9%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/hydra/version.cpp |52.9%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/api/rpc_proxy/connection_impl.cpp |52.9%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/api/sticky_transaction_pool.cpp |52.8%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/api/rpc_proxy/cluster_directory.cpp |52.8%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/kafka/protocol.cpp |52.8%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/query_client/query_builder.cpp |52.8%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/api/queue_transaction_mixin.cpp |52.8%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/query_client/query_statistics.cpp |52.8%| [CC] {default-linux-x86_64, release, asan} $(S)/library/cpp/svnversion/svnversion.cpp |52.9%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/election/public.cpp |52.9%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/security_client/public.cpp |52.9%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/api/config.cpp |52.9%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/api/rpc_proxy/helpers.cpp |52.9%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/queue_client/helpers.cpp |52.9%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/security_client/helpers.cpp |52.9%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/table_client/logical_type.cpp |52.9%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/queue_client/producer_client.cpp |53.0%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/actions/cancelable_context.cpp |52.9%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/signature/dynamic.cpp |52.9%| 
[CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/signature/signature.cpp |52.9%| [CC] {default-linux-x86_64, release, asan} $(B)/library/cpp/build_info/sandbox.cpp |52.9%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/queue_client/consumer_client.cpp |53.0%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/api/operation_client.cpp |53.0%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/security_client/acl.cpp |53.0%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/queue_client/common.cpp |53.0%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/table_client/key_bound.cpp |53.0%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/table_client/key.cpp |53.0%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/compression/snappy.cpp |53.0%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/api/rpc_proxy/transaction_impl.cpp |53.0%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/queue_client/partition_reader.cpp |53.0%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/compression/lzma.cpp |53.0%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/queue_client/config.cpp |53.0%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/compression/lz.cpp |53.0%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/table_client/helpers.cpp |53.0%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/table_client/constrained_schema.cpp |53.0%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/compression/dictionary_codec.cpp |53.0%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/table_client/column_sort_schema.cpp |53.1%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/concurrency/fair_share_invoker_queue.cpp |53.1%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/concurrency/fair_share_invoker_pool.cpp |53.0%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/actions/cancelation_token.cpp |53.0%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/concurrency/fair_share_queue_scheduler_thread.cpp |53.0%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/concurrency/single_queue_scheduler_thread.cpp |53.0%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/concurrency/scheduler_thread.cpp |53.1%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/api/rpc_proxy/client_base.cpp |53.1%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/concurrency/thread_affinity.cpp |53.1%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/concurrency/retrying_periodic_executor.cpp |53.0%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/table_client/config.cpp |53.0%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/concurrency/scheduled_executor.cpp |53.0%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/concurrency/quantized_executor.cpp |53.1%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/concurrency/fair_share_action_queue.cpp |53.0%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/concurrency/thread_pool_poller.cpp |53.0%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/concurrency/throughput_throttler.cpp |53.0%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/concurrency/thread_pool_detail.cpp |53.1%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/misc/fair_share_hierarchical_queue.cpp |53.1%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/misc/bitmap.cpp |53.0%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/crypto/tls.cpp |53.1%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/misc/backtrace.cpp |53.1%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/misc/error.cpp |53.1%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/misc/bit_packing.cpp |53.1%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/logging/stream_log_writer.cpp |53.1%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/logging/serializable_logger.cpp |53.1%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/logging/random_access_gzip.cpp |53.2%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/logging/fluent_log.cpp |53.2%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/logging/stream_output.cpp |53.2%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/logging/formatter.cpp |53.1%| [CC] {BAZEL_DOWNLOAD} 
$(S)/yt/yt/core/logging/logger_owner.cpp |53.1%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/logging/log_writer_detail.cpp |53.1%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/misc/backoff_strategy.cpp |53.1%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/misc/adjusted_exponential_moving_average.cpp |53.1%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/misc/adaptive_hedging_manager.cpp |53.1%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/logging/system_log_event_provider.cpp |53.1%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/logging/zstd_log_codec.cpp |53.2%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/misc/codicil.cpp |53.2%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/misc/blob_output.cpp |53.2%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/logging/file_log_writer.cpp |53.2%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/misc/checksum.cpp |53.2%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/misc/bloom_filter.cpp |53.2%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/misc/coro_pipe.cpp |53.2%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/misc/memory_usage_tracker.cpp |53.2%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/misc/bit_packed_unsigned_vector.cpp |53.2%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/misc/hazard_ptr.cpp |53.3%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/misc/id_generator.cpp |53.3%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/misc/configurable_singleton_def.cpp |53.3%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/misc/arithmetic_formula.cpp |53.1%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/misc/pool_allocator.cpp |53.1%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/misc/public.cpp |53.1%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/misc/random.cpp |53.2%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/misc/config.cpp |53.2%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/misc/process_exit_profiler.cpp |53.2%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/misc/histogram.cpp |53.2%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/misc/ref_counted_tracker.cpp |53.2%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/misc/parser_helpers.cpp |53.2%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/misc/inotify.cpp |53.2%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/misc/serialize.cpp |53.2%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/misc/statistic_path.cpp |53.2%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/misc/zerocopy_output_writer.cpp |53.3%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/misc/cache_config.cpp |53.3%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/misc/ref_counted_tracker_statistics_producer.cpp |53.3%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/misc/protobuf_helpers.cpp |53.3%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/misc/fs.cpp |53.3%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/misc/utf8_decoder.cpp |53.3%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/net/helpers.cpp |53.3%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/misc/shutdown.cpp |53.3%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/net/local_address.cpp |53.3%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/phoenix/context.cpp |53.4%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/misc/ref_counted_tracker_profiler.cpp |53.4%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/phoenix/descriptors.cpp |53.4%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/misc/proc.cpp |53.4%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/net/socket.cpp |53.4%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/rpc/authenticator.cpp |53.3%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/misc/statistics.cpp |53.3%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/rpc/authentication_identity.cpp |53.3%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/net/dialer.cpp |53.3%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/profiling/timing.cpp |53.4%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/phoenix/type_registry.cpp |53.4%| [CC] {BAZEL_DOWNLOAD} 
$(S)/yt/yt/core/rpc/dispatcher.cpp |53.4%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/net/listener.cpp |53.4%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/rpc/protocol_version.cpp |53.4%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/rpc/per_key_request_queue_provider.cpp |53.4%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/rpc/local_server.cpp |53.4%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/rpc/channel_detail.cpp |53.4%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/net/connection.cpp |53.4%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/rpc/local_channel.cpp |53.5%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/net/address.cpp |53.5%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/rpc/balancing_channel.cpp |53.5%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/rpc/hedging_channel.cpp |53.5%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/rpc/client.cpp |53.5%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/rpc/peer_priority_provider.cpp |53.4%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/misc/digest.cpp |53.4%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/logging/config.cpp |53.4%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/rpc/helpers.cpp |53.5%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/rpc/message_format.cpp |53.5%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/rpc/null_channel.cpp |53.5%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/rpc/message.cpp |53.5%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/rpc/overload_controlling_service_base.cpp |53.5%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/ypath/trie.cpp |53.5%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/rpc/service.cpp |53.5%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/rpc/dynamic_channel_pool.cpp |53.5%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/rpc/peer_discovery.cpp |53.5%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/rpc/overload_controller.cpp |53.6%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/rpc/serialized_channel.cpp |53.6%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/rpc/retrying_channel.cpp |53.5%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/rpc/roaming_channel.cpp |53.5%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/rpc/response_keeper.cpp |53.5%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/rpc/static_channel_factory.cpp |53.6%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/rpc/throttling_channel.cpp |53.6%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/rpc/server_detail.cpp |53.6%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/ypath/stack.cpp |53.6%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/ypath/helpers.cpp |53.5%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/rpc/viable_peer_registry.cpp |53.6%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/rpc/config.cpp |53.6%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/rpc/stream.cpp |53.6%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/tracing/allocation_tags.cpp |53.6%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/service_discovery/service_discovery.cpp |53.6%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/utilex/random.cpp |53.6%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/threading/spin_wait_slow_path_logger.cpp |53.6%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/ypath/tokenizer.cpp |53.6%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/ypath/token.cpp |53.6%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/yson/async_consumer.cpp |53.6%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/yson/lexer.cpp |53.6%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/yson/depth_limiting_yson_consumer.cpp |53.6%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/yson/list_verb_lazy_yson_consumer.cpp |53.6%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/yson/async_writer.cpp |53.7%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/yson/attributes_stripper.cpp |53.7%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/yson/attribute_consumer.cpp |53.7%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/yson/forwarding_consumer.cpp |53.6%| [CC] 
{BAZEL_DOWNLOAD} $(S)/yt/yt/core/rpc/service_detail.cpp |53.7%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/yson/config.cpp |53.7%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/yson/consumer.cpp |53.7%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/yson/stream.cpp |53.7%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/yson/protobuf_interop_options.cpp |53.7%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/yson/protobuf_helpers.cpp |53.7%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/yson/producer.cpp |53.7%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/yson/pull_parser_deserialize.cpp |53.7%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/statistics/database/ut/ydb-core-statistics-database-ut |53.7%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/yson/protobuf_interop_unknown_fields.cpp |53.7%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/yson/string_builder_stream.cpp |53.7%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/yson/syntax_checker.cpp |53.7%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/yson/parser.cpp |53.7%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/yson/string.cpp |53.7%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/yson/string_filter.cpp |53.8%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/ytree/node.cpp |53.7%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/ytree/convert.cpp |53.7%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/ytree/attribute_consumer.cpp |53.7%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/yson/writer.cpp |53.7%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/yson/ypath_filtering_consumer.cpp |53.7%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/yson/ypath_designated_consumer.cpp |53.7%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/yson/yson_builder.cpp |53.7%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/yson/protobuf_interop.cpp |53.7%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/grpc/libcontrib-libs-grpc.a |53.8%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/ytalloc/statistics_producer.cpp |53.8%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/ytree/attribute_filter.cpp |53.8%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/ytree/attributes.cpp |53.8%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/ytree/request_complexity_limits.cpp |53.8%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/ytalloc/bindings.cpp |53.8%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/ytree/ephemeral_attribute_owner.cpp |53.8%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/ytree/permission.cpp |53.8%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/ytree/system_attribute_provider.cpp |53.8%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/yson/pull_parser.cpp |53.7%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/ytree/request_complexity_limiter.cpp |53.7%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/ytree/exception_helpers.cpp |53.7%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/api/rpc_proxy/client_impl.cpp |53.8%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/ytree/helpers.cpp |53.8%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/ytree/tree_visitor.cpp |53.8%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/ytree/yson_struct.cpp |53.8%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/ytree/size.cpp |53.8%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/ytree/static_service_dispatcher.cpp |53.8%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/ytree/ypath_resolver.cpp |53.8%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/ytalloc/config.cpp |53.8%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/ytree/node_detail.cpp |53.8%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/control/ut/ydb-core-control-ut |53.9%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/bus/public.cpp |53.9%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/actions/invoker_util.cpp |53.9%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/concurrency/coroutine.cpp |53.9%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/ytree/tree_builder.cpp |53.9%| [CC] {BAZEL_DOWNLOAD} 
$(S)/yt/yt/core/bus/tcp/dispatcher.cpp |53.9%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/actions/invoker_pool.cpp |53.9%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/table_client/row_base.cpp |53.9%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/ytree/ephemeral_node_factory.cpp |53.9%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/table_client/row_buffer.cpp |53.9%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/table_client/record_helpers.cpp |53.8%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/bus/tcp/client.cpp |53.9%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/table_client/name_table.cpp |53.9%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/table_client/record_codegen_cpp.cpp |53.9%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/bus/tcp/dispatcher_impl.cpp |53.9%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/table_client/serialize.cpp |53.9%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/table_client/row_batch.cpp |53.9%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/table_client/versioned_io_options.cpp |53.9%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/table_client/table_output.cpp |53.9%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/ytree/service_combiner.cpp |53.9%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/table_client/schemaless_row_reorderer.cpp |54.0%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/table_client/table_consumer.cpp |54.0%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/table_client/timestamped_schema_helpers.cpp |54.0%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/ytree/ypath_detail.cpp |54.0%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/ytree/ypath_client.cpp |54.0%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/table_client/table_upload_options.cpp |54.0%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/table_client/versioned_reader.cpp |54.0%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/table_client/unordered_schemaful_reader.cpp |54.0%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/api/helpers.cpp |54.0%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/table_client/unversioned_value.cpp |53.9%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/table_client/versioned_row.cpp |53.9%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/ypath/parser_detail.cpp |54.0%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/table_client/value_consumer.cpp |54.0%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/bus/tcp/config.cpp |54.0%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/table_client/validate_logical_type.cpp |54.0%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/bus/tcp/connection.cpp |54.0%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/tablet_client/public.cpp |54.0%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/table_client/schema.cpp |54.0%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/transaction_client/remote_timestamp_provider.cpp |54.0%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/table_client/unversioned_row.cpp |54.0%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tools/stress_tool/ydb_stress_tool |54.1%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/transaction_client/config.cpp |54.1%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/tablet_client/watermark_runtime_data.cpp |54.1%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/tablet_client/table_mount_cache.cpp |54.1%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/transaction_client/noop_timestamp_provider.cpp |54.1%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/transaction_client/helpers.cpp |54.1%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/table_client/columnar.cpp |54.1%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/transaction_client/batching_timestamp_provider.cpp |54.1%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/tablet_client/table_mount_cache_detail.cpp |54.1%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/chaos_client/replication_card_cache.cpp |54.1%| 
[CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/transaction_client/timestamp_provider_base.cpp |54.1%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/api/delegating_client.cpp |54.1%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/ypath/rich.cpp |54.1%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/driver_lib/run/ut/ydb-core-driver_lib-run-ut |54.1%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yql/providers/yt/provider/phy_opt/yql_yt_phy_opt_ytql.cpp |54.1%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yql/providers/yt/provider/yql_yt_datasource_type_ann.cpp |54.1%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yql/providers/yt/provider/yql_yt_datasink_constraints.cpp |54.2%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yql/providers/yt/provider/yql_yt_datasink_finalize.cpp |54.2%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yql/providers/yt/provider/yql_yt_block_input.cpp |54.2%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yql/providers/yt/provider/yql_yt_cbo_helpers.cpp |54.2%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/schemeshard/ut_continuous_backup/ydb-core-tx-schemeshard-ut_continuous_backup |54.1%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yql/providers/yt/provider/yql_yt_block_io_filter.cpp |54.2%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/api/rpc_proxy/config.cpp |54.2%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/kqp/ut/scan/ydb-core-kqp-ut-scan |54.2%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yql/providers/yt/provider/yql_yt_block_io_utils.cpp |54.2%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yql/providers/yt/provider/yql_yt_datasource_constraints.cpp |54.2%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/kqp/ut/yql/ydb-core-kqp-ut-yql |54.2%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yql/providers/yt/provider/yql_yt_datasink.cpp |54.2%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yql/providers/yt/provider/yql_yt_datasink_trackable.cpp |54.2%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yql/providers/yt/provider/yql_yt_provider_impl.cpp |54.2%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yql/providers/yt/provider/yql_yt_datasource_exec.cpp |54.2%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/mediator/ut/ydb-core-tx-mediator-ut |54.2%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yql/providers/yt/provider/yql_yt_io_discovery_partitions.cpp |54.2%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yql/providers/yt/provider/yql_yt_load_columnar_stats.cpp |54.2%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yql/providers/yt/provider/yql_yt_gateway.cpp |54.2%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yql/providers/yt/provider/yql_yt_epoch.cpp |54.3%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/datashard/ut_locks/ydb-core-tx-datashard-ut_locks |54.2%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/datashard/build_index/ut/ydb-core-tx-datashard-build_index-ut |54.2%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yql/providers/yt/provider/yql_yt_dq_optimize.cpp |54.2%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yql/providers/yt/provider/yql_yt_datasink_type_ann.cpp |54.3%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/fq/libs/row_dispatcher/format_handler/ut/ydb-core-fq-libs-row_dispatcher-format_handler-ut |54.3%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yql/providers/yt/provider/yql_yt_intent_determination.cpp |54.3%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yql/providers/yt/provider/yql_yt_layers_integration.cpp |54.3%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yql/providers/yt/provider/yql_yt_datasource.cpp |54.3%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/schemeshard/ut_login_large/ydb-core-tx-schemeshard-ut_login_large |54.2%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yql/providers/yt/provider/yql_yt_datasink_exec.cpp |54.3%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yql/providers/yt/provider/yql_yt_dq_integration.cpp |54.3%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yql/providers/yt/provider/yql_yt_key.cpp |54.3%| [LD] 
{BAZEL_DOWNLOAD, FAILED} $(B)/ydb/public/sdk/cpp/src/client/persqueue_public/ut/with_offset_ranges_mode_ut/with_offset_ranges_mode_ut |54.3%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yql/providers/yt/provider/yql_yt_join_reorder.cpp |54.3%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tablet_flat/benchmark/core_tablet_flat_benchmark |54.3%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/datashard/ut_external_blobs/ydb-core-tx-datashard-ut_external_blobs |54.3%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/datashard/ut_compaction/ydb-core-tx-datashard-ut_compaction |54.3%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/schemeshard/ut_login/ydb-core-tx-schemeshard-ut_login |54.3%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/replication/ydb_proxy/ut/ydb-core-tx-replication-ydb_proxy-ut |54.3%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yql/providers/yt/provider/yql_yt_io_discovery_walk_folders.cpp |54.3%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yql/providers/yt/provider/yql_yt_mkql_compiler.cpp |54.3%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yql/providers/yt/provider/yql_yt_provider_context.cpp |54.3%| [CC] {BAZEL_DOWNLOAD} $(B)/yt/yql/providers/yt/provider/yql_yt_op_settings.h_serialized.cpp |54.3%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yql/providers/yt/provider/yql_yt_peephole.cpp |54.3%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yql/providers/yt/provider/yql_yt_op_settings.cpp |54.4%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yql/providers/yt/provider/yql_yt_load_table_meta.cpp |54.4%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yql/providers/yt/provider/yql_yt_op_hash.cpp |54.4%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yql/providers/yt/provider/yql_yt_physical_optimize.cpp |54.4%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yql/providers/yt/provider/yql_yt_table_desc.cpp |54.3%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yql/providers/yt/provider/yql_yt_io_discovery.cpp |54.3%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yql/providers/yt/provider/yql_yt_horizontal_join.cpp |54.3%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/restricted/libffi/libcontrib-restricted-libffi.a |54.4%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/yt_proto/yt/formats/libyt_proto-yt-formats.a |54.4%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yql/providers/yt/provider/yql_yt_provider.cpp |54.4%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/data_sharing/initiator/controller/libdata_sharing-initiator-controller.a |54.4%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/data_sharing/initiator/controller/libdata_sharing-initiator-controller.global.a |54.4%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yql/providers/yt/provider/yql_yt_optimize.cpp |54.4%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yql/providers/yt/provider/yql_yt_block_output.cpp |54.4%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/data_sharing/destination/session/libdata_sharing-destination-session.a |54.4%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/data_sharing/modification/events/libdata_sharing-modification-events.a |54.4%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yql/providers/yt/provider/yql_yt_helpers.cpp |54.4%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/data_sharing/modification/tasks/libdata_sharing-modification-tasks.a |54.4%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/data_sharing/modification/transactions/libdata_sharing-modification-transactions.a |54.4%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/engines/changes/actualization/controller/libchanges-actualization-controller.a |54.4%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/engines/reader/common_reader/iterator/libreader-common_reader-iterator.global.a |54.4%| [AR] {BAZEL_DOWNLOAD} 
$(B)/ydb/core/tx/columnshard/data_sharing/source/session/libdata_sharing-source-session.a |54.4%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/diagnostics/libtx-columnshard-diagnostics.a |54.4%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yql/providers/yt/provider/phy_opt/yql_yt_phy_opt_map.cpp |54.5%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/engines/changes/compaction/common/libchanges-compaction-common.a |54.5%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yql/providers/yt/provider/phy_opt/yql_yt_phy_opt_push.cpp |54.5%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/engines/changes/compaction/abstract/libchanges-compaction-abstract.a |54.5%| [AR] {BAZEL_DOWNLOAD} $(B)/library/python/filelock/libpy3library-python-filelock.global.a |54.4%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/data_sharing/manager/libcolumnshard-data_sharing-manager.a |54.4%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/program/libcore-tx-program.a |54.4%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/data_sharing/protos/libcolumnshard-data_sharing-protos.a |54.4%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/clang20-rt/lib/asan_static/libclang_rt.asan_static-x86_64.a |54.5%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/engines/changes/compaction/libengines-changes-compaction.a |54.5%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/engines/changes/counters/libengines-changes-counters.a |54.5%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/data_sharing/source/transactions/libdata_sharing-source-transactions.a |54.5%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/replication/ydb_proxy/local_proxy/libreplication-ydb_proxy-local_proxy.a |54.5%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/services/metadata/manager/libservices-metadata-manager.a |54.5%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/services/lib/actors/libservices-lib-actors.a |54.5%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yql/providers/yt/provider/phy_opt/yql_yt_phy_opt_helper.cpp |54.5%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/tools/python3/Modules/_sqlite/libpy3python3-Modules-_sqlite.global.a |54.5%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/engines/reader/abstract/libengines-reader-abstract.a |54.6%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/engines/changes/abstract/libengines-changes-abstract.a |54.6%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/engines/changes/compaction/sub_columns/libchanges-compaction-sub_columns.global.a |54.5%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/engines/changes/compaction/plain/libchanges-compaction-plain.a |54.5%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/engines/changes/compaction/sparsed/libchanges-compaction-sparsed.global.a |54.5%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/replication/ydb_proxy/libtx-replication-ydb_proxy.a |54.5%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yql/providers/yt/provider/yql_yt_join_impl.cpp |54.5%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/engines/changes/compaction/dictionary/libchanges-compaction-dictionary.global.a |54.5%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/engines/changes/compaction/sub_columns/libchanges-compaction-sub_columns.a |54.5%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/services/persqueue_cluster_discovery/libydb-services-persqueue_cluster_discovery.a |54.6%| [AR] {BAZEL_DOWNLOAD} $(B)/library/python/certifi/libpy3library-python-certifi.global.a |54.6%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/engines/protos/libcolumnshard-engines-protos.a |54.6%| [AR] {BAZEL_DOWNLOAD} 
$(B)/ydb/core/tx/columnshard/engines/reader/plain_reader/constructor/libreader-plain_reader-constructor.a |54.6%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/engines/reader/simple_reader/iterator/sys_view/abstract/libiterator-sys_view-abstract.a |54.6%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/engines/reader/actor/libengines-reader-actor.a |54.6%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yql/providers/yt/provider/phy_opt/yql_yt_phy_opt_lambda.cpp |54.6%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/engines/reader/simple_reader/constructor/libreader-simple_reader-constructor.a |54.6%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/engines/loading/libcolumnshard-engines-loading.a |54.6%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/data_sharing/source/events/libdata_sharing-source-events.a |54.6%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/engines/reader/common/libengines-reader-common.a |54.6%| [AR] {BAZEL_DOWNLOAD} $(B)/library/python/cores/libpy3library-python-cores.global.a |54.6%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yql/providers/yt/provider/yql_yt_logical_optimize.cpp |54.6%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yql/providers/yt/provider/phy_opt/yql_yt_phy_opt_key_range.cpp |54.6%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/engines/predicate/libcolumnshard-engines-predicate.a |54.6%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/sdk/cpp/src/library/operation_id/libsrc-library-operation_id.a |54.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/engines/reader/common_reader/common/libreader-common_reader-common.a |54.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/engines/reader/simple_reader/iterator/collections/libsimple_reader-iterator-collections.a |54.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/engines/reader/simple_reader/iterator/sync_points/libsimple_reader-iterator-sync_points.a |54.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/engines/reader/simple_reader/iterator/sys_view/optimizer/libiterator-sys_view-optimizer.global.a |54.6%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/services/metadata/secret/libservices-metadata-secret.global.a |54.6%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/engines/libtx-columnshard-engines.a |54.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/engines/storage/actualizer/scheme/libstorage-actualizer-scheme.a |54.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/engines/reader/simple_reader/iterator/sys_view/granules/libiterator-sys_view-granules.a |54.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/engines/reader/common_reader/constructor/libreader-common_reader-constructor.a |54.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/engines/reader/simple_reader/iterator/sys_view/optimizer/libiterator-sys_view-optimizer.a |54.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/engines/reader/common_reader/iterator/libreader-common_reader-iterator.a |54.7%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yql/providers/yt/provider/yql_yt_physical_finalizing.cpp |54.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/engines/changes/libcolumnshard-engines-changes.a |54.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/engines/reader/tracing/libengines-reader-tracing.a |54.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/engines/reader/simple_reader/iterator/sys_view/schemas/libiterator-sys_view-schemas.a |54.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/engines/reader/simple_reader/iterator/sys_view/schemas/libiterator-sys_view-schemas.global.a |54.7%| [AR] {BAZEL_DOWNLOAD} 
$(B)/ydb/core/tx/columnshard/engines/reader/simple_reader/iterator/sys_view/chunks/libiterator-sys_view-chunks.a |54.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/engines/reader/transaction/libengines-reader-transaction.a |54.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/engines/portions/libcolumnshard-engines-portions.a |54.7%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/tools/python3/lib2/py/libpy3python3-lib2-py.global.a |54.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/engines/reader/simple_reader/iterator/sys_view/chunks/libiterator-sys_view-chunks.global.a |54.8%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/engines/reader/simple_reader/duplicates/libreader-simple_reader-duplicates.a |54.8%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/engines/reader/simple_reader/iterator/sys_view/granules/libiterator-sys_view-granules.global.a |54.8%| PREPARE $(GDB) - 16.81 MB |54.8%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/libevent/event_thread/liblibs-libevent-event_thread.a |54.8%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/engines/scheme/column/libengines-scheme-column.a |54.8%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/liburing/libcontrib-libs-liburing.a |54.8%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/engines/reader/plain_reader/iterator/libreader-plain_reader-iterator.a |54.8%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/services/metadata/secret/libservices-metadata-secret.a |54.8%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/tools/python3/Lib/libpy3tools-python3-Lib.global.a |54.8%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/engines/scheme/abstract/libengines-scheme-abstract.a |54.8%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/libssh2/libcontrib-libs-libssh2.a |54.8%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/engines/reader/simple_reader/iterator/sys_view/portions/libiterator-sys_view-portions.a |54.8%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/engines/reader/simple_reader/iterator/libreader-simple_reader-iterator.a |54.8%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/libiconv/static/liblibs-libiconv-static.a |54.8%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/libfyaml/libcontrib-libs-libfyaml.a |54.8%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/engines/scheme/defaults/common/libscheme-defaults-common.a |54.8%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/engines/storage/actualizer/abstract/libstorage-actualizer-abstract.a |54.8%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/engines/scheme/versions/libengines-scheme-versions.a |54.8%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/llvm16/lib/BinaryFormat/libllvm16-lib-BinaryFormat.a |54.8%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/llvm16/lib/Bitstream/Reader/liblib-Bitstream-Reader.a |54.9%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/engines/scheme/defaults/protos/libscheme-defaults-protos.a |54.9%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/libunwind/libcontrib-libs-libunwind.a |54.9%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/engines/storage/actualizer/common/libstorage-actualizer-common.a |54.9%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/engines/scheme/libcolumnshard-engines-scheme.a |54.8%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/llvm16/lib/ExecutionEngine/Orc/Shared/libExecutionEngine-Orc-Shared.a |54.8%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/llvm16/lib/ExecutionEngine/MCJIT/liblib-ExecutionEngine-MCJIT.a |54.9%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/engines/storage/actualizer/counters/libstorage-actualizer-counters.a |54.9%| [AR] {BAZEL_DOWNLOAD, 
FAILED} $(B)/ydb/core/util/actorsys_test/libcore-util-actorsys_test.a |54.9%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/llvm16/lib/ExecutionEngine/Orc/TargetProcess/libExecutionEngine-Orc-TargetProcess.a |54.9%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/llvm16/lib/Demangle/libllvm16-lib-Demangle.a |54.9%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/engines/scheme/tiering/libengines-scheme-tiering.a |54.9%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/llvm16/lib/Bitcode/Writer/liblib-Bitcode-Writer.a |54.9%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/engines/storage/actualizer/index/libstorage-actualizer-index.a |54.9%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/llvm16/lib/Bitcode/Reader/liblib-Bitcode-Reader.a |54.9%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/llvm16/lib/IRPrinter/libllvm16-lib-IRPrinter.a |54.9%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/engines/scheme/indexes/abstract/libscheme-indexes-abstract.a |54.9%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/engines/storage/actualizer/tiering/libstorage-actualizer-tiering.a |54.9%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/llvm16/lib/Linker/libllvm16-lib-Linker.a |54.9%| [AR] {BAZEL_DOWNLOAD} $(B)/library/python/fs/libpy3library-python-fs.global.a |54.9%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/llvm16/lib/DebugInfo/CodeView/liblib-DebugInfo-CodeView.a |54.9%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/scheme_board/libcore-tx-scheme_board.a |54.9%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/llvm16/lib/ExecutionEngine/PerfJITEvents/liblib-ExecutionEngine-PerfJITEvents.a |54.9%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/sdk/cpp/src/client/types/libsrc-client-types.a |55.0%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/engines/storage/indexes/bits_storage/libstorage-indexes-bits_storage.a |55.0%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/llvm16/lib/Frontend/OpenMP/liblib-Frontend-OpenMP.a |55.0%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/llvm16/lib/Target/X86/TargetInfo/libTarget-X86-TargetInfo.a |54.9%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/llvm16/lib/Remarks/libllvm16-lib-Remarks.a |55.0%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/data_sharing/destination/events/libdata_sharing-destination-events.a |55.0%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/engines/storage/chunks/libengines-storage-chunks.a |55.0%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/llvm16/lib/Transforms/CFGuard/liblib-Transforms-CFGuard.a |55.0%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/llvm16/lib/Transforms/AggressiveInstCombine/liblib-Transforms-AggressiveInstCombine.a |55.0%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/llvm16/lib/Target/X86/AsmParser/libTarget-X86-AsmParser.a |55.0%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/nayuki_md5/libcontrib-libs-nayuki_md5.a |54.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/mediator/mediator_ut.cpp |54.9%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/ngtcp2/crypto/quictls/libngtcp2-crypto-quictls.a |54.9%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/llvm16/lib/DebugInfo/PDB/liblib-DebugInfo-PDB.a |55.0%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/llvm16/lib/TextAPI/libllvm16-lib-TextAPI.a |55.0%| [AR] {BAZEL_DOWNLOAD} $(B)/library/python/func/libpy3library-python-func.global.a |55.0%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/lzmasdk/libcontrib-libs-lzmasdk.a |55.0%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/llvm16/lib/TargetParser/libllvm16-lib-TargetParser.a |55.0%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/lz4/libcontrib-libs-lz4.a |55.0%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/nghttp3/libcontrib-libs-nghttp3.a |55.0%| [AR] 
{BAZEL_DOWNLOAD} $(B)/contrib/libs/crcutil/libcontrib-libs-crcutil.a |55.0%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/cxxsupp/libcxxrt/liblibs-cxxsupp-libcxxrt.a |55.0%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/data_sharing/common/context/libdata_sharing-common-context.a |55.1%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/libxml/libcontrib-libs-libxml.a |55.1%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/lzma/libcontrib-libs-lzma.a |55.1%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/llvm16/lib/Transforms/Coroutines/liblib-Transforms-Coroutines.a |55.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/engines/storage/indexes/portions/libstorage-indexes-portions.a |55.0%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/engines/scheme/common/libengines-scheme-common.a |55.0%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/engines/storage/optimizer/abstract/libstorage-optimizer-abstract.a |55.0%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/engines/storage/indexes/portions/extractor/libindexes-portions-extractor.global.a |55.0%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/engines/reader/simple_reader/iterator/sys_view/portions/libiterator-sys_view-portions.global.a |55.0%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/engines/storage/indexes/skip_index/libstorage-indexes-skip_index.a |55.1%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/protobuf/libcontrib-libs-protobuf.global.a |55.1%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/pcre/pcre16/liblibs-pcre-pcre16.a |55.1%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/snappy/libcontrib-libs-snappy.a |55.1%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/tcmalloc/malloc_extension/liblibs-tcmalloc-malloc_extension.a |55.1%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/poco/Crypto/liblibs-poco-Crypto.a |55.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/engines/storage/optimizer/lbuckets/planner/liboptimizer-lbuckets-planner.a |55.1%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/pcre/pcre32/liblibs-pcre-pcre32.a |55.1%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/utf8proc/libcontrib-libs-utf8proc.a |55.1%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/poco/NetSSL_OpenSSL/liblibs-poco-NetSSL_OpenSSL.a |55.1%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/xxhash/libcontrib-libs-xxhash.a |55.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/sdk/cpp/src/client/types/exceptions/libclient-types-exceptions.a |55.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/util/actorsys_test/single_thread_ic_mock.cpp |55.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/engines/storage/optimizer/lcbuckets/constructor/level/liblcbuckets-constructor-level.global.a |55.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/engines/storage/indexes/categories_bloom/libstorage-indexes-categories_bloom.global.a |55.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/engines/storage/granule/libengines-storage-granule.a |55.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/engines/storage/optimizer/lbuckets/planner/liboptimizer-lbuckets-planner.global.a |55.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/engines/storage/optimizer/lcbuckets/constructor/level/liblcbuckets-constructor-level.a |55.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/export/protos/libcolumnshard-export-protos.a |55.2%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/sdk/cpp/src/client/types/executor/libclient-types-executor.a |55.2%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/sdk/cpp/src/client/types/status/libclient-types-status.a |55.2%| [CC] {BAZEL_DOWNLOAD, FAILED} 
$(S)/ydb/core/util/actorsys_test/testactorsys.cpp |55.2%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/engines/storage/optimizer/lbuckets/constructor/liboptimizer-lbuckets-constructor.global.a |55.2%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/restricted/aws/aws-c-compression/librestricted-aws-aws-c-compression.a |55.2%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/pcre/libcontrib-libs-pcre.a |55.2%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/engines/storage/indexes/count_min_sketch/libstorage-indexes-count_min_sketch.global.a |55.2%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/restricted/aws/aws-c-event-stream/librestricted-aws-aws-c-event-stream.a |55.1%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/llvm16/lib/Target/X86/MCTargetDesc/libTarget-X86-MCTargetDesc.a |55.2%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/restricted/aws/aws-c-auth/librestricted-aws-aws-c-auth.a |55.2%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/llvm16/lib/DebugInfo/DWARF/liblib-DebugInfo-DWARF.a |55.2%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/yaml-cpp/libcontrib-libs-yaml-cpp.a |55.2%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/restricted/aws/aws-checksums/librestricted-aws-aws-checksums.a |55.2%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/poco/XML/liblibs-poco-XML.a |55.2%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/restricted/boost/atomic/librestricted-boost-atomic.a |55.2%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/zstd06/libcontrib-libs-zstd06.a |55.2%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/yql/providers/ytflow/integration/interface/libytflow-integration-interface.a |55.2%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/restricted/boost/container/librestricted-boost-container.a |55.2%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/subscriber/abstract/subscriber/libsubscriber-abstract-subscriber.a |55.3%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/restricted/boost/context/fcontext_impl/libboost-context-fcontext_impl.a |55.3%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/restricted/aws/aws-c-common/librestricted-aws-aws-c-common.a |55.3%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/restricted/aws/aws-c-io/librestricted-aws-aws-c-io.a |55.3%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/restricted/aws/aws-c-s3/librestricted-aws-aws-c-s3.a |55.3%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/restricted/boost/context/ucontext_impl/libboost-context-ucontext_impl.a |55.2%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/restricted/aws/aws-c-mqtt/librestricted-aws-aws-c-mqtt.a |55.3%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/sdk/cpp/src/client/types/fatal_error_handlers/libclient-types-fatal_error_handlers.a |55.3%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/datetime/libessentials-minikql-datetime.a |55.3%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/llvm16/lib/Passes/libllvm16-lib-Passes.a |55.3%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/restricted/boost/exception/librestricted-boost-exception.a |55.3%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/restricted/boost/random/librestricted-boost-random.a |55.3%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/restricted/boost/iostreams/librestricted-boost-iostreams.a |55.3%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/services/config/libydb-services-config.a |55.3%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/sdk/cpp/src/library/decimal/libsrc-library-decimal.a |55.3%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/export/common/libcolumnshard-export-common.a |55.3%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/engines/storage/optimizer/tiling/libstorage-optimizer-tiling.a |55.3%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/resources/libtx-columnshard-resources.a |55.3%| [AR] {BAZEL_DOWNLOAD} 
$(B)/ydb/core/tx/columnshard/engines/storage/optimizer/lcbuckets/constructor/selector/liblcbuckets-constructor-selector.global.a |55.3%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/sdk/cpp/src/library/issue/libsrc-library-issue.a |55.3%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/sdk/cpp/src/library/persqueue/obfuscate/libsdk-library-persqueue-obfuscate-v3.a |55.3%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/restricted/google/utf8_range/librestricted-google-utf8_range.a |55.3%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/sdk/cpp/src/library/jwt/libsrc-library-jwt.a |55.3%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/export/actor/libcolumnshard-export-actor.a |55.4%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/sdk/cpp/src/client/value/libsrc-client-value.a |55.4%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/restricted/googletest/googlemock/librestricted-googletest-googlemock.a |55.4%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/binsaver/liblibrary-cpp-binsaver.a |55.4%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/archive/liblibrary-cpp-archive.a |55.4%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/blockcodecs/codecs/bzip/libblockcodecs-codecs-bzip.global.a |55.4%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/blockcodecs/codecs/fastlz/libblockcodecs-codecs-fastlz.global.a |55.3%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/restricted/boost/graph/librestricted-boost-graph.a |55.3%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/services/cms/libydb-services-cms.a |55.3%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/sdk/cpp/src/library/time/libsrc-library-time.a |55.3%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/blockcodecs/codecs/legacy_zstd06/libblockcodecs-codecs-legacy_zstd06.global.a |55.3%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/export/session/libcolumnshard-export-session.global.a |55.3%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/engines/writer/libcolumnshard-engines-writer.a |55.3%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/services/deprecated/persqueue_v0/libservices-deprecated-persqueue_v0.a |55.4%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/sdk/cpp/src/library/operation_id/protos/liblibrary-operation_id-protos.a |55.4%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/poco/Foundation/liblibs-poco-Foundation.a |55.4%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/sdk/cpp/src/client/persqueue_public/ut/ut_utils/libpersqueue_public-ut-ut_utils.a |55.4%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/sdk/cpp/src/library/string_utils/helpers/liblibrary-string_utils-helpers.a |55.4%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/blockcodecs/codecs/zstd/libblockcodecs-codecs-zstd.global.a |55.4%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/hooks/abstract/libcolumnshard-hooks-abstract.a |55.4%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/engines/storage/optimizer/lcbuckets/constructor/selector/liblcbuckets-constructor-selector.a |55.4%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/blockcodecs/liblibrary-cpp-blockcodecs.a |55.4%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/case_insensitive_string/liblibrary-cpp-case_insensitive_string.a |55.4%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/stress/mixedpy/workload/libpy3stress-mixedpy-workload.global.a |55.5%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/restricted/thrift/libcontrib-restricted-thrift.a |55.5%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/coroutine/engine/libcpp-coroutine-engine.a |55.4%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/codecs/greedy_dict/libcpp-codecs-greedy_dict.a |55.4%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/sdk/cpp/src/library/uuid/libsrc-library-uuid.a |55.4%| [AR] {BAZEL_DOWNLOAD} 
$(B)/ydb/core/tx/columnshard/test_helper/libtx-columnshard-test_helper.a |55.5%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/comptable/liblibrary-cpp-comptable.a |55.5%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/containers/absl_flat_hash/libcpp-containers-absl_flat_hash.a |55.5%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/services/bg_tasks/abstract/libservices-bg_tasks-abstract.a |55.5%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/containers/bitseq/libcpp-containers-bitseq.a |55.5%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/normalizer/abstract/libcolumnshard-normalizer-abstract.a |55.4%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/normalizer/granule/libcolumnshard-normalizer-granule.global.a |55.4%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/normalizer/portion/libcolumnshard-normalizer-portion.a |55.4%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/containers/concurrent_hash/libcpp-containers-concurrent_hash.a |55.4%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/services/persqueue_v1/actors/libservices-persqueue_v1-actors.a |55.4%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/blockcodecs/codecs/lz4/libblockcodecs-codecs-lz4.global.a |55.4%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/normalizer/tablet/libcolumnshard-normalizer-tablet.global.a |55.5%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/engines/storage/indexes/portions/extractor/libindexes-portions-extractor.a |55.5%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/services/bridge/libydb-services-bridge.a |55.5%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/containers/sorted_vector/libcpp-containers-sorted_vector.a |55.5%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/containers/stack_vector/libcpp-containers-stack_vector.a |55.5%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/containers/stack_array/libcpp-containers-stack_array.a |55.5%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/deprecated/accessors/libcpp-deprecated-accessors.a |55.5%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/containers/str_map/libcpp-containers-str_map.a |55.5%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/http_proxy/authorization/liblibrary-http_proxy-authorization.a |55.6%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/operations/batch_builder/libcolumnshard-operations-batch_builder.a |55.6%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/services/bg_tasks/protos/libservices-bg_tasks-protos.a |55.5%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/services/persqueue_v1/libydb-services-persqueue_v1.a |55.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/services/deprecated/persqueue_v0/grpc_pq_write_actor.cpp |55.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/services/deprecated/persqueue_v0/grpc_pq_read_actor.cpp |55.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/services/deprecated/persqueue_v0/grpc_pq_read.cpp |55.5%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/deprecated/enum_codegen/libcpp-deprecated-enum_codegen.a |55.5%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/operations/libtx-columnshard-operations.a |55.6%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/deprecated/split/libcpp-deprecated-split.a |55.6%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/schemeshard/ut_helpers/libtx-schemeshard-ut_helpers.a |55.6%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/diff/liblibrary-cpp-diff.a |55.6%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/loading/libtx-columnshard-loading.a |55.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/services/deprecated/persqueue_v0/grpc_pq_write.cpp |55.6%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/digest/md5/libcpp-digest-md5.a |55.6%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/digest/lower_case/libcpp-digest-lower_case.a |55.6%| [AR] 
{BAZEL_DOWNLOAD} $(B)/library/cpp/execprofile/liblibrary-cpp-execprofile.a |55.5%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/histogram/hdr/libcpp-histogram-hdr.a |55.6%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/services/persqueue_cluster_discovery/cluster_ordering/libservices-persqueue_cluster_discovery-cluster_ordering.a |55.6%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/splitter/abstract/libcolumnshard-splitter-abstract.a |55.6%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/services/datastreams/codes/libservices-datastreams-codes.a |55.6%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/libydb-core-protos.a |55.6%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/html/pcdata/libcpp-html-pcdata.a |55.6%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/operations/slice_builder/libcolumnshard-operations-slice_builder.a |55.6%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/subscriber/events/tables_erased/libsubscriber-events-tables_erased.a |55.6%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/subscriber/abstract/events/libsubscriber-abstract-events.a |55.6%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/int128/liblibrary-cpp-int128.a |55.6%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/overload_manager/libtx-columnshard-overload_manager.a |55.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/resource_subscriber/libtx-columnshard-resource_subscriber.a |55.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/normalizer/tables/libcolumnshard-normalizer-tables.global.a |55.7%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/http/misc/libcpp-http-misc.a |55.7%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/llvm16/lib/IR/libllvm16-lib-IR.a |55.7%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/digest/argonish/internal/proxies/ref/libinternal-proxies-ref.a |55.6%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/ipmath/liblibrary-cpp-ipmath.a |55.6%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/json/common/libcpp-json-common.a |55.6%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/splitter/libtx-columnshard-splitter.a |55.6%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/http/simple/libcpp-http-simple.a |55.6%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/export/session/libcolumnshard-export-session.a |55.6%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/tracing/libtx-columnshard-tracing.a |55.6%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/digest/argonish/internal/proxies/avx2/libinternal-proxies-avx2.a |55.6%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/protobuf/libcontrib-libs-protobuf.a |55.6%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/tablet/libtx-columnshard-tablet.a |55.6%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/services/ext_index/metadata/extractor/libext_index-metadata-extractor.a |55.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/subscriber/events/tx_completed/libsubscriber-events-tx_completed.a |55.7%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/getopt/small/libcpp-getopt-small.a |55.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/transactions/operators/ev_write/libtransactions-operators-ev_write.a |55.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/transactions/locks/libcolumnshard-transactions-locks.global.a |55.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/services/ext_index/common/libservices-ext_index-common.a |55.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/services/deprecated/persqueue_v0/api/grpc/libapi-grpc-persqueue-deprecated.a |55.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/transactions/libtx-columnshard-transactions.a |55.7%| [AR] {BAZEL_DOWNLOAD} 
$(B)/ydb/core/tx/columnshard/transactions/protos/libcolumnshard-transactions-protos.a |55.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/services/ext_index/metadata/libservices-ext_index-metadata.global.a |55.7%| [PY] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/stress/mixedpy/workload/objcopy_ad01b515e6ee8206b6b9fafbc1.o |55.8%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/transactions/operators/libcolumnshard-transactions-operators.a |55.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/sdk/cpp/src/client/types/operation/libclient-types-operation.a |55.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/transactions/transactions/libcolumnshard-transactions-transactions.a |55.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/conveyor/usage/libtx-conveyor-usage.a |55.8%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/engines/storage/optimizer/lcbuckets/planner/liboptimizer-lcbuckets-planner.global.a |55.8%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/services/metadata/secret/accessor/libmetadata-secret-accessor.a |55.8%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/services/discovery/libydb-services-discovery.a |55.8%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/conveyor_composite/tracing/libtx-conveyor_composite-tracing.a |55.8%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/transactions/operators/ev_write/libtransactions-operators-ev_write.global.a |55.8%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/datashard/libcore-tx-datashard.global.a |55.8%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/services/ext_index/metadata/extractor/libext_index-metadata-extractor.global.a |55.8%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/coordinator/protos/libtx-coordinator-protos.a |55.8%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/conveyor/tracing/libtx-conveyor-tracing.a |55.8%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/services/lib/sharding/libservices-lib-sharding.a |55.8%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/general_cache/source/libtx-general_cache-source.a |55.8%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/conveyor_composite/service/libtx-conveyor_composite-service.a |55.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/services/persqueue_v1/actors/commit_offset_actor.cpp |55.8%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/llvm16/lib/Analysis/libllvm16-lib-Analysis.a |55.9%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/libydb-core-tx.a |55.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/services/persqueue_v1/actors/read_info_actor.cpp |55.9%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/services/ext_index/service/libservices-ext_index-service.a |55.8%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/transactions/operators/libcolumnshard-transactions-operators.global.a |55.8%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/limiter/grouped_memory/tracing/liblimiter-grouped_memory-tracing.a |55.8%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/services/backup/libydb-services-backup.a |55.8%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/services/lib/auth/libservices-lib-auth.a |55.9%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/services/metadata/libydb-services-metadata.a |55.9%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/services/ext_index/metadata/libservices-ext_index-metadata.a |55.9%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/services/kesus/libydb-services-kesus.a |55.9%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/services/metadata/optimization/libservices-metadata-optimization.a |55.9%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/conveyor_composite/usage/libtx-conveyor_composite-usage.a |55.9%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/limiter/grouped_memory/service/liblimiter-grouped_memory-service.a |55.9%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/priorities/usage/libtx-priorities-usage.a |55.9%| [AR] 
{BAZEL_DOWNLOAD} $(B)/ydb/core/tx/data_events/common/libtx-data_events-common.a |55.9%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/services/metadata/request/libservices-metadata-request.a |55.9%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/priorities/service/libtx-priorities-service.a |55.9%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/locks/libcore-tx-locks.a |55.9%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/long_tx_service/public/libtx-long_tx_service-public.a |55.9%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tablet_flat/test/libs/rows/libtest-libs-rows.a |55.9%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/general_cache/usage/libtx-general_cache-usage.a |55.9%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/services/maintenance/libydb-services-maintenance.a |55.9%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/services/metadata/ds_table/libservices-metadata-ds_table.a |56.0%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/limiter/grouped_memory/usage/liblimiter-grouped_memory-usage.a |56.0%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/services/metadata/initializer/libservices-metadata-initializer.a |54.4%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/schemeshard/libcore-tx-schemeshard.a |54.4%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/services/metadata/common/libservices-metadata-common.a |54.6%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/sql/pg/libessentials-sql-pg.a |54.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/long_tx_service/libcore-tx-long_tx_service.a |54.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/engines/storage/indexes/max/libstorage-indexes-max.global.a |54.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/services/deprecated/persqueue_v0/persqueue.cpp |54.9%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/normalizer/schema_version/libcolumnshard-normalizer-schema_version.global.a |54.9%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/normalizer/portion/libcolumnshard-normalizer-portion.global.a |54.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/services/deprecated/persqueue_v0/grpc_pq_clusters_updater_actor.cpp |54.9%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/replication/service/libtx-replication-service.a |55.0%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/engines/storage/indexes/bits_storage/libstorage-indexes-bits_storage.global.a |55.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/data_accessor/cache_policy/libcolumnshard-data_accessor-cache_policy.a |55.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/sdk/cpp/src/client/topic/codecs/libclient-topic-codecs.global.a |55.0%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/replication/common/libtx-replication-common.a |55.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/data_sharing/destination/transactions/libdata_sharing-destination-transactions.a |55.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/coordinator/libcore-tx-coordinator.a |55.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/engines/storage/indexes/categories_bloom/libstorage-indexes-categories_bloom.a |55.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/engines/storage/indexes/bloom/libstorage-indexes-bloom.global.a |55.2%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/services/keyvalue/libydb-services-keyvalue.a |55.3%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/coordinator/public/libtx-coordinator-public.a |55.3%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/schemeshard/olap/bg_tasks/adapter/libolap-bg_tasks-adapter.a |55.1%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/cctz/tzdata/liblibs-cctz-tzdata.global.a |55.2%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/schemeshard/olap/bg_tasks/events/libolap-bg_tasks-events.a |55.2%| [AR] {BAZEL_DOWNLOAD} 
$(B)/ydb/core/tx/schemeshard/olap/bg_tasks/transactions/libolap-bg_tasks-transactions.a |55.3%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/yt/core/https/libyt-core-https.a |55.3%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/stress/mixedpy/workload/objcopy_2bb582236b38a353ed1448f645.o |55.4%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/stress/mixedpy/workload/objcopy_e818dcbb59134fffc2af5b1d20.o |55.4%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/services/dynamic_config/libydb-services-dynamic_config.a |55.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/services/persqueue_v1/actors/codecs.cpp |55.5%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/libcore-tx-columnshard.a |55.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/services/persqueue_v1/actors/partition_writer.cpp |55.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/services/persqueue_v1/actors/update_offsets_in_transaction_actor.cpp |55.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/services/persqueue_v1/actors/direct_read_actor.cpp |55.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/services/persqueue_v1/actors/helpers.cpp |55.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/services/persqueue_v1/actors/read_init_auth_actor.cpp |55.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/services/persqueue_v1/actors/persqueue_utils.cpp |55.6%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/schemeshard/common/libtx-schemeshard-common.a |55.6%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/services/monitoring/libydb-services-monitoring.a |55.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/services/persqueue_v1/actors/distributed_commit_helper.cpp |55.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/scheme_cache/libcore-tx-scheme_cache.a |55.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/services/persqueue_v1/actors/partition_writer_cache_actor.cpp |55.8%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/services/sqs_topic/protos/receipt/libsqs_topic-protos-receipt.a |55.8%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/arrow/libessentials-minikql-arrow.a |55.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/services/persqueue_v1/actors/write_session_actor.cpp |55.9%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/services/sqs_topic/queue_url/holder/libsqs_topic-queue_url-holder.a |55.8%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/schemeshard/olap/column_families/libschemeshard-olap-column_families.a |55.8%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/services/sqs_topic/queue_url/libservices-sqs_topic-queue_url.a |55.8%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/schemeshard/olap/manager/libschemeshard-olap-manager.a |55.8%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/services/sqs_topic/libydb-services-sqs_topic.a |55.8%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/grpc/third_party/upb/libgrpc-third_party-upb.a |55.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/services/persqueue_v1/actors/partition_actor.cpp |55.8%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/highwayhash/arch/sse41/libhighwayhash-arch-sse41.a |55.8%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/core/file_storage/proto/libcore-file_storage-proto.a |55.9%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/highwayhash/arch/avx2/libhighwayhash-arch-avx2.a |55.9%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/schemeshard/olap/bg_tasks/tx_chain/libolap-bg_tasks-tx_chain.a |55.9%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/formats/arrow/accessor/dictionary/libarrow-accessor-dictionary.global.a |55.9%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/highwayhash/libcontrib-libs-highwayhash.a |55.9%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/engines/storage/indexes/bloom_ngramm/libstorage-indexes-bloom_ngramm.global.a |55.9%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/schemeshard/olap/operations/alter/in_store/common/libalter-in_store-common.a |55.9%| [CC] {BAZEL_DOWNLOAD} 
$(S)/ydb/services/persqueue_v1/actors/schema_actors.cpp |56.0%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/libevent/event_openssl/liblibs-libevent-event_openssl.a |55.9%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/schemeshard/olap/operations/alter/abstract/liboperations-alter-abstract.a |55.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/services/persqueue_v1/actors/read_session_actor.cpp |55.9%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/schemeshard/olap/operations/alter/in_store/schema/libalter-in_store-schema.a |55.9%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/schemeshard/olap/operations/alter/in_store/resharding/libalter-in_store-resharding.a |56.0%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/schemeshard/olap/operations/alter/in_store/liboperations-alter-in_store.a |56.0%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/linuxvdso/libcontrib-libs-linuxvdso.a |56.0%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/services/fq/libydb-services-fq.a |56.0%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/services/rate_limiter/libydb-services-rate_limiter.a |56.0%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/schemeshard/olap/operations/alter/in_store/transfer/libalter-in_store-transfer.a |55.9%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/schemeshard/olap/schema/libschemeshard-olap-schema.a |55.9%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/schemeshard/olap/operations/alter/standalone/liboperations-alter-standalone.a |55.9%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/schemeshard/olap/options/libschemeshard-olap-options.a |55.9%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/core/dq_integration/transform/libcore-dq_integration-transform.a |55.9%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/schemeshard/olap/ttl/libschemeshard-olap-ttl.a |56.0%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/services/view/libydb-services-view.a |56.0%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/schemeshard/olap/table/libschemeshard-olap-table.a |56.0%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/core/arrow_kernels/registry/libcore-arrow_kernels-registry.a |56.0%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/services/test_shard/libydb-services-test_shard.a |56.0%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/schemeshard/olap/store/libschemeshard-olap-store.a |56.0%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/sequenceproxy/public/libtx-sequenceproxy-public.a |56.0%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/llvm16/lib/DebugInfo/MSF/liblib-DebugInfo-MSF.a |56.0%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/services/tablet/libydb-services-tablet.a |56.0%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/replication/controller/libtx-replication-controller.a |56.0%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/ast/serialize/libessentials-ast-serialize.a |56.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/tiering/abstract/libtx-tiering-abstract.a |56.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/sequenceproxy/libcore-tx-sequenceproxy.a |56.0%| [AR] {BAZEL_DOWNLOAD} $(B)/library/python/testing/recipe/libpy3python-testing-recipe.global.a |56.0%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/core/dq_integration/libessentials-core-dq_integration.a |56.0%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/tiering/tier/libtx-tiering-tier.a |56.0%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/core/file_storage/libessentials-core-file_storage.a |56.0%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/llvm16/lib/DebugInfo/Symbolize/liblib-DebugInfo-Symbolize.a |56.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/sdk/cpp/src/client/table/libsrc-client-table.a |56.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/tracing/service/libtx-tracing-service.a |56.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/tiering/libcore-tx-tiering.a |56.1%| [AR] 
{BAZEL_DOWNLOAD} $(B)/contrib/libs/llvm16/lib/ExecutionEngine/libllvm16-lib-ExecutionEngine.a |56.1%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/core/cbo/libessentials-core-cbo.a |56.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/tracing/usage/libtx-tracing-usage.a |56.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/sharding/libcore-tx-sharding.global.a |56.1%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/llvm16/lib/AsmParser/libllvm16-lib-AsmParser.a |56.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/util/evlog/libcore-util-evlog.a |56.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/time_cast/libcore-tx-time_cast.a |56.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/tx_allocator_client/libcore-tx-tx_allocator_client.a |56.1%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/curl/libcontrib-libs-curl.a |56.1%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/hyperscan/runtime_corei7/liblibs-hyperscan-runtime_corei7.a |56.1%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/core/file_storage/http_download/libcore-file_storage-http_download.a |56.1%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/llvm16/lib/Target/libllvm16-lib-Target.a |56.1%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/core/file_storage/download/libcore-file_storage-download.a |56.2%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/llvm16/lib/ExecutionEngine/RuntimeDyld/liblib-ExecutionEngine-RuntimeDyld.a |56.2%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/core/expr_nodes_gen/libessentials-core-expr_nodes_gen.a |56.1%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/llvm16/lib/Target/X86/Disassembler/libTarget-X86-Disassembler.a |56.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/viewer/json/libcore-viewer-json.a |56.2%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/tx_allocator/libcore-tx-tx_allocator.a |56.2%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/schemeshard/olap/operations/libschemeshard-olap-operations.a |56.2%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/core/file_storage/defs/libcore-file_storage-defs.a |56.2%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/ymq/actor/cfg/libymq-actor-cfg.a |56.2%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/llvm16/lib/Transforms/ObjCARC/liblib-Transforms-ObjCARC.a |56.2%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/libevent/event_core/liblibs-libevent-event_core.a |56.2%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/wrappers/events/libcore-wrappers-events.a |56.2%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/util/libydb-core-util.a |56.2%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/schemeshard/operation_queue_timer.h_serialized.cpp |56.2%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/llvm16/lib/CodeGen/AsmPrinter/liblib-CodeGen-AsmPrinter.a |56.2%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/nghttp2/libcontrib-libs-nghttp2.a |56.2%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/core/extract_predicate/libessentials-core-extract_predicate.a |56.2%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/core/facade/libessentials-core-facade.a |56.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__background_cleaning.cpp |56.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__clean_pathes.cpp |56.2%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/services/ymq/libydb-services-ymq.a |56.2%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/services/datastreams/libydb-services-datastreams.a |56.2%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/ymq/http/libcore-ymq-http.a |56.2%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/llvm16/lib/MC/libllvm16-lib-MC.a |56.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_alter_fs.cpp |56.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__init_root.cpp |56.2%| [CC] {BAZEL_DOWNLOAD} 
$(S)/ydb/core/tx/schemeshard/schemeshard__operation_create_backup.cpp |56.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard_export__cancel.cpp |56.2%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/viewer/protos/libcore-viewer-protos.a |56.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard_export.cpp |56.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard_export__create.cpp |56.2%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/ydb_convert/libydb-core-ydb_convert.a |56.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard_domain_links.cpp |56.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard_bg_tasks__list.cpp |56.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard_build_index_tx_base.cpp |56.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard_backup_incremental__list.cpp |56.3%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/llvm16/lib/CodeGen/GlobalISel/liblib-CodeGen-GlobalISel.a |56.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__pq_stats.cpp |56.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__publish_to_scheme_board.cpp |56.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_move_table_index.cpp |56.3%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/viewer/libydb-core-viewer.global.a |56.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_upgrade_subdomain.cpp |56.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_split_merge.cpp |56.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_drop_view.cpp |56.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_move_table.cpp |56.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_drop_table.cpp |56.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_drop_sysview.cpp |56.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_drop_bsv.cpp |56.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_db_changes.cpp |56.3%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/llvm16/lib/Support/libllvm16-lib-Support.a |56.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_create_backup_collection.cpp |56.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_copy_sequence.cpp |56.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_consistent_copy_tables.cpp |56.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_drop_backup_collection.cpp |56.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_alter_login.cpp |56.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__op_traits.cpp |56.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_alter_table.cpp |56.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_alter_bsv.cpp |56.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_alter_continuous_backup.cpp |56.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_alter_streaming_query.cpp |56.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_alter_cdc_stream.cpp |56.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__background_compaction.cpp |56.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__notify.cpp |56.4%| [CC] {BAZEL_DOWNLOAD} 
$(S)/ydb/core/tx/schemeshard/schemeshard__operation_blob_depot.cpp |56.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_alter_external_table.cpp |56.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_alter_secret.cpp |56.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_alter_kesus.cpp |56.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp |56.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard.cpp |56.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_alter_solomon.cpp |56.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__delete_tablet_reply.cpp |56.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_alter_index.cpp |56.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_alter_resource_pool.cpp |56.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_cancel_tx.cpp |56.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_apply_build_index.cpp |56.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_assign_bsv.cpp |56.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__conditional_erase.cpp |56.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_alter_user_attrs.cpp |56.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_alter_sequence.cpp |56.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_common_external_data_source.cpp |56.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_alter_replication.cpp |56.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__monitoring.cpp |56.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_change_path_state.cpp |56.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_backup_backup_collection.cpp |56.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_common_external_table.cpp |56.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_alter_extsubdomain.cpp |56.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_common_resource_pool.cpp |56.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_backup_incremental_backup_collection.cpp |56.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp |56.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_common_pq.cpp |56.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_create_build_index.cpp |56.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_create_extsubdomain.cpp |56.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_common_cdc_stream.cpp |56.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_create_external_data_source.cpp |56.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_alter_pq.cpp |56.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_create_external_table.cpp |56.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_copy_table.cpp |56.5%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/tx_proxy/libcore-tx-tx_proxy.a |56.5%| [CC] {BAZEL_DOWNLOAD} 
$(S)/ydb/core/tx/schemeshard/schemeshard__operation_create_continuous_backup.cpp |56.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_create_bsv.cpp |56.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_common_subdomain.cpp |56.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_create_restore.cpp |56.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_common.cpp |56.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_create_lock.cpp |56.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_create_cdc_stream.cpp |56.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_create_index.cpp |56.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_common_bsv.cpp |56.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_create_set_constraint_lock.cpp |56.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_create_indexed_table.cpp |56.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_create_solomon.cpp |56.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_create_kesus.cpp |56.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_create_replication.cpp |56.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__operation.cpp |56.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_create_sequence.cpp |56.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_create_pq.cpp |56.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_create_set_constraint_finalize.cpp |56.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_create_rtmr.cpp |56.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_create_secret.cpp |56.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_create_set_constraint_check.cpp |56.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_create_set_constraint.cpp |56.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_create_restore_incremental_backup.cpp |56.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_create_view.cpp |56.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_create_streaming_query.cpp |56.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_create_subdomain.cpp |56.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp |56.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_create_sysview.cpp |56.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_drop_subdomain.cpp |56.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_drop_kesus.cpp |56.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_drop_streaming_query.cpp |56.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_drop_fs.cpp |56.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_drop_external_table.cpp |56.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_drop_continuous_backup.cpp |56.7%| [CC] {BAZEL_DOWNLOAD} 
$(S)/ydb/core/tx/schemeshard/schemeshard__operation_drop_external_data_source.cpp |56.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_create_fs.cpp |56.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_drop_cdc_stream.cpp |56.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_drop_indexed_table.cpp |56.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_drop_secret.cpp |56.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_drop_lock.cpp |56.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_drop_extsubdomain.cpp |56.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_drop_pq.cpp |56.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_drop_replication.cpp |56.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_drop_index.cpp |56.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_drop_resource_pool.cpp |56.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_drop_solomon.cpp |56.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_drop_sequence.cpp |56.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp |56.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_just_reject.cpp |56.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_drop_unsafe.cpp |56.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_finalize_build_index.cpp |56.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_initiate_build_index.cpp |56.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_incremental_restore_finalize.cpp |56.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_mkdir.cpp |56.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_rmdir.cpp |56.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_move_sequence.cpp |56.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_memory_changes.cpp |56.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_move_tables.cpp |56.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_rotate_cdc_stream.cpp |56.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard_backup_incremental__progress.cpp |56.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_restore_backup_collection.cpp |56.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__unmark_restore_tables.cpp |56.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__state_changed_reply.cpp |56.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__shred_manager.cpp |56.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_part.cpp |56.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__serverless_storage_billing.cpp |56.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_side_effects.cpp |56.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__tenant_shred_manager.cpp |56.8%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/llvm16/lib/Target/X86/liblib-Target-X86.a |56.8%| [CC] {BAZEL_DOWNLOAD} 
$(S)/ydb/core/tx/schemeshard/schemeshard__table_stats_histogram.cpp |56.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard_backup.cpp |56.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__sync_update_tenants.cpp |56.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__table_stats.cpp |56.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard_audit_log.cpp |56.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__upgrade_access_database.cpp |56.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__root_shred_manager.cpp |56.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__upgrade_schema.cpp |56.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard_audit_log_fragment.cpp |56.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard_backup_incremental__get.cpp |56.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard_backup_incremental__forget.cpp |56.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard_build_index__list.cpp |56.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard_build_index__forget.cpp |56.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard_build_index__cancel.cpp |56.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard_build_index__create.cpp |56.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard_billing_helpers.cpp |56.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard_build_index_helpers.cpp |56.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard_build_index.cpp |56.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard_continuous_backup_cleaner.cpp |56.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard_build_index__get.cpp |56.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard_effective_acl.cpp |56.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard_self_pinger.cpp |56.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard_cdc_stream_common.cpp |56.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard_scheme_builders.cpp |56.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard_identificators.cpp |56.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard_cdc_stream_scan.cpp |56.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard_export_helpers.cpp |56.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard_import_scheme_query_executor.cpp |56.4%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/library/actors/core/liblibrary-actors-core.a |56.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_move_index.cpp |56.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard_export_flow_proposals.cpp |56.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard_export__forget.cpp |56.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard_export__list.cpp |56.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard_export__get.cpp |56.5%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/library/actors/interconnect/liblibrary-actors-interconnect.a |56.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard_export_uploaders.cpp |56.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard_import.cpp |56.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard_login_helper.cpp |56.7%| [CC] {BAZEL_DOWNLOAD} 
$(S)/ydb/core/tx/schemeshard/schemeshard_import__get.cpp |56.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard_import_helpers.cpp |56.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard_import__list.cpp |56.5%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/schemeshard/schemeshard_info_types.h_serialized.cpp |56.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard_import__cancel.cpp |56.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard_import__forget.cpp |56.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard_index_build_info.cpp |56.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard_import_flow_proposals.cpp |56.5%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/schemeshard/schemeshard_index_build_info.h_serialized.cpp |56.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard_restore_incremental__forget.cpp |56.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard_restore_incremental__list.cpp |56.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard_build_index__progress.cpp |56.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard_path_element.cpp |56.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard_index_utils.cpp |56.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/user_attributes.cpp |56.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard_incremental_restore_scan.cpp |56.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard_subop_state_types.cpp |56.7%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/library/actors/interconnect/rdma/libactors-interconnect-rdma.a |56.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard_tx_infly.cpp |56.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard_types.cpp |56.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard_import__create.cpp |56.8%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/library/actors/interconnect/rdma/cq_actor/libinterconnect-rdma-cq_actor.a |56.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard_subop_types.cpp |56.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard_restore_incremental__get.cpp |56.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard_path.cpp |56.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard_path_describer.cpp |56.9%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/schemeshard/schemeshard_subop_state_types.h_serialized.cpp |56.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard_import_getters.cpp |56.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard_shard_deleter.cpp |56.7%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/schemeshard/schemeshard_types.h_serialized.cpp |56.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard_system_names.cpp |56.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard_sysviews_update.cpp |56.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard_info_types.cpp |56.8%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/library/actors/testlib/liblibrary-actors-testlib.a |56.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard_xxport__helpers.cpp |56.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard_validate_ttl.cpp |56.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard_svp_migration.cpp |56.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__make_access_database_no_inheritable.cpp |56.9%| [CC] {BAZEL_DOWNLOAD} 
$(S)/ydb/core/tx/schemeshard/schemeshard__login_finalize.cpp |56.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_alter_external_data_source.cpp |56.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__list_users.cpp |56.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__init_populator.cpp |56.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__login.cpp |57.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__find_subdomain_path_id.cpp |57.0%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/core/common_opt/libessentials-core-common_opt.a |57.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__fix_bad_paths.cpp |57.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__describe_scheme.cpp |57.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__borrowed_compaction.cpp |57.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__backup_collection_common.cpp |57.0%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/openldap/libraries/liblber/libopenldap-libraries-liblber.a |57.0%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/pretty_types_print/protobuf/liblibrary-pretty_types_print-protobuf.a |57.0%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/monlib/service/pages/tablesorter/libservice-pages-tablesorter.global.a |57.0%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/core/url_lister/interface/libcore-url_lister-interface.a |57.0%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/iterator/liblibrary-cpp-iterator.a |57.0%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/http/io/libcpp-http-io.a |57.0%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/http/server/libcpp-http-server.a |57.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/ymq/actor/cloud_events/libymq-actor-cloud_events.a |57.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/persqueue/topic_parser/liblibrary-persqueue-topic_parser.a |57.1%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/openssl/big_integer/libcpp-openssl-big_integer.a |57.1%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/ipv6_address/liblibrary-cpp-ipv6_address.a |57.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/services/auth/libydb-services-auth.a |57.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/ymq/attributes/libcore-ymq-attributes.a |57.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/ymq/error/libcore-ymq-error.a |57.1%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/core/issue/libessentials-core-issue.global.a |57.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/ymq/actor/cloud_events/proto/libactor-cloud_events-proto.a |57.1%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/core/qplayer/storage/interface/libqplayer-storage-interface.a |57.1%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/core/qplayer/udf_resolver/libcore-qplayer-udf_resolver.a |57.1%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/core/issue/libessentials-core-issue.a |57.1%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/simdjson/libcontrib-libs-simdjson.a |57.1%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/core/histogram/libessentials-core-histogram.a |57.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/ymq/proto/libcore-ymq-proto.a |57.1%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/poco/JSON/liblibs-poco-JSON.a |57.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/library/actors/core/actorsystem.cpp |57.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/aclib/protos/liblibrary-aclib-protos.a |57.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/ymq/base/libcore-ymq-base.a |57.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__init_schema.cpp |57.1%| [AR] {BAZEL_DOWNLOAD} 
$(B)/ydb/library/accessor/libydb-library-accessor.a |57.1%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/sasl/libcontrib-libs-sasl.a |57.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/ymq/queues/std/libymq-queues-std.a |57.1%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/poco/Util/liblibs-poco-Util.a |57.2%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/core/issue/protos/libcore-issue-protos.a |57.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/library/actors/interconnect/interconnect_mon.cpp |57.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard_impl.cpp |57.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/library/actors/interconnect/interconnect_tcp_input_session.cpp |57.1%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/re2/libcontrib-libs-re2.a |57.2%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/actors/async/liblibrary-actors-async.a |57.2%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/viewer/yaml/libcore-viewer-yaml.a |57.2%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/opentelemetry-proto/libcontrib-libs-opentelemetry-proto.a |57.2%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/actors/core/harmonizer/libactors-core-harmonizer.a |57.2%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/actors/dnsresolver/liblibrary-actors-dnsresolver.a |57.2%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/core/langver/libessentials-core-langver.a |57.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/library/actors/interconnect/rdma/mem_pool.cpp |57.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/library/actors/interconnect/rdma/cq_actor/cq_actor.cpp |57.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/library/actors/interconnect/interconnect_handshake.cpp |57.2%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/actors/interconnect/address/libactors-interconnect-address.a |57.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/library/actors/interconnect/interconnect_channel.cpp |57.2%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/actors/log_backend/liblibrary-actors-log_backend.a |57.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/library/actors/testlib/test_runtime.cpp |57.2%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/config/protos/liblibs-config-protos.a |57.2%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/services/ydb/libydb-services-ydb.a |57.2%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/poco/Net/liblibs-poco-Net.a |57.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/library/actors/interconnect/interconnect_proxy_wrapper.cpp |57.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/library/actors/interconnect/interconnect_tcp_session.cpp |57.2%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/restricted/aws/aws-c-sdkutils/librestricted-aws-aws-c-sdkutils.a |57.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/library/actors/interconnect/interconnect_tcp_proxy.cpp |57.3%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/actors/interconnect/poller/libactors-interconnect-poller.a |57.3%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/actors/interconnect/mock/libactors-interconnect-mock.a |57.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/library/actors/core/event_pb.cpp |57.2%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/restricted/aws/aws-c-http/librestricted-aws-aws-c-http.a |57.2%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/restricted/boost/context/impl_common/libboost-context-impl_common.a |57.3%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/core/expr_nodes/libessentials-core-expr_nodes.a |57.3%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/restricted/boost/coroutine/librestricted-boost-coroutine.a |57.3%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/actors/memory_log/liblibrary-actors-memory_log.a |57.3%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/actors/prof/liblibrary-actors-prof.a |57.3%| [AR] {BAZEL_DOWNLOAD} 
$(B)/ydb/library/actors/testlib/common/libactors-testlib-common.a |57.3%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/zstd/libcontrib-libs-zstd.a |57.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__init.cpp |57.3%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/actors/http/liblibrary-actors-http.a |57.3%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/actors/util/liblibrary-actors-util.a |57.3%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/restricted/boost/thread/librestricted-boost-thread.a |57.3%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/restricted/abseil-cpp-tstring/libcontrib-restricted-abseil-cpp-tstring.a |57.3%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/restricted/boost/program_options/librestricted-boost-program_options.a |57.3%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/restricted/dragonbox/libdragonbox.a |57.3%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/restricted/aws/s2n/librestricted-aws-s2n.a |57.3%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/restricted/cityhash-1.0.2/libcontrib-restricted-cityhash-1.0.2.a |57.3%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/core/credentials/libessentials-core-credentials.a |57.3%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/core/minsketch/libessentials-core-minsketch.a |57.3%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/restricted/boost/regex/librestricted-boost-regex.a |57.3%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/core/pg_settings/libessentials-core-pg_settings.a |57.3%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/arrow_parquet/libydb-library-arrow_parquet.a |57.3%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/bit_io/liblibrary-cpp-bit_io.a |57.3%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/arrow_clickhouse/DataStreams/liblibrary-arrow_clickhouse-DataStreams.a |57.3%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/formats/arrow/simple_builder/liblibrary-formats-arrow-simple_builder.a |57.4%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/arrow_clickhouse/Columns/liblibrary-arrow_clickhouse-Columns.a |57.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/actors/core/activity_guard.cpp |57.4%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/db_pool/protos/liblibrary-db_pool-protos.a |57.4%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/actors/wilson/liblibrary-actors-wilson.a |57.4%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/restricted/boost/serialization/librestricted-boost-serialization.a |57.4%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/library/actors/core/actor.h_serialized.cpp |57.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/actors/core/scheduler_cookie.cpp |57.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/actors/core/thread_context.cpp |57.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/actors/core/mailbox.cpp |57.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/actors/core/scheduler_basic.cpp |57.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/actors/interconnect/interconnect_stream.cpp |57.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/actors/interconnect/types.cpp |57.3%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/parser/pg_wrapper/libessentials-parser-pg_wrapper.a |57.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/actors/interconnect/interconnect_nameserver_table.cpp |57.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/actors/interconnect/interconnect_resolve.cpp |57.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/actors/core/executor_pool_io.cpp |57.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/actors/core/execution_stats.cpp |57.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/actors/core/executor_pool_shared.cpp |57.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/actors/core/event_load.cpp |57.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/actors/core/executor_pool_base.cpp |57.4%| [CC] {BAZEL_DOWNLOAD} 
$(S)/ydb/library/actors/interconnect/rdma/link_manager.cpp |57.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/actors/core/events_undelivered.cpp |57.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/actors/core/executor_pool_basic.cpp |57.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/actors/core/buffer.cpp |57.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/actors/interconnect/rdma/ctx.cpp |57.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/actors/interconnect/rdma/ctx_open.cpp |57.5%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/library/actors/core/defs.h_serialized.cpp |57.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/actors/core/callstack.cpp |57.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/actors/core/event.cpp |57.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/actors/core/events.cpp |57.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/actors/interconnect/rdma/rdma.cpp |57.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/actors/core/cpu_manager.cpp |57.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/actors/core/actor_coroutine.cpp |57.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/actors/core/actor_bootstrapped.cpp |57.4%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/kqp/federated_query/ut_service/fast/ydb-core-kqp-federated_query-ut_service-fast |57.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/actors/core/actor.cpp |57.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/actors/core/actorid.cpp |57.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/actors/core/io_dispatcher.cpp |57.4%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/kqp/ut/arrow/unittest |57.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/actors/core/executor_thread_ctx.cpp |57.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/actors/core/ask.cpp |57.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/actors/core/interconnect.cpp |57.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/actors/interconnect/interconnect_common.cpp |57.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/actors/interconnect/subscription_manager.cpp |57.5%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/kqp/workload_service/ut/unittest |57.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/actors/core/executor_thread.cpp |57.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/actors/interconnect/interconnect_tcp_server.cpp |57.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/actors/interconnect/packet.cpp |57.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/actors/core/monotonic.cpp |57.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/actors/core/log_settings.cpp |57.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/actors/interconnect/interconnect_nameserver_dynamic.cpp |57.6%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/kqp/workload_service/ut/unittest |57.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/actors/interconnect/load.cpp |57.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/actors/interconnect/interconnect_counters.cpp |57.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/actors/core/log_buffer.cpp |57.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/actors/interconnect/interconnect_zc_processor.cpp |57.4%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/datashard/ut_range_ops/ydb-core-tx-datashard-ut_range_ops |57.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/actors/core/mon.cpp |57.4%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/library/actors/core/log_iface.h_serialized.cpp |57.4%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/functional/sqs/messaging/py3test |57.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/actors/core/mon_stats.cpp |57.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/actors/core/mailbox_lockfree.cpp |57.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/actors/core/monotonic_provider.cpp |57.5%| [AR] {BAZEL_DOWNLOAD} 
$(B)/ydb/library/arrow_clickhouse/libydb-library-arrow_clickhouse.a |57.5%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/functional/sqs/messaging/py3test |57.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/actors/core/log.cpp |57.5%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/llvm16/lib/CodeGen/libllvm16-lib-CodeGen.a |57.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/actors/core/scheduler_actor.cpp |57.5%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/coordinator/ut/ydb-core-tx-coordinator-ut |57.5%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/functional/sqs/messaging/py3test |57.5%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/stress/scheme_board/pile_promotion/tests/tests-stress-scheme_board-pile_promotion-tests |57.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/actors/core/probes.cpp |57.6%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/functional/sqs/messaging/py3test |57.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/actors/core/process_stats.cpp |57.6%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/formats/arrow/scalar/liblibrary-formats-arrow-scalar.a |57.6%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/core/user_data/libessentials-core-user_data.a |57.6%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/folder_service/libydb-library-folder_service.a |57.6%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/functional/sqs/messaging/py3test |57.5%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/stress/backup/backup_stress |57.5%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/core/qplayer/url_lister/libcore-qplayer-url_lister.a |57.6%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/formats/arrow/validation/liblibrary-formats-arrow-validation.a |57.6%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/formats/arrow/switch/liblibrary-formats-arrow-switch.a |57.6%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/db_pool/libydb-library-db_pool.a |57.6%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/blockcodecs/core/libcpp-blockcodecs-core.a |57.6%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/folder_service/proto/liblibrary-folder_service-proto.a |57.6%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/functional/sqs/with_quotas/py3test |57.6%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/formats/arrow/transformer/liblibrary-formats-arrow-transformer.a |57.6%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/core/peephole_opt/libessentials-core-peephole_opt.a |57.6%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/formats/arrow/modifier/liblibrary-formats-arrow-modifier.a |57.7%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/restricted/uriparser/libcontrib-restricted-uriparser.a |57.7%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/colorizer/liblibrary-cpp-colorizer.a |57.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/arrow_kernels/libydb-library-arrow_kernels.a |57.7%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/functional/sqs/with_quotas/py3test |57.7%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/compproto/liblibrary-cpp-compproto.a |57.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/fyamlcpp/libydb-library-fyamlcpp.a |57.7%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/core/url_preprocessing/interface/libcore-url_preprocessing-interface.a |57.5%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/core/layers/libessentials-core-layers.a |57.5%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/containers/2d_array/libcpp-containers-2d_array.a |57.6%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/stress/backup/tests/ydb-tests-stress-backup-tests |57.6%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/global_plugins/libydb-library-global_plugins.a |57.6%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/containers/ring_buffer/libcpp-containers-ring_buffer.a |57.6%| [AR] {BAZEL_DOWNLOAD} 
$(B)/library/cpp/charset/lite/libcpp-charset-lite.a |57.6%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/grpc/server/liblibrary-grpc-server.a |57.6%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/grpc/server/actors/libgrpc-server-actors.a |57.6%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/functional/sqs/messaging/py3test |57.6%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/containers/compact_vector/libcpp-containers-compact_vector.a |57.7%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/functional/sqs/messaging/py3test |57.7%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/blobstorage/backpressure/ut_client/ydb-core-blobstorage-backpressure-ut_client |57.7%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/functional/sqs/with_quotas/py3test |57.7%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/public/sdk/cpp/src/client/federated_topic/ut/unittest |57.7%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/functional/sqs/messaging/py3test |57.7%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/codecs/liblibrary-cpp-codecs.a |57.7%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/core/services/mounts/libcore-services-mounts.global.a |57.7%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/containers/cow_string/libcpp-containers-cow_string.a |57.7%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/datashard/ut_export/ydb-core-tx-datashard-ut_export |57.7%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/functional/sqs/messaging/py3test |57.8%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/logger/libydb-library-logger.a |57.8%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/core/services/mounts/libcore-services-mounts.a |57.8%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/functional/sqs/with_quotas/py3test |57.8%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/keys/libydb-library-keys.a |57.8%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/functional/sqs/with_quotas/py3test |57.4%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/http_proxy/error/liblibrary-http_proxy-error.a |57.4%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/mvp/meta/ut/ydb-mvp-meta-ut |57.4%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/dbg_output/liblibrary-cpp-dbg_output.a |57.5%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/functional/sqs/with_quotas/py3test |57.5%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/stress/mixedpy/tests/ydb-tests-stress-mixedpy-tests |57.5%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/functional/sqs/with_quotas/py3test |57.6%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/login/cache/liblibrary-login-cache.a |57.6%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/cron_expression/liblibrary-cpp-cron_expression.a |57.6%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/login/account_lockout/liblibrary-login-account_lockout.a |57.6%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/functional/sqs/with_quotas/py3test |57.6%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/deprecated/kmp/libcpp-deprecated-kmp.a |57.6%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/functional/sqs/messaging/py3test |57.6%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/core/sql_types/libessentials-core-sql_types.a |57.6%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/library/actors/http/ut/ydb-library-actors-http-ut |57.6%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/digest/crc32c/libcpp-digest-crc32c.a |57.7%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/stress/show_create/view/show_create_view |57.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/login/password_checker/liblibrary-login-password_checker.a |57.7%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/client/minikql_compile/ut/ydb-core-client-minikql_compile-ut |57.7%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/functional/sqs/with_quotas/py3test |57.7%| [LD] {BAZEL_DOWNLOAD, FAILED} 
$(B)/ydb/library/workload/benchmark_base/ut/ydb-library-workload-benchmark_base-ut |57.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/login/sasl/liblibrary-login-sasl.a |57.7%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/stress/kafka/workload/libpy3stress-kafka-workload.global.a |57.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/login/hashes_checker/liblibrary-login-hashes_checker.a |57.8%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/stress/cdc/tests/ydb-tests-stress-cdc-tests |57.8%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/public/sdk/cpp/src/client/federated_topic/ut/unittest |57.8%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/olap/high_load/ydb-tests-olap-high_load |57.4%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/blobstorage/vdisk/defrag/ut/ydb-core-blobstorage-vdisk-defrag-ut |57.5%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/functional/sqs/with_quotas/py3test |57.5%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/tools/pq_read/test/ydb-tests-tools-pq_read-test |57.5%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/functional/sqs/messaging/py3test |57.5%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/functional/sqs/messaging/py3test |57.5%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/enumbitset/liblibrary-cpp-enumbitset.a |57.5%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/functional/sqs/messaging/py3test |57.6%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/fq/http_api/ydb-tests-fq-http_api |57.6%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/functional/sqs/with_quotas/py3test |57.6%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/grpc_services/grpc_request_check_actor_ut/unittest |57.6%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/services/config/ut/ydb-services-config-ut |57.6%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/getopt/liblibrary-cpp-getopt.global.a |57.6%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/html/escape/libcpp-html-escape.a |57.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/login/protos/liblibrary-login-protos.a |57.7%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/functional/sqs/messaging/py3test |57.7%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/library/yql/dq/comp_nodes/ut/ydb-library-yql-dq-comp_nodes-ut |57.7%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/functional/sqs/with_quotas/py3test |57.7%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/config/init/ut/ydb-core-config-init-ut |57.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/login/libydb-library-login.a |57.8%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/persqueue/deprecated/read_batch_converter/libpersqueue-deprecated-read_batch_converter.a |57.8%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/digest/argonish/internal/proxies/ssse3/libinternal-proxies-ssse3.a |57.9%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/kqp/workload_service/ut/unittest |57.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/pdisk_io/protos/liblibrary-pdisk_io-protos.a |57.7%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/kqp/workload_service/ut/unittest |57.7%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/persqueue/pqtablet/blob/ut/ydb-core-persqueue-pqtablet-blob-ut |57.8%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/mkql_proto/libydb-library-mkql_proto.a |57.8%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/public/tools/ydb_recipe/ydb_recipe |57.8%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/engines/storage/indexes/bloom_ngramm/libstorage-indexes-bloom_ngramm.a |57.8%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/functional/sqs/messaging/py3test |57.8%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/schemeshard/ut_sysview/unittest |57.8%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/schemeshard/ut_login/unittest |57.9%| [LD] {BAZEL_DOWNLOAD, FAILED} 
$(B)/ydb/library/yql/providers/dq/provider/ut/ydb-library-yql-providers-dq-provider-ut |57.9%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/functional/sqs/messaging/py3test |57.9%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/external_sources/ut/ydb-core-external_sources-ut |57.9%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/persqueue/public/partition_key_range/ut/ydb-core-persqueue-public-partition_key_range-ut |57.9%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/pdisk_io/libydb-library-pdisk_io.a |57.9%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/sequenceshard/ut/ydb-core-tx-sequenceshard-ut |57.6%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/library/yql/providers/dq/scheduler/ut/ydb-library-yql-providers-dq-scheduler-ut |57.6%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/http/fetch/libcpp-http-fetch.a |57.6%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/library/workload/tpch/ut/ydb-library-workload-tpch-ut |57.6%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/functional/sqs/messaging/py3test |57.7%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/persqueue/public/fetcher/ut/ydb-core-persqueue-public-fetcher-ut |57.8%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/functional/sqs/messaging/py3test |57.8%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/library/actors/interconnect/rdma/ut/ydb-library-actors-interconnect-rdma-ut |57.8%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/functional/sqs/with_quotas/py3test |57.8%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/functional/sqs/messaging/py3test |57.8%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/grpc_services/grpc_request_check_actor_ut/unittest |57.8%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/datashard/parametrized_queries/objcopy_7d0deb4120fbddf720c11b5358.o |57.8%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/plan2svg/libydb-library-plan2svg.a |57.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/backpressure/ut_client/backpressure_ut.cpp |57.9%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/tools/nemesis/ut/objcopy_927a1f7611cf94fb1cd21ef8cf.o |57.9%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/persqueue/public/describer/ut/ydb-core-persqueue-public-describer-ut |57.9%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/grpc_services/grpc_request_check_actor_ut/unittest |57.9%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/schemeshard/ut_sysview/unittest |57.9%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/functional/sqs/with_quotas/py3test |58.0%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/long_tx_service/ut/ydb-core-tx-long_tx_service-ut |57.8%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/kqp/gateway/ut/ydb-core-kqp-gateway-ut |57.8%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/library/yaml_config/tools/dump_ds_init/yaml-to-proto-dump-ds-init |57.8%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/functional/sqs/with_quotas/py3test |57.8%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/tests/stress/node_broker/libpy3node_broker.global.a |57.9%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/library/yql/providers/s3/provider/ut/ydb-library-yql-providers-s3-provider-ut |57.9%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/stress/topic_kafka/workload/objcopy_0e9ac60eb7f731d367df5e10f1.o |57.9%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/functional/sqs/messaging/py3test |57.9%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/public/sdk/cpp/src/client/federated_topic/ut/unittest |57.9%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/datashard/parametrized_queries/objcopy_6d8369510b03c08a300f2e2657.o |57.9%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/stack-data/libpy3contrib-python-stack-data.global.a |57.9%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/functional/sqs/messaging/py3test |57.9%| [AR] 
{BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/stress/kv/workload/libpy3stress-kv-workload.global.a |58.0%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/blobstorage/base/ut/ydb-core-blobstorage-base-ut |58.0%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/tests/tools/nemesis/library/libpy3tools-nemesis-library.global.a |58.0%| [PY] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/stress/kafka/workload/objcopy_43f03d8ce0f7a500b6680c8688.o |58.0%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/datashard/parametrized_queries/objcopy_e1e64d508ce59834ec0a40f731.o |58.0%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/tools/nemesis/ut/objcopy_b06d27009e49b9ba3df883a226.o |58.0%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/ipdb/py3/libpy3python-ipdb-py3.global.a |58.0%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/stress/node_broker/objcopy_2a9fba044b5f98d2ff5f5c7f44.o |58.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/fq/libs/row_dispatcher/format_handler/ut/topic_filter_ut.cpp |58.0%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/asttokens/libpy3contrib-python-asttokens.global.a |58.0%| [AR] {BAZEL_DOWNLOAD} $(B)/library/recipes/common/libpy3library-recipes-common.global.a |58.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/fq/libs/row_dispatcher/format_handler/ut/format_handler_ut.cpp |58.0%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/services/libpy3ydb-library-services.global.a |58.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/scheme/scheme_ranges_ut.cpp |58.0%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/py/py3/libpy3python-py-py3.global.a |58.0%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/executing/libpy3contrib-python-executing.global.a |58.0%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/tests/stress/scheme_board/pile_promotion/workload/libpy3scheme_board-pile_promotion-workload.global.a |58.0%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/matplotlib-inline/libpy3contrib-python-matplotlib-inline.global.a |58.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/kqp/federated_query/ut_service/common/libfederated_query-ut_service-common.a |58.1%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/MarkupSafe/py3/libpy3python-MarkupSafe-py3.global.a |58.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/fq/libs/row_dispatcher/format_handler/ut/topic_parser_ut.cpp |58.0%| [AR] {BAZEL_DOWNLOAD} $(B)/library/python/strings/libpy3library-python-strings.global.a |58.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kqp/federated_query/ut_service/fast/kqp_federated_query_actors_ut.cpp |58.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/base/auth_ut.cpp |58.0%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/stress/scheme_board/pile_promotion/tests/objcopy_a457e57e9ccca716aa1224bf90.o |58.0%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/stress/scheme_board/pile_promotion/tests/objcopy_a14abb13ecebd457a15fc48470.o |58.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/library/actors/interconnect/rdma/ut/rdma_low_ut.cpp |58.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/library/actors/interconnect/rdma/ut/allocator_ut.cpp |58.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/tests/library/stress/libpy3tests-library-stress.global.a |58.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/library/actors/interconnect/rdma/ut/ibv_ut.cpp |58.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/tests/stress/backup/libpy3backup_stress.global.a |58.1%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/stress/scheme_board/pile_promotion/tests/objcopy_f152d89e868e3e70c582478d88.o |58.1%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/stress/backup/objcopy_4f24d349611ecfcf19e3001adb.o |58.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/datashard/datashard_ut_range_ops.cpp |58.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/datashard/build_index/ut/ut_local_kmeans.cpp |58.1%| 
[CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/library/actors/interconnect/rdma/ut/utils.cpp |58.1%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/Werkzeug/py3/libpy3python-Werkzeug-py3.global.a |58.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/tests/stress/backup/workload/libpy3stress-backup-workload.global.a |58.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/scheme/scheme_tablecell_ut.cpp |58.1%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/core/libyql-essentials-core.a |58.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/coordinator/coordinator_volatile_ut.cpp |58.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/coordinator/coordinator_ut.cpp |58.1%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/monlib/encode/unistat/libmonlib-encode-unistat.a |58.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/driver_lib/run/run_ut.cpp |58.0%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/stress/backup/tests/objcopy_2983697b65fe3fbc437e0acf1f.o |58.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kqp/ut/yql/kqp_scripting_ut.cpp |58.0%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/stress/backup/tests/objcopy_771af70678639da3d434ea1416.o |58.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/client/minikql_compile/yql_expr_minikql_compile_ut.cpp |58.1%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/stress/backup/tests/objcopy_aac602e1ee9075b2856f1975ca.o |58.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/tests/stress/scheme_board/pile_promotion/libpy3pile_promotion_workload.global.a |58.1%| [PY] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/stress/kv/workload/objcopy_33abb7f7b873fc17cb192a5592.o |58.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/datashard/build_index/ut/ut_helpers.cpp |58.1%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/tools/kqprun/kqprun |57.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/datashard/export_s3_buffer_ut.cpp |57.9%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/backup/common/ut/ydb-core-backup-common-ut |57.9%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/functional/sqs/messaging/py3test |58.0%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/mvp/security/simple/libmvp-security-simple.a |58.1%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/stress/mixedpy/tests/objcopy_c4b248e24a6215fa53b9e5552d.o |58.1%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/functional/sqs/messaging/py3test |58.1%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/stress/mixedpy/tests/objcopy_1de592266ca9bc1d10b20d8e9a.o |58.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/client/yc_private/access/libclient-yc_private-access.a |58.1%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/core/type_ann/libessentials-core-type_ann.a |58.1%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/stress/mixedpy/tests/objcopy_e25036fa51e72ace049084c308.o |58.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/datashard/libcore-tx-datashard.a |58.2%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/mvp/core/protos/libmvp-core-protos.a |58.2%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/query_cache/objcopy_388aef0b6ac03d4f661ae7a30e.o |58.2%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/client/yc_private/quota/libclient-yc_private-quota.a |58.2%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/tests/stress/show_create/view/libpy3show_create_view.global.a |58.2%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/fq/common/ydb-tests-fq-common |57.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/mvp/meta/meta_cache_ut.cpp |57.9%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/datashard/ut_init/ydb-core-tx-datashard-ut_init |57.9%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/library/actors/core/harmonizer/ut/ydb-library-actors-core-harmonizer-ut |57.9%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/tests/datashard/lib/libpy3tests-datashard-lib.global.a |57.9%| [TM] {BAZEL_DOWNLOAD, FAILED} 
ydb/tests/functional/sqs/messaging/py3test |58.0%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/library/actors/interconnect/ut/ydb-library-actors-interconnect-ut |58.0%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/persqueue/pqtablet/partition/mlp/ut/ydb-core-persqueue-pqtablet-partition-mlp-ut |58.0%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/query_cache/objcopy_e31620202d3ba8df14ff2a18e1.o |58.0%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/tests/stress/show_create/view/workload/libpy3show_create-view-workload.global.a |58.2%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/streams/lz/lz4/libstreams-lz-lz4.a |58.2%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/stress/kafka/workload/objcopy_fecdbbd3b168ee83928a6be126.o |58.2%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/stress/kafka/workload/objcopy_108c71299bb0fd7f230af129b0.o |58.2%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/tests/stress/cdc/workload/libpy3stress-cdc-workload.global.a |58.2%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/blobstorage/groupinfo/ut/unittest |58.2%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/stress/cdc/tests/objcopy_7f02665786b7523f76c02ad1dd.o |58.2%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/stress/cdc/tests/objcopy_b9fcf9641e3e569e88014f85ff.o |58.2%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/schemeshard/ut_login/unittest |58.2%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/stress/cdc/tests/objcopy_7c0098f27edc25092453a8033c.o |58.0%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/memory_controller/ut/ydb-core-memory_controller-ut |58.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/tests/olap/high_load/read_update_write.cpp |58.1%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/limiter/grouped_memory/ut/ydb-core-tx-limiter-grouped_memory-ut |58.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/blobstorage/vdisk/defrag/defrag_actor_ut.cpp |58.1%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/functional/sqs/messaging/py3test |58.1%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/tools/pq_read/test/objcopy_9818d2b70aad7db98a0f9c044c.o |58.1%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/stress/streaming/streaming |58.1%| [AR] {BAZEL_DOWNLOAD} $(B)/library/python/retry/libpy3library-python-retry.global.a |58.1%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/functional/sqs/messaging/py3test |58.2%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/functional/sqs/messaging/py3test |58.2%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/fq/http_api/objcopy_7eade8c49389813f8c36b72b5b.o |58.2%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/fq/http_api/objcopy_7bfd03a31f5e230607792f10cc.o |58.2%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/tools/pq_read/test/objcopy_45b6981aed17dda33d43217f52.o |58.2%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/functional/sqs/messaging/py3test |58.2%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/streams/lz/libcpp-streams-lz.a |58.2%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/functional/sqs/messaging/py3test |58.2%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/fq/http_api/objcopy_3209cda00462f2963f3cbbc912.o |58.2%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/workload/benchmark_base/liblibrary-workload-benchmark_base.a |58.2%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/datashard/add_column/ydb-tests-datashard-add_column |58.3%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/schemeshard/ut_ru_calculator/ydb-core-tx-schemeshard-ut_ru_calculator |58.3%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/functional/sqs/messaging/py3test |58.2%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/dwarf_backtrace/registry/libcpp-dwarf_backtrace-registry.global.a |58.2%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/tests/tools/datastreams_helpers/libpy3tests-tools-datastreams_helpers.global.a |58.2%| 
[PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/tools/pq_read/test/objcopy_0035b673555f394234ae284e25.o |58.2%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/streams/factory/open_common/libstreams-factory-open_common.a |58.2%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/kqp/tools/join_perf/libkqp-tools-join_perf.a |58.2%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/streams/factory/open_by_signature/libstreams-factory-open_by_signature.a |58.2%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/stress/show_create/view/objcopy_9ccdc4f01b578a43bc35d4d519.o |58.2%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/tests/tools/fq_runner/libpy3tests-tools-fq_runner.global.a |58.2%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/streams/lz/snappy/libstreams-lz-snappy.a |58.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/config/init/init_ut.cpp |58.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/actors/http/http_ut.cpp |58.3%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/fq/http_api/objcopy_4f92526e13553482736b942b2c.o |58.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/persqueue/pqtablet/blob/blob_ut.cpp |58.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/dq/comp_nodes/ut/dq_hash_join_ut.cpp |58.3%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/fq/libs/db_id_async_resolver_impl/ut/ydb-core-fq-libs-db_id_async_resolver_impl-ut |58.3%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/tools/lib/cmds/libpy3tools-lib-cmds.global.a |58.3%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/mvp/meta/libydb-mvp-meta.a |58.3%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/fq/http_api/objcopy_1a1e300767b552f4c13c3295d0.o |58.3%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/blobstorage/groupinfo/ut/unittest |58.3%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/http_api_client/libpy3fq-libs-http_api_client.global.a |58.3%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/public/tools/ydb_recipe/objcopy_c55121179eeb3b5753498290c4.o |58.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/long_tx_service/long_tx_service_ut.cpp |58.4%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/tools/ydb_recipe/libpy3ydb_recipe.global.a |58.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/persqueue/pqtablet/blob/type_codecs_ut.cpp |58.4%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/mvp/core/libydb-mvp-core.a |58.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/workload/benchmark_base/state_ut.cpp |58.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/providers/dq/provider/yql_dq_provider_ut.cpp |58.2%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/stress/topic/workload/libpy3stress-topic-workload.global.a |58.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/external_sources/external_data_source_ut.cpp |58.3%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/providers/dq/scheduler/libproviders-dq-scheduler.a |58.3%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/blobstorage/groupinfo/ut/unittest |58.3%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/workload/tpc_base/liblibrary-workload-tpc_base.global.a |58.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/services/config/bsconfig_ut.cpp |58.3%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/functional/sqs/messaging/py3test |58.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/external_sources/iceberg_ddl_ut.cpp |58.3%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/fq/libs/control_plane_proxy/ut/ydb-core-fq-libs-control_plane_proxy-ut |58.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/workload/tpch/ut/queries_ut.cpp |58.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/dq/comp_nodes/ut/dq_hash_combine_ut.cpp |58.4%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/kqp/executer_actor/ut/ydb-core-kqp-executer_actor-ut |58.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/external_sources/object_storage_ut.cpp |58.4%| [CC] {BAZEL_DOWNLOAD} 
$(S)/ydb/core/persqueue/public/partition_key_range/partition_key_range_sequence_ut.cpp |58.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/sequenceshard/ut_helpers.cpp |58.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/providers/dq/scheduler/ut/dq_scheduler_ut.cpp |58.4%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/testing/gtest/libcpp-testing-gtest.a |58.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/external_sources/external_source_builder_ut.cpp |58.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/sequenceshard/ut_sequenceshard.cpp |58.4%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/workload/tpch/liblibrary-workload-tpch.global.a |58.5%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/sdk/cpp/tests/integration/topic/utils/libintegration-topic-utils.a |58.4%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/actors/interconnect/rdma/ut/utils/librdma-ut-utils.a |58.4%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/dq/comp_nodes/ut/utils/libcomp_nodes-ut-utils.a |58.4%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/testing/gtest_main/libcpp-testing-gtest_main.a |58.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yaml_config/tools/dump_ds_init/main.cpp |58.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/persqueue/public/fetcher/fetch_request_ut.cpp |58.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/persqueue/public/describer/describer_ut.cpp |58.4%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yaml_config/deprecated/liblibrary-yaml_config-deprecated.a |58.4%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/benchmarks/queries/tpch/libbenchmarks-queries-tpch.global.a |58.4%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/workload/tpch/liblibrary-workload-tpch.a |58.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/library/actors/interconnect/ut/connection_checker_ut.cpp |58.5%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/workload/tpc_base/liblibrary-workload-tpc_base.a |58.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/library/actors/interconnect/ut/interconnect_ut.cpp |58.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/library/actors/interconnect/ut/dynamic_proxy_ut.cpp |58.5%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/stress/kv/workload/objcopy_16fe303ead399e4e3e388d7d17.o |58.5%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/benchmarks/gen/tpch-dbgen/libbenchmarks-gen-tpch-dbgen.a |57.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/library/actors/interconnect/ut/large.cpp |57.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/library/actors/interconnect/ut/sticking_ut.cpp |57.9%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/stress/kv/workload/objcopy_d0bb31c1ca2f7f4c842859052f.o |57.9%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/blobstorage/groupinfo/ut/unittest |58.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/blobstorage/base/batched_vec_ut.cpp |58.0%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/public/sdk/cpp/src/client/federated_topic/ut/unittest |58.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/providers/s3/provider/yql_s3_listing_strategy_ut.cpp |58.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/blobstorage/base/bufferwithgaps_ut.cpp |58.0%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/ymq/actor/libcore-ymq-actor.a |58.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/blobstorage/base/ptr_ut.cpp |58.0%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/library/yql/tools/dqrun/dqrun |58.0%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/library/actors/interconnect/rdma/ut_mem_pool_limit_32/interconnect-rdma-ut_mem_pool_limit_32 |58.0%| [PY] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/stress/topic_kafka/workload/objcopy_d6a1b41fc15f14862f8ec129ed.o |58.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kqp/gateway/ut/metadata_conversion.cpp |58.1%| [LD] {BAZEL_DOWNLOAD, FAILED} 
$(B)/ydb/core/tx/datashard/ut_keys/ydb-core-tx-datashard-ut_keys |57.7%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/blobstorage/groupinfo/ut/unittest |57.8%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/blobstorage/groupinfo/ut/unittest |57.9%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/schemeshard/ut_login/unittest |58.0%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/blobstorage/groupinfo/ut/unittest |58.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kqp/rm_service/kqp_rm_ut.cpp |58.0%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/stability/tool/libpy3tests-stability-tool.global.a |58.0%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/functional/sqs/messaging/py3test |58.0%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/public/sdk/cpp/src/client/federated_topic/ut/unittest |58.1%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/functional/sqs/with_quotas/py3test |58.1%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/functional/sqs/messaging/py3test |58.1%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/time_cast/ut/ydb-core-tx-time_cast-ut |58.1%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/stress/ctas/ctas |58.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/sdk/cpp/src/client/topic/ut/ut_utils/libtopic-ut-ut_utils.a |58.2%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/client/yc_private/ydb/v1/libyc_private-ydb-v1.a |58.2%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/kqp/ut/arrow/unittest |58.2%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/functional/sqs/messaging/py3test |58.2%| [PY] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/stress/topic/workload/objcopy_456ef6a09159ceaa7dd7dffa59.o |58.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/persqueue/common/microseconds_sliding_window_ut.cpp |58.2%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/functional/sqs/with_quotas/py3test |58.2%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/blobstorage/groupinfo/ut/unittest |58.2%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/kqp/ut/arrow/unittest |58.3%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/datashard/ut_replication/ydb-core-tx-datashard-ut_replication |58.3%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/grpc_services/tablet/ut/ydb-core-grpc_services-tablet-ut |58.1%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/kqp/ut/discovery/ydb-core-kqp-ut-discovery |58.1%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/functional/sqs/messaging/py3test |58.1%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/query_cache/objcopy_f8b2cbafb1fed0e25bf9683c2d.o |58.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/scheme/scheme_types_proto_ut.cpp |58.2%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/kqp/tools/combiner_perf/libkqp-tools-combiner_perf.a |58.2%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/kqp/ut/arrow/unittest |58.2%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/blobstorage/groupinfo/ut/unittest |58.2%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/functional/sqs/messaging/py3test |58.2%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/schemeshard/ut_login/unittest |58.3%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/grpc_services/grpc_request_check_actor_ut/unittest |58.3%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/schemeshard/ut_sysview/unittest |58.3%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/blobstorage/groupinfo/ut/unittest |58.4%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/datashard/select/objcopy_d709b1895f91108d9f51b703ea.o |58.4%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/stress/kv/tests/ydb-tests-stress-kv-tests |58.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/actors/core/ut_fat/actor_benchmark.cpp |58.4%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/functional/sqs/with_quotas/py3test |58.4%| [TM] {BAZEL_DOWNLOAD, FAILED} 
ydb/tests/functional/sqs/with_quotas/py3test |58.5%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/stress/kv/objcopy_691607a9cbabb8d8c0161d1a6d.o |58.5%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/functional/sqs/with_quotas/py3test |58.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/ut_blobstorage/phantom_blobs.cpp |58.4%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/functional/sqs/with_quotas/py3test |58.4%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/functional/sqs/with_quotas/py3test |58.4%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/tests/library/test_meta/libpy3tests-library-test_meta.global.a |58.4%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/datashard/select/objcopy_ec9bc627b6d56d1a941c2b7e4f.o |58.5%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/scheme_board/ut_double_indexed/ydb-core-tx-scheme_board-ut_double_indexed |58.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kqp/ut/yql/kqp_yql_ut.cpp |58.5%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/schemeshard/ut_sysview/unittest |58.5%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/grpc_services/grpc_request_check_actor_ut/unittest |58.5%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/mvp/core/ut/ydb-mvp-core-ut |58.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/datashard/build_index/ut/ut_fulltext.cpp |58.5%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/functional/sqs/with_quotas/py3test |58.5%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/xmltodict/py3/libpy3python-xmltodict-py3.global.a |58.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/scheme/scheme_borders_ut.cpp |58.5%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/library/yql/providers/s3/object_listers/ut/ydb-library-yql-providers-s3-object_listers-ut |58.4%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/digest/sfh/libcpp-digest-sfh.a |58.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/backup/common/metadata_ut.cpp |58.4%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/tools/python3/libcontrib-tools-python3.a |58.4%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/udfs/common/compress_base/lib/libcommon-compress_base-lib.a |58.4%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/providers/common/mkql_simple_file/libproviders-common-mkql_simple_file.a |58.4%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/streams/xz/libcpp-streams-xz.a |58.4%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/tests/tools/kqprun/src/proto/libkqprun-src-proto.a |58.5%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/udfs/common/re2/libre2_udf.global.a |58.5%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/fq/common/objcopy_b34c6a8a5501db208eebc5d8e4.o |58.5%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/tests/fq/common/aba998449c2518e3272d8e87fb_raw.auxcpp |58.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/backup/common/encryption_ut.cpp |58.5%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/providers/pq/gateway/dummy/libpq-gateway-dummy.a |58.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/library/actors/interconnect/rdma/mem_pool.cpp |58.5%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/udfs/common/compress_base/libcompress_udf.global.a |58.5%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/udfs/common/digest/libdigest_udf.global.a |58.5%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/fq/common/objcopy_e32003454342267c2263935765.o |58.5%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/fq/common/objcopy_9a3dabea847c21e0b4fa4cda26.o |58.5%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/udfs/common/json2/libjson2_udf.global.a |58.5%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/fq/common/objcopy_cca8dcd66462c9ca3c57fcb78e.o |58.6%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/udfs/common/yson2/libyson2_udf.global.a |58.6%| [PY] {BAZEL_DOWNLOAD, FAILED} 
$(B)/ydb/tests/stability/tool/objcopy_e64be2702e6aadcfe4f62214e0.o |58.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/tests/tools/kqprun/kqprun.cpp |58.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/actors/core/harmonizer/ut/cpu_count_ut.cpp |58.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/actors/interconnect/ut/channel_scheduler_ut.cpp |58.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/datashard/datashard_ut_init.cpp |58.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/actors/interconnect/ut/outgoing_stream_ut.cpp |58.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/actors/core/harmonizer/ut/shared_info_ut.cpp |58.5%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/actors/interconnect/ut/protos/libinterconnect-ut-protos.a |58.5%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/actors/interconnect/ut/lib/tls/libut-lib-tls.a |58.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/actors/core/harmonizer/ut/history_ut.cpp |58.6%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/tests/tools/kqprun/runlib/libtools-kqprun-runlib.a |58.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/actors/interconnect/ut/event_holder_pool_ut.cpp |58.6%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/actors/interconnect/ut/lib/port_manager/libut-lib-port_manager.a |58.6%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/stress/streaming/objcopy_1ac33161075322a9a0efb211c2.o |58.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/persqueue/pqtablet/partition/mlp/mlp_ut.cpp |58.6%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/tests/stress/streaming/workload/libpy3stress-streaming-workload.global.a |58.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/actors/interconnect/ut/poller_actor_ut.cpp |58.6%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/tests/tools/kqprun/src/libtools-kqprun-src.a |58.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/actors/core/harmonizer/ut/harmonizer_ut.cpp |58.7%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/datashard/add_column/objcopy_04ccb9b757b207bc74705e9bb1.o |58.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/tests/stress/streaming/libpy3streaming.global.a |58.7%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/datashard/add_column/objcopy_b9596990f3fd41de0fa350fc68.o |58.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/persqueue/pqtablet/partition/mlp/mlp_counters_ut.cpp |58.6%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/datashard/add_column/objcopy_d54fb5ab35d376fe3311e9feea.o |58.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/persqueue/pqtablet/partition/mlp/mlp_dlq_mover_ut.cpp |58.6%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/persqueue/public/mlp/ut/common/libmlp-ut-common.a |58.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/ut_ru_calculator/ut_ru_calculator.cpp |58.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/limiter/grouped_memory/ut/ut_manager.cpp |58.7%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/datashard/select/objcopy_dfbd751fc64901b06ded4354c8.o |58.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/persqueue/pqtablet/partition/mlp/mlp_consumer_ut.cpp |58.7%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/traitlets/py3/libpy3python-traitlets-py3.global.a |58.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/login/protos/libpy3library-login-protos.global.a |58.7%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/boto3/py3/libpy3python-boto3-py3.global.a |58.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/config/protos/libpy3libs-config-protos.global.a |58.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/fq/libs/db_id_async_resolver_impl/ut/mdb_endpoint_generator_ut.cpp |58.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/tests/sql/lib/libpy3tests-sql-lib.global.a |58.7%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/yql/providers/yt/gateway/file/libyt-gateway-file.a |58.7%| [PY] {BAZEL_DOWNLOAD} 
$(B)/ydb/tests/stress/topic/workload/objcopy_1fa81513299be967ec5e9e5770.o |58.7%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/stress/topic/workload/objcopy_bcc049f94d208d0695f84c335f.o |58.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/memory_controller/memory_controller_ut.cpp |58.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/quota_manager/ut_helpers/liblibs-quota_manager-ut_helpers.a |58.7%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/stress/topic_kafka/workload/objcopy_8b1494cbc491521341d984598d.o |58.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/tests/oss/canonical/libpy3tests-oss-canonical.global.a |58.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/tests/stress/kv/libpy3workload_kv.global.a |58.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/memory_controller/memtable_collection_ut.cpp |58.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kqp/executer_actor/kqp_executer_ut.cpp |58.8%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/s3transfer/py3/libpy3python-s3transfer-py3.global.a |58.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/tests/stress/common/libpy3tests-stress-common.global.a |58.8%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/udfs/common/datetime2/libdatetime2_udf.global.a |58.8%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/compute/common/ut/objcopy_caf222d14387d4810b5cb3e853.o |58.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/library/actors/interconnect/rdma/ut_mem_pool_limit_32/allocator_ut.cpp |58.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/datashard/build_index/ut/ut_filter_kmeans.cpp |58.8%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/mkql_proto/protos/libpy3library-mkql_proto-protos.global.a |58.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/fq/libs/compute/common/ut/config_ut.cpp |58.8%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/antlr4-c3/libcontrib-libs-antlr4-c3.a |58.8%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/providers/dq/stats_collector/libproviders-dq-stats_collector.a |58.8%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/restricted/google/benchmark/librestricted-google-benchmark.a |58.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/fq/libs/compute/common/ut/utils_ut.cpp |58.8%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/core/qplayer/storage/memory/libqplayer-storage-memory.a |58.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/providers/yt/dq_task_preprocessor/libproviders-yt-dq_task_preprocessor.a |58.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/datashard/build_index/ut/ut_sample_k.cpp |58.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/datashard/build_index/ut/ut_reshuffle_kmeans.cpp |58.8%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/yql/providers/yt/fmr/file/metadata/interface/libfile-metadata-interface.a |58.8%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/yql/providers/yt/fmr/file/upload/impl/libfile-upload-impl.a |58.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/ut_blobstorage/restart_pdisk.cpp |58.8%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/yql/providers/yt/fmr/fmr_tool_lib/libyt-fmr-fmr_tool_lib.a |58.8%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/yql/providers/yt/fmr/coordinator/yt_coordinator_service/interface/libcoordinator-yt_coordinator_service-interface.a |58.8%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/yql/providers/yt/fmr/file/metadata/impl/libfile-metadata-impl.a |58.8%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/yql/providers/yt/fmr/coordinator/yt_coordinator_service/file/libcoordinator-yt_coordinator_service-file.a |58.8%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/stability/tool/objcopy_6403bfa5c5e35b29a21c73fb0e.o |58.8%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/yql/providers/yt/fmr/coordinator/yt_coordinator_service/impl/libcoordinator-yt_coordinator_service-impl.a |58.8%| [PY] {BAZEL_DOWNLOAD} 
$(B)/ydb/tests/stability/tool/objcopy_04f56802b68450abc8421282d0.o |58.8%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/functional/sqs/with_quotas/py3test |58.8%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/sql/v1/reflect/libsql-v1-reflect.a |58.8%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/functional/sqs/with_quotas/py3test |58.8%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/yql/providers/yt/fmr/coordinator/interface/proto_helpers/libcoordinator-interface-proto_helpers.a |58.8%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/blobstorage/ut_group/ydb-core-blobstorage-ut_group |58.8%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/tests/stress/ctas/workload/libpy3stress-ctas-workload.global.a |58.8%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/functional/sqs/with_quotas/py3test |58.8%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/yql/providers/yt/fmr/coordinator/client/libfmr-coordinator-client.a |58.8%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/tests/stress/ctas/libpy3ctas.global.a |58.8%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/stress/ctas/objcopy_3cb499a0fcc9aa014af2855233.o |58.8%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/sql/v1/lexer/regex/libv1-lexer-regex.a |58.9%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/stress/mixedpy/workload_mixed |58.9%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/utils/network/libessentials-utils-network.a |58.9%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/core/qplayer/storage/file/libqplayer-storage-file.a |58.8%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/sql/v1/complete/check/libv1-complete-check.a |58.8%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/sql/v1/lexer/check/libv1-lexer-check.a |58.8%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/yql/providers/yt/fmr/coordinator/impl/libfmr-coordinator-impl.a |58.8%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/sql/v1/complete/core/libv1-complete-core.a |58.8%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/sql/v1/lexer/antlr4_pure/libv1-lexer-antlr4_pure.a |58.8%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/stress/kv/tests/objcopy_c7c229be41e9b028572ad1aab3.o |58.8%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/core/pg_ext/libessentials-core-pg_ext.a |58.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/fq/libs/control_plane_proxy/ut/control_plane_proxy_ut.cpp |58.9%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/stress/kv/tests/objcopy_5294a064c14cf5a49516321590.o |58.9%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/core/url_lister/libessentials-core-url_lister.a |58.9%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/utils/actor_system/libyql-utils-actor_system.a |58.9%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/core/cbo/simple/libcore-cbo-simple.a |58.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/tools/dqrun/dqrun.cpp |58.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/grpc_services/tablet/rpc_change_schema_ut.cpp |58.9%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/sql/v1/complete/analysis/yql/libcomplete-analysis-yql.a |58.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/persqueue/pqtablet/partition/mlp/mlp_storage_ut.cpp |58.9%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/utils/bindings/libyql-utils-bindings.a |58.9%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/providers/clickhouse/actors/libproviders-clickhouse-actors.a |58.9%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/providers/ydb/actors/libproviders-ydb-actors.a |58.9%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/stress/kv/tests/objcopy_08f7acdb6eb761b28bf6990862.o |59.0%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/tools/dqrun/lib/libtools-dqrun-lib.a |58.9%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/functional/sqs/with_quotas/py3test |58.9%| [AR] {BAZEL_DOWNLOAD} 
$(B)/yql/essentials/sql/v1/complete/antlr4/libv1-complete-antlr4.a |58.9%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/providers/ydb/comp_nodes/libproviders-ydb-comp_nodes.a |58.9%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/providers/yt/actors/libproviders-yt-actors.a |58.9%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/sql/v1/complete/name/service/libcomplete-name-service.a |58.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/datashard/datashard_ut_replication.cpp |58.9%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/moto/py3/libpy3python-moto-py3.global.a |58.9%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/sql/v1/complete/name/service/ranking/libname-service-ranking.a |59.0%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/sql/v1/complete/libsql-v1-complete.a |59.0%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/sql/v1/complete/name/object/libcomplete-name-object.a |59.0%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/sql/v1/complete/name/object/simple/libname-object-simple.a |59.0%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/functional/sqs/with_quotas/py3test |59.0%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/kqp/workload_service/ut/unittest |59.0%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/sql/v1/complete/name/cluster/static/libname-cluster-static.a |59.0%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/sql/v1/complete/analysis/local/libcomplete-analysis-local.a |58.8%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/sql/v1/complete/name/object/simple/static/libobject-simple-static.a |58.8%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/sql/v1/complete/name/service/cluster/libname-service-cluster.a |58.9%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/sql/v1/complete/syntax/libv1-complete-syntax.a |58.9%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/functional/sqs/with_quotas/py3test |58.9%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/sql/v1/complete/name/service/static/libname-service-static.global.a |58.9%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/sql/v1/complete/name/service/column/libname-service-column.a |58.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/scheme_board/double_indexed_ut.cpp |58.9%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/sql/v1/complete/name/service/ranking/libname-service-ranking.global.a |58.9%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/functional/sqs/with_quotas/py3test |58.9%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/functional/sqs/with_quotas/py3test |58.9%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/sql/v1/complete/name/service/schema/libname-service-schema.a |58.9%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/sql/v1/lexer/antlr4_pure_ansi/libv1-lexer-antlr4_pure_ansi.a |58.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/mvp/core/mvp_test_runtime.cpp |58.9%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/sql/v1/complete/name/service/union/libname-service-union.a |59.0%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/fq/restarts/ydb-tests-fq-restarts |59.0%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/sql/v1/complete/analysis/global/libcomplete-analysis-global.a |59.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/mvp/core/mvp_ut.cpp |59.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/providers/s3/object_listers/yql_s3_path_ut.cpp |59.0%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/sql/v1/format/check/libv1-format-check.a |59.0%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/stress/cdc/cdc |59.1%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/sql/v1/complete/name/service/static/libname-service-static.a |59.1%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/functional/sqs/with_quotas/py3test |58.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kqp/ut/discovery/kqp_discovery_ut.cpp |58.8%| 
[AR] {BAZEL_DOWNLOAD} $(B)/yt/yql/providers/yt/fmr/coordinator/interface/libfmr-coordinator-interface.a |58.9%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/yql/providers/yt/fmr/coordinator/impl/libfmr-coordinator-impl.global.a |58.9%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/functional/sqs/with_quotas/py3test |58.9%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/sql/v1/reflect/libsql-v1-reflect.global.a |58.9%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/functional/sqs/with_quotas/py3test |58.9%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/tools/yql_facade_run/libessentials-tools-yql_facade_run.a |58.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/grpc_services/tablet/rpc_restart_tablet_ut.cpp |58.9%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/yql/providers/yt/fmr/file/upload/interface/libfile-upload-interface.a |58.9%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/core/url_preprocessing/libessentials-core-url_preprocessing.a |58.9%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/functional/sqs/with_quotas/py3test |58.9%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/yql/providers/yt/fmr/job_preparer/interface/libfmr-job_preparer-interface.a |58.9%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/sql/v1/complete/name/service/binding/libname-service-binding.a |58.9%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/stress/streaming/tests/ydb-tests-stress-streaming-tests |59.0%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/functional/sqs/messaging/py3test |59.0%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/client/yc_private/oauth/libclient-yc_private-oauth.a |59.0%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/yql/providers/yt/fmr/table_data_service/discovery/interface/libtable_data_service-discovery-interface.a |59.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/mvp/core/mvp_tokens.cpp |59.0%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/yql/providers/yt/fmr/job/interface/libfmr-job-interface.a |59.0%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/stress/testshard_workload/workload/libpy3stress-testshard_workload-workload.global.a |59.0%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/graph/shard/ut/ydb-core-graph-shard-ut |59.1%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/yql/tools/ytrun/lib/libtools-ytrun-lib.a |59.1%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/functional/sqs/with_quotas/py3test |59.1%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/yql/providers/yt/fmr/gc_service/interface/libfmr-gc_service-interface.a |59.1%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/yql/providers/yt/fmr/job_factory/interface/libfmr-job_factory-interface.a |59.1%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/functional/sqs/with_quotas/py3test |59.1%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/yql/providers/yt/fmr/gc_service/impl/libfmr-gc_service-impl.a |59.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/ut_group/main.cpp |59.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/grpc_services/tablet/rpc_execute_mkql_ut.cpp |59.1%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/functional/sqs/with_quotas/py3test |59.2%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/yql/providers/yt/fmr/job_launcher/libyt-fmr-job_launcher.a |59.0%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/yql/providers/yt/fmr/job_preparer/impl/libfmr-job_preparer-impl.a |59.0%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/yql/providers/yt/fmr/job_factory/impl/libfmr-job_factory-impl.a |59.0%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/cms/ut_sentinel_unstable/ydb-core-cms-ut_sentinel_unstable |59.0%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/yql/providers/yt/fmr/table_data_service/client/proto_helpers/libtable_data_service-client-proto_helpers.a |59.0%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/yql/providers/yt/fmr/process/libyt-fmr-process.a |59.0%| [AR] {BAZEL_DOWNLOAD} 
$(B)/yt/yql/providers/yt/fmr/table_data_service/discovery/file/libtable_data_service-discovery-file.a |59.1%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/yql/providers/yt/fmr/request_options/libyt-fmr-request_options.a |59.1%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/yql/providers/yt/lib/yt_url_lister/libyt-lib-yt_url_lister.a |59.1%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/yql/providers/yt/fmr/table_data_service/client/impl/libtable_data_service-client-impl.a |59.1%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/sql/v1/complete/text/libv1-complete-text.a |59.1%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/yql/providers/yt/fmr/table_data_service/interface/libfmr-table_data_service-interface.a |59.1%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/yql/providers/yt/fmr/yt_job_service/impl/libfmr-yt_job_service-impl.a |59.1%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/yql/providers/yt/fmr/yt_job_service/file/libfmr-yt_job_service-file.a |59.1%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/yql/providers/yt/fmr/table_data_service/local/impl/libtable_data_service-local-impl.a |59.1%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/yql/providers/yt/fmr/job/impl/libfmr-job-impl.a |59.1%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/yql/providers/yt/fmr/table_data_service/local/interface/libtable_data_service-local-interface.a |59.1%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/yql/providers/yt/fmr/worker/interface/libfmr-worker-interface.a |59.2%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/functional/sqs/with_quotas/py3test |59.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/time_cast/time_cast_ut.cpp |59.2%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/functional/sqs/with_quotas/py3test |59.2%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/yql/providers/yt/lib/secret_masker/dummy/liblib-secret_masker-dummy.a |59.2%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/sequenceshard/public/ut/ydb-core-tx-sequenceshard-public-ut |59.2%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/ymq/ut/ydb-core-ymq-ut |59.2%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/yql/providers/yt/fmr/request_options/proto_helpers/libfmr-request_options-proto_helpers.a |59.2%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/library/yql/providers/dq/runtime/ut/ydb-library-yql-providers-dq-runtime-ut |58.6%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/replication/service/ut_topic_reader/unittest |58.6%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/yql/providers/yt/fmr/utils/libyt-fmr-utils.a |58.6%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/functional/sqs/with_quotas/py3test |58.6%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/functional/sqs/with_quotas/py3test |58.6%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/functional/sqs/with_quotas/py3test |58.7%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/library/actors/interconnect/rdma/ut_mem_pool_limit_128/interconnect-rdma-ut_mem_pool_limit_128 |58.7%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/yql/providers/yt/fmr/worker/impl/libfmr-worker-impl.a |58.7%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/functional/sqs/with_quotas/py3test |58.7%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/long_tx_service/public/ut/ydb-core-tx-long_tx_service-public-ut |58.7%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/parser/antlr_ast/gen/v1_ansi_antlr4/libantlr_ast-gen-v1_ansi_antlr4.a |58.7%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/schemeshard/ut_login/unittest |58.7%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/apps/ydb/ydb |58.7%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/functional/sqs/messaging/py3test |58.8%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/yql/providers/yt/fmr/yt_job_service/interface/libfmr-yt_job_service-interface.a |58.9%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/functional/sqs/with_quotas/py3test |59.0%| [CC] {BAZEL_DOWNLOAD} 
$(S)/ydb/core/tx/datashard/datashard_ut_keys.cpp |59.0%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/functional/sqs/with_quotas/py3test |59.1%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/schemeshard/ut_sysview/unittest |59.1%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/library/query_actor/ut/ydb-library-query_actor-ut |59.0%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/yql/providers/yt/fmr/proto/libyt-fmr-proto.a |59.0%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/functional/sqs/with_quotas/py3test |59.0%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/schemeshard/ut_sysview/unittest |59.0%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/functional/sqs/with_quotas/py3test |59.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/datashard/build_index/ut/ut_fulltext_dict.cpp |59.0%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/datashard/ut_rs/ydb-core-tx-datashard-ut_rs |59.0%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/parser/antlr_ast/gen/v1_antlr4/libantlr_ast-gen-v1_antlr4.a |59.0%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/library/ut/ydb-tests-library-ut |59.0%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/quoter/ut/ydb-core-quoter-ut |59.1%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/responses/py3/libpy3python-responses-py3.global.a |59.1%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/public/sdk/cpp/src/client/federated_topic/ut/unittest |59.1%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/functional/sqs/with_quotas/py3test |59.1%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/schemeshard/ut_sysview/unittest |59.1%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/blobstorage/ut_blobstorage/lib/libblobstorage-ut_blobstorage-lib.a |59.2%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/stress/kv_volume/workload/libpy3stress-kv_volume-workload.global.a |59.2%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/threading/chunk_queue/libcpp-threading-chunk_queue.a |59.2%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/functional/sqs/with_quotas/py3test |59.0%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/functional/sqs/messaging/py3test |59.0%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/library/yql/providers/yt/actors/ut/ydb-library-yql-providers-yt-actors-ut |59.1%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/ymq/actor/yc_search_ut/ydb-core-ymq-actor-yc_search_ut |59.1%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/fq/libs/checkpointing/ut/ydb-core-fq-libs-checkpointing-ut |59.1%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/config/validation/ut/ydb-core-config-validation-ut |59.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/datashard/build_index/ut/ut_secondary_index.cpp |59.1%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/kqp/workload_service/ut/unittest |59.1%| [PY] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/stress/testshard_workload/workload/objcopy_bc86b379db0e87d023f98a592e.o |59.2%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/replication/service/ut_topic_reader/unittest |59.2%| [PY] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/stress/testshard_workload/workload/objcopy_49e50404ca7d8e93240b3617b2.o |59.2%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/functional/sqs/messaging/py3test |59.2%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/kqp/workload_service/ut/unittest |59.2%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/functional/sqs/messaging/py3test |59.2%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/functional/sqs/with_quotas/py3test |59.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/datashard/build_index/ut/ut_unique_index.cpp |59.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/datashard/build_index/ut/ut_recompute_kmeans.cpp |59.2%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/functional/sqs/with_quotas/py3test |59.2%| [AR] 
{BAZEL_DOWNLOAD} $(B)/ydb/library/ydb_issue/proto/libpy3library-ydb_issue-proto.global.a |59.3%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/yql/providers/yt/gateway/fmr/libyt-gateway-fmr.a |59.3%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/kqp/workload_service/ut/unittest |59.3%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/grpc_services/grpc_request_check_actor_ut/unittest |59.3%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/functional/sqs/messaging/py3test |59.3%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/functional/sqs/with_quotas/py3test |58.9%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/external_sources/object_storage/inference/ut/external_sources-object_storage-inference-ut |59.0%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tablet_flat/ut_large/ydb-core-tablet_flat-ut_large |59.0%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/functional/sqs/with_quotas/py3test |59.0%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/functional/sqs/with_quotas/py3test |59.0%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/functional/sqs/messaging/py3test |59.0%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/dq/actors/protos/libpy3dq-actors-protos.global.a |59.0%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/functional/sqs/messaging/py3test |59.0%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/functional/sqs/with_quotas/py3test |59.0%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/stress/s3_backups/tests/ydb-tests-stress-s3_backups-tests |59.1%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/config/validation/auth_config_validator_ut/core-config-validation-auth_config_validator_ut |59.1%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/datashard/ut_background_compaction/ydb-core-tx-datashard-ut_background_compaction |59.1%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/kqp/workload_service/ut/unittest |59.1%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/testing/gbenchmark/libcpp-testing-gbenchmark.a |59.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/datashard/build_index/ut/ut_prefix_kmeans.cpp |59.1%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/functional/sqs/with_quotas/py3test |59.1%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/functional/sqs/messaging/py3test |59.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/library/actors/interconnect/rdma/ut_mem_pool_limit_128/allocator_ut.cpp |59.2%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/schemeshard/ut_sysview/unittest |59.2%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/folder_service/proto/libpy3library-folder_service-proto.global.a |59.2%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/Flask/py3/libpy3python-Flask-py3.global.a |59.2%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/apps/ydbd/ydbd |59.2%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/psutil/py3/libpy3python-psutil-py3.a |59.2%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/psutil/py3/libpy3python-psutil-py3.global.a |59.2%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/replication/ut_helpers/libtx-replication-ut_helpers.a |59.2%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/stress/mixedpy/objcopy_c0a0299090f532c29f2ee6dc87.o |59.2%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/dq/proto/libpy3yql-dq-proto.global.a |59.0%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/actors/protos/libpy3library-actors-protos.global.a |59.1%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/jmespath/py3/libpy3python-jmespath-py3.global.a |59.3%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/formats/arrow/protos/libpy3library-formats-arrow-protos.global.a |59.3%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/itsdangerous/py3/libpy3python-itsdangerous-py3.global.a |59.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/ut_blobstorage/lib/debug_log.cpp |59.3%| [CC] 
{BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/ut_blobstorage/lib/node_warden_mock_pipe.cpp |59.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/ut_blobstorage/lib/ut_helpers.cpp |59.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/ut_blobstorage/lib/node_warden_mock_state.cpp |59.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/ut_blobstorage/lib/node_warden_mock_bsc.cpp |59.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/datashard/datashard_ut_compaction.cpp |59.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tablet_flat/benchmark/b_part.cpp |59.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/ut_login_large/ut_login_large.cpp |59.3%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/stress/scheme_board/pile_promotion/objcopy_e99c9b04005e36c324dfb9fd3b.o |59.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/replication/ydb_proxy/ydb_proxy_ut.cpp |59.3%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/tests/stress/mixedpy/libpy3workload_mixed.global.a |59.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/datashard/datashard_ut_locks.cpp |59.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/external_sources/object_storage/inference/ut/arrow_inference_ut.cpp |59.3%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/statistics/ut_common/libcore-statistics-ut_common.a |59.3%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/Jinja2/py3/libpy3python-Jinja2-py3.global.a |59.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/replication/ydb_proxy/partition_end_watcher_ut.cpp |59.3%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/wcwidth/py3/libpy3python-wcwidth-py3.global.a |59.3%| [AR] {BAZEL_DOWNLOAD} $(B)/library/python/monlib/libpy3library-python-monlib.a |59.3%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/row_dispatcher/format_handler/ut/common/libformat_handler-ut-common.a |59.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/ut_continuous_backup/ut_continuous_backup.cpp |59.3%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/fq/restarts/objcopy_bf578b7161cc94bf18488d04ca.o |59.3%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/fq/restarts/objcopy_e7477203b27fa0321cf18fd7ee.o |59.3%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/tests/fq/restarts/849c58233edc33539cbeb93a31_raw.auxcpp |59.3%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/fq/restarts/objcopy_f928a40774b17a9d6cd7cabd2c.o |59.4%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/tests/stress/cdc/libpy3cdc.global.a |59.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kqp/ut/scan/kqp_flowcontrol_ut.cpp |59.4%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/fq/restarts/objcopy_b8d63b589074145793d63c27a3.o |59.4%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/stress/cdc/objcopy_7d7339f4588397fc771e31030c.o |59.4%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/tools/nemesis/ut/objcopy_c98e5b95c64b8486a12f10d408.o |59.4%| [AR] {BAZEL_DOWNLOAD} $(B)/library/python/find_root/libpy3library-python-find_root.global.a |59.4%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/tools/stress_tool/lib/libydb_device_test.a |59.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/control/immediate_control_board_actor_ut.cpp |59.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kqp/ut/scan/kqp_point_consolidation_ut.cpp |59.3%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/stress/streaming/tests/objcopy_4d329553dae503930cad7a3eec.o |59.4%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/stress/streaming/tests/objcopy_f022524c80d73c5ff5d59211dd.o |59.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/public/sdk/cpp/src/client/persqueue_public/ut/basic_usage_ut.cpp |59.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/public/sdk/cpp/src/client/persqueue_public/ut/common_ut.cpp |59.4%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/stress/streaming/tests/objcopy_6dc907e08e2d6bd66a6c998081.o 
|59.4%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/stress/testshard_workload/workload/objcopy_8dbc00b2eb10d4ff1ed6e47c3a.o |59.4%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/ipython/py3/libpy3python-ipython-py3.global.a |59.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/public/sdk/cpp/src/client/persqueue_public/ut/compress_executor_ut.cpp |59.4%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/testlib/service_mocks/ldap_mock/libtestlib-service_mocks-ldap_mock.a |59.4%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/stress/testshard_workload/workload/objcopy_4488a798bff4dfb5b10f31aa13.o |59.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/tools/stress_tool/device_test_tool.cpp |59.4%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/MarkupSafe/py3/libpy3python-MarkupSafe-py3.a |59.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kqp/ut/scan/kqp_split_ut.cpp |59.4%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/tools/stress_tool/proto/libtools-stress_tool-proto.a |59.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/graph/shard/ut/shard_ut.cpp |59.5%| [AR] {BAZEL_DOWNLOAD} $(B)/library/python/testing/filter/libpy3python-testing-filter.global.a |59.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/ut_login/ut_login.cpp |59.4%| [AR] {BAZEL_DOWNLOAD} $(B)/library/python/windows/libpy3library-python-windows.global.a |59.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/cms/sentinel_ut_unstable.cpp |59.4%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/click/py3/libpy3python-click-py3.global.a |59.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/ymq/ut/queue_id_ut.cpp |59.5%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/tests/stress/node_broker/workload/libpy3stress-node_broker-workload.global.a |59.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/datashard/datashard_ut_ext_blobs_multiple_channels.cpp |59.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/actors/core/ut_fat/waiting_benchs.cpp |59.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/providers/dq/runtime/ut/file_cache_ut.cpp |59.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/ymq/ut/params_ut.cpp |59.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/long_tx_service/public/types_ut.cpp |59.4%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/libpy3ydb-core-protos.global.a |59.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/statistics/database/ut/ut_database.cpp |59.4%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/bucket_quoter/liblibrary-cpp-bucket_quoter.a |59.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kqp/ut/scan/kqp_scan_ut.cpp |59.5%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/hooks/testing/libcolumnshard-hooks-testing.a |59.5%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/workload/clickbench/liblibrary-workload-clickbench.global.a |59.5%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/apps/ydb/objcopy_774cbd1f10ee287899289ecb3f.o |59.5%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/workload/log/liblibrary-workload-log.global.a |59.5%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/formats/arrow/csv/table/libarrow-csv-table.a |59.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/cms/cms_ut_common.cpp |59.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/apps/ydb/main.cpp |59.5%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/workload/clickbench/liblibrary-workload-clickbench.a |59.5%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/arrow_inference/libydb-library-arrow_inference.a |59.5%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/threading/local_executor/libcpp-threading-local_executor.a |59.5%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/apps/ydb/commands/libcommands.a |59.5%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/benchmarks/queries/tpcds/libbenchmarks-queries-tpcds.global.a |59.5%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/workload/mixed/liblibrary-workload-mixed.a |59.5%| [AR] 
{BAZEL_DOWNLOAD} $(B)/ydb/library/workload/log/liblibrary-workload-log.a |59.5%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/benchmarks/gen/tpcds-dbgen/libbenchmarks-gen-tpcds-dbgen.global.a |59.5%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/sql/v1/highlight/libsql-v1-highlight.global.a |59.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/public/sdk/cpp/src/client/persqueue_public/ut/retry_policy_ut.cpp |59.5%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/benchmarks/gen/tpcds-dbgen/libbenchmarks-gen-tpcds-dbgen.a |59.5%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/tbb/libcontrib-libs-tbb.a |59.5%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-sqs/liblibs-aws-sdk-cpp-aws-cpp-sdk-sqs.a |59.5%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/lib/ydb_cli/commands/interactive/complete/libcommands-interactive-complete.a |59.6%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/backup/libkikimr_backup.a |59.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/query_actor/query_actor_ut.cpp |59.6%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/library/ut/objcopy_6508d12aaafde6f0a60fe8fff3.o |59.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/quoter/quoter_service_ut.cpp |59.6%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/library/ut/objcopy_cd9abca883cad9b25e20bf2f08.o |59.5%| [PY] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/stress/kv_volume/workload/objcopy_13cb62373e81699a2d2c9afb86.o |59.5%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/functional/sqs/with_quotas/py3test |59.5%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/public/udf/service/terminate_policy/libudf-service-terminate_policy.global.a |59.5%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/codegen/no_llvm/libminikql-codegen-no_llvm.a |59.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/public/sdk/cpp/src/client/persqueue_public/ut/read_session_ut.cpp |59.5%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/functional/sqs/messaging/py3test |59.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/ymq/actor/yc_search_ut/test_events_writer.cpp |59.5%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/yql/providers/yt/codec/codegen/no_llvm/libcodec-codegen-no_llvm.a |59.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/providers/yt/actors/ut/yql_yt_lookup_actor_ut.cpp |59.6%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/stress/log/workload/libpy3stress-log-workload.global.a |59.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/quoter/ut_helpers.cpp |59.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/quoter/kesus_quoter_ut.cpp |59.6%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/lib/ydb_cli/commands/interactive/highlight/libcommands-interactive-highlight.a |59.6%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/computation/no_llvm/libminikql-computation-no_llvm.a |59.6%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/functional/sqs/messaging/py3test |59.6%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/yql/providers/yt/comp_nodes/no_llvm/libyt-comp_nodes-no_llvm.a |59.6%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/workload/mixed/liblibrary-workload-mixed.global.a |59.6%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/grpc_services/grpc_request_check_actor_ut/unittest |59.5%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/library/ut/objcopy_bd84885c5c24478d181ba9e493.o |59.5%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/grpc_services/grpc_request_check_actor_ut/unittest |59.5%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/lib/ydb_cli/commands/interactive/ai/libcommands-interactive-ai.a |59.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/fq/libs/checkpointing/ut/checkpoint_coordinator_ut.cpp |59.5%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/lib/ydb_cli/commands/interactive/session/libcommands-interactive-session.a |59.5%| [TM] {BAZEL_DOWNLOAD, FAILED} 
ydb/tests/functional/sqs/messaging/py3test |59.5%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/workload/query/liblibrary-workload-query.global.a |59.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/ymq/actor/yc_search_ut/index_events_processor_ut.cpp |59.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/config/validation/validators_ut.cpp |59.6%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/workload/query/liblibrary-workload-query.a |59.6%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/functional/sqs/with_quotas/py3test |59.6%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/providers/common/http_gateway/mock/libcommon-http_gateway-mock.a |59.6%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/fq/pq_async_io/ut/ydb-tests-fq-pq_async_io-ut |59.6%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/lib/ydb_cli/commands/interactive/libydb_cli-commands-interactive.a |59.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tablet_flat/ut_large/ut_btree_index_large.cpp |59.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tablet_flat/flat_executor_ut_large.cpp |59.7%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/grpc_services/grpc_request_check_actor_ut/unittest |59.2%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/stress/s3_backups/tests/objcopy_3bb523a1011c0a7019f2684a90.o |59.2%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/lib/stat_visualization/libpublic-lib-stat_visualization.a |59.2%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/stress/s3_backups/tests/objcopy_e8c94c485e81b4b2899f52f594.o |59.2%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/stress/s3_backups/tests/objcopy_cd57da3671b96739ee73293fb1.o |59.2%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/functional/sqs/messaging/py3test |59.2%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/workload/tpcds/liblibrary-workload-tpcds.global.a |59.2%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/lib/ydb_cli/commands/interactive/ai/tools/libinteractive-ai-tools.a |59.3%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/functional/sqs/messaging/py3test |59.3%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/lib/ydb_cli/commands/interactive/ai/models/libinteractive-ai-models.a |59.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/datashard/datashard_ut_rs.cpp |59.3%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/tests/stress/s3_backups/workload/libpy3stress-s3_backups-workload.global.a |59.3%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/functional/sqs/messaging/py3test |59.3%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/datashard/ut_followers/ydb-core-tx-datashard-ut_followers |59.3%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/persqueue/pqtablet/quota/ut/ydb-core-persqueue-pqtablet-quota-ut |59.3%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/library/range_treap/ut_range_treap/unittest |59.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/config/validation/auth_config_validator_ut/auth_config_validator_ut.cpp |59.3%| [TA] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/blobstorage/ut_vdisk/test-results/unittest/{meta.json ... results_accumulator.log} |59.3%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/lib/ydb_cli/commands/interactive/highlight/color/libinteractive-highlight-color.a |59.3%| [TA] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/statistics/service/ut/test-results/unittest/{meta.json ... 
results_accumulator.log} |58.9%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/schemeshard/ut_incremental_restore_reboots/schemeshard-ut_incremental_restore_reboots |59.0%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/fq/libs/row_dispatcher/ut/unittest |59.0%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/lib/ydb_cli/commands/interactive/common/libcommands-interactive-common.a |59.1%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/formats/arrow/ut/ydb-core-formats-arrow-ut |59.1%| [TA] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/scheme/ut_pg/test-results/unittest/{meta.json ... results_accumulator.log} |59.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/workload/vector/liblibrary-workload-vector.a |59.1%| [TA] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/functional/api/test-results/py3test/{meta.json ... results_accumulator.log} |59.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/datashard/datashard_ut_background_compaction.cpp |59.2%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/stress/streaming/tests/py3test |59.2%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/sql/ydb-tests-sql |59.3%| [TA] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/blobstorage/ut_blobstorage/ut_donor/test-results/unittest/{meta.json ... results_accumulator.log} |59.3%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/ydb_convert/ut/ydb-core-ydb_convert-ut |59.0%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/stress/simple_queue/simple_queue |59.1%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/tx_proxy/ut_encrypted_storage/ydb-core-tx-tx_proxy-ut_encrypted_storage |59.1%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/sql/v1/highlight/libsql-v1-highlight.a |59.2%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/backup/impl/ut_local_partition_reader/ydb-core-backup-impl-ut_local_partition_reader |59.3%| [TA] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/replication/service/ut_table_writer/test-results/unittest/{meta.json ... results_accumulator.log} |59.3%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/lib/ydb_cli/commands/transfer_workload/libtransfer_workload.a |59.3%| [TA] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/blobstorage/ut_blobstorage/ut_read_only_pdisk/test-results/unittest/{meta.json ... results_accumulator.log} |59.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/lib/ydb_cli/commands/sqs_workload/sqs_json/libsqs_json.a |59.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/breakpad/libydb-library-breakpad.global.a |59.2%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/udfs/common/roaring/libroaring.global.a |59.2%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/workload/tpcds/liblibrary-workload-tpcds.a |59.2%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/sdk/cpp/src/client/debug/libsrc-client-debug.a |59.2%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/containers/top_keeper/libcpp-containers-top_keeper.a |59.2%| [TA] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/actorlib_impl/ut/test-results/unittest/{meta.json ... results_accumulator.log} |59.3%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/tld/liblibrary-cpp-tld.a |59.3%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/schemeshard/ut_rtmr/ydb-core-tx-schemeshard-ut_rtmr |59.3%| [TA] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/load_test/ut/test-results/unittest/{meta.json ... 
results_accumulator.log} |59.3%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/apps/ydbd/export/libapps-ydbd-export.a |59.3%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/datashard/ut_keys/unittest |59.4%| [PY] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/stress/log/workload/objcopy_3cad7ffe5c68e883c93b3c3b3a.o |58.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/apps/ydbd/main.cpp |58.8%| [TA] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/blobstorage/vdisk/anubis_osiris/ut/test-results/unittest/{meta.json ... results_accumulator.log} |58.9%| [BN] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/stress/kv_volume/workload/ydb_cli |58.9%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/udfs/common/knn/libknn_udf.global.a |58.9%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/udfs/common/histogram/libhistogram_udf.global.a |58.9%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/histogram/adaptive/protos/libhistogram-adaptive-protos.a |59.0%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/unicode/punycode/libcpp-unicode-punycode.a |59.0%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/histogram/adaptive/libcpp-histogram-adaptive.a |59.0%| [TA] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/functional/scheme_shard/test-results/py3test/{meta.json ... results_accumulator.log} |59.0%| [TA] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/schemeshard/ut_compaction/test-results/unittest/{meta.json ... results_accumulator.log} |59.1%| [TA] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/tools/nemesis/ut/test-results/py3test/{meta.json ... results_accumulator.log} |59.1%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/udfs/common/stat/static/libcommon-stat-static.a |59.1%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/udfs/common/ip_base/lib/libcommon-ip_base-lib.a |59.2%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/udfs/statistics_internal/libstatistics_internal_udf.global.a |59.2%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/udfs/common/hyperscan/libhyperscan_udf.global.a |58.9%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/udfs/common/hyperloglog/libhyperloglog_udf.global.a |59.0%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/fq/restarts/py3test |59.0%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/udfs/common/json/libjson_udf.global.a |59.0%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/udfs/common/ip_base/libip_udf.global.a |59.0%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/stress/topic_kafka/tests/ydb-tests-stress-topic_kafka-tests |59.0%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tablet_flat/ut_util/unittest |59.1%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/services/config/ut/unittest |59.1%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/udfs/common/pire/libpire_udf.global.a |59.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/lib/ydb_cli/commands/sqs_workload/libsqs_workload.a |59.1%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/security/ut/ydb-core-security-ut |59.2%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/sdk/cpp/src/client/cms/libsrc-client-cms.a |59.2%| [TA] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/services/persqueue_v1/ut/new_schemecache_ut/test-results/unittest/{meta.json ... 
results_accumulator.log} |59.2%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/udfs/common/stat/libstat_udf.global.a |59.3%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/lib/ydb_cli/import/liblib-ydb_cli-import.a |59.0%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/sql/v1/complete/name/service/impatient/libname-service-impatient.a |59.0%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/replication/controller/ut_assign_tx_id/core-tx-replication-controller-ut_assign_tx_id |59.0%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/udfs/logs/dsv/libdsv_udf.global.a |59.0%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/hyperloglog/liblibrary-cpp-hyperloglog.a |59.0%| [TA] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/wrappers/ut/test-results/unittest/{meta.json ... results_accumulator.log} |59.0%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/udfs/common/url_base/lib/libcommon-url_base-lib.a |59.0%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/datashard/async_replication/ydb-tests-datashard-async_replication |59.0%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/sql/v1/complete/name/cache/local/libname-cache-local.a |59.1%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/blobstorage/ut_blobstorage/ut_statestorage/core-blobstorage-ut_blobstorage-ut_statestorage |59.1%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/breakpad/src/client/linux/libsrc-client-linux.a |59.1%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/sql/v1/complete/name/cache/libcomplete-name-cache.a |59.1%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/stress/kv_volume/workload/objcopy_910f6a6af5a77490b33aeccdbd.o |59.2%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/stress/kv_volume/workload/objcopy_03c4a9af7660eaecdea048a241.o |59.2%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/lib/ydb_cli/commands/topic_workload/libtopic_workload.a |59.2%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/lib/ydb_cli/topic/libtopic.a |59.2%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/sql/v1/complete/name/object/simple/cached/libobject-simple-cached.a |59.2%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/udfs/common/set/libset_udf.global.a |59.2%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/udfs/common/topfreq/libtopfreq_udf.global.a |59.2%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/colorama/py3/libpy3python-colorama-py3.global.a |59.3%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/blobstorage/ut_vdisk/unittest |59.3%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/blobstorage/ut_vdisk/unittest |59.1%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/statistics/service/ut/unittest |59.2%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/sdk/cpp/src/client/monitoring/libsrc-client-monitoring.a |59.2%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/croaring/libcontrib-libs-croaring.a |59.2%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/statistics/service/ut/unittest |59.2%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/statistics/service/ut/unittest |59.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kqp/ut/yql/kqp_pragma_ut.cpp |59.2%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/statistics/service/ut/unittest |59.2%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/public/udf/service/stub/libudf-service-stub.global.a |59.3%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/statistics/service/ut/unittest |59.3%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/statistics/service/ut/unittest |59.3%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/functional/api/py3test |59.4%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/blobstorage/ut_blobstorage/ut_donor/unittest |59.4%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/udfs/common/url_base/liburl_udf.global.a |59.2%| [CC] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/comp_nodes/llvm16/mkql_way.cpp |59.2%| [TM] 
{BAZEL_DOWNLOAD, FAILED} ydb/core/blobstorage/ut_blobstorage/ut_donor/unittest |59.3%| [TS] {BAZEL_DOWNLOAD, FAILED} ydb/core/scheme/ut_pg/unittest |59.3%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/blobstorage/ut_blobstorage/ut_donor/unittest |59.3%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/breakpad/src/liblibs-breakpad-src.a |59.3%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/udfs/common/topfreq/static/libcommon-topfreq-static.a |59.3%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/replication/service/ut_table_writer/unittest |59.3%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/replication/service/ut_table_writer/unittest |59.4%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/blobstorage/ut_blobstorage/ut_read_only_pdisk/unittest |59.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/driver_lib/run/auto_config_initializer_ut.cpp |59.4%| [CC] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/comp_nodes/llvm16/mkql_group.cpp |59.4%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/statistics/service/ut/unittest |59.4%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/blobstorage/ut_blobstorage/ut_read_only_pdisk/unittest |59.4%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/statistics/service/ut/unittest |59.4%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/statistics/service/ut/unittest |59.5%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/statistics/service/ut/unittest |59.5%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/lib/ydb_cli/dump/liblib-ydb_cli-dump.a |59.5%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/workload/tpcc/liblibrary-workload-tpcc.a |59.3%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/functional/scheme_shard/py3test |59.3%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/statistics/service/ut/unittest |59.3%| [CC] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/comp_nodes/llvm16/mkql_now.cpp |59.4%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/actorlib_impl/ut/unittest |59.4%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/actorlib_impl/ut/unittest |59.4%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/tools/nemesis/ut/py3test |59.5%| [CC] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/comp_nodes/llvm16/mkql_nop.cpp |59.5%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/stress/log/workload/objcopy_1a5e4cdba128ffec3e3b5af433.o |59.5%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/stress/log/workload/objcopy_12968ff6e39e65b31509eac60c.o |59.5%| [CC] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/comp_nodes/llvm16/mkql_pickle.cpp |59.5%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/tools/nemesis/ut/py3test |59.6%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/functional/scheme_shard/py3test |59.6%| [CC] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/comp_nodes/llvm16/mkql_reduce.cpp |59.6%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/tools/nemesis/ut/py3test |59.6%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/tools/nemesis/ut/py3test |59.6%| [CC] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/comp_nodes/llvm16/mkql_removemember.cpp |59.6%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/tools/nemesis/ut/py3test |59.7%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/services/persqueue_v1/ut/new_schemecache_ut/unittest |59.5%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/tools/nemesis/ut/py3test |59.5%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/functional/scheme_shard/py3test |59.5%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/testlib/common/liblibrary-testlib-common.a |59.6%| [CC] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/comp_nodes/llvm16/mkql_block_coalesce.cpp |59.6%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/functional/scheme_shard/py3test |59.6%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/services/persqueue_v1/ut/new_schemecache_ut/unittest |59.6%| [TM] {BAZEL_DOWNLOAD, FAILED} 
ydb/services/persqueue_v1/ut/new_schemecache_ut/unittest (repeated)
|59.7%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/functional/scheme_shard/py3test (repeated)
|59.7%| [TS] {BAZEL_DOWNLOAD, FAILED} ydb/core/wrappers/ut/unittest (repeated)
|59.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/ut_blobstorage/statestorage_2_ring_groups.cpp
|59.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/ut_blobstorage/statestorage.cpp
|59.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/library/actors/interconnect/rdma/ut_mem_pool_limit_32/allocator_ut.cpp
|59.8%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/load_test/ut/unittest (repeated)
|59.8%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/actorlib_impl/ut/unittest (repeated)
|59.8%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/statistics/service/ut/unittest (repeated)
|59.8%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/blobstorage/ut_vdisk/unittest (repeated)
|60.0%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/tools/nemesis/ut/py3test (repeated)
|60.1%| [AR]/[CC]/[PB] {BAZEL_DOWNLOAD} successful cache downloads interleaved with the failures above: ut_helpers, pq_helpers, and pq_async_io libraries, pq_async_io and formats/arrow test sources, datashard_ut_followers.cpp, compression_ut.cpp, mkql comp-node sources, and generated proto headers (ydb_test_shard, ydb_auth, ydb_logstore, error, key_range, node_broker, statistics, opentelemetry common, ydb_view_v1, ydb_test_shard_v1, ydb_cms_v1, ydb_query_v1, ydb_operation_v1, ydb_monitoring_v1, ydb_scripting_v1, ydb_scheme_v1, fq_v1, tx_proxy)
|59.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/security/ticket_parser_ut.cpp
|60.1%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/schemeshard/ut_compaction/unittest (repeated)
|60.1%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/blobstorage/vdisk/anubis_osiris/ut/unittest (repeated)
|60.2%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/replication/service/ut_table_writer/unittest (repeated)
|60.1%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/blobstorage/ut_blobstorage/ut_read_only_pdisk/unittest (repeated)
|60.2%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/blobstorage/ut_blobstorage/ut_donor/unittest (repeated)
|60.2%| [TS] {BAZEL_DOWNLOAD, FAILED} ydb/core/scheme/ut_pg/unittest (repeated)
|60.2%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/functional/api/py3test (repeated)
|60.3%| [AR]/[PY]/[PB] {BAZEL_DOWNLOAD} successful downloads: libtop_udf.global.a, libclicommands.a, async_replication objcopy outputs, and table_stats/scheme_board/statestorage/stream proto headers
|60.3%| [TA] {BAZEL_DOWNLOAD, FAILED} test-result aggregates ($(B)/.../test-results/.../{meta.json ... results_accumulator.log}) for: ydb/core/grpc_streaming/ut, ydb/core/tx/schemeshard/ut_external_table, ydb/public/sdk/cpp/src/client/persqueue_public/ut, ydb/core/tx/sharding/ut, ydb/core/kqp/ut/yql, ydb/core/ydb_convert/ut, ydb/core/persqueue/ut, ydb/library/table_creator/ut, ydb/core/tx/schemeshard/ut_subdomain, ydb/core/blobstorage/ut_vdisk2, ydb/core/backup/impl/ut_table_writer, ydb/tests/functional/hive, ydb/core/client/server/ut, ydb/core/tx/schemeshard/ut_auditsettings
|60.3%| [LD] {BAZEL_DOWNLOAD, FAILED} test binaries: ut_bridge, ut_external_data_source, ydb-core-kqp-federated_query-ut, ut_olap, ut_board_subscriber, ydb-core-kqp-runtime-ut, ydb-core-blobstorage-nodewarden-ut, ut_kqp_scan, ydb-core-io_formats-arrow-scheme-ut, ut_incremental_restore, ydb-core-blobstorage-vdisk-skeleton-ut, ydb-core-tx-columnshard-engines-ut, ut_replication, ydb-tests-functional-script_execution, ydb-tests-functional-api, blobstorage-ut_blobstorage-ut_vdisk_restart, ydb-core-tablet-ut, ut_erase_rows, schemeshard-ut_external_data_source_reboots, ydb-core-kqp-ut-indexes, ut_streaming_query, ydb-core-cms-ut_sentinel, ydb-library-yql-dq-actors-compute-ut, ydb-core-log_backend-ut, ydb-tests-functional-sqs-messaging, ydb-core-engine-ut, ut_replication_reboots, ut_consistent_copy_tables, query_replay_yt, ydb-library-yaml_config-ut, ut_filestore_reboots, ydb-tests-functional-sqs-cloud, ut_serverless, example_01_ping_pong, ut_read_iterator, ydb-core-kqp-ut-federated_query-large_results, ydb-core-tx-sharding-ut, ydb-core-sys_view-ut, ut_replica
|60.2%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/ymq/actor/yc_search_ut/unittest, ydb/tests/stress/testshard_workload/tests/py3test, ydb/tests/stress/show_create/view/tests/py3test, ydb/tests/functional/kqp/kqp_indexes/unittest, ydb/tests/functional/scheme_tests/py3test, ydb/tests/fq/pq_async_io/ut/unittest
|59.0%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/grpc_streaming/ut/unittest (repeated)
|59.3%| [TS] {BAZEL_DOWNLOAD, FAILED} ydb/core/external_sources/ut/unittest, ydb/core/ymq/http/ut/unittest, ydb/library/yql/providers/yt/actors/ut/unittest, ydb/core/tx/columnshard/splitter/ut/unittest, ydb/library/yql/dq/opt/ut/unittest, ydb/core/fq/libs/db_id_async_resolver_impl/ut/unittest
|59.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/ut_blobstorage/bridge_get.cpp
|60.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/library/actors/interconnect/rdma/ut_mem_pool_limit_128/allocator_ut.cpp
|59.2%| [PY]/[CC]/[AR] {BAZEL_DOWNLOAD} successful downloads: stress/topic_kafka and stress/simple_queue objcopy outputs and libraries, quota_tracker_ut.cpp, assign_tx_id_ut.cpp, ut_rtmr.cpp, local_partition_reader_ut.cpp, ut_dictionary.cpp, ut_column_filter.cpp, table_description_ut.cpp, encrypted_storage_ut.cpp
|59.2%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/schemeshard/ut_external_table/unittest (repeated)
|59.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/nodewarden/distconf_ut.cpp
|59.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/nodewarden/blobstorage_node_warden_ut.cpp
|59.2%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/public/sdk/cpp/src/client/persqueue_public/ut/unittest (repeated)
|59.4%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/kqp/ut/yql/unittest (repeated)
|59.4%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/library/table_creator/ut/unittest (repeated)
|59.3%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/schemeshard/ut_subdomain/unittest (repeated)
|59.4%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/client/server/ut/unittest (repeated)
|59.5%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/functional/hive/py3test (repeated)
|59.5%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/blobstorage/ut_vdisk2/unittest (repeated)
|59.5%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/backup/impl/ut_table_writer/unittest (repeated)
|59.5%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/schemeshard/ut_auditsettings/unittest (repeated)
|60.1%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/persqueue/ut/unittest (repeated)
|59.1%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/ydb_convert/ut/unittest (repeated)
|58.7%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/sharding/ut/unittest (repeated)
|59.6%| [CC]/[PB]/[AR]/[PY] {BAZEL_DOWNLOAD} successful downloads interleaved throughout: unit-test sources (formats/arrow ut, tx_proxy helpers, ydb_convert_ut, yaml_config and incompatibility_rules ut, datashard read-iterator and incremental-restore-reboots ut, kqp federated_query helpers and large_results ut, board_subscriber_ut, tablet counters/pipecache/bootstrapper ut, columnshard engines ut_script/ut_program, dq compute actor ut, query_replay_yt main.cpp, actors example main.cpp), mkql comp-node sources, generated proto headers (file_storage, actors, snapshot, fq_private, ydb_replication, issue_severity, vdisk events, accessor, dq_status_codes, persqueue error/common, dq_tasks, validation, tablet_pipe, ydb_rate_limiter, ydb_coordination, yc_private accessservice/servicecontrol/iam/operation, nc_private audit annotations, google rpc status and api http, fq, viewer, tracing, hive, compaction, services, kqp_compile_settings, kqp_stats, kqp, tenant_pool, tablet_database, whiteboard_disk_states, tx_columnshard), libraries (tests/library/sqs, yql_testlib, federated_query common, minikql protobuf_udf, file/protobuf/streaming UDFs, dq comp_nodes no_llvm), and ydb/tests/sql, functional/api, and sqs/cloud objcopy outputs
|58.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/ut_blobstorage/vdisk_restart.cpp
|58.9%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/quoter/ut/unittest
|58.8%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/replication/service/ut_topic_reader/ydb-core-tx-replication-service-ut_topic_reader
|58.9%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/schemeshard/ut_external_table_reboots/ydb-core-tx-schemeshard-ut_external_table_reboots
|58.9%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/blobstorage/ut_vdisk2/ydb-core-blobstorage-ut_vdisk2
|59.0%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/fq/libs/ydb/ut/ydb-core-fq-libs-ydb-ut
|58.2%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/schemeshard/ut_resource_pool/ydb-core-tx-schemeshard-ut_resource_pool
|58.9%| [TA] {BAZEL_DOWNLOAD, FAILED} test-result aggregates for: ydb/core/tx/datashard/ut_minikql, ydb/services/metadata/secret/ut, ydb/core/engine/ut, ydb/core/kqp/ut/knn
|59.1%| [CC]/[AR] {BAZEL_DOWNLOAD} successful downloads: query_replay_yt query_compiler.cpp, dq compute actor ut sources (mock_lookup_factory.cpp, dq_source_watermark_tracker_ut.cpp, dq_compute_issues_buffer_ut.cpp, dq_compute_actor_async_input_helper_ut.cpp), libcompute-ut-proto.a, libminikql-invoke_builtins-no_llvm.a, libstreaming_udf.global.a, libcpp-protobuf-yql.a, libcpp-protobuf-dynamic_prototype.a
results_accumulator.log} |58.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/ut_replication/ut_replication.cpp |58.3%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/schemeshard/ut_split_merge/ydb-core-tx-schemeshard-ut_split_merge |58.3%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/blobstorage/ut_testshard/unittest |58.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/tools/query_replay_yt/query_replay.cpp |58.4%| [TA] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/replication/controller/ut_stream_creator/test-results/unittest/{meta.json ... results_accumulator.log} |58.4%| [TA] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/columnshard/ut_schema/test-results/unittest/{meta.json ... results_accumulator.log} |58.5%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/kqp/ut/tx/ydb-core-kqp-ut-tx |58.5%| [TA] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/kqp/provider/ut/test-results/unittest/{meta.json ... results_accumulator.log} |57.9%| [TA] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/datashard/ut_locks/test-results/unittest/{meta.json ... results_accumulator.log} |57.9%| [TA] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/datashard/ut_volatile/test-results/unittest/{meta.json ... results_accumulator.log} |57.9%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/schemeshard/ut_vector_index_build_reboots/tx-schemeshard-ut_vector_index_build_reboots |57.9%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/library/actors/examples/02_discovery/example_02_discovery |58.0%| [TA] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/http_proxy/ut/test-results/unittest/{meta.json ... results_accumulator.log} |58.0%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/fq/libs/row_dispatcher/format_handler/ut/unittest |58.0%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/schemeshard/ut_auditsettings/ydb-core-tx-schemeshard-ut_auditsettings |58.0%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/stress/show_create/table/show_create_table |58.0%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/library/actors/interconnect/rdma/ut_mem_pool_limit_32/gtest |58.1%| [TA] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/kqp/ut/data_integrity/test-results/unittest/{meta.json ... results_accumulator.log} |58.1%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/functional/dstool/ydb-tests-functional-dstool |58.1%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/services/dynamic_config/ut/ydb-services-dynamic_config-ut |58.1%| [TA] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/datashard/ut_object_storage_listing/test-results/unittest/{meta.json ... results_accumulator.log} |58.1%| [TA] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/blobstorage/incrhuge/ut/test-results/unittest/{meta.json ... results_accumulator.log} |58.2%| [TA] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/schemeshard/ut_backup/test-results/unittest/{meta.json ... results_accumulator.log} |57.8%| [TA] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/schemeshard/ut_bsvolume/test-results/unittest/{meta.json ... results_accumulator.log} |57.8%| [TA] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/schemeshard/ut_shred/test-results/unittest/{meta.json ... results_accumulator.log} |57.9%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/kqp/executer_actor/ut/unittest |57.9%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tablet_flat/ut_util/ydb-core-tablet_flat-ut_util |57.9%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/schemeshard/ut_rtmr_reboots/ydb-core-tx-schemeshard-ut_rtmr_reboots |57.9%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/http_proxy/ut/ydb-core-http_proxy-ut |57.9%| [TA] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/blobstorage/vdisk/skeleton/ut/test-results/unittest/{meta.json ... 
results_accumulator.log} |57.9%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/datashard/ut_replication/unittest |58.0%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/library/yql/providers/s3/common/ut/ydb-library-yql-providers-s3-common-ut |58.0%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/library/actors/async/benchmark/benchmark_actors_async |58.0%| [TA] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/solomon/reading/test-results/py3test/{meta.json ... results_accumulator.log} |58.0%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/services/persqueue_v1/ut/describes_ut/ydb-services-persqueue_v1-ut-describes_ut |58.0%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/tools/kqprun/recipe/kqprun_recipe |58.1%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/kqp/ut/cost/ydb-core-kqp-ut-cost |58.1%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/blobstorage/vdisk/query/ut/ydb-core-blobstorage-vdisk-query-ut |58.1%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/functional/serverless/ydb-tests-functional-serverless |57.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/sys_view/ut_counters.cpp |57.9%| [TA] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tablet_flat/ut/test-results/unittest/{meta.json ... results_accumulator.log} |58.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/scheme_board/ut_helpers.cpp |58.0%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/fq/libs/test_connection/ut/ydb-core-fq-libs-test_connection-ut |58.0%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/stress/kafka/tests/ydb-tests-stress-kafka-tests |58.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/sharding/ut/ut_sharding.cpp |58.0%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/blobstorage/vdisk/common/ut/ydb-core-blobstorage-vdisk-common-ut |58.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/sys_view/ut_registry.cpp |58.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/sys_view/ut_labeled.cpp |58.1%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/fq/libs/common/ut/ydb-core-fq-libs-common-ut |57.8%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/testlib/s3_recipe_helper/liblibrary-testlib-s3_recipe_helper.a |57.8%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/datashard/ut_object_storage_listing/ydb-core-tx-datashard-ut_object_storage_listing |57.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/sys_view/ut_common.cpp |57.8%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/sqs/cloud/objcopy_6e0da74b1512d0ffe19c5dc500.o |57.9%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/apps/pgwire/pgwire |57.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/sys_view/ut_auth.cpp |57.9%| [TA] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/functional/sqs/cloud/test-results/py3test/{meta.json ... 
results_accumulator.log} |57.9%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/transfer/ut/row_table/unittest |57.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yaml_config/yaml_config_proto2yaml_ut.cpp |57.9%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/datashard/ut_minikql/unittest |58.0%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/datashard/ut_minikql/unittest |58.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/scheme_board/replica_ut.cpp |58.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yaml_config/yaml_config_parser_ut.cpp |57.9%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/datashard/ut_minikql/unittest |57.9%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/datashard/ut_minikql/unittest |57.9%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/sqs/messaging/objcopy_791e2f78c18891d943ecce5e41.o |57.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/ut_serverless/ut_serverless.cpp |58.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/ut_replication_reboots/ut_replication_reboots.cpp |58.0%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/datashard/ut_minikql/unittest |58.0%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/mkql_proto/ut/helpers/libmkql_proto-ut-helpers.a |58.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yaml_config/console_dumper_ut.cpp |58.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/ut_consistent_copy_tables/ut_consistent_copy_tables.cpp |58.0%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/sqs/messaging/objcopy_7211c23d9494c46f0f60063e9e.o |58.0%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/sqs/messaging/objcopy_48a08121f0a68da2f2666b0341.o |58.1%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/script_execution/objcopy_f05ead59375a9db120b95dd730.o |58.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/log_backend/json_envelope_ut.cpp |58.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/engine/mkql_engine_flat_host_ut.cpp |58.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tablet/pipe_tracker_ut.cpp |58.1%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/services/metadata/secret/ut/unittest |58.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/engine/mkql_proto_ut.cpp |58.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/blobstorage/vdisk/skeleton/skeleton_vpatch_actor_ut.cpp |57.9%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/services/metadata/secret/ut/unittest |58.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/ut_external_data_source_reboots/ut_external_data_source_reboots.cpp |58.0%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/tornado/tornado-4/libpy3python-tornado-tornado-4.a |58.0%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/script_execution/objcopy_bcbbd2d8f2367d5f3ed5199234.o |58.0%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/script_execution/objcopy_1aeeb50f676472f975830c135d.o |58.0%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/services/metadata/secret/ut/unittest |58.0%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/replication/controller/ut_stream_creator/unittest |58.0%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/api/objcopy_e2a089b95d9316f6e26025d3e3.o |58.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tablet/tablet_metrics_ut.cpp |58.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tablet/tablet_req_blockbs_ut.cpp |58.1%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/api/objcopy_253d734e8c901d319d84fcc6e9.o |58.1%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/replication/controller/ut_stream_creator/unittest |58.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kqp/ut/indexes/kqp_stream_indexes_ut.cpp |58.1%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/api/objcopy_363b5875cc5c5e5745458b16b8.o |58.1%| [TM] {BAZEL_DOWNLOAD, FAILED} 
ydb/core/tx/replication/controller/ut_stream_creator/unittest |58.1%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/replication/controller/ut_stream_creator/unittest |58.1%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/columnshard/ut_schema/unittest |58.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/engine/kikimr_program_builder_ut.cpp |58.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tablet/tablet_resolver_ut.cpp |58.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tablet/tablet_counters_aggregator_ut.cpp |58.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tablet/resource_broker_ut.cpp |58.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/ut_streaming_query/ut_streaming_query.cpp |58.1%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/columnshard/ut_schema/unittest |58.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/blobstorage/vdisk/skeleton/skeleton_oos_logic_ut.cpp |58.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/ut_incremental_restore/ut_verify_failure_regression.cpp |58.1%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/columnshard/ut_schema/unittest |58.1%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/tornado/tornado-4/libpy3python-tornado-tornado-4.global.a |58.2%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/columnshard/ut_schema/unittest |58.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/blobstorage/nodewarden/bind_queue_ut.cpp |58.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tablet/tablet_pipe_ut.cpp |58.2%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/columnshard/ut_schema/unittest |58.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kqp/runtime/kqp_scan_fetcher_ut.cpp |58.2%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/columnshard/ut_schema/unittest |58.2%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/kqp/provider/ut/unittest |58.2%| [TS] {BAZEL_DOWNLOAD, FAILED} ydb/core/kqp/ut/data_integrity/unittest |58.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tablet/tablet_state_ut.cpp |58.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/cms/sentinel_ut.cpp |58.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/cms/cms_ut_common.cpp |58.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/io_formats/arrow/scheme/csv_arrow_ut.cpp |58.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kqp/ut/indexes/kqp_indexes_prefixed_vector_ut.cpp |58.3%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/blobstorage/incrhuge/ut/unittest |58.3%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/blobstorage/incrhuge/ut/unittest |58.2%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/schemeshard/ut_shred/unittest |58.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kqp/runtime/kqp_scan_data_ut.cpp |58.2%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/blobstorage/vdisk/skeleton/ut/unittest |58.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/columnshard/engines/ut/helper.cpp |58.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/datashard/datashard_ut_kqp_scan.cpp |58.3%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/blobstorage/vdisk/skeleton/ut/unittest |58.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kqp/runtime/scheduler/kqp_compute_scheduler_ut.cpp |58.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/sys_view/ut_kqp.cpp |58.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/engine/mkql_engine_flat_ut.cpp |58.4%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tablet_flat/ut/unittest |58.4%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tablet_flat/ut/unittest |58.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/datashard/datashard_ut_erase_rows.cpp |58.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/ut_incremental_restore/ut_incremental_restore.cpp |58.3%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/protos/ydb_import.pb.{h, cc} |58.3%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/functional/sqs/cloud/py3test |58.3%| [CC] 
{BAZEL_DOWNLOAD} $(S)/ydb/core/kqp/ut/indexes/kqp_indexes_vector_ut.cpp |58.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/ut_olap/ut_olap.cpp |58.3%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tablet_flat/ut/unittest |58.3%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tablet_flat/ut/unittest |58.3%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/protos/ydb_query_stats.pb.{h, cc} |58.4%| [PB] {BAZEL_DOWNLOAD} $(B)/yql/essentials/protos/clickhouse.pb.{h, cc} |58.4%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/serializable/objcopy_445797246443360525d31550d1.o |58.4%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/protos/ydb_status_codes.pb.{h, cc} |58.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kqp/ut/indexes/kqp_indexes_fulltext_ut.cpp |58.4%| [CC] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/comp_nodes/llvm16/mkql_match_recognize.cpp |58.5%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/protos/ydb_clickhouse_internal.pb.{h, cc} |58.5%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/protos/ydb_value.pb.{h, cc} |58.5%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/functional/sqs/cloud/py3test |58.5%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/functional/sqs/cloud/py3test |58.3%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/functional/sqs/cloud/py3test |58.3%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/functional/sqs/cloud/py3test |58.3%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/functional/sqs/cloud/py3test |58.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/replication/service/topic_reader_ut.cpp |58.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kqp/ut/indexes/kqp_indexes_multishard_ut.cpp |58.3%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/functional/sqs/cloud/py3test |58.3%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/functional/sqs/cloud/py3test |58.4%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/functional/sqs/cloud/py3test |58.4%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/functional/sqs/cloud/py3test |58.4%| [PB] {BAZEL_DOWNLOAD} $(B)/yql/essentials/public/types/yql_types.pb.{h, cc} |58.4%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/functional/sqs/cloud/py3test |58.4%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/functional/sqs/cloud/py3test |58.4%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/functional/sqs/cloud/py3test |58.4%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/functional/sqs/cloud/py3test |58.5%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/functional/sqs/cloud/py3test |58.5%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/grpc/draft/ydb_datastreams_v1.{pb.h ... grpc.pb.h} |58.5%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/schemeshard/ut_bsvolume/unittest |58.6%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/functional/sqs/cloud/py3test |58.6%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/tests/library/fixtures/libpy3tests-library-fixtures.global.a |58.6%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/dstool/objcopy_8d4997dfd93843149bbf2f76f1.o |58.6%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/functional/sqs/cloud/py3test |58.6%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/schemeshard/ut_bsvolume/unittest |58.6%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/functional/sqs/cloud/py3test |58.6%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/functional/sqs/cloud/py3test |58.6%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/functional/sqs/cloud/py3test |58.4%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/dstool/objcopy_281dee3582d4ce59ddf75426c6.o |58.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/services/dynamic_config/dynamic_config_ut.cpp |58.4%| [PR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/core/expr_nodes/yql_expr_nodes.{gen.h ... 
defs.inl.h} |58.4%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/functional/sqs/cloud/py3test |58.5%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/functional/sqs/cloud/py3test |58.5%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/functional/sqs/cloud/py3test |58.5%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/functional/sqs/cloud/py3test |58.5%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/datashard/ut_object_storage_listing/unittest |58.5%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/schemeshard/ut_shred/unittest |58.5%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/schemeshard/ut_bsvolume/unittest |58.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tablet_flat/util_string_ut.cpp |58.6%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/serverless/objcopy_e2acb41e7099c0db4fe54a1587.o |58.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tablet_flat/util_pool_ut.cpp |58.6%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/schemeshard/ut_bsvolume/unittest |58.6%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/memory_controller_config.{pb.h ... grpc.pb.h} |58.6%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/datashard/ut_object_storage_listing/unittest |58.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/blobstorage/vdisk/query/query_spacetracker_ut.cpp |58.6%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/serverless/objcopy_cf3971576aced18377e99f5367.o |58.6%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/serverless/objcopy_7c81cbfa6b5ce112674cb0a849.o |58.6%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/schemeshard/ut_backup/unittest |58.7%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/schemeshard/ut_bsvolume/unittest |58.7%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/datashard/ut_object_storage_listing/unittest |58.7%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/schemeshard/ut_backup/unittest |58.7%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/datashard/ut_object_storage_listing/unittest |58.7%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/datashard/ut_object_storage_listing/unittest |58.7%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/stress/kafka/tests/objcopy_2900a0c4957bb4f1bc1729508c.o |58.7%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/long_tx_service.{pb.h ... 
grpc.pb.h} |58.7%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/schemeshard/ut_backup/unittest |58.7%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/datashard/ut_object_storage_listing/unittest |58.7%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/datashard/ut_object_storage_listing/unittest |58.7%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/datashard/ut_object_storage_listing/unittest |58.7%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/datashard/ut_object_storage_listing/unittest |58.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/blobstorage/vdisk/common/circlebufstream_ut.cpp |58.8%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/datashard/ut_object_storage_listing/unittest |58.8%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/datashard/ut_object_storage_listing/unittest |58.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/fq/libs/common/rows_proto_splitter_ut.cpp |58.0%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/schemeshard/ut_bsvolume/unittest |58.0%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/datashard/ut_object_storage_listing/unittest |58.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/blobstorage/vdisk/common/memusage_ut.cpp |58.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/http_proxy/ut/http_ut.cpp |58.1%| [TS] {BAZEL_DOWNLOAD, FAILED} ydb/core/kqp/ut/data_integrity/unittest |58.1%| [TS] {BAZEL_DOWNLOAD, FAILED} ydb/core/kqp/ut/data_integrity/unittest |58.1%| [TS] {BAZEL_DOWNLOAD, FAILED} ydb/core/kqp/ut/data_integrity/unittest |58.1%| [TS] {BAZEL_DOWNLOAD, FAILED} ydb/core/kqp/ut/data_integrity/unittest |58.1%| [TS] {BAZEL_DOWNLOAD, FAILED} ydb/core/kqp/ut/data_integrity/unittest |58.1%| [TS] {BAZEL_DOWNLOAD, FAILED} ydb/core/kqp/ut/data_integrity/unittest |58.1%| [TS] {BAZEL_DOWNLOAD, FAILED} ydb/core/kqp/ut/data_integrity/unittest |58.1%| [TS] {BAZEL_DOWNLOAD, FAILED} ydb/core/kqp/ut/data_integrity/unittest |58.1%| [TS] {BAZEL_DOWNLOAD, FAILED} ydb/core/kqp/ut/data_integrity/unittest |58.1%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/datashard/ut_object_storage_listing/unittest |58.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/blobstorage/vdisk/common/vdisk_pdisk_error_ut.cpp |58.2%| [TS] {BAZEL_DOWNLOAD, FAILED} ydb/core/kqp/ut/data_integrity/unittest |58.2%| [TS] {BAZEL_DOWNLOAD, FAILED} ydb/core/kqp/ut/data_integrity/unittest |58.2%| [TS] {BAZEL_DOWNLOAD, FAILED} ydb/core/kqp/ut/data_integrity/unittest |58.2%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/kqp_physical.{pb.h ... 
grpc.pb.h} |58.2%| [TS] {BAZEL_DOWNLOAD, FAILED} ydb/core/kqp/ut/data_integrity/unittest |58.2%| [TS] {BAZEL_DOWNLOAD, FAILED} ydb/core/kqp/ut/data_integrity/unittest |58.2%| [TS] {BAZEL_DOWNLOAD, FAILED} ydb/core/kqp/ut/data_integrity/unittest |58.3%| [TS] {BAZEL_DOWNLOAD, FAILED} ydb/core/kqp/ut/data_integrity/unittest |58.3%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/http_proxy/ut/unittest |58.3%| [TS] {BAZEL_DOWNLOAD, FAILED} ydb/core/kqp/ut/data_integrity/unittest |57.7%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/columnshard/ut_schema/unittest |57.7%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/kqp/provider/ut/unittest |57.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/apps/dstool/lib/libpy3dstool_lib.global.a |57.8%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/kqp/provider/ut/unittest |57.8%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/kqp/provider/ut/unittest |57.8%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/kqp/provider/ut/unittest |57.8%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/http_proxy/ut/datastreams_fixture/libhttp_proxy-ut-datastreams_fixture.a |57.8%| [TS] {BAZEL_DOWNLOAD, FAILED} ydb/core/kqp/ut/data_integrity/unittest |57.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/actors/examples/02_discovery/publish.cpp |57.9%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/columnshard/ut_schema/unittest |57.9%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/library/actors/examples/02_discovery/protocol.pb.cc |57.9%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/kqp/provider/ut/unittest |57.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/datashard/datashard_ut_read_iterator.cpp |57.9%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/columnshard/ut_schema/unittest |57.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kqp/ut/tx/kqp_mvcc_ut.cpp |57.7%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/columnshard/ut_schema/unittest |57.7%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/columnshard/ut_schema/unittest |57.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/actors/examples/02_discovery/endpoint.cpp |57.7%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/columnshard/ut_schema/unittest |57.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kqp/ut/tx/kqp_sink_locks_ut.cpp |57.7%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/columnshard/ut_schema/unittest |57.7%| [TS] {BAZEL_DOWNLOAD, FAILED} ydb/core/kqp/ut/data_integrity/unittest |57.7%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/columnshard/ut_schema/unittest |57.8%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/columnshard/ut_schema/unittest |57.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kqp/ut/tx/kqp_rollback.cpp |57.8%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/columnshard/ut_schema/unittest |57.8%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/columnshard/ut_schema/unittest |57.8%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/columnshard/ut_schema/unittest |57.8%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/columnshard/ut_schema/unittest |57.8%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/columnshard/ut_schema/unittest |57.8%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/http_proxy/ut/unittest |57.8%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/columnshard/ut_schema/unittest |57.8%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/columnshard/ut_schema/unittest |57.8%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/replication/controller/ut_stream_creator/unittest |57.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/library/actors/examples/02_discovery/main.cpp |57.9%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/columnshard/ut_schema/unittest |57.9%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/columnshard/ut_schema/unittest |57.9%| [TM] {BAZEL_DOWNLOAD, FAILED} 
ydb/core/tx/columnshard/ut_schema/unittest |57.9%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/columnshard/ut_schema/unittest |57.9%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/columnshard/ut_schema/unittest |57.9%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/columnshard/ut_schema/unittest |57.9%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/columnshard/ut_schema/unittest |57.9%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/columnshard/ut_schema/unittest |57.8%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/columnshard/ut_schema/unittest |57.8%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/columnshard/ut_schema/unittest |57.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kqp/ut/tx/kqp_tx_ut.cpp |57.8%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/columnshard/ut_schema/unittest |57.8%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/columnshard/ut_schema/unittest |57.8%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/columnshard/ut_schema/unittest |57.8%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/columnshard/ut_schema/unittest |57.8%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/columnshard/ut_schema/unittest |57.8%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/columnshard/ut_schema/unittest |57.8%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/columnshard/ut_schema/unittest |57.9%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/kqp/ut/knn/unittest |57.9%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/columnshard/ut_schema/unittest |57.9%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/kqp/provider/ut/unittest |57.9%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/columnshard/ut_schema/unittest |57.9%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/columnshard/ut_schema/unittest |57.9%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/columnshard/ut_schema/unittest |57.9%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/columnshard/ut_schema/unittest |58.0%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/kqp/ut/knn/unittest |58.0%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/columnshard/ut_schema/unittest |58.0%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/columnshard/ut_schema/unittest |58.0%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/kqp/ut/knn/unittest |58.0%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/kqp/ut/knn/unittest |58.0%| [TS] {BAZEL_DOWNLOAD, FAILED} ydb/core/kqp/ut/data_integrity/unittest |58.0%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/columnshard/ut_schema/unittest |57.6%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/columnshard/ut_schema/unittest |57.6%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/columnshard/ut_schema/unittest |57.6%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/columnshard/ut_schema/unittest |57.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/actors/examples/02_discovery/lookup.cpp |57.6%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/columnshard/ut_schema/unittest |57.6%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/columnshard/ut_schema/unittest |57.6%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/columnshard/ut_schema/unittest |57.7%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/kqp/ut/knn/unittest |57.7%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/kqp/ut/knn/unittest |57.7%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/columnshard/ut_schema/unittest |57.7%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/kqp/ut/knn/unittest |57.7%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/columnshard/ut_schema/unittest |57.7%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/replication/controller/ut_stream_creator/unittest |57.7%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/columnshard/ut_schema/unittest |57.7%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/engine/ut/unittest |57.7%| [TM] {BAZEL_DOWNLOAD, FAILED} 
ydb/core/kqp/ut/knn/unittest |57.7%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/kqp/ut/knn/unittest |57.7%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/kqp/ut/knn/unittest |57.8%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/kqp/ut/knn/unittest |57.8%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/kqp/ut/knn/unittest |57.8%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/kqp/ut/knn/unittest |57.8%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/kqp/ut/knn/unittest |57.8%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/kqp/ut/knn/unittest |57.8%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/kqp/ut/knn/unittest |57.8%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/kqp/ut/knn/unittest |57.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/ut_vector_index_build_reboots/ut_vector_index_build_reboots.cpp |57.7%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/kqp/ut/knn/unittest |57.7%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/kqp/ut/knn/unittest |57.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kqp/ut/tx/kqp_locks_ut.cpp |57.7%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/kqp/ut/knn/unittest |57.8%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/services/metadata/secret/ut/unittest |57.8%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/kqp/ut/knn/unittest |57.8%| COMPACTING CACHE 112.0KiB |57.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kqp/ut/indexes/kqp_indexes_ut.cpp |57.8%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/kqp/ut/knn/unittest |57.8%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/kqp/ut/knn/unittest |57.8%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/kqp/ut/knn/unittest |57.8%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/kqp/ut/knn/unittest |57.8%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/kqp/ut/knn/unittest |57.8%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/kqp/ut/knn/unittest |57.8%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/services/metadata/secret/ut/unittest |57.9%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/kqp/ut/knn/unittest |57.9%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/kqp/ut/knn/unittest |57.9%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/kqp/ut/knn/unittest |57.9%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/services/metadata/secret/ut/unittest |57.9%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/kqp/ut/knn/unittest |57.9%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/services/metadata/secret/ut/unittest |57.9%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/kqp/ut/knn/unittest |57.9%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/kqp/ut/knn/unittest |57.9%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/services/metadata/secret/ut/unittest |57.9%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/kqp/ut/knn/unittest |58.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kqp/ut/tx/kqp_sink_mvcc_ut.cpp |58.0%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/kqp/ut/knn/unittest |57.9%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/engine/ut/unittest |57.9%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/engine/ut/unittest |57.9%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/services/metadata/secret/ut/unittest |57.9%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/engine/ut/unittest |57.9%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/services/metadata/secret/ut/unittest |57.9%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/services/metadata/secret/ut/unittest |57.9%| [TS] {BAZEL_DOWNLOAD, FAILED} ydb/core/kqp/ut/data_integrity/unittest |58.0%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/services/metadata/secret/ut/unittest |58.0%| [TS] {BAZEL_DOWNLOAD, FAILED} ydb/core/kqp/ut/data_integrity/unittest |58.0%| [TS] {BAZEL_DOWNLOAD, FAILED} ydb/core/kqp/ut/data_integrity/unittest |58.0%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/services/metadata/secret/ut/unittest |58.0%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/schemeshard/ut_bsvolume/unittest |58.0%| [TM] {BAZEL_DOWNLOAD, 
FAILED} ydb/services/metadata/secret/ut/unittest |58.0%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/kqp/ut/knn/unittest |58.0%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/services/metadata/secret/ut/unittest |58.0%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/services/metadata/secret/ut/unittest |58.0%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/services/metadata/secret/ut/unittest |58.0%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/services/metadata/secret/ut/unittest |58.0%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/services/metadata/secret/ut/unittest |58.0%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/services/metadata/secret/ut/unittest |58.0%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/kqp/ut/knn/unittest |58.0%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/services/metadata/secret/ut/unittest |58.0%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/services/metadata/secret/ut/unittest |58.0%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/services/metadata/secret/ut/unittest |58.0%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/datashard/ut_minikql/unittest |58.0%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/services/metadata/secret/ut/unittest |58.0%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/services/metadata/secret/ut/unittest |58.0%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/services/metadata/secret/ut/unittest |58.0%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/engine/ut/unittest |58.0%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/services/metadata/secret/ut/unittest |58.1%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/datashard/ut_object_storage_listing/unittest |58.1%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/services/metadata/secret/ut/unittest |58.1%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/services/metadata/secret/ut/unittest |58.1%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/services/metadata/secret/ut/unittest |58.1%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/kqp/ut/knn/unittest |58.1%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/kqp/ut/knn/unittest |58.1%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/services/metadata/secret/ut/unittest |58.0%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/replication/controller/ut_stream_creator/unittest |58.0%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/schemeshard/ut_backup/unittest |58.0%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/services/metadata/secret/ut/unittest |58.0%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/engine/ut/unittest |58.1%| [TS] {BAZEL_DOWNLOAD, FAILED} ydb/core/kqp/ut/data_integrity/unittest |58.1%| [TS] {BAZEL_DOWNLOAD, FAILED} ydb/core/kqp/ut/data_integrity/unittest |58.1%| [TS] {BAZEL_DOWNLOAD, FAILED} ydb/core/kqp/ut/data_integrity/unittest |58.1%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/schemeshard/ut_shred/unittest |58.1%| [TS] {BAZEL_DOWNLOAD, FAILED} ydb/core/kqp/ut/data_integrity/unittest |58.1%| [TS] {BAZEL_DOWNLOAD, FAILED} ydb/core/kqp/ut/data_integrity/unittest |58.1%| [TS] {BAZEL_DOWNLOAD, FAILED} ydb/core/kqp/ut/data_integrity/unittest |58.1%| [TS] {BAZEL_DOWNLOAD, FAILED} ydb/core/kqp/ut/data_integrity/unittest |58.1%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/services/metadata/secret/ut/unittest |58.1%| [TS] {BAZEL_DOWNLOAD, FAILED} ydb/core/kqp/ut/data_integrity/unittest |58.1%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/schemeshard/ut_shred/unittest |58.1%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/services/metadata/secret/ut/unittest |58.1%| [TS] {BAZEL_DOWNLOAD, FAILED} ydb/core/kqp/ut/data_integrity/unittest |58.1%| [TS] {BAZEL_DOWNLOAD, FAILED} ydb/core/kqp/ut/data_integrity/unittest |58.1%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/services/metadata/secret/ut/unittest |58.1%| [TS] {BAZEL_DOWNLOAD, FAILED} ydb/core/kqp/ut/data_integrity/unittest |58.1%| [TM] {BAZEL_DOWNLOAD, FAILED} 
ydb/services/metadata/secret/ut/unittest |58.1%| [TS] {BAZEL_DOWNLOAD, FAILED} ydb/core/kqp/ut/data_integrity/unittest |58.1%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/services/metadata/secret/ut/unittest |58.1%| [TS] {BAZEL_DOWNLOAD, FAILED} ydb/core/kqp/ut/data_integrity/unittest |58.1%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/services/metadata/secret/ut/unittest |58.1%| [TS] {BAZEL_DOWNLOAD, FAILED} ydb/core/kqp/ut/data_integrity/unittest |58.1%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/functional/sqs/cloud/py3test |58.1%| [TS] {BAZEL_DOWNLOAD, FAILED} ydb/core/kqp/ut/data_integrity/unittest |58.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/fq/libs/ydb/ut/ydb_ut.cpp |58.1%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/services/metadata/secret/ut/unittest |58.1%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/services/metadata/secret/ut/unittest |58.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kqp/ut/tx/kqp_locks_tricky_ut.cpp |58.1%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/services/metadata/secret/ut/unittest |58.2%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/services/metadata/secret/ut/unittest |58.2%| [TS] {BAZEL_DOWNLOAD, FAILED} ydb/core/kqp/ut/data_integrity/unittest |58.2%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/services/metadata/secret/ut/unittest |58.2%| [TS] {BAZEL_DOWNLOAD, FAILED} ydb/core/kqp/ut/data_integrity/unittest |58.2%| [TS] {BAZEL_DOWNLOAD, FAILED} ydb/core/kqp/ut/data_integrity/unittest |58.2%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/services/metadata/secret/ut/unittest |58.1%| [TS] {BAZEL_DOWNLOAD, FAILED} ydb/core/kqp/ut/data_integrity/unittest |58.1%| [TS] {BAZEL_DOWNLOAD, FAILED} ydb/core/kqp/ut/data_integrity/unittest |58.1%| [TS] {BAZEL_DOWNLOAD, FAILED} ydb/core/kqp/ut/data_integrity/unittest |58.1%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/services/metadata/secret/ut/unittest |58.1%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/services/metadata/secret/ut/unittest |58.1%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/services/metadata/secret/ut/unittest |58.1%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/datashard/ut_minikql/unittest |58.1%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/services/metadata/secret/ut/unittest |58.1%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/schemeshard/ut_backup/unittest |58.1%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/services/metadata/secret/ut/unittest |58.1%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/schemeshard/ut_backup/unittest |58.1%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/datashard/ut_minikql/unittest |58.2%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/services/metadata/secret/ut/unittest |58.2%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/blobstorage/incrhuge/ut/unittest |58.2%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/blobstorage/incrhuge/ut/unittest |58.2%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/blobstorage/incrhuge/ut/unittest |58.2%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/blobstorage/incrhuge/ut/unittest |58.2%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/datashard/ut_object_storage_listing/unittest |58.2%| [TS] {BAZEL_DOWNLOAD, FAILED} ydb/core/kqp/ut/data_integrity/unittest |58.2%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/blobstorage/incrhuge/ut/unittest |58.2%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/schemeshard/ut_backup/unittest |58.2%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/schemeshard/ut_backup/unittest |58.2%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/services/metadata/secret/ut/unittest |58.2%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/functional/sqs/cloud/py3test |58.2%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/schemeshard/ut_shred/unittest |58.2%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/schemeshard/ut_bsvolume/unittest |58.2%| [TM] 
{BAZEL_DOWNLOAD, FAILED} ydb/core/tx/schemeshard/ut_shred/unittest |58.2%| [TS] {BAZEL_DOWNLOAD, FAILED} ydb/core/kqp/ut/data_integrity/unittest |58.3%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/functional/sqs/cloud/py3test |58.3%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/services/metadata/secret/ut/unittest |58.2%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/functional/sqs/cloud/py3test |58.2%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tablet_flat/ut/unittest |58.2%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/functional/sqs/cloud/py3test |58.2%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/functional/sqs/cloud/py3test |58.2%| [TS] {BAZEL_DOWNLOAD, FAILED} ydb/core/kqp/ut/data_integrity/unittest |58.2%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/functional/sqs/cloud/py3test |58.2%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/functional/sqs/cloud/py3test |58.2%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/functional/sqs/cloud/py3test |58.2%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/functional/sqs/cloud/py3test |58.2%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/functional/sqs/cloud/py3test |58.2%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/functional/sqs/cloud/py3test |58.2%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/functional/sqs/cloud/py3test |58.2%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/functional/sqs/cloud/py3test |58.2%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/functional/sqs/cloud/py3test |58.2%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/functional/sqs/cloud/py3test |58.2%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/services/metadata/secret/ut/unittest |58.3%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/solomon/reading/py3test |58.3%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/solomon/reading/py3test |58.3%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/solomon/reading/py3test |58.3%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tablet_flat/ut/unittest |58.3%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tablet_flat/ut/unittest |58.3%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tablet_flat/ut/unittest |58.3%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tablet_flat/ut/unittest |58.3%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/blobstorage/vdisk/skeleton/ut/unittest |58.3%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tablet_flat/ut/unittest |58.3%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/blobstorage/vdisk/skeleton/ut/unittest |58.3%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/blobstorage/vdisk/skeleton/ut/unittest |58.2%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/solomon/reading/py3test |58.2%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/solomon/reading/py3test |58.2%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/blobstorage/vdisk/skeleton/ut/unittest |58.2%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/blobstorage/incrhuge/ut/unittest |58.2%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/blobstorage/vdisk/skeleton/ut/unittest |58.2%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/blobstorage/vdisk/skeleton/ut/unittest |58.2%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/blobstorage/vdisk/skeleton/ut/unittest |58.2%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/blobstorage/vdisk/skeleton/ut/unittest |58.2%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/schemeshard/ut_shred/unittest |58.3%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/schemeshard/ut_bsvolume/unittest |58.3%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/schemeshard/ut_backup/unittest |58.3%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/schemeshard/ut_shred/unittest |58.3%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/schemeshard/ut_shred/unittest |58.3%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/schemeshard/ut_shred/unittest |58.3%| [TM] {BAZEL_DOWNLOAD, FAILED} 
ydb/core/tx/schemeshard/ut_backup/unittest |58.3%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/schemeshard/ut_bsvolume/unittest |58.3%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/solomon/reading/py3test |58.3%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/http_proxy/ut/unittest |58.3%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/blobstorage/incrhuge/ut/unittest |58.3%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/schemeshard/ut_backup/unittest |58.3%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/datashard/ut_volatile/unittest |58.3%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/http_proxy/ut/unittest |58.3%| [TS] {BAZEL_DOWNLOAD, FAILED} ydb/core/kqp/ut/data_integrity/unittest |58.3%| [TS] {BAZEL_DOWNLOAD, FAILED} ydb/core/kqp/ut/data_integrity/unittest |58.3%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/http_proxy/ut/unittest |58.3%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/http_proxy/ut/unittest |58.4%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/http_proxy/ut/unittest |58.4%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/blobstorage/incrhuge/ut/unittest |58.4%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/http_proxy/ut/unittest |58.4%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/datashard/ut_volatile/unittest |58.4%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/http_proxy/ut/unittest |58.4%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/http_proxy/ut/unittest |58.4%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/datashard/ut_locks/unittest |58.4%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/kqp/provider/ut/unittest |58.4%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/kqp/provider/ut/unittest |58.4%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/datashard/ut_locks/unittest |58.4%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/datashard/ut_locks/unittest |58.4%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/kqp/provider/ut/unittest |58.4%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/columnshard/ut_schema/unittest |58.3%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/columnshard/ut_schema/unittest |58.3%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/columnshard/ut_schema/unittest |58.3%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/replication/controller/ut_stream_creator/unittest |58.3%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/replication/controller/ut_stream_creator/unittest |58.3%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/columnshard/ut_schema/unittest |58.3%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/replication/controller/ut_stream_creator/unittest |58.3%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/engine/ut/unittest |58.3%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/engine/ut/unittest |58.3%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/kqp/ut/knn/unittest |58.3%| [TA] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/blobstorage/vdisk/hullop/ut/test-results/unittest/{meta.json ... 
results_accumulator.log} |58.3%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/kqp/ut/knn/unittest |58.4%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/kqp/ut/knn/unittest |58.4%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/kqp/ut/knn/unittest |58.4%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/kqp/ut/knn/unittest |58.4%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/engine/ut/unittest |58.4%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/kqp/ut/knn/unittest |58.4%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/kqp/ut/knn/unittest |58.5%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/kqp/ut/knn/unittest |58.5%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/kqp/ut/knn/unittest |58.5%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/engine/ut/unittest |58.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kqp/ut/tx/kqp_sink_tx_ut.cpp |58.2%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/kqp/ut/knn/unittest |58.2%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/services/metadata/secret/ut/unittest |58.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/ut_rtmr_reboots/ut_rtmr_reboots.cpp |58.2%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/services/metadata/secret/ut/unittest |58.2%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/services/metadata/secret/ut/unittest |58.2%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/services/metadata/secret/ut/unittest |58.2%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/services/metadata/secret/ut/unittest |58.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/ut_vdisk2/huge.cpp |58.2%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/services/metadata/secret/ut/unittest |58.2%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/services/metadata/secret/ut/unittest |58.3%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/security/certificate_check/ut/ydb-core-security-certificate_check-ut |58.3%| [TA] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/blobstorage/ut_blobstorage/ut_balancing/test-results/unittest/{meta.json ... results_accumulator.log} |58.3%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/blob_depot/ut/unittest |58.3%| [TA] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/driver_lib/run/ut/test-results/unittest/{meta.json ... results_accumulator.log} |58.4%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/schemeshard/ut_sysview_reboots/ydb-core-tx-schemeshard-ut_sysview_reboots |58.4%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/blobstorage/ut_blobstorage/ut_cluster_balancing/ut_blobstorage-ut_cluster_balancing |58.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kqp/ut/tx/kqp_snapshot_isolation_ut.cpp |58.4%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/replication/service/ut_json_change_record/tx-replication-service-ut_json_change_record |58.4%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/kqp/ut/runtime/ydb-core-kqp-ut-runtime |58.4%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/datashard/ut_vacuum/ydb-core-tx-datashard-ut_vacuum |58.5%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/blobstorage/vdisk/syncer/ut/ydb-core-blobstorage-vdisk-syncer-ut |58.0%| [TA] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/client/ut/test-results/unittest/{meta.json ... 
results_accumulator.log} |58.1%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/persqueue/ut/ut_with_sdk/ydb-core-persqueue-ut-ut_with_sdk |58.2%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/stress/topic/tests/py3test |58.3%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/kqp/tests/tpch/tpch |58.3%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/functional/cms/ydb-tests-functional-cms |58.3%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/kqp/ut/knn/unittest |58.3%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/kqp/ut/federated_query/datastreams/ydb-core-kqp-ut-federated_query-datastreams |57.9%| [TA] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/datashard/ut_read_table/test-results/unittest/{meta.json ... results_accumulator.log} |58.0%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/replication/controller/ut_dst_creator/ydb-core-tx-replication-controller-ut_dst_creator |58.0%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/library/actors/async/ut/ydb-library-actors-async-ut |58.0%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/datashard/ut_rs/unittest |58.0%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/stress/node_broker/tests/py3test |58.0%| [TA] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/fq/libs/compute/common/ut/test-results/unittest/{meta.json ... results_accumulator.log} |58.1%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/library/yql/providers/common/http_gateway/ut/ydb-library-yql-providers-common-http_gateway-ut |58.1%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/stress/kafka/tests/objcopy_c386e2211742a44d16094d73d0.o |58.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/http_proxy/ut/json_proto_conversion_ut.cpp |58.1%| [TA] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/health_check/ut/test-results/unittest/{meta.json ... results_accumulator.log} |58.1%| [TS] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/long_tx_service/ut/unittest |58.1%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/kqp/ut/service/ydb-core-kqp-ut-service |58.1%| [TA] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/scheme/ut/test-results/unittest/{meta.json ... results_accumulator.log} |58.1%| [TA] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/replication/controller/ut_dst_creator/test-results/unittest/{meta.json ... results_accumulator.log} |58.1%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/blobstorage/vdisk/hullop/ut/ydb-core-blobstorage-vdisk-hullop-ut |58.2%| [TA] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/blobstorage/dsproxy/ut_fat/test-results/unittest/{meta.json ... results_accumulator.log} |58.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/fq/libs/common/iceberg_processor_ut.cpp |58.2%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/fq/libs/checkpointing/ut/unittest |57.9%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/library/query_actor/ut/unittest |57.9%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/tx_proxy/ut_base_tenant/ydb-core-tx-tx_proxy-ut_base_tenant |57.9%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/kqp/proxy_service/ut/ydb-core-kqp-proxy_service-ut |57.9%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/blobstorage/backpressure/ut/ydb-core-blobstorage-backpressure-ut |57.9%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/functional/postgresql/ydb-tests-functional-postgresql |58.0%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/tools/s3_bench/ydb_s3_bench |58.0%| [TA] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/datashard/ut_write/test-results/unittest/{meta.json ... 
results_accumulator.log} |58.0%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/scheme_board/ut_monitoring/ydb-core-tx-scheme_board-ut_monitoring |58.0%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/actorlib_impl/ut/ydb-core-actorlib_impl-ut |58.0%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/schemeshard/ut_subdomain/ydb-core-tx-schemeshard-ut_subdomain |58.0%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/schemeshard/ut_secret/ydb-core-tx-schemeshard-ut_secret |58.0%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/solomon/reading/ydb-tests-solomon-reading |58.1%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/external_sources/hive_metastore/ut/ydb-core-external_sources-hive_metastore-ut |58.1%| [TS] {BAZEL_DOWNLOAD, FAILED} ydb/core/config/init/ut/unittest |58.1%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/library/yql/dq/actors/spilling/ut/ydb-library-yql-dq-actors-spilling-ut |58.1%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/functional/scheme_tests/ydb-tests-functional-scheme_tests |58.1%| [TA] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/olap/column_family/compression/test-results/py3test/{meta.json ... results_accumulator.log} |57.9%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/mind/ut/ydb-core-mind-ut |57.9%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/core/http_proxy/ut/objcopy_5fddfa8f171a3216cad65e02ab.o |57.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/apps/pgwire/main.cpp |57.9%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/datashard/s3/ydb-tests-datashard-s3 |57.9%| [TA] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/util/ut/test-results/unittest/{meta.json ... results_accumulator.log} |57.9%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/tools/kqprun/recipe/objcopy_dcbdf62672440a626e79a64e14.o |58.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/blobstorage/vdisk/common/circlebuf_ut.cpp |58.0%| [TA] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/datashard/ut_change_exchange/test-results/unittest/{meta.json ... results_accumulator.log} |58.0%| [TA] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/blobstorage/vdisk/query/ut/test-results/unittest/{meta.json ... 
results_accumulator.log} |58.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/blobstorage/vdisk/common/circlebufresize_ut.cpp |58.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/apps/pgwire/pgwire.cpp |58.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/blobstorage/vdisk/common/vdisk_outofspace_ut.cpp |58.0%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/serverless/13360e4ecdf34efe6c3a817a44_raw.auxcpp |58.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/blobstorage/vdisk/common/vdisk_config_ut.cpp |58.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/apps/pgwire/pg_ydb_connection.cpp |58.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/fq/libs/test_connection/ut/test_connection_ut.cpp |58.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/blobstorage/vdisk/common/vdisk_lsnmngr_ut.cpp |58.1%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/schemeshard/ut_pq_reboots/ydb-core-tx-schemeshard-ut_pq_reboots |58.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/fq/libs/common/cache_ut.cpp |58.1%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/stress/kafka/tests/objcopy_dac3ec236f3cba753ea226bb96.o |58.1%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/blobstorage/vdisk/hullop/ut/unittest |58.1%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/blobstorage/vdisk/hullop/ut/unittest |58.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/fq/libs/common/entity_id_ut.cpp |58.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/apps/pgwire/pg_ydb_proxy.cpp |58.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/blobstorage/vdisk/common/vdisk_syncneighbors_ut.cpp |58.1%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/blobstorage/vdisk/hullop/ut/unittest |58.1%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/blobstorage/vdisk/hullop/ut/unittest |58.1%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/blobstorage/vdisk/hullop/ut/unittest |58.1%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/blobstorage/vdisk/hullop/ut/unittest |58.2%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/blobstorage/vdisk/hullop/ut/unittest |58.2%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/tests/tools/kqprun/recipe/libpy3kqprun_recipe.global.a |58.2%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/blobstorage/vdisk/hullop/ut/unittest |58.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/datashard/datashard_ut_object_storage_listing.cpp |58.0%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/blobstorage/vdisk/hullop/ut/unittest |58.0%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/blobstorage/vdisk/hullop/ut/unittest |58.0%| [TS] {BAZEL_DOWNLOAD, FAILED} ydb/core/public_http/ut/unittest |58.0%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/schemeshard/ut_failure_injection/ydb-core-tx-schemeshard-ut_failure_injection |58.0%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/persqueue/tests/liblibrary-persqueue-tests.a |58.0%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/stress/topic_kafka/workload_topic_kafka |58.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/fq/libs/common/util_ut.cpp |58.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/providers/s3/common/util_ut.cpp |58.0%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/dstool/1bbc8f31f8eb46680aacf37392_raw.auxcpp |58.0%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/stress/show_create/table/objcopy_970514ee5aa7605a49b54b8feb.o |58.0%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/public_http/ut/ydb-core-public_http-ut |58.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/actors/async/benchmark/b_actor_async.cpp |58.1%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/blobstorage/vdisk/hulldb/base/ut/ydb-core-blobstorage-vdisk-hulldb-base-ut |58.1%| [TA] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/base/ut_auth/test-results/unittest/{meta.json ... 
results_accumulator.log} |58.1%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/kqp/compile_service/ut/ydb-core-kqp-compile_service-ut |58.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/apps/dstool/libpy3ydb-dstool.global.a |58.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/services/persqueue_v1/ut/describes_ut/ic_cache_ut.cpp |58.1%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/blobstorage/ut_blobstorage/ut_balancing/unittest |58.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/tests/stress/show_create/table/libpy3show_create_table.global.a |58.1%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/dstool/objcopy_88c3bdc430d7cb7f217b6a7f5c.o |58.2%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/blobstorage/ut_blobstorage/ut_balancing/unittest |58.2%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/blobstorage/ut_blobstorage/ut_balancing/unittest |58.2%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/blobstorage/ut_blobstorage/ut_balancing/unittest |58.2%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/blobstorage/ut_blobstorage/ut_balancing/unittest |58.2%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/blobstorage/ut_blobstorage/ut_balancing/unittest |58.2%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/blobstorage/ut_blobstorage/ut_balancing/unittest |58.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/actors/examples/02_discovery/replica.cpp |58.2%| [TS] {BAZEL_DOWNLOAD, FAILED} ydb/core/driver_lib/run/ut/unittest |58.2%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/tests/stress/show_create/table/workload/libpy3show_create-table-workload.global.a |58.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/ut_blobstorage/cluster_balancing.cpp |58.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/services/persqueue_v1/ut/describes_ut/describe_topic_ut.cpp |58.2%| [TS] {BAZEL_DOWNLOAD, FAILED} ydb/core/driver_lib/run/ut/unittest |58.2%| [TS] {BAZEL_DOWNLOAD, FAILED} ydb/core/driver_lib/run/ut/unittest |58.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/ut_auditsettings/ut_auditsettings.cpp |58.2%| [TS] {BAZEL_DOWNLOAD, FAILED} ydb/core/driver_lib/run/ut/unittest |58.2%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/client/ut/unittest |58.2%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/client/ut/unittest |58.2%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/client/ut/unittest |58.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/ut_external_table_reboots/ut_external_table_reboots.cpp |58.3%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/fq/libs/compute/common/ut/unittest |58.3%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/fq/libs/compute/common/ut/unittest |58.3%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/fq/libs/compute/common/ut/unittest |58.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/ut_split_merge/ut_find_split_key.cpp |58.3%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/fq/libs/compute/common/ut/unittest |58.3%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/health_check/ut/unittest |58.4%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/health_check/ut/unittest |58.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/ut_resource_pool/ut_resource_pool.cpp |58.3%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/blobstorage/dsproxy/ut_fat/unittest |58.3%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/blobstorage/dsproxy/ut_fat/unittest |58.3%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/fq/libs/compute/common/ut/unittest |58.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/ut_split_merge/ut_split_merge.cpp |58.3%| [CC] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/comp_nodes/llvm16/mkql_visitall.cpp |58.4%| [CC] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/comp_nodes/llvm16/mkql_source.cpp |58.4%| [TM] {BAZEL_DOWNLOAD, FAILED} 
ydb/tests/olap/column_family/compression/py3test |58.4%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/olap/column_family/compression/py3test |58.4%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/olap/column_family/compression/py3test |58.5%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/olap/column_family/compression/py3test |58.4%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/util/ut/unittest |58.4%| [CC] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/comp_nodes/llvm16/mkql_narrow_map.cpp |58.4%| [PB] {BAZEL_DOWNLOAD} $(B)/yql/essentials/providers/common/proto/udf_resolver.pb.{h, cc} |58.4%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/olap/column_family/compression/py3test |58.4%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/olap/column_family/compression/py3test |58.4%| [CC] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/comp_nodes/llvm16/mkql_match_recognize_measure_arg.cpp |58.4%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/util/ut/unittest |58.4%| [CC] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/comp_nodes/llvm16/mkql_varitem.cpp |58.4%| [CC] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/comp_nodes/llvm16/mkql_linear.cpp |58.5%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/config/protos/row_dispatcher.pb.{h, cc} |58.5%| [CC] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/comp_nodes/llvm16/mkql_lookup.cpp |58.5%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/blobstorage/vdisk/query/ut/unittest |58.5%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/config/protos/test_connection.pb.{h, cc} |58.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kqp/ut/cost/kqp_cost_ut.cpp |58.5%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/util/ut/unittest |58.5%| [CC] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/comp_nodes/llvm16/mkql_enumerate.cpp |58.5%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/util/ut/unittest |58.5%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/datashard/ut_change_exchange/unittest |58.5%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/base/ut_auth/unittest |58.5%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/util/ut/unittest |58.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/security/certificate_check/cert_utils_ut.cpp |58.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/security/certificate_check/cert_check_ut.cpp |58.6%| [PR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/providers/result/expr_nodes/yql_res_expr_nodes.{gen.h ... 
defs.inl.h} |58.6%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/olap/column_family/compression/py3test |58.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/ut_sysview_reboots/ut_sysview_reboots.cpp |58.6%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/protos/draft/ydb_bridge.pb.{h, cc} |58.6%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/olap/column_family/compression/py3test |58.6%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/providers/dq/api/protos/dqs.pb.{h, cc} |58.6%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/protos/draft/datastreams.pb.{h, cc} |58.6%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/engines/protos/portion_info.pb.{h, cc} |58.6%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/blobstorage/dsproxy/ut_fat/unittest |58.6%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/datashard/ut_write/unittest |58.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/actors/async/ut/wait_for_event_ut.cpp |58.6%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/base/ut_auth/unittest |58.6%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/blobstorage/dsproxy/ut_fat/unittest |58.6%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/base/ut_auth/unittest |58.6%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/base/ut_auth/unittest |58.6%| [LD] {BAZEL_DOWNLOAD} $(B)/tools/rescompressor/rescompressor |58.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/actors/async/ut/continuation_ut.cpp |58.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/actors/async/ut/timeout_ut.cpp |58.6%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/blobstorage/dsproxy/ut_fat/unittest |58.6%| [TS] {BAZEL_DOWNLOAD, FAILED} ydb/core/scheme/ut/unittest |58.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/persqueue/ut/ut_with_sdk/commitoffset_ut.cpp |58.6%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/base/ut_auth/unittest |58.6%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/base/ut_auth/unittest |58.6%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/replication/controller/ut_dst_creator/unittest |58.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/blobstorage/vdisk/hullop/blobstorage_readbatch_ut.cpp |58.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/actors/async/ut/task_group_ut.cpp |58.6%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/grpc/ydb_keyvalue_v2.{pb.h ... 
grpc.pb.h} |58.6%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/scheme_tests/objcopy_4826ee2207124da1bc398e3bd8.o |58.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/scheme_board/ut_helpers.cpp |58.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/external_sources/hive_metastore/ut/common.cpp |58.6%| [TS] {BAZEL_DOWNLOAD, FAILED} ydb/core/scheme/ut/unittest |58.6%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/blobstorage/dsproxy/ut_fat/unittest |58.6%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/blobstorage/dsproxy/ut_fat/unittest |58.6%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/blobstorage/dsproxy/ut_fat/unittest |58.6%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/replication/controller/ut_dst_creator/unittest |58.6%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/datashard/ut_write/unittest |58.6%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/replication/controller/ut_dst_creator/unittest |58.6%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/replication/controller/ut_dst_creator/unittest |58.6%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/replication/controller/ut_dst_creator/unittest |58.6%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/kqp/workload_service/ut/common/libworkload_service-ut-common.a |58.6%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/blobstorage/dsproxy/ut_fat/unittest |58.6%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/replication/controller/ut_dst_creator/unittest |58.6%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/replication/controller/ut_dst_creator/unittest |58.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/mind/tenant_ut_pool.cpp |58.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kqp/proxy_service/kqp_proxy_ut.cpp |58.6%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/blobstorage/dsproxy/ut_fat/unittest |58.6%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/grpc/draft/ydb_clickhouse_internal_v1.{pb.h ... grpc.pb.h} |58.6%| [TS] {BAZEL_DOWNLOAD, FAILED} ydb/core/scheme/ut/unittest |58.7%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/datashard/s3/objcopy_8685c3ae88e5169a5acffc7bc4.o |58.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kqp/proxy_service/kqp_script_executions_ut.cpp |58.7%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/blobstorage/dsproxy/ut_fat/unittest |58.7%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/fq/libs/compute/common/ut/unittest |58.7%| [TS] {BAZEL_DOWNLOAD, FAILED} ydb/core/scheme/ut/unittest |58.7%| [TS] {BAZEL_DOWNLOAD, FAILED} ydb/core/scheme/ut/unittest |58.7%| [TS] {BAZEL_DOWNLOAD, FAILED} ydb/core/scheme/ut/unittest |58.7%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/blobstorage/dsproxy/ut_fat/unittest |58.7%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/blobstorage/dsproxy/ut_fat/unittest |58.7%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/replication/controller/ut_dst_creator/unittest |58.6%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/datashard/ut_change_exchange/unittest |58.6%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/blobstorage/dsproxy/ut_fat/unittest |58.6%| [LD] {BAZEL_DOWNLOAD} $(B)/contrib/python/moto/bin/moto_server |58.7%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/blobstorage/dsproxy/ut_fat/unittest |58.7%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/util/ut/unittest |58.7%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/blobstorage/dsproxy/ut_fat/unittest |58.7%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/blobstorage/dsproxy/ut_fat/unittest |58.7%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/blobstorage/dsproxy/ut_fat/unittest |58.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/ut_failure_injection/ut_failure_injection.cpp |58.7%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/blobstorage/dsproxy/ut_fat/unittest |58.7%| [TM] {BAZEL_DOWNLOAD, FAILED} 
ydb/core/health_check/ut/unittest |58.7%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/health_check/ut/unittest |58.7%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/datashard/ut_write/unittest |58.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/ut_pq_reboots/ut_pq_reboots.cpp |58.7%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/blobstorage/dsproxy/ut_fat/unittest |58.7%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/util/ut/unittest |58.7%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/health_check/ut/unittest |58.7%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/blobstorage/dsproxy/ut_fat/unittest |58.7%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/fq/libs/compute/common/ut/unittest |58.7%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/fq/libs/compute/common/ut/unittest |58.8%| [TS] {BAZEL_DOWNLOAD, FAILED} ydb/core/scheme/ut/unittest |58.8%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/base/ut_auth/unittest |58.7%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/blobstorage/dsproxy/ut_fat/unittest |58.7%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/blobstorage/dsproxy/ut_fat/unittest |58.7%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/base/ut_auth/unittest |58.7%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/util/ut/unittest |58.7%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/base/ut_auth/unittest |58.7%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/base/ut_auth/unittest |58.7%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/blobstorage/vdisk/query/ut/unittest |58.7%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/blobstorage/vdisk/query/ut/unittest |58.7%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/blobstorage/vdisk/query/ut/unittest |58.7%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/blobstorage/vdisk/query/ut/unittest |58.7%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/datashard/ut_write/unittest |58.7%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/blobstorage/vdisk/query/ut/unittest |58.7%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/datashard/ut_change_exchange/unittest |58.7%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/blobstorage/vdisk/query/ut/unittest |58.7%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/blobstorage/vdisk/query/ut/unittest |58.8%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/util/ut/unittest |58.8%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/util/ut/unittest |58.8%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/blobstorage/vdisk/query/ut/unittest |58.8%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/blobstorage/vdisk/query/ut/unittest |58.8%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/datashard/ut_change_exchange/unittest |58.8%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/fq/libs/compute/common/ut/unittest |58.8%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/health_check/ut/unittest |58.8%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/olap/column_family/compression/py3test |58.8%| [TS] {BAZEL_DOWNLOAD, FAILED} ydb/core/scheme/ut/unittest |58.8%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/stress/topic_kafka/objcopy_33061e6ec0580baa7f93f1ce23.o |58.8%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/blobstorage/dsproxy/ut_fat/unittest |58.8%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/client/ut/unittest |58.8%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/pqconfig.{pb.h ... 
grpc.pb.h} |58.8%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/olap/column_family/compression/py3test |58.8%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/blobstorage/dsproxy/ut_fat/unittest |58.8%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/datashard/ut_write/unittest |58.8%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/blobstorage/dsproxy/ut_fat/unittest |58.8%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/blobstorage/dsproxy/ut_fat/unittest |58.8%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/tests/stress/topic_kafka/libpy3workload_topic_kafka.global.a |58.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/actorlib_impl/actor_tracker_ut.cpp |58.8%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/blobstorage/dsproxy/ut_fat/unittest |58.8%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/blobstorage/dsproxy/ut_fat/unittest |58.8%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/health_check/ut/unittest |58.8%| [TS] {BAZEL_DOWNLOAD, FAILED} ydb/core/scheme/ut/unittest |58.8%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/replication/controller/ut_dst_creator/unittest |58.8%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/replication/controller/ut_dst_creator/unittest |58.9%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/blobstorage/dsproxy/ut_fat/unittest |58.9%| [TS] {BAZEL_DOWNLOAD, FAILED} ydb/core/scheme/ut/unittest |58.9%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/health_check/ut/unittest |58.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kqp/compile_service/ut/kqp_compile_fallback_ut.cpp |58.8%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/health_check/ut/unittest |58.8%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/health_check/ut/unittest |58.8%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/fq/libs/compute/common/ut/unittest |58.8%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/client/ut/unittest |58.8%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/datashard/ut_read_table/unittest |58.9%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/client/ut/unittest |58.9%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/client/ut/unittest |58.9%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/client/ut/unittest |58.9%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/datashard/ut_read_table/unittest |58.9%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/datashard/ut_read_table/unittest |58.9%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/datashard/ut_read_table/unittest |58.9%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/client/ut/unittest |58.9%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/client/ut/unittest |58.9%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/datashard/ut_read_table/unittest |58.9%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/client/ut/unittest |58.9%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/client/ut/unittest |58.9%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/client/ut/unittest |58.9%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/client/ut/unittest |58.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/blobstorage/vdisk/hulldb/base/hullds_generic_it_ut.cpp |58.9%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/lib/ut_helpers/libpublic-lib-ut_helpers.a |58.9%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/client/ut/unittest |58.9%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/client/ut/unittest |58.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/blobstorage/vdisk/hulldb/base/blobstorage_hullsatisfactionrank_ut.cpp |58.9%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/client/ut/unittest |58.9%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/client/ut/unittest |58.9%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/client/ut/unittest |58.9%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/client/ut/unittest |58.9%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/client/ut/unittest |58.9%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/client/ut/unittest 
|58.9%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/client/ut/unittest |58.9%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/client/ut/unittest |58.9%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/client/ut/unittest |58.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/blobstorage/vdisk/hullop/blobstorage_hullcompactdeferredqueue_ut.cpp |58.9%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/client/ut/unittest |58.9%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/client/ut/unittest |58.9%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/client/ut/unittest |58.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/actors/async/ut/callback_coroutine_ut.cpp |58.9%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/client/ut/unittest |58.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/blobstorage/vdisk/hulldb/base/blobstorage_blob_ut.cpp |59.0%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/client/ut/unittest |59.0%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/client/ut/unittest |59.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/ut_subdomain/ut_subdomain.cpp |58.9%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/client/ut/unittest |58.9%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/client/ut/unittest |58.9%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/client/ut/unittest |59.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/blobstorage/vdisk/hulldb/base/hullbase_barrier_ut.cpp |59.0%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/client/ut/unittest |59.0%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/client/ut/unittest |59.0%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/client/ut/unittest |59.0%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/datashard/s3/objcopy_ff581f3cff717ab223922f0cd8.o |59.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/blobstorage/vdisk/hulldb/base/hullds_heap_it_ut.cpp |58.9%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/client/ut/unittest |58.9%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/client/ut/unittest |58.9%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/client/ut/unittest |58.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/blobstorage/backpressure/queue_backpressure_server_ut.cpp |58.9%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/client/ut/unittest |58.9%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/client/ut/unittest |59.0%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/client/ut/unittest |59.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kqp/ut/service/kqp_document_api_ut.cpp |59.0%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/client/ut/unittest |59.0%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/client/ut/unittest |59.0%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/client/ut/unittest |59.0%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/client/ut/unittest |59.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/mind/tenant_node_enumeration_ut.cpp |59.0%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/client/ut/unittest |59.0%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/client/ut/unittest |59.0%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/client/ut/unittest |59.0%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/client/ut/unittest |59.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/blobstorage/vdisk/hulldb/base/blobstorage_hullstorageratio_ut.cpp |59.0%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/client/ut/unittest |59.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/public_http/http_router_ut.cpp |59.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/actorlib_impl/actor_activity_ut.cpp |59.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/actorlib_impl/actor_bootstrapped_ut.cpp |59.0%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/datashard/s3/objcopy_d191482d8b66f1c03ea8df56d3.o |59.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kqp/ut/service/kqp_service_ut.cpp |59.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/scheme_board/monitoring_ut.cpp |59.0%| [TM] {BAZEL_DOWNLOAD, 
FAILED} ydb/core/client/ut/unittest |59.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kqp/ut/service/kqp_qs_scripts_ut.cpp |59.0%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/client/ut/unittest |59.0%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/client/ut/unittest |59.0%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/client/ut/unittest |59.0%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/client/ut/unittest |59.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kqp/ut/service/kqp_snapshot_readonly.cpp |59.0%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/client/ut/unittest |59.0%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/client/ut/unittest |59.0%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/solomon/reading/objcopy_1c931ae3496b883d009f58ef8e.o |59.0%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/tools/solomon_emulator/client/libpy3tools-solomon_emulator-client.global.a |59.1%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/scheme_tests/objcopy_5b5c3367c789898aa5a6cae866.o |59.1%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/client/ut/unittest |59.1%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/scheme_tests/objcopy_8e57113197bb359e3999b04aab.o |59.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/replication/controller/dst_creator_ut.cpp |59.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/mind/tenant_ut_local.cpp |59.1%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/solomon/reading/objcopy_5233daad98e22a16d60b4b7b86.o |59.0%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/retry/py3/libpy3python-retry-py3.global.a |59.1%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/postgresql/objcopy_816e2dba53f55d924139cdb3c5.o |59.1%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/postgresql/objcopy_b9aaa278b10ed44e5645b3ef2f.o |59.1%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/postgresql/objcopy_e4166f3d104a6751b45e7e712f.o |59.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/external_sources/hive_metastore/ut/hive_metastore_fetcher_ut.cpp |59.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/actorlib_impl/test_protocols_ut.cpp |59.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/postgresql/common/libpy3functional-postgresql-common.global.a |59.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/external_sources/hive_metastore/libcore-external_sources-hive_metastore.a |59.1%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/solomon/reading/objcopy_38c6001204b7ada03b8b3e421d.o |59.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/actorlib_impl/test_interconnect_ut.cpp |59.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/blobstorage/backpressure/queue_backpressure_client_ut.cpp |59.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/ut_secret/ut_secret.cpp |59.1%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/blobstorage/ut_blobstorage/ut_balancing/unittest |58.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/actors/async/ut/cancellation_ut.cpp |58.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/dq/actors/spilling/spilling_file_ut.cpp |58.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/tests/tools/s3_bench/main.cpp |58.9%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/blobstorage/ut_blobstorage/ut_balancing/unittest |58.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/blobstorage/vdisk/hullop/hullop_delayedresp_ut.cpp |58.9%| [TS] {BAZEL_DOWNLOAD, FAILED} ydb/core/driver_lib/run/ut/unittest |59.0%| [TS] {BAZEL_DOWNLOAD, FAILED} ydb/core/driver_lib/run/ut/unittest |59.0%| [TS] {BAZEL_DOWNLOAD, FAILED} ydb/core/driver_lib/run/ut/unittest |59.0%| [TS] {BAZEL_DOWNLOAD, FAILED} ydb/core/driver_lib/run/ut/unittest |59.0%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/blobstorage/ut_blobstorage/ut_balancing/unittest |59.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/tx_proxy/proxy_ut_helpers.cpp 
|59.0%| [TS] {BAZEL_DOWNLOAD, FAILED} ydb/core/driver_lib/run/ut/unittest |59.0%| [TS] {BAZEL_DOWNLOAD, FAILED} ydb/core/driver_lib/run/ut/unittest |59.0%| [TA] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/public/sdk/cpp/src/client/topic/ut/with_direct_read_ut/test-results/unittest/{meta.json ... results_accumulator.log} |59.0%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/library/yql/tools/dq/worker_node/worker_node |59.0%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/statistics/service/ut/ut_aggregation/ydb-core-statistics-service-ut-ut_aggregation |59.1%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/kqp/ut/federated_query/generic_ut/ydb-core-kqp-ut-federated_query-generic_ut |59.1%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/fq/common/py3test |59.1%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/blobstorage/ut_group/unittest |59.1%| [TS] {BAZEL_DOWNLOAD, FAILED} ydb/library/actors/http/ut/unittest |59.1%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/schemeshard/ut_transfer/ydb-core-tx-schemeshard-ut_transfer |59.1%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/library/stability/healthcheck/libpy3library-stability-healthcheck.global.a |59.0%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/cms/objcopy_b9fd5c62781ec3b78d111a0ba7.o |59.0%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/comp_nodes/no_llvm/libminikql-comp_nodes-no_llvm.a |59.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kqp/tests/tpch/cmd_drop.cpp |59.0%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tools/blobsan/blobsan |59.0%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/stress/show_create/table/tests/ydb-tests-stress-show_create-table-tests |59.0%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/blobstorage/vdisk/ingress/ut/ydb-core-blobstorage-vdisk-ingress-ut |59.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/providers/common/http_gateway/yql_aws_signature_ut.cpp |59.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/external_sources/hive_metastore/ut/hive_metastore_client_ut.cpp |59.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/providers/common/http_gateway/yql_dns_gateway_ut.cpp |59.1%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/services/metadata/secret/ut/ydb-services-metadata-secret-ut |59.1%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/kesus/proxy/ut/ydb-core-kesus-proxy-ut |59.1%| [TA] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/scheme_board/ut_monitoring/test-results/unittest/{meta.json ... results_accumulator.log} |59.1%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/persqueue/dread_cache_service/ut/ydb-core-persqueue-dread_cache_service-ut |59.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/actors/async/ut/sleep_ut.cpp |59.0%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/blobstorage/ut_testshard/ydb-core-blobstorage-ut_testshard |59.0%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/cms/objcopy_a5874452d3dbd6f6e49cd08be6.o |59.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/actors/async/ut/event_ut.cpp |59.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/actors/async/ut/async_ut.cpp |59.0%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/testlib/solomon_helpers/liblibrary-testlib-solomon_helpers.a |59.0%| [TA] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/schemeshard/ut_extsubdomain/test-results/unittest/{meta.json ... 
results_accumulator.log} |59.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/tx_proxy/proxy_ut.cpp |59.1%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/blobstorage/ut_vdisk_io/ydb-core-blobstorage-ut_vdisk_io |59.1%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/scheme_board/ut_subscriber/ydb-core-tx-scheme_board-ut_subscriber |59.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kqp/tests/tpch/cmd_prepare.cpp |59.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kqp/tests/tpch/cmd_prepare_scheme.cpp |59.1%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/cms/objcopy_a38b1580810a6e4b419da99dcf.o |59.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kqp/tests/tpch/cmd_run_query.cpp |59.1%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/datashard/ut_snapshot_isolation/unittest |59.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/providers/generic/connector/libcpp/ut_helpers/libconnector-libcpp-ut_helpers.a |58.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kqp/tests/tpch/cmd_run_bench.cpp |58.8%| [TA] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/datashard/ut_external_blobs/test-results/unittest/{meta.json ... results_accumulator.log} |58.8%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/datashard/ut_stats/unittest |58.8%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/cms/objcopy_9ea5b1fb7a4f8e1b0b8d7cf345.o |58.8%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/datashard/ut_stats/ydb-core-tx-datashard-ut_stats |58.8%| [TA] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/blobstorage/ut_blobstorage/ut_move_pdisk/test-results/unittest/{meta.json ... results_accumulator.log} |58.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kqp/tests/tpch/commands.cpp |58.8%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/datashard/ttl/ydb-tests-datashard-ttl |58.8%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/library/actors/interconnect/rdma/ut/gtest |58.9%| [TA] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/persqueue/ut/slow/test-results/unittest/{meta.json ... results_accumulator.log} |58.9%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/kqp/tests/tpch/lib/libtests-tpch-lib.global.a |58.9%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/persqueue/public/list_topics/ut/ydb-core-persqueue-public-list_topics-ut |58.9%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/library/actors/wilson/ut/ydb-library-actors-wilson-ut |58.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/mind/node_broker_ut.cpp |58.9%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/cms/objcopy_7f9e816a97aaeee837ac316091.o |58.9%| [TA] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/services/persqueue_v1/ut/describes_ut/test-results/unittest/{meta.json ... results_accumulator.log} |58.9%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/kqp/tests/tpch/lib/libtests-tpch-lib.a |59.0%| [TS] {BAZEL_DOWNLOAD, FAILED} ydb/core/blobstorage/base/ut/gtest |59.0%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/yql/essentials/tools/sql2yql/sql2yql |59.0%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/balance_coverage/ut/ydb-core-tx-balance_coverage-ut |59.0%| [TA] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/security/ldap_auth_provider/ut/test-results/unittest/{meta.json ... results_accumulator.log} |59.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kqp/tests/tpch/main.cpp |59.0%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/library/yql/dq/comp_nodes/ut/unittest |59.0%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/blobstorage/vdisk/anubis_osiris/ut/ydb-core-blobstorage-vdisk-anubis_osiris-ut |59.1%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/kqp/ut/opt/ydb-core-kqp-ut-opt |58.8%| [TA] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/library/yql/providers/common/http_gateway/ut/test-results/unittest/{meta.json ... 
results_accumulator.log} |58.8%| [TA] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/sys_view/query_stats/ut/test-results/unittest/{meta.json ... results_accumulator.log} |58.8%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/public/sdk/cpp/src/client/topic/ut/ydb-public-sdk-cpp-src-client-topic-ut |58.8%| [TS] {BAZEL_DOWNLOAD, FAILED} ydb/core/io_formats/arrow/scheme/ut/unittest |58.8%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/example/py3test |58.9%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/library/yql/tools/dq/service_node/service_node |58.9%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/schemeshard/ut_view/ydb-core-tx-schemeshard-ut_view |58.9%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/services/metadata/initializer/ut/ydb-services-metadata-initializer-ut |58.9%| [TA] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/functional/statistics/test-results/py3test/{meta.json ... results_accumulator.log} |58.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/persqueue/ut/ut_with_sdk/balancing_ut.cpp |58.9%| [TA] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/blobstorage/ut_blobstorage/ut_restart_pdisk/test-results/unittest/{meta.json ... results_accumulator.log} |58.9%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/library/yql/providers/generic/actors/ut/ydb-library-yql-providers-generic-actors-ut |58.9%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/datashard/ut_incremental_backup/ydb-core-tx-datashard-ut_incremental_backup |59.0%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/stress/show_create/view/tests/ydb-tests-stress-show_create-view-tests |59.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/persqueue/ut/ut_with_sdk/describe_ut.cpp |59.0%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/schemeshard/ut_shred/ydb-core-tx-schemeshard-ut_shred |58.6%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/datashard/ut_minstep/ydb-core-tx-datashard-ut_minstep |58.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/persqueue/ut/ut_with_sdk/topic_timestamp_ut.cpp |58.6%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/functional/postgresql/py3test |58.6%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/public/sdk/cpp/tests/integration/server_restart/gtest |58.6%| [TS] {BAZEL_DOWNLOAD, FAILED} ydb/core/persqueue/pqtablet/partition/ut/gtest |58.7%| [TA] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/backup/impl/ut_local_partition_reader/test-results/unittest/{meta.json ... results_accumulator.log} |58.7%| [TA] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/blobstorage/vdisk/huge/ut/test-results/unittest/{meta.json ... results_accumulator.log} |58.7%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/functional/backup/s3_path_style/unittest |58.7%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/functional/encryption/py3test |58.7%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/schemeshard/ut_cdc_stream_reboots/ydb-core-tx-schemeshard-ut_cdc_stream_reboots |58.8%| [TA] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/blobstorage/vdisk/syncer/ut/test-results/unittest/{meta.json ... results_accumulator.log} |58.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/replication/service/json_change_record_ut.cpp |58.8%| [TA] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/cms/ut_sentinel/test-results/unittest/{meta.json ... results_accumulator.log} |58.8%| [TS] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/limiter/grouped_memory/ut/unittest |58.8%| [TA] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/statistics/service/ut/ut_aggregation/test-results/unittest/{meta.json ... 
results_accumulator.log} |58.8%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/stress/simple_queue/tests/py3test |58.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/persqueue/ut/ut_with_sdk/topic_ut.cpp |58.8%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/services/keyvalue/ut/ydb-services-keyvalue-ut |58.8%| [TA] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/keyvalue/ut_trace/test-results/unittest/{meta.json ... results_accumulator.log} |58.8%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/fq/solomon/py3test |58.9%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/kqp/ut/perf/ydb-core-kqp-ut-perf |58.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kqp/ut/runtime/kqp_hash_shuffle_ut.cpp |58.6%| [TA] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/services/dynamic_config/ut/test-results/unittest/{meta.json ... results_accumulator.log} |58.6%| [TA] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/fq/libs/ydb/ut/test-results/unittest/{meta.json ... results_accumulator.log} |58.6%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/services/rate_limiter/ut/ydb-services-rate_limiter-ut |58.6%| [TS] {BAZEL_DOWNLOAD, FAILED} ydb/library/yql/providers/generic/actors/ut/unittest |58.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/persqueue/ut/ut_with_sdk/mirrorer_ut.cpp |58.6%| [TA] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/datashard/ut_order/test-results/unittest/{meta.json ... results_accumulator.log} |58.7%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/public/sdk/cpp/src/client/topic/ut/with_direct_read_ut/unittest |58.7%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/public/sdk/cpp/src/client/topic/ut/with_direct_read_ut/unittest |58.7%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/public/sdk/cpp/src/client/topic/ut/with_direct_read_ut/unittest |58.7%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/public/sdk/cpp/src/client/topic/ut/with_direct_read_ut/unittest |58.7%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/public/sdk/cpp/src/client/topic/ut/with_direct_read_ut/unittest |58.7%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/public/sdk/cpp/src/client/topic/ut/with_direct_read_ut/unittest |58.7%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/public/sdk/cpp/src/client/topic/ut/with_direct_read_ut/unittest |58.7%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/public/sdk/cpp/src/client/topic/ut/with_direct_read_ut/unittest |58.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kqp/ut/service/kqp_qs_queries_ut.cpp |58.8%| [PY] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/library/stability/healthcheck/objcopy_75ddb319f6ea5a5e05fb01b29c.o |58.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/persqueue/ut/ut_with_sdk/mirrorer_autoscaling_ut.cpp |58.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/blobstorage/vdisk/syncer/blobstorage_syncer_data_ut.cpp |58.8%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/public/sdk/cpp/src/client/topic/ut/with_direct_read_ut/unittest |58.8%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/scheme_board/ut_monitoring/unittest |58.6%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/scheme_board/ut_monitoring/unittest |58.7%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/scheme_board/ut_monitoring/unittest |58.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/persqueue/ut/ut_with_sdk/autoscaling_ut.cpp |58.7%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/scheme_board/ut_monitoring/unittest |58.7%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/scheme_board/ut_monitoring/unittest |58.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/blobstorage/vdisk/syncer/blobstorage_syncer_localwriter_ut.cpp |58.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kqp/ut/runtime/kqp_scan_spilling_ut.cpp |58.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/ut_testshard/main.cpp |58.7%| [CC] {BAZEL_DOWNLOAD} 
$(S)/ydb/core/kqp/ut/runtime/kqp_scan_logging_ut.cpp |58.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/blobstorage/vdisk/syncer/blobstorage_syncquorum_ut.cpp |58.7%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/scheme_board/ut_monitoring/unittest |58.8%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/scheme_board/ut_monitoring/unittest |58.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kqp/ut/runtime/kqp_re2_ut.cpp |58.8%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/scheme_board/ut_monitoring/unittest |58.8%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/scheme_board/ut_monitoring/unittest |58.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/blobstorage/vdisk/syncer/blobstorage_syncer_broker_ut.cpp |58.8%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/schemeshard/ut_extsubdomain/unittest |58.8%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/schemeshard/ut_extsubdomain/unittest |58.8%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/schemeshard/ut_extsubdomain/unittest |58.9%| [CC] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/comp_nodes/llvm16/mkql_hasitems.cpp |58.9%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/ut_vdisk_io/vdisk_io.cpp |58.9%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/schemeshard/ut_extsubdomain/unittest |58.9%| [PB] {BAZEL_DOWNLOAD} $(B)/yql/essentials/protos/fmr.pb.{h, cc} |58.9%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/datashard/ut_external_blobs/unittest |58.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kqp/ut/federated_query/datastreams/datastreams_ut.cpp |58.8%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/datashard/ut_external_blobs/unittest |58.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/datashard/datashard_ut_vacuum.cpp |58.9%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/blobstorage/ut_blobstorage/ut_move_pdisk/unittest |58.9%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/security/ldap_auth_provider/ut/unittest |58.9%| [CC] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/comp_nodes/llvm16/mkql_map.cpp |58.9%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/security/ldap_auth_provider/ut/unittest |58.9%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/security/ldap_auth_provider/ut/unittest |59.0%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/functional/statistics/py3test |59.0%| [CC] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/comp_nodes/llvm16/mkql_toindexdict.cpp |59.0%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/blobstorage/ut_blobstorage/ut_restart_pdisk/unittest |58.9%| [PB] {BAZEL_DOWNLOAD} $(B)/yql/essentials/protos/common.pb.{h, cc} |58.9%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/tests/library/serializability/libpy3tests-library-serializability.global.a |58.9%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/blobstorage/ut_blobstorage/ut_restart_pdisk/unittest |58.9%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/blobstorage/ut_blobstorage/ut_restart_pdisk/unittest |59.0%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/sys_view/query_stats/ut/unittest |59.0%| [CC] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/comp_nodes/llvm16/mkql_switch.cpp |59.0%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/blobstorage/vdisk/huge/ut/unittest |59.0%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/blobstorage/ut_blobstorage/ut_restart_pdisk/unittest |59.1%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/blobstorage/vdisk/huge/ut/unittest |59.1%| [CC] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/comp_nodes/llvm16/mkql_invoke.cpp |59.1%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/keyvalue/ut_trace/unittest |59.1%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/blobstorage/vdisk/huge/ut/unittest |59.1%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/blobstorage/ut_blobstorage/ut_restart_pdisk/unittest |58.9%| [PB] {BAZEL_DOWNLOAD} 
$(B)/ydb/public/api/protos/ydb_common.pb.{h, cc} |58.9%| [CC] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/comp_nodes/llvm16/mkql_grace_join_imp.cpp |59.0%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/blobstorage/ut_blobstorage/ut_restart_pdisk/unittest |59.0%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/protos/dq_effects.pb.{h, cc} |59.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kqp/ut/federated_query/generic_ut/iceberg_ut_data.cpp |59.0%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/protos/annotations/sensitive.pb.{h, cc} |59.0%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/library/stability/healthcheck/objcopy_fbeafc7f011525fda6ed23e6e3.o |59.0%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/library/stability/healthcheck/objcopy_98478d217505ced1e38b6bd497.o |59.1%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/statistics/service/ut/ut_aggregation/unittest |59.1%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/datashard/ut_order/unittest |59.1%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/stress/show_create/table/tests/objcopy_1815f02732d96389c328f04d90.o |59.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/providers/dq/metrics/libproviders-dq-metrics.a |59.1%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/stress/show_create/table/tests/objcopy_f0d8fb718a757998dc9403df32.o |59.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/ut_transfer/ut_transfer.cpp |59.2%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/keyvalue/ut_trace/unittest |59.2%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/blobstorage/vdisk/huge/ut/unittest |59.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/blobstorage/vdisk/ingress/blobstorage_ingress_matrix_ut.cpp |59.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/tools/blobsan/main.cpp |59.1%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/blobstorage/ut_blobstorage/ut_restart_pdisk/unittest |59.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/tools/dq/worker_node/main.cpp |59.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/services/metadata/secret/ut/ut_secret.cpp |59.1%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/blobstorage/ut_blobstorage/ut_restart_pdisk/unittest |59.1%| [PR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/dq/expr_nodes/dq_expr_nodes.{gen.h ... defs.inl.h} |59.1%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/blobstorage/ut_blobstorage/ut_restart_pdisk/unittest |59.1%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/blobstorage/ut_blobstorage/ut_restart_pdisk/unittest |59.1%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/library/actors/protos/interconnect.pb.{h, cc} |59.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/library/yql/tools/dq/service_node/main.cpp |59.2%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/sys_view/query_stats/ut/unittest |59.2%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/blobstorage/testing/group_overseer/libblobstorage-testing-group_overseer.a |59.2%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/datashard/ut_order/unittest |59.2%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/datashard/ut_order/unittest |59.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/persqueue/dread_cache_service/ut/caching_proxy_ut.cpp |59.2%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/datashard/ut_order/unittest |59.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kesus/proxy/ut_helpers.cpp |59.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/scheme_board/ut_helpers.cpp |59.2%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/follower_group.{pb.h ... 
grpc.pb.h} |59.2%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/datashard/ut_order/unittest |59.2%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/datashard/ut_order/unittest |59.2%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/datashard/ut_order/unittest |59.2%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/datashard/ut_order/unittest |59.2%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/datashard/ut_order/unittest |59.3%| [TS] {BAZEL_DOWNLOAD, FAILED} ydb/library/yql/providers/common/http_gateway/ut/unittest |59.3%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/datashard/ttl/objcopy_82d6d29ac7be3798b7e748facc.o |59.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kqp/ut/federated_query/generic_ut/kqp_generic_provider_ut.cpp |59.2%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/datashard/ut_order/unittest |59.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/blobstorage/vdisk/anubis_osiris/blobstorage_anubis_algo_ut.cpp |59.2%| [TS] {BAZEL_DOWNLOAD, FAILED} ydb/core/fq/libs/ydb/ut/unittest |59.2%| [TS] {BAZEL_DOWNLOAD, FAILED} ydb/core/fq/libs/ydb/ut/unittest |59.2%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/functional/statistics/py3test |59.2%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/keyvalue/ut_trace/unittest |59.2%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/services/dynamic_config/ut/unittest |59.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/ut_view/ut_view.cpp |59.2%| [TS] {BAZEL_DOWNLOAD, FAILED} ydb/library/yql/providers/common/http_gateway/ut/unittest |59.3%| [TS] {BAZEL_DOWNLOAD, FAILED} ydb/library/yql/providers/common/http_gateway/ut/unittest |59.3%| [TS] {BAZEL_DOWNLOAD, FAILED} ydb/library/yql/providers/common/http_gateway/ut/unittest |59.3%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/stress/show_create/view/tests/objcopy_59eb97971e5f83d3296e6c33b5.o |59.3%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/stress/show_create/view/tests/objcopy_60e08504076128d310212c6460.o |59.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/ut_shred/ut_shred.cpp |59.3%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/cms/ut_sentinel/unittest |59.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/public/sdk/cpp/src/client/topic/ut/describe_topic_ut.cpp |59.3%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/library/ydb_issue/proto/issue_id.{pb.h ... grpc.pb.h} |59.3%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/cms/ut_sentinel/unittest |59.3%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/wrappers/ut_helpers/libcore-wrappers-ut_helpers.a |59.3%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/security/ldap_auth_provider/ut/unittest |59.3%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/security/ldap_auth_provider/ut/unittest |59.3%| [TS] {BAZEL_DOWNLOAD, FAILED} ydb/library/yql/providers/common/http_gateway/ut/unittest |59.3%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/blobstorage/vdisk/syncer/ut/unittest |59.3%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/sys_view/query_stats/ut/unittest |59.3%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/blobstorage/vdisk/syncer/ut/unittest |59.3%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/scheme/protos/type_info.{pb.h ... 
grpc.pb.h} |59.3%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/backup/impl/ut_local_partition_reader/unittest |59.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/datashard/datashard_ut_minstep.cpp |59.3%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/functional/statistics/py3test |59.3%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/blobstorage/ut_blobstorage/ut_move_pdisk/unittest |59.3%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/stress/show_create/view/tests/objcopy_5acd2383ed2cd599cfd64f7c8a.o |59.4%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/sys_view/query_stats/ut/unittest |59.4%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/security/ldap_auth_provider/ut/unittest |59.4%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/backup/impl/ut_local_partition_reader/unittest |59.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/public/sdk/cpp/src/client/topic/ut/basic_usage_ut.cpp |59.3%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/services/persqueue_v1/ut/describes_ut/unittest |59.3%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/services/persqueue_v1/ut/describes_ut/unittest |59.3%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/services/persqueue_v1/ut/describes_ut/unittest |59.4%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/security/ldap_auth_provider/ut/unittest |59.4%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/security/ldap_auth_provider/ut/unittest |59.4%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/providers/dq/global_worker_manager/libproviders-dq-global_worker_manager.a |59.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kqp/ut/perf/kqp_workload_ut.cpp |59.4%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/persqueue/ut/slow/unittest |59.4%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/blobstorage/ut_blobstorage/ut_move_pdisk/unittest |59.4%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/blobstorage/ut_blobstorage/ut_move_pdisk/unittest |59.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kqp/ut/perf/kqp_query_perf_ut.cpp |59.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/providers/generic/actors/ut/yql_generic_lookup_actor_ut.cpp |59.3%| [TS] {BAZEL_DOWNLOAD, FAILED} ydb/library/yql/providers/common/http_gateway/ut/unittest |59.3%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/security/ldap_auth_provider/ut/unittest |59.3%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/datashard/ut_external_blobs/unittest |59.4%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/sys_view/query_stats/ut/unittest |59.4%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/datashard/ut_external_blobs/unittest |59.4%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/datashard/ut_external_blobs/unittest |59.4%| [TS] {BAZEL_DOWNLOAD, FAILED} ydb/library/yql/providers/common/http_gateway/ut/unittest |59.4%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/schemeshard/ut_extsubdomain/unittest |59.4%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/datashard/ut_external_blobs/unittest |59.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/services/metadata/initializer/ut/ut_init.cpp |59.4%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/schemeshard/ut_extsubdomain/unittest |59.4%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/datashard/ut_external_blobs/unittest |59.4%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/datashard/ut_external_blobs/unittest |59.4%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/datashard/ut_external_blobs/unittest |59.4%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/services/persqueue_v1/ut/describes_ut/unittest |59.4%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/services/persqueue_v1/ut/describes_ut/unittest |59.4%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/functional/statistics/py3test |59.4%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/services/persqueue_v1/ut/describes_ut/unittest |59.4%| [TM] {BAZEL_DOWNLOAD, FAILED} 
ydb/services/persqueue_v1/ut/describes_ut/unittest |59.3%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/services/persqueue_v1/ut/describes_ut/unittest |59.3%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/functional/statistics/py3test |59.4%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/services/persqueue_v1/ut/describes_ut/unittest |59.4%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/persqueue/ut/slow/unittest |59.4%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/persqueue/ut/slow/unittest |59.4%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/public/sdk/cpp/src/client/topic/ut/objcopy_1406195445f45d950dda89fcd8.o |59.4%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/blobstorage/vdisk/syncer/ut/unittest |59.4%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/backup/impl/ut_local_partition_reader/unittest |59.4%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/blobstorage/vdisk/syncer/ut/unittest |59.4%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/backup/impl/ut_local_partition_reader/unittest |59.4%| RESOURCE $(sbr:4966407557) |59.4%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/persqueue/ut/slow/unittest |59.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/datashard/datashard_ut_incremental_backup.cpp |59.4%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/blobstorage/ut_blobstorage/ut_move_pdisk/unittest |59.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/public/sdk/cpp/src/client/topic/ut/topic_to_table_ut.cpp |59.4%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/blobstorage/ut_blobstorage/ut_move_pdisk/unittest |59.4%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/persqueue/ut/slow/unittest |59.4%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/blobstorage/ut_blobstorage/ut_move_pdisk/unittest |59.4%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/cms/ut_sentinel/unittest |59.4%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/blobstorage/ut_blobstorage/ut_move_pdisk/unittest |59.4%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/schemeshard/ut_extsubdomain/unittest |59.5%| [TS] {BAZEL_DOWNLOAD, FAILED} ydb/library/yql/providers/common/http_gateway/ut/unittest |59.5%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/blobstorage/ut_blobstorage/ut_move_pdisk/unittest |59.5%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/schemeshard/ut_extsubdomain/unittest |59.5%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/datashard/ut_external_blobs/unittest |59.5%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/schemeshard/ut_extsubdomain/unittest |59.5%| [TS] {BAZEL_DOWNLOAD, FAILED} ydb/core/fq/libs/ydb/ut/unittest |59.5%| [TS] {BAZEL_DOWNLOAD, FAILED} ydb/core/fq/libs/ydb/ut/unittest |59.5%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/statistics/service/ut/ut_aggregation/unittest |59.5%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/statistics/service/ut/ut_aggregation/unittest |59.5%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/public/sdk/cpp/src/client/topic/ut/with_direct_read_ut/unittest |59.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/public/sdk/cpp/src/client/topic/ut/local_partition_ut.cpp |59.4%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/statistics/service/ut/ut_aggregation/unittest |59.4%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/scheme_board/ut_monitoring/unittest |59.4%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/statistics/service/ut/ut_aggregation/unittest |59.4%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/functional/statistics/py3test |59.4%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/backup/impl/ut_local_partition_reader/unittest |59.4%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/security/ldap_auth_provider/ut/unittest |59.4%| [TS] {BAZEL_DOWNLOAD, FAILED} ydb/library/yql/providers/common/http_gateway/ut/unittest |59.4%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/cms/ut_sentinel/unittest |59.4%| [TM] {BAZEL_DOWNLOAD, 
FAILED} ydb/tests/functional/statistics/py3test |59.4%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/services/persqueue_v1/ut/describes_ut/unittest |59.4%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/services/dynamic_config/ut/unittest |59.4%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/functional/statistics/py3test |59.5%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/blobstorage/ut_blobstorage/ut_move_pdisk/unittest |59.5%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/schemeshard/ut_extsubdomain/unittest |59.5%| [TS] {BAZEL_DOWNLOAD, FAILED} ydb/core/fq/libs/ydb/ut/unittest |59.5%| [TS] {BAZEL_DOWNLOAD, FAILED} ydb/core/fq/libs/ydb/ut/unittest |59.5%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/datashard/ut_order/unittest |59.5%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/sys_view/query_stats/ut/unittest |59.5%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/statistics/service/ut/ut_aggregation/unittest |59.5%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/sys_view/query_stats/ut/unittest |59.5%| [TS] {BAZEL_DOWNLOAD, FAILED} ydb/library/yql/providers/common/http_gateway/ut/unittest |59.5%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/datashard/ut_order/unittest |59.5%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/datashard/ut_order/unittest |59.5%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/sys_view/query_stats/ut/unittest |59.5%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/sys_view/query_stats/ut/unittest |59.5%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/sys_view/query_stats/ut/unittest |59.5%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/datashard/ut_order/unittest |59.5%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/statistics/service/ut/ut_aggregation/unittest |59.5%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/functional/statistics/py3test |59.5%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/blobstorage/vdisk/syncer/ut/unittest |59.5%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/functional/statistics/py3test |59.5%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/datashard/ut_order/unittest |59.6%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/backup/impl/ut_local_partition_reader/unittest |59.6%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/datashard/ut_order/unittest |59.6%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/backup/impl/ut_local_partition_reader/unittest |59.6%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/datashard/ut_order/unittest |59.5%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/blobstorage/vdisk/syncer/ut/unittest |59.5%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/blobstorage/vdisk/syncer/ut/unittest |59.5%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/blobstorage/vdisk/syncer/ut/unittest |59.5%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/backup/impl/ut_local_partition_reader/unittest |59.5%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/datashard/ut_order/unittest |59.5%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/blobstorage/vdisk/syncer/ut/unittest |59.5%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/blobstorage/vdisk/huge/ut/unittest |59.5%| [TS] {BAZEL_DOWNLOAD, FAILED} ydb/core/fq/libs/ydb/ut/unittest |59.5%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/cms/ut_sentinel/unittest |59.5%| [TS] {BAZEL_DOWNLOAD, FAILED} ydb/core/fq/libs/ydb/ut/unittest |59.5%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/services/dynamic_config/ut/unittest |59.5%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/services/dynamic_config/ut/unittest |59.5%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/datashard/ut_order/unittest |59.6%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/cms/ut_sentinel/unittest |59.6%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/cms/ut_sentinel/unittest |59.6%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/cms/ut_sentinel/unittest |59.6%| [TM] 
{BAZEL_DOWNLOAD, FAILED} ydb/core/backup/impl/ut_local_partition_reader/unittest |59.6%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/statistics/service/ut/ut_aggregation/unittest |59.6%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/statistics/service/ut/ut_aggregation/unittest |59.6%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/services/dynamic_config/ut/unittest |59.6%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/keyvalue/ut_trace/unittest |59.6%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/statistics/service/ut/ut_aggregation/unittest |59.6%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/keyvalue/ut_trace/unittest |59.6%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/services/dynamic_config/ut/unittest |59.6%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/services/dynamic_config/ut/unittest |59.6%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/datashard/ut_order/unittest |59.6%| [LD] {BAZEL_DOWNLOAD} $(B)/yql/essentials/udfs/test/test_import/libtest_import_udf.so |59.6%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/datashard/ut_order/unittest |59.6%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/services/dynamic_config/ut/unittest |59.6%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/services/dynamic_config/ut/unittest |59.6%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/services/dynamic_config/ut/unittest |59.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kqp/ut/opt/kqp_union_ut.cpp |59.6%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/datashard/ut_order/unittest |59.6%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/cms/ut_sentinel/unittest |59.6%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/datashard/ut_order/unittest |59.6%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/datashard/ut_order/unittest |59.6%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/datashard/ut_order/unittest |59.6%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/datashard/ut_order/unittest |59.6%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/datashard/ut_order/unittest |59.6%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/datashard/ut_order/unittest |59.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kqp/ut/opt/kqp_returning_ut.cpp |59.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kqp/ut/opt/kqp_hash_combine_ut.cpp |59.6%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/datashard/ut_order/unittest |59.6%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/blobstorage/vdisk/huge/ut/unittest |59.6%| [TS] {BAZEL_DOWNLOAD, FAILED} ydb/core/fq/libs/ydb/ut/unittest |59.6%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/external_sources/hive_metastore/hive_metastore_native/libexternal_sources-hive_metastore-hive_metastore_native.a |59.6%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/datashard/ut_order/unittest |59.6%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/datashard/ut_order/unittest |59.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/balance_coverage/balance_coverage_builder_ut.cpp |59.7%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/datashard/ut_order/unittest |59.7%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/datashard/ut_order/unittest |59.7%| [CC] {BAZEL_DOWNLOAD} $(S)/yql/essentials/tools/sql2yql/sql2yql.cpp |59.7%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/datashard/ut_order/unittest |59.7%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/datashard/ut_order/unittest |59.7%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/datashard/ut_order/unittest |59.7%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/datashard/ut_order/unittest |59.7%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/datashard/ut_order/unittest |59.7%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/datashard/ut_order/unittest |59.5%| [TS] {BAZEL_DOWNLOAD, FAILED} ydb/core/fq/libs/ydb/ut/unittest |59.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kqp/ut/opt/kqp_agg_ut.cpp 
|59.5%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/datashard/ut_order/unittest |59.5%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/blobstorage/vdisk/huge/ut/unittest |59.5%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/blobstorage/vdisk/syncer/ut/unittest |59.5%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/blobstorage/vdisk/huge/ut/unittest |59.5%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/backup/impl/ut_local_partition_reader/unittest |59.5%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/cms/ut_sentinel/unittest |59.5%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/blobstorage/vdisk/huge/ut/unittest |59.5%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/blobstorage/vdisk/huge/ut/unittest |59.5%| [TA] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/scheme_board/ut_populator/test-results/unittest/{meta.json ... results_accumulator.log} |59.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kqp/ut/opt/kqp_extract_predicate_unpack_ut.cpp |59.6%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/sys_view/service/ut/ydb-core-sys_view-service-ut |59.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kqp/ut/opt/kqp_named_expressions_ut.cpp |59.6%| [TS] {BAZEL_DOWNLOAD, FAILED} ydb/mvp/meta/ut/unittest |59.6%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tablet_flat/test/tool/perf/table-perf |59.6%| [TA] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/datashard/ut_kqp_errors/test-results/unittest/{meta.json ... results_accumulator.log} |59.6%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/stress/olap_workload/tests/ydb-tests-stress-olap_workload-tests |59.6%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/schemeshard/ut_export_reboots_s3/ydb-core-tx-schemeshard-ut_export_reboots_s3 |59.6%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/stress/oltp_workload/tests/ydb-tests-stress-oltp_workload-tests |59.6%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/datashard/ut_snapshot_isolation/ydb-core-tx-datashard-ut_snapshot_isolation |59.3%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/viewer/tests/py3test |59.3%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/graph/ut/unittest |59.3%| [TA] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/kqp/ut/data/test-results/unittest/{meta.json ... results_accumulator.log} |59.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kqp/ut/opt/kqp_not_null_ut.cpp |59.3%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/kesus/tablet/quoter_performance_test/quoter_performance_test |59.3%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/persqueue/public/list_topics/ut/unittest |59.3%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/library/actors/dnsresolver/ut/ydb-library-actors-dnsresolver-ut |59.3%| [TA] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/datashard/ut_incremental_backup/test-results/unittest/{meta.json ... results_accumulator.log} |59.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kqp/ut/opt/kqp_merge_ut.cpp |59.3%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/kqp/ut/idx_test/ydb-core-kqp-ut-idx_test |59.3%| [TS] {BAZEL_DOWNLOAD, FAILED} ydb/core/persqueue/public/describer/ut/unittest |59.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kqp/ut/opt/kqp_ne_ut.cpp |59.4%| [TA] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/kesus/tablet/ut/test-results/unittest/{meta.json ... results_accumulator.log} |59.4%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/blobstorage/incrhuge/ut/ydb-core-blobstorage-incrhuge-ut |59.4%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/blobstorage/ut_blobstorage/ut_group_reconfiguration/ut_group_reconfiguration |59.4%| [TA] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/services/fq/ut_integration/test-results/unittest/{meta.json ... 
results_accumulator.log} |59.4%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/stress/transfer/tests/ydb-tests-stress-transfer-tests |59.4%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/scheme_board/ut_populator/ydb-core-tx-scheme_board-ut_populator |59.4%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/olap/high_load/unittest |59.4%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/functional/dstool/py3test |59.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kqp/ut/opt/kqp_kv_ut.cpp |59.4%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/schemeshard/ut_index_build/ydb-core-tx-schemeshard-ut_index_build |59.4%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tools/query_replay/ydb_query_replay |59.5%| [TA] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/tx_proxy/ut_encrypted_storage/test-results/unittest/{meta.json ... results_accumulator.log} |59.5%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/schemeshard/ut_continuous_backup_reboots/tx-schemeshard-ut_continuous_backup_reboots |59.5%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/library/actors/interconnect/ut_rdma/gtest |59.5%| [TA] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/blobstorage/backpressure/ut/test-results/unittest/{meta.json ... results_accumulator.log} |59.5%| [TS] {BAZEL_DOWNLOAD, FAILED} ydb/core/config/validation/column_shard_config_validator_ut/unittest |59.5%| [TA] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/library/yql/dq/runtime/ut/test-results/unittest/{meta.json ... results_accumulator.log} |58.9%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/schemeshard/ut_external_table/ydb-core-tx-schemeshard-ut_external_table |58.9%| [TA] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/kqp/compile_service/ut/test-results/unittest/{meta.json ... results_accumulator.log} |58.9%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/mind/ut_fat/ydb-core-mind-ut_fat |58.9%| [TA] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/blobstorage/vdisk/hulldb/compstrat/ut/test-results/unittest/{meta.json ... results_accumulator.log} |58.9%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/public/sdk/cpp/src/client/federated_topic/ut/ydb-public-sdk-cpp-src-client-federated_topic-ut |58.9%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/fq/libs/control_plane_proxy/ut/unittest |58.9%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/public/sdk/cpp/tests/integration/basic_example/gtest |58.9%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/pgproxy/ut/ydb-core-pgproxy-ut |58.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/actors/wilson/wilson_trace_ut.cpp |58.9%| [TA] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/kqp/ut/perf/test-results/unittest/{meta.json ... results_accumulator.log} |59.0%| [TA] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/columnshard/engines/ut/test-results/unittest/{meta.json ... results_accumulator.log} |59.0%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/stability/tool/tool |59.0%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/mvp/meta/bin/mvp_meta |59.0%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/kqp/ut/scheme/ydb-core-kqp-ut-scheme |59.0%| [TA] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/olap/oom/test-results/py3test/{meta.json ... 
results_accumulator.log} |59.0%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/tools/fqrun/fqrun |59.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/persqueue/public/list_topics/list_all_topics_ut.cpp |58.7%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/library/yql/providers/generic/provider/ut/pushdown/yql-providers-generic-provider-ut-pushdown |58.7%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/schemeshard/ut_base_reboots/ydb-core-tx-schemeshard-ut_base_reboots |58.7%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/util/btree_benchmark/btree_benchmark |58.8%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tablet_flat/ut_pg/ydb-core-tablet_flat-ut_pg |58.8%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/schemeshard/ut_move_reboots/ydb-core-tx-schemeshard-ut_move_reboots |58.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/datashard/datashard_ut_stats.cpp |58.8%| [TA] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/fq/libs/common/ut/test-results/unittest/{meta.json ... results_accumulator.log} |58.8%| [TA] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/blobstorage/ut_blobstorage/ut_cluster_balancing/test-results/unittest/{meta.json ... results_accumulator.log} |58.8%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/blobstorage/ut_blobstorage/ut_read_only_vdisk/ut_blobstorage-ut_read_only_vdisk |58.8%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/stress/show_create/table/tests/objcopy_df04396057094f2483296b9dbe.o |58.9%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/stress/statistics_workload/statistics_workload |58.9%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/http_proxy/ut/sqs_topic_ut/ydb-core-http_proxy-ut-sqs_topic_ut |58.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kesus/proxy/proxy_actor_ut.cpp |58.9%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/datashard/ttl/objcopy_589d529f9477963cf67237781c.o |58.9%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/library/actors/core/ut/ydb-library-actors-core-ut |58.9%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/schemeshard/ut_sysview/ydb-core-tx-schemeshard-ut_sysview |58.9%| [TA] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/replication/controller/ut_target_discoverer/test-results/unittest/{meta.json ... results_accumulator.log} |58.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/blobstorage/vdisk/ingress/blobstorage_ingress_ut.cpp |58.9%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/tx_proxy/ut_schemereq/ydb-core-tx-tx_proxy-ut_schemereq |58.9%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/datashard/ttl/objcopy_b1ab101896e634020e0c6ffeaf.o |58.9%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/schemeshard/ut_backup_collection/ydb-core-tx-schemeshard-ut_backup_collection |58.7%| [TA] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/control/ut/test-results/unittest/{meta.json ... results_accumulator.log} |58.7%| [TA] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/blobstorage/ut_vdisk_io/test-results/unittest/{meta.json ... results_accumulator.log} |58.7%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tools/stress_tool/ut/ydb-tools-stress_tool-ut |58.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/statistics/service/ut/ut_aggregation/ut_aggregate_statistics.cpp |58.7%| [TA] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/kqp/proxy_service/ut/test-results/unittest/{meta.json ... 
results_accumulator.log} |58.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kqp/ut/opt/kqp_ranges_ut.cpp |58.7%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/schemeshard/ut_topic_splitmerge/ydb-core-tx-schemeshard-ut_topic_splitmerge |58.7%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/library/yql/tests/sql/solomon/pytest |58.7%| [TA] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/kqp/ut/pg/test-results/unittest/{meta.json ... results_accumulator.log} |58.8%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/testlib/actors/ut/ydb-core-testlib-actors-ut |58.8%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/services/cms/ut/ydb-services-cms-ut |58.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kqp/ut/opt/kqp_sort_ut.cpp |58.8%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/library/table_creator/ut/ydb-library-table_creator-ut |58.8%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/replication/service/ut_worker/ydb-core-tx-replication-service-ut_worker |58.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/scheme_board/subscriber_ut.cpp |58.8%| [TA] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/schemeshard/ut_user_attributes/test-results/unittest/{meta.json ... results_accumulator.log} |58.8%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/kqp/ut/join/ydb-core-kqp-ut-join |58.8%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/fq/yt/kqp_yt_import/py3test |58.6%| [TA] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/grpc_services/ut/test-results/unittest/{meta.json ... results_accumulator.log} |58.6%| [TS] {BAZEL_DOWNLOAD, FAILED} ydb/core/persqueue/public/partition_key_range/ut/unittest |58.6%| [TA] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/blobstorage/ut_blobstorage/ut_stop_pdisk/test-results/unittest/{meta.json ... results_accumulator.log} |58.6%| [TA] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/library/yql/dq/actors/spilling/ut/test-results/unittest/{meta.json ... 
results_accumulator.log} |58.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kqp/ut/opt/kqp_sqlin_ut.cpp |58.7%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/scheme_board/ut_populator/unittest |58.7%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/scheme_board/ut_populator/unittest |58.7%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/scheme_board/ut_populator/unittest |58.7%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/scheme_board/ut_populator/unittest |58.7%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/scheme_board/ut_populator/unittest |58.7%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/scheme_board/ut_populator/unittest |58.7%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/datashard/ut_kqp_errors/unittest |58.7%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/datashard/ut_kqp_errors/unittest |58.8%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/scheme_board/ut_populator/unittest |58.8%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/scheme_board/ut_populator/unittest |58.8%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/scheme_board/ut_populator/unittest |58.8%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/scheme_board/ut_populator/unittest |58.8%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/kqp/ut/data/unittest |58.8%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/kqp/ut/data/unittest |58.8%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/kqp/ut/data/unittest |58.8%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/kqp/ut/data/unittest |58.8%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/datashard/ut_incremental_backup/unittest |58.8%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/kesus/tablet/ut/unittest |58.8%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/kesus/tablet/ut/unittest |58.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/ut_blobstorage/race.cpp |58.8%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/kesus/tablet/ut/unittest |58.8%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/kesus/tablet/ut/unittest |58.8%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/services/fq/ut_integration/unittest |58.9%| [CC] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/comp_nodes/llvm16/mkql_prepend.cpp |58.9%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/blobstorage/vdisk/hulldb/compstrat/ut/unittest |58.9%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/services/fq/ut_integration/unittest |58.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/services/keyvalue/grpc_service_ut.cpp |58.8%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/blobstorage/vdisk/hulldb/compstrat/ut/unittest |58.8%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/blobstorage/vdisk/hulldb/compstrat/ut/unittest |58.9%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/blobstorage/vdisk/hulldb/compstrat/ut/unittest |58.9%| [CC] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/comp_nodes/llvm16/mkql_multihopping.cpp |58.9%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/serializable/objcopy_51b071d7746089933668451b33.o |58.9%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/blobstorage/vdisk/hulldb/compstrat/ut/unittest |58.9%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/kqp/ut/perf/unittest |58.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/services/rate_limiter/rate_limiter_ut.cpp |58.9%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/columnshard/engines/ut/unittest |59.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/ut_cdc_stream_reboots/ut_cdc_stream_reboots.cpp |58.9%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/kqp/ut/perf/unittest |58.9%| [CC] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/comp_nodes/llvm16/mkql_reverse.cpp |58.9%| [CC] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/comp_nodes/llvm16/mkql_wide_filter.cpp |58.9%| [CC] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/comp_nodes/llvm16/mkql_block_logical.cpp |58.9%| 
[CC] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/comp_nodes/llvm16/mkql_condense1.cpp |59.0%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/blobstorage/ut_blobstorage/ut_cluster_balancing/unittest |59.0%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/replication/controller/ut_target_discoverer/unittest |59.0%| [CC] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/comp_nodes/llvm16/mkql_mapnext.cpp |59.0%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/blobstorage/ut_vdisk_io/unittest |59.0%| [CC] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/comp_nodes/llvm16/mkql_seq.cpp |59.0%| [CC] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/comp_nodes/llvm16/mkql_collect.cpp |59.0%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/blobstorage/ut_blobstorage/ut_cluster_balancing/unittest |59.0%| [CC] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/comp_nodes/llvm16/mkql_replicate.cpp |59.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/ut_blobstorage/read_only_vdisk.cpp |59.0%| [LD] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/kqp/kqp_indexes/ydb-tests-functional-kqp-kqp_indexes |59.0%| [CC] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/comp_nodes/llvm16/mkql_safe_circular_buffer.cpp |59.0%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/kqp/ut/pg/unittest |59.0%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/providers/dq/actors/yt/libdq-actors-yt.a |59.1%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/blobstorage/ut_blobstorage/ut_stop_pdisk/unittest |59.1%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/kqp/proxy_service/ut/unittest |59.1%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/kqp/ut/pg/unittest |59.1%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/library/yql/dq/actors/spilling/ut/unittest |59.1%| [CC] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/comp_nodes/llvm16/mkql_round.cpp |59.1%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/kqp/ut/pg/unittest |59.1%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/stress/olap_workload/tests/objcopy_9be8b6745d0fa150928bab4206.o |59.0%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/stress/olap_workload/tests/objcopy_8e19d47784789c55156c57f816.o |59.0%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/tests/stress/oltp_workload/workload/libpy3stress-oltp_workload-workload.global.a |59.0%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/tests/stress/oltp_workload/workload/type/libpy3oltp_workload-workload-type.global.a |59.0%| [CC] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/comp_nodes/llvm16/mkql_mutdict.cpp |59.0%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/blobstorage/ut_blobstorage/ut_stop_pdisk/unittest |59.1%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/kqp/ut/pg/unittest |59.1%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/blobstorage/ut_blobstorage/ut_stop_pdisk/unittest |59.1%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/library/yql/dq/actors/spilling/ut/unittest |59.1%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/protos/ydb_issue_message.pb.{h, cc} |59.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/sys_view/service/query_history_ut.cpp |59.1%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/library/yql/dq/actors/spilling/ut/unittest |59.1%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/config/protos/fq_config.pb.{h, cc} |59.1%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/library/yql/dq/actors/spilling/ut/unittest |59.2%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/kqp/ut/pg/unittest |59.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/blobstorage/incrhuge/ut/incrhuge_log_merger_ut.cpp |59.2%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/blobstorage/ut_blobstorage/ut_stop_pdisk/unittest |59.2%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/blobstorage/ut_blobstorage/ut_stop_pdisk/unittest |59.2%| [LD] {BAZEL_DOWNLOAD} 
$(B)/ydb/library/yql/tools/solomon_emulator/recipe/solomon_recipe |59.2%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/blobstorage/ut_blobstorage/ut_stop_pdisk/unittest |59.2%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/schemeshard/ut_user_attributes/unittest |59.2%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/protos/ydb_scheme.pb.{h, cc} |59.1%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/grpc_services/ut/unittest |59.1%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/protos/ydb_config.pb.{h, cc} |59.1%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/protos/ydb_cms.pb.{h, cc} |59.2%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/grpc_services/ut/unittest |59.2%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/grpc_services/ut/unittest |59.2%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/grpc_services/ut/unittest |59.2%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/providers/dq/api/grpc/api.{pb.h ... grpc.pb.h} |59.2%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/counters_keyvalue.{pb.h ... grpc.pb.h} |59.2%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/grpc_services/ut/unittest |59.2%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/kqp/ut/pg/unittest |59.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/pgproxy/pg_proxy_ut.cpp |59.3%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/kqp/proxy_service/ut/unittest |59.3%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/schemeshard/ut_user_attributes/unittest |59.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/scheme_board/populator_ut.cpp |59.2%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/testing/benchmark/main/libtesting-benchmark-main.global.a |59.2%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/kqp/ut/pg/unittest |59.3%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/schemeshard/ut_user_attributes/unittest |59.3%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/kqp/proxy_service/ut/unittest |59.3%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/grpc_pq_old.{pb.h ... grpc.pb.h} |59.3%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/control/ut/unittest |59.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tablet_flat/test/tool/perf/main.cpp |59.3%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/control/ut/unittest |59.3%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/fq/libs/common/ut/unittest |59.3%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/fq/libs/common/ut/unittest |59.3%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/s3_settings.{pb.h ... grpc.pb.h} |59.3%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/tests/stress/statistics_workload/libpy3statistics_workload.global.a |59.3%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/node_limits.{pb.h ... grpc.pb.h} |59.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/actors/core/ut/mon_ut.cpp |59.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/actors/core/ut/scheduler_actor_ut.cpp |59.3%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/http_config.{pb.h ... grpc.pb.h} |59.3%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/query_stats.{pb.h ... grpc.pb.h} |59.3%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/columnshard/engines/ut/unittest |59.3%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/flat_tx_scheme.{pb.h ... grpc.pb.h} |59.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/ut_sysview/ut_sysview.cpp |59.3%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/msgbus_kv.{pb.h ... grpc.pb.h} |59.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kqp/ut/scheme/kqp_scheme_fulltext_ut.cpp |59.4%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/columnshard/engines/ut/unittest |59.4%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/fq/libs/common/ut/unittest |59.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/table_creator/table_creator_ut.cpp |59.4%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/bootstrap.{pb.h ... 
grpc.pb.h} |59.4%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/columnshard/engines/ut/unittest |59.3%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/columnshard/engines/ut/unittest |59.3%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/columnshard/engines/ut/unittest |59.4%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/columnshard/engines/ut/unittest |59.4%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/columnshard/engines/ut/unittest |59.4%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/columnshard/engines/ut/unittest |59.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/replication/service/worker_ut.cpp |59.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kqp/ut/scheme/kqp_constraints_ut.cpp |59.4%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/columnshard/engines/ut/unittest |59.4%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/columnshard/engines/ut/unittest |59.4%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/fq/libs/common/ut/unittest |59.4%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/columnshard/engines/ut/unittest |59.4%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/columnshard/engines/ut/unittest |59.5%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/columnshard/engines/ut/unittest |59.5%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/columnshard/engines/ut/unittest |59.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/services/cms/cms_ut.cpp |59.4%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/columnshard/engines/ut/unittest |59.4%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/columnshard/engines/ut/unittest |59.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kqp/ut/join/kqp_block_hash_join_ut.cpp |59.5%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/columnshard/engines/ut/unittest |59.5%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/columnshard/engines/ut/unittest |59.5%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/fq/libs/common/ut/unittest |59.5%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/fq/libs/common/ut/unittest |59.5%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/fq/libs/common/ut/unittest |59.5%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/fq/libs/common/ut/unittest |59.5%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/columnshard/engines/ut/unittest |59.5%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/kqp/ut/perf/unittest |59.4%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/columnshard/engines/ut/unittest |59.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kqp/ut/join/kqp_join_topology_ut.cpp |59.5%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/columnshard/engines/ut/unittest |59.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/actors/core/ut/process_stats_ut.cpp |59.5%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/columnshard/engines/ut/unittest |59.5%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/columnshard/engines/ut/unittest |59.5%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/columnshard/engines/ut/unittest |59.5%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/blobstorage/ut_blobstorage/ut_cluster_balancing/unittest |59.5%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/fq/libs/common/ut/unittest |59.5%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/columnshard/engines/ut/unittest |59.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/actors/core/ut/executor_pool_semaphore_ut.cpp |59.5%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/columnshard/engines/ut/unittest |59.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/actors/core/ut/mailbox_lockfree_ut.cpp |59.6%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/columnshard/engines/ut/unittest |59.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/actors/core/ut/actor_exception_ut.cpp |59.6%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/linear_regression/liblibrary-cpp-linear_regression.a |59.6%| [TM] {BAZEL_DOWNLOAD, FAILED} 
ydb/core/tx/columnshard/engines/ut/unittest |59.6%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/accurate_accumulate/liblibrary-cpp-accurate_accumulate.a |59.6%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/columnshard/engines/ut/unittest |59.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/actors/core/ut/actor_shared_threads.cpp |59.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/actors/core/ut/actor_coroutine_ut.cpp |59.6%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/columnshard/engines/ut/unittest |59.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/actors/core/ut/actor_basic_ut.cpp |59.6%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/kqp/ut/perf/unittest |59.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/actors/core/ut/event_pb_payload_ut.cpp |59.6%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/kqp/ut/perf/unittest |59.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/mvp/meta/bin/main.cpp |59.6%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/kqp/ut/perf/unittest |59.6%| PREPARE $(FLAKE8_PY2-2255386470) - 8.40 MB |59.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/actors/core/ut/event_pb_ut.cpp |59.7%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/kqp/ut/perf/unittest |59.6%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/kqp/ut/perf/unittest |59.6%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/kqp/ut/perf/unittest |59.6%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/kqp/ut/perf/unittest |59.7%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/kqp/ut/perf/unittest |59.7%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/replication/controller/ut_target_discoverer/unittest |59.7%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/replication/controller/ut_target_discoverer/unittest |59.7%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/replication/controller/ut_target_discoverer/unittest |59.7%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/replication/controller/ut_target_discoverer/unittest |59.7%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/kqp/ut/perf/unittest |59.7%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/kqp/ut/perf/unittest |59.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/actors/core/ut/actorsystem_ut.cpp |59.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/actors/core/ut/executor_pool_basic_ut.cpp |59.7%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/control/ut/unittest |59.6%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/replication/controller/ut_target_discoverer/unittest |59.6%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/kqp/ut/perf/unittest |59.6%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/kqp/ut/perf/unittest |59.6%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/kqp/compile_service/ut/unittest |59.6%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/kqp/compile_service/ut/unittest |59.6%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/kqp/compile_service/ut/unittest |59.6%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/kqp/compile_service/ut/unittest |59.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/actors/core/ut/performance_ut.cpp |59.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/actors/core/ut/ask_ut.cpp |59.7%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/kqp/compile_service/ut/unittest |59.7%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/kqp/compile_service/ut/unittest |59.7%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/kqp/compile_service/ut/unittest |59.7%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/kqp/compile_service/ut/unittest |59.7%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/replication/controller/ut_target_discoverer/unittest |59.7%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/kqp/compile_service/ut/unittest |59.7%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/kqp/compile_service/ut/unittest |59.7%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/kqp/compile_service/ut/unittest |59.7%| [CC] 
{BAZEL_DOWNLOAD} $(S)/ydb/library/actors/core/ut/benchmark_ut.cpp |59.7%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/kqp/compile_service/ut/unittest |59.7%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/kqp/compile_service/ut/unittest |59.8%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/kqp/compile_service/ut/unittest |59.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/actors/core/ut/log_ut.cpp |59.8%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/library/yql/dq/runtime/ut/unittest |59.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/actors/core/ut/actor_ut.cpp |59.8%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/blobstorage/ut_vdisk_io/unittest |59.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/actors/core/ut/executor_pools_ut.cpp |59.8%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/kqp/compile_service/ut/unittest |59.7%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/control/ut/unittest |59.8%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/control/ut/unittest |59.8%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/kqp/compile_service/ut/unittest |59.8%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/control/ut/unittest |59.8%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/kqp/compile_service/ut/unittest |59.8%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/kqp/compile_service/ut/unittest |59.8%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/control/ut/unittest |59.8%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/kqp/compile_service/ut/unittest |59.9%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/control/ut/unittest |59.9%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/control/ut/unittest |59.7%| [LD] {BAZEL_DOWNLOAD} $(B)/ydb/core/transfer/ut/row_table/ydb-core-transfer-ut-row_table |59.7%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/control/ut/unittest |59.8%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/blobstorage/backpressure/ut/unittest |59.8%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/library/yql/dq/runtime/ut/unittest |59.8%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/kqp/compile_service/ut/unittest |59.8%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/kqp/compile_service/ut/unittest |59.8%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/kqp/compile_service/ut/unittest |59.8%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/kqp/compile_service/ut/unittest |59.8%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/kqp/compile_service/ut/unittest |59.8%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/blobstorage/ut_vdisk_io/unittest |59.8%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/kqp/compile_service/ut/unittest |59.9%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/kqp/compile_service/ut/unittest |59.9%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/library/yql/dq/runtime/ut/unittest |59.9%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/library/yql/dq/runtime/ut/unittest |59.9%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/services/fq/ut_integration/unittest |59.9%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/library/yql/dq/runtime/ut/unittest |59.9%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/services/fq/ut_integration/unittest |59.9%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/blobstorage/backpressure/ut/unittest |59.9%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/kqp/compile_service/ut/unittest |59.9%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/kqp/compile_service/ut/unittest |59.9%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/kqp/compile_service/ut/unittest |59.9%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/kqp/proxy_service/ut/unittest |59.9%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/library/yql/dq/runtime/ut/unittest |59.9%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/kqp/proxy_service/ut/unittest |59.9%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/blobstorage/backpressure/ut/unittest |59.9%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/kqp/compile_service/ut/unittest 
|59.9%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/kqp/compile_service/ut/unittest |59.9%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/kqp/compile_service/ut/unittest |59.9%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/blobstorage/backpressure/ut/unittest |60.0%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/blobstorage/backpressure/ut/unittest |60.0%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/tx_proxy/ut_encrypted_storage/unittest |60.0%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/blobstorage/ut_vdisk_io/unittest |60.0%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/blobstorage/ut_vdisk_io/unittest |60.0%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/kesus/tablet/ut/unittest |60.0%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/tx_proxy/ut_encrypted_storage/unittest |59.9%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/tx_proxy/ut_encrypted_storage/unittest |59.9%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/tx_proxy/ut_encrypted_storage/unittest |59.9%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/tx_proxy/ut_encrypted_storage/unittest |59.9%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/blobstorage/backpressure/ut/unittest |59.9%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/kesus/tablet/ut/unittest |60.0%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/blobstorage/backpressure/ut/unittest |60.0%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/services/fq/ut_integration/unittest |60.0%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/schemeshard/ut_user_attributes/unittest |60.0%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/services/fq/ut_integration/unittest |60.0%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/blobstorage/backpressure/ut/unittest |60.0%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/services/fq/ut_integration/unittest |60.0%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/services/fq/ut_integration/unittest |60.0%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/columnshard/engines/ut/unittest |60.0%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/library/yql/dq/runtime/ut/unittest |60.0%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/services/fq/ut_integration/unittest |60.0%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/services/fq/ut_integration/unittest |60.0%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/kqp/compile_service/ut/unittest |60.0%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/kesus/tablet/ut/unittest |60.0%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/kqp/compile_service/ut/unittest |60.0%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/kesus/tablet/ut/unittest |60.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/ut_external_table/ut_external_table.cpp |60.0%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/kqp/compile_service/ut/unittest |60.0%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/kesus/tablet/ut/unittest |60.0%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/kqp/ut/perf/unittest |60.0%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/kesus/tablet/ut/unittest |60.0%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/kesus/tablet/ut/unittest |60.0%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/blobstorage/backpressure/ut/unittest |60.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/ut_index_build/ut_index_build.cpp |60.0%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/kesus/tablet/ut/unittest |60.0%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/blobstorage/ut_vdisk_io/unittest |60.0%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/kesus/tablet/ut/unittest |60.0%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/grpc_services/ut/unittest |60.0%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/kqp/ut/perf/unittest |60.0%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/kqp/compile_service/ut/unittest |60.0%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/kqp/ut/perf/unittest |60.0%| [TM] {BAZEL_DOWNLOAD, FAILED} 
ydb/core/kesus/tablet/ut/unittest |60.0%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/kqp/compile_service/ut/unittest |60.0%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/kqp/ut/perf/unittest |60.0%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/kqp/ut/pg/unittest |60.1%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/kqp/proxy_service/ut/unittest |60.0%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/kesus/tablet/ut/unittest |60.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/tools/query_replay/query_proccessor.cpp |60.0%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/columnshard/engines/ut/unittest |60.0%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/datashard/ut_incremental_backup/unittest |60.0%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/kqp/ut/data/unittest |60.0%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/kesus/tablet/ut/unittest |60.1%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/kqp/ut/data/unittest |60.1%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/kqp/ut/data/unittest |60.1%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/kesus/tablet/ut/unittest |60.1%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/kesus/tablet/ut/unittest |60.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/ut_topic_splitmerge/ut_topic_splitmerge.cpp |60.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/ut_index_build/ut_schemeshard_build_index_helpers.cpp |60.0%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/schemeshard/ut_user_attributes/unittest |60.0%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/kqp/ut/perf/unittest |60.0%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/schemeshard/ut_user_attributes/unittest |60.0%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/schemeshard/ut_user_attributes/unittest |60.0%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/schemeshard/ut_user_attributes/unittest |60.0%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/kqp/ut/pg/unittest |60.0%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/kqp/ut/perf/unittest |60.0%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/kqp/ut/perf/unittest |60.1%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/blobstorage/ut_vdisk_io/unittest |60.1%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/kqp/ut/perf/unittest |60.1%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/columnshard/engines/ut/unittest |60.1%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/columnshard/engines/ut/unittest |60.1%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/columnshard/engines/ut/unittest |60.1%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/columnshard/engines/ut/unittest |60.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/tools/query_replay/main.cpp |60.1%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/columnshard/engines/ut/unittest |60.1%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/columnshard/engines/ut/unittest |60.1%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/columnshard/engines/ut/unittest |60.1%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/columnshard/engines/ut/unittest |60.1%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/columnshard/engines/ut/unittest |60.1%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/schemeshard/ut_user_attributes/unittest |60.1%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/blobstorage/ut_vdisk_io/unittest |60.0%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/blobstorage/ut_blobstorage/ut_cluster_balancing/unittest |60.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/ut_index_build/ut_fulltext_build.cpp |60.0%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/blobstorage/ut_blobstorage/ut_cluster_balancing/unittest |60.0%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/blobstorage/ut_blobstorage/ut_stop_pdisk/unittest |60.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/tools/query_replay/query_compiler.cpp |60.1%| [TM] {BAZEL_DOWNLOAD, FAILED} 
ydb/core/blobstorage/ut_blobstorage/ut_stop_pdisk/unittest |60.1%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/library/yql/dq/actors/spilling/ut/unittest |60.1%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/grpc_services/ut/unittest |60.1%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/grpc_services/ut/unittest |60.1%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/grpc_services/ut/unittest |60.1%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/schemeshard/ut_user_attributes/unittest |60.1%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/library/yql/dq/actors/spilling/ut/unittest |60.1%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/library/yql/dq/actors/spilling/ut/unittest |60.1%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/stress/transfer/tests/objcopy_22b5b8dd6ea05f4194f60e6181.o |60.1%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/library/yql/dq/actors/spilling/ut/unittest |60.1%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/grpc_services/ut/unittest |60.1%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/library/yql/dq/actors/spilling/ut/unittest |60.1%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/blobstorage/ut_blobstorage/ut_stop_pdisk/unittest |60.1%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/blobstorage/ut_blobstorage/ut_stop_pdisk/unittest |60.1%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/library/yql/dq/actors/spilling/ut/unittest |60.1%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/kesus/tablet/ut/unittest |60.1%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/kqp/ut/perf/unittest |60.1%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/kqp/proxy_service/ut/unittest |60.1%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/kqp/proxy_service/ut/unittest |60.1%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/kqp/ut/pg/unittest |60.1%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/kqp/proxy_service/ut/unittest |60.1%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/blobstorage/ut_vdisk_io/unittest |60.2%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/blobstorage/ut_vdisk_io/unittest |60.2%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/kqp/proxy_service/ut/unittest |60.2%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/replication/controller/ut_target_discoverer/unittest |60.2%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/replication/controller/ut_target_discoverer/unittest |60.2%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/kqp/ut/perf/unittest |60.2%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/replication/controller/ut_target_discoverer/unittest |60.1%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/columnshard/engines/ut/unittest |60.1%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/blobstorage/ut_blobstorage/ut_cluster_balancing/unittest |60.1%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/blobstorage/ut_blobstorage/ut_cluster_balancing/unittest |60.2%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/blobstorage/ut_blobstorage/ut_cluster_balancing/unittest |60.2%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/blobstorage/ut_blobstorage/ut_cluster_balancing/unittest |60.2%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/blobstorage/ut_blobstorage/ut_cluster_balancing/unittest |60.2%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/olap/oom/py3test |60.2%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/kqp/ut/perf/unittest |60.2%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/blobstorage/vdisk/hulldb/compstrat/ut/unittest |60.2%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/kqp/ut/perf/unittest |60.2%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/blobstorage/backpressure/ut/unittest |60.2%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/library/yql/dq/runtime/ut/unittest |60.2%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/library/yql/dq/runtime/ut/unittest |60.2%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/blobstorage/vdisk/hulldb/compstrat/ut/unittest |60.2%| [TM] 
{BAZEL_DOWNLOAD, FAILED} ydb/core/blobstorage/vdisk/hulldb/compstrat/ut/unittest |60.2%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/blobstorage/vdisk/hulldb/compstrat/ut/unittest |60.2%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/library/yql/dq/runtime/ut/unittest |60.2%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/kqp/compile_service/ut/unittest |60.2%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/tx_proxy/ut_encrypted_storage/unittest |60.2%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/tx_proxy/ut_encrypted_storage/unittest |60.2%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/tx_proxy/ut_encrypted_storage/unittest |60.2%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/blobstorage/vdisk/hulldb/compstrat/ut/unittest |60.2%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/tx_proxy/ut_encrypted_storage/unittest |60.2%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/kqp/ut/data/unittest |60.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/blobstorage/incrhuge/ut/incrhuge_id_dict_ut.cpp |60.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/blobstorage/incrhuge/ut/incrhuge_basic_ut.cpp |60.2%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/datashard/ut_incremental_backup/unittest |60.2%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/datashard/ut_incremental_backup/unittest |60.2%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/kqp/ut/data/unittest |60.2%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/kqp/ut/data/unittest |60.2%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/datashard/ut_kqp_errors/unittest |60.2%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/columnshard/engines/ut/unittest |60.2%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/columnshard/engines/ut/unittest |60.2%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/columnshard/engines/ut/unittest |60.2%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/columnshard/engines/ut/unittest |60.2%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/kqp/ut/perf/unittest |60.2%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/kqp/compile_service/ut/unittest |60.2%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/kqp/compile_service/ut/unittest |60.2%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/tx_proxy/ut_encrypted_storage/unittest |60.3%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/kqp/compile_service/ut/unittest |60.2%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/kqp/compile_service/ut/unittest |60.2%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/kqp/compile_service/ut/unittest |60.2%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/kqp/ut/perf/unittest |60.2%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/kqp/compile_service/ut/unittest |60.2%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/kqp/compile_service/ut/unittest |60.2%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/kqp/compile_service/ut/unittest |60.2%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/kqp/compile_service/ut/unittest |60.2%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/kqp/compile_service/ut/unittest |60.2%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/kqp/compile_service/ut/unittest |60.2%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/kqp/compile_service/ut/unittest |60.2%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/kqp/ut/perf/unittest |60.2%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/kqp/ut/perf/unittest |60.2%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/kqp/ut/perf/unittest |60.2%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/kqp/ut/perf/unittest |60.3%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/kqp/ut/perf/unittest |60.3%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/kqp/ut/perf/unittest |60.3%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/kesus/tablet/ut/unittest |60.3%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/columnshard/engines/ut/unittest |60.3%| [TM] {BAZEL_DOWNLOAD, FAILED} 
ydb/core/kqp/ut/perf/unittest |60.3%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/kqp/ut/perf/unittest |60.3%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/kqp/ut/perf/unittest |60.3%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/kqp/ut/perf/unittest |60.3%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/kqp/ut/perf/unittest |60.3%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/kqp/ut/perf/unittest |60.3%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/kqp/ut/perf/unittest |60.3%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/kqp/ut/perf/unittest |60.2%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/kqp/ut/perf/unittest |60.2%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/kqp/ut/perf/unittest |60.2%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/kqp/ut/perf/unittest |60.2%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/kqp/ut/perf/unittest |60.2%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/kqp/ut/perf/unittest |60.2%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/kqp/ut/perf/unittest |60.2%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/columnshard/engines/ut/unittest |60.2%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/kqp/ut/perf/unittest |60.2%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/fq/libs/common/ut/unittest |60.2%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/columnshard/engines/ut/unittest |60.3%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/columnshard/engines/ut/unittest |60.3%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/columnshard/engines/ut/unittest |60.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kesus/tablet/quoter_performance_test/main.cpp |60.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/actors/dnsresolver/dnsresolver_ondemand_ut.cpp |60.3%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/columnshard/engines/ut/unittest |60.3%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/columnshard/engines/ut/unittest |60.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/actors/dnsresolver/dnsresolver_ut.cpp |60.3%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/columnshard/engines/ut/unittest |60.3%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/stress/transfer/tests/objcopy_60a4829fdc305e3a74a7ddcb41.o |60.3%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/columnshard/engines/ut/unittest |60.3%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/columnshard/engines/ut/unittest |60.3%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/columnshard/engines/ut/unittest |60.3%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/columnshard/engines/ut/unittest |60.3%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/columnshard/engines/ut/unittest |60.0%| [TA] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/tx_proxy/ut_schemereq/test-results/unittest/{meta.json ... results_accumulator.log} |60.0%| [TA] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/blobstorage/ut_blobstorage/ut_vdisk_restart/test-results/unittest/{meta.json ... results_accumulator.log} |60.0%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/kesus/tablet/ut/ydb-core-kesus-tablet-ut |60.1%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/datashard/ut_background_compaction/unittest |60.1%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/stress/result_set_format/result_set_format |60.1%| [TA] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/library/actors/testlib/ut/test-results/unittest/{meta.json ... 
results_accumulator.log} |60.1%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/blobstorage/ut_blobstorage/ydb-core-blobstorage-ut_blobstorage |60.1%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/stress/viewer/tests/py3test |60.1%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/stress/topic/workload_topic |60.1%| [TS] {BAZEL_DOWNLOAD, FAILED} ydb/library/formats/arrow/ut/unittest |60.2%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/tools/kqprun/tests/py3test |59.9%| [TS] {BAZEL_DOWNLOAD, FAILED} ydb/library/workload/tpch/ut/unittest |60.0%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/schemeshard/ut_system_names/ydb-core-tx-schemeshard-ut_system_names |60.0%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/blobstorage/ut_blobstorage/ut_stop_pdisk/ydb-core-blobstorage-ut_blobstorage-ut_stop_pdisk |60.0%| [TA] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/kqp/ut/close_with_load/test-results/unittest/{meta.json ... results_accumulator.log} |60.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kqp/ut/join/kqp_benches_ut.cpp |60.0%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/library/range_treap/ut_range_treap/ydb-library-range_treap-ut_range_treap |60.0%| [TA] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/kqp/ut/federated_query/generic_ut/test-results/unittest/{meta.json ... results_accumulator.log} |60.0%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/functional/kqp/kqp_query_session/unittest |60.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kqp/ut/join/kqp_complex_join_query_ut.cpp |59.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/library/actors/core/event_pb.cpp |59.9%| [TA] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/schemeshard/ut_export/test-results/unittest/{meta.json ... results_accumulator.log} |59.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/tx_proxy/schemereq_ut.cpp |59.9%| [TS] {BAZEL_DOWNLOAD, FAILED} ydb/core/ymq/base/ut/unittest |59.9%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/olap/column_family/compression/ydb-tests-olap-column_family-compression |59.9%| [TA] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/schemeshard/ut_replication/test-results/unittest/{meta.json ... results_accumulator.log} |59.9%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/functional/wardens/ydb-tests-functional-wardens |59.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/http_proxy/ut/sqs_topic_ut/inside_ydb_ut.cpp |60.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kqp/ut/join/kqp_join_topology_generator.cpp |60.0%| [TA] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/kqp/common/result_set_format/ut/test-results/unittest/{meta.json ... results_accumulator.log} |60.0%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/stress/statistics_workload/objcopy_b4ebb94deb4cea673457b77fcc.o |60.0%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/tests/stress/statistics_workload/workload/libpy3stress-statistics_workload-workload.global.a |60.0%| [TA] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/fq/libs/result_formatter/ut/test-results/unittest/{meta.json ... 
results_accumulator.log} |60.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kqp/ut/join/kqp_index_lookup_join_ut.cpp |60.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/testlib/actors/test_runtime_ut.cpp |60.0%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/mind/hive/ut/ydb-core-mind-hive-ut |59.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kqp/ut/join/kqp_flip_join_ut.cpp |59.8%| [TS] {BAZEL_DOWNLOAD, FAILED} ydb/core/blobstorage/vdisk/defrag/ut/unittest |59.8%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/stability/tool/objcopy_7406de026bf25e30e96a88517d.o |59.8%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/testing/benchmark/libcpp-testing-benchmark.a |59.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/tools/stress_tool/device_test_tool_ut.cpp |59.9%| [TS] {BAZEL_DOWNLOAD, FAILED} ydb/library/actors/wilson/ut/unittest |59.9%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/functional/query_cache/py3test |59.9%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/kqp/tests/kikimr_tpch/unittest |59.9%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/tests/stress/transfer/workload/libpy3stress-transfer-workload.global.a |59.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/util/btree_benchmark/main.cpp |59.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kqp/ut/scheme/kqp_secrets_ut.cpp |59.9%| [TA] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/schemeshard/ut_resource_pool/test-results/unittest/{meta.json ... results_accumulator.log} |60.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/ut_move_reboots/ut_move_reboots.cpp |60.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kqp/ut/join/kqp_join_order_ut.cpp |60.0%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/columnshard/ut_rw/ydb-core-tx-columnshard-ut_rw |60.0%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/schemeshard/ut_topic_set_boundaries/ydb-core-tx-schemeshard-ut_topic_set_boundaries |60.0%| [TA] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/blobstorage/ut_blobstorage/ut_check_integrity/test-results/unittest/{meta.json ... results_accumulator.log} |59.8%| [TA] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/fq/libs/actors/ut/test-results/unittest/{meta.json ... 
results_accumulator.log} |59.8%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/stress/transfer/tests/objcopy_76cd981cf66123b7633d25b898.o |59.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/tests/tools/fqrun/fqrun.cpp |59.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/mind/ut_fat/blobstorage_node_warden_ut_fat.cpp |59.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/providers/generic/provider/ut/pushdown/pushdown_ut.cpp |59.8%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/library/yaml_config/ut_transform/py3test |59.8%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/stress/ctas/tests/py3test |59.8%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/kqp/ut/federated_query/s3/ydb-core-kqp-ut-federated_query-s3 |59.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kqp/ut/join/kqp_join_ut.cpp |59.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/scheme_board/ut_helpers.cpp |59.8%| [BN] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/stress/kv/workload/ydb_cli |59.8%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/library/yql/providers/s3/actors/ut/ydb-library-yql-providers-s3-actors-ut |59.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/ut_base_reboots/ut_base_reboots.cpp |59.9%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/schemeshard/ut_compaction/ydb-core-tx-schemeshard-ut_compaction |59.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/ut_continuous_backup_reboots/ut_continuous_backup_reboots.cpp |59.9%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tools/cfg/bin/ydb_configure |59.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/http_proxy/ut/sqs_topic_ut.cpp |59.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kqp/ut/scheme/kqp_scheme_ut.cpp |59.9%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/kqp/tools/join_perf/bin/join_perf |59.9%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/datashard/add_column/py3test |59.9%| [TA] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/schemeshard/ut_rtmr/test-results/unittest/{meta.json ... results_accumulator.log} |59.5%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/blobstorage/nodewarden/ut_sequence/ydb-core-blobstorage-nodewarden-ut_sequence |59.5%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/blobstorage/ut_blobstorage/ut_read_only_pdisk/ut_blobstorage-ut_read_only_pdisk |59.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kqp/ut/idx_test/ydb_index_ut.cpp |59.5%| [TA] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/kqp/ut/batch_operations/test-results/unittest/{meta.json ... results_accumulator.log} |59.5%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/stress/olap_workload/tests/objcopy_e68ca1a2fa9943132c020ae028.o |59.5%| [TA] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/replication/ydb_proxy/ut/test-results/unittest/{meta.json ... results_accumulator.log} |59.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kqp/ut/scheme/kqp_user_management_ut.cpp |59.5%| [TA] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/columnshard/ut_rw/test-results/unittest/{meta.json ... 
results_accumulator.log} |59.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/actors/dnsresolver/dnsresolver_caching_ut.cpp |59.5%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/stress/oltp_workload/tests/objcopy_367e2bc5d83faa0907a06d2976.o |59.6%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/stress/oltp_workload/tests/objcopy_0446f521b26a2e8128f94ac50f.o |59.6%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/stress/oltp_workload/tests/objcopy_49a1ca9559288648fba9cf7b65.o |59.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tablet_flat/test/tool/perf/colons.cpp |59.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/ut_backup_collection/ut_backup_collection.cpp |59.6%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/tx_proxy/ut_schemereq/unittest |59.6%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/blobstorage/ut_blobstorage/ut_vdisk_restart/unittest |59.6%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/tests/tools/fqrun/src/libtools-fqrun-src.a |59.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/tools/query_replay/query_replay.cpp |59.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/ut_export_reboots_s3/ut_export_reboots_s3.cpp |59.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tablet_flat/ut_pg/flat_database_pg_ut.cpp |59.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/ut_blobstorage/snapshots.cpp |59.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kqp/ut/scheme/kqp_acl_ut.cpp |59.6%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/ut_blobstorage/group_reconfiguration.cpp |59.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/ut_blobstorage/get_block.cpp |59.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/ut_blobstorage/get.cpp |59.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/public/sdk/cpp/src/client/federated_topic/ut/basic_usage_ut.cpp |59.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/lib/idx_test/libpublic-lib-idx_test.a |59.7%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/kqp/ut/federated_query/generic_ut/unittest |59.7%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/kqp/ut/close_with_load/unittest |59.8%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/schemeshard/ut_export/unittest |59.7%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/schemeshard/ut_replication/unittest |59.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kesus/tablet/ut_helpers.cpp |59.7%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/fq/libs/result_formatter/ut/unittest |59.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/ut_index_build/ut_vector_index_build.cpp |59.8%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/protos/ydb_formats.pb.{h, cc} |59.7%| [PB] {BAZEL_DOWNLOAD} $(B)/yql/essentials/utils/log/proto/logger_config.pb.{h, cc} |59.8%| [CC] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/comp_nodes/llvm16/mkql_append.cpp |59.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/datashard/datashard_ut_snapshot_isolation.cpp |59.8%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/keyvalue/protos/events.pb.{h, cc} |59.8%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/schemeshard/ut_resource_pool/unittest |59.8%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/blobstorage/ut_blobstorage/ut_check_integrity/unittest |59.9%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/library/formats/arrow/protos/fields.pb.{h, cc} |59.9%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/columnshard/ut_rw/unittest |59.8%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/fq/libs/actors/ut/unittest |59.8%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/replication/ydb_proxy/ut/unittest |59.8%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/schemeshard/ut_rtmr/unittest |59.9%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/library/db_pool/protos/config.pb.{h, cc} |59.9%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/config/protos/db_pool.pb.{h, cc} |60.0%| [PB] {BAZEL_DOWNLOAD} $(B)/library/cpp/messagebus/monitoring/mon_proto.pb.{h, cc} |60.0%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/config/protos/private_api.pb.{h, cc} |59.9%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/config/protos/activation.pb.{h, cc} |59.9%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/config/protos/control_plane_proxy.pb.{h, cc} |59.9%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/config/protos/checkpoint_coordinator.pb.{h, cc} |59.9%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/config/protos/control_plane_storage.pb.{h, cc} |59.9%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/config/protos/compute.pb.{h, cc} |59.9%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/stress/topic/objcopy_e9d954952def1b899e1fb63731.o |59.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kesus/tablet/quoter_resource_tree_ut.cpp |60.0%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/config/protos/pending_fetcher.pb.{h, cc} |60.0%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/config/protos/pinger.pb.{h, cc} |60.0%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/config/protos/health_config.pb.{h, cc} |60.0%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/config/protos/read_actors_factory.pb.{h, cc} |59.9%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/library/mkql_proto/protos/minikql.{pb.h ... grpc.pb.h} |60.0%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/wardens/objcopy_488333b1ebd4c1d6d8ec5bcb8f.o |60.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/range_treap/ut_range_treap/range_treap_ut.cpp |60.0%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/wardens/objcopy_3db6af291678d4ac330517956a.o |60.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/mind/hive/storage_pool_info_ut.cpp |60.1%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/data_events.{pb.h ... grpc.pb.h} |60.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/mind/hive/sequencer_ut.cpp |60.1%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/kqp/ut/batch_operations/unittest |60.0%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/resource_broker.{pb.h ... grpc.pb.h} |60.0%| [PR] {BAZEL_DOWNLOAD} $(B)/ydb/core/kqp/expr_nodes/kqp_expr_nodes.{gen.h ... defs.inl.h} |60.1%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/key.{pb.h ... grpc.pb.h} |60.1%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/load_test.{pb.h ... grpc.pb.h} |60.1%| [PB] {BAZEL_DOWNLOAD} $(B)/contrib/libs/opentelemetry-proto/opentelemetry/proto/resource/v1/resource.{pb.h ... grpc.pb.h} |60.1%| [PB] {BAZEL_DOWNLOAD} $(B)/contrib/libs/opentelemetry-proto/opentelemetry/proto/trace/v1/trace.{pb.h ... grpc.pb.h} |60.1%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tools/cfg/bin/objcopy_940b9a794cb8fbc6ebdf926276.o |60.1%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/datashard_backup.{pb.h ... grpc.pb.h}
|60.2%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/serverless_proxy_config.{pb.h ... grpc.pb.h} |60.2%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/tools/cfg/k8s_api/libpy3tools-cfg-k8s_api.global.a |60.2%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/data_integrity_trails.{pb.h ... grpc.pb.h} |60.2%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/nbs/blockstore.{pb.h ... grpc.pb.h} |60.2%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/tools/cfg/walle/libpy3tools-cfg-walle.global.a |60.2%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/grpc_status_proxy.{pb.h ... grpc.pb.h} |60.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/providers/s3/actors/ut/yql_arrow_push_down_ut.cpp |60.2%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/tools/cfg/libpy3ydb-tools-cfg.global.a |60.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kqp/ut/federated_query/s3/s3_recipe_ut_helpers.cpp |60.2%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/index_builder.{pb.h ... grpc.pb.h} |60.2%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/counters_bs_controller.{pb.h ... grpc.pb.h} |60.2%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/blobstorage_distributed_config.{pb.h ... grpc.pb.h} |60.2%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/kqp/common/result_set_format/ut/unittest |60.3%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/console_config.{pb.h ... grpc.pb.h} |60.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/columnshard/ut_rw/ut_normalizer.cpp |60.3%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/requests-oauthlib/libpy3contrib-python-requests-oauthlib.global.a |60.3%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/pyrsistent/py3/libpy3python-pyrsistent-py3.global.a |60.3%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/cachetools/py3/libpy3python-cachetools-py3.global.a |60.3%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/jsonschema/py3/libpy3python-jsonschema-py3.global.a |60.4%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/olap/column_family/compression/objcopy_1ab2a5a6dd84a6c9ff5d5c50b0.o |60.4%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/ruamel.yaml.clib/py3/libpy3python-ruamel.yaml.clib-py3.a |60.4%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/tests/olap/common/libpy3tests-olap-common.global.a
|60.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kqp/ut/federated_query/s3/kqp_s3_plan_ut.cpp |60.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/ut_blobstorage/stop_pdisk.cpp |60.4%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/olap/column_family/compression/objcopy_3bdea7737a87c43bfaa0aaf4c3.o |60.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/ut_blobstorage/bsc_cache.cpp |60.4%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/olap/column_family/compression/objcopy_6887bde1dc99f5c5c2f0922842.o |60.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/ut_blobstorage/backpressure.cpp |60.5%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/allure-python-commons/libpy3contrib-python-allure-python-commons.global.a |60.5%| [TS] {BAZEL_DOWNLOAD, FAILED} ydb/library/actors/testlib/ut/unittest |60.5%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/pytz/py3/libpy3python-pytz-py3.global.a |60.5%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/googleapis-common-protos/libpy3contrib-libs-googleapis-common-protos.global.a |60.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/ut_blobstorage/ds_proxy_lwtrace.cpp |60.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/ut_blobstorage/decommit_3dc.cpp |60.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/ut_blobstorage/counting_events.cpp |60.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/ut_blobstorage/discover.cpp |60.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/ut_blobstorage/corrupted_reads.cpp |60.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/ut_compaction/ut_compaction.cpp |60.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/ut_blobstorage/defrag.cpp |60.5%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/allure-pytest/libpy3contrib-python-allure-pytest.global.a |60.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/ut_blobstorage/block_race.cpp |60.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/ut_blobstorage/assimilation.cpp |60.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/ut_blobstorage/acceleration.cpp |60.5%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/pyasn1/py3/libpy3python-pyasn1-py3.global.a |60.5%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/kubernetes/libpy3contrib-python-kubernetes.global.a |60.5%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/client/yc_public/iam/libpy3client-yc_public-iam.global.a |60.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/mind/hive/hive_ut.cpp |60.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kqp/ut/federated_query/s3/kqp_federated_query_ut.cpp |60.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/ut_blobstorage/deadlines.cpp |60.6%| [LD] {BAZEL_DOWNLOAD} $(B)/yql/essentials/tools/astdiff/astdiff |60.6%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/websocket-client/libpy3contrib-python-websocket-client.global.a |60.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/columnshard/ut_rw/ut_columnshard_read_write.cpp |60.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/ut_blobstorage/read_only_pdisk.cpp |60.7%| [LD] {BAZEL_DOWNLOAD} $(B)/ydb/tests/tools/s3_recipe/s3_recipe |60.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/client/yc_public/common/libpy3client-yc_public-common.global.a |60.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/tests/stress/result_set_format/libpy3result_set_format.global.a |60.7%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/numpy/py3/numpy/random/libpy3py3-numpy-random.global.a |60.7%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/pyarrow/libpy3contrib-python-pyarrow.global.a |60.8%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/stress/result_set_format/objcopy_ab0068aecc73dfe57505e1cc57.o |60.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/ut_blobstorage/gc_quorum_3dc.cpp |60.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/ut_blobstorage/gc.cpp |60.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/ut_blobstorage/encryption.cpp |60.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/ut_blobstorage/extra_block_checks.cpp |60.8%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/clapack/part2/liblibs-clapack-part2.a |60.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/ut_blobstorage/recovery.cpp |60.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/ut_blobstorage/main.cpp |60.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/ut_blobstorage/incorrect_queries.cpp |60.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/ut_blobstorage/monitoring.cpp |60.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/ut_blobstorage/index_restore_get.cpp |60.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/ut_blobstorage/mirror3of4.cpp |60.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/ut_blobstorage/group_size_in_units.cpp
|60.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/ut_blobstorage/mirror3dc.cpp |60.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/ut_blobstorage/self_heal.cpp |60.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/ut_blobstorage/sanitize_groups.cpp |60.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/ut_blobstorage/patch.cpp |60.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/ut_blobstorage/multiget.cpp |60.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/ut_blobstorage/scrub_fast.cpp |60.8%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/libf2c/libcontrib-libs-libf2c.a |60.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/ut_blobstorage/shred.cpp |60.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/columnshard/ut_rw/ut_backup.cpp |60.8%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/oauthlib/libpy3contrib-python-oauthlib.global.a |60.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/ut_blobstorage/validation.cpp |60.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/ut_blobstorage/vdisk_malfunction.cpp |60.8%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/pyasn1-modules/py3/libpy3python-pyasn1-modules-py3.global.a |60.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/ut_blobstorage/space_check.cpp |60.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/ut_blobstorage/cancellation.cpp |60.8%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/ruamel.yaml.clib/py3/libpy3python-ruamel.yaml.clib-py3.global.a |60.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/ut_blobstorage/sync.cpp |60.8%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/rsa/py3/libpy3python-rsa-py3.global.a |60.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/ut_topic_set_boundaries/ut_topic_set_boundaries.cpp |59.2%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/keyvalue/ut/ydb-core-keyvalue-ut |59.3%| [TA] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/datashard/select/test-results/py3test/{meta.json ... results_accumulator.log} |59.3%| [TA] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/datashard/partitioning/test-results/py3test/{meta.json ... results_accumulator.log} |59.3%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/cblas/libcontrib-libs-cblas.a |59.3%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/tools/cfg/bin/libpy3ydb_configure.global.a |59.3%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/ruamel.yaml/py3/libpy3python-ruamel.yaml-py3.global.a |59.4%| [TA] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/mind/ut_fat/test-results/unittest/{meta.json ...
results_accumulator.log} |59.4%| [BN] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/stress/topic/workload/ydb_cli |59.4%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/quoter/quoter_service_bandwidth_test/quoter_service_bandwidth_test |59.4%| [TA] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/schemeshard/ut_topic_set_boundaries/test-results/unittest/{meta.json ... results_accumulator.log} |59.4%| [TA] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/sys_view/service/ut/test-results/unittest/{meta.json ... results_accumulator.log} |59.4%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/blobstorage/vdisk/hulldb/compstrat/ut/ydb-core-blobstorage-vdisk-hulldb-compstrat-ut |59.5%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/tests/stress/result_set_format/workload/libpy3stress-result_set_format-workload.global.a |59.3%| [BN] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/stability/tool/olap_workload |59.3%| [TA] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/functional/audit/test-results/py3test/{meta.json ... results_accumulator.log} |59.3%| [BN] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/stability/tool/statistics_workload |59.3%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/kqp/ut/pg/ydb-core-kqp-ut-pg |59.3%| [BN] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/stability/tool/topic_kafka_workload |59.4%| [BN] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/stability/tool/ctas_workload |59.4%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/schemeshard/ut_stats/ydb-core-tx-schemeshard-ut_stats |59.4%| [BN] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/stability/tool/oltp_workload |59.4%| [LD] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/tools/solomon_emulator/bin/solomon_emulator |59.4%| [BN] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/stability/tool/transfer_workload |59.2%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/coordinator/ut/unittest |59.3%| [TA] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/library/persqueue/topic_parser/ut/test-results/unittest/{meta.json ... results_accumulator.log} |59.3%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/numpy/py3/numpy/random/libpy3py3-numpy-random.a |59.4%| [TS] {BAZEL_DOWNLOAD, FAILED} ydb/core/config/validation/auth_config_validator_ut/unittest |59.4%| [TA] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/functional/restarts/test-results/py3test/{meta.json ... results_accumulator.log} |59.4%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/cms/ut/ydb-core-cms-ut |59.4%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/cms/console/validators/ut/ydb-core-cms-console-validators-ut |59.4%| [TA] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/mind/bscontroller/ut_selfheal/test-results/unittest/{meta.json ... results_accumulator.log} |59.4%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/datashard/ut_vacuum/unittest |59.2%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/durationpy/libpy3contrib-python-durationpy.global.a |59.3%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/sys_view/ut_large/ydb-core-sys_view-ut_large |59.3%| [TA] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/library/ycloud/impl/ut/test-results/unittest/{meta.json ... results_accumulator.log} |59.3%| [BN] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/stability/tool/node_broker_workload |59.3%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/persqueue/ut/ydb-core-persqueue-ut |59.3%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/fq/libs/control_plane_storage/internal/ut/core-fq-libs-control_plane_storage-internal-ut |59.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kqp/tools/join_perf/bin/main.cpp |59.3%| [TA] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/replication/service/ut_worker/test-results/unittest/{meta.json ... 
results_accumulator.log} |59.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/blobstorage/nodewarden/ut_sequence/dsproxy_config_retrieval.cpp |59.3%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/tx_proxy/ut_ext_tenant/ydb-core-tx-tx_proxy-ut_ext_tenant |59.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kqp/ut/federated_query/s3/kqp_federated_scheme_ut.cpp |59.4%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/kqp/ut/batch_operations/ydb-core-kqp-ut-batch_operations |59.4%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/grpc_services/tablet/ut/unittest |59.3%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/stress/oltp_workload/oltp_workload |59.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/mind/hive/object_distribution_ut.cpp |59.3%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/sequenceproxy/ut/ydb-core-tx-sequenceproxy-ut |59.3%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/google-auth/py3/libpy3python-google-auth-py3.global.a |59.3%| [TS] {BAZEL_DOWNLOAD, FAILED} ydb/library/yql/providers/s3/common/ut/unittest |59.4%| [BN] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/stress/testshard_workload/workload/ydb_cli |59.4%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/kqp/federated_query/ut_service/fast/unittest |59.4%| [TA] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/schemeshard/ut_sequence/test-results/unittest/{meta.json ... results_accumulator.log} |59.4%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/schemeshard/ut_extsubdomain_reboots/ydb-core-tx-schemeshard-ut_extsubdomain_reboots |59.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/mind/hive/hive_impl_ut.cpp |59.4%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/wardens/objcopy_1555e67a3dd43a3e7f09bf8eee.o |59.4%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/blobstorage/ut_blobstorage/ut_huge/ydb-core-blobstorage-ut_blobstorage-ut_huge |59.4%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/datashard/ut_volatile/ydb-core-tx-datashard-ut_volatile |59.4%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/tools/kqprun/tests/ydb-tests-tools-kqprun-tests |59.4%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/functional/sqs/merge_split_common_table/std/py3test |59.4%| [TS] {BAZEL_DOWNLOAD, FAILED} ydb/core/formats/arrow/ut/unittest |59.4%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/kqp/ut/knn/ydb-core-kqp-ut-knn |59.4%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/tests/olap/lib/libpy3tests-olap-lib.global.a |59.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/mind/hive/scale_recommender_policy_ut.cpp |59.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/keyvalue/keyvalue_storage_read_request_ut.cpp |59.5%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/datashard/select/py3test |59.5%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/datashard/select/py3test |59.5%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/tests/olap/scenario/helpers/libpy3olap-scenario-helpers.global.a |59.4%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/datashard/select/py3test |59.4%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/datashard/select/py3test |59.4%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/datashard/select/py3test |59.5%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/mind/ut_fat/unittest |59.5%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/mind/ut_fat/unittest |59.5%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/tests/stress/topic/libpy3workload_topic.global.a |59.5%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/mind/ut_fat/unittest |59.5%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/tests/stress/result_set_format/workload/type/libpy3result_set_format-workload-type.global.a |59.5%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/sys_view/service/ut/unittest |59.5%| [CC] {BAZEL_DOWNLOAD} 
$(S)/ydb/core/tx/schemeshard/ut_system_names/ut_system_names.cpp |59.5%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/sys_view/service/ut/unittest |59.5%| [CC] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/comp_nodes/llvm16/mkql_size.cpp |59.5%| [TS] {BAZEL_DOWNLOAD, FAILED} ydb/library/persqueue/topic_parser/ut/unittest |59.6%| [CC] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/comp_nodes/llvm16/mkql_extend.cpp |59.6%| [TS] {BAZEL_DOWNLOAD, FAILED} ydb/library/persqueue/topic_parser/ut/unittest |59.6%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/functional/audit/py3test |59.6%| [CC] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/comp_nodes/llvm16/mkql_match_recognize_list.cpp |59.6%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/functional/audit/py3test |59.6%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/library/ycloud/impl/ut/unittest |59.6%| [PB] {BAZEL_DOWNLOAD} $(B)/yt/yt_proto/yt/client/node_tracker_client/proto/node_directory.pb.{h, cc} |59.6%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/graph/protos/graph.pb.{h, cc} |59.6%| [CC] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/comp_nodes/llvm16/mkql_block_just.cpp |59.6%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/replication/service/ut_worker/unittest |59.6%| [CC] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/comp_nodes/llvm16/mkql_addmember.cpp |59.6%| [CC] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/comp_nodes/llvm16/mkql_timezone.cpp |59.7%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/replication/service/ut_worker/unittest |59.7%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/config/protos/common.pb.{h, cc} |59.7%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/replication/service/ut_worker/unittest |59.6%| [CC] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/comp_nodes/llvm16/mkql_callable.cpp |59.6%| [CC] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/comp_nodes/llvm16/mkql_block_skiptake.cpp |59.6%| [CC] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/comp_nodes/llvm16/mkql_dynamic_variant.cpp |59.7%| [CC] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/comp_nodes/llvm16/mkql_flow.cpp |59.7%| [CC] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/comp_nodes/llvm16/mkql_dictitems.cpp |59.7%| [CC] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/comp_nodes/llvm16/mkql_exists.cpp |59.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kesus/tablet/ut_helpers.cpp |59.7%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/library/ycloud/impl/ut/unittest |59.7%| [CC] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/comp_nodes/llvm16/mkql_block_agg_factory.cpp |59.7%| [LD] {BAZEL_DOWNLOAD} $(B)/ydb/public/sdk/cpp/tests/integration/server_restart/public-sdk-cpp-tests-integration-server_restart |59.7%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/schemeshard/ut_sequence/unittest |59.7%| [CC] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/comp_nodes/llvm16/mkql_factory.cpp |59.7%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/dq/actors/protos/dq_stats.pb.{h, cc} |59.7%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/schemeshard/ut_sequence/unittest |59.7%| [CC] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/comp_nodes/llvm16/mkql_frombytes.cpp |59.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/ut_blobstorage/huge.cpp |59.7%| [CC] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/comp_nodes/llvm16/mkql_iterator.cpp |59.8%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/numpy/py3/libpy3python-numpy-py3.global.a |59.7%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/library/ycloud/impl/ut/unittest |59.7%| [CC] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/comp_nodes/llvm16/mkql_heap.cpp |59.7%| [CC] {BAZEL_DOWNLOAD} 
$(B)/yql/essentials/minikql/comp_nodes/llvm16/mkql_length.cpp |59.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kesus/tablet/tablet_ut.cpp |59.7%| [CC] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/comp_nodes/llvm16/mkql_block_getelem.cpp |59.7%| [CC] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/comp_nodes/llvm16/mkql_fold1.cpp |59.7%| [CC] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/comp_nodes/llvm16/mkql_block_container.cpp |59.7%| [CC] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/comp_nodes/llvm16/mkql_match_recognize_rows_formatter.cpp |59.7%| [CC] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/comp_nodes/llvm16/mkql_filter.cpp |59.7%| [CC] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/comp_nodes/llvm16/mkql_decimal_mul.cpp |59.7%| [CC] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/comp_nodes/llvm16/mkql_flatmap.cpp |59.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/keyvalue/keyvalue_collector_ut.cpp |59.8%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/pyarrow/libpy3contrib-python-pyarrow.a |59.8%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/functional/restarts/py3test |59.8%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/functional/audit/py3test |59.8%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/library/aclib/protos/aclib.pb.{h, cc} |59.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/quoter/quoter_service_bandwidth_test/server.cpp |59.8%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/blobstorage_disk.{pb.h ... grpc.pb.h} |59.8%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/apache/arrow/cpp/src/arrow/python/libpy3src-arrow-python.a |59.8%| [CC] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/comp_nodes/llvm16/mkql_block_top.cpp |59.9%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/bridge.{pb.h ... grpc.pb.h} |59.9%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/grpc/draft/ydb_backup_v1.{pb.h ... grpc.pb.h} |59.9%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/export.{pb.h ... grpc.pb.h} |59.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/fq/libs/control_plane_storage/internal/ut/utils_ut.cpp |59.8%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/scheme_log.{pb.h ... grpc.pb.h} |59.8%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/datashard_config.{pb.h ... grpc.pb.h} |59.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/sys_view/ut_large.cpp |59.8%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/grpc/draft/ydb_replication_v1.{pb.h ... grpc.pb.h} |59.9%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/cms.{pb.h ... grpc.pb.h} |59.9%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/replication.{pb.h ... grpc.pb.h} |59.9%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/test_shard.{pb.h ... grpc.pb.h} |59.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/persqueue/ut/partition_scale_manager_graph_cmp_ut.cpp |59.9%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/recoveryshard_config.{pb.h ... grpc.pb.h} |59.9%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/msgbus_pq.{pb.h ... grpc.pb.h} |59.9%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/local.{pb.h ... grpc.pb.h} |59.9%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/tx_scheme.{pb.h ... grpc.pb.h} |59.9%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/filestore_config.{pb.h ... grpc.pb.h} |59.9%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/db_metadata_cache.{pb.h ... grpc.pb.h} |59.9%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/feature_flags.{pb.h ... grpc.pb.h} |60.0%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/sys_view_types.{pb.h ... grpc.pb.h} |59.8%| [LD] {BAZEL_DOWNLOAD} $(B)/yql/essentials/udfs/common/pire/libpire_udf.so |59.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kqp/ut/batch_operations/kqp_batch_delete_ut.cpp |59.9%| [LD] {BAZEL_DOWNLOAD} $(B)/yql/essentials/udfs/common/re2/libre2_udf.so |59.9%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/node_whiteboard.{pb.h ... grpc.pb.h} |59.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/persqueue/ut/pqrb_describes_ut.cpp |60.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kqp/ut/knn/kqp_knn_ut.cpp |60.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/ut_extsubdomain_reboots/ut_extsubdomain_reboots.cpp |60.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/cms/cms_maintenance_api_ut.cpp |60.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/keyvalue/keyvalue_ut.cpp |60.0%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/clapack/part1/liblibs-clapack-part1.a |60.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/cms/cluster_info_ut.cpp |60.0%| [LD] {BAZEL_DOWNLOAD} $(B)/ydb/tests/fq/solomon/ydb-tests-fq-solomon |60.1%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/flat_scheme_op.{pb.h ... grpc.pb.h}
|60.2%| [LD] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/tests/sql/solomon/ydb-library-yql-tests-sql-solomon |60.2%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/numpy/py3/libpy3python-numpy-py3.a |60.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/datashard/datashard_ut_volatile.cpp
|60.6%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/functional/audit/py3test |60.6%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/functional/audit/py3test |60.6%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/functional/audit/py3test |60.6%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/functional/audit/py3test |60.6%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/functional/audit/py3test |60.6%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/functional/audit/py3test |60.5%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/functional/audit/py3test |60.5%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/functional/audit/py3test |60.5%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/functional/audit/py3test |60.5%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/functional/audit/py3test |60.5%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/functional/audit/py3test |60.5%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/functional/audit/py3test |60.5%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/functional/audit/py3test |60.5%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/functional/audit/py3test |60.5%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/functional/audit/py3test |60.5%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/functional/audit/py3test |60.6%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/functional/audit/py3test |60.6%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/functional/audit/py3test |60.6%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/functional/audit/py3test |60.6%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/functional/audit/py3test |60.6%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/functional/audit/py3test |60.6%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/functional/audit/py3test |60.6%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/functional/audit/py3test |60.6%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/functional/audit/py3test |60.6%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/functional/audit/py3test |60.6%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/functional/audit/py3test |60.6%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/functional/audit/py3test |60.7%| [LD] {BAZEL_DOWNLOAD} $(B)/ydb/library/yaml_config/tools/simple_json_diff/simple_json_diff |60.7%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/functional/audit/py3test |60.7%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/functional/audit/py3test |60.7%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/functional/audit/py3test |60.7%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/functional/audit/py3test |60.5%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/functional/audit/py3test |60.5%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/functional/audit/py3test |60.6%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/functional/audit/py3test |60.6%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/functional/audit/py3test |60.6%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/functional/audit/py3test |60.6%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/functional/audit/py3test |60.6%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/functional/audit/py3test |60.6%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/functional/audit/py3test |60.6%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/functional/audit/py3test |60.6%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/functional/audit/py3test |60.6%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/functional/audit/py3test |60.6%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/functional/audit/py3test |60.6%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/functional/audit/py3test |60.6%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/functional/audit/py3test |60.6%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/functional/audit/py3test |60.6%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/functional/audit/py3test |60.6%| [TM] 
{BAZEL_DOWNLOAD, FAILED} ydb/tests/functional/audit/py3test |60.7%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/functional/audit/py3test |60.7%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/functional/audit/py3test |60.7%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/functional/audit/py3test |60.7%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/functional/audit/py3test |60.7%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/functional/audit/py3test |60.7%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/functional/audit/py3test |60.7%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/functional/audit/py3test |60.7%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/functional/audit/py3test |60.7%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/functional/audit/py3test |60.8%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/functional/audit/py3test |60.8%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/functional/audit/py3test |60.8%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/functional/audit/py3test |60.8%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/functional/audit/py3test |60.8%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/functional/audit/py3test |60.8%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/functional/audit/py3test |60.6%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/functional/audit/py3test |60.6%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/functional/audit/py3test |60.7%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/functional/audit/py3test |60.7%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/functional/audit/py3test |60.7%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/functional/audit/py3test |60.7%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/functional/audit/py3test |60.7%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/functional/audit/py3test |60.7%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/functional/audit/py3test |60.7%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/functional/audit/py3test |60.7%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/functional/audit/py3test |60.7%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/functional/audit/py3test |60.7%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/functional/audit/py3test |60.7%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/functional/audit/py3test |60.7%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/functional/audit/py3test |60.7%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/functional/audit/py3test |60.7%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/functional/audit/py3test |60.8%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/functional/audit/py3test |60.8%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/functional/audit/py3test |60.8%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/functional/audit/py3test |60.8%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/functional/audit/py3test |60.8%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/functional/audit/py3test |60.8%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/functional/audit/py3test |60.8%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/functional/audit/py3test |60.8%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/functional/audit/py3test |60.8%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/functional/audit/py3test |60.8%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/functional/audit/py3test |60.8%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/functional/audit/py3test |60.8%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/functional/audit/py3test |60.8%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/functional/audit/py3test |60.9%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/functional/audit/py3test |60.9%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/functional/audit/py3test |60.9%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/functional/audit/py3test |60.9%| [TM] {BAZEL_DOWNLOAD, FAILED} 
|60.9%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/mind/ut_fat/unittest
|60.9%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/datashard/select/py3test
|61.0%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/schemeshard/ut_topic_set_boundaries/unittest
|61.1%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/sys_view/service/ut/unittest
|61.1%| [LD] {BAZEL_DOWNLOAD} $(B)/ydb/public/sdk/cpp/tests/integration/basic_example/public-sdk-cpp-tests-integration-basic_example
|61.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/quoter/quoter_service_bandwidth_test/quota_requester.cpp
|61.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/quoter/quoter_service_bandwidth_test/main.cpp
|61.2%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/functional/restarts/py3test
|61.3%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/library/ycloud/impl/ut/unittest
|61.3%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/mind/bscontroller/ut_selfheal/unittest
|61.3%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/replication/service/ut_worker/unittest
|61.4%| [TS] {BAZEL_DOWNLOAD, FAILED} ydb/library/persqueue/topic_parser/ut/unittest
|61.5%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/datashard/partitioning/py3test
|61.5%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/tools/kqprun/tests/objcopy_278b1a63a14648a80c4b930adb.o
|61.5%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/core/persqueue/ut/objcopy_11720a94357f4e62a79e34a420.o
|61.5%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/services/datastreams/ut/ydb-services-datastreams-ut
|61.5%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tablet_flat/test/tool/surg/surg
|61.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/persqueue/ut/make_config.cpp
|61.5%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/kqp/ut/discovery/unittest
|61.5%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/fq/libs/control_plane_storage/internal/ut/unittest
|61.5%| [TA] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/tx_proxy/ut_storage_tenant/test-results/unittest/{meta.json ... results_accumulator.log}
|61.3%| [TA] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/datashard/async_replication/test-results/py3test/{meta.json ... results_accumulator.log}
|61.3%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/datashard/ut_upload_rows/ydb-core-tx-datashard-ut_upload_rows
|61.3%| [TA] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/schemeshard/ut_system_names/test-results/unittest/{meta.json ... results_accumulator.log}
|61.4%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/tools/kqprun/tests/objcopy_6b37760fb6a28054d0feafd61d.o
|61.4%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/tools/kqprun/tests/objcopy_5923b362516b6632b9769a5db2.o
|61.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kqp/ut/batch_operations/kqp_batch_update_ut.cpp
|61.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/datashard/datashard_ut_common_pq.cpp
|61.5%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/time_cast/ut/unittest
|61.5%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/tests/stress/oltp_workload/libpy3oltp_workload.global.a
|61.5%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/client/ut/ydb-core-client-ut
|61.5%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/schemeshard/ut_streaming_query_reboots/core-tx-schemeshard-ut_streaming_query_reboots
|61.5%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/blobstorage/ut_vdisk/ydb-core-blobstorage-ut_vdisk
|61.5%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/stress/oltp_workload/objcopy_bcf2142e31bf537964dc063d11.o
|61.2%| [TA] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/tx_proxy/ut_base_tenant/test-results/unittest/{meta.json ... results_accumulator.log}
|61.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/sys_view/ut_common.cpp
|61.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/persqueue/ut/partitiongraph_ut.cpp
|61.3%| [TA] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/base/ut_board_subscriber/test-results/unittest/{meta.json ... results_accumulator.log}
|61.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/persqueue/ut/utils_ut.cpp
|61.3%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/schemeshard/ut_base/ydb-core-tx-schemeshard-ut_base
|61.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/persqueue/ut/pqtablet_mock.cpp
|61.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/sequenceproxy/sequenceproxy_ut.cpp
|61.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/persqueue/ut/user_info_ut.cpp
|61.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/persqueue/ut/metering_sink_ut.cpp
|61.3%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/blobstorage/ut_pdiskfit/pdiskfit/pdiskfit
|61.3%| [TA] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/kqp/ut/sysview/test-results/unittest/{meta.json ... results_accumulator.log}
|61.3%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/control_plane_storage/internal/ut/objcopy_c96ef635306ccee8a5cf6359f1.o
|61.3%| [LD] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/backup/s3_path_style/ydb-tests-functional-backup-s3_path_style
|61.4%| [TS] {BAZEL_DOWNLOAD, FAILED} ydb/library/yql/providers/dq/provider/ut/unittest
|61.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/persqueue/ut/internals_ut.cpp
|61.4%| [TA] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/services/datastreams/ut/test-results/unittest/{meta.json ... results_accumulator.log}
|61.2%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/library/actors/interconnect/ut_fat/ydb-library-actors-interconnect-ut_fat
|61.3%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/library/actors/helpers/ut/ydb-library-actors-helpers-ut
|61.3%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/datashard/ut_reassign/unittest
|61.3%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/health_check/ut/ydb-core-health_check-ut
|61.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/persqueue/ut/sourceid_ut.cpp
|61.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/tx_proxy/proxy_ut_helpers.cpp
|61.3%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/datashard/ut_snapshot/ydb-core-tx-datashard-ut_snapshot
|61.4%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/kqp/tools/combiner_perf/bin/combiner_perf
|61.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/persqueue/ut/partition_chooser_ut.cpp
|61.4%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tablet_flat/ut_large/unittest
|61.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/cms/ut_helpers.cpp
|61.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/cms/cms_tenants_ut.cpp
|61.4%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/http_proxy/ut/inside_ydb_ut/ydb-core-http_proxy-ut-inside_ydb_ut
|61.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/persqueue/ut/pqtablet_ut.cpp
|61.4%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/raw_socket/ut/ydb-core-raw_socket-ut
|61.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/cms/console/validators/registry_ut.cpp
|61.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/cms/console/validators/validator_nameservice_ut.cpp
|61.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/cms/downtime_ut.cpp
|61.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/persqueue/ut/counters_ut.cpp
|61.0%| [TS] {BAZEL_DOWNLOAD, FAILED} ydb/core/persqueue/pqtablet/blob/ut/unittest
|61.0%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/transfer/ut/functional/unittest
|61.0%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/blobstorage/ut_blobstorage/ut_blob_depot/unittest
|61.0%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/util/ut/ydb-core-util-ut
|61.0%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/datashard/ut_range_ops/unittest
|61.0%| [TA] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tablet/ut/test-results/unittest/{meta.json ... results_accumulator.log}
|61.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/cms/console/validators/validator_bootstrap_ut.cpp
|61.0%| [TA] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/blobstorage/vdisk/hulldb/generic/ut/test-results/unittest/{meta.json ... results_accumulator.log}
|61.1%| [TA] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/blobstorage/ut_blobstorage/ut_read_only_vdisk/test-results/unittest/{meta.json ... results_accumulator.log}
|61.1%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/blobstorage/dsproxy/ut_fat/ydb-core-blobstorage-dsproxy-ut_fat
|61.1%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/blobstorage/ut_blobstorage/ut_replication/core-blobstorage-ut_blobstorage-ut_replication
|61.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/tx_proxy/proxy_ext_tenant_ut.cpp
|61.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/cms/cms_ut_common.cpp
|61.1%| [TA] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/blobstorage/vdisk/hulldb/cache_block/ut/test-results/unittest/{meta.json ... results_accumulator.log}
|61.1%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/kafka_proxy/ut/ydb-core-kafka_proxy-ut
|61.1%| [TA] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/schemeshard/ut_olap/test-results/unittest/{meta.json ... results_accumulator.log}
|61.2%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/fq/libs/checkpoint_storage/ut/ydb-core-fq-libs-checkpoint_storage-ut
|61.0%| [BN] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/stress/topic_kafka/workload/ydb_cli
|61.0%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/schemeshard/ut_partition_stats/ydb-core-tx-schemeshard-ut_partition_stats
|61.0%| [TS] {BAZEL_DOWNLOAD, FAILED} ydb/mvp/core/ut/unittest
|61.0%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/blobstorage/vdisk/hulldb/cache_block/ut/ydb-core-blobstorage-vdisk-hulldb-cache_block-ut
|61.0%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/datashard/ut_trace/ydb-core-tx-datashard-ut_trace
|61.1%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/stress/kv_volume/workload_keyvalue_volume
|61.1%| [TS] {BAZEL_DOWNLOAD, FAILED} ydb/tests/tools/pq_read/test/py3test
|61.1%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/schemeshard/ut_background_cleaning/ydb-core-tx-schemeshard-ut_background_cleaning
|61.1%| [TA] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/kqp/rm_service/ut/test-results/unittest/{meta.json ... results_accumulator.log}
|61.1%| [TS] {BAZEL_DOWNLOAD, FAILED} ydb/core/ymq/ut/unittest
|61.1%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/fq/libs/row_dispatcher/ut/ydb-core-fq-libs-row_dispatcher-ut
|61.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/persqueue/ut/pq_ut.cpp
|61.1%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/functional/kqp/plan2svg/py3test
|61.1%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/functional/rename/ydb-tests-functional-rename
|61.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/cms/cms_ut.cpp
|61.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/persqueue/ut/partition_ut.cpp
|61.0%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/blobstorage/dsproxy/ut_ftol/ydb-core-blobstorage-dsproxy-ut_ftol
|61.0%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/stress/show_create/table/tests/py3test
|61.1%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/tx_proxy/ut_storage_tenant/unittest
|61.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/blobstorage/vdisk/hulldb/test/libvdisk-hulldb-test.a
|61.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/ut_stats/ut_stats.cpp
|61.1%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/datashard/async_replication/py3test
|61.1%| [CC] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/comp_nodes/llvm16/mkql_time_order_recover.cpp
|61.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kqp/ut/pg/pg_catalog_ut.cpp
|61.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/blobstorage/vdisk/hulldb/compstrat/hulldb_compstrat_ut.cpp
|61.2%| [LD] {BAZEL_DOWNLOAD} $(B)/ydb/tests/fq/yt/kqp_yt_import/ydb-tests-fq-yt-kqp_yt_import
|61.2%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/base/ut_board_subscriber/unittest
|61.2%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/services/datastreams/ut/unittest
|61.2%| [LD] {BAZEL_DOWNLOAD} $(B)/ydb/tests/stress/viewer/viewer
|61.2%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/kqp/ut/sysview/unittest
|61.3%| [CC] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/comp_nodes/llvm16/mkql_random.cpp
|61.2%| [LD] {BAZEL_DOWNLOAD} $(B)/yql/essentials/udfs/common/string/libstring_udf.so
|61.2%| [CC] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/comp_nodes/llvm16/mkql_skip.cpp
|61.3%| [CC] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/comp_nodes/llvm16/mkql_expand_map.cpp
|61.3%| [CC] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/comp_nodes/llvm16/mkql_join_dict.cpp
|61.3%| [CC] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/comp_nodes/llvm16/mkql_fromyson.cpp
|61.3%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tablet/ut/unittest
|61.3%| [CC] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/comp_nodes/llvm16/mkql_wide_map.cpp
|61.3%| [PB] {BAZEL_DOWNLOAD} $(B)/yt/yt_proto/yt/client/chunk_client/proto/read_limit.pb.{h, cc}
|61.3%| [CC] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/comp_nodes/llvm16/mkql_weakmember.cpp
|61.3%| [CC] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/comp_nodes/llvm16/mkql_contains.cpp
|61.4%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/schemeshard/ut_olap/unittest
|61.4%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/library/actors/examples/02_discovery/protocol.pb.{h, cc}
|61.4%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/protos/ydb_keyvalue.pb.{h, cc}
|61.4%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/library/actors/interconnect/ut/protos/interconnect_test.pb.{h, cc}
|61.3%| [CC] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/comp_nodes/llvm16/mkql_block_if.cpp
|61.3%| [CC] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/comp_nodes/llvm16/mkql_unwrap.cpp
|61.4%| [PB] {BAZEL_DOWNLOAD} $(B)/yql/essentials/core/issue/protos/issue_id.pb.{h, cc}
|61.4%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/kqp/rm_service/ut/unittest
|61.4%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/library/formats/arrow/protos/ssa.pb.{h, cc}
|61.4%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/library/actors/protos/services_common.pb.{h, cc}
|61.4%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/engines/scheme/defaults/protos/data.pb.{h, cc}
|61.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tablet_flat/test/tool/surg/main.cpp
|61.5%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/protos/ydb_persqueue_cluster_discovery.pb.{h, cc}
|61.5%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/protos/ydb_federation_discovery.pb.{h, cc}
|61.5%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/protos/ydb_monitoring.pb.{h, cc}
|61.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/ut_streaming_query_reboots/ut_streaming_query_reboots.cpp
|61.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/ut_base/ut_table_pg_types.cpp
|61.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/blobstorage/ut_vdisk/mon_reregister_ut.cpp
|61.5%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/grpc/draft/ydb_object_storage_v1.{pb.h ... grpc.pb.h}
|61.5%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/grpc/ydb_debug_v1.{pb.h ... grpc.pb.h}
|61.5%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/grpc/ydb_table_v1.{pb.h ... grpc.pb.h}
|61.5%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/grpc/ydb_discovery_v1.{pb.h ... grpc.pb.h}
|61.5%| [CC] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/comp_nodes/llvm16/mkql_todict.cpp
|61.5%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/grpc/ydb_import_v1.{pb.h ... grpc.pb.h}
|61.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/client/object_storage_listing_ut.cpp
|61.5%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/memory_stats.{pb.h ... grpc.pb.h}
|61.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/client/locks_ut.cpp
|61.5%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/grpc/ydb_auth_v1.{pb.h ... grpc.pb.h}
|61.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/http_proxy/ut/inside_ydb_ut/inside_ydb_ut.cpp
|61.5%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/tx.{pb.h ... grpc.pb.h}
|61.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/ut_base/ut_info_types.cpp
|61.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/util/wildcard_ut.cpp
|61.5%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/client/nc_private/iam/v1/access_service.{pb.h ... grpc.pb.h}
|61.5%| [PB] {BAZEL_DOWNLOAD} $(B)/contrib/libs/googleapis-common-protos/google/api/annotations.{pb.h ... grpc.pb.h}
|61.5%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/tablet.{pb.h ... grpc.pb.h}
|61.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/blobstorage/ut_vdisk/vdisk_test.cpp
|61.6%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/client/nc_private/iam/v1/access.{pb.h ... grpc.pb.h}
|61.6%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/blobstorage_base3.{pb.h ... grpc.pb.h}
|61.6%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/services/deprecated/persqueue_v0/api/grpc/persqueue.{pb.h ... grpc.pb.h}
|61.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/client/flat_ut.cpp
|61.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kqp/ut/pg/kqp_pg_ut.cpp
|61.6%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/counters_hive.{pb.h ... grpc.pb.h}
|61.6%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/kesus.{pb.h ... grpc.pb.h}
|61.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kafka_proxy/ut/ut_transaction_actor.cpp
|61.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/http_proxy/ut/ymq_ut.cpp
|61.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/fq/libs/checkpoint_storage/ut/ydb_state_storage_ut.cpp
|61.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/fq/libs/checkpoint_storage/ut/storage_service_ydb_ut.cpp
|61.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kafka_proxy/ut/ut_produce_actor.cpp
|61.6%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/drivemodel.{pb.h ... grpc.pb.h}
|61.6%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/workload_manager_config.{pb.h ... grpc.pb.h}
|61.6%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/tests/stress/kv_volume/libpy3workload_keyvalue_volume.global.a
|61.6%| [LD] {BAZEL_DOWNLOAD} $(B)/yql/essentials/udfs/common/datetime2/libdatetime2_udf.so
|61.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/blobstorage/vdisk/hulldb/cache_block/cache_block_ut.cpp
|61.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/fq/libs/row_dispatcher/ut/topic_session_ut.cpp
|61.6%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/rename/objcopy_5db899a01c2ec6f53648af6840.o
|61.7%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/blobstorage/vdisk/hulldb/generic/ut/unittest
|61.6%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/blobstorage/ut_vdisk/lib/libblobstorage-ut_vdisk-lib.a
|61.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kafka_proxy/ut/metarequest_ut.cpp
|61.7%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/blobstorage/vdisk/hulldb/cache_block/ut/unittest
|61.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kafka_proxy/ut/kafka_test_client.cpp
|61.7%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/blobstorage/ut_blobstorage/ut_read_only_vdisk/unittest
|61.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/health_check/health_check_ut.cpp
|61.7%| [TM] {BAZEL_DOWNLOAD, FAILED} 
ydb/core/kqp/rm_service/ut/unittest |61.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/services/datastreams/datastreams_ut.cpp |61.7%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/kqp/rm_service/ut/unittest |61.7%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/kqp/rm_service/ut/unittest |61.7%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/kqp/rm_service/ut/unittest |61.7%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/kqp/rm_service/ut/unittest |61.7%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/schemeshard/ut_olap/unittest |61.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kafka_proxy/ut/actors_ut.cpp |61.7%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/blobstorage/vdisk/hulldb/cache_block/ut/unittest |61.7%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/schemeshard/ut_olap/unittest |61.7%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/schemeshard/ut_olap/unittest |61.7%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/schemeshard/ut_olap/unittest |61.7%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/blobstorage/ut_blobstorage/ut_read_only_vdisk/unittest |61.7%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/schemeshard/ut_olap/unittest |61.7%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/kqp/rm_service/ut/unittest |61.7%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/blobstorage/ut_blobstorage/ut_read_only_vdisk/unittest |61.7%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/blobstorage/ut_blobstorage/ut_read_only_vdisk/unittest |61.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/ut_blobstorage/replication_huge.cpp |61.7%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/blobstorage/vdisk/hulldb/cache_block/ut/unittest |61.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/util/queue_oneone_inplace_ut.cpp |61.7%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/blobstorage/vdisk/hulldb/cache_block/ut/unittest |61.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/ut_blobstorage/replication.cpp |61.7%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/schemeshard/ut_olap/unittest |61.7%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/blobstorage/ut_blobstorage/ut_read_only_vdisk/unittest |61.7%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tablet/ut/unittest |61.7%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/blobstorage/vdisk/hulldb/generic/ut/unittest |61.7%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/blobstorage/vdisk/hulldb/generic/ut/unittest |61.7%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tablet/ut/unittest |61.7%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/blobstorage/vdisk/hulldb/generic/ut/unittest |61.8%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tablet/ut/unittest |61.8%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/blobstorage/vdisk/hulldb/generic/ut/unittest |61.8%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/kqp/ut/sysview/unittest |61.8%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/kqp/ut/sysview/unittest |61.8%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/kqp/ut/sysview/unittest |61.8%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/kqp/ut/sysview/unittest |61.7%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/kqp/ut/sysview/unittest |61.7%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/kqp/ut/sysview/unittest |61.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/util/bits_ut.cpp |61.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/util/circular_queue_ut.cpp |61.7%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/kqp/ut/sysview/unittest |61.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/library/actors/interconnect/ut_fat/main.cpp |61.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/datashard/datashard_ut_snapshot.cpp |61.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/util/hazard_ut.cpp |61.7%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/services/datastreams/ut/unittest |61.7%| [CC] {BAZEL_DOWNLOAD} 
$(S)/ydb/core/util/address_classifier_ut.cpp |61.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/util/fast_tls_ut.cpp |61.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/util/hyperlog_counter_ut.cpp |61.7%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/services/datastreams/ut/unittest |61.8%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/services/datastreams/ut/unittest |61.8%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/services/datastreams/ut/unittest |61.8%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/services/datastreams/ut/unittest |61.8%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/services/datastreams/ut/unittest |61.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/util/cache_ut.cpp |61.8%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/kqp/ut/sysview/unittest |61.8%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/kqp/ut/sysview/unittest |61.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/util/stlog_ut.cpp |61.8%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/kqp/ut/sysview/unittest |61.8%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/kqp/ut/sysview/unittest |61.8%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/kqp/ut/sysview/unittest |61.8%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/kqp/ut/sysview/unittest |61.8%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/kqp/ut/sysview/unittest |61.8%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/tx_proxy/ut_base_tenant/unittest |61.8%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/tx_proxy/ut_base_tenant/unittest |61.8%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/base/ut_board_subscriber/unittest |61.8%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/tx_proxy/ut_base_tenant/unittest |61.8%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/base/ut_board_subscriber/unittest |61.7%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/base/ut_board_subscriber/unittest |61.7%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/tx_proxy/ut_base_tenant/unittest |61.8%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/tx_proxy/ut_base_tenant/unittest |61.8%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/tx_proxy/ut_base_tenant/unittest |61.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/util/btree_ut.cpp |61.8%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/tx_proxy/ut_base_tenant/unittest |61.8%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/tx_proxy/ut_base_tenant/unittest |61.8%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/tx_proxy/ut_base_tenant/unittest |61.8%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/tx_proxy/ut_base_tenant/unittest |61.8%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/schemeshard/ut_system_names/unittest |61.8%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/datashard/async_replication/py3test |61.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/util/concurrent_rw_hash_ut.cpp |61.8%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/datashard/async_replication/py3test |61.8%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/schemeshard/ut_system_names/unittest |61.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/blobstorage/ut_pdiskfit/pdiskfit/pdiskfit.cpp |61.8%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/datashard/async_replication/py3test |61.8%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/schemeshard/ut_system_names/unittest |61.8%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/schemeshard/ut_system_names/unittest |61.8%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/datashard/async_replication/py3test |61.8%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/datashard/async_replication/py3test |61.8%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/schemeshard/ut_system_names/unittest |61.8%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/schemeshard/ut_system_names/unittest |61.8%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/schemeshard/ut_system_names/unittest |61.8%| [TM] {BAZEL_DOWNLOAD, FAILED} 
ydb/core/tx/schemeshard/ut_system_names/unittest |61.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/util/log_priority_mute_checker_ut.cpp |61.8%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/schemeshard/ut_system_names/unittest |61.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/util/token_bucket_ut.cpp |61.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/util/lz4_data_generator_ut.cpp |61.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/util/lf_stack_ut.cpp |61.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/util/btree_cow_ut.cpp |61.9%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/blobstorage/ut_pdiskfit/lib/libblobstorage-ut_pdiskfit-lib.a |61.9%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/schemeshard/ut_system_names/unittest |61.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/util/simple_cache_ut.cpp |61.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/util/intrusive_fixed_hash_set_ut.cpp |61.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/util/ui64id_ut.cpp |61.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/util/page_map_ut.cpp |61.8%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/datashard/async_replication/py3test |61.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/util/intrusive_heap_ut.cpp |61.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/util/event_priority_queue_ut.cpp |61.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/util/ulid_ut.cpp |61.8%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/datashard/async_replication/py3test |61.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/util/operation_queue_priority_ut.cpp |61.8%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/datashard/async_replication/py3test |61.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/util/fast_lookup_unique_list_ut.cpp |61.8%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/datashard/async_replication/py3test |61.8%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/datashard/async_replication/py3test |61.9%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/datashard/async_replication/py3test |61.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/util/intrusive_stack_ut.cpp |61.9%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/datashard/async_replication/py3test |61.9%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/datashard/async_replication/py3test |61.9%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/datashard/async_replication/py3test |61.9%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/tx_proxy/ut_storage_tenant/unittest |61.9%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/datashard/async_replication/py3test |61.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/util/fragmented_buffer_ut.cpp |61.9%| [TA] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/services/ydb/table_split_ut/test-results/unittest/{meta.json ... results_accumulator.log} |61.6%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/apps/dstool/ydb-dstool |61.6%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/functional/sqs/merge_split_common_table/std/functional-sqs-merge_split_common_table-std |61.6%| [LD] {BAZEL_DOWNLOAD} $(B)/ydb/core/kqp/tests/kikimr_tpch/ydb-core-kqp-tests-kikimr_tpch |61.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kafka_proxy/ut/ut_protocol.cpp |61.6%| [TA] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/mind/address_classification/ut/test-results/unittest/{meta.json ... 
results_accumulator.log} |61.7%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/rename/dc048c91e67372877fc6ad2dfc_raw.auxcpp |61.7%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/replication/controller/ut_target_discoverer/replication-controller-ut_target_discoverer |61.7%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/schemeshard/ut_subdomain_reboots/ydb-core-tx-schemeshard-ut_subdomain_reboots |61.7%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/kqp/ut/olap/ydb-core-kqp-ut-olap |61.7%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/backup/tools/decrypt/decrypt |61.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/util/interval_set_ut.cpp |61.7%| [TA] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/mind/hive/ut/test-results/unittest/{meta.json ... results_accumulator.log} |61.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/util/operation_queue_ut.cpp |61.8%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/rename/objcopy_00c87b13e2f685811a9825079d.o |61.8%| [TS] {BAZEL_DOWNLOAD, FAILED} ydb/core/graph/shard/ut/unittest |61.8%| [TA] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/load_test/ut_ycsb/test-results/unittest/{meta.json ... results_accumulator.log} |61.8%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/tx_allocator_client/ut/ydb-core-tx-tx_allocator_client-ut |61.8%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/functional/replication/unittest |61.6%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/rename/objcopy_5865a174a6c25ca1a2d6386702.o |61.6%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/persqueue/pqtablet/partition/ut/ydb-core-persqueue-pqtablet-partition-ut |61.7%| [TS] {BAZEL_DOWNLOAD, FAILED} ydb/core/ymq/actor/cloud_events/cloud_events_ut/unittest |61.7%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/schemeshard/ut_user_attributes_reboots/core-tx-schemeshard-ut_user_attributes_reboots |61.7%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/rename/objcopy_c02c3d9f840d02af9fad858a55.o |61.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/fq/libs/row_dispatcher/ut/leader_election_ut.cpp |61.7%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/stress/transfer/tests/py3test |61.7%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/stress/kv_volume/objcopy_fe7217acc26206b8b68bbd0969.o |61.7%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/stress/backup/tests/py3test |61.7%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/rename/objcopy_bfa810e70cd1de18c5d4a18a62.o |61.7%| [TA] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/scheme_board/ut_replica/test-results/unittest/{meta.json ... results_accumulator.log} |61.7%| [TA] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/datashard/parametrized_queries/test-results/py3test/{meta.json ... 
results_accumulator.log} |61.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/fq/libs/row_dispatcher/ut/coordinator_ut.cpp |61.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/datashard/datashard_ut_trace.cpp |61.8%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/olap/scenario/ydb-tests-olap-scenario |61.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kafka_proxy/ut/ut_kafka_functions.cpp |61.8%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/blobstorage/ut_blobstorage/ut_move_pdisk/ydb-core-blobstorage-ut_blobstorage-ut_move_pdisk |61.8%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/tx_proxy/ut_storage_tenant/ydb-core-tx-tx_proxy-ut_storage_tenant |61.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/blobstorage/dsproxy/ut_ftol/dsproxy_fault_tolerance_ut.cpp |61.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/ut_background_cleaning/ut_background_cleaning.cpp |61.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/fq/libs/row_dispatcher/ut/row_dispatcher_ut.cpp |61.8%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/kqp/ut/view/ydb-core-kqp-ut-view |61.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/ut_partition_stats/ut_top_cpu_usage.cpp |61.3%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/schemeshard/ut_cdc_stream/ydb-core-tx-schemeshard-ut_cdc_stream |61.4%| [TA] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/datashard/ttl/test-results/py3test/{meta.json ... results_accumulator.log} |61.4%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/schemeshard/ut_serverless_reboots/ydb-core-tx-schemeshard-ut_serverless_reboots |61.4%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/security/ldap_auth_provider/ut/ydb-core-security-ldap_auth_provider-ut |61.4%| [TA] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/kqp/ut/tx/test-results/unittest/{meta.json ... results_accumulator.log} |61.4%| [TA] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/schemeshard/ut_continuous_backup/test-results/unittest/{meta.json ... results_accumulator.log} |61.4%| [TS] {BAZEL_DOWNLOAD, FAILED} ydb/library/workload/benchmark_base/ut/unittest |61.4%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/conveyor_composite/ut/ydb-core-tx-conveyor_composite-ut |61.4%| [TA] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/schemeshard/ut_streaming_query/test-results/unittest/{meta.json ... results_accumulator.log} |61.4%| [TS] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/sequenceproxy/ut/unittest |61.4%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/schemeshard/ut_backup/ydb-core-tx-schemeshard-ut_backup |61.4%| [TA] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/fq/streaming_optimize/test-results/py3test/{meta.json ... results_accumulator.log} |61.5%| [TA] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/functional/ttl/test-results/py3test/{meta.json ... results_accumulator.log} |61.5%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/persqueue/public/fetcher/ut/unittest |61.5%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/mind/address_classification/ut/ydb-core-mind-address_classification-ut |61.5%| [TA] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/schemeshard/ut_cdc_stream/test-results/unittest/{meta.json ... results_accumulator.log} |61.5%| [BN] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/stress/mixedpy/workload/ydb_cli |61.5%| [TA] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/schemeshard/ut_index_build/test-results/unittest/{meta.json ... results_accumulator.log} |61.5%| [TA] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/blobstorage/dsproxy/ut_strategy/test-results/unittest/{meta.json ... 
results_accumulator.log} |61.5%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/keyvalue/ut_trace/ydb-core-keyvalue-ut_trace |61.3%| [TS] {BAZEL_DOWNLOAD, FAILED} ydb/library/yql/providers/dq/scheduler/ut/unittest |61.3%| [TA] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/replication/service/ut_json_change_record/test-results/unittest/{meta.json ... results_accumulator.log} |61.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/fq/libs/checkpoint_storage/ut/ydb_checkpoint_storage_ut.cpp |61.3%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/stress/oltp_workload/tests/py3test |61.3%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/example/ydb-tests-example |61.3%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/kqp/ut/query/ydb-core-kqp-ut-query |61.3%| [TS] {BAZEL_DOWNLOAD, FAILED} ydb/core/log_backend/ut/unittest |61.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/fq/libs/checkpoint_storage/ut/gc_ut.cpp |61.4%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/apps/ydb/ut/ydb-apps-ydb-ut |61.4%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/library/actors/core/harmonizer/ut/unittest |61.4%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/blobstorage/vdisk/repl/ut/ydb-core-blobstorage-vdisk-repl-ut |61.4%| [TA] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/blobstorage/vdisk/ingress/ut/test-results/unittest/{meta.json ... results_accumulator.log} |61.4%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/functional/scheme_shard/ydb-tests-functional-scheme_shard |61.4%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/statistics/aggregator/ut/ydb-core-statistics-aggregator-ut |61.4%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/schemeshard/ut_move/ydb-core-tx-schemeshard-ut_move |61.4%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/load_test/ut/ydb-core-load_test-ut |61.4%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/mon/ut/ydb-core-mon-ut |61.4%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/replication/controller/ut_stream_creator/tx-replication-controller-ut_stream_creator |61.4%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/stress/scheme_board/pile_promotion/tests/py3test |61.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kafka_proxy/ut/ut_transaction_coordinator.cpp |61.4%| [TA] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/olap/scenario/test-results/py3test/{meta.json ... results_accumulator.log} |61.3%| [TA] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/datashard/ut_kqp/test-results/unittest/{meta.json ... results_accumulator.log} |61.3%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/core/http_proxy/ut/inside_ydb_ut/objcopy_484246668d943fbae3b476ec7d.o |61.3%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/fq/http_api/py3test |61.3%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/load_test/ut_ycsb/ydb-core-load_test-ut_ycsb |61.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/raw_socket/ut/buffered_writer_ut.cpp |61.3%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/schemeshard/ut_bsvolume_reboots/ydb-core-tx-schemeshard-ut_bsvolume_reboots |61.4%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/backup/impl/ut_table_writer/ydb-core-backup-impl-ut_table_writer |61.4%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/kqp/ut/effects/ydb-core-kqp-ut-effects |61.4%| [TA] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/mon/ut/test-results/unittest/{meta.json ... 
results_accumulator.log} |61.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/util/queue_inplace_ut.cpp |61.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kqp/tools/combiner_perf/bin/main.cpp |61.4%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/library/actors/core/manual_test/shared_threads/shared_threads |61.4%| [TS] {BAZEL_DOWNLOAD, FAILED} ydb/core/fq/libs/test_connection/ut/unittest |61.4%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/stress/topic/tests/ydb-tests-stress-topic-tests |61.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/ut_base/ut_counters.cpp |61.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kafka_proxy/ut/ut_serialization.cpp |61.4%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/functional/kqp/kqp_query_svc/unittest |61.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/ut_base/ut_commit_redo_limit.cpp |61.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/actors/helpers/selfping_actor_ut.cpp |61.3%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/library/yql/providers/solomon/actors/ut/ydb-library-yql-providers-solomon-actors-ut |61.3%| [TA] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/blobstorage/ut_blobstorage/ut_bridge/test-results/unittest/{meta.json ... results_accumulator.log} |61.3%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/stress/kafka/tests/py3test |61.3%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/datashard/ut_change_exchange/ydb-core-tx-datashard-ut_change_exchange |61.3%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/functional/sqs/with_quotas/ydb-tests-functional-sqs-with_quotas |61.4%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/services/ydb/table_split_ut/unittest |61.4%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/services/ydb/table_split_ut/unittest |61.4%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/services/ydb/table_split_ut/unittest |61.4%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/services/ydb/table_split_ut/unittest |61.4%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/services/ydb/table_split_ut/unittest |61.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/blobstorage/ut_vdisk/gen_restarts.cpp |61.5%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/services/ydb/table_split_ut/unittest |61.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/blobstorage/ut_vdisk/huge_migration_ut.cpp |61.4%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/mind/address_classification/ut/unittest |61.4%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/mind/address_classification/ut/unittest |61.4%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/mind/address_classification/ut/unittest |61.4%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/mind/hive/ut/unittest |61.4%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/mind/hive/ut/unittest |61.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/client/cancel_tx_ut.cpp |61.5%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/load_test/ut_ycsb/unittest |61.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/ut_base/ut_table_decimal_types.cpp |61.5%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/mind/hive/ut/unittest |61.5%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/mind/hive/ut/unittest |61.5%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/scheme_board/ut_replica/unittest |61.5%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/kqp/ut/tx/unittest |61.5%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/datashard/parametrized_queries/py3test |61.5%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/schemeshard/ut_streaming_query/unittest |61.5%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/kqp/ut/tx/unittest |61.5%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/schemeshard/ut_streaming_query/unittest |61.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/datashard/datashard_ut_upload_rows.cpp |61.5%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/fq/streaming_optimize/py3test 
|61.5%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/mind/address_classification/ut/unittest |61.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/http_proxy/ut/kinesis_ut.cpp |61.6%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/functional/ttl/py3test |61.6%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/blobstorage/dsproxy/ut_strategy/unittest |61.6%| [TS] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/replication/service/ut_json_change_record/unittest |61.5%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/mon/ut/unittest |61.5%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/protos/ydb_bridge_common.pb.{h, cc} |61.5%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/mind/address_classification/ut/unittest |61.5%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/mind/address_classification/ut/unittest |61.5%| [TS] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/replication/service/ut_json_change_record/unittest |61.5%| [CC] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/comp_nodes/llvm16/mkql_wide_top_sort.cpp |61.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/ut_base/ut_base.cpp |61.5%| [CC] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/comp_nodes/llvm16/mkql_condense.cpp |61.5%| [PB] {BAZEL_DOWNLOAD} $(B)/yt/yt_proto/yt/core/rpc/proto/rpc.pb.{h, cc} |61.5%| [CC] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/comp_nodes/llvm16/mkql_guess.cpp |61.5%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/serializable/objcopy_3ea8aa67e7c24c4f0e3b0406b9.o |61.6%| [CC] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/comp_nodes/llvm16/mkql_wide_combine.cpp |61.6%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/mon/ut/unittest |61.6%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/protos/draft/field_transformation.pb.{h, cc} |61.5%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/mind/address_classification/ut/unittest |61.5%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/sqs/merge_split_common_table/std/objcopy_5d73baff4bb68923ddbe5f4fcd.o |61.6%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/mind/address_classification/ut/unittest |61.6%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/apps/dstool/objcopy_fca89909cedb628068681e1038.o |61.6%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/sqs/merge_split_common_table/std/objcopy_2efdf95387a81f55cf9c81071a.o |61.6%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/protos/ydb_discovery.pb.{h, cc} |61.6%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/providers/dq/config/config.pb.{h, cc} |61.6%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/protos/ydb_debug.pb.{h, cc} |61.6%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/mind/address_classification/ut/unittest |61.6%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/sqs/merge_split_common_table/libpy3functional-sqs-merge_split_common_table.global.a |61.6%| [PB] {BAZEL_DOWNLOAD} $(B)/yql/essentials/providers/common/proto/gateways_config.pb.{h, cc} |61.6%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/mind/address_classification/ut/unittest |61.6%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/services/ydb/table_split_ut/unittest |61.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/blobstorage/dsproxy/ut_fat/dsproxy_ut.cpp |61.6%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/config/protos/task_controller.pb.{h, cc} |61.7%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/olap/scenario/py3test |61.7%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/protos/draft/ydb_backup.pb.{h, cc} |61.7%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/blobstorage/vdisk/ingress/ut/unittest |61.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/backup/tools/decrypt/main.cpp |61.7%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/mind/hive/ut/unittest |61.6%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/mind/hive/ut/unittest |61.7%| [TM] 
{BAZEL_DOWNLOAD, FAILED} ydb/tests/olap/scenario/py3test |61.7%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/tests/olap/scenario/709f125727d9ea4165df516509_raw.auxcpp |61.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kqp/ut/olap/blobs_sharing_ut.cpp |61.7%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/olap/scenario/objcopy_0ab925f82bbba07bf3b749dc3c.o |61.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kqp/ut/olap/decimal_ut.cpp |61.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/ut_user_attributes_reboots/ut_user_attributes_reboots.cpp |61.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kqp/ut/olap/simple_reader_ut.cpp |61.6%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/kqp/ut/olap/bool_test_enums.h_serialized.cpp |61.7%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/grpc/draft/ydb_bridge_v1.{pb.h ... grpc.pb.h} |61.7%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/schemeshard/ut_index_build/unittest |61.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kqp/ut/olap/statistics_ut.cpp |61.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kqp/ut/olap/locks_ut.cpp |61.7%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/functional/ttl/py3test |61.7%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/schemeshard/ut_cdc_stream/unittest |61.7%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/functional/ttl/py3test |61.7%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/functional/ttl/py3test |61.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kqp/ut/olap/optimizer_ut.cpp |61.8%| [LD] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/kqp/kqp_query_session/ydb-tests-functional-kqp-kqp_query_session |61.8%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/functional/ttl/py3test |61.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kqp/ut/olap/sparsed_ut.cpp |61.8%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/functional/ttl/py3test |61.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kqp/ut/view/view_ut.cpp |61.8%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/fq/streaming_optimize/py3test |61.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kqp/ut/olap/compaction_ut.cpp |61.8%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/grpc/ydb_coordination_v1.{pb.h ... grpc.pb.h} |61.7%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/fq/streaming_optimize/py3test |61.7%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/kqp/ut/tx/unittest |61.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kqp/ut/olap/json_ut.cpp |61.7%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/schemeshard/ut_streaming_query/unittest |61.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kqp/ut/olap/tiering_ut.cpp |61.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kqp/ut/olap/write_ut.cpp |61.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/replication/controller/target_discoverer_ut.cpp |61.8%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/schemeshard/ut_streaming_query/unittest |61.8%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/netclassifier.{pb.h ... grpc.pb.h} |61.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/ut_subdomain_reboots/ut_subdomain_reboots.cpp |61.8%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/schemeshard/ut_streaming_query/unittest |61.8%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/kqp/ut/tx/unittest |61.8%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/kqp/ut/tx/unittest |61.8%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/kqp/ut/tx/unittest |61.8%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/kqp/ut/tx/unittest |61.8%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/grpc/ydb_topic_v1.{pb.h ... 
grpc.pb.h} |61.8%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/kqp/ut/olap/helpers/libut-olap-helpers.a |61.8%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/kqp/ut/tx/unittest |61.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kqp/ut/olap/sys_view_ut.cpp |61.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/ut_cdc_stream/ut_cdc_stream.cpp |61.8%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/kqp/ut/tx/unittest |61.8%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/counters_blob_depot.{pb.h ... grpc.pb.h} |61.9%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/profiler.{pb.h ... grpc.pb.h} |61.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kqp/ut/olap/kqp_olap_stats_ut.cpp |61.8%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/sqs/merge_split_common_table/std/objcopy_242486256e1af973cd1d5376d1.o |61.8%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/kqp/ut/tx/unittest |61.8%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/metrics.{pb.h ... grpc.pb.h} |61.8%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/kqp/ut/tx/unittest |61.8%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/kqp/ut/tx/unittest |61.8%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/kqp/ut/tx/unittest |61.8%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/kqp/ut/tx/unittest |61.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/backup/impl/table_writer_ut.cpp |61.8%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/kqp/ut/tx/unittest |61.8%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/datashard/ttl/py3test |61.8%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/kqp/ut/tx/unittest |61.9%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/kqp/ut/tx/unittest |61.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kqp/ut/olap/aggregations_ut.cpp |61.9%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/kqp/ut/tx/unittest |61.9%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/kqp/ut/tx/unittest |61.9%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/kqp/ut/tx/unittest |61.9%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/datashard/parametrized_queries/py3test |61.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/mon/mon_ut.cpp |61.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/apps/ydb/ut/supported_codecs.cpp |61.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/apps/ydb/ut/supported_codecs_fixture.cpp |61.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/apps/ydb/ut/run_ydb.cpp |61.9%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/datashard/ttl/py3test |61.9%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/datashard/ttl/py3test |61.9%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/scheme_board/ut_replica/unittest |61.9%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/datashard/ttl/py3test |61.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kqp/ut/query/kqp_analyze_ut.cpp |61.8%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/datashard/ttl/py3test |61.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kqp/ut/effects/kqp_write_ut.cpp |61.9%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/datashard/ttl/py3test |61.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/ut_move/ut_move.cpp |61.9%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/datashard/ttl/py3test |61.9%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/datashard/ttl/py3test |61.9%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/datashard/ttl/py3test |61.9%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/datashard/ttl/py3test |61.9%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/datashard/ttl/py3test |61.9%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/datashard/ttl/py3test |61.9%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/datashard/ttl/py3test |61.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/blobstorage/vdisk/repl/blobstorage_hullreplwritesst_ut.cpp |61.9%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/datashard/ttl/py3test |61.9%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/datashard/ttl/py3test 
|61.9%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/kqp/ut/tx/unittest |61.9%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/datashard/ttl/py3test |61.9%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/datashard/ttl/py3test |61.9%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/datashard/ttl/py3test |61.9%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/datashard/ttl/py3test |61.9%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/datashard/ttl/py3test |61.9%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/datashard/ttl/py3test |61.9%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/datashard/ttl/py3test |61.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/apps/ydb/ut/parse_command_line.cpp |61.9%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/datashard/parametrized_queries/py3test |61.9%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/datashard/parametrized_queries/py3test |61.9%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/datashard/parametrized_queries/py3test |61.9%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/datashard/parametrized_queries/py3test |61.9%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/datashard/parametrized_queries/py3test |61.9%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/datashard/parametrized_queries/py3test |61.9%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/datashard/ttl/py3test |61.9%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/datashard/ttl/py3test |61.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/security/ldap_auth_provider/ldap_utils_ut.cpp |61.9%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/datashard/ttl/py3test |61.9%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/fq/streaming_optimize/py3test |61.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kqp/ut/olap/kqp_olap_ut.cpp |61.9%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/datashard/ttl/py3test |61.9%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/datashard/ttl/py3test |61.9%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/datashard/ttl/py3test |61.9%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/datashard/ttl/py3test |61.9%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/datashard/ttl/py3test |61.9%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/datashard/ttl/py3test |62.0%| [UN] {default-linux-x86_64, release, asan} $(B)/yql/essentials/tests/common/test_framework/udfs_deps/common-test_framework-udfs_deps.pkg.fake |62.0%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/datashard/parametrized_queries/py3test |62.0%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/datashard/parametrized_queries/py3test |61.9%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/datashard/parametrized_queries/py3test |61.9%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/datashard/parametrized_queries/py3test |61.9%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/datashard/parametrized_queries/py3test |61.9%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/blobstorage/dsproxy/ut_strategy/unittest |61.9%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/datashard/ttl/py3test |61.9%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/datashard/ttl/py3test |61.9%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/datashard/parametrized_queries/py3test |61.9%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/blobstorage/vdisk/ingress/ut/unittest |62.0%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/scheme_board/ut_replica/unittest |62.0%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/scheme_board/ut_replica/unittest |62.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kqp/ut/query/kqp_explain_ut.cpp |62.0%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/scheme_board/ut_replica/unittest |62.0%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/scheme_board/ut_replica/unittest |62.0%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/kqp/ut/tx/unittest |62.0%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/datashard/ttl/py3test 
|62.0%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/kqp/ut/tx/unittest |62.0%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/kqp/ut/tx/unittest |62.0%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/kqp/ut/tx/unittest |62.0%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/kqp/ut/tx/unittest |62.0%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/kqp/ut/tx/unittest |62.0%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/kqp/ut/tx/unittest |62.0%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/kqp/ut/tx/unittest |62.0%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/kqp/ut/tx/unittest |62.0%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/kqp/ut/tx/unittest |62.0%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/kqp/ut/tx/unittest |62.0%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/kqp/ut/tx/unittest |62.0%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/datashard/ttl/py3test |62.0%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/kqp/ut/tx/unittest |62.0%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/kqp/ut/tx/unittest |62.0%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/kqp/ut/tx/unittest |62.0%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/blobstorage/ut_blobstorage/ut_bridge/unittest |61.9%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/kqp/ut/tx/unittest |62.0%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/kqp/ut/tx/unittest |62.0%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/kqp/ut/tx/unittest |62.0%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/kqp/ut/tx/unittest |62.0%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/blobstorage/ut_blobstorage/ut_bridge/unittest |62.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kqp/ut/query/kqp_query_ut.cpp |62.0%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/kqp/ut/tx/unittest |62.0%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/kqp/ut/tx/unittest |62.0%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/kqp/ut/tx/unittest |62.0%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/schemeshard/ut_streaming_query/unittest |62.0%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/fq/streaming_optimize/py3test |62.0%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/schemeshard/ut_streaming_query/unittest |62.0%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/fq/streaming_optimize/py3test |62.0%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/schemeshard/ut_streaming_query/unittest |62.0%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/fq/streaming_optimize/py3test |62.0%| [TS] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/replication/service/ut_json_change_record/unittest |62.0%| [TS] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/replication/service/ut_json_change_record/unittest |62.0%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/mon/ut/unittest |62.0%| [TS] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/replication/service/ut_json_change_record/unittest |62.0%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/schemeshard/ut_index_build/unittest |62.0%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/schemeshard/ut_index_build/unittest |62.0%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/schemeshard/ut_index_build/unittest |62.0%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/schemeshard/ut_index_build/unittest |62.0%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/schemeshard/ut_index_build/unittest |62.0%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/blobstorage/dsproxy/ut_strategy/unittest |62.0%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/schemeshard/ut_index_build/unittest |62.0%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/schemeshard/ut_index_build/unittest |62.0%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/blobstorage/dsproxy/ut_strategy/unittest |62.0%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/kqp/ut/tx/unittest |62.0%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/blobstorage/dsproxy/ut_strategy/unittest |62.0%| [TM] {BAZEL_DOWNLOAD, 
FAILED} ydb/core/blobstorage/dsproxy/ut_strategy/unittest |62.0%| [TS] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/replication/service/ut_json_change_record/unittest |62.0%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/blobstorage/ut_blobstorage/ut_bridge/unittest |62.0%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/blobstorage/vdisk/ingress/ut/unittest |62.0%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/scheme_board/ut_replica/unittest |62.0%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/blobstorage/vdisk/ingress/ut/unittest |62.0%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/mon/ut/unittest |62.0%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/blobstorage/ut_blobstorage/ut_bridge/unittest |62.0%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/blobstorage/vdisk/ingress/ut/unittest |62.0%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/blobstorage/ut_blobstorage/ut_bridge/unittest |62.1%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/olap/scenario/py3test |62.1%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/blobstorage/ut_blobstorage/ut_bridge/unittest |62.1%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/blobstorage/vdisk/ingress/ut/unittest |62.1%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/kqp/ut/tx/unittest |62.1%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/blobstorage/ut_blobstorage/ut_bridge/unittest |62.1%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/blobstorage/ut_blobstorage/ut_bridge/unittest |62.1%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/blobstorage/vdisk/ingress/ut/unittest |62.0%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/fq/streaming_optimize/py3test |62.0%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/blobstorage/ut_blobstorage/ut_bridge/unittest |62.0%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/blobstorage/ut_blobstorage/ut_bridge/unittest |62.0%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/kqp/ut/tx/unittest |62.0%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/blobstorage/dsproxy/ut_strategy/unittest |62.0%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/mon/ut/unittest |62.0%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/olap/scenario/py3test |62.0%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/mon/ut/unittest |62.0%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/mon/ut/unittest |62.1%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/blobstorage/vdisk/ingress/ut/unittest |62.1%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/olap/scenario/py3test |62.1%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/blobstorage/vdisk/ingress/ut/unittest |62.1%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/datashard/ut_kqp/unittest |62.1%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/olap/scenario/py3test |62.1%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/mon/ut/unittest |62.1%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/olap/scenario/py3test |62.1%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/olap/scenario/py3test |62.1%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/mon/ut/unittest |62.1%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/olap/scenario/py3test |62.1%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/mind/hive/ut/unittest |62.1%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/blobstorage/dsproxy/ut_strategy/unittest |62.1%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/datashard/ut_kqp/unittest |62.1%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/mon/ut/unittest |62.1%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/blobstorage/vdisk/ingress/ut/unittest |62.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/load_test/ut/group_test_ut.cpp |62.1%| [TS] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/replication/service/ut_json_change_record/unittest |62.0%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/schemeshard/ut_index_build/unittest |62.0%| [TS] {BAZEL_DOWNLOAD, FAILED} 
ydb/core/tx/replication/service/ut_json_change_record/unittest |62.1%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/kqp/ut/tx/unittest |62.1%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/olap/scenario/py3test |62.1%| [TS] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/replication/service/ut_json_change_record/unittest |62.1%| [TS] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/replication/service/ut_json_change_record/unittest |62.1%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/blobstorage/dsproxy/ut_strategy/unittest |62.1%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/blobstorage/dsproxy/ut_strategy/unittest |62.1%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/functional/ttl/py3test |62.1%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/functional/ttl/py3test |62.1%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/schemeshard/ut_cdc_stream/unittest |62.1%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/kqp/ut/tx/unittest |62.1%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/kqp/ut/tx/unittest |62.1%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/functional/ttl/py3test |62.1%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/functional/ttl/py3test |62.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kqp/ut/effects/kqp_connection_ut.cpp |62.1%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/kqp/ut/tx/unittest |62.1%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/schemeshard/ut_index_build/unittest |62.1%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/schemeshard/ut_streaming_query/unittest |62.1%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/schemeshard/ut_continuous_backup/unittest |62.1%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/schemeshard/ut_streaming_query/unittest |62.1%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/mind/hive/ut/unittest |62.1%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/datashard/parametrized_queries/py3test |62.1%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/load_test/ut_ycsb/unittest |62.1%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/datashard/parametrized_queries/py3test |62.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/ut_blobstorage/move_pdisk.cpp |62.1%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/schemeshard/ut_continuous_backup/unittest |62.1%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/datashard/parametrized_queries/py3test |62.1%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/datashard/ttl/py3test |62.2%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/scheme_board/ut_replica/unittest |62.1%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/scheme_board/ut_replica/unittest |62.1%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/datashard/parametrized_queries/py3test |62.1%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/scheme_board/ut_replica/unittest |62.1%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/datashard/parametrized_queries/py3test |62.1%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/load_test/ut_ycsb/unittest |62.1%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/load_test/ut_ycsb/unittest |62.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/security/ldap_auth_provider/ldap_auth_provider_ut.cpp |62.1%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/load_test/ut_ycsb/unittest |62.1%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/mind/hive/ut/unittest |62.2%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/load_test/ut_ycsb/unittest |62.2%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/load_test/ut_ycsb/unittest |62.2%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/load_test/ut_ycsb/unittest |62.2%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/load_test/ut_ycsb/unittest |62.2%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/load_test/ut_ycsb/unittest |62.2%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/mind/hive/ut/unittest |62.2%| [CC] {BAZEL_DOWNLOAD} 
$(S)/ydb/core/kqp/ut/olap/bool_ut.cpp |61.9%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/functional/encryption/ydb-tests-functional-encryption |62.0%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/fq/control_plane_storage/ydb-tests-fq-control_plane_storage |62.0%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/functional/wardens/py3test |62.0%| [TA] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/schemeshard/ut_continuous_backup_reboots/test-results/unittest/{meta.json ... results_accumulator.log} |62.0%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/schemeshard/ut_extsubdomain/ydb-core-tx-schemeshard-ut_extsubdomain |62.0%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/blobstorage/dsproxy/ut/ydb-core-blobstorage-dsproxy-ut |62.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/conveyor_composite/ut/ut_simple.cpp |62.1%| [BN] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/library/stability/healthcheck/ydb_cli |62.1%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/mvp/oidc_proxy/ut/ydb-mvp-oidc_proxy-ut |62.1%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/schemeshard/ut_olap_reboots/ydb-core-tx-schemeshard-ut_olap_reboots |62.1%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/stress/cdc/tests/py3test |62.0%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/kqp/federated_query/ut/unittest |62.0%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/schemeshard/ut_bsvolume/ydb-core-tx-schemeshard-ut_bsvolume |62.0%| [TA] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/datashard/ut_incremental_restore_scan/test-results/unittest/{meta.json ... results_accumulator.log} |62.0%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/services/rate_limiter/ut/unittest |62.0%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/functional/backup/unittest |62.0%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/blobstorage/ut_blobstorage/ut_scrub/ydb-core-blobstorage-ut_blobstorage-ut_scrub |62.0%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/stress/viewer/tests/ydb-tests-stress-viewer-tests |62.0%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/blobstorage/ut_blobstorage/ut_donor/ydb-core-blobstorage-ut_blobstorage-ut_donor |62.1%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/schemeshard/ut_ttl/ydb-core-tx-schemeshard-ut_ttl |62.1%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/blobstorage/ut_blobstorage/ut_blob_depot/ydb-core-blobstorage-ut_blobstorage-ut_blob_depot |62.1%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/grpc_services/grpc_request_check_actor_ut/core-grpc_services-grpc_request_check_actor_ut |62.1%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/example/objcopy_c623700776b43ee95ec93c56f9.o |62.1%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/example/objcopy_2b682e146a665bfa19210b0fd9.o |62.1%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/schemeshard/ut_sequence_reboots/ydb-core-tx-schemeshard-ut_sequence_reboots |62.1%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/scheme_shard/objcopy_8120ef49e7e653ed0601604313.o |62.1%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/blobstorage/nodewarden/ut_sequence/unittest |62.1%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/stress/topic/tests/objcopy_df4191b43fee1a7d77acb3207f.o |61.3%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/scheme_shard/objcopy_f93c60b04a0499f2ec6880591a.o |61.3%| [TA] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/security/ut/test-results/unittest/{meta.json ... results_accumulator.log} |61.3%| [TA] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/scheme_board/ut_cache/test-results/unittest/{meta.json ... 
results_accumulator.log} |61.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/statistics/aggregator/ut/ut_analyze_datashard.cpp |61.3%| [TA] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/schemeshard/ut_backup_collection/test-results/unittest/{meta.json ... results_accumulator.log} |61.4%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/services/persqueue_v1/ut/new_schemecache_ut/ydb-services-persqueue_v1-ut-new_schemecache_ut |61.4%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/schemeshard/ut_reboots/ydb-core-tx-schemeshard-ut_reboots |61.4%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/kqp/workload_service/ut/ydb-core-kqp-workload_service-ut |61.4%| [TS] {BAZEL_DOWNLOAD, FAILED} ydb/core/config/validation/ut/unittest |61.4%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/mon/audit/ut/ydb-core-mon-audit-ut |61.4%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/blobstorage/groupinfo/ut/ydb-core-blobstorage-groupinfo-ut |61.4%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/stress/testshard_workload/tests/ydb-tests-stress-testshard_workload-tests |61.4%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/library/actors/interconnect/ut/unittest |61.4%| [TA] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/schemeshard/ut_view/test-results/unittest/{meta.json ... results_accumulator.log} |61.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kqp/ut/effects/kqp_inplace_update_ut.cpp |61.5%| [TS] {BAZEL_DOWNLOAD, FAILED} ydb/core/pgproxy/ut/unittest |61.5%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/library/ycloud/impl/ut/ydb-library-ycloud-impl-ut |61.5%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/yt/yt/core/libyt-yt-core.a |61.5%| [TS] {BAZEL_DOWNLOAD, FAILED} ydb/library/actors/async/benchmark/benchmark_actors_async |61.5%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/example/objcopy_e0aef87c4bf15cfdc957f4bdd1.o |61.5%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/library/yql/providers/pq/provider/ut/ydb-library-yql-providers-pq-provider-ut |61.4%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/functional/restarts/ydb-tests-functional-restarts |61.4%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/stress/topic/tests/objcopy_7479409fb33baf855b74c3e835.o |61.4%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/sql/py3test |61.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/statistics/aggregator/ut/ut_analyze_columnshard.cpp |61.4%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/sqs/with_quotas/objcopy_245adf3e28f56e6467e034d9f2.o |61.4%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/stress/topic/tests/objcopy_acf74a4313fbcafa6df239e3ec.o |61.4%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/statistics/service/ut/ydb-core-statistics-service-ut |61.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kqp/ut/effects/kqp_reattach_ut.cpp |61.4%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/library/persqueue/topic_parser/ut/ydb-library-persqueue-topic_parser-ut |61.4%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/scheme_shard/objcopy_d3af02c7d57ea2cbbe5d381baa.o |61.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kqp/ut/effects/kqp_overload_ut.cpp |61.5%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/viewer/tests/ydb-core-viewer-tests |61.5%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/sqs/with_quotas/objcopy_31d605682329607481eb568ed0.o |61.5%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/stress/olap_workload/tests/py3test |61.5%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/services/keyvalue/ut/unittest |61.5%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tools/tsserver/tsserver |61.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/providers/solomon/actors/ut/ut_helpers.cpp |61.5%| [PY] {BAZEL_DOWNLOAD} 
$(B)/ydb/tests/functional/sqs/with_quotas/objcopy_7648c2519d02b8456f762efc4b.o |61.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/load_test/ut_ycsb.cpp |61.5%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/schemeshard/ut_export/ydb-core-tx-schemeshard-ut_export |61.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/providers/solomon/actors/ut/dq_solomon_write_actor_ut.cpp |61.3%| [TA] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/library/ncloud/impl/ut/test-results/unittest/{meta.json ... results_accumulator.log} |61.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/actors/core/manual_test/shared_threads/main.cpp |61.3%| [TA] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/mon/audit/ut/test-results/unittest/{meta.json ... results_accumulator.log} |61.3%| [TA] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/tiering/ut/test-results/unittest/{meta.json ... results_accumulator.log} |61.3%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/base/ut/ydb-core-base-ut |61.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/statistics/aggregator/ut/ut_traverse_datashard.cpp |61.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/ut_bsvolume_reboots/ut_bsvolume_reboots.cpp |61.3%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/mon/ut_utils/libcore-mon-ut_utils.a |61.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/ut_serverless_reboots/ut_serverless_reboots.cpp |61.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/apps/ydb/ut/ydb-dump.cpp |61.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/replication/controller/stream_creator_ut.cpp |61.4%| [TA] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/schemeshard/ut_background_cleaning/test-results/unittest/{meta.json ... results_accumulator.log} |61.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/apps/ydb/ut/workload-transfer-topic-to-table.cpp |61.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/statistics/aggregator/ut/ut_traverse_columnshard.cpp |61.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kqp/ut/effects/kqp_effects_ut.cpp |61.3%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/datashard/ut_compaction/unittest |61.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/apps/ydb/ut/workload-topic.cpp |61.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/blobstorage/vdisk/repl/blobstorage_replrecoverymachine_ut.cpp |61.4%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/sdk/cpp/tests/unit/client/oauth2_token_exchange/helpers/libclient-oauth2_token_exchange-helpers.a |61.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kqp/ut/query/kqp_types_ut.cpp |61.4%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/olap/scenario/objcopy_36807918bd7a86c1ea37310c9c.o |61.4%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/olap/scenario/objcopy_656baae3c1e24959f5bcc457d7.o |61.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/keyvalue/keyvalue_ut_trace.cpp |61.4%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/Flask-Cors/py3/libpy3python-Flask-Cors-py3.global.a |61.4%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/schemeshard/ut_continuous_backup_reboots/unittest |61.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/ut_backup/ut_backup.cpp |61.4%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/schemeshard/ut_continuous_backup_reboots/unittest |61.4%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/olap/scenario/objcopy_5992d4831c5055a481712a2a80.o |61.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kqp/ut/effects/kqp_immediate_effects_ut.cpp |61.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kqp/ut/query/kqp_limits_ut.cpp |61.4%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/schemeshard/ut_continuous_backup_reboots/unittest |61.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kqp/ut/olap/compression_ut.cpp |61.4%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/datashard/ut_incremental_restore_scan/unittest |61.4%| [TM] 
{BAZEL_DOWNLOAD, FAILED} ydb/core/tx/datashard/ut_incremental_restore_scan/unittest |61.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/tx_proxy/storage_tenant_ut.cpp |61.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/mind/address_classification/net_classifier_ut.cpp |61.4%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/datashard/ut_incremental_restore_scan/unittest |61.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/ut_blobstorage/scrub.cpp |61.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/persqueue/pqtablet/partition/ut/consumer_offset_tracker_ut.cpp |61.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/tx_proxy/proxy_ut_helpers.cpp |61.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/persqueue/pqtablet/partition/ut/message_id_deduplicator_ut.cpp |61.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/ut_blobstorage/blob_depot.cpp |61.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/ut_blobstorage/blob_depot_test_functions.cpp |61.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/ut_blobstorage/blob_depot_event_managers.cpp |61.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/tx_allocator_client/actor_client_ut.cpp |61.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kqp/ut/olap/clickbench_ut.cpp |61.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/ut_blobstorage/donor.cpp |61.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/tx_allocator_client/ut_helpers.cpp |61.4%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/security/ut/unittest |61.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kqp/ut/query/kqp_stats_ut.cpp |61.5%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/schemeshard/ut_view/unittest |61.5%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/schemeshard/ut_view/unittest |61.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kqp/ut/olap/datatime64_ut.cpp |61.5%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/schemeshard/ut_view/unittest |61.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kqp/ut/query/kqp_params_ut.cpp |61.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kqp/ut/olap/dictionary_ut.cpp |61.5%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/schemeshard/ut_view/unittest |61.5%| [CC] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/comp_nodes/llvm16/mkql_sort.cpp |61.5%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/schemeshard/ut_view/unittest |61.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kqp/ut/olap/delete_ut.cpp |61.5%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/tests/tools/ydb_serializable/lib/libpy3tools-ydb_serializable-lib.global.a |61.5%| [CC] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/comp_nodes/llvm16/mkql_listfromrange.cpp |61.5%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/schemeshard/ut_view/unittest |61.5%| [SB] {default-linux-x86_64, release, asan} $(B)/ydb/tests/functional/postgresql/psql/psql |61.6%| [PB] {BAZEL_DOWNLOAD} $(B)/yt/yt_proto/yt/core/misc/proto/hyperloglog.pb.{h, cc} |61.6%| [CC] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/comp_nodes/llvm16/mkql_grace_join.cpp |61.6%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/dq/actors/protos/dq_events.pb.{h, cc} |61.6%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/dq/proto/dq_transport.pb.{h, cc} |61.6%| [PB] {BAZEL_DOWNLOAD} $(B)/yt/yt_proto/yt/core/yson/proto/protobuf_interop.pb.{h, cc} |61.6%| [PB] {BAZEL_DOWNLOAD} $(B)/yt/yt_proto/yt/core/ytree/proto/attributes.pb.{h, cc} |61.6%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/providers/dq/api/protos/task_command_executor.pb.{h, cc} |61.6%| [CC] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/comp_nodes/llvm16/mkql_queue.cpp |61.6%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/kqp/ut/olap/combinatory/libut-olap-combinatory.a |61.6%| [PB] {BAZEL_DOWNLOAD} 
$(B)/yt/yt_proto/yt/core/misc/proto/guid.pb.{h, cc} |61.5%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/tiering/ut/unittest |61.6%| [CC] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/comp_nodes/llvm16/mkql_combine.cpp |61.6%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/graph_params/proto/graph_params.pb.{h, cc} |61.6%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/encryption/objcopy_3d6916930a438b51675ef6dda7.o |61.6%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/services/deprecated/persqueue_v0/api/protos/persqueue.pb.{h, cc} |61.6%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/encryption/objcopy_93dc3386250916dfae1ecb9b13.o |61.6%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/tiering/ut/unittest |61.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/datashard/datashard_ut_change_exchange.cpp |61.6%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/tiering/ut/unittest |61.7%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/tiering/ut/unittest |61.7%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/encryption/objcopy_64cecb639c5f85fbf868097a08.o |61.7%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/tiering/ut/unittest |61.7%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/tiering/ut/unittest |61.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/tests/fq/control_plane_storage/ydb_control_plane_storage_ut.cpp |61.7%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/tiering/ut/unittest |61.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/tests/fq/control_plane_storage/in_memory_control_plane_storage_ut.cpp |61.7%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/stress/viewer/tests/objcopy_8e0f2cf91b35e6051ad82535a4.o |61.7%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/stress/viewer/tests/objcopy_e508a8abac843a0a0f92fc62eb.o |61.8%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/stress/viewer/tests/objcopy_e55498abceca534315a6428452.o |61.8%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/tests/stress/viewer/workload/libpy3stress-viewer-workload.global.a |61.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/ut_bsvolume/ut_bsvolume.cpp |61.7%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/tiering/ut/unittest |61.7%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/tiering/ut/unittest |61.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/ut_ttl/ut_ttl_utility.cpp |61.7%| [PB] {BAZEL_DOWNLOAD} $(B)/yt/yt_proto/yt/client/hive/proto/cluster_directory.pb.{h, cc} |61.7%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/protos/ydb_export.pb.{h, cc} |61.7%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/stress/testshard_workload/tests/objcopy_ee633aebbed65e39e09fbf4271.o |61.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/blobstorage/dsproxy/ut/dsproxy_patch_ut.cpp |61.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/tests/fq/control_plane_storage/ydb_control_plane_storage_connections_permissions_ut.cpp |61.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/ut_sequence_reboots/ut_sequence_reboots.cpp |61.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/blobstorage/groupinfo/blobstorage_groupinfo_ut.cpp |61.8%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/restarts/objcopy_0359848ae21601186c5b0d9873.o |61.8%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/tiering/ut/unittest |61.8%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/grpc/draft/ydb_persqueue_v1.{pb.h ... 
grpc.pb.h} |61.8%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/misc/coro_pipe.cpp |61.8%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/restarts/objcopy_277b7e8f79021687bec95be8db.o |61.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/blobstorage/groupinfo/blobstorage_groupinfo_blobmap_ut.cpp |61.8%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/core/viewer/tests/objcopy_af18efc2f04dd1af5ca802c329.o |61.8%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/core/viewer/tests/objcopy_f3c323ef80ada193284f036d44.o |61.8%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/ytree/yson_struct_update.cpp |61.8%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/core/viewer/tests/objcopy_87b299e07b15c86f4f50f458ef.o |61.8%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/ytree/yson_struct_detail.cpp |61.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/tests/fq/control_plane_storage/ydb_control_plane_storage_bindings_permissions_ut.cpp |61.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/tests/fq/control_plane_storage/ydb_control_plane_storage_connections_ut.cpp |61.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/services/persqueue_v1/ut/rate_limiter_test_setup.cpp |61.8%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/kqp_tablemetadata.{pb.h ... grpc.pb.h} |61.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/ut_ttl/ut_ttl.cpp |61.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/grpc_services/grpc_request_check_actor_ut/grpc_request_check_actor_ut.cpp |61.8%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/ytree/system_attribute_provider.cpp |61.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/services/persqueue_v1/persqueue_common_new_schemecache_ut.cpp |61.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/statistics/service/ut/ut_column_statistics.cpp |61.8%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/ytree/size.cpp |61.8%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/scheme_board/ut_cache/unittest |61.9%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/ytree/serialize.cpp |61.9%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/ytree/static_service_dispatcher.cpp |61.9%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/ytree/tree_builder.cpp |61.9%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/ytree/tree_visitor.cpp |61.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/tests/fq/control_plane_storage/ydb_control_plane_storage_bindings_ut.cpp |61.9%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/blob_depot.{pb.h ... 
grpc.pb.h} |61.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/ut_extsubdomain/ut_extsubdomain.cpp |61.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/statistics/service/ut/ut_basic_statistics.cpp |61.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/base/logoblob_ut.cpp |61.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/services/persqueue_v1/persqueue_new_schemecache_ut.cpp |61.9%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/restarts/objcopy_afdf6d60c4f76ae91a235d460b.o |61.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/providers/pq/provider/ut/yql_pq_ut.cpp |61.9%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/ytree/service_combiner.cpp |61.9%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/security/ut/unittest |61.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/statistics/service/ut/ut_http_request.cpp |61.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/tests/fq/control_plane_storage/ydb_control_plane_storage_queries_permissions_ut.cpp |61.9%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/ytree/ypath_client.cpp |61.9%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/ytree/yson_struct.cpp |61.9%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/ytree/ypath_resolver.cpp |61.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/ycloud/impl/service_account_service_ut.cpp |61.9%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/bus/tcp/packet.cpp |61.9%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/ytree/helpers.cpp |61.9%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/scheme_board/ut_cache/unittest |61.9%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/schemeshard/ut_backup_collection/unittest |61.9%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/scheme_board/ut_cache/unittest |61.9%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/concurrency/fiber_scheduler_thread.cpp |62.0%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/scheme_board/ut_cache/unittest |62.0%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/ytree/ypath_detail.cpp |62.0%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/stress/testshard_workload/tests/objcopy_6bdc69403f0fa7c03edbf19c28.o |62.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/library/actors/interconnect/rdma/cq_actor/cq_actor.cpp |62.0%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/scheme_board/ut_cache/unittest |62.0%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/ytree/ypath_service.cpp |61.9%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/ytree/attribute_consumer.cpp |61.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/persqueue/topic_parser/ut/topic_names_converter_ut.cpp |61.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/tests/fq/control_plane_storage/ydb_control_plane_storage_internal_ut.cpp |61.9%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/ytree/node_detail.cpp |61.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/ycloud/impl/user_account_service_ut.cpp |61.9%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/schemeshard/ut_backup_collection/unittest |62.0%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/ytalloc/config.cpp |62.0%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/yson/stream.cpp |62.0%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/ytalloc/bindings.cpp |62.0%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/scheme_board/ut_cache/unittest |62.0%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/scheme_board/ut_cache/unittest |62.0%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/scheme_board/ut_cache/unittest |62.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/blobstorage/groupinfo/blobstorage_groupinfo_partlayout_ut.cpp |62.0%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/yson/pull_parser.cpp |62.0%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/security/ut/unittest |62.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/mon/audit/audit_ut.cpp |62.0%| [CC] {BAZEL_DOWNLOAD} 
$(S)/yt/yt/core/misc/ref_counted_tracker_profiler.cpp |62.0%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/misc/slab_allocator.cpp |62.0%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/security/ut/unittest |62.1%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/stress/testshard_workload/tests/objcopy_d3749b17b0bc2057695c3e736a.o |62.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kqp/workload_service/ut/kqp_workload_service_tables_ut.cpp |62.1%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/config.{pb.h ... grpc.pb.h} |62.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kqp/workload_service/ut/kqp_workload_service_actors_ut.cpp |62.1%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/security/ut/unittest |62.1%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/security/ut/unittest |62.1%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/scheme_board/ut_cache/unittest |62.1%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/scheme_board/ut_cache/unittest |62.1%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/logging/serializable_logger.cpp |62.1%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/security/ut/unittest |62.1%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/rpc/response_keeper.cpp |62.1%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/net/dialer.cpp |62.1%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/schemeshard/ut_continuous_backup_reboots/unittest |62.1%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/net/config.cpp |62.1%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/datashard/ut_incremental_restore_scan/unittest |62.1%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/schemeshard/ut_view/unittest |62.1%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/net/connection.cpp |62.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/ut_reboots/ut_reboots.cpp |62.1%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/logging/logger_owner.cpp |62.1%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/misc/error.cpp |62.1%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/concurrency/fiber_manager.cpp |62.1%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/misc/digest.cpp |62.1%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/misc/checksum.cpp |62.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kqp/workload_service/ut/kqp_workload_service_ut.cpp |62.1%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/misc/bit_packed_unsigned_vector.cpp |62.1%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/misc/backtrace.cpp |62.1%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/logging/stream_output.cpp |62.1%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/logging/system_log_event_provider.cpp |62.1%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/misc/adjusted_exponential_moving_average.cpp |62.1%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/schemeshard/ut_continuous_backup_reboots/unittest |62.1%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/logging/random_access_gzip.cpp |62.1%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/misc/arithmetic_formula.cpp |62.1%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/misc/backoff_strategy.cpp |62.1%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/logging/formatter.cpp |62.1%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/logging/file_log_writer.cpp |62.1%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/schemeshard/ut_continuous_backup_reboots/unittest |62.1%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/schemeshard/ut_continuous_backup_reboots/unittest |62.1%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/schemeshard/ut_continuous_backup_reboots/unittest |62.1%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/schemeshard/ut_continuous_backup_reboots/unittest |62.1%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/security/ut/unittest |62.1%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/logging/fluent_log.cpp |62.1%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/security/ut/unittest |62.1%| 
[TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/schemeshard/ut_continuous_backup_reboots/unittest |62.1%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/security/ut/unittest |62.1%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/logging/appendable_compressed_file.cpp |62.1%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/schemeshard/ut_continuous_backup_reboots/unittest |62.1%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/schemeshard/ut_continuous_backup_reboots/unittest |62.1%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/schemeshard/ut_continuous_backup_reboots/unittest |62.1%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/schemeshard/ut_continuous_backup_reboots/unittest |62.1%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/schemeshard/ut_continuous_backup_reboots/unittest |62.1%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/schemeshard/ut_continuous_backup_reboots/unittest |62.1%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/schemeshard/ut_continuous_backup_reboots/unittest |62.1%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/schemeshard/ut_continuous_backup_reboots/unittest |62.1%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/schemeshard/ut_continuous_backup_reboots/unittest |62.2%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/concurrency/fair_share_thread_pool.cpp |62.2%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/schemeshard/ut_continuous_backup_reboots/unittest |62.1%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/schemeshard/ut_view/unittest |62.2%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/concurrency/single_queue_scheduler_thread.cpp |62.2%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/schemeshard/ut_view/unittest |62.2%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/concurrency/scheduled_executor.cpp |62.2%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/logging/config.cpp |62.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/ut_olap_reboots/ut_olap_reboots.cpp |62.2%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/compression/public.cpp |62.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/blobstorage/dsproxy/ut/dsproxy_put_ut.cpp |62.2%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/concurrency/fiber.cpp |62.2%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/concurrency/fair_share_queue_scheduler_thread.cpp |62.2%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/bus/tcp/local_bypass.cpp |62.2%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/mvp/oidc_proxy/libydb-mvp-oidc_proxy.a |62.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/mvp/oidc_proxy/openid_connect.cpp |62.2%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/concurrency/execution_stack.cpp |62.2%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/concurrency/fair_share_invoker_pool.cpp |62.2%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/concurrency/coroutine.cpp |62.2%| [UN] {default-linux-x86_64, release, asan} $(B)/ydb/tests/functional/postgresql/psql/psql |62.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/tests/fq/control_plane_storage/ydb_control_plane_storage_quotas_ut.cpp |62.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/mvp/oidc_proxy/oidc_proxy_ut.cpp |62.2%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/concurrency/async_barrier.cpp |62.2%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/concurrency/periodic_yielder.cpp |62.2%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/compression/dictionary_codec.cpp |62.2%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/concurrency/quantized_executor.cpp |62.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/blobstorage/dsproxy/ut/dsproxy_counters_ut.cpp |62.2%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/compression/brotli.cpp |62.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/blobstorage/dsproxy/ut/dsproxy_get_block_ut.cpp |62.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/blobstorage/dsproxy/ut/dsproxy_quorum_tracker_ut.cpp |62.2%| [CC] 
{BAZEL_DOWNLOAD} $(S)/yt/yt/core/compression/bzip2.cpp |62.2%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/concurrency/config.cpp |62.2%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/bus/tcp/dispatcher.cpp |62.2%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/compression/codec.cpp |62.2%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/json/helpers.cpp |62.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/blobstorage/dsproxy/ut/dsproxy_discover_ut.cpp |62.2%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/bus/tcp/connection.cpp |62.2%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/actions/invoker_util.cpp |62.2%| [LD] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/kqp/plan2svg/ydb-tests-functional-kqp-plan2svg |62.2%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/bus/tcp/client.cpp |62.2%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/actions/future.cpp |62.2%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/json/json_writer.cpp |62.2%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/bus/tcp/dispatcher_impl.cpp |62.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/blobstorage/dsproxy/ut/dsproxy_request_reporting_ut.cpp |62.2%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/actions/cancelable_context.cpp |62.2%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/actions/cancelation_token.cpp |62.2%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/actions/codicil_guarded_invoker.cpp |62.2%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/bus/public.cpp |62.2%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/actions/current_invoker.cpp |62.2%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/actions/invoker_detail.cpp |62.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/mon/audit/url_matcher_ut.cpp |62.2%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/misc/pattern_formatter.cpp |62.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/blobstorage/groupinfo/blobstorage_groupinfo_iter_ut.cpp |62.2%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/compression/zstd.cpp |62.2%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/concurrency/async_rw_lock.cpp |62.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/blobstorage/dsproxy/ut/dsproxy_get_ut.cpp |62.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/blobstorage/dsproxy/ut/dsproxy_sequence_ut.cpp |62.2%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/concurrency/pollable_detail.cpp |62.2%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/concurrency/scheduler_thread.cpp |62.2%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/concurrency/async_semaphore.cpp |62.2%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/concurrency/thread_pool.cpp |62.2%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/concurrency/notify_manager.cpp |62.2%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/concurrency/propagating_storage.cpp |62.2%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/concurrency/async_stream_pipe.cpp |62.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/ycloud/impl/folder_service_ut.cpp |62.2%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/schemeshard/ut_view/unittest |62.2%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/concurrency/fls.cpp |62.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/tests/fq/control_plane_storage/ydb_control_plane_storage_queries_ut.cpp |62.2%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/json/json_parser.cpp |62.2%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/json/json_callbacks.cpp |62.2%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/logging/log_writer_detail.cpp |62.2%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/misc/parser_helpers.cpp |62.2%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/concurrency/async_stream_helpers.cpp |62.3%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/crypto/config.cpp |62.3%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/concurrency/two_level_fair_share_thread_pool.cpp |62.2%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/actions/invoker_pool.cpp |62.2%| [CC] {BAZEL_DOWNLOAD} 
$(S)/yt/yt/core/misc/codicil.cpp |62.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/tools/tsserver/main.cpp |62.2%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/misc/histogram.cpp |62.2%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/misc/public.cpp |62.2%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/concurrency/nonblocking_batcher.cpp |62.2%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/misc/cache_config.cpp |62.2%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/dns/dns_resolver.cpp |62.2%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/misc/config.cpp |62.2%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/compression/lz.cpp |62.2%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/misc/ref_counted_tracker_statistics_producer.cpp |62.3%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/phoenix/load.cpp |62.3%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/json/config.cpp |62.3%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/compression/lzma.cpp |62.3%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/rpc/retrying_channel.cpp |62.3%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/compression/snappy.cpp |62.3%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/compression/stream.cpp |62.3%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/net/socket.cpp |62.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/ycloud/impl/access_service_ut.cpp |62.3%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/concurrency/lease_manager.cpp |62.3%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/net/listener.cpp |62.3%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/misc/zerocopy_output_writer.cpp |62.3%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/compression/zlib.cpp |62.3%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/misc/protobuf_helpers.cpp |62.3%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/concurrency/action_queue.cpp |62.3%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/rpc/authentication_identity.cpp |62.3%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/concurrency/async_looper.cpp |62.3%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/concurrency/thread_pool_poller.cpp |62.3%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/bus/tcp/config.cpp |62.3%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/bus/tcp/server.cpp |62.3%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/rpc/authenticator.cpp |62.3%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/concurrency/fair_throttler.cpp |62.3%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/concurrency/invoker_alarm.cpp |62.3%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/concurrency/invoker_queue.cpp |62.3%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/concurrency/delayed_executor.cpp |62.3%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/rpc/dispatcher.cpp |62.3%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/net/address.cpp |62.3%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/concurrency/periodic_executor.cpp |62.3%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/dns/config.cpp |62.3%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/concurrency/retrying_periodic_executor.cpp |62.3%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/concurrency/profiling_helpers.cpp |62.3%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/concurrency/throughput_throttler.cpp |62.3%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/concurrency/thread_affinity.cpp |62.3%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/rpc/local_channel.cpp |62.3%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/library/ncloud/impl/ut/unittest |62.3%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/library/ncloud/impl/ut/unittest |62.3%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/concurrency/suspendable_action_queue.cpp |62.3%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/concurrency/system_invokers.cpp |62.3%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/concurrency/fair_share_invoker_queue.cpp |62.3%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/mon/audit/ut/unittest |62.3%| [CC] {BAZEL_DOWNLOAD} 
$(S)/yt/yt/core/concurrency/thread_pool_detail.cpp |62.3%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/dns/ares_dns_resolver.cpp |62.3%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/rpc/peer_discovery.cpp |62.3%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/concurrency/fair_share_action_queue.cpp |62.3%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/crypto/crypto.cpp |62.3%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/rpc/roaming_channel.cpp |62.3%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/rpc/request_queue_provider.cpp |62.3%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/mon/audit/ut/unittest |62.3%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/rpc/overload_controller.cpp |62.3%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/misc/utf8_decoder.cpp |62.3%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/misc/memory_usage_tracker.cpp |62.3%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/mon/audit/ut/unittest |62.3%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/misc/blob_output.cpp |62.3%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/tiering/ut/unittest |62.3%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/crypto/tls.cpp |62.3%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/tiering/ut/unittest |62.3%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/logging/zstd_log_codec.cpp |62.3%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/misc/adaptive_hedging_manager.cpp |62.3%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/tiering/ut/unittest |62.3%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/logging/stream_log_writer.cpp |62.3%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/tiering/ut/unittest |62.3%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/tiering/ut/unittest |62.3%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/tracing/public.cpp |62.3%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/tiering/ut/unittest |62.3%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/misc/bloom_filter.cpp |62.3%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/tiering/ut/unittest |62.3%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/tiering/ut/unittest |62.3%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/library/ncloud/impl/ut/unittest |62.3%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/misc/linear_probe.cpp |62.4%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/misc/bitmap.cpp |62.4%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/tiering/ut/unittest |62.4%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/misc/hazard_ptr.cpp |62.4%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/tiering/ut/unittest |62.4%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/tiering/ut/unittest |62.4%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/tiering/ut/unittest |62.4%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/tiering/ut/unittest |62.4%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/rpc/viable_peer_registry.cpp |62.4%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/misc/fair_share_hierarchical_queue.cpp |62.4%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/ypath/tokenizer.cpp |62.4%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/misc/inotify.cpp |62.4%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/schemeshard/ut_background_cleaning/unittest |62.4%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/rpc/service_detail.cpp |62.4%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/misc/id_generator.cpp |62.4%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/mon/audit/ut/unittest |62.4%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/misc/fs.cpp |62.4%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/yson/attributes_stripper.cpp |62.4%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/yson/lexer.cpp |62.4%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/misc/statistic_path.cpp |62.4%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/library/ncloud/impl/ut/unittest |62.4%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/misc/process_exit_profiler.cpp |62.4%| [CC] {BAZEL_DOWNLOAD} 
$(S)/yt/yt/core/misc/pool_allocator.cpp |62.4%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/yson/null_consumer.cpp |62.4%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/misc/ref_counted_tracker.cpp |62.4%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/misc/random.cpp |62.4%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/library/ncloud/impl/ut/unittest |62.4%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/yson/protobuf_interop_unknown_fields.cpp |62.4%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/phoenix/descriptors.cpp |62.4%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/misc/relaxed_mpsc_queue.cpp |62.4%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/tiering/ut/unittest |62.4%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/yson/string_builder_stream.cpp |62.4%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/tiering/ut/unittest |62.4%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/misc/serialize_dump.cpp |62.4%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/misc/proc.cpp |62.4%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/library/ncloud/impl/ut/unittest |62.4%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/library/ncloud/impl/ut/unittest |62.4%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/library/ncloud/impl/ut/unittest |62.4%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/ypath/trie.cpp |62.4%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/misc/statistics.cpp |62.4%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/misc/serialize.cpp |62.4%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/library/ncloud/impl/ut/unittest |62.4%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/tiering/ut/unittest |62.4%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/tiering/ut/unittest |62.4%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/tiering/ut/unittest |62.4%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/mon/audit/ut/unittest |62.4%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/mon/audit/ut/unittest |62.4%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/mon/audit/ut/unittest |62.4%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/tiering/ut/unittest |62.4%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/misc/shutdown.cpp |62.4%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/tiering/ut/unittest |62.4%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/tiering/ut/unittest |62.4%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/tiering/ut/unittest |62.4%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/tiering/ut/unittest |62.4%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/misc/bit_packing.cpp |62.4%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/yson/writer.cpp |62.4%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/yson/forwarding_consumer.cpp |62.4%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/tiering/ut/unittest |62.4%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/tiering/ut/unittest |62.4%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/ytalloc/statistics_producer.cpp |62.4%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/tiering/ut/unittest |62.4%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/tiering/ut/unittest |62.4%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/net/public.cpp |62.4%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/tiering/ut/unittest |62.4%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/net/helpers.cpp |62.4%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/tiering/ut/unittest |62.5%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/ytree/attribute_filter.cpp |62.5%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/profiling/timing.cpp |62.5%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/net/local_address.cpp |62.5%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/ytree/exception_helpers.cpp |62.5%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/phoenix/type_def.cpp |62.5%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/phoenix/schemas.cpp |62.4%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/phoenix/context.cpp |62.4%| [TM] {BAZEL_DOWNLOAD, 
FAILED} ydb/core/tx/tiering/ut/unittest |62.4%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/phoenix/type_registry.cpp |62.4%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/ytree/ephemeral_node_factory.cpp |62.5%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/tiering/ut/unittest |62.5%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/rpc/public.cpp |62.5%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/rpc/channel_detail.cpp |62.5%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/rpc/caching_channel_factory.cpp |62.5%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/rpc/overload_controlling_service_base.cpp |62.5%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/rpc/throttling_channel.cpp |62.5%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/tiering/ut/unittest |62.5%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/tiering/ut/unittest |62.5%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/rpc/balancing_channel.cpp |62.5%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/rpc/client.cpp |62.5%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/tiering/ut/unittest |62.5%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/tiering/ut/unittest |62.5%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/tiering/ut/unittest |62.5%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/rpc/hedging_channel.cpp |62.4%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/tracing/allocation_tags.cpp |62.4%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/rpc/message_format.cpp |62.5%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/rpc/local_server.cpp |62.5%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/rpc/helpers.cpp |62.5%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/rpc/protocol_version.cpp |62.5%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/tiering/ut/unittest |62.5%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/rpc/null_channel.cpp |62.5%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/rpc/bus/channel.cpp |62.5%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/tiering/ut/unittest |62.5%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/tiering/ut/unittest |62.5%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/rpc/peer_priority_provider.cpp |62.5%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/rpc/per_key_request_queue_provider.cpp |62.5%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/yson/protobuf_interop_options.cpp |62.5%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/ypath/token.cpp |62.5%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/rpc/service.cpp |62.5%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/rpc/dynamic_channel_pool.cpp |62.5%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/rpc/static_channel_factory.cpp |62.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/ut_export/ut_export_fs.cpp |62.5%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/rpc/serialized_channel.cpp |62.5%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/ypath/stack.cpp |62.5%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/rpc/bus/server.cpp |62.5%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/threading/thread.cpp |62.5%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/service_discovery/service_discovery.cpp |62.5%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/utilex/random.cpp |62.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/base/localdb_ut.cpp |62.5%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/threading/spin_wait_slow_path_logger.cpp |62.5%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/schemeshard/ut_background_cleaning/unittest |62.5%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/tiering/ut/unittest |62.5%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/rpc/message.cpp |62.5%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/tiering/ut/unittest |62.5%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/yson/consumer.cpp |62.5%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/rpc/server_detail.cpp |62.5%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/tiering/ut/unittest |62.5%| [CC] 
{BAZEL_DOWNLOAD} $(S)/yt/yt/core/yson/async_consumer.cpp |62.5%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/schemeshard/ut_background_cleaning/unittest |62.5%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/rpc/config.cpp |62.5%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/tiering/ut/unittest |62.5%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/yson/attribute_consumer.cpp |62.5%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/rpc/stream.cpp |62.5%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/tiering/ut/unittest |62.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/base/ut/memory_stats_ut.cpp |62.5%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/tiering/ut/unittest |62.5%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/yson/async_writer.cpp |62.5%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/tiering/ut/unittest |62.5%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/library/ncloud/impl/ut/unittest |62.5%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/tiering/ut/unittest |62.6%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/tiering/ut/unittest |62.6%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/yson/config.cpp |62.6%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/tiering/ut/unittest |62.6%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/tiering/ut/unittest |62.6%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/yson/depth_limiting_yson_consumer.cpp |62.6%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/mon/audit/ut/unittest |62.3%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/yson/protobuf_helpers.cpp |62.3%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/mon/audit/ut/unittest |62.4%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/mon/audit/ut/unittest |62.4%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/library/yql/providers/dq/actors/ut/ydb-library-yql-providers-dq-actors-ut |62.4%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/misc/configurable_singleton_def.cpp |62.4%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/yson/list_verb_lazy_yson_consumer.cpp |62.4%| [TA] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/balance_coverage/ut/test-results/unittest/{meta.json ... results_accumulator.log} |62.4%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/cms/console/ut_console/ydb-core-cms-console-ut_console |62.4%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/yson/protobuf_interop.cpp |62.4%| [TA] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/blobstorage/vdisk/common/ut/test-results/unittest/{meta.json ... results_accumulator.log} |62.4%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/functional/hive/ydb-tests-functional-hive |62.4%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/services/ydb/backup_ut/ydb-services-ydb-backup_ut |62.4%| [TA] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/datashard/ut_snapshot/test-results/unittest/{meta.json ... 
results_accumulator.log} |62.4%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/schemeshard/ut_resource_pool_reboots/ydb-core-tx-schemeshard-ut_resource_pool_reboots |62.5%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/yson/producer.cpp |62.5%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/datashard/ut_incremental_restore_scan/ydb-core-tx-datashard-ut_incremental_restore_scan |62.5%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/datashard/ut_sequence/ydb-core-tx-datashard-ut_sequence |62.5%| [TS] {BAZEL_DOWNLOAD, FAILED} ydb/core/external_sources/object_storage/inference/ut/gtest |62.5%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/blobstorage/ut_blobstorage/ut_osiris/ydb-core-blobstorage-ut_blobstorage-ut_osiris |62.5%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/datashard/ut_minikql/ydb-core-tx-datashard-ut_minikql |62.5%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/scheme_board/ut_cache/ydb-core-tx-scheme_board-ut_cache |62.5%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/library/yql/dq/opt/ut/ydb-library-yql-dq-opt-ut |62.3%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/schemeshard/ut_backup_collection_reboots/tx-schemeshard-ut_backup_collection_reboots |62.4%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/persqueue/public/mlp/ut/unittest |62.4%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/yson/token.cpp |62.4%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/yson/tokenizer.cpp |62.4%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/yson/string_merger.cpp |62.4%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/ypath/helpers.cpp |62.4%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/schemeshard/ut_split_merge_reboots/ydb-core-tx-schemeshard-ut_split_merge_reboots |62.4%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/yson/token_writer.cpp |62.4%| [TS] {BAZEL_DOWNLOAD, FAILED} ydb/core/backup/common/ut/unittest |62.4%| [TA] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/schemeshard/ut_column_build/test-results/unittest/{meta.json ... results_accumulator.log} |62.4%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/yson/string_filter.cpp |62.4%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/yson/parser.cpp |62.4%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/yson/yson_builder.cpp |62.4%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/yson/syntax_checker.cpp |62.5%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/ytree/ephemeral_attribute_owner.cpp |62.5%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/yson/ypath_filtering_consumer.cpp |62.5%| [TS] {BAZEL_DOWNLOAD, FAILED} ydb/core/persqueue/common/ut/unittest |62.5%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/yson/ypath_designated_consumer.cpp |62.5%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/yson/string.cpp |62.5%| [TA] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/persqueue/dread_cache_service/ut/test-results/unittest/{meta.json ... results_accumulator.log} |62.5%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/ytree/convert.cpp |62.5%| [TA] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/public/lib/ydb_cli/topic/ut/test-results/unittest/{meta.json ... results_accumulator.log} |62.5%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/schemeshard/ut_index_build_reboots/ydb-core-tx-schemeshard-ut_index_build_reboots |62.5%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/yson/pull_parser_deserialize.cpp |62.4%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/olap/oom/ydb-tests-olap-oom |62.4%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/ytree/permission.cpp |62.4%| [TA] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tablet_flat/ut_pg/test-results/unittest/{meta.json ... 
results_accumulator.log} |62.4%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/ytree/node.cpp |62.4%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/datashard/ut_disk_quotas/unittest |62.4%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/ytree/interned_attributes.cpp |62.5%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/schemeshard/ut_secret_reboots/ydb-core-tx-schemeshard-ut_secret_reboots |62.5%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/ytree/attributes.cpp |62.5%| [TS] {BAZEL_DOWNLOAD, FAILED} ydb/library/yql/providers/s3/object_listers/ut/unittest |62.5%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/ytree/request_complexity_limits.cpp |62.5%| [BN] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/stability/tool/ydb_cli |62.5%| [BN] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/stability/tool/simple_queue |62.5%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tools/partcheck/partcheck |62.5%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/library/yaml_config/tools/dump/yaml-to-proto-dump |62.6%| [BN] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/stability/tool/cfg |62.3%| [BN] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/stability/tool/topic_workload |62.3%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/datashard/ut_change_collector/ydb-core-tx-datashard-ut_change_collector |62.4%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/ytree/request_complexity_limiter.cpp |62.4%| [TS] {BAZEL_DOWNLOAD, FAILED} ydb/core/kqp/gateway/ut/gtest |62.4%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/graph/ut/ydb-core-graph-ut |62.4%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/kqp/ut/close_with_load/ydb-core-kqp-ut-close_with_load |62.4%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/public/sdk/cpp/src/client/persqueue_public/ut/ydb-public-sdk-cpp-src-client-persqueue_public-ut |62.4%| [TA] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/schemeshard/ut_ttl/test-results/unittest/{meta.json ... results_accumulator.log} |62.4%| [TA] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/public/sdk/cpp/tests/integration/sessions_pool/test-results/gtest/{meta.json ... results_accumulator.log} |62.4%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/library/actors/testlib/ut/ydb-library-actors-testlib-ut |62.1%| [TA] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/services/metadata/initializer/ut/test-results/unittest/{meta.json ... results_accumulator.log} |62.1%| [TA] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/blobstorage/ut_blobstorage/ut_statestorage/test-results/unittest/{meta.json ... results_accumulator.log} |62.1%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/kqp/ut/sysview/ydb-core-kqp-ut-sysview |62.1%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/stress/transfer/transfer |62.1%| [TA] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/schemeshard/ut_secret/test-results/unittest/{meta.json ... results_accumulator.log} |62.1%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/tiering/ut/ydb-core-tx-tiering-ut |62.2%| [TA] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/cms/console/ut_configs_dispatcher/test-results/unittest/{meta.json ... 
results_accumulator.log} |62.2%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/mind/bscontroller/ut_bscontroller/ydb-core-mind-bscontroller-ut_bscontroller |62.2%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/public/sdk/cpp/src/client/topic/ut/slow/ydb-public-sdk-cpp-src-client-topic-ut-slow |62.2%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/blobstorage/vdisk/huge/ut/ydb-core-blobstorage-vdisk-huge-ut |62.2%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/ymq/base/ut/ydb-core-ymq-base-ut |62.2%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/balance_coverage/ut/unittest |62.2%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/balance_coverage/ut/unittest |62.2%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/balance_coverage/ut/unittest |62.2%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/balance_coverage/ut/unittest |62.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/ut_export/ut_export.cpp |62.2%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/datashard/ut_snapshot/unittest |62.2%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/datashard/ut_snapshot/unittest |62.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/base/ut/kmeans_ut.cpp |62.2%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/datashard/ut_snapshot/unittest |62.3%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/blobstorage/vdisk/common/ut/unittest |62.3%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/blobstorage/vdisk/common/ut/unittest |62.3%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/blobstorage/vdisk/common/ut/unittest |62.3%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/blobstorage/vdisk/common/ut/unittest |62.2%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/ytree/virtual.cpp |62.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/base/blobstorage_grouptype_ut.cpp |62.3%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/balance_coverage/ut/unittest |62.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/base/statestorage_guardian_impl_ut.cpp |62.3%| [PB] {BAZEL_DOWNLOAD} $(B)/yt/yt_proto/yt/core/ytree/proto/request_complexity_limits.pb.{h, cc} |62.3%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/schemeshard/ut_column_build/unittest |62.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/base/ut/path_ut.cpp |62.3%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/schemeshard/ut_column_build/unittest |62.3%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/persqueue/dread_cache_service/ut/unittest |62.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/base/statestorage_ut.cpp |62.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/base/ut/table_index_ut.cpp |62.3%| [TS] {BAZEL_DOWNLOAD, FAILED} ydb/core/tablet_flat/ut_pg/unittest |62.2%| [TS] {BAZEL_DOWNLOAD, FAILED} ydb/core/tablet_flat/ut_pg/unittest |62.2%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/persqueue/dread_cache_service/ut/unittest |62.2%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/persqueue/dread_cache_service/ut/unittest |62.2%| [PR] {BAZEL_DOWNLOAD} $(B)/ydb/core/base/generated/runtime_feature_flags.h |62.2%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/persqueue/dread_cache_service/ut/unittest |62.3%| [TS] {BAZEL_DOWNLOAD, FAILED} ydb/core/tablet_flat/ut_pg/unittest |62.3%| [TS] {BAZEL_DOWNLOAD, FAILED} ydb/core/tablet_flat/ut_pg/unittest |62.3%| [TS] {BAZEL_DOWNLOAD, FAILED} ydb/core/tablet_flat/ut_pg/unittest |62.3%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/public/lib/ydb_cli/topic/ut/unittest |62.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/base/ut/fulltext_ut.cpp |62.3%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/persqueue/dread_cache_service/ut/unittest |62.3%| [TS] {BAZEL_DOWNLOAD, FAILED} ydb/core/tablet_flat/ut_pg/unittest |62.3%| [TS] {BAZEL_DOWNLOAD, FAILED} ydb/core/tablet_flat/ut_pg/unittest |62.3%| [TM] {BAZEL_DOWNLOAD, FAILED} 
ydb/core/persqueue/dread_cache_service/ut/unittest |62.3%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/balance_coverage/ut/unittest |62.3%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/public/lib/ydb_cli/topic/ut/unittest |62.3%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/public/lib/ydb_cli/topic/ut/unittest |62.3%| [TS] {BAZEL_DOWNLOAD, FAILED} ydb/core/tablet_flat/ut_pg/unittest |62.3%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/public/lib/ydb_cli/topic/ut/unittest |62.3%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/public/lib/ydb_cli/topic/ut/unittest |62.4%| [TS] {BAZEL_DOWNLOAD, FAILED} ydb/core/tablet_flat/ut_pg/unittest |62.4%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/persqueue/dread_cache_service/ut/unittest |62.4%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/public/lib/ydb_cli/topic/ut/unittest |62.4%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/public/lib/ydb_cli/topic/ut/unittest |62.4%| [TS] {BAZEL_DOWNLOAD, FAILED} ydb/core/tablet_flat/ut_pg/unittest |62.4%| [PR] {BAZEL_DOWNLOAD} $(B)/ydb/core/control/lib/generated/control_board_proto.h |62.4%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/schemeshard/ut_ttl/unittest |62.4%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/schemeshard/ut_ttl/unittest |62.4%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/public/sdk/cpp/tests/integration/sessions_pool/gtest |62.4%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/public/sdk/cpp/tests/integration/sessions_pool/gtest |62.4%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/public/sdk/cpp/tests/integration/sessions_pool/gtest |62.4%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/public/sdk/cpp/tests/integration/sessions_pool/gtest |62.4%| [PB] {BAZEL_DOWNLOAD} $(B)/yt/yt_proto/yt/core/misc/proto/error.pb.{h, cc} |62.4%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/services/metadata/initializer/ut/unittest |62.5%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/services/metadata/initializer/ut/unittest |62.5%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/blobstorage/ut_blobstorage/ut_statestorage/unittest |62.5%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/services/metadata/initializer/ut/unittest |62.5%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/blobstorage/ut_blobstorage/ut_statestorage/unittest |62.5%| [LD] {BAZEL_DOWNLOAD} $(B)/ydb/tests/tools/pq_read/pq_read |62.5%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/blobstorage/ut_blobstorage/ut_statestorage/unittest |62.5%| [CC] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/comp_nodes/llvm16/mkql_next_value.cpp |62.5%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/library/yaml_config/protos/config.pb.{h, cc} |62.5%| [PB] {BAZEL_DOWNLOAD} $(B)/library/cpp/monlib/encode/legacy_protobuf/protos/metric_meta.pb.{h, cc} |62.5%| [CC] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/comp_nodes/llvm16/mkql_chopper.cpp |62.5%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/config/protos/audit.pb.{h, cc} |62.5%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/config/protos/nodes_manager.pb.{h, cc} |62.5%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/config/protos/quotas_manager.pb.{h, cc} |62.5%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/schemeshard/ut_secret/unittest |62.5%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/schemeshard/ut_secret/unittest |62.5%| [CC] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/comp_nodes/llvm16/mkql_block_decimal.cpp |62.5%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/library/yaml_config/protos/blobstorage_config.pb.{h, cc} |62.5%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/services/metadata/initializer/ut/unittest |62.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/providers/dq/actors/actors_ut.cpp |62.5%| [CC] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/comp_nodes/llvm16/mkql_tobytes.cpp |62.5%| [TM] {BAZEL_DOWNLOAD, FAILED} 
ydb/services/metadata/initializer/ut/unittest |62.5%| [CC] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/comp_nodes/llvm16/mkql_block_compress.cpp |62.5%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/services/metadata/initializer/ut/unittest |62.5%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/protos/draft/ydb_tablet.pb.{h, cc} |62.5%| [CC] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/comp_nodes/llvm16/mkql_map_join.cpp |62.5%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/services/metadata/initializer/ut/unittest |62.5%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/services/metadata/initializer/ut/unittest |62.5%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/common/protos/blob_range.pb.{h, cc} |62.5%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/services/metadata/initializer/ut/unittest |62.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/cms/console/jaeger_tracing_configurator_ut.cpp |62.5%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/services/metadata/initializer/ut/unittest |62.5%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/config/protos/issue_id.pb.{h, cc} |62.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/cms/console/immediate_controls_configurator_ut.cpp |62.5%| [CC] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/comp_nodes/llvm16/mkql_block_agg_sum.cpp |62.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/cms/console/net_classifier_updater_ut.cpp |62.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/ut_resource_pool_reboots/ut_resource_pool_reboots.cpp |62.5%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/protos/ydb_table.pb.{h, cc} |62.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/cms/console/feature_flags_configurator_ut.cpp |62.5%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/schemeshard/ut_ttl/unittest |62.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/services/ydb/backup_ut/list_objects_in_s3_export_ut.cpp |62.5%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/providers/s3/proto/retry_config.pb.{h, cc} |62.6%| [LD] {BAZEL_DOWNLOAD} $(B)/yql/essentials/udfs/examples/callables/libcallables_udf.so |62.5%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/services/metadata/initializer/ut/unittest |62.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/ut_backup_collection_reboots/ut_backup_collection_reboots.cpp |62.5%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/services/metadata/initializer/ut/unittest |62.5%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/grpc/draft/dummy.{pb.h ... grpc.pb.h} |62.5%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/services/metadata/initializer/ut/unittest |62.5%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/counters_schemeshard.{pb.h ... grpc.pb.h} |62.5%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/schemeshard/ut_ttl/unittest |62.5%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/schemeshard/ut_ttl/unittest |62.6%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/config_metrics.{pb.h ... grpc.pb.h} |62.6%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/olap/oom/objcopy_a0543c2dc30365e9b2ad3d0ca6.o |62.6%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/schemeshard/ut_ttl/unittest |62.6%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/olap/oom/objcopy_df0cb3f315162a3110ee243ecd.o |62.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/cms/console/modifications_validator_ut.cpp |62.6%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/schemeshard/ut_ttl/unittest |62.6%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/maintenance.{pb.h ... 
grpc.pb.h} |62.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/ut_secret_reboots/ut_secret_reboots.cpp |62.6%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/schemeshard/ut_ttl/unittest |62.5%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/schemeshard/ut_ttl/unittest |62.5%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/schemeshard/ut_ttl/unittest |62.5%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/schemeshard/ut_ttl/unittest |62.5%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/schemeshard/ut_ttl/unittest |62.5%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/schemeshard/ut_ttl/unittest |62.5%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/schemeshard/ut_ttl/unittest |62.5%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/schemeshard/ut_ttl/unittest |62.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/providers/dq/actors/grouped_issues_ut.cpp |62.5%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/schemeshard/ut_ttl/unittest |62.5%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/olap/oom/objcopy_e0331f455507fe5ac3b71d0537.o |62.5%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/schemeshard/ut_ttl/unittest |62.5%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/schemeshard/ut_ttl/unittest |62.6%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/schemeshard/ut_ttl/unittest |62.6%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/schemeshard/ut_ttl/unittest |62.6%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/counters.{pb.h ... grpc.pb.h} |62.6%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/config_units.{pb.h ... grpc.pb.h} |62.6%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/schemeshard/ut_ttl/unittest |62.6%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/schemeshard/ut_ttl/unittest |62.6%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/schemeshard/ut_ttl/unittest |62.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/tools/partcheck/main.cpp |62.6%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/schemeshard/ut_ttl/unittest |62.6%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/schemeshard/ut_ttl/unittest |62.6%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/schemeshard/ut_ttl/unittest |62.6%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/schemeshard/ut_ttl/unittest |62.6%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/schemeshard/ut_ttl/unittest |62.6%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/schemeshard/ut_ttl/unittest |62.6%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/schemeshard/ut_ttl/unittest |62.6%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/schemeshard/ut_ttl/unittest |62.6%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/schemeshard/ut_ttl/unittest |62.6%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/schemeshard/ut_ttl/unittest |62.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yaml_config/tools/dump/main.cpp |62.6%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/schemeshard/ut_ttl/unittest |62.6%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/schemeshard/ut_ttl/unittest |62.6%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/schemeshard/ut_ttl/unittest |62.6%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/schemeshard/ut_ttl/unittest |62.6%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/schemeshard/ut_ttl/unittest |62.6%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/schemeshard/ut_ttl/unittest |62.6%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/schemeshard/ut_ttl/unittest |62.6%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/schemeshard/ut_ttl/unittest |62.6%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/schemeshard/ut_ttl/unittest |62.6%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/blobstorage_disk_color.{pb.h ... 
grpc.pb.h} |62.6%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/schemeshard/ut_ttl/unittest |62.6%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/schemeshard/ut_ttl/unittest |62.6%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/schemeshard/ut_ttl/unittest |62.6%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/schemeshard/ut_ttl/unittest |62.6%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/schemeshard/ut_ttl/unittest |62.6%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/schemeshard/ut_ttl/unittest |62.6%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/schemeshard/ut_ttl/unittest |62.6%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/schemeshard/ut_ttl/unittest |62.6%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/schemeshard/ut_ttl/unittest |62.6%| [LD] {BAZEL_DOWNLOAD} $(B)/ydb/core/transfer/ut/functional/ydb-core-transfer-ut-functional |62.6%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/schemeshard/ut_ttl/unittest |62.6%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/services/metadata/initializer/ut/unittest |62.6%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/schemeshard/ut_ttl/unittest |62.6%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/services/metadata/initializer/ut/unittest |62.6%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/services/metadata/initializer/ut/unittest |62.6%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/schemeshard/ut_ttl/unittest |62.6%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/public/sdk/cpp/tests/integration/sessions_pool/gtest |62.6%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/blobstorage_base.{pb.h ... grpc.pb.h} |62.6%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/schemeshard/ut_ttl/unittest |62.6%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/shared_cache.{pb.h ... grpc.pb.h} |62.6%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/schemeshard/ut_ttl/unittest |62.7%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/public/sdk/cpp/tests/integration/sessions_pool/gtest |62.7%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/services/metadata/initializer/ut/unittest |62.7%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/services/metadata/initializer/ut/unittest |62.7%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/services/metadata/initializer/ut/unittest |62.6%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/services/metadata/initializer/ut/unittest |62.6%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/services/metadata/initializer/ut/unittest |62.6%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/services/metadata/initializer/ut/unittest |62.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/cms/console/console_ut_tenants.cpp |62.6%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/services/metadata/initializer/ut/unittest |62.6%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/services/metadata/initializer/ut/unittest |62.6%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/services/metadata/initializer/ut/unittest |62.6%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/services/metadata/initializer/ut/unittest |62.6%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/tests/stress/transfer/libpy3transfer.global.a |62.6%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/tablet_tx.{pb.h ... 
grpc.pb.h} |62.6%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/services/metadata/initializer/ut/unittest |62.6%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/services/metadata/initializer/ut/unittest |62.6%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/services/metadata/initializer/ut/unittest |62.6%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/services/metadata/initializer/ut/unittest |62.7%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/services/metadata/initializer/ut/unittest |62.7%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/services/metadata/initializer/ut/unittest |62.7%| [LD] {BAZEL_DOWNLOAD} $(B)/yql/essentials/udfs/logs/dsv/libdsv_udf.so |62.7%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/services/metadata/initializer/ut/unittest |62.7%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/grpc/draft/ydb_dynamic_config_v1.{pb.h ... grpc.pb.h} |62.7%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/counters_testshard.{pb.h ... grpc.pb.h} |62.7%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/services/metadata/initializer/ut/unittest |62.7%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/grpc/ydb_export_v1.{pb.h ... grpc.pb.h} |62.7%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/services/metadata/initializer/ut/unittest |62.7%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/services/metadata/initializer/ut/unittest |62.7%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/stress/transfer/objcopy_b632f28ee823f938d14c0e85f9.o |62.7%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/services/metadata/initializer/ut/unittest |62.7%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/services/metadata/initializer/ut/unittest |62.7%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/services/metadata/initializer/ut/unittest |62.7%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/services/metadata/initializer/ut/unittest |62.7%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/auth.{pb.h ... grpc.pb.h} |62.7%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/services/metadata/initializer/ut/unittest |62.7%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/services/metadata/initializer/ut/unittest |62.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/ymq/base/ut/secure_protobuf_printer_ut.cpp |62.7%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/grpc/ydb_keyvalue_v1.{pb.h ... grpc.pb.h} |62.7%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/services/metadata/initializer/ut/unittest |62.7%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/services/metadata/initializer/ut/unittest |62.7%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/services/metadata/initializer/ut/unittest |62.7%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/client/nc_private/audit/v1/common/action_type.{pb.h ... grpc.pb.h} |62.7%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/services/metadata/initializer/ut/unittest |62.7%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/services/metadata/initializer/ut/unittest |62.7%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/bind_channel_storage_pool.{pb.h ... grpc.pb.h} |62.7%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/services/metadata/initializer/ut/unittest |62.7%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/services/metadata/initializer/ut/unittest |62.7%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/services/metadata/initializer/ut/unittest |62.7%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/services/metadata/initializer/ut/unittest |62.7%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/grpc.{pb.h ... grpc.pb.h} |62.7%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/services/metadata/initializer/ut/unittest |62.7%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/console.{pb.h ... 
grpc.pb.h} |62.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kqp/ut/sysview/kqp_sys_view_ut.cpp |62.7%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/services/metadata/initializer/ut/unittest |62.7%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/services/metadata/initializer/ut/unittest |62.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/cms/console/console_ut_configs.cpp |62.7%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/services/metadata/initializer/ut/unittest |62.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kqp/ut/sysview/kqp_sys_col_ut.cpp |62.7%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/services/metadata/initializer/ut/unittest |62.7%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/schemeshard/ut_secret/unittest |62.7%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/services/metadata/initializer/ut/unittest |62.7%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/blobstorage/ut_blobstorage/ut_statestorage/unittest |62.7%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/services/metadata/initializer/ut/unittest |62.7%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/services/metadata/initializer/ut/unittest |62.7%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/services/metadata/initializer/ut/unittest |62.7%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/schemeshard/ut_secret/unittest |62.7%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/blobstorage/ut_blobstorage/ut_statestorage/unittest |62.7%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/blobstorage/ut_blobstorage/ut_statestorage/unittest |62.7%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/schemeshard/ut_secret/unittest |62.7%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/schemeshard/ut_secret/unittest |62.7%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/schemeshard/ut_secret/unittest |62.7%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/schemeshard/ut_secret/unittest |62.7%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/schemeshard/ut_secret/unittest |62.7%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/schemeshard/ut_secret/unittest |62.7%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/cms/console/ut_configs_dispatcher/unittest |62.7%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/cms/console/ut_configs_dispatcher/unittest |62.7%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/cms/console/ut_configs_dispatcher/unittest |62.7%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/cms/console/ut_configs_dispatcher/unittest |62.7%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/cms/console/ut_configs_dispatcher/unittest |62.7%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/cms/console/ut_configs_dispatcher/unittest |62.7%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/console_base.{pb.h ... grpc.pb.h} |62.7%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/cms/console/ut_configs_dispatcher/unittest |62.8%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/cms/console/ut_configs_dispatcher/unittest |62.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/mind/bscontroller/ut_bscontroller/main.cpp |62.8%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/cms/console/ut_configs_dispatcher/unittest |62.8%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/blobstorage/ut_blobstorage/ut_statestorage/unittest |62.8%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/subdomains.{pb.h ... 
grpc.pb.h} |62.8%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/cms/console/ut_configs_dispatcher/unittest |62.8%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/public/sdk/cpp/tests/integration/sessions_pool/gtest |62.8%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/blobstorage/ut_blobstorage/ut_statestorage/unittest |62.7%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/blobstorage/ut_blobstorage/ut_statestorage/unittest |62.7%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/public/sdk/cpp/tests/integration/sessions_pool/gtest |62.7%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/persqueue/dread_cache_service/ut/unittest |62.7%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/blobstorage/ut_blobstorage/ut_statestorage/unittest |62.7%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/public/sdk/cpp/tests/integration/sessions_pool/gtest |62.7%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/public/lib/ydb_cli/topic/ut/unittest |62.7%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/public/sdk/cpp/tests/integration/sessions_pool/gtest |62.7%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/schemeshard/ut_ttl/unittest |62.7%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/schemeshard/ut_ttl/unittest |62.7%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/schemeshard/ut_ttl/unittest |62.8%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/schemeshard/ut_ttl/unittest |62.8%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/public/lib/ydb_cli/topic/ut/unittest |62.8%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/public/lib/ydb_cli/topic/ut/unittest |62.8%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/persqueue/dread_cache_service/ut/unittest |62.8%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/schemeshard/ut_column_build/unittest |62.8%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/schemeshard/ut_column_build/unittest |62.8%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/schemeshard/ut_column_build/unittest |62.8%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/balance_coverage/ut/unittest |62.8%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/persqueue/dread_cache_service/ut/unittest |62.8%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/schemeshard/ut_column_build/unittest |62.8%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/sqs.{pb.h ... 
grpc.pb.h} |62.8%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/schemeshard/ut_column_build/unittest |62.8%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/balance_coverage/ut/unittest |62.8%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/balance_coverage/ut/unittest |62.8%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/schemeshard/ut_column_build/unittest |62.8%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/schemeshard/ut_column_build/unittest |62.8%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/schemeshard/ut_column_build/unittest |62.8%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/balance_coverage/ut/unittest |62.8%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/blobstorage/vdisk/common/ut/unittest |62.8%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/blobstorage/vdisk/common/ut/unittest |62.8%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/blobstorage/vdisk/common/ut/unittest |62.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/tx/datashard/datashard_ut_incremental_restore_scan.cpp |62.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/ut_blobstorage/osiris.cpp |62.8%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/datashard/ut_snapshot/unittest |62.8%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/datashard/ut_snapshot/unittest |62.8%| [LD] {BAZEL_DOWNLOAD} $(B)/yql/essentials/udfs/common/yson2/libyson2_udf.so |62.8%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/blobstorage/vdisk/common/ut/unittest |62.8%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/datashard/ut_snapshot/unittest |62.8%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/blobstorage/vdisk/common/ut/unittest |62.8%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/blobstorage/vdisk/common/ut/unittest |62.8%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/blobstorage/vdisk/hulldb/fresh/ut/ydb-core-blobstorage-vdisk-hulldb-fresh-ut |62.5%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/kqp/ut/data/ydb-core-kqp-ut-data |62.5%| [TA] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/kqp/ut/effects/test-results/unittest/{meta.json ... results_accumulator.log} |62.5%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/stress/mixedpy/tests/py3test |62.5%| [TA] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/tx_proxy/ut_ext_tenant/test-results/unittest/{meta.json ... results_accumulator.log} |62.5%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/library/yql/providers/solomon/actors/ut/unittest |62.5%| [TA] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/cms/console/ut_console/test-results/unittest/{meta.json ... results_accumulator.log} |62.6%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/columnshard/splitter/ut/ydb-core-tx-columnshard-splitter-ut |62.6%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/services/ydb/ut/ydb-services-ydb-ut |62.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/public/sdk/cpp/src/client/persqueue_public/ut/compression_ut.cpp |62.7%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/kqp/provider/ut/ydb-core-kqp-provider-ut |62.4%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/public/sdk/cpp/tests/integration/bulk_upsert/gtest |62.4%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/schemeshard/ut_index/ydb-core-tx-schemeshard-ut_index |62.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/public/sdk/cpp/src/client/persqueue_public/ut/compress_executor_ut.cpp |62.4%| [TA] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/functional/cms/test-results/py3test/{meta.json ... 
results_accumulator.log} |62.4%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/schemeshard/ut_column_build/ydb-core-tx-schemeshard-ut_column_build |62.4%| [TS] {BAZEL_DOWNLOAD, FAILED} ydb/library/yql/providers/dq/runtime/ut/unittest |62.4%| [TA] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/blobstorage/vdisk/synclog/ut/test-results/unittest/{meta.json ... results_accumulator.log} |62.5%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/services/ydb/table_split_ut/ydb-services-ydb-table_split_ut |62.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/public/sdk/cpp/src/client/persqueue_public/ut/basic_usage_ut.cpp |62.5%| [TA] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/scheme_board/ut_subscriber/test-results/unittest/{meta.json ... results_accumulator.log} |62.5%| [TA] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/replication/controller/ut_assign_tx_id/test-results/unittest/{meta.json ... results_accumulator.log} |62.5%| [TS] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/sequenceshard/ut/unittest |62.5%| [TA] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/datashard/ut_upload_rows/test-results/unittest/{meta.json ... results_accumulator.log} |62.5%| [TA] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/datashard/ut_minstep/test-results/unittest/{meta.json ... results_accumulator.log} |62.5%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/blobstorage/ut_blobstorage/ut_balancing/ydb-core-blobstorage-ut_blobstorage-ut_balancing |62.5%| [TA] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/functional/limits/test-results/py3test/{meta.json ... results_accumulator.log} |62.5%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/wrappers/ut/ydb-core-wrappers-ut |62.5%| [TS] {BAZEL_DOWNLOAD, FAILED} ydb/library/yql/providers/s3/actors/ut/unittest |62.5%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/mvp/oidc_proxy/bin/mvp_oidc_proxy |62.5%| [TA] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/services/persqueue_cluster_discovery/ut/test-results/unittest/{meta.json ... results_accumulator.log} |62.5%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/replication/service/ut_table_writer/ydb-core-tx-replication-service-ut_table_writer |62.5%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/blobstorage/ut_blobstorage/ut_blob_depot_fat/blobstorage-ut_blobstorage-ut_blob_depot_fat |62.3%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/public/lib/ydb_cli/topic/ut/ydb-public-lib-ydb_cli-topic-ut |62.3%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/functional/limits/ydb-tests-functional-limits |62.3%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/scheme/ut_pg/ydb-core-scheme-ut_pg |62.3%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/datashard/ut_disk_quotas/ydb-core-tx-datashard-ut_disk_quotas |62.3%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/datashard/build_index/ut/unittest |62.3%| [TA] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/schemeshard/ut_consistent_copy_tables/test-results/unittest/{meta.json ... results_accumulator.log} |62.4%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/stress/topic_kafka/tests/py3test |62.4%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/services/ext_index/ut/ydb-services-ext_index-ut |62.4%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/stress/simple_queue/tests/ydb-tests-stress-simple_queue-tests |62.4%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/blobstorage/ut_blobstorage/ut_comp_defrag/core-blobstorage-ut_blobstorage-ut_comp_defrag |62.4%| [TA] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/statistics/aggregator/ut/test-results/unittest/{meta.json ... 
results_accumulator.log} |62.4%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/library/yaml_config/ut_transform/ydb-library-yaml_config-ut_transform |62.2%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/blobstorage/storagepoolmon/ut/ydb-core-blobstorage-storagepoolmon-ut |62.3%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/apps/etcd_proxy/etcd_proxy |62.3%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/datashard/ut_followers/unittest |62.3%| [TA] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/schemeshard/ut_topic_splitmerge/test-results/unittest/{meta.json ... results_accumulator.log} |62.3%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/tools/nemesis/driver/nemesis |62.3%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/cms/ut_sentinel_unstable/unittest |62.3%| [TA] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/sys_view/partition_stats/ut/test-results/unittest/{meta.json ... results_accumulator.log} |62.3%| [TS] {BAZEL_DOWNLOAD, FAILED} ydb/library/yql/providers/dq/actors/ut/unittest |62.3%| [TA] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/statistics/database/ut/test-results/unittest/{meta.json ... results_accumulator.log} |62.3%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/datashard/partitioning/ydb-tests-datashard-partitioning |62.4%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/kqp/ut/rbo/ydb-core-kqp-ut-rbo |62.2%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/functional/sdk/cpp/sdk_credprovider/unittest |62.2%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/functional/ttl/ydb-tests-functional-ttl |62.2%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/persqueue/pqtablet/quota/ut/unittest |62.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/ymq/base/ut/queue_attributes_ut.cpp |62.3%| [TA] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/kqp/ut/idx_test/test-results/unittest/{meta.json ... results_accumulator.log} |62.3%| [TA] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/functional/rename/test-results/py3test/{meta.json ... results_accumulator.log} |62.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/blobstorage/vdisk/huge/blobstorage_hullhuge_ut.cpp |62.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/ymq/base/ut/action_ut.cpp |62.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/blobstorage/vdisk/huge/blobstorage_hullhugeheap_ctx_ut.cpp |62.3%| [TA] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/datashard/s3/test-results/py3test/{meta.json ... 
results_accumulator.log} |62.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/tiering/ut/ut_object.cpp |62.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/ymq/base/ut/dlq_helpers_ut.cpp |62.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/ymq/base/ut/helpers_ut.cpp |62.3%| [TS] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/scheme_board/ut_double_indexed/unittest |62.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/blobstorage/vdisk/huge/top_ut.cpp |62.3%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/library/ncloud/impl/ut/ydb-library-ncloud-impl-ut |62.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/public/sdk/cpp/src/client/topic/ut/slow/txusage_slow_ut.cpp |62.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/public/sdk/cpp/src/client/persqueue_public/ut/common_ut.cpp |62.3%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/kqp/ut/effects/unittest |62.3%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/kqp/ut/effects/unittest |62.3%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/kqp/ut/effects/unittest |62.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/actors/testlib/decorator_ut.cpp |62.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/blobstorage/vdisk/huge/blobstorage_hullhugeheap_ut.cpp |62.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kqp/ut/close_with_load/kqp_cwl_qs.cpp |62.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/tiering/ut/ut_tiers.cpp |62.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/ut_index_build_reboots/ut_index_build_reboots.cpp |62.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kqp/ut/close_with_load/kqp_cwl.cpp |62.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/ut_index_build_reboots/ut_fulltext_index_build_reboots.cpp |62.4%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/tx_proxy/ut_ext_tenant/unittest |62.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/scheme_board/ut_helpers.cpp |62.3%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/cms/console/ut_console/unittest |62.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/ymq/base/ut/counters_ut.cpp |62.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/graph/ut/graph_ut.cpp |62.3%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/cms/console/ut_console/unittest |62.4%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/hive/objcopy_48884f6b745ced4d3e78997cb1.o |62.4%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/tx_proxy/ut_ext_tenant/unittest |62.4%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/kqp/ut/effects/unittest |62.4%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/kqp/ut/effects/unittest |62.4%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/hive/objcopy_aebf7c73fcaf6a54715cc177c8.o |62.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/dq/opt/ut/dq_opt_interesting_orderings_ut.cpp |62.4%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/hive/objcopy_5333c1912ecbac0f64ff97551f.o |62.4%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/cms/console/ut_console/unittest |62.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/public/sdk/cpp/src/client/persqueue_public/ut/retry_policy_ut.cpp |62.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/services/ydb/backup_ut/fs_backup_validation_ut.cpp |62.4%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/cms/console/ut_console/unittest |62.4%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/cms/console/ut_console/unittest |62.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/ut_split_merge_reboots/ut_split_merge_reboots.cpp |62.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/cms/console/log_settings_configurator_ut.cpp |62.4%| [CC] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/comp_nodes/llvm16/mkql_null.cpp |62.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/dq/opt/ut/dq_cbo_ut.cpp |62.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/datashard/ut_sequence/datashard_ut_sequence.cpp |62.4%| [CC] {BAZEL_DOWNLOAD} 
$(S)/ydb/core/tx/datashard/datashard_ut_minikql.cpp |62.4%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/cms/console/ut_console/unittest |62.4%| [CC] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/comp_nodes/llvm16/mkql_check_args.cpp |62.4%| [CC] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/comp_nodes/llvm16/mkql_fromstring.cpp |62.4%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/cms/console/ut_console/unittest |62.5%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/tx_proxy/ut_ext_tenant/unittest |62.5%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/cms/console/ut_console/unittest |62.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/datashard/datashard_ut_change_collector.cpp |62.4%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/cms/console/ut_console/unittest |62.4%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/services/persqueue_cluster_discovery/ut/unittest |62.4%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/cms/console/ut_console/unittest |62.4%| [PB] {BAZEL_DOWNLOAD} $(B)/yt/yt_proto/yt/core/ytree/proto/ypath.pb.{h, cc} |62.4%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/services/persqueue_cluster_discovery/ut/unittest |62.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/services/ydb/backup_ut/encrypted_backup_ut.cpp |62.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/scheme_board/cache_ut.cpp |62.4%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/schemeshard/ut_topic_splitmerge/unittest |62.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/dq/opt/ut/dq_opt_hypergraph_ut.cpp |62.4%| [PB] {BAZEL_DOWNLOAD} $(B)/yql/essentials/protos/yql_mount.pb.{h, cc} |62.5%| [CC] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/comp_nodes/llvm16/mkql_if.cpp |62.5%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/tx_proxy/ut_ext_tenant/unittest |62.5%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/tx_proxy/ut_ext_tenant/unittest |62.5%| [CC] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/comp_nodes/llvm16/mkql_rh_hash.cpp |62.5%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/statistics/aggregator/ut/unittest |62.5%| [CC] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/comp_nodes/llvm16/mkql_chain1_map.cpp |62.5%| [CC] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/comp_nodes/llvm16/mkql_block_func.cpp |62.5%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/providers/dq/api/protos/service.pb.{h, cc} |62.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/services/ydb/backup_ut/backup_path_ut.cpp |62.5%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/sys_view/partition_stats/ut/unittest |62.4%| [CC] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/comp_nodes/llvm16/mkql_decimal_mod.cpp |62.5%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/schemeshard/ut_topic_splitmerge/unittest |62.5%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/protos/ydb_operation.pb.{h, cc} |62.5%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/statistics/database/ut/unittest |62.5%| [CC] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/comp_nodes/llvm16/mkql_ifpresent.cpp |62.5%| [CC] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/comp_nodes/llvm16/mkql_multimap.cpp |62.5%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/statistics/database/ut/unittest |62.5%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/grpc_services/cancelation/protos/event.pb.{h, cc} |62.5%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/library/login/protos/login.pb.{h, cc} |62.5%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/statistics/database/ut/unittest |62.5%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/kqp/ut/effects/unittest |62.5%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/tx_proxy/ut_ext_tenant/unittest |62.5%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/tx_proxy/ut_ext_tenant/unittest |62.5%| [PB] {BAZEL_DOWNLOAD} 
$(B)/ydb/core/fq/libs/config/protos/resource_manager.pb.{h, cc} |62.5%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/sys_view/partition_stats/ut/unittest |62.5%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/tx_proxy/ut_ext_tenant/unittest |62.5%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/tx_proxy/ut_ext_tenant/unittest |62.5%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/schemeshard/ut_topic_splitmerge/unittest |62.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/blobstorage/vdisk/hulldb/fresh/fresh_appendix_ut.cpp |62.5%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/protos/draft/ydb_maintenance.pb.{h, cc} |62.5%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/protos/draft/ydb_view.pb.{h, cc} |62.5%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/datashard/s3/py3test |62.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/blobstorage/vdisk/hulldb/fresh/fresh_segment_ut.cpp |62.5%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/datashard/s3/py3test |62.5%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/datashard/s3/py3test |62.5%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/datashard/s3/py3test |62.5%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/tx_proxy/ut_ext_tenant/unittest |62.5%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/datashard/s3/py3test |62.5%| [PB] {BAZEL_DOWNLOAD} $(B)/yql/essentials/utils/fetch/proto/fetch_config.pb.{h, cc} |62.6%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/datashard/s3/py3test |62.6%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/datashard/s3/py3test |62.6%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/datashard/s3/py3test |62.6%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/protos/draft/ydb_object_storage.pb.{h, cc} |62.6%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/datashard/s3/py3test |62.6%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/statistics/aggregator/ut/unittest |62.6%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/datashard/s3/py3test |62.6%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/protos/ydb_persqueue_v1.pb.{h, cc} |62.6%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/datashard/s3/py3test |62.5%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/datashard/s3/py3test |62.5%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/grpc/fq_private_v1.{pb.h ... grpc.pb.h} |62.5%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/grpc/ydb_rate_limiter_v1.{pb.h ... grpc.pb.h} |62.5%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/functional/rename/py3test |62.5%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/grpc/ydb_config_v1.{pb.h ... grpc.pb.h} |62.5%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/functional/rename/py3test |62.5%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/functional/rename/py3test |62.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/public/sdk/cpp/src/client/persqueue_public/ut/read_session_ut.cpp |62.5%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/functional/rename/py3test |62.6%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/schemeshard/ut_topic_splitmerge/unittest |62.6%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/schemeshard/ut_topic_splitmerge/unittest |62.6%| [LD] {BAZEL_DOWNLOAD} $(B)/tools/rescompiler/rescompiler |62.6%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/statistics/aggregator/ut/unittest |62.6%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/blobstorage_vdisk_config.{pb.h ... 
grpc.pb.h} |62.6%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/statistics/aggregator/ut/unittest |62.6%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/statistics/aggregator/ut/unittest |62.6%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/statistics/aggregator/ut/unittest |62.6%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/functional/rename/py3test |62.6%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/statistics/aggregator/ut/unittest |62.6%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/datashard_load.{pb.h ... grpc.pb.h} |62.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/blobstorage/storagepoolmon/ut/storagepoolmon_ut.cpp |62.6%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/tests/tools/nemesis/driver/libpy3nemesis.global.a |62.6%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/statistics/aggregator/ut/unittest |62.6%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/statistics/aggregator/ut/unittest |62.6%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/statistics/aggregator/ut/unittest |62.6%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/statistics/aggregator/ut/unittest |62.6%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/whiteboard_flags.{pb.h ... grpc.pb.h} |62.6%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/statistics/aggregator/ut/unittest |62.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/services/ydb/backup_ut/ydb_backup_ut.cpp |62.7%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/statistics/aggregator/ut/unittest |62.7%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/statistics/aggregator/ut/unittest |62.7%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/statistics/aggregator/ut/unittest |62.6%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/statistics/aggregator/ut/unittest |62.6%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/schemeshard/ut_consistent_copy_tables/unittest |62.6%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/statistics/aggregator/ut/unittest |62.6%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/statistics/aggregator/ut/unittest |62.6%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/statistics/aggregator/ut/unittest |62.6%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/statistics/aggregator/ut/unittest |62.6%| [LD] {BAZEL_DOWNLOAD} $(B)/yql/essentials/udfs/common/file/libfile_udf.so |62.6%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/statistics/aggregator/ut/unittest |62.6%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/statistics/aggregator/ut/unittest |62.6%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/datashard/ut_upload_rows/unittest |62.6%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/statistics/aggregator/ut/unittest |62.6%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/replication/controller/ut_assign_tx_id/unittest |62.6%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/statistics/aggregator/ut/unittest |62.6%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/statistics/aggregator/ut/unittest |62.6%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/statistics/aggregator/ut/unittest |62.6%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/statistics/aggregator/ut/unittest |62.6%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/statistics/aggregator/ut/unittest |62.6%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/msgbus.{pb.h ... 
grpc.pb.h} |62.7%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/statistics/aggregator/ut/unittest |62.7%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/schemeshard/ut_consistent_copy_tables/unittest |62.7%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/schemeshard/ut_consistent_copy_tables/unittest |62.7%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/blobstorage/vdisk/synclog/ut/unittest |62.7%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/blobstorage/vdisk/synclog/ut/unittest |62.7%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/functional/limits/py3test |62.7%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/scheme_board/ut_subscriber/unittest |62.6%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/functional/limits/py3test |62.6%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/functional/limits/py3test |62.7%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/functional/limits/py3test |62.7%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/blobstorage/vdisk/synclog/ut/unittest |62.7%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/datashard/ut_minstep/unittest |62.7%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/functional/cms/py3test |62.7%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/datashard/ut_upload_rows/unittest |62.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/mvp/oidc_proxy/bin/main.cpp |62.7%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/functional/limits/py3test |62.7%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/replication/controller/ut_assign_tx_id/unittest |62.7%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/functional/limits/py3test |62.7%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/functional/limits/py3test |62.7%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/functional/cms/py3test |62.7%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/replication/controller/ut_assign_tx_id/unittest |62.7%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/schemeshard/ut_consistent_copy_tables/unittest |62.7%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/replication/controller/ut_assign_tx_id/unittest |62.7%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/scheme_board/ut_subscriber/unittest |62.7%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/schemeshard/ut_consistent_copy_tables/unittest |62.7%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/scheme_board/ut_subscriber/unittest |62.7%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/functional/cms/py3test |62.8%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/scheme_board/ut_subscriber/unittest |62.8%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/scheme_board/ut_subscriber/unittest |62.8%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/scheme_board/ut_subscriber/unittest |62.8%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/scheme_board/ut_subscriber/unittest |62.8%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/scheme_board/ut_subscriber/unittest |62.8%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/functional/cms/py3test |62.7%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/scheme_board/ut_subscriber/unittest |62.7%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/blobstorage/vdisk/synclog/ut/unittest |62.7%| [LD] {BAZEL_DOWNLOAD} $(B)/yql/essentials/udfs/examples/type_inspection/libtype_inspection_udf.so |62.7%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/schemeshard/ut_consistent_copy_tables/unittest |62.7%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/blobstorage/vdisk/synclog/ut/unittest |62.7%| [LD] {BAZEL_DOWNLOAD} $(B)/yql/essentials/udfs/common/streaming/libstreaming_udf.so |62.7%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/blobstorage/vdisk/synclog/ut/unittest |62.7%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/kqp/ut/effects/unittest |62.8%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/datashard/ut_upload_rows/unittest |62.8%| [TM] 
{BAZEL_DOWNLOAD, FAILED} ydb/core/blobstorage/vdisk/synclog/ut/unittest |62.8%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/blobstorage/vdisk/synclog/ut/unittest |62.8%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/functional/cms/py3test |62.8%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/statistics/aggregator/ut/unittest |62.8%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/functional/cms/py3test |62.8%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/kqp/ut/effects/unittest |62.8%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/kqp/ut/effects/unittest |62.8%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/functional/cms/py3test |62.8%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/statistics/aggregator/ut/unittest |62.8%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/functional/cms/py3test |62.8%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/functional/cms/py3test |62.8%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/functional/cms/py3test |62.8%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/functional/cms/py3test |62.8%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/functional/cms/py3test |62.8%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/functional/cms/py3test |62.8%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/services/persqueue_cluster_discovery/ut/unittest |62.8%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/replication/controller/ut_assign_tx_id/unittest |62.8%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/scheme_board/ut_subscriber/unittest |62.8%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/schemeshard/ut_consistent_copy_tables/unittest |62.8%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/functional/cms/py3test |62.8%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/kqp/ut/effects/unittest |62.8%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/schemeshard/ut_consistent_copy_tables/unittest |62.8%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/kqp/ut/idx_test/unittest |62.8%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/functional/cms/py3test |62.8%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/schemeshard/ut_topic_splitmerge/unittest |62.8%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/functional/cms/py3test |62.8%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/statistics/aggregator/ut/unittest |62.8%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/functional/cms/py3test |62.8%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/functional/cms/py3test |62.8%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/functional/cms/py3test |62.8%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/limits/objcopy_14c03c6aecffbe39cb01ddf2ed.o |62.8%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/functional/cms/py3test |62.8%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/functional/cms/py3test |62.9%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/datashard/s3/py3test |62.9%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/kqp/ut/effects/unittest |62.9%| [LD] {BAZEL_DOWNLOAD} $(B)/yql/essentials/udfs/common/reservoir_sampling/libreservoir_sampling_udf.so |62.9%| [LD] {BAZEL_DOWNLOAD} $(B)/yql/essentials/udfs/common/hyperloglog/libhyperloglog_udf.so |62.9%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/kqp/ut/effects/unittest |62.9%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/sys_view/partition_stats/ut/unittest |62.9%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/kqp/ut/effects/unittest |62.9%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/kqp/ut/effects/unittest |62.9%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/kqp/ut/effects/unittest |62.9%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/kqp/ut/effects/unittest |62.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/services/ydb/ydb_bulk_upsert_olap_ut.cpp |62.9%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/statistics/aggregator/ut/unittest |62.9%| [TM] {BAZEL_DOWNLOAD, FAILED} 
ydb/core/statistics/aggregator/ut/unittest |62.9%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/statistics/aggregator/ut/unittest |62.9%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/statistics/aggregator/ut/unittest |62.9%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/functional/cms/py3test |62.9%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/statistics/aggregator/ut/unittest |62.9%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/kqp/ut/effects/unittest |62.9%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/statistics/aggregator/ut/unittest |62.9%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/statistics/aggregator/ut/unittest |62.9%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/statistics/aggregator/ut/unittest |62.9%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/statistics/aggregator/ut/unittest |62.9%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/kqp/ut/idx_test/unittest |62.9%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/statistics/aggregator/ut/unittest |62.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kqp/ut/data/kqp_read_null_ut.cpp |62.9%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/statistics/aggregator/ut/unittest |62.9%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/statistics/aggregator/ut/unittest |62.9%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/statistics/aggregator/ut/unittest |62.9%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/kqp/ut/effects/unittest |62.9%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/kqp/ut/effects/unittest |62.9%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/kqp/ut/effects/unittest |62.9%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/statistics/database/ut/unittest |62.9%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/functional/cms/py3test |62.9%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/functional/rename/py3test |62.9%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/kqp/ut/effects/unittest |62.9%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/schemeshard/ut_topic_splitmerge/unittest |62.9%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/kqp/ut/idx_test/unittest |62.9%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/kqp/ut/effects/unittest |62.9%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/functional/cms/py3test |62.9%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/kqp/ut/effects/unittest |62.9%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/kqp/ut/effects/unittest |62.9%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/schemeshard/ut_topic_splitmerge/unittest |62.9%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/kqp/ut/effects/unittest |62.9%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/functional/rename/py3test |62.9%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/kqp/ut/effects/unittest |62.9%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/functional/rename/py3test |62.9%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/functional/rename/py3test |62.9%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/statistics/aggregator/ut/unittest |62.9%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/kqp/ut/effects/unittest |62.9%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/datashard/s3/py3test |62.9%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/sys_view/partition_stats/ut/unittest |62.9%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/datashard/s3/py3test |62.9%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/datashard/s3/py3test |62.9%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/kqp/ut/effects/unittest |62.9%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/kqp/ut/effects/unittest |62.9%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/kqp/ut/effects/unittest |62.9%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/kqp/ut/effects/unittest |62.9%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/kqp/ut/effects/unittest |62.9%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/datashard/s3/py3test |62.9%| [CC] {BAZEL_DOWNLOAD} 
$(S)/ydb/core/blobstorage/vdisk/hulldb/fresh/snap_vec_ut.cpp |62.9%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/kqp/ut/effects/unittest |62.9%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/datashard/s3/py3test |62.9%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/datashard/s3/py3test |62.9%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/datashard/s3/py3test |62.9%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/kqp/ut/effects/unittest |62.9%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/datashard/s3/py3test |62.9%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/kqp/ut/idx_test/unittest |62.9%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/datashard/s3/py3test |62.9%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/datashard/s3/py3test |62.9%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/kqp/ut/idx_test/unittest |62.9%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/sys_view/partition_stats/ut/unittest |62.9%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/schemeshard/ut_topic_splitmerge/unittest |62.9%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/kqp/ut/effects/unittest |62.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/columnshard/splitter/ut/ut_splitter.cpp |62.9%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/sys_view/partition_stats/ut/unittest |63.0%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/datashard/s3/py3test |63.0%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/statistics/aggregator/ut/unittest |63.0%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/datashard/s3/py3test |63.0%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/kqp/ut/effects/unittest |63.0%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/datashard/s3/py3test |63.0%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/sys_view/partition_stats/ut/unittest |63.0%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/datashard/s3/py3test |63.0%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/kqp/ut/effects/unittest |63.0%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/sys_view/partition_stats/ut/unittest |63.0%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/functional/rename/py3test |62.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/public/lib/ydb_cli/topic/topic_write_ut.cpp |62.9%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/statistics/database/ut/unittest |62.9%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/kqp/ut/effects/unittest |62.9%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/sys_view/partition_stats/ut/unittest |62.9%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/kqp/ut/effects/unittest |63.0%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/schemeshard/ut_topic_splitmerge/unittest |63.0%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/statistics/database/ut/unittest |63.0%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/statistics/aggregator/ut/unittest |63.0%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/statistics/database/ut/unittest |63.0%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/statistics/database/ut/unittest |63.0%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/statistics/aggregator/ut/unittest |63.0%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/statistics/database/ut/unittest |63.0%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/statistics/aggregator/ut/unittest |63.0%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/statistics/database/ut/unittest |63.0%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/statistics/aggregator/ut/unittest |63.0%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/sys_view/partition_stats/ut/unittest |63.0%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/statistics/aggregator/ut/unittest |63.0%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/statistics/aggregator/ut/unittest |63.0%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/statistics/aggregator/ut/unittest |63.0%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/statistics/aggregator/ut/unittest |63.0%| [TM] {BAZEL_DOWNLOAD, 
FAILED} ydb/core/tx/schemeshard/ut_consistent_copy_tables/unittest |63.0%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/statistics/aggregator/ut/unittest |63.0%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/statistics/aggregator/ut/unittest |63.0%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/statistics/aggregator/ut/unittest |63.0%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/statistics/aggregator/ut/unittest |63.0%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/services/persqueue_cluster_discovery/ut/unittest |63.0%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/services/persqueue_cluster_discovery/ut/unittest |63.0%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/statistics/aggregator/ut/unittest |63.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/ut_blobstorage/comp_defrag.cpp |63.0%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/schemeshard/ut_consistent_copy_tables/unittest |63.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/ut_blobstorage/blob_depot_fat.cpp |63.0%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/ttl/objcopy_965640ca94893d27c182c611e2.o |63.0%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/services/persqueue_cluster_discovery/ut/unittest |63.0%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/datashard/ut_upload_rows/unittest |63.0%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/functional/limits/py3test |63.0%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/replication/controller/ut_assign_tx_id/unittest |63.0%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/datashard/ut_upload_rows/unittest |63.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/ut_blobstorage/blob_depot_event_managers.cpp |63.0%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/functional/limits/py3test |63.0%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/services/persqueue_cluster_discovery/ut/unittest |63.0%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/blobstorage/vdisk/synclog/ut/unittest |63.0%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/replication/controller/ut_assign_tx_id/unittest |63.0%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/functional/cms/py3test |63.0%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/functional/cms/py3test |63.0%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/services/persqueue_cluster_discovery/ut/unittest |63.0%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/services/persqueue_cluster_discovery/ut/unittest |63.0%| [LD] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/replication/ydb-tests-functional-replication |63.0%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/services/persqueue_cluster_discovery/ut/unittest |63.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/ut_blobstorage/balancing.cpp |63.0%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/datashard/ut_minstep/unittest |63.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/ut_blobstorage/blob_depot_test_functions.cpp |63.0%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/functional/limits/py3test |63.0%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/replication/controller/ut_assign_tx_id/unittest |63.0%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/blobstorage/vdisk/synclog/ut/unittest |63.0%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/replication/controller/ut_assign_tx_id/unittest |63.0%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/tools/nemesis/driver/objcopy_81ae81681ce2388a653cfa5ba3.o |63.0%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/functional/cms/py3test |63.0%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/functional/cms/py3test |63.0%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/functional/cms/py3test |63.0%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/functional/cms/py3test |63.0%| [TM] {BAZEL_DOWNLOAD, FAILED} 
ydb/core/tx/replication/controller/ut_assign_tx_id/unittest |63.0%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/kqp/ut/effects/unittest |63.1%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/kqp/ut/effects/unittest |63.1%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/datashard/partitioning/objcopy_265d7fd505d52534f38ea6fb7f.o |63.1%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/kqp/ut/effects/unittest |63.1%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/kqp/ut/effects/unittest |63.1%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/kqp/ut/effects/unittest |62.9%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/kqp/ut/effects/unittest |62.9%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/kqp/ut/effects/unittest |62.9%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/kqp/ut/effects/unittest |62.9%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/kqp/ut/effects/unittest |62.9%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/kqp/ut/effects/unittest |62.9%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/kqp/ut/effects/unittest |62.9%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/stress/ctas/tests/ydb-tests-stress-ctas-tests |62.9%| [TA] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/mind/bscontroller/ut_bscontroller/test-results/unittest/{meta.json ... results_accumulator.log} |62.9%| [LD] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/kqp/kqp_query_svc/ydb-tests-functional-kqp-kqp_query_svc |62.9%| [TA] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/functional/serverless/test-results/py3test/{meta.json ... results_accumulator.log} |63.0%| [TA] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/schemeshard/ut_serverless/test-results/unittest/{meta.json ... results_accumulator.log} |63.0%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/datashard/ut_kqp_errors/ydb-core-tx-datashard-ut_kqp_errors |62.9%| [BN] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/stability/tool/nemesis |62.9%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/kqp/ut/arrow/ydb-core-kqp-ut-arrow |62.9%| [TA] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/blobstorage/vdisk/repl/ut/test-results/unittest/{meta.json ... results_accumulator.log} |62.9%| [BN] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/stress/log/workload/ydb_cli |62.9%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/schemeshard/ut_restore/ydb-core-tx-schemeshard-ut_restore |62.9%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/functional/blobstorage/ydb-tests-functional-blobstorage |62.9%| [BN] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/stress/testshard_workload/workload/tsserver |62.9%| [TA] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/keyvalue/ut/test-results/unittest/{meta.json ... results_accumulator.log} |62.9%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/datashard/ut_read_table/ydb-core-tx-datashard-ut_read_table |63.0%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/library/actors/interconnect/rdma/ut_mem_pool_limit_128/gtest |63.0%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/schemeshard/ut_shred_reboots/ydb-core-tx-schemeshard-ut_shred_reboots |62.7%| [TA] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/public/sdk/cpp/tests/integration/sessions/test-results/gtest/{meta.json ... results_accumulator.log} |62.7%| [TA] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/cms/ut/test-results/unittest/{meta.json ... results_accumulator.log} |62.7%| [TA] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/functional/script_execution/test-results/py3test/{meta.json ... results_accumulator.log} |62.7%| [TA] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/base/ut/test-results/unittest/{meta.json ... 
results_accumulator.log} |62.7%| [TS] {BAZEL_DOWNLOAD, FAILED} ydb/mvp/oidc_proxy/ut/unittest |62.7%| [TS] {BAZEL_DOWNLOAD, FAILED} ydb/library/actors/async/ut/unittest |62.7%| [TA] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/tx_allocator/ut/test-results/unittest/{meta.json ... results_accumulator.log} |62.8%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/datashard/ut_column_stats/unittest |62.8%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tablet_flat/ut/ydb-core-tablet_flat-ut |62.8%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/ttl/objcopy_c068ee86eb127df13256bfbe45.o |62.8%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/datashard/ut_column_stats/ydb-core-tx-datashard-ut_column_stats |62.8%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/blobstorage/ut_mirror3of4/unittest |62.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/blobstorage/vdisk/hulldb/fresh/fresh_data_ut.cpp |62.8%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/limits/objcopy_40779f0570229cef213050a4fa.o |62.8%| [TA] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/blobstorage/vdisk/hulldb/base/ut/test-results/unittest/{meta.json ... results_accumulator.log} |62.7%| [TA] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/public/sdk/cpp/src/client/persqueue_public/ut/with_offset_ranges_mode_ut/test-results/unittest/{meta.json ... results_accumulator.log} |62.7%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/kqp/ut/view/unittest |62.7%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/ttl/objcopy_0aefef587c181350d3a25f70e0.o |62.7%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/library/yql/dq/runtime/ut/ydb-library-yql-dq-runtime-ut |62.7%| [TA] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/services/cms/ut/test-results/unittest/{meta.json ... results_accumulator.log} |62.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/ncloud/impl/access_service_ut.cpp |62.8%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/datashard/partitioning/objcopy_a52eb3c900a84eaad86a211549.o |62.8%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/client/server/ut/ydb-core-client-server-ut |62.8%| [TA] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/datashard/ut_kqp_scan/test-results/unittest/{meta.json ... results_accumulator.log} |62.8%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/datashard/partitioning/objcopy_40226ff8497733c6e798ee3940.o |62.7%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/core/kqp/ut/rbo/objcopy_8a4839ce59e28719331c9dfe7e.o |62.7%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/persqueue/ut/slow/ydb-core-persqueue-ut-slow |62.7%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/kqp/common/result_set_format/ut/ydb-core-kqp-common-result_set_format-ut |62.7%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/persqueue/public/mlp/ut/ydb-core-persqueue-public-mlp-ut |62.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/apps/etcd_proxy/service/libapps-etcd_proxy-service.global.a |62.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kqp/ut/rbo/kqp_rbo_yql_ut.cpp |62.8%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/library/yaml_config/ut_transform/objcopy_342e8590e41686b18307d054a9.o |62.8%| [TA] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/cms/console/validators/ut/test-results/unittest/{meta.json ... 
results_accumulator.log} |62.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/apps/etcd_proxy/main.cpp |62.8%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/stress/simple_queue/tests/objcopy_e66920085df69f6f7e41547063.o |62.8%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/blobstorage/ut_blobstorage/ut_check_integrity/ut_blobstorage-ut_check_integrity |62.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kqp/ut/rbo/kqp_rbo_pg_ut.cpp |62.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/apps/etcd_proxy/proxy.cpp |62.7%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/library/yaml_config/ut_transform/objcopy_c693478edc1220e7a9143567d1.o |62.8%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/stress/simple_queue/tests/objcopy_2492aafb6862566a2398c9f27e.o |62.8%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/library/yaml_config/ut_transform/objcopy_b5b36403e069f48d06f8367722.o |62.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/datashard/datashard_ut_disk_quotas.cpp |62.8%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/stress/simple_queue/tests/objcopy_3df021aac8504049c53286aea0.o |62.8%| [TS] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/schemeshard/ut_ru_calculator/unittest |62.8%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/limits/objcopy_d52256d4fa9895f38df6030445.o |62.8%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/datashard/ut_sequence/unittest |62.8%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/blob_depot/ut/ydb-core-blob_depot-ut |62.8%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/blobstorage/ut_mirror3of4/ydb-core-blobstorage-ut_mirror3of4 |62.8%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/library/actors/interconnect/ut_rdma/ydb-library-actors-interconnect-ut_rdma |62.8%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/external_sources/s3/ut/ydb-core-external_sources-s3-ut |62.8%| [TA] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/schemeshard/ut_stats/test-results/unittest/{meta.json ... 
results_accumulator.log} |62.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/scheme/ut_pg/scheme_tablecell_pg_ut.cpp |62.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/services/ext_index/ut/ut_ext_index.cpp |62.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/ut_index/ut_unique_index.cpp |62.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/columnshard/splitter/ut/batch_slice.cpp |62.8%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/mind/bscontroller/ut_bscontroller/unittest |62.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/public/lib/ydb_cli/topic/topic_write.cpp |62.8%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/functional/serverless/py3test |62.8%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/schemeshard/ut_serverless/unittest |62.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/replication/service/table_writer_ut.cpp |62.8%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/functional/serverless/py3test |62.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/ut_index/ut_fulltext_index.cpp |62.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/public/lib/ydb_cli/topic/topic_read_ut.cpp |62.9%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/functional/serverless/py3test |62.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/services/ydb/ydb_read_rows_ut.cpp |62.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/wrappers/s3_wrapper_ut.cpp |62.8%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/blobstorage/vdisk/repl/ut/unittest |62.8%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/blobstorage/vdisk/repl/ut/unittest |62.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/ut_index/ut_vector_index.cpp |62.9%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/blobstorage/vdisk/repl/ut/unittest |62.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/ut_index/ut_async_index.cpp |62.9%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/blobstorage/vdisk/repl/ut/unittest |62.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/services/ydb/ydb_ldap_login_ut.cpp |62.9%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/keyvalue/ut/unittest |62.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/services/ydb/ydb_object_storage_ut.cpp |62.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/services/ydb/ydb_table_split_ut.cpp |62.9%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/keyvalue/ut/unittest |62.9%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/keyvalue/ut/unittest |62.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/services/ydb/ydb_scripting_ut.cpp |62.9%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/keyvalue/ut/unittest |62.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kqp/provider/yql_kikimr_provider_ut.cpp |62.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kqp/provider/read_attributes_utils_ut.cpp |62.9%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/cms/ut/unittest |62.9%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/base/ut/unittest |62.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/services/ydb/ydb_import_ut.cpp |62.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/services/ydb/ydb_monitoring_ut.cpp |62.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/services/ydb/ydb_index_table_ut.cpp |62.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/services/ydb/ydb_bulk_upsert_ut.cpp |62.9%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/base/ut/unittest |62.9%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/base/ut/unittest |62.9%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/base/ut/unittest |62.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/services/ydb/ydb_query_ut.cpp |62.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/services/ydb/ydb_login_ut.cpp |62.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/ut_column_build/ut_column_build.cpp |62.9%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/base/ut/unittest |62.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/services/ydb/ydb_register_node_ut.cpp |62.9%| [TM] {BAZEL_DOWNLOAD, FAILED} 
ydb/tests/functional/script_execution/py3test |62.9%| [CC] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/comp_nodes/llvm16/mkql_aggrcount.cpp |62.9%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/apps/etcd_proxy/proto/libetcd-grpc.a |62.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/services/ydb/ydb_stats_ut.cpp |63.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/services/ydb/ydb_olapstore_ut.cpp |63.0%| [CC] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/comp_nodes/llvm16/mkql_discard.cpp |63.0%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/blobstorage/vdisk/hulldb/base/ut/unittest |63.0%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/base/ut/unittest |63.0%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/public/sdk/cpp/src/client/persqueue_public/ut/with_offset_ranges_mode_ut/unittest |62.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/services/ydb/ydb_coordination_ut.cpp |63.0%| [CC] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/comp_nodes/llvm16/mkql_coalesce.cpp |63.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kqp/provider/yql_kikimr_gateway_ut.cpp |63.0%| [CC] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/comp_nodes/llvm16/mkql_decimal_div.cpp |63.0%| [CC] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/comp_nodes/llvm16/mkql_block_exists.cpp |63.0%| [CC] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/comp_nodes/llvm16/mkql_ensure.cpp |63.0%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/services/cms/ut/unittest |63.0%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/base/ut/unittest |63.0%| [CC] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/comp_nodes/llvm16/mkql_block_agg_count.cpp |63.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/services/ydb/ydb_logstore_ut.cpp |63.0%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/base/ut/unittest |63.0%| [CC] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/comp_nodes/llvm16/mkql_fold.cpp |63.0%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/base/ut/unittest |63.0%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/datashard/ut_kqp_scan/unittest |63.0%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/cms/console/validators/ut/unittest |63.0%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/functional/script_execution/py3test |63.0%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/functional/script_execution/py3test |63.0%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/functional/script_execution/py3test |63.0%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/base/ut/unittest |63.0%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/config/protos/private_proxy.pb.{h, cc} |63.0%| [CC] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/comp_nodes/llvm16/mkql_wide_condense.cpp |63.0%| [CC] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/comp_nodes/llvm16/mkql_range.cpp |63.0%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/library/folder_service/proto/config.pb.{h, cc} |63.0%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/pgproxy/protos/pgproxy.pb.{h, cc} |63.0%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/apps/etcd_proxy/service/libapps-etcd_proxy-service.a |63.0%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/functional/script_execution/py3test |63.0%| [PB] {BAZEL_DOWNLOAD} $(B)/yt/yt_proto/yt/client/node_tracker_client/proto/node.pb.{h, cc} |63.0%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/stress/ctas/tests/objcopy_28c396580e7e319c4a82e15fc9.o |63.0%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/stress/ctas/tests/objcopy_75e82e9b2ff2024ae902b7d5e4.o |63.0%| [CC] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/comp_nodes/llvm16/mkql_block_agg_some.cpp |63.0%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/schemeshard/ut_stats/unittest |63.0%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/functional/script_execution/py3test |63.0%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/protos/persqueue_error_codes_v1.pb.{h, cc} |63.0%| 
[AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/sdk/cpp/src/client/arrow/libsrc-client-arrow.a |63.0%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/cms/ut/unittest |63.0%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/functional/script_execution/py3test |63.0%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/blobstorage/objcopy_c19b3eb5266bf8e49b2b628bc2.o |63.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kqp/ut/arrow/kqp_types_arrow_ut.cpp |63.0%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/tx_allocator/ut/unittest |63.0%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/config/protos/storage.pb.{h, cc} |63.0%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/tx_allocator/ut/unittest |63.0%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/protos/draft/ydb_dynamic_config.pb.{h, cc} |63.0%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/functional/script_execution/py3test |63.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kqp/ut/arrow/kqp_arrow_in_channels_ut.cpp |63.0%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/cms/ut/unittest |63.0%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/functional/script_execution/py3test |63.0%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/cms/ut/unittest |63.0%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/cms/ut/unittest |63.0%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/cms/ut/unittest |63.0%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/cms/ut/unittest |63.0%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/cms/ut/unittest |63.0%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/keyvalue/ut/unittest |63.0%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/public/sdk/cpp/tests/integration/sessions/gtest |63.0%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/cms/ut/unittest |63.1%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/cms/ut/unittest |63.1%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/cms/ut/unittest |63.0%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/cms/ut/unittest |63.0%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/cms/ut/unittest |63.0%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/cms/ut/unittest |63.0%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/cms/ut/unittest |63.0%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/cms/ut/unittest |63.0%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/keyvalue/ut/unittest |63.0%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/cms/ut/unittest |63.0%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/cms/ut/unittest |63.0%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/cms/ut/unittest |63.0%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/schemeshard/ut_stats/unittest |63.0%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/cms/ut/unittest |63.0%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/cms/ut/unittest |63.0%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/tablet_counters.{pb.h ... 
grpc.pb.h} |63.1%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/tx_allocator/ut/unittest |63.1%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/schemeshard/ut_stats/unittest |63.1%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/cms/ut/unittest |63.1%| [LD] {BAZEL_DOWNLOAD} $(B)/ydb/tests/supp/ydb_supp |63.1%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/cms/ut/unittest |63.1%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/cms/ut/unittest |63.1%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/cms/ut/unittest |63.1%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/public/sdk/cpp/tests/integration/sessions/gtest |63.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/services/ydb/ydb_ut.cpp |63.1%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/cms/ut/unittest |63.0%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/public/sdk/cpp/tests/integration/sessions/gtest |63.0%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/cms/ut/unittest |63.1%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/public/sdk/cpp/tests/integration/sessions/gtest |63.1%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/keyvalue/ut/unittest |63.1%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/tx_allocator/ut/unittest |63.1%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/public/sdk/cpp/tests/integration/sessions/gtest |63.1%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/public/sdk/cpp/tests/integration/sessions/gtest |63.1%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/public/sdk/cpp/tests/integration/sessions/gtest |63.1%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/public/sdk/cpp/tests/integration/sessions/gtest |63.1%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/public/sdk/cpp/tests/integration/sessions/gtest |63.1%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/tx_allocator/ut/unittest |63.1%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/schemeshard/ut_stats/unittest |63.1%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/schemeshard/ut_stats/unittest |63.1%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/keyvalue/ut/unittest |63.1%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/fs_settings.{pb.h ... grpc.pb.h} |63.1%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/compile_service_config.{pb.h ... grpc.pb.h} |63.1%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/keyvalue/ut/unittest |63.1%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/keyvalue/ut/unittest |63.1%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/tx_mediator_timecast.{pb.h ... 
grpc.pb.h} |63.1%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/schemeshard/ut_stats/unittest |63.1%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/keyvalue/ut/unittest |63.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/ut_mirror3of4/main.cpp |63.1%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/library/actors/interconnect/ut_rdma/rdma_xdc_ut.cpp |63.1%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/cms/console/validators/ut/unittest |63.1%| [LD] {BAZEL_DOWNLOAD} $(B)/yql/essentials/udfs/examples/dicts/libdicts_udf.so |63.1%| [CC] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/comp_nodes/llvm16/mkql_join.cpp |63.1%| [LD] {BAZEL_DOWNLOAD} $(B)/yql/essentials/udfs/common/vector/libvector_udf.so |63.1%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/cms/console/validators/ut/unittest |63.1%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/public/sdk/cpp/tests/integration/sessions/gtest |63.1%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/cms/console/validators/ut/unittest |63.1%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/cms/console/validators/ut/unittest |63.1%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/cms/console/validators/ut/unittest |63.1%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/keyvalue/ut/unittest |63.1%| [LD] {BAZEL_DOWNLOAD} $(B)/yql/essentials/udfs/examples/lists/liblists_udf.so |63.1%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/keyvalue/ut/unittest |63.1%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/keyvalue/ut/unittest |63.1%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/keyvalue/ut/unittest |63.1%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/keyvalue/ut/unittest |63.1%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/keyvalue/ut/unittest |63.1%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/cms/console/validators/ut/unittest |63.1%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/keyvalue/ut/unittest |63.1%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/keyvalue/ut/unittest |63.1%| [LD] {BAZEL_DOWNLOAD} $(B)/yql/essentials/udfs/test/simple/libsimple_udf.so |63.1%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/keyvalue/ut/unittest |63.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tablet_flat/ut/ut_vacuum.cpp |63.1%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/keyvalue/ut/unittest |63.1%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/keyvalue/ut/unittest |63.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/services/ydb/ydb_table_ut.cpp |63.1%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/keyvalue/ut/unittest |63.1%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/keyvalue/ut/unittest |63.1%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/keyvalue/ut/unittest |63.1%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/keyvalue/ut/unittest |63.1%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/services/cms/ut/unittest |63.1%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/datashard/ut_kqp_scan/unittest |63.1%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/keyvalue/ut/unittest |63.1%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/keyvalue/ut/unittest |63.1%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/keyvalue/ut/unittest |63.1%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/keyvalue/ut/unittest |63.1%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/services/cms/ut/unittest |63.1%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/blobstorage/vdisk/repl/ut/unittest |63.1%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/public/sdk/cpp/src/client/persqueue_public/ut/with_offset_ranges_mode_ut/unittest |63.1%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/blobstorage/vdisk/repl/ut/unittest |63.2%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/client/nc_private/annotations.{pb.h ... 
grpc.pb.h} |63.2%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/blobstorage/vdisk/repl/ut/unittest |63.2%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/blobstorage/vdisk/repl/ut/unittest |63.2%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/blobstorage/vdisk/repl/ut/unittest |63.2%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/public/sdk/cpp/src/client/persqueue_public/ut/with_offset_ranges_mode_ut/unittest |63.1%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/blobstorage/vdisk/repl/ut/unittest |63.1%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/public/sdk/cpp/src/client/persqueue_public/ut/with_offset_ranges_mode_ut/unittest |63.1%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/blobstorage/vdisk/hulldb/base/ut/unittest |63.1%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/functional/serverless/py3test |63.1%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/schemeshard/ut_serverless/unittest |63.1%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/schemeshard/ut_serverless/unittest |63.1%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/schemeshard/ut_serverless/unittest |63.1%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/schemeshard/ut_serverless/unittest |63.1%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/schemeshard/operations.{pb.h ... grpc.pb.h} |63.2%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/schemeshard/ut_serverless/unittest |63.2%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/schemeshard/ut_serverless/unittest |63.2%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/schemeshard/ut_serverless/unittest |63.2%| [LD] {BAZEL_DOWNLOAD} $(B)/yql/essentials/udfs/common/hyperscan/libhyperscan_udf.so |63.2%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/schemeshard/ut_serverless/unittest |63.2%| [LD] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/backup/ydb-tests-functional-backup |63.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tablet_flat/ut/flat_test_db.cpp |63.2%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/external_sources.{pb.h ... 
grpc.pb.h} |63.2%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/functional/serverless/py3test |63.2%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/mind/bscontroller/ut_bscontroller/unittest |63.2%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/schemeshard/ut_serverless/unittest |63.2%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/cms/ut/unittest |63.2%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/functional/serverless/py3test |63.1%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/cms/ut/unittest |63.2%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/cms/ut/unittest |63.2%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/functional/serverless/py3test |63.2%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/functional/serverless/py3test |63.2%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/functional/script_execution/py3test |63.2%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/tx_allocator/ut/unittest |63.2%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/tx_allocator/ut/unittest |63.2%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/tx_allocator/ut/unittest |63.2%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/mind/bscontroller/ut_bscontroller/unittest |63.2%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/tx_allocator/ut/unittest |63.2%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/functional/serverless/py3test |63.2%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/functional/serverless/py3test |63.2%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/functional/serverless/py3test |63.2%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/functional/serverless/py3test |63.2%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/tx_allocator/ut/unittest |63.2%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/functional/serverless/py3test |63.2%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/functional/serverless/py3test |63.2%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/functional/serverless/py3test |63.2%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/functional/serverless/py3test |63.2%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/schemeshard/ut_stats/unittest |63.2%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/schemeshard/ut_stats/unittest |63.2%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/schemeshard/ut_stats/unittest |63.2%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/functional/serverless/py3test |63.2%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/schemeshard/ut_stats/unittest |63.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tablet_flat/ut/ut_btree_index_iter_charge.cpp |63.2%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/functional/serverless/py3test |63.2%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/functional/serverless/py3test |63.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/external_sources/s3/ut/s3_aws_credentials_ut.cpp |63.2%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/functional/serverless/py3test |63.2%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/mind/bscontroller/ut_bscontroller/unittest |63.2%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/mind/bscontroller/ut_bscontroller/unittest |63.2%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/cms/console/validators/ut/unittest |63.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/ut_blobstorage/check_integrity.cpp |63.2%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/mind/bscontroller/ut_bscontroller/unittest |63.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tablet_flat/ut/ut_iterator.cpp |63.2%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/mind/bscontroller/ut_bscontroller/unittest |63.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tablet_flat/ut/ut_comp_gen.cpp |63.2%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/mind/bscontroller/ut_bscontroller/unittest |63.2%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/mind/bscontroller/ut_bscontroller/unittest |63.2%| 
[CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tablet_flat/flat_table_part_ut.cpp |63.2%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/mind/bscontroller/ut_bscontroller/unittest |63.2%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/cms/console/validators/ut/unittest |63.2%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/cms/console/validators/ut/unittest |63.2%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/blobstorage/vdisk/hulldb/base/ut/unittest |63.2%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/blobstorage/vdisk/hulldb/base/ut/unittest |63.2%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/services/cms/ut/unittest |63.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tablet_flat/flat_executor_database_ut.cpp |63.2%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/datashard/ut_kqp_scan/unittest |63.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/client/server/msgbus_server_pq_metarequest_ut.cpp |63.2%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/services/cms/ut/unittest |63.2%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/services/cms/ut/unittest |63.3%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/services/cms/ut/unittest |63.3%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/services/cms/ut/unittest |63.3%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/blobstorage/vdisk/hulldb/base/ut/unittest |63.3%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/services/cms/ut/unittest |63.2%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/services/cms/ut/unittest |63.2%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/public/sdk/cpp/src/client/persqueue_public/ut/with_offset_ranges_mode_ut/unittest |63.2%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/blobstorage/vdisk/hulldb/base/ut/unittest |63.2%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/blobstorage/vdisk/hulldb/base/ut/unittest |63.2%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/public/sdk/cpp/src/client/persqueue_public/ut/with_offset_ranges_mode_ut/unittest |63.2%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/blobstorage/vdisk/hulldb/base/ut/unittest |63.3%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/public/sdk/cpp/src/client/persqueue_public/ut/with_offset_ranges_mode_ut/unittest |63.3%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/public/sdk/cpp/src/client/persqueue_public/ut/with_offset_ranges_mode_ut/unittest |63.3%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/public/sdk/cpp/src/client/persqueue_public/ut/with_offset_ranges_mode_ut/unittest |63.3%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/public/sdk/cpp/src/client/persqueue_public/ut/with_offset_ranges_mode_ut/unittest |63.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/blob_depot/closed_interval_set_ut.cpp |63.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/blob_depot/given_id_range_ut.cpp |63.3%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/blobstorage/vdisk/hulldb/base/ut/unittest |63.3%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/blobstorage/vdisk/hulldb/base/ut/unittest |63.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tablet_flat/shared_cache_s3fifo_ut.cpp |63.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tablet_flat/flat_executor_leases_ut.cpp |63.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tablet_flat/flat_row_versions_ut.cpp |63.2%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/blobstorage/vdisk/hulldb/generic/ut/ydb-core-blobstorage-vdisk-hulldb-generic-ut |63.2%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/services/fq/ut_integration/ydb-services-fq-ut_integration |63.2%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/library/formats/arrow/ut/ydb-library-formats-arrow-ut |63.3%| [TS] {BAZEL_DOWNLOAD, FAILED} ydb/core/metering/ut/unittest |63.3%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/tx_allocator/ut/ydb-core-tx-tx_allocator-ut |63.3%| [TS] {BAZEL_DOWNLOAD, FAILED} ydb/core/raw_socket/ut/unittest |63.3%| [CC] {BAZEL_DOWNLOAD} 
$(S)/ydb/core/tx/schemeshard/ut_restore/ut_restore_fs.cpp |63.3%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/datashard/ut_order/ydb-core-tx-datashard-ut_order |63.1%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/config/validation/column_shard_config_validator_ut/column_shard_config_validator_ut |63.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tablet_flat/ut/ut_proto.cpp |63.1%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/stress/result_set_format/tests/ydb-tests-stress-result_set_format-tests |63.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tablet_flat/shared_cache_tiered_ut.cpp |63.1%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/schemeshard/ut_column_build_reboots/ydb-core-tx-schemeshard-ut_column_build_reboots |63.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tablet_flat/flat_cxx_database_ut.cpp |63.1%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/functional/statistics/ydb-tests-functional-statistics |63.1%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/library/actors/interconnect/ut_huge_cluster/ydb-library-actors-interconnect-ut_huge_cluster |63.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/ut_shred_reboots/ut_shred_reboots.cpp |63.1%| [TA] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/kesus/proxy/ut/test-results/unittest/{meta.json ... results_accumulator.log} |63.1%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/stress/s3_backups/s3_backups |63.2%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/blobstorage/pdisk/ut/ydb-core-blobstorage-pdisk-ut |63.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tablet_flat/ut/ut_datetime.cpp |63.2%| [TS] {BAZEL_DOWNLOAD, FAILED} ydb/library/actors/dnsresolver/ut/unittest |63.2%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/blobstorage/objcopy_790c6ea4aad5e761d21421b25d.o |63.2%| [TA] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/datashard/ut_change_collector/test-results/unittest/{meta.json ... results_accumulator.log} |63.1%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/blobstorage/objcopy_4e45fac9e6e2cbc502659b10eb.o |63.1%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/fq/libs/result_formatter/ut/ydb-core-fq-libs-result_formatter-ut |63.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/dq/runtime/dq_channel_service_ut.cpp |63.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/persqueue/public/mlp/mlp_writer_ut.cpp |63.1%| [TA] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/schemeshard/ut_base/test-results/unittest/{meta.json ... results_accumulator.log} |63.1%| [TA] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/schemeshard/ut_move/test-results/unittest/{meta.json ... results_accumulator.log} |63.1%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/blobstorage/vdisk/synclog/ut/ydb-core-blobstorage-vdisk-synclog-ut |63.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tablet_flat/ut/ut_pages.cpp |63.2%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/stress/olap_workload/olap_workload |63.2%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/blobstorage/ut_pdiskfit/ut/ydb-core-blobstorage-ut_pdiskfit-ut |63.2%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/stress/testshard_workload/workload_testshard |63.2%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/schemeshard/ut_user_attributes/ydb-core-tx-schemeshard-ut_user_attributes |63.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tablet_flat/ut/ut_bloom.cpp |63.1%| [TA] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/testlib/actors/ut/test-results/unittest/{meta.json ... 
results_accumulator.log} |63.1%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/ymq/http/ut/ydb-core-ymq-http-ut |63.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tablet_flat/ut/ut_decimal.cpp |63.1%| [TA] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/schemeshard/ut_transfer/test-results/unittest/{meta.json ... results_accumulator.log} |63.1%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/transfer/ut/column_table/unittest |63.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tablet_flat/ut/ut_compaction_multi.cpp |63.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/datashard/datashard_ut_read_table.cpp |63.1%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/functional/audit/ydb-tests-functional-audit |63.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tablet_flat/ut/ut_self.cpp |63.2%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/columnshard/ut_schema/ydb-core-tx-columnshard-ut_schema |63.2%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/stress/ctas/tests/objcopy_4e3ea6c3c5a0438f05942dbc81.o |63.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tablet_flat/flat_executor_ut.cpp |63.2%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/public/sdk/cpp/tests/integration/auth/gtest |62.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/datashard/datashard_ut_kqp_errors.cpp |62.9%| [TA] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/kqp/ut/query/test-results/unittest/{meta.json ... results_accumulator.log} |62.9%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/blobstorage/backpressure/ut_client/unittest |62.9%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/grpc_streaming/ut/ydb-core-grpc_streaming-ut |63.0%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/datashard/ut_kqp/ydb-core-tx-datashard-ut_kqp |63.0%| [TA] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/mind/ut/test-results/unittest/{meta.json ... results_accumulator.log} |62.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tablet_flat/flat_executor_gclogic_ut.cpp |62.9%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/datashard/ut_reassign/ydb-core-tx-datashard-ut_reassign |62.9%| [TA] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/http_proxy/ut/sqs_topic_ut/test-results/unittest/{meta.json ... results_accumulator.log} |62.9%| [TA] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/tx_allocator_client/ut/test-results/unittest/{meta.json ... 
results_accumulator.log} |62.9%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/kqp/ut/data_integrity/ydb-core-kqp-ut-data_integrity |62.9%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/stress/kafka/kafka_streams_test |62.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/actors/interconnect/ut_rdma/port_manager.cpp |62.9%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/stress/log/workload_log |62.9%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/blobstorage/vdisk/hulldb/barriers/ut/ydb-core-blobstorage-vdisk-hulldb-barriers-ut |63.0%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/core/tablet_flat/ut/objcopy_9f29b589555ed64086e5eadccf.o |63.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tablet_flat/ut/ut_db_scheme.cpp |63.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tablet_flat/ut/ut_screen.cpp |63.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tablet_flat/shared_handle_ut.cpp |63.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tablet_flat/flat_range_cache_ut.cpp |62.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kqp/ut/arrow/kqp_result_set_formats_ut.cpp |63.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tablet_flat/ut/ut_other.cpp |63.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/persqueue/public/mlp/mlp_reader_ut.cpp |63.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kqp/common/result_set_format/ut/kqp_formats_ut_helpers.cpp |63.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tablet_flat/ut/ut_memtable.cpp |63.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/dq/runtime/ut/ut_helper.cpp |63.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/persqueue/public/mlp/mlp_changer_ut.cpp |63.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/persqueue/ut/slow/pq_ut.cpp |63.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/persqueue/ut/slow/autopartitioning_ut.cpp |63.0%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/datashard/ut_change_collector/unittest |63.0%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/schemeshard/ut_move/unittest |63.0%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/schemeshard/ut_move/unittest |63.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tablet_flat/ut/ut_charge.cpp |63.0%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/schemeshard/ut_move/unittest |63.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tablet_flat/ut/ut_btree_index_nodes.cpp |63.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tablet_flat/ut/ut_part_multi.cpp |63.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tablet_flat/ut/ut_forward.cpp |63.1%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/schemeshard/ut_move/unittest |63.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tablet_flat/ut/ut_compaction.cpp |63.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tablet_flat/ut/ut_slice.cpp |63.0%| [CC] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/comp_nodes/llvm16/mkql_element.cpp |63.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/datashard/datashard_ut_column_stats.cpp |63.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tablet_flat/ut/ut_sausage.cpp |63.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tablet_flat/ut/ut_redo.cpp |63.1%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/testlib/actors/ut/unittest |63.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tablet_flat/ut/ut_rename_table_column.cpp |63.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/dq/runtime/dq_arrow_helpers_ut.cpp |63.0%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/schemeshard/ut_transfer/unittest |63.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/dq/runtime/dq_output_channel_ut.cpp |63.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tablet_flat/ut/ut_db_iface.cpp |63.1%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/testlib/actors/ut/unittest |63.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tablet_flat/ut/ut_shared_sausagecache.cpp |63.1%| [TM] {BAZEL_DOWNLOAD, FAILED} 
ydb/core/testlib/actors/ut/unittest |63.1%| [LD] {BAZEL_DOWNLOAD} $(B)/ydb/tests/fq/streaming_optimize/ydb-tests-fq-streaming_optimize |63.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/ut_restore/ut_restore.cpp |63.1%| [PB] {BAZEL_DOWNLOAD} $(B)/yt/yt_proto/yt/core/misc/proto/protobuf_helpers.pb.{h, cc} |63.1%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/kesus/proxy/ut/unittest |63.1%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/kqp/ut/query/unittest |63.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tablet_flat/ut/ut_slice_loader.cpp |63.1%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/testlib/actors/ut/unittest |63.1%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/testlib/actors/ut/unittest |63.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tablet_flat/ut/ut_versions.cpp |63.1%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/config/protos/rate_limiter.pb.{h, cc} |63.1%| [PB] {BAZEL_DOWNLOAD} $(B)/library/cpp/lwtrace/protos/lwtrace.pb.{h, cc} |63.1%| [PB] {BAZEL_DOWNLOAD} $(B)/yt/yt_proto/yt/client/chunk_client/proto/data_statistics.pb.{h, cc} |63.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tablet_flat/ut/ut_part.cpp |63.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tablet_flat/ut/ut_backup.cpp |63.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kqp/common/result_set_format/ut/kqp_formats_arrow_ut.cpp |63.2%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/config/protos/marker.pb.{h, cc} |63.2%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/schemeshard/ut_transfer/unittest |63.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tablet_flat/ut/ut_stat.cpp |63.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tablet_flat/ut/ut_rowlocks.cpp |63.2%| [PB] {BAZEL_DOWNLOAD} $(B)/yt/yt_proto/yt/core/tracing/proto/tracing_ext.pb.{h, cc} |63.2%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/tx_allocator_client/ut/unittest |63.2%| [CC] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/comp_nodes/llvm16/mkql_chain_map.cpp |63.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/services/fq/ut_integration/ut_utils.cpp |63.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/formats/arrow/ut/ut_splitter.cpp |63.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/tx_allocator/txallocator_ut.cpp |63.2%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/stress/result_set_format/tests/objcopy_2650fc91e1b7439f48496ea51e.o |63.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/blobstorage/vdisk/hulldb/generic/hullds_sst_it_all_ut.cpp |63.2%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/stress/result_set_format/tests/objcopy_ab542bc0d25a481768cec1ad31.o |63.2%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/stress/result_set_format/tests/objcopy_51c4be7ece033a38cc2a2dfb64.o |63.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/tx_allocator/txallocator_ut_helpers.cpp |63.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/config/validation/column_shard_config_validator_ut/column_shard_config_validator_ut.cpp |63.2%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/mind/ut/unittest |63.2%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/statistics/objcopy_3382de65b417782bf648c475b1.o |63.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/formats/arrow/ut/ut_size_calcer.cpp |63.2%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/statistics/objcopy_94f66830f5c535f3f015b42e43.o |63.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/formats/arrow/ut/ut_arrow.cpp |63.2%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/statistics/objcopy_2a98e2f0e66f286cb125620511.o |63.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/blobstorage/vdisk/hulldb/generic/blobstorage_hullwritesst_ut.cpp |63.2%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/mind/ut/unittest |63.2%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/mind/ut/unittest |63.2%| [CC] {BAZEL_DOWNLOAD} 
$(S)/ydb/core/tx/schemeshard/ut_column_build_reboots/ut_column_build_reboots.cpp |63.2%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/mind/ut/unittest |63.2%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/mind/ut/unittest |63.2%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/mind/ut/unittest |63.2%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/mind/ut/unittest |63.2%| [CC] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/comp_nodes/llvm16/mkql_blocks.cpp |63.2%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/mind/ut/unittest |63.2%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/tests/stress/olap_workload/workload/type/libpy3olap_workload-workload-type.global.a |63.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/blobstorage/pdisk/blobstorage_pdisk_restore_ut.cpp |63.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/blobstorage/vdisk/synclog/blobstorage_synclogmsgwriter_ut.cpp |63.2%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/mind/ut/unittest |63.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/blobstorage/pdisk/blobstorage_pdisk_ut_pdisk_config.cpp |63.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/blobstorage/pdisk/blobstorage_pdisk_ut_helpers.cpp |63.2%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/grpc/draft/ydb_logstore_v1.{pb.h ... grpc.pb.h} |63.2%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/mind/ut/unittest |63.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/blobstorage/pdisk/blobstorage_pdisk_log_cache_ut.cpp |63.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/services/fq/ut_integration/fq_ut.cpp |63.2%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/tests/stress/olap_workload/workload/libpy3stress-olap_workload-workload.global.a |63.2%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/mind/ut/unittest |63.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/blobstorage/vdisk/synclog/blobstorage_synclogmem_ut.cpp |63.2%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/mind/ut/unittest |63.2%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/mind/ut/unittest |63.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/blobstorage/pdisk/blobstorage_pdisk_ut_env.cpp |63.2%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/mind/ut/unittest |63.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/blobstorage/pdisk/blobstorage_pdisk_ut_failure.cpp |63.2%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/stress/testshard_workload/objcopy_06a563d04a0e897145ef1061d2.o |63.2%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/client/nc_private/audit/v1/common/action.{pb.h ... grpc.pb.h} |63.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/blobstorage/vdisk/synclog/blobstorage_synclogkeeper_ut.cpp |63.2%| [CC] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/comp_nodes/llvm16/mkql_block_agg_minmax.cpp |63.2%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/mind/ut/unittest |63.2%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/mind/ut/unittest |63.2%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/kqp/ut/query/unittest |63.2%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/counters_datashard.{pb.h ... 
grpc.pb.h} |63.2%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/kqp/ut/query/unittest |63.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tablet_flat/ut/ut_shared_sausagecache_actor.cpp |63.2%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/kqp/ut/query/unittest |63.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/ymq/http/ut/xml_builder_ut.cpp |63.3%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/mind/ut/unittest |63.3%| [LD] {BAZEL_DOWNLOAD} $(B)/yql/essentials/udfs/examples/structs/libstructs_udf.so |63.3%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/kqp/ut/query/unittest |63.2%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/mind/ut/unittest |63.2%| [LD] {BAZEL_DOWNLOAD} $(B)/yql/essentials/udfs/common/math/libmath_udf.so |63.2%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/kqp/ut/query/unittest |63.2%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/blobstorage_pdisk_config.{pb.h ... grpc.pb.h} |63.2%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/kqp/ut/query/unittest |63.2%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/mind/ut/unittest |63.2%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/kqp/ut/query/unittest |63.2%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/kqp/ut/query/unittest |63.3%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/kqp/ut/query/unittest |63.3%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/kqp/ut/query/unittest |63.3%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/mind/ut/unittest |63.3%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/mind/ut/unittest |63.3%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/kesus/proxy/ut/unittest |63.3%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/kesus/proxy/ut/unittest |63.3%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/kesus/proxy/ut/unittest |63.3%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/kesus/proxy/ut/unittest |63.2%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/kesus/proxy/ut/unittest |63.2%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/kesus/proxy/ut/unittest |63.2%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/kqp/ut/query/unittest |63.2%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/kqp/ut/query/unittest |63.2%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/kesus/proxy/ut/unittest |63.2%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/kqp/ut/query/unittest |63.2%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/kqp/ut/query/unittest |63.2%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/kqp/ut/query/unittest |63.2%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/kqp/ut/query/unittest |63.2%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/kqp/ut/query/unittest |63.2%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/base.{pb.h ... grpc.pb.h} |63.2%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/kqp/ut/query/unittest |63.2%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/kqp/ut/query/unittest |63.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/datashard/datashard_ut_order.cpp |63.3%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/kqp/ut/query/unittest |63.3%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/alloc.{pb.h ... grpc.pb.h} |63.3%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/kqp/ut/query/unittest |63.3%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/tablet_counters_aggregator.{pb.h ... grpc.pb.h} |63.3%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/mind/ut/unittest |63.3%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/mind/ut/unittest |63.3%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/mind/ut/unittest |63.3%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/channel_purpose.{pb.h ... 
grpc.pb.h}
|63.3%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/mind/ut/unittest
|63.3%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/kqp/ut/query/unittest
|63.3%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/ydb_result_set_old.{pb.h ... grpc.pb.h}
|63.3%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/schemeshard/ut_move/unittest
|63.3%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/blockstore_config.{pb.h ... grpc.pb.h}
|63.3%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/labeled_counters.{pb.h ... grpc.pb.h}
|63.3%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/datashard/ut_change_collector/unittest
|63.3%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/schemeshard/ut_base/unittest
|63.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/blobstorage/vdisk/synclog/blobstorage_synclogdsk_ut.cpp
|63.3%| [LD] {BAZEL_DOWNLOAD} $(B)/yql/essentials/udfs/common/topfreq/libtopfreq_udf.so
|63.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/blobstorage/ut_pdiskfit/ut/main.cpp
|63.3%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/yql_translation_settings.{pb.h ... grpc.pb.h}
|63.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/grpc_streaming/grpc_streaming_ut.cpp
|63.3%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/tenant_slot_broker.{pb.h ... grpc.pb.h}
|63.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kqp/ut/data_integrity/kqp_data_integrity_trails_ut.cpp
|63.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/ut_user_attributes/ut_user_attributes.cpp
|63.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/blobstorage/vdisk/synclog/blobstorage_synclogdata_ut.cpp
|63.4%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/blobstorage_vdisk_internal.{pb.h ... grpc.pb.h}
|63.4%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/minikql_engine.{pb.h ... grpc.pb.h}
|63.4%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/sys_view.{pb.h ... grpc.pb.h}
|63.4%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/http_proxy/ut/sqs_topic_ut/unittest
|63.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/blobstorage/pdisk/blobstorage_pdisk_ut.cpp
|63.4%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/tx_allocator_client/ut/unittest
|63.4%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/blobstorage.{pb.h ... grpc.pb.h}
|63.4%| [LD] {BAZEL_DOWNLOAD} $(B)/yql/essentials/udfs/common/digest/libdigest_udf.so
|63.4%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/kesus/proxy/ut/unittest
|63.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/library/actors/interconnect/ut_huge_cluster/huge_cluster.cpp
|63.4%| [LD] {BAZEL_DOWNLOAD} $(B)/yql/essentials/udfs/common/histogram/libhistogram_udf.so
|63.4%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/testlib/actors/ut/unittest
|63.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/blobstorage/vdisk/synclog/blobstorage_synclogmsgimpl_ut.cpp
|63.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/blobstorage/vdisk/synclog/codecs_ut.cpp
|63.5%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/datashard/ut_write/ydb-core-tx-datashard-ut_write
|63.5%| [TS] {BAZEL_DOWNLOAD, FAILED} ydb/library/yql/providers/generic/provider/ut/pushdown/unittest
|63.1%| [TA] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/kqp/ut/rbo/test-results/unittest/{meta.json ... results_accumulator.log}
|63.2%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/kqp/ut/olap/indexes/ydb-core-kqp-ut-olap-indexes
|63.2%| [TA] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/blobstorage/vdisk/hulldb/barriers/ut/test-results/unittest/{meta.json ... results_accumulator.log}
|63.2%| [TA] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/schemeshard/ut_partition_stats/test-results/unittest/{meta.json ... results_accumulator.log}
|63.2%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/viewer/ut/ydb-core-viewer-ut
|63.2%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/fq/libs/actors/ut/ydb-core-fq-libs-actors-ut
|63.2%| [TA] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/services/ext_index/ut/test-results/unittest/{meta.json ... results_accumulator.log}
|63.1%| [TA] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/kqp/runtime/ut/test-results/unittest/{meta.json ... results_accumulator.log}
|63.1%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/datashard/ut_init/unittest
|63.1%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/mind/bscontroller/ut/ydb-core-mind-bscontroller-ut
|63.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/fq/libs/result_formatter/result_formatter_ut.cpp
|63.1%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/blobstorage/dsproxy/ut_strategy/ydb-core-blobstorage-dsproxy-ut_strategy
|63.1%| [BN] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/stability/tool/s3_backups_workload
|63.1%| [TA] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/blobstorage/vdisk/hulldb/fresh/ut/test-results/unittest/{meta.json ... results_accumulator.log}
|63.1%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/stability/tests/ydb-tests-stability-tests
|63.1%| [TA] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/blobstorage/nodewarden/ut/test-results/unittest/{meta.json ... results_accumulator.log}
|63.1%| [TA] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/datashard/ut_erase_rows/test-results/unittest/{meta.json ... results_accumulator.log}
|63.1%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/ymq/actor/cloud_events/cloud_events_ut/ydb-core-ymq-actor-cloud_events-cloud_events_ut
|63.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/tests/stress/log/libpy3workload_log.global.a
|63.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/blobstorage/pdisk/blobstorage_pdisk_ut_color_limits.cpp
|63.1%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/services/persqueue_cluster_discovery/ut/ydb-services-persqueue_cluster_discovery-ut
|63.1%| [BN] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/stress/kafka/workload/ydb_cli
|63.1%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/stress/kafka/objcopy_e0d6c3883613601e455029f31f.o
|63.2%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/library/yql/dq/actors/compute/ut/unittest
|62.8%| [TA] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/functional/blobstorage/test-results/py3test/{meta.json ... results_accumulator.log}
|62.8%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/tests/stress/kafka/libpy3kafka_streams_test.global.a
|62.9%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/metering/ut/ydb-core-metering-ut
|62.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/blobstorage/pdisk/blobstorage_pdisk_ut_context.cpp
|62.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/blobstorage/pdisk/blobstorage_pdisk_ut_chunk_tracker.cpp
|62.9%| [TA] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/blobstorage/storagepoolmon/ut/test-results/unittest/{meta.json ... results_accumulator.log}
|62.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/blobstorage/vdisk/hulldb/barriers/barriers_tree_ut.cpp
|62.9%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/driver_lib/version/ut/unittest
|62.9%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/mind/bscontroller/ut_selfheal/ydb-core-mind-bscontroller-ut_selfheal
|62.9%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/stress/log/objcopy_b61dd8e76e6166fc1abc85e60e.o
|62.9%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/grpc_services/ut/ydb-core-grpc_services-ut
|62.9%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/schemeshard/ut_sequence/ydb-core-tx-schemeshard-ut_sequence
|62.8%| [TS] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/datashard/ut_export/unittest
|62.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/datashard/datashard_ut_reassign.cpp
|62.9%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/statistics/objcopy_8cba80b2275265b72407436cdf.o
|62.9%| [TA] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/security/certificate_check/ut/test-results/unittest/{meta.json ... results_accumulator.log}
|62.9%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/sys_view/partition_stats/ut/ydb-core-sys_view-partition_stats-ut
|62.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/blobstorage/pdisk/blobstorage_pdisk_ut_sectormap.cpp
|62.9%| [TS] {BAZEL_DOWNLOAD, FAILED} ydb/core/client/minikql_compile/ut/unittest
|62.9%| [TA] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/datashard/ut_trace/test-results/unittest/{meta.json ... results_accumulator.log}
|62.9%| [TS] {BAZEL_DOWNLOAD, FAILED} ydb/library/yaml_config/ut/unittest
|62.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/blobstorage/pdisk/blobstorage_pdisk_ut_run.cpp
|62.7%| [TA] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/schemeshard/ut_external_data_source/test-results/unittest/{meta.json ... results_accumulator.log}
|62.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/datashard/datashard_ut_kqp_stream_lookup.cpp
|62.7%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/public/sdk/cpp/src/client/topic/ut/with_direct_read_ut/src-client-topic-ut-with_direct_read_ut
|62.7%| [TA] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/kqp/ut/cost/test-results/unittest/{meta.json ... results_accumulator.log}
|62.7%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/stress/result_set_format/tests/py3test
|62.7%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/cms/console/ut_configs_dispatcher/ydb-core-cms-console-ut_configs_dispatcher
|62.7%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/stress/s3_backups/tests/py3test
|62.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/datashard/datashard_ut_kqp.cpp
|62.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/blobstorage/pdisk/blobstorage_pdisk_util_ut.cpp
|62.8%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/stress/s3_backups/objcopy_4508aef343f36758ea760320db.o
|62.8%| [TA] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/http_proxy/ut/inside_ydb_ut/test-results/unittest/{meta.json ... results_accumulator.log}
|62.8%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/stress/node_broker/tests/ydb-tests-stress-node_broker-tests
|62.8%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/tests/stress/s3_backups/libpy3s3_backups.global.a
|62.8%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/kqp/federated_query/ut_service/slow/ydb-core-kqp-federated_query-ut_service-slow
|62.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/blobstorage/pdisk/blobstorage_pdisk_ut_actions.cpp
|62.7%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/services/persqueue_v1/ut/ydb-services-persqueue_v1-ut
|62.7%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/sys_view/query_stats/ut/ydb-core-sys_view-query_stats-ut
|62.7%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/kqp/ut/rbo/unittest
|62.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/blobstorage/pdisk/blobstorage_pdisk_ut_races.cpp
|62.7%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/statistics/c4711c742b4f72331dccea9c2a_raw.auxcpp
|62.8%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/blobstorage/vdisk/hulldb/barriers/ut/unittest
|62.8%| [TS] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/schemeshard/ut_partition_stats/unittest
|62.8%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/grpc_streaming/ut/grpc/libgrpc_streaming-ut-grpc.a
|62.8%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/services/ext_index/ut/unittest
|62.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/blobstorage/pdisk/mock/pdisk_mock.cpp
|62.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/blobstorage/pdisk/blobstorage_pdisk_crypto_ut.cpp
|62.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/blobstorage/pdisk/blobstorage_pdisk_blockdevice_ut.cpp
|62.8%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/tests/stress/testshard_workload/libpy3workload_testshard.global.a
|62.8%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/stress/olap_workload/objcopy_9de271b22d7bcc64ef77cc3cde.o
|62.8%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/tests/stress/olap_workload/libpy3olap_workload.global.a
|62.8%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/audit/objcopy_643fa2679e88d9b2d33558b050.o
|62.8%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/audit/objcopy_22edc4e58ff43cb5e83c9bbe2c.o
|62.8%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/audit/objcopy_e7dfca3c87220ea0ed36a65f9f.o
|62.8%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/audit/daf02fd86bb7e2296f1437ae1f_raw.auxcpp
|62.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/blobstorage/pdisk/blobstorage_pdisk_ut_yard.cpp
|62.8%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/datashard/ut_erase_rows/unittest
|62.8%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/blobstorage/nodewarden/ut/unittest
|62.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/columnshard/ut_schema/ut_columnshard_move_table.cpp
|62.8%| [CC] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/comp_nodes/llvm16/mkql_logical.cpp
|62.8%| [CC] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/comp_nodes/llvm16/mkql_hopping.cpp
|62.8%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/functional/blobstorage/py3test
|62.8%| [CC] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/comp_nodes/llvm16/mkql_squeeze_state.cpp
|62.8%| [CC] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/comp_nodes/llvm16/mkql_iterable.cpp
|62.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/columnshard/ut_schema/ut_columnshard_schema.cpp
|62.8%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/config/protos/gateways.pb.{h, cc}
|62.9%| [CC] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/comp_nodes/llvm16/mkql_lazy_list.cpp
|62.9%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/config/protos/token_accessor.pb.{h, cc}
|62.9%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/blobstorage/storagepoolmon/ut/unittest
|62.9%| [LD] {BAZEL_DOWNLOAD} $(B)/yql/essentials/udfs/common/unicode_base/libunicode_udf.so
|62.9%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/security/certificate_check/ut/unittest
|62.9%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/datashard/ut_trace/unittest
|62.9%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/kqp/ut/cost/unittest
|62.9%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/dq/proto/dq_state_load_plan.pb.{h, cc}
|62.9%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/protos/ydb_scripting.pb.{h, cc}
|62.9%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/protos/ydb_query.pb.{h, cc}
|62.9%| [CC] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/comp_nodes/llvm16/mkql_block_map_join.cpp
|62.9%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/http_proxy/ut/inside_ydb_ut/unittest
|62.9%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/protos/ydb_topic.pb.{h, cc}
|62.9%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/stability/tests/objcopy_dfafe5e4a6d4b0c6fbf440843b.o
|62.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/viewer/topic_data_ut.cpp
|62.9%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/stability/tests/objcopy_8eee7558f17cd3744d1b7a5e68.o
|62.9%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/scheme/protos/pathid.{pb.h ... grpc.pb.h}
|62.9%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/grpc/draft/ydb_maintenance_v1.{pb.h ... grpc.pb.h}
|62.9%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/grpc/draft/ydb_tablet_v1.{pb.h ... grpc.pb.h}
|63.0%| [LD] {BAZEL_DOWNLOAD} $(B)/yql/essentials/udfs/common/url_base/liburl_udf.so
|63.0%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/kiwisolver/py3/libpy3python-kiwisolver-py3.global.a
|63.0%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/change_exchange.{pb.h ... grpc.pb.h}
|62.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/metering/time_grid_ut.cpp
|62.9%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/blob_depot_config.{pb.h ... grpc.pb.h}
|63.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/grpc_services/operation_helpers_ut.cpp
|63.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/grpc_services/rpc_calls_ut.cpp
|63.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/services/persqueue_cluster_discovery/cluster_discovery_service_ut.cpp
|63.0%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/blobstorage_config.{pb.h ... grpc.pb.h}
|63.0%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/counters_info.{pb.h ... grpc.pb.h}
|63.0%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/table_service_config.{pb.h ... grpc.pb.h}
|63.0%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/console_tenant.{pb.h ... grpc.pb.h}
|63.0%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/stress/node_broker/tests/objcopy_d0e1cde98d2ab34e72d18aae9c.o
|63.0%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/stress/node_broker/tests/objcopy_d2d4e3343da9b011ee6a983244.o
|63.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kqp/federated_query/ut_service/slow/kqp_federated_query_actors_ut.cpp
|63.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/services/persqueue_v1/ut/functions_executor_wrapper.cpp
|63.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/cms/console/configs_cache_ut.cpp
|63.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/services/persqueue_v1/ut/pqtablet_mock.cpp
|63.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/services/persqueue_v1/ut/kqp_mock.cpp
|63.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/services/persqueue_v1/ut/partition_writer_cache_actor_fixture.cpp
|63.1%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/schemeshard/ut_external_data_source/unittest
|63.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/sys_view/query_stats/query_stats_ut.cpp
|63.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/services/persqueue_v1/ut/partition_writer_cache_actor_ut.cpp
|63.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/cms/console/configs_dispatcher_ut.cpp
|63.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/services/persqueue_v1/ut/rate_limiter_test_setup.cpp
|63.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/services/persqueue_v1/ut/topic_service_ut.cpp
|63.1%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/public/sdk/cpp/src/client/topic/ut/with_direct_read_ut/objcopy_4f055c289b3de8f2a1e827ae5c.o
|63.1%| [LD] {BAZEL_DOWNLOAD} $(B)/yql/essentials/udfs/examples/dummylog/libdummylog.so
|63.1%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/functional/blobstorage/py3test
|63.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/services/persqueue_v1/topic_yql_ut.cpp
|63.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/services/persqueue_v1/persqueue_compat_ut.cpp
|63.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/public/sdk/cpp/src/client/topic/ut/topic_to_table_ut.cpp
|63.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/services/persqueue_v1/persqueue_common_ut.cpp
|63.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/public/sdk/cpp/src/client/topic/ut/direct_read_ut.cpp
|63.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/services/persqueue_v1/ut/demo_tx.cpp
|63.1%| [LD] {BAZEL_DOWNLOAD} $(B)/yql/essentials/udfs/common/set/libset_udf.so
|63.1%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/tx_datashard.{pb.h ... grpc.pb.h}
|63.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/datashard/datashard_ut_write.cpp
|63.2%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/blobstorage/storagepoolmon/ut/unittest
|63.2%| [LD] {BAZEL_DOWNLOAD} $(B)/ydb/public/sdk/cpp/tests/integration/sessions_pool/public-sdk-cpp-tests-integration-sessions_pool
|63.2%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/security/certificate_check/ut/unittest
|63.2%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/schemeshard/ut_external_data_source/unittest
|63.3%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/kqp/ut/cost/unittest
|63.3%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/datashard/ut_erase_rows/unittest
|63.3%| [LD] {BAZEL_DOWNLOAD} $(B)/yql/essentials/udfs/common/stat/libstat_udf.so
|63.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/public/sdk/cpp/src/client/topic/ut/local_partition_ut.cpp
|63.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/public/sdk/cpp/src/client/topic/ut/describe_topic_ut.cpp
|63.4%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/http_proxy/ut/inside_ydb_ut/unittest
|63.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/public/sdk/cpp/src/client/topic/ut/basic_usage_ut.cpp
|63.4%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/matplotlib/py3/libpy3python-matplotlib-py3.global.a
|63.4%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/stress/node_broker/tests/objcopy_953328e5c3275a286b65dc3b1d.o
|63.4%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/cycler/py3/libpy3python-cycler-py3.global.a
|63.4%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/kiwisolver/py3/libpy3python-kiwisolver-py3.a
|63.4%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/cppy/libpy3contrib-python-cppy.global.a
|63.4%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/blobstorage/vdisk/hulldb/fresh/ut/unittest
|63.4%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/contourpy/libpy3contrib-python-contourpy.global.a
|63.4%| [LD] {BAZEL_DOWNLOAD} $(B)/yql/essentials/udfs/common/top/libtop_udf.so
|63.4%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/kqp/runtime/ut/unittest
|63.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/services/persqueue_v1/first_class_src_ids_ut.cpp
|63.4%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/libwebp/sharpyuv/liblibs-libwebp-sharpyuv.a
|63.4%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/Pillow/py3/libpy3python-Pillow-py3.global.a
|63.4%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/qhull/libcontrib-libs-qhull.a
|63.4%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/kqp/ut/cost/unittest
|63.4%| [LD] {BAZEL_DOWNLOAD} $(B)/yql/essentials/udfs/common/python/python3_small/libpython3_udf.so
|63.4%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/schemeshard/ut_external_data_source/unittest
|63.4%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/datashard/ut_trace/unittest
|63.4%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/security/certificate_check/ut/unittest
|63.4%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/libpng/libcontrib-libs-libpng.a
|63.4%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/services/ext_index/ut/unittest
|63.5%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/Pillow/py3/libpy3python-Pillow-py3.a
|63.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/services/persqueue_v1/persqueue_ut.cpp
|63.4%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/blobstorage/storagepoolmon/ut/unittest
|63.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/mind/bscontroller/ut_selfheal/self_heal_actor_ut.cpp
|63.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/mind/bscontroller/ut_selfheal/main.cpp
|63.4%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/lcms2/libcontrib-libs-lcms2.a
|63.5%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/fonttools/libpy3contrib-python-fonttools.global.a
|63.5%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/tests/functional/blobstorage/py3test
|63.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/mind/bscontroller/grouper_ut.cpp
|63.5%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/libtiff/libcontrib-libs-libtiff.a
|63.5%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/openjpeg/libcontrib-libs-openjpeg.a
|63.5%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/libjpeg-turbo/libcontrib-libs-libjpeg-turbo.a
|63.5%| [TS] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/schemeshard/ut_partition_stats/unittest
|63.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/sys_view/partition_stats/partition_stats_ut.cpp
|63.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/blobstorage/dsproxy/ut_strategy/strategy_ut.cpp
|63.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/mind/bscontroller/group_mapper_ut.cpp
|63.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/ut_sequence/ut_sequence.cpp
|63.5%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/libwebp/libcontrib-libs-libwebp.a
|63.6%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/freetype/libcontrib-libs-freetype.a
|63.6%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/contourpy/libpy3contrib-python-contourpy.a
|63.7%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/blobstorage/vdisk/hulldb/barriers/ut/unittest
|63.7%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/kqp/ut/rbo/unittest
|63.7%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/blobstorage/nodewarden/ut/unittest
|63.7%| [TM] {BAZEL_DOWNLOAD, FAILED} ydb/core/tx/datashard/ut_erase_rows/unittest
|63.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/library/actors/interconnect/rdma/ut/ibv_ut.cpp
|63.8%| [BN] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/stability/tool/kafka_workload
|63.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/metering/stream_ru_calculator_ut.cpp
|63.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/library/actors/core/actorsystem.cpp
|63.8%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/matplotlib/py3/extern/agg24-svn/libpy3-extern-agg24-svn.a
|63.8%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/matplotlib/py3/extern/ttconv/libpy3py3-extern-ttconv.a
|63.8%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/olefile/py3/libpy3python-olefile-py3.global.a
|63.8%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/pytest-timeout/py3/libpy3python-pytest-timeout-py3.global.a
|63.9%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/stability/tests/objcopy_c5bab83c436501626a45161d22.o
|63.9%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/stability/tests/objcopy_8c41a9066682151bfb4a1a2f9a.o
|63.9%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/tests/library/stability/utils/libpy3library-stability-utils.global.a
|63.9%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/tests/library/stability/libpy3tests-library-stability.global.a
|63.9%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/stability/tests/objcopy_ebd06ca3d7aa9b5b1b6ec851b7.o
|63.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/viewer/ut/ut_utils.cpp
|63.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/mind/bscontroller/mv_object_map_ut.cpp
|63.9%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/pyparsing/py3/libpy3python-pyparsing-py3.global.a
|63.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/library/actors/interconnect/interconnect_proxy_wrapper.cpp
|63.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/library/actors/interconnect/rdma/ut/utils.cpp
|63.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/library/actors/interconnect/rdma/ut_mem_pool_limit_128/allocator_ut.cpp
|63.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/library/actors/interconnect/rdma/mem_pool.cpp
|63.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/ymq/actor/cloud_events/cloud_events_ut/cloud_events_ut.cpp
|63.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/library/actors/core/event_pb.cpp
|63.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/library/actors/interconnect/rdma/ut_mem_pool_limit_32/allocator_ut.cpp
|63.9%| [LD] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/sdk/cpp/sdk_credprovider/ydb-tests-functional-sdk-cpp-sdk_credprovider
|63.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/library/actors/interconnect/rdma/cq_actor/cq_actor.cpp
|63.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/fq/libs/actors/ut/database_resolver_ut.cpp
|64.0%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/matplotlib/py3/libpy3python-matplotlib-py3.a
|64.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/library/actors/interconnect/rdma/ut/rdma_low_ut.cpp
|64.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/library/actors/interconnect/rdma/ut/ibv_ut.cpp
|64.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kqp/ut/olap/indexes/indexes_ut.cpp
|64.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/library/actors/examples/02_discovery/main.cpp
|64.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/library/actors/core/actorsystem.cpp
|64.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/library/actors/interconnect/interconnect_channel.cpp
|64.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/library/actors/interconnect/interconnect_proxy_wrapper.cpp
|64.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/library/actors/interconnect/rdma/ut/utils.cpp
|64.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/viewer/viewer_ut.cpp
|64.3%| [LD] {BAZEL_DOWNLOAD} $(B)/ydb/public/sdk/cpp/tests/integration/bulk_upsert/ydb-public-sdk-cpp-tests-integration-bulk_upsert
|64.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/library/actors/interconnect/rdma/ut/rdma_low_ut.cpp
|64.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/library/actors/examples/02_discovery/main.cpp
|64.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/library/actors/interconnect/interconnect_mon.cpp
|64.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/library/actors/interconnect/interconnect_channel.cpp
|64.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/library/actors/interconnect/interconnect_mon.cpp
|65.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/library/actors/interconnect/ut/sticking_ut.cpp
|65.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/library/actors/interconnect/rdma/ut/allocator_ut.cpp
|65.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/library/actors/interconnect/ut/dynamic_proxy_ut.cpp
|65.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/library/actors/interconnect/ut/large.cpp
|65.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/library/actors/interconnect/ut/sticking_ut.cpp
|65.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/library/actors/interconnect/ut/dynamic_proxy_ut.cpp
|65.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/library/actors/interconnect/ut/connection_checker_ut.cpp
|65.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/library/actors/interconnect/rdma/ut/allocator_ut.cpp
|65.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/library/actors/interconnect/interconnect_handshake.cpp
|65.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/library/yql/providers/dq/service/service_node.cpp
|65.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/library/actors/interconnect/interconnect_tcp_proxy.cpp
|65.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/library/actors/interconnect/ut/large.cpp
|65.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/library/yql/providers/dq/service/service_node.cpp
|66.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/library/actors/interconnect/ut/connection_checker_ut.cpp
|66.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/library/actors/interconnect/ut/interconnect_ut.cpp
|66.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/library/actors/interconnect/interconnect_handshake.cpp
|66.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/library/actors/interconnect/interconnect_tcp_proxy.cpp
|66.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/library/yql/providers/dq/service/interconnect_helpers.cpp
|66.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/library/actors/interconnect/ut_fat/main.cpp
|66.5%| [LD] {BAZEL_DOWNLOAD} $(B)/yql/essentials/udfs/common/json2/libjson2_udf.so
|66.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/library/actors/interconnect/interconnect_tcp_input_session.cpp
|66.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/library/actors/interconnect/ut/interconnect_ut.cpp
|66.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/library/yql/providers/dq/service/interconnect_helpers.cpp
|67.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/library/actors/interconnect/ut_huge_cluster/huge_cluster.cpp
|67.0%| [LD] {BAZEL_DOWNLOAD} $(B)/yql/essentials/udfs/common/protobuf/libprotobuf_udf.so
|67.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/library/actors/interconnect/ut_fat/main.cpp
|67.1%| [LD] {BAZEL_DOWNLOAD} $(B)/ydb/core/transfer/ut/column_table/ydb-core-transfer-ut-column_table
|67.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/library/actors/interconnect/interconnect_tcp_input_session.cpp
|67.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/library/actors/testlib/test_runtime.cpp
|67.2%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/library/actors/testlib/liblibrary-actors-testlib.a
|67.3%| [AR] {RESULT} $(B)/ydb/library/actors/testlib/liblibrary-actors-testlib.a
|67.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/library/actors/interconnect/ut_huge_cluster/huge_cluster.cpp
|67.9%| [AR] {BAZEL_UPLOAD} $(B)/ydb/library/actors/testlib/liblibrary-actors-testlib.a
|67.9%| [LD] {BAZEL_DOWNLOAD} $(B)/ydb/public/sdk/cpp/tests/integration/auth/ydb-public-sdk-cpp-tests-integration-auth
|68.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/library/actors/testlib/test_runtime.cpp
|69.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/library/actors/interconnect/interconnect_tcp_session.cpp
|69.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/library/actors/interconnect/interconnect_tcp_session.cpp
|69.9%| [AR] {default-linux-x86_64, release, asan} $(B)/library/cpp/build_info/liblibrary-cpp-build_info.a
|70.1%| [AR] {default-linux-x86_64, release, asan} $(B)/library/cpp/svnversion/liblibrary-cpp-svnversion.a
|70.1%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/library/actors/core/liblibrary-actors-core.a
|70.2%| [AR] {RESULT} $(B)/ydb/library/actors/core/liblibrary-actors-core.a
|70.4%| [LD] {BAZEL_DOWNLOAD} $(B)/ydb/public/sdk/cpp/tests/integration/sessions/ydb-public-sdk-cpp-tests-integration-sessions
|70.9%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/library/actors/interconnect/rdma/libactors-interconnect-rdma.a
|70.9%| [AR] {RESULT} $(B)/ydb/library/actors/interconnect/rdma/libactors-interconnect-rdma.a
|71.2%| [AR] {BAZEL_UPLOAD} $(B)/ydb/library/actors/interconnect/rdma/libactors-interconnect-rdma.a
|71.4%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/library/actors/interconnect/liblibrary-actors-interconnect.a
|71.4%| [AR] {RESULT} $(B)/ydb/library/actors/interconnect/liblibrary-actors-interconnect.a
|71.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/library/actors/interconnect/ut_rdma/rdma_xdc_ut.cpp
|72.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/library/actors/interconnect/ut_rdma/rdma_xdc_ut.cpp
|72.4%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/library/actors/interconnect/rdma/cq_actor/libinterconnect-rdma-cq_actor.a
|72.4%| [AR] {default-linux-x86_64, release, asan} $(B)/yt/yql/providers/yt/provider/libproviders-yt-provider.a
|72.4%| [AR] {RESULT} $(B)/ydb/library/actors/interconnect/rdma/cq_actor/libinterconnect-rdma-cq_actor.a
|72.5%| [AR] {RESULT} $(B)/yt/yql/providers/yt/provider/libproviders-yt-provider.a
|72.5%| [AR] {BAZEL_UPLOAD, SKIPPED} $(B)/yt/yql/providers/yt/provider/libproviders-yt-provider.a
|72.6%| [AR] {BAZEL_UPLOAD} $(B)/ydb/library/actors/interconnect/rdma/cq_actor/libinterconnect-rdma-cq_actor.a
|72.8%| [AR] {BAZEL_UPLOAD} $(B)/ydb/library/actors/core/liblibrary-actors-core.a
|73.3%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/library/yql/providers/dq/service/libproviders-dq-service.a
|73.4%| [AR] {RESULT} $(B)/ydb/library/yql/providers/dq/service/libproviders-dq-service.a
|73.4%| [AR] {BAZEL_UPLOAD} $(B)/ydb/library/actors/interconnect/liblibrary-actors-interconnect.a
|73.4%| [AR] {BAZEL_UPLOAD} $(B)/ydb/library/yql/providers/dq/service/libproviders-dq-service.a
|73.4%| [AR] {default-linux-x86_64, release, asan} $(B)/yql/essentials/minikql/comp_nodes/llvm16/libminikql-comp_nodes-llvm16.a
|73.4%| [AR] {BAZEL_UPLOAD, SKIPPED} $(B)/yql/essentials/minikql/comp_nodes/llvm16/libminikql-comp_nodes-llvm16.a
|73.4%| [AR] {RESULT} $(B)/yql/essentials/minikql/comp_nodes/llvm16/libminikql-comp_nodes-llvm16.a
|73.4%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/schemeshard/libcore-tx-schemeshard.a
|73.4%| [AR] {RESULT} $(B)/ydb/core/tx/schemeshard/libcore-tx-schemeshard.a
|73.5%| [AR] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/schemeshard/libcore-tx-schemeshard.a
|73.4%| [AR] {default-linux-x86_64, release, asan}
$(B)/yt/yt/client/libyt-yt-client.a |73.5%| [AR] {BAZEL_UPLOAD, SKIPPED} $(B)/yt/yt/client/libyt-yt-client.a |73.5%| [AR] {RESULT} $(B)/yt/yt/client/libyt-yt-client.a |73.5%| [AR] {default-linux-x86_64, release, asan} $(B)/yt/yt/core/libyt-yt-core.a |73.5%| [AR] {BAZEL_UPLOAD, SKIPPED} $(B)/yt/yt/core/libyt-yt-core.a |73.5%| [AR] {RESULT} $(B)/yt/yt/core/libyt-yt-core.a |73.5%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/sequenceshard/public/ut/ydb-core-tx-sequenceshard-public-ut |73.5%| [LD] {RESULT} $(B)/ydb/core/tx/sequenceshard/public/ut/ydb-core-tx-sequenceshard-public-ut |73.5%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/tablet_flat/ut_util/ydb-core-tablet_flat-ut_util |73.5%| [LD] {RESULT} $(B)/ydb/core/tablet_flat/ut_util/ydb-core-tablet_flat-ut_util |73.5%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/tablet_flat/ut_large/ydb-core-tablet_flat-ut_large |73.5%| [LD] {RESULT} $(B)/ydb/core/tablet_flat/ut_large/ydb-core-tablet_flat-ut_large |73.5%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/fq/libs/db_id_async_resolver_impl/ut/ydb-core-fq-libs-db_id_async_resolver_impl-ut |73.5%| [LD] {RESULT} $(B)/ydb/core/fq/libs/db_id_async_resolver_impl/ut/ydb-core-fq-libs-db_id_async_resolver_impl-ut |73.5%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/log_backend/ut/ydb-core-log_backend-ut |73.5%| [LD] {RESULT} $(B)/ydb/core/log_backend/ut/ydb-core-log_backend-ut |73.5%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/scheme_board/ut_double_indexed/ydb-core-tx-scheme_board-ut_double_indexed |73.5%| [LD] {RESULT} $(B)/ydb/core/tx/scheme_board/ut_double_indexed/ydb-core-tx-scheme_board-ut_double_indexed |73.5%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/library/actors/interconnect/rdma/ut/ydb-library-actors-interconnect-rdma-ut |73.5%| [LD] {RESULT} $(B)/ydb/library/actors/interconnect/rdma/ut/ydb-library-actors-interconnect-rdma-ut |73.5%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/io_formats/arrow/scheme/ut/ydb-core-io_formats-arrow-scheme-ut |73.5%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/io_formats/arrow/scheme/ut/ydb-core-io_formats-arrow-scheme-ut |73.5%| [LD] {RESULT} $(B)/ydb/core/io_formats/arrow/scheme/ut/ydb-core-io_formats-arrow-scheme-ut |73.5%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/library/yaml_config/ut/ydb-library-yaml_config-ut |73.5%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/library/yaml_config/ut/ydb-library-yaml_config-ut |73.5%| [LD] {RESULT} $(B)/ydb/library/yaml_config/ut/ydb-library-yaml_config-ut |73.6%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/persqueue/pqtablet/blob/ut/ydb-core-persqueue-pqtablet-blob-ut |73.6%| [LD] {RESULT} $(B)/ydb/core/persqueue/pqtablet/blob/ut/ydb-core-persqueue-pqtablet-blob-ut |73.6%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/driver_lib/version/ut/ydb-core-driver_lib-version-ut |73.7%| [LD] {RESULT} $(B)/ydb/core/driver_lib/version/ut/ydb-core-driver_lib-version-ut |73.7%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/library/yql/providers/dq/provider/ut/ydb-library-yql-providers-dq-provider-ut |73.7%| [LD] {RESULT} $(B)/ydb/library/yql/providers/dq/provider/ut/ydb-library-yql-providers-dq-provider-ut |73.7%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/library/actors/dnsresolver/ut/ydb-library-actors-dnsresolver-ut |73.7%| [LD] {RESULT} $(B)/ydb/library/actors/dnsresolver/ut/ydb-library-actors-dnsresolver-ut |73.7%| [LD] {default-linux-x86_64, release, asan} 
$(B)/ydb/library/actors/wilson/ut/ydb-library-actors-wilson-ut |73.7%| [LD] {RESULT} $(B)/ydb/library/actors/wilson/ut/ydb-library-actors-wilson-ut |73.8%| [LD] {BAZEL_UPLOAD} $(B)/ydb/core/tx/sequenceshard/public/ut/ydb-core-tx-sequenceshard-public-ut |73.8%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/library/actors/async/ut/ydb-library-actors-async-ut |73.9%| [LD] {RESULT} $(B)/ydb/library/actors/async/ut/ydb-library-actors-async-ut |74.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/testlib/basics/runtime.cpp |74.3%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/testlib/basics/libcore-testlib-basics.a |74.3%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/fq/libs/control_plane_storage/internal/ut/core-fq-libs-control_plane_storage-internal-ut |74.3%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/fq/libs/control_plane_storage/internal/ut/core-fq-libs-control_plane_storage-internal-ut |74.3%| [AR] {RESULT} $(B)/ydb/core/testlib/basics/libcore-testlib-basics.a |74.3%| [LD] {RESULT} $(B)/ydb/core/fq/libs/control_plane_storage/internal/ut/core-fq-libs-control_plane_storage-internal-ut |74.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/testlib/basics/runtime.cpp |74.9%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/testlib/basics/libcore-testlib-basics.a |75.0%| [LD] {BAZEL_UPLOAD} $(B)/ydb/core/tablet_flat/ut_util/ydb-core-tablet_flat-ut_util |75.4%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/library/yql/dq/comp_nodes/ut/ydb-library-yql-dq-comp_nodes-ut |75.4%| [LD] {RESULT} $(B)/ydb/library/yql/dq/comp_nodes/ut/ydb-library-yql-dq-comp_nodes-ut |75.4%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/library/yql/dq/comp_nodes/ut/ydb-library-yql-dq-comp_nodes-ut |75.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/library/yql/tools/dq/service_node/main.cpp |75.5%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/library/yql/providers/dq/actors/ut/ydb-library-yql-providers-dq-actors-ut |75.5%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/library/yql/providers/dq/actors/ut/ydb-library-yql-providers-dq-actors-ut |75.5%| [LD] {RESULT} $(B)/ydb/library/yql/providers/dq/actors/ut/ydb-library-yql-providers-dq-actors-ut |75.5%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/mvp/oidc_proxy/ut/ydb-mvp-oidc_proxy-ut |75.5%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/mvp/oidc_proxy/ut/ydb-mvp-oidc_proxy-ut |75.5%| [LD] {RESULT} $(B)/ydb/mvp/oidc_proxy/ut/ydb-mvp-oidc_proxy-ut |75.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/library/yql/tools/dq/service_node/main.cpp |75.7%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/config/validation/column_shard_config_validator_ut/column_shard_config_validator_ut |75.7%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/config/validation/column_shard_config_validator_ut/column_shard_config_validator_ut |75.7%| [LD] {RESULT} $(B)/ydb/core/config/validation/column_shard_config_validator_ut/column_shard_config_validator_ut |75.7%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/fq/libs/test_connection/ut/ydb-core-fq-libs-test_connection-ut |75.8%| [LD] {RESULT} $(B)/ydb/core/fq/libs/test_connection/ut/ydb-core-fq-libs-test_connection-ut |75.8%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/fq/libs/test_connection/ut/ydb-core-fq-libs-test_connection-ut |75.8%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/library/yql/providers/s3/provider/ut/ydb-library-yql-providers-s3-provider-ut |75.8%| [LD] {RESULT} $(B)/ydb/library/yql/providers/s3/provider/ut/ydb-library-yql-providers-s3-provider-ut |75.8%| [LD] {BAZEL_UPLOAD, SKIPPED} 
$(B)/ydb/library/yql/providers/s3/provider/ut/ydb-library-yql-providers-s3-provider-ut |75.8%| [LD] {BAZEL_UPLOAD} $(B)/ydb/core/fq/libs/db_id_async_resolver_impl/ut/ydb-core-fq-libs-db_id_async_resolver_impl-ut |75.8%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/tests/tools/kqprun/tests/ydb-tests-tools-kqprun-tests |75.8%| [LD] {RESULT} $(B)/ydb/tests/tools/kqprun/tests/ydb-tests-tools-kqprun-tests |75.8%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/tests/tools/kqprun/tests/ydb-tests-tools-kqprun-tests |75.8%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/library/actors/interconnect/rdma/ut_mem_pool_limit_32/interconnect-rdma-ut_mem_pool_limit_32 |75.8%| [LD] {RESULT} $(B)/ydb/library/actors/interconnect/rdma/ut_mem_pool_limit_32/interconnect-rdma-ut_mem_pool_limit_32 |75.8%| [LD] {BAZEL_UPLOAD} $(B)/ydb/core/tx/scheme_board/ut_double_indexed/ydb-core-tx-scheme_board-ut_double_indexed |75.8%| [LD] {BAZEL_UPLOAD} $(B)/ydb/library/actors/wilson/ut/ydb-library-actors-wilson-ut |75.8%| [LD] {BAZEL_UPLOAD} $(B)/ydb/core/tablet_flat/ut_large/ydb-core-tablet_flat-ut_large |75.8%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/ymq/ut/ydb-core-ymq-ut |75.8%| [LD] {RESULT} $(B)/ydb/core/ymq/ut/ydb-core-ymq-ut |75.8%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/ymq/ut/ydb-core-ymq-ut |75.8%| [LD] {BAZEL_UPLOAD} $(B)/ydb/library/actors/interconnect/rdma/ut/ydb-library-actors-interconnect-rdma-ut |75.8%| [LD] {BAZEL_UPLOAD} $(B)/ydb/library/actors/dnsresolver/ut/ydb-library-actors-dnsresolver-ut |75.9%| [LD] {BAZEL_UPLOAD} $(B)/ydb/core/persqueue/pqtablet/blob/ut/ydb-core-persqueue-pqtablet-blob-ut |75.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/library/yql/providers/dq/local_gateway/yql_dq_gateway_local.cpp |75.9%| [LD] {BAZEL_UPLOAD} $(B)/ydb/library/actors/interconnect/rdma/ut_mem_pool_limit_32/interconnect-rdma-ut_mem_pool_limit_32 |75.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/library/yql/providers/dq/local_gateway/yql_dq_gateway_local.cpp |75.9%| [LD] {BAZEL_UPLOAD} $(B)/ydb/core/log_backend/ut/ydb-core-log_backend-ut |75.9%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/tests/fq/restarts/ydb-tests-fq-restarts |75.9%| [LD] {RESULT} $(B)/ydb/tests/fq/restarts/ydb-tests-fq-restarts |75.9%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/tests/fq/restarts/ydb-tests-fq-restarts |75.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/util/actorsys_test/testactorsys.cpp |75.9%| [LD] {BAZEL_UPLOAD} $(B)/ydb/library/actors/async/ut/ydb-library-actors-async-ut |75.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/util/actorsys_test/testactorsys.cpp |75.9%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/tests/stress/backup/backup_stress |75.9%| [LD] {RESULT} $(B)/ydb/tests/stress/backup/backup_stress |75.9%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/tests/stress/backup/backup_stress |75.9%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/library/yql/dq/opt/ut/ydb-library-yql-dq-opt-ut |75.9%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/library/yql/dq/opt/ut/ydb-library-yql-dq-opt-ut |75.9%| [LD] {RESULT} $(B)/ydb/library/yql/dq/opt/ut/ydb-library-yql-dq-opt-ut |75.9%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/library/yql/providers/dq/local_gateway/libproviders-dq-local_gateway.a |75.9%| [AR] {RESULT} $(B)/ydb/library/yql/providers/dq/local_gateway/libproviders-dq-local_gateway.a |75.9%| [AR] {BAZEL_UPLOAD} $(B)/ydb/library/yql/providers/dq/local_gateway/libproviders-dq-local_gateway.a |75.9%| [LD] {default-linux-x86_64, release, asan} 
$(B)/ydb/core/tx/limiter/grouped_memory/ut/ydb-core-tx-limiter-grouped_memory-ut |75.9%| [LD] {RESULT} $(B)/ydb/core/tx/limiter/grouped_memory/ut/ydb-core-tx-limiter-grouped_memory-ut |75.9%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/limiter/grouped_memory/ut/ydb-core-tx-limiter-grouped_memory-ut |75.9%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/public/tools/ydb_recipe/ydb_recipe |75.9%| [LD] {RESULT} $(B)/ydb/public/tools/ydb_recipe/ydb_recipe |75.9%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/public/tools/ydb_recipe/ydb_recipe |75.9%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/tests/stress/show_create/table/show_create_table |75.9%| [LD] {RESULT} $(B)/ydb/tests/stress/show_create/table/show_create_table |75.9%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/tests/stress/show_create/table/show_create_table |75.9%| [LD] {BAZEL_UPLOAD} $(B)/ydb/library/yql/providers/dq/provider/ut/ydb-library-yql-providers-dq-provider-ut |75.9%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/tests/stress/topic_kafka/tests/ydb-tests-stress-topic_kafka-tests |75.9%| [LD] {RESULT} $(B)/ydb/tests/stress/topic_kafka/tests/ydb-tests-stress-topic_kafka-tests |76.0%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/tests/stress/topic_kafka/tests/ydb-tests-stress-topic_kafka-tests |76.0%| [LD] {BAZEL_UPLOAD} $(B)/ydb/core/driver_lib/version/ut/ydb-core-driver_lib-version-ut |76.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/util/actorsys_test/single_thread_ic_mock.cpp |76.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/util/actorsys_test/single_thread_ic_mock.cpp |76.0%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/tests/stress/topic/tests/ydb-tests-stress-topic-tests |76.0%| [LD] {RESULT} $(B)/ydb/tests/stress/topic/tests/ydb-tests-stress-topic-tests |76.0%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/tests/stress/topic/tests/ydb-tests-stress-topic-tests |76.0%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/util/actorsys_test/libcore-util-actorsys_test.a |76.0%| [AR] {RESULT} $(B)/ydb/core/util/actorsys_test/libcore-util-actorsys_test.a |76.0%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/util/actorsys_test/libcore-util-actorsys_test.a |76.0%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/kqp/federated_query/ut/ydb-core-kqp-federated_query-ut |76.0%| [LD] {RESULT} $(B)/ydb/core/kqp/federated_query/ut/ydb-core-kqp-federated_query-ut |76.0%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/kqp/federated_query/ut/ydb-core-kqp-federated_query-ut |76.0%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/tests/stress/simple_queue/tests/ydb-tests-stress-simple_queue-tests |76.0%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/tests/stress/simple_queue/tests/ydb-tests-stress-simple_queue-tests |76.0%| [LD] {RESULT} $(B)/ydb/tests/stress/simple_queue/tests/ydb-tests-stress-simple_queue-tests |76.2%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/tests/stress/result_set_format/tests/ydb-tests-stress-result_set_format-tests |76.2%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/tests/stress/result_set_format/tests/ydb-tests-stress-result_set_format-tests |76.2%| [LD] {RESULT} $(B)/ydb/tests/stress/result_set_format/tests/ydb-tests-stress-result_set_format-tests |76.2%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/tests/library/ut/ydb-tests-library-ut |76.2%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/tests/library/ut/ydb-tests-library-ut |76.2%| [LD] {RESULT} $(B)/ydb/tests/library/ut/ydb-tests-library-ut |76.2%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/tests/functional/wardens/ydb-tests-functional-wardens 
|76.2%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/tests/functional/wardens/ydb-tests-functional-wardens |76.2%| [LD] {RESULT} $(B)/ydb/tests/functional/wardens/ydb-tests-functional-wardens |76.3%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/tests/functional/scheme_tests/ydb-tests-functional-scheme_tests |76.3%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/tests/functional/scheme_tests/ydb-tests-functional-scheme_tests |76.3%| [LD] {RESULT} $(B)/ydb/tests/functional/scheme_tests/ydb-tests-functional-scheme_tests |76.4%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/tests/stress/cdc/tests/ydb-tests-stress-cdc-tests |76.4%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/tests/stress/cdc/tests/ydb-tests-stress-cdc-tests |76.4%| [LD] {RESULT} $(B)/ydb/tests/stress/cdc/tests/ydb-tests-stress-cdc-tests |76.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/ut_group/main.cpp |76.5%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/tests/fq/http_api/ydb-tests-fq-http_api |76.5%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/tests/fq/http_api/ydb-tests-fq-http_api |76.5%| [LD] {RESULT} $(B)/ydb/tests/fq/http_api/ydb-tests-fq-http_api |76.5%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/library/yql/providers/dq/runtime/ut/ydb-library-yql-providers-dq-runtime-ut |76.5%| [LD] {RESULT} $(B)/ydb/library/yql/providers/dq/runtime/ut/ydb-library-yql-providers-dq-runtime-ut |76.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/ut_group/main.cpp |76.6%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/library/actors/async/benchmark/benchmark_actors_async |76.6%| [LD] {RESULT} $(B)/ydb/library/actors/async/benchmark/benchmark_actors_async |76.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/external_sources/object_storage/inference/ut/arrow_inference_ut.cpp |76.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/external_sources/object_storage/inference/ut/arrow_inference_ut.cpp |76.6%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/ymq/http/ut/ydb-core-ymq-http-ut |76.6%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/ymq/http/ut/ydb-core-ymq-http-ut |76.6%| [LD] {RESULT} $(B)/ydb/core/ymq/http/ut/ydb-core-ymq-http-ut |76.6%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/tests/stress/result_set_format/result_set_format |76.6%| [LD] {RESULT} $(B)/ydb/tests/stress/result_set_format/result_set_format |76.6%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/tests/stress/result_set_format/result_set_format |76.6%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/tests/stress/scheme_board/pile_promotion/tests/tests-stress-scheme_board-pile_promotion-tests |76.6%| [LD] {RESULT} $(B)/ydb/tests/stress/scheme_board/pile_promotion/tests/tests-stress-scheme_board-pile_promotion-tests |76.6%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/tests/stress/scheme_board/pile_promotion/tests/tests-stress-scheme_board-pile_promotion-tests |76.7%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/library/yql/providers/dq/scheduler/ut/ydb-library-yql-providers-dq-scheduler-ut |76.7%| [LD] {RESULT} $(B)/ydb/library/yql/providers/dq/scheduler/ut/ydb-library-yql-providers-dq-scheduler-ut |76.7%| [LD] {BAZEL_UPLOAD} $(B)/ydb/library/actors/async/benchmark/benchmark_actors_async |76.7%| [LD] {BAZEL_UPLOAD} $(B)/ydb/library/yql/providers/dq/runtime/ut/ydb-library-yql-providers-dq-runtime-ut |76.7%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/tests/stress/mixedpy/tests/ydb-tests-stress-mixedpy-tests |76.7%| [LD] {RESULT} $(B)/ydb/tests/stress/mixedpy/tests/ydb-tests-stress-mixedpy-tests |76.7%| [LD] {BAZEL_UPLOAD, 
SKIPPED} $(B)/ydb/tests/stress/mixedpy/tests/ydb-tests-stress-mixedpy-tests |76.9%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/library/actors/http/ut/ydb-library-actors-http-ut |76.9%| [LD] {RESULT} $(B)/ydb/library/actors/http/ut/ydb-library-actors-http-ut |76.9%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/library/actors/interconnect/ut/ydb-library-actors-interconnect-ut |76.9%| [LD] {RESULT} $(B)/ydb/library/actors/interconnect/ut/ydb-library-actors-interconnect-ut |76.9%| [LD] {BAZEL_UPLOAD} $(B)/ydb/library/yql/providers/dq/scheduler/ut/ydb-library-yql-providers-dq-scheduler-ut |76.9%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/library/actors/interconnect/rdma/ut_mem_pool_limit_128/interconnect-rdma-ut_mem_pool_limit_128 |76.9%| [LD] {RESULT} $(B)/ydb/library/actors/interconnect/rdma/ut_mem_pool_limit_128/interconnect-rdma-ut_mem_pool_limit_128 |76.9%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/tests/stress/cdc/cdc |76.9%| [LD] {RESULT} $(B)/ydb/tests/stress/cdc/cdc |76.9%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/tests/stress/cdc/cdc |77.0%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/library/workload/benchmark_base/ut/ydb-library-workload-benchmark_base-ut |77.0%| [LD] {RESULT} $(B)/ydb/library/workload/benchmark_base/ut/ydb-library-workload-benchmark_base-ut |77.0%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/tests/stress/ctas/tests/ydb-tests-stress-ctas-tests |77.0%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/tests/stress/ctas/tests/ydb-tests-stress-ctas-tests |77.0%| [LD] {RESULT} $(B)/ydb/tests/stress/ctas/tests/ydb-tests-stress-ctas-tests |77.0%| [LD] {BAZEL_UPLOAD} $(B)/ydb/library/actors/interconnect/rdma/ut_mem_pool_limit_128/interconnect-rdma-ut_mem_pool_limit_128 |77.0%| [LD] {BAZEL_UPLOAD} $(B)/ydb/library/actors/http/ut/ydb-library-actors-http-ut |77.0%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/tests/example/ydb-tests-example |77.0%| [LD] {RESULT} $(B)/ydb/tests/example/ydb-tests-example |77.0%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/tests/example/ydb-tests-example |77.0%| [LD] {BAZEL_UPLOAD} $(B)/ydb/library/workload/benchmark_base/ut/ydb-library-workload-benchmark_base-ut |77.0%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/persqueue/pqtablet/partition/ut/ydb-core-persqueue-pqtablet-partition-ut |77.0%| [LD] {RESULT} $(B)/ydb/core/persqueue/pqtablet/partition/ut/ydb-core-persqueue-pqtablet-partition-ut |77.1%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/blobstorage/ut_group/ydb-core-blobstorage-ut_group |77.1%| [LD] {RESULT} $(B)/ydb/core/blobstorage/ut_group/ydb-core-blobstorage-ut_group |77.1%| [LD] {BAZEL_UPLOAD} $(B)/ydb/library/actors/interconnect/ut/ydb-library-actors-interconnect-ut |77.1%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/mvp/meta/ut/ydb-mvp-meta-ut |77.1%| [LD] {RESULT} $(B)/ydb/mvp/meta/ut/ydb-mvp-meta-ut |77.1%| [LD] {BAZEL_UPLOAD} $(B)/ydb/core/persqueue/pqtablet/partition/ut/ydb-core-persqueue-pqtablet-partition-ut |77.1%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/persqueue/public/partition_key_range/ut/ydb-core-persqueue-public-partition_key_range-ut |77.1%| [LD] {RESULT} $(B)/ydb/core/persqueue/public/partition_key_range/ut/ydb-core-persqueue-public-partition_key_range-ut |77.1%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/library/yql/providers/s3/common/ut/ydb-library-yql-providers-s3-common-ut |77.1%| [LD] {RESULT} $(B)/ydb/library/yql/providers/s3/common/ut/ydb-library-yql-providers-s3-common-ut |77.1%| [LD] {BAZEL_UPLOAD, SKIPPED} 
$(B)/ydb/library/yql/providers/s3/common/ut/ydb-library-yql-providers-s3-common-ut |77.2%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/external_sources/object_storage/inference/ut/external_sources-object_storage-inference-ut |77.2%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/external_sources/object_storage/inference/ut/external_sources-object_storage-inference-ut |77.2%| [LD] {RESULT} $(B)/ydb/core/external_sources/object_storage/inference/ut/external_sources-object_storage-inference-ut |77.2%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/library/yaml_config/tools/dump_ds_init/yaml-to-proto-dump-ds-init |77.2%| [LD] {RESULT} $(B)/ydb/library/yaml_config/tools/dump_ds_init/yaml-to-proto-dump-ds-init |77.2%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/library/yaml_config/tools/dump_ds_init/yaml-to-proto-dump-ds-init |77.3%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/tests/stress/kv/tests/ydb-tests-stress-kv-tests |77.3%| [LD] {RESULT} $(B)/ydb/tests/stress/kv/tests/ydb-tests-stress-kv-tests |77.3%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/tests/stress/kv/tests/ydb-tests-stress-kv-tests |77.3%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/tests/tools/pq_read/test/ydb-tests-tools-pq_read-test |77.4%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/tests/tools/pq_read/test/ydb-tests-tools-pq_read-test |77.4%| [LD] {RESULT} $(B)/ydb/tests/tools/pq_read/test/ydb-tests-tools-pq_read-test |77.5%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/blob_depot/ut/ydb-core-blob_depot-ut |77.5%| [LD] {RESULT} $(B)/ydb/core/blob_depot/ut/ydb-core-blob_depot-ut |77.5%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/tests/stress/backup/tests/ydb-tests-stress-backup-tests |77.5%| [LD] {RESULT} $(B)/ydb/tests/stress/backup/tests/ydb-tests-stress-backup-tests |77.5%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/tests/stress/backup/tests/ydb-tests-stress-backup-tests |77.6%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/blobstorage/vdisk/defrag/ut/ydb-core-blobstorage-vdisk-defrag-ut |77.6%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/blobstorage/vdisk/defrag/ut/ydb-core-blobstorage-vdisk-defrag-ut |77.6%| [LD] {RESULT} $(B)/ydb/core/blobstorage/vdisk/defrag/ut/ydb-core-blobstorage-vdisk-defrag-ut |77.7%| [LD] {BAZEL_UPLOAD} $(B)/ydb/core/blobstorage/ut_group/ydb-core-blobstorage-ut_group |77.7%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/metering/ut/ydb-core-metering-ut |77.7%| [LD] {RESULT} $(B)/ydb/core/metering/ut/ydb-core-metering-ut |77.7%| [LD] {BAZEL_UPLOAD} $(B)/ydb/mvp/meta/ut/ydb-mvp-meta-ut |77.7%| [LD] {BAZEL_UPLOAD} $(B)/ydb/core/blob_depot/ut/ydb-core-blob_depot-ut |77.7%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/library/yql/dq/actors/compute/ut/ydb-library-yql-dq-actors-compute-ut |77.7%| [LD] {RESULT} $(B)/ydb/library/yql/dq/actors/compute/ut/ydb-library-yql-dq-actors-compute-ut |77.8%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/library/yql/dq/actors/compute/ut/ydb-library-yql-dq-actors-compute-ut |77.8%| [LD] {BAZEL_UPLOAD} $(B)/ydb/core/metering/ut/ydb-core-metering-ut |77.8%| [LD] {BAZEL_UPLOAD} $(B)/ydb/core/persqueue/public/partition_key_range/ut/ydb-core-persqueue-public-partition_key_range-ut |77.8%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/library/workload/tpch/ut/ydb-library-workload-tpch-ut |77.8%| [LD] {RESULT} $(B)/ydb/library/workload/tpch/ut/ydb-library-workload-tpch-ut |77.8%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/tests/stress/ctas/ctas |77.8%| [LD] {RESULT} $(B)/ydb/tests/stress/ctas/ctas |77.8%| [LD] 
{BAZEL_UPLOAD, SKIPPED} $(B)/ydb/tests/stress/ctas/ctas |77.8%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/tests/stress/node_broker/node_broker |77.8%| [LD] {RESULT} $(B)/ydb/tests/stress/node_broker/node_broker |77.8%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/tests/stress/node_broker/node_broker |77.8%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/tests/stress/streaming/streaming |77.8%| [LD] {RESULT} $(B)/ydb/tests/stress/streaming/streaming |77.8%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/tests/stress/streaming/streaming |77.8%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/tests/stress/show_create/view/show_create_view |77.8%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/tests/stress/show_create/view/show_create_view |77.8%| [LD] {RESULT} $(B)/ydb/tests/stress/show_create/view/show_create_view |77.8%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/library/yql/tools/dqrun/dqrun |77.8%| [LD] {RESULT} $(B)/ydb/library/yql/tools/dqrun/dqrun |77.8%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/library/yql/tools/dqrun/dqrun |77.8%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/tests/stress/olap_workload/tests/ydb-tests-stress-olap_workload-tests |77.8%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/tests/stress/olap_workload/tests/ydb-tests-stress-olap_workload-tests |77.8%| [LD] {RESULT} $(B)/ydb/tests/stress/olap_workload/tests/ydb-tests-stress-olap_workload-tests |77.8%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/tests/sql/ydb-tests-sql |77.8%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/tests/sql/ydb-tests-sql |77.8%| [LD] {RESULT} $(B)/ydb/tests/sql/ydb-tests-sql |77.8%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/tests/stress/transfer/tests/ydb-tests-stress-transfer-tests |77.8%| [LD] {RESULT} $(B)/ydb/tests/stress/transfer/tests/ydb-tests-stress-transfer-tests |77.8%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/tests/stress/transfer/tests/ydb-tests-stress-transfer-tests |77.8%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/tests/functional/sqs/merge_split_common_table/std/functional-sqs-merge_split_common_table-std |77.8%| [LD] {RESULT} $(B)/ydb/tests/functional/sqs/merge_split_common_table/std/functional-sqs-merge_split_common_table-std |77.8%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/tests/functional/sqs/merge_split_common_table/std/functional-sqs-merge_split_common_table-std |77.8%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/tests/stress/testshard_workload/tests/ydb-tests-stress-testshard_workload-tests |77.8%| [LD] {RESULT} $(B)/ydb/tests/stress/testshard_workload/tests/ydb-tests-stress-testshard_workload-tests |77.9%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/tests/stress/testshard_workload/tests/ydb-tests-stress-testshard_workload-tests |77.9%| [LD] {BAZEL_UPLOAD} $(B)/ydb/library/workload/tpch/ut/ydb-library-workload-tpch-ut |77.9%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/library/formats/arrow/ut/ydb-library-formats-arrow-ut |77.9%| [LD] {RESULT} $(B)/ydb/library/formats/arrow/ut/ydb-library-formats-arrow-ut |77.9%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/tests/stress/s3_backups/s3_backups |77.9%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/tests/stress/s3_backups/s3_backups |77.9%| [LD] {RESULT} $(B)/ydb/tests/stress/s3_backups/s3_backups |77.9%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/tests/stress/olap_workload/olap_workload |77.9%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/tests/stress/olap_workload/olap_workload |77.9%| [LD] {RESULT} $(B)/ydb/tests/stress/olap_workload/olap_workload |77.9%| [CC] {default-linux-x86_64, release, 
asan} $(S)/ydb/core/blobstorage/ut_blobstorage/lib/node_warden_mock_state.cpp |77.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/ut_blobstorage/lib/node_warden_mock_state.cpp |77.9%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/viewer/tests/ydb-core-viewer-tests |77.9%| [LD] {RESULT} $(B)/ydb/core/viewer/tests/ydb-core-viewer-tests |77.9%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/viewer/tests/ydb-core-viewer-tests |78.0%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/library/yaml_config/ut_transform/ydb-library-yaml_config-ut_transform |78.0%| [LD] {RESULT} $(B)/ydb/library/yaml_config/ut_transform/ydb-library-yaml_config-ut_transform |78.0%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/library/yaml_config/ut_transform/ydb-library-yaml_config-ut_transform |78.0%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/tests/stress/node_broker/tests/ydb-tests-stress-node_broker-tests |78.0%| [LD] {RESULT} $(B)/ydb/tests/stress/node_broker/tests/ydb-tests-stress-node_broker-tests |78.0%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/tests/stress/node_broker/tests/ydb-tests-stress-node_broker-tests |78.0%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/tests/stress/show_create/view/tests/ydb-tests-stress-show_create-view-tests |78.0%| [LD] {RESULT} $(B)/ydb/tests/stress/show_create/view/tests/ydb-tests-stress-show_create-view-tests |78.1%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/tests/stress/show_create/view/tests/ydb-tests-stress-show_create-view-tests |78.1%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/tests/functional/postgresql/ydb-tests-functional-postgresql |78.1%| [LD] {RESULT} $(B)/ydb/tests/functional/postgresql/ydb-tests-functional-postgresql |78.1%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/tests/functional/postgresql/ydb-tests-functional-postgresql |78.1%| [LD] {BAZEL_UPLOAD} $(B)/ydb/library/formats/arrow/ut/ydb-library-formats-arrow-ut |78.1%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/tests/tools/kqprun/recipe/kqprun_recipe |78.1%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/tests/tools/kqprun/recipe/kqprun_recipe |78.1%| [LD] {RESULT} $(B)/ydb/tests/tools/kqprun/recipe/kqprun_recipe |78.2%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/tests/stress/streaming/tests/ydb-tests-stress-streaming-tests |78.2%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/tests/stress/streaming/tests/ydb-tests-stress-streaming-tests |78.2%| [LD] {RESULT} $(B)/ydb/tests/stress/streaming/tests/ydb-tests-stress-streaming-tests |78.3%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/tests/fq/common/ydb-tests-fq-common |78.3%| [LD] {RESULT} $(B)/ydb/tests/fq/common/ydb-tests-fq-common |78.3%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/tests/fq/common/ydb-tests-fq-common |78.3%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/library/range_treap/ut_range_treap/ydb-library-range_treap-ut_range_treap |78.3%| [LD] {RESULT} $(B)/ydb/library/range_treap/ut_range_treap/ydb-library-range_treap-ut_range_treap |78.4%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/tests/stress/show_create/table/tests/ydb-tests-stress-show_create-table-tests |78.4%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/tests/stress/show_create/table/tests/ydb-tests-stress-show_create-table-tests |78.4%| [LD] {RESULT} $(B)/ydb/tests/stress/show_create/table/tests/ydb-tests-stress-show_create-table-tests |78.7%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/external_sources/ut/ydb-core-external_sources-ut |78.7%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/external_sources/ut/ydb-core-external_sources-ut |78.7%| [LD] 
{RESULT} $(B)/ydb/core/external_sources/ut/ydb-core-external_sources-ut |78.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/client/server/msgbus_server_proxy.cpp |78.8%| [LD] {BAZEL_UPLOAD} $(B)/ydb/library/range_treap/ut_range_treap/ydb-library-range_treap-ut_range_treap |78.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/client/server/msgbus_server_proxy.cpp |78.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/backpressure/ut_client/backpressure_ut.cpp |78.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/ut_blobstorage/lib/ut_helpers.cpp |78.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/backpressure/ut_client/backpressure_ut.cpp |78.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/ut_blobstorage/lib/ut_helpers.cpp |78.8%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/tests/stress/transfer/transfer |78.8%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/tests/stress/transfer/transfer |78.8%| [LD] {RESULT} $(B)/ydb/tests/stress/transfer/transfer |78.8%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/tests/stress/oltp_workload/tests/ydb-tests-stress-oltp_workload-tests |78.8%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/tests/stress/oltp_workload/tests/ydb-tests-stress-oltp_workload-tests |78.8%| [LD] {RESULT} $(B)/ydb/tests/stress/oltp_workload/tests/ydb-tests-stress-oltp_workload-tests |78.8%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/ymq/base/ut/ydb-core-ymq-base-ut |78.8%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/ymq/base/ut/ydb-core-ymq-base-ut |78.8%| [LD] {RESULT} $(B)/ydb/core/ymq/base/ut/ydb-core-ymq-base-ut |78.8%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/tests/stress/s3_backups/tests/ydb-tests-stress-s3_backups-tests |78.8%| [LD] {RESULT} $(B)/ydb/tests/stress/s3_backups/tests/ydb-tests-stress-s3_backups-tests |78.8%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/tests/stress/s3_backups/tests/ydb-tests-stress-s3_backups-tests |78.8%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/tests/functional/serializable/ydb-tests-functional-serializable |78.8%| [LD] {RESULT} $(B)/ydb/tests/functional/serializable/ydb-tests-functional-serializable |78.8%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/tests/functional/serializable/ydb-tests-functional-serializable |78.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/ut_blobstorage/lib/node_warden_mock_pipe.cpp |78.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/ut_blobstorage/lib/node_warden_mock_pipe.cpp |78.8%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/tests/stress/scheme_board/pile_promotion/pile_promotion_workload |78.8%| [LD] {RESULT} $(B)/ydb/tests/stress/scheme_board/pile_promotion/pile_promotion_workload |78.8%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/tests/stress/scheme_board/pile_promotion/pile_promotion_workload |78.8%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/long_tx_service/public/ut/ydb-core-tx-long_tx_service-public-ut |78.8%| [LD] {RESULT} $(B)/ydb/core/tx/long_tx_service/public/ut/ydb-core-tx-long_tx_service-public-ut |78.8%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/library/yql/providers/s3/object_listers/ut/ydb-library-yql-providers-s3-object_listers-ut |78.8%| [LD] {RESULT} $(B)/ydb/library/yql/providers/s3/object_listers/ut/ydb-library-yql-providers-s3-object_listers-ut |78.9%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/library/actors/core/harmonizer/ut/ydb-library-actors-core-harmonizer-ut |78.9%| [LD] {RESULT} 
$(B)/ydb/library/actors/core/harmonizer/ut/ydb-library-actors-core-harmonizer-ut |78.9%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/config/validation/auth_config_validator_ut/core-config-validation-auth_config_validator_ut |78.9%| [LD] {RESULT} $(B)/ydb/core/config/validation/auth_config_validator_ut/core-config-validation-auth_config_validator_ut |78.9%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/backup/common/ut/ydb-core-backup-common-ut |78.9%| [LD] {RESULT} $(B)/ydb/core/backup/common/ut/ydb-core-backup-common-ut |78.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/ut_blobstorage/lib/debug_log.cpp |78.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/ut_blobstorage/lib/debug_log.cpp |78.9%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/tests/functional/dstool/ydb-tests-functional-dstool |78.9%| [LD] {RESULT} $(B)/ydb/tests/functional/dstool/ydb-tests-functional-dstool |78.9%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/tests/functional/dstool/ydb-tests-functional-dstool |78.9%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/tests/functional/query_cache/ydb-tests-functional-query_cache |78.9%| [LD] {RESULT} $(B)/ydb/tests/functional/query_cache/ydb-tests-functional-query_cache |78.9%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/tests/functional/query_cache/ydb-tests-functional-query_cache |79.0%| [LD] {BAZEL_UPLOAD} $(B)/ydb/library/yql/providers/s3/object_listers/ut/ydb-library-yql-providers-s3-object_listers-ut |79.0%| [LD] {BAZEL_UPLOAD} $(B)/ydb/core/tx/long_tx_service/public/ut/ydb-core-tx-long_tx_service-public-ut |79.0%| [LD] {BAZEL_UPLOAD} $(B)/ydb/library/actors/core/harmonizer/ut/ydb-library-actors-core-harmonizer-ut |79.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/client/server/msgbus_server_scheme_request.cpp |79.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/client/server/msgbus_server_scheme_request.cpp |79.0%| [LD] {BAZEL_UPLOAD} $(B)/ydb/core/backup/common/ut/ydb-core-backup-common-ut |79.0%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/library/actors/interconnect/ut_rdma/ydb-library-actors-interconnect-ut_rdma |79.0%| [LD] {RESULT} $(B)/ydb/library/actors/interconnect/ut_rdma/ydb-library-actors-interconnect-ut_rdma |79.0%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/pgproxy/ut/ydb-core-pgproxy-ut |79.0%| [LD] {RESULT} $(B)/ydb/core/pgproxy/ut/ydb-core-pgproxy-ut |79.0%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/apps/ydb/ydb |79.0%| [LD] {RESULT} $(B)/ydb/apps/ydb/ydb |79.0%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/apps/ydb/ydb |79.0%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/persqueue/common/ut/ydb-core-persqueue-common-ut |79.0%| [LD] {RESULT} $(B)/ydb/core/persqueue/common/ut/ydb-core-persqueue-common-ut |79.0%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/config/validation/ut/ydb-core-config-validation-ut |79.0%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/config/validation/ut/ydb-core-config-validation-ut |79.0%| [LD] {RESULT} $(B)/ydb/core/config/validation/ut/ydb-core-config-validation-ut |79.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/client/server/msgbus_server_pq_read_session_info.cpp |79.0%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/blobstorage/backpressure/ut_client/ydb-core-blobstorage-backpressure-ut_client |79.0%| [LD] {RESULT} $(B)/ydb/core/blobstorage/backpressure/ut_client/ydb-core-blobstorage-backpressure-ut_client |79.1%| [LD] {default-linux-x86_64, release, asan} 
$(B)/ydb/core/config/init/ut/ydb-core-config-init-ut |79.1%| [LD] {RESULT} $(B)/ydb/core/config/init/ut/ydb-core-config-init-ut |79.1%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/blobstorage/backpressure/ut_client/ydb-core-blobstorage-backpressure-ut_client |79.1%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/config/init/ut/ydb-core-config-init-ut |79.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/client/server/msgbus_server_pq_read_session_info.cpp |79.1%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/tests/functional/encryption/ydb-tests-functional-encryption |79.1%| [LD] {RESULT} $(B)/ydb/tests/functional/encryption/ydb-tests-functional-encryption |79.1%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/tests/functional/encryption/ydb-tests-functional-encryption |79.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/services/persqueue_v1/actors/commit_offset_actor.cpp |79.1%| [LD] {BAZEL_UPLOAD} $(B)/ydb/core/persqueue/common/ut/ydb-core-persqueue-common-ut |79.1%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/library/yaml_config/tools/dump/yaml-to-proto-dump |79.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/services/persqueue_v1/actors/commit_offset_actor.cpp |79.1%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/library/yaml_config/tools/dump/yaml-to-proto-dump |79.1%| [LD] {RESULT} $(B)/ydb/library/yaml_config/tools/dump/yaml-to-proto-dump |79.1%| [LD] {BAZEL_UPLOAD} $(B)/ydb/library/actors/interconnect/ut_rdma/ydb-library-actors-interconnect-ut_rdma |79.2%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/formats/arrow/ut/ydb-core-formats-arrow-ut |79.2%| [LD] {RESULT} $(B)/ydb/core/formats/arrow/ut/ydb-core-formats-arrow-ut |79.2%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/formats/arrow/ut/ydb-core-formats-arrow-ut |79.2%| [LD] {BAZEL_UPLOAD} $(B)/ydb/core/config/validation/auth_config_validator_ut/core-config-validation-auth_config_validator_ut |79.2%| [LD] {BAZEL_UPLOAD} $(B)/ydb/core/pgproxy/ut/ydb-core-pgproxy-ut |79.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/ut_blobstorage/lib/node_warden_mock_bsc.cpp |79.2%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/blobstorage/base/ut/ydb-core-blobstorage-base-ut |79.2%| [LD] {RESULT} $(B)/ydb/core/blobstorage/base/ut/ydb-core-blobstorage-base-ut |79.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/ut_blobstorage/lib/node_warden_mock_bsc.cpp |79.2%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/tests/datashard/add_column/ydb-tests-datashard-add_column |79.2%| [LD] {RESULT} $(B)/ydb/tests/datashard/add_column/ydb-tests-datashard-add_column |79.2%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/blobstorage/ut_blobstorage/lib/libblobstorage-ut_blobstorage-lib.a |79.2%| [AR] {RESULT} $(B)/ydb/core/blobstorage/ut_blobstorage/lib/libblobstorage-ut_blobstorage-lib.a |79.2%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/tests/datashard/add_column/ydb-tests-datashard-add_column |79.3%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/blobstorage/ut_blobstorage/lib/libblobstorage-ut_blobstorage-lib.a |79.3%| [LD] {BAZEL_UPLOAD} $(B)/ydb/core/blobstorage/base/ut/ydb-core-blobstorage-base-ut |79.2%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/library/yql/providers/s3/actors/ut/ydb-library-yql-providers-s3-actors-ut |79.3%| [LD] {RESULT} $(B)/ydb/library/yql/providers/s3/actors/ut/ydb-library-yql-providers-s3-actors-ut |79.3%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/library/yql/providers/s3/actors/ut/ydb-library-yql-providers-s3-actors-ut |79.3%| [LD] {default-linux-x86_64, release, asan} 
$(B)/ydb/library/yql/providers/generic/provider/ut/pushdown/yql-providers-generic-provider-ut-pushdown |79.3%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/library/yql/providers/generic/provider/ut/pushdown/yql-providers-generic-provider-ut-pushdown |79.3%| [LD] {RESULT} $(B)/ydb/library/yql/providers/generic/provider/ut/pushdown/yql-providers-generic-provider-ut-pushdown |79.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/client/server/msgbus_server_pq_metacache.cpp |79.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/client/server/msgbus_server_pq_metacache.cpp |79.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/services/persqueue_v1/actors/read_info_actor.cpp |79.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/services/persqueue_v1/actors/read_info_actor.cpp |79.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/client/server/msgbus_server_persqueue.cpp |79.3%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/mvp/core/ut/ydb-mvp-core-ut |79.3%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/mvp/core/ut/ydb-mvp-core-ut |79.3%| [LD] {RESULT} $(B)/ydb/mvp/core/ut/ydb-mvp-core-ut |79.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/client/server/msgbus_server_persqueue.cpp |79.3%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/services/persqueue_v1/actors/libservices-persqueue_v1-actors.a |79.3%| [AR] {RESULT} $(B)/ydb/services/persqueue_v1/actors/libservices-persqueue_v1-actors.a |79.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/client/server/msgbus_server_pq_metarequest.cpp |79.3%| [AR] {BAZEL_UPLOAD} $(B)/ydb/services/persqueue_v1/actors/libservices-persqueue_v1-actors.a |79.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/client/server/msgbus_server_pq_metarequest.cpp |79.3%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/tests/stress/viewer/tests/ydb-tests-stress-viewer-tests |79.3%| [LD] {RESULT} $(B)/ydb/tests/stress/viewer/tests/ydb-tests-stress-viewer-tests |79.3%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/tests/stress/viewer/tests/ydb-tests-stress-viewer-tests |79.4%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/library/yql/providers/yt/actors/ut/ydb-library-yql-providers-yt-actors-ut |79.4%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/library/yql/providers/yt/actors/ut/ydb-library-yql-providers-yt-actors-ut |79.4%| [LD] {RESULT} $(B)/ydb/library/yql/providers/yt/actors/ut/ydb-library-yql-providers-yt-actors-ut |79.4%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/tests/stress/oltp_workload/oltp_workload |79.4%| [LD] {RESULT} $(B)/ydb/tests/stress/oltp_workload/oltp_workload |79.4%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/tests/stress/oltp_workload/oltp_workload |79.4%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/tests/tools/nemesis/driver/nemesis |79.4%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/tests/tools/nemesis/driver/nemesis |79.4%| [LD] {RESULT} $(B)/ydb/tests/tools/nemesis/driver/nemesis |79.4%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/tablet_flat/test/tool/perf/table-perf |79.4%| [LD] {RESULT} $(B)/ydb/core/tablet_flat/test/tool/perf/table-perf |79.4%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/scheme/ut/ydb-core-scheme-ut |79.4%| [LD] {RESULT} $(B)/ydb/core/scheme/ut/ydb-core-scheme-ut |79.4%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/tests/functional/limits/ydb-tests-functional-limits |79.4%| [LD] {RESULT} $(B)/ydb/tests/functional/limits/ydb-tests-functional-limits |79.4%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/tests/functional/limits/ydb-tests-functional-limits |79.5%| [LD] {BAZEL_UPLOAD} 
$(B)/ydb/core/tablet_flat/test/tool/perf/table-perf |79.4%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/tools/partcheck/partcheck |79.5%| [LD] {RESULT} $(B)/ydb/tools/partcheck/partcheck |79.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/ut_blobstorage/blob_depot_event_managers.cpp |79.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/ut_blobstorage/blob_depot_fat.cpp |79.5%| [LD] {BAZEL_UPLOAD} $(B)/ydb/core/scheme/ut/ydb-core-scheme-ut |79.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/ut_blobstorage/blob_depot_event_managers.cpp |79.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/ut_blobstorage/blob_depot_fat.cpp |79.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/ut_testshard/main.cpp |79.5%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/tests/functional/blobstorage/ydb-tests-functional-blobstorage |79.5%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/tests/functional/blobstorage/ydb-tests-functional-blobstorage |79.5%| [LD] {RESULT} $(B)/ydb/tests/functional/blobstorage/ydb-tests-functional-blobstorage |79.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/ut_blobstorage/multiget.cpp |79.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/ut_testshard/main.cpp |79.5%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/sys_view/service/ut/ydb-core-sys_view-service-ut |79.5%| [LD] {RESULT} $(B)/ydb/core/sys_view/service/ut/ydb-core-sys_view-service-ut |79.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/ut_blobstorage/multiget.cpp |79.5%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/tests/functional/sqs/cloud/ydb-tests-functional-sqs-cloud |79.5%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/tests/functional/sqs/cloud/ydb-tests-functional-sqs-cloud |79.5%| [LD] {RESULT} $(B)/ydb/tests/functional/sqs/cloud/ydb-tests-functional-sqs-cloud |79.5%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/tests/datashard/async_replication/ydb-tests-datashard-async_replication |79.5%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/tests/datashard/async_replication/ydb-tests-datashard-async_replication |79.5%| [LD] {RESULT} $(B)/ydb/tests/datashard/async_replication/ydb-tests-datashard-async_replication |79.5%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/tests/datashard/ttl/ydb-tests-datashard-ttl |79.5%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/tests/datashard/ttl/ydb-tests-datashard-ttl |79.5%| [LD] {RESULT} $(B)/ydb/tests/datashard/ttl/ydb-tests-datashard-ttl |79.5%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/tablet_flat/test/tool/surg/surg |79.5%| [LD] {RESULT} $(B)/ydb/core/tablet_flat/test/tool/surg/surg |79.5%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/library/persqueue/topic_parser/ut/ydb-library-persqueue-topic_parser-ut |79.5%| [LD] {RESULT} $(B)/ydb/library/persqueue/topic_parser/ut/ydb-library-persqueue-topic_parser-ut |79.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/ut_blobstorage/sync.cpp |79.6%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/blobstorage/vdisk/ingress/ut/ydb-core-blobstorage-vdisk-ingress-ut |79.6%| [LD] {RESULT} $(B)/ydb/core/blobstorage/vdisk/ingress/ut/ydb-core-blobstorage-vdisk-ingress-ut |79.6%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/util/ut/ydb-core-util-ut |79.6%| [LD] {RESULT} $(B)/ydb/core/util/ut/ydb-core-util-ut |79.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/ut_blobstorage/sync.cpp |79.6%| [LD] {default-linux-x86_64, release, asan} 
$(B)/ydb/tests/functional/audit/ydb-tests-functional-audit |79.6%| [LD] {RESULT} $(B)/ydb/tests/functional/audit/ydb-tests-functional-audit |79.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/ut_blobstorage/blob_depot.cpp |79.6%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/tests/functional/audit/ydb-tests-functional-audit |79.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/ut_blobstorage/blob_depot.cpp |79.7%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/library/actors/helpers/ut/ydb-library-actors-helpers-ut |79.7%| [LD] {RESULT} $(B)/ydb/library/actors/helpers/ut/ydb-library-actors-helpers-ut |79.7%| [LD] {BAZEL_UPLOAD} $(B)/ydb/tools/partcheck/partcheck |79.7%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/base/ut_auth/ydb-core-base-ut_auth |79.7%| [LD] {RESULT} $(B)/ydb/core/base/ut_auth/ydb-core-base-ut_auth |79.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/ut_blobstorage/blob_depot_test_functions.cpp |79.7%| [LD] {BAZEL_UPLOAD} $(B)/ydb/core/sys_view/service/ut/ydb-core-sys_view-service-ut |79.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/ut_blobstorage/blob_depot_test_functions.cpp |79.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/ut_blobstorage/replication_huge.cpp |79.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/ut_blobstorage/huge.cpp |79.7%| [LD] {default-linux-x86_64, release, asan} $(B)/yql/essentials/tools/sql2yql/sql2yql |79.7%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/yql/essentials/tools/sql2yql/sql2yql |79.7%| [LD] {RESULT} $(B)/yql/essentials/tools/sql2yql/sql2yql |79.7%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/blobstorage/vdisk/hullop/ut/ydb-core-blobstorage-vdisk-hullop-ut |79.7%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/blobstorage/vdisk/hullop/ut/ydb-core-blobstorage-vdisk-hullop-ut |79.7%| [LD] {RESULT} $(B)/ydb/core/blobstorage/vdisk/hullop/ut/ydb-core-blobstorage-vdisk-hullop-ut |79.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/ut_blobstorage/replication_huge.cpp |79.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/ut_blobstorage/huge.cpp |79.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/ut_blobstorage/space_check.cpp |79.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/ut_blobstorage/space_check.cpp |79.7%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/library/actors/interconnect/ut_fat/ydb-library-actors-interconnect-ut_fat |79.7%| [LD] {RESULT} $(B)/ydb/library/actors/interconnect/ut_fat/ydb-library-actors-interconnect-ut_fat |79.7%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/kqp/tools/join_perf/bin/join_perf |79.7%| [LD] {RESULT} $(B)/ydb/core/kqp/tools/join_perf/bin/join_perf |79.7%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/kqp/tools/join_perf/bin/join_perf |79.8%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/tests/solomon/reading/ydb-tests-solomon-reading |79.8%| [LD] {RESULT} $(B)/ydb/tests/solomon/reading/ydb-tests-solomon-reading |79.8%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/tests/solomon/reading/ydb-tests-solomon-reading |79.8%| [LD] {BAZEL_UPLOAD} $(B)/ydb/library/persqueue/topic_parser/ut/ydb-library-persqueue-topic_parser-ut |79.8%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/tests/functional/hive/ydb-tests-functional-hive |79.8%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/tests/functional/hive/ydb-tests-functional-hive |79.8%| [LD] {RESULT} $(B)/ydb/tests/functional/hive/ydb-tests-functional-hive |79.8%| [LD] {BAZEL_UPLOAD} 
$(B)/ydb/core/tablet_flat/test/tool/surg/surg |79.8%| [LD] {BAZEL_UPLOAD} $(B)/ydb/core/blobstorage/vdisk/ingress/ut/ydb-core-blobstorage-vdisk-ingress-ut |79.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/ut_blobstorage/move_pdisk.cpp |79.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/ut_blobstorage/shred.cpp |79.8%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/tests/functional/rename/ydb-tests-functional-rename |79.8%| [LD] {RESULT} $(B)/ydb/tests/functional/rename/ydb-tests-functional-rename |79.8%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/tests/functional/rename/ydb-tests-functional-rename |79.8%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/tools/cfg/bin/ydb_configure |79.8%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/tools/cfg/bin/ydb_configure |79.8%| [LD] {RESULT} $(B)/ydb/tools/cfg/bin/ydb_configure |79.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/ut_blobstorage/shred.cpp |79.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/ut_blobstorage/move_pdisk.cpp |79.8%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/library/yql/dq/actors/spilling/ut/ydb-library-yql-dq-actors-spilling-ut |79.8%| [LD] {RESULT} $(B)/ydb/library/yql/dq/actors/spilling/ut/ydb-library-yql-dq-actors-spilling-ut |79.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/ut_blobstorage/cancellation.cpp |79.8%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/library/actors/examples/01_ping_pong/example_01_ping_pong |79.8%| [LD] {RESULT} $(B)/ydb/library/actors/examples/01_ping_pong/example_01_ping_pong |79.8%| [LD] {BAZEL_UPLOAD} $(B)/ydb/core/base/ut_auth/ydb-core-base-ut_auth |79.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/services/deprecated/persqueue_v0/grpc_pq_write.cpp |79.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/services/deprecated/persqueue_v0/grpc_pq_write_actor.cpp |79.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/services/deprecated/persqueue_v0/grpc_pq_write.cpp |79.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/services/deprecated/persqueue_v0/grpc_pq_write_actor.cpp |79.8%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/tests/functional/api/ydb-tests-functional-api |79.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/ut_blobstorage/cancellation.cpp |79.9%| [LD] {RESULT} $(B)/ydb/tests/functional/api/ydb-tests-functional-api |79.9%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/tests/functional/api/ydb-tests-functional-api |79.9%| [LD] {BAZEL_UPLOAD} $(B)/ydb/core/util/ut/ydb-core-util-ut |79.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/ut_blobstorage/self_heal.cpp |79.9%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/blobstorage/vdisk/common/ut/ydb-core-blobstorage-vdisk-common-ut |79.9%| [LD] {RESULT} $(B)/ydb/core/blobstorage/vdisk/common/ut/ydb-core-blobstorage-vdisk-common-ut |79.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/ut_blobstorage/vdisk_malfunction.cpp |79.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/ut_blobstorage/balancing.cpp |79.9%| [AR] {default-linux-x86_64, release, asan, pic} $(B)/yt/yt/core/libyt-yt-core.a |79.9%| [AR] {RESULT} $(B)/yt/yt/core/libyt-yt-core.a |79.9%| [AR] {BAZEL_UPLOAD, SKIPPED} $(B)/yt/yt/core/libyt-yt-core.a |79.9%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/blobstorage/ut_testshard/ydb-core-blobstorage-ut_testshard |79.9%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/blobstorage/ut_testshard/ydb-core-blobstorage-ut_testshard |79.9%| [LD] {RESULT} 
$(B)/ydb/core/blobstorage/ut_testshard/ydb-core-blobstorage-ut_testshard |79.9%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/blobstorage/vdisk/hulldb/barriers/ut/ydb-core-blobstorage-vdisk-hulldb-barriers-ut |79.9%| [LD] {RESULT} $(B)/ydb/core/blobstorage/vdisk/hulldb/barriers/ut/ydb-core-blobstorage-vdisk-hulldb-barriers-ut |80.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/ut_blobstorage/vdisk_malfunction.cpp |80.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/ut_blobstorage/self_heal.cpp |80.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/ut_blobstorage/replication.cpp |79.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/services/deprecated/persqueue_v0/grpc_pq_read.cpp |80.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/ut_blobstorage/scrub_fast.cpp |80.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/ut_blobstorage/balancing.cpp |80.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/services/deprecated/persqueue_v0/grpc_pq_read.cpp |80.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/ut_blobstorage/sanitize_groups.cpp |80.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/load_test/ut/group_test_ut.cpp |80.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/ut_blobstorage/validation.cpp |80.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/ut_blobstorage/replication.cpp |80.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/ut_blobstorage/scrub_fast.cpp |80.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/ut_blobstorage/sanitize_groups.cpp |80.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/ut_blobstorage/validation.cpp |80.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/load_test/ut/group_test_ut.cpp |80.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/ut_blobstorage/group_reconfiguration.cpp |80.0%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/apps/etcd_proxy/etcd_proxy |80.0%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/apps/etcd_proxy/etcd_proxy |80.0%| [LD] {RESULT} $(B)/ydb/apps/etcd_proxy/etcd_proxy |80.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/ut_blobstorage/check_integrity.cpp |80.0%| [LD] {BAZEL_UPLOAD} $(B)/ydb/library/actors/helpers/ut/ydb-library-actors-helpers-ut |80.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/ut_blobstorage/group_reconfiguration.cpp |80.0%| [BN] {default-linux-x86_64, release, asan} $(B)/ydb/tests/stability/tool/s3_backups_workload |80.0%| [BN] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/tests/stability/tool/s3_backups_workload |80.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/ut_blobstorage/check_integrity.cpp |80.0%| [BN] {default-linux-x86_64, release, asan} $(B)/ydb/tests/stress/kafka/workload/ydb_cli |80.0%| [BN] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/tests/stress/kafka/workload/ydb_cli |80.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/ut_blobstorage/main.cpp |80.0%| [BN] {default-linux-x86_64, release, asan} $(B)/ydb/tests/stress/kv/workload/ydb_cli |80.0%| [BN] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/tests/stress/kv/workload/ydb_cli |80.0%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/blobstorage/ut_pdiskfit/pdiskfit/pdiskfit |80.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/ut_blobstorage/main.cpp |80.0%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/blobstorage/ut_pdiskfit/pdiskfit/pdiskfit |80.0%| [LD] {RESULT} $(B)/ydb/core/blobstorage/ut_pdiskfit/pdiskfit/pdiskfit |80.0%| [PK] {default-linux-x86_64, release, asan} 
$(B)/yql/essentials/tests/common/test_framework/udfs_deps/{common-test_framework-udfs_deps.final.pkg.fake ... yql/essentials/udfs/common/hyperscan/libhyperscan_udf.so} |80.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/ut_blobstorage/mirror3of4.cpp |80.0%| [BN] {default-linux-x86_64, release, asan} $(B)/ydb/tests/stability/tool/transfer_workload |80.0%| [BN] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/tests/stability/tool/transfer_workload |80.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/ut_blobstorage/mirror3of4.cpp |80.0%| [LD] {BAZEL_UPLOAD} $(B)/ydb/library/actors/examples/01_ping_pong/example_01_ping_pong |80.0%| [BN] {default-linux-x86_64, release, asan} $(B)/ydb/tests/stress/testshard_workload/workload/ydb_cli |80.0%| [BN] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/tests/stress/testshard_workload/workload/ydb_cli |80.0%| [BN] {default-linux-x86_64, release, asan} $(B)/ydb/tests/stress/topic_kafka/workload/ydb_cli |80.0%| [BN] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/tests/stress/topic_kafka/workload/ydb_cli |80.0%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/apps/dstool/ydb-dstool |80.0%| [LD] {BAZEL_UPLOAD} $(B)/ydb/library/actors/interconnect/ut_fat/ydb-library-actors-interconnect-ut_fat |80.0%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/apps/dstool/ydb-dstool |80.0%| [LD] {RESULT} $(B)/ydb/apps/dstool/ydb-dstool |80.1%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/tests/functional/restarts/ydb-tests-functional-restarts |80.1%| [LD] {RESULT} $(B)/ydb/tests/functional/restarts/ydb-tests-functional-restarts |80.1%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/tests/functional/restarts/ydb-tests-functional-restarts |80.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/ut_blobstorage/index_restore_get.cpp |80.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/ut_blobstorage/assimilation.cpp |80.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/ut_blobstorage/index_restore_get.cpp |80.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/ut_blobstorage/assimilation.cpp |80.1%| [BN] {default-linux-x86_64, release, asan} $(B)/ydb/tests/stress/log/workload/ydb_cli |80.1%| [BN] {default-linux-x86_64, release, asan} $(B)/ydb/tests/stability/tool/node_broker_workload |80.1%| [BN] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/tests/stability/tool/node_broker_workload |80.1%| [BN] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/tests/stress/log/workload/ydb_cli |80.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/ut_blobstorage/incorrect_queries.cpp |80.1%| [BN] {default-linux-x86_64, release, asan} $(B)/ydb/tests/stress/mixedpy/workload/ydb_cli |80.1%| [BN] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/tests/stress/mixedpy/workload/ydb_cli |80.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/ut_blobstorage/incorrect_queries.cpp |80.1%| [LD] {BAZEL_UPLOAD} $(B)/ydb/core/blobstorage/vdisk/hulldb/barriers/ut/ydb-core-blobstorage-vdisk-hulldb-barriers-ut |80.1%| [LD] {BAZEL_UPLOAD} $(B)/ydb/core/blobstorage/vdisk/common/ut/ydb-core-blobstorage-vdisk-common-ut |80.1%| [LD] {BAZEL_UPLOAD} $(B)/ydb/library/yql/dq/actors/spilling/ut/ydb-library-yql-dq-actors-spilling-ut |80.1%| [BN] {default-linux-x86_64, release, asan} $(B)/ydb/tests/stability/tool/olap_workload |80.1%| [BN] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/tests/stability/tool/olap_workload |80.1%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/tests/olap/scenario/ydb-tests-olap-scenario |80.1%| [LD] {RESULT} $(B)/ydb/tests/olap/scenario/ydb-tests-olap-scenario |80.1%| [LD] {BAZEL_UPLOAD, SKIPPED} 
$(B)/ydb/tests/olap/scenario/ydb-tests-olap-scenario |80.1%| [BN] {default-linux-x86_64, release, asan} $(B)/ydb/tests/stability/tool/oltp_workload |80.1%| [BN] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/tests/stability/tool/oltp_workload |80.1%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/blobstorage/dsproxy/ut_strategy/ydb-core-blobstorage-dsproxy-ut_strategy |80.1%| [LD] {RESULT} $(B)/ydb/core/blobstorage/dsproxy/ut_strategy/ydb-core-blobstorage-dsproxy-ut_strategy |80.1%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/blobstorage/vdisk/anubis_osiris/ut/ydb-core-blobstorage-vdisk-anubis_osiris-ut |80.1%| [BN] {default-linux-x86_64, release, asan} $(B)/ydb/tests/stability/tool/ydb_cli |80.1%| [BN] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/tests/stability/tool/ydb_cli |80.1%| [LD] {RESULT} $(B)/ydb/core/blobstorage/vdisk/anubis_osiris/ut/ydb-core-blobstorage-vdisk-anubis_osiris-ut |80.1%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/kqp/tests/tpch/tpch |80.1%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/kqp/tests/tpch/tpch |80.1%| [LD] {RESULT} $(B)/ydb/core/kqp/tests/tpch/tpch |80.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/ut_blobstorage/encryption.cpp |80.1%| [BN] {default-linux-x86_64, release, asan} $(B)/ydb/tests/stability/tool/cfg |80.1%| [BN] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/tests/stability/tool/cfg |80.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/ut_blobstorage/encryption.cpp |80.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/ut_blobstorage/donor.cpp |80.2%| [BN] {default-linux-x86_64, release, asan} $(B)/ydb/tests/library/stability/healthcheck/ydb_cli |80.1%| [BN] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/tests/library/stability/healthcheck/ydb_cli |80.1%| [BN] {default-linux-x86_64, release, asan} $(B)/ydb/tests/stress/topic/workload/ydb_cli |80.2%| [BN] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/tests/stress/topic/workload/ydb_cli |80.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/ut_blobstorage/donor.cpp |80.1%| [BN] {default-linux-x86_64, release, asan} $(B)/ydb/tests/stability/tool/nemesis |80.1%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/apps/ydb/ut/ydb-apps-ydb-ut |80.2%| [LD] {RESULT} $(B)/ydb/apps/ydb/ut/ydb-apps-ydb-ut |80.2%| [BN] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/tests/stability/tool/nemesis |80.2%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/apps/ydb/ut/ydb-apps-ydb-ut |80.2%| [BN] {default-linux-x86_64, release, asan} $(B)/ydb/tests/stress/kv_volume/workload/ydb_cli |80.2%| [BN] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/tests/stress/kv_volume/workload/ydb_cli |80.2%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/tools/tsserver/tsserver |80.2%| [LD] {RESULT} $(B)/ydb/tools/tsserver/tsserver |80.2%| [LD] {BAZEL_UPLOAD} $(B)/ydb/core/blobstorage/vdisk/anubis_osiris/ut/ydb-core-blobstorage-vdisk-anubis_osiris-ut |80.2%| [LD] {BAZEL_UPLOAD} $(B)/ydb/core/blobstorage/dsproxy/ut_strategy/ydb-core-blobstorage-dsproxy-ut_strategy |80.2%| [LD] {BAZEL_UPLOAD} $(B)/ydb/tools/tsserver/tsserver |80.2%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/blobstorage/vdisk/hulldb/compstrat/ut/ydb-core-blobstorage-vdisk-hulldb-compstrat-ut |80.2%| [LD] {RESULT} $(B)/ydb/core/blobstorage/vdisk/hulldb/compstrat/ut/ydb-core-blobstorage-vdisk-hulldb-compstrat-ut |80.2%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/blobstorage/vdisk/hulldb/compstrat/ut/ydb-core-blobstorage-vdisk-hulldb-compstrat-ut |80.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/ut_blobstorage/comp_defrag.cpp |80.2%| [CC] 
{BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/ut_blobstorage/comp_defrag.cpp |80.2%| [BN] {default-linux-x86_64, release, asan} $(B)/ydb/tests/stability/tool/ctas_workload |80.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/ut_vdisk2/huge.cpp |80.2%| [BN] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/tests/stability/tool/ctas_workload |80.2%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/tests/functional/ttl/ydb-tests-functional-ttl |80.2%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/tests/functional/ttl/ydb-tests-functional-ttl |80.3%| [LD] {RESULT} $(B)/ydb/tests/functional/ttl/ydb-tests-functional-ttl |80.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/ut_vdisk2/huge.cpp |80.3%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/blobstorage/vdisk/hulldb/fresh/ut/ydb-core-blobstorage-vdisk-hulldb-fresh-ut |80.3%| [LD] {RESULT} $(B)/ydb/core/blobstorage/vdisk/hulldb/fresh/ut/ydb-core-blobstorage-vdisk-hulldb-fresh-ut |80.3%| [BN] {default-linux-x86_64, release, asan} $(B)/ydb/tests/stress/testshard_workload/workload/tsserver |80.3%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/library/actors/core/manual_test/shared_threads/shared_threads |80.3%| [LD] {RESULT} $(B)/ydb/library/actors/core/manual_test/shared_threads/shared_threads |80.3%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/tests/functional/sqs/messaging/ydb-tests-functional-sqs-messaging |80.3%| [LD] {RESULT} $(B)/ydb/tests/functional/sqs/messaging/ydb-tests-functional-sqs-messaging |80.3%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/tests/functional/sqs/messaging/ydb-tests-functional-sqs-messaging |80.3%| [LD] {BAZEL_UPLOAD} $(B)/ydb/library/actors/core/manual_test/shared_threads/shared_threads |80.3%| [BN] {BAZEL_UPLOAD} $(B)/ydb/tests/stress/testshard_workload/workload/tsserver |80.3%| [PY] {default-linux-x86_64, release, asan} $(B)/ydb/tests/stress/kv/workload/objcopy_33abb7f7b873fc17cb192a5592.o |80.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/ut_blobstorage/patch.cpp |80.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/ut_blobstorage/patch.cpp >> SchemeBorders::Partial [GOOD] >> SchemeRanges::CmpBorders [GOOD] |80.3%| [PY] {default-linux-x86_64, release, asan} $(B)/ydb/tests/stress/kafka/workload/objcopy_43f03d8ce0f7a500b6680c8688.o |80.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/ut_blobstorage/scrub.cpp |80.3%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/blobstorage/ut_blobstorage/ut_huge/ydb-core-blobstorage-ut_blobstorage-ut_huge |80.3%| [LD] {RESULT} $(B)/ydb/core/blobstorage/ut_blobstorage/ut_huge/ydb-core-blobstorage-ut_blobstorage-ut_huge |80.3%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/blobstorage/ut_blobstorage/ut_huge/ydb-core-blobstorage-ut_blobstorage-ut_huge |80.3%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/blobstorage/groupinfo/ut/ydb-core-blobstorage-groupinfo-ut |80.3%| [LD] {RESULT} $(B)/ydb/core/blobstorage/groupinfo/ut/ydb-core-blobstorage-groupinfo-ut |80.3%| [PY] {BAZEL_UPLOAD} $(B)/ydb/tests/stress/kv/workload/objcopy_33abb7f7b873fc17cb192a5592.o |80.3%| [PY] {default-linux-x86_64, release, asan} $(B)/ydb/tests/stress/testshard_workload/workload/objcopy_49e50404ca7d8e93240b3617b2.o |80.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/ut_blobstorage/scrub.cpp |80.3%| [PY] {default-linux-x86_64, release, asan} $(B)/ydb/tests/stress/topic_kafka/workload/objcopy_d6a1b41fc15f14862f8ec129ed.o |80.3%| [AR] {default-linux-x86_64, release, asan} 
$(B)/ydb/tests/stress/kv/workload/libpy3stress-kv-workload.global.a |80.3%| [AR] {RESULT} $(B)/ydb/tests/stress/kv/workload/libpy3stress-kv-workload.global.a |80.3%| [PY] {default-linux-x86_64, release, asan} $(B)/ydb/tests/stress/testshard_workload/workload/objcopy_bc86b379db0e87d023f98a592e.o |80.3%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/tests/stress/kafka/workload/libpy3stress-kafka-workload.global.a |80.3%| [AR] {RESULT} $(B)/ydb/tests/stress/kafka/workload/libpy3stress-kafka-workload.global.a |80.3%| [TS] {asan, default-linux-x86_64, release} ydb/core/scheme/ut/unittest >> SchemeRanges::CmpBorders [GOOD] |80.3%| [PY] {default-linux-x86_64, release, asan} $(B)/ydb/tests/stress/log/workload/objcopy_3cad7ffe5c68e883c93b3c3b3a.o |80.3%| [TS] {BAZEL_UPLOAD} ydb/core/scheme/ut/unittest |80.3%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/tests/stress/topic_kafka/workload/libpy3stress-topic_kafka-workload.global.a |80.3%| [AR] {RESULT} $(B)/ydb/tests/stress/topic_kafka/workload/libpy3stress-topic_kafka-workload.global.a >> TStrongTypeTest::DefaultConstructorDeleted [GOOD] |80.3%| [PY] {default-linux-x86_64, release, asan} $(B)/ydb/tests/stress/mixedpy/workload/objcopy_ad01b515e6ee8206b6b9fafbc1.o >> TTokenBucketTest::Unlimited [GOOD] >> TSimpleCacheTest::TestNotSoSimpleCache [GOOD] >> TQueueInplaceTests::TestSimpleInplace [GOOD] >> TTokenBucketTest::Limited [GOOD] >> TQueueInplaceTests::PopTooManyTimes [GOOD] >> TQueueInplaceTests::EmplacePopDefault [GOOD] >> TStrongTypeTest::DefaultConstructorValue [GOOD] >> TULID::ParseAndFormat [GOOD] >> TPriorityOperationQueueTest::UpdateNonExistingShouldReturnFalse [GOOD] >> TPriorityOperationQueueTest::ShouldUpdatePriorityWaitingQueue [GOOD] >> TPriorityQueueTest::TestOrder [GOOD] >> TULID::HeadByteOrder >> TPriorityOperationQueueTest::ShouldUpdatePriorityReadyQueue [GOOD] >> TSimpleCacheTest::TestSimpleCache [GOOD] >> TTokenBucketTest::DelayCalculation [GOOD] >> TQueueInplaceTests::MoveAssignment [GOOD] >> TQueueInplaceTests::DestroyInDestructor [GOOD] >> TQueueInplaceTests::MoveConstructor [GOOD] >> TULID::HeadByteOrder [GOOD] >> TULID::TailByteOrder [GOOD] >> TULID::EveryBitOrder [GOOD] >> TULID::Generate [GOOD] >> TWildcardTest::TestWildcard [GOOD] >> TWildcardTest::TestWildcards [GOOD] |80.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/ut_blobstorage/group_size_in_units.cpp |80.3%| [PY] {BAZEL_UPLOAD} $(B)/ydb/tests/stress/testshard_workload/workload/objcopy_bc86b379db0e87d023f98a592e.o >> AuthTokenAllowed::PassOnListMatchUserSid [GOOD] >> AuthTokenAllowed::PassOnListMatchUserSidWithGroup [GOOD] >> AuthTokenAllowed::PassOnEmptyListAndTokenWithEmptyUserSidAndGroups [GOOD] >> AuthTokenAllowed::PassOnListMatchGroupSid [GOOD] >> TopicNameConverterTest::LegacyStyle [GOOD] >> TopicNameConverterTest::FirstClass [GOOD] |80.3%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/tests/functional/sqs/with_quotas/ydb-tests-functional-sqs-with_quotas |80.3%| [LD] {RESULT} $(B)/ydb/tests/functional/sqs/with_quotas/ydb-tests-functional-sqs-with_quotas >> AuthTokenAllowed::FailOnListAndTokenWithEmptyUserSid [GOOD] >> AuthTokenAllowed::FailOnListAndTokenWithEmptyUserSidAndGroups [GOOD] >> AuthTokenAllowed::FailOnListAndNoToken [GOOD] |80.3%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/tests/functional/sqs/with_quotas/ydb-tests-functional-sqs-with_quotas |80.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/ut_blobstorage/group_size_in_units.cpp >> AuthTokenAllowed::PassOnEmptyListAndToken [GOOD] >> 
AuthTokenAllowed::PassOnEmptyListAndTokenWithEmptyUserSid [GOOD] |80.3%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/tests/tools/s3_bench/ydb_s3_bench |80.3%| [LD] {RESULT} $(B)/ydb/tests/tools/s3_bench/ydb_s3_bench |80.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/ut_blobstorage/cluster_balancing.cpp |80.3%| [TM] {asan, default-linux-x86_64, release} ydb/core/base/ut_auth/unittest >> AuthTokenAllowed::PassOnListMatchUserSidWithGroup [GOOD] |80.3%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/tests/stress/mixedpy/workload/libpy3stress-mixedpy-workload.global.a |80.3%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/blobstorage/vdisk/hulldb/cache_block/ut/ydb-core-blobstorage-vdisk-hulldb-cache_block-ut |80.3%| [AR] {RESULT} $(B)/ydb/tests/stress/mixedpy/workload/libpy3stress-mixedpy-workload.global.a |80.3%| [LD] {RESULT} $(B)/ydb/core/blobstorage/vdisk/hulldb/cache_block/ut/ydb-core-blobstorage-vdisk-hulldb-cache_block-ut |80.3%| [TM] {asan, default-linux-x86_64, release} ydb/core/util/ut/unittest >> TWildcardTest::TestWildcards [GOOD] |80.3%| [TM] {asan, default-linux-x86_64, release} ydb/core/base/ut_auth/unittest >> AuthTokenAllowed::PassOnListMatchGroupSid [GOOD] |80.3%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/blobstorage/vdisk/hulldb/cache_block/ut/ydb-core-blobstorage-vdisk-hulldb-cache_block-ut |80.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/services/deprecated/persqueue_v0/grpc_pq_read_actor.cpp |80.3%| [PY] {default-linux-x86_64, release, asan} $(B)/ydb/tests/stress/topic/workload/objcopy_456ef6a09159ceaa7dd7dffa59.o |80.3%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/blobstorage/ut_vdisk2/ydb-core-blobstorage-ut_vdisk2 |80.3%| [LD] {RESULT} $(B)/ydb/core/blobstorage/ut_vdisk2/ydb-core-blobstorage-ut_vdisk2 >> AuthTokenAllowed::PassOnEmptyListAndInvalidTokenSerialized [GOOD] >> AuthTokenAllowed::PassOnEmptyListAndNoToken [GOOD] |80.3%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/blobstorage/ut_vdisk2/ydb-core-blobstorage-ut_vdisk2 |80.3%| [TM] {asan, default-linux-x86_64, release} ydb/core/base/ut_auth/unittest >> AuthTokenAllowed::FailOnListAndNoToken [GOOD] |80.3%| [TS] {asan, default-linux-x86_64, release} ydb/library/persqueue/topic_parser/ut/unittest >> TopicNameConverterTest::FirstClass [GOOD] |80.3%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/services/deprecated/persqueue_v0/libservices-deprecated-persqueue_v0.a |80.3%| [AR] {RESULT} $(B)/ydb/services/deprecated/persqueue_v0/libservices-deprecated-persqueue_v0.a |80.3%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/tests/stress/testshard_workload/workload/libpy3stress-testshard_workload-workload.global.a |80.3%| [TM] {asan, default-linux-x86_64, release} ydb/core/base/ut_auth/unittest >> AuthTokenAllowed::PassOnEmptyListAndTokenWithEmptyUserSid [GOOD] |80.3%| [AR] {RESULT} $(B)/ydb/tests/stress/testshard_workload/workload/libpy3stress-testshard_workload-workload.global.a |80.4%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/tests/stress/log/workload/libpy3stress-log-workload.global.a |80.4%| [AR] {RESULT} $(B)/ydb/tests/stress/log/workload/libpy3stress-log-workload.global.a |80.4%| [TM] {BAZEL_UPLOAD} ydb/core/base/ut_auth/unittest |80.4%| [PY] {default-linux-x86_64, release, asan} $(B)/ydb/tests/stress/kv_volume/workload/objcopy_13cb62373e81699a2d2c9afb86.o |80.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/ut_blobstorage/cluster_balancing.cpp |80.4%| [TS] {BAZEL_UPLOAD} ydb/library/persqueue/topic_parser/ut/unittest 
|80.4%| [TM] {BAZEL_UPLOAD} ydb/core/base/ut_auth/unittest |80.4%| [TM] {BAZEL_UPLOAD} ydb/core/util/ut/unittest |80.4%| [TM] {BAZEL_UPLOAD} ydb/core/base/ut_auth/unittest |80.4%| [PY] {default-linux-x86_64, release, asan} $(B)/ydb/tests/library/stability/healthcheck/objcopy_75ddb319f6ea5a5e05fb01b29c.o |80.4%| [TM] {BAZEL_UPLOAD} ydb/core/base/ut_auth/unittest |80.4%| [PY] {BAZEL_UPLOAD} $(B)/ydb/tests/stress/kafka/workload/objcopy_43f03d8ce0f7a500b6680c8688.o |80.4%| [TM] {asan, default-linux-x86_64, release} ydb/core/base/ut_auth/unittest >> AuthTokenAllowed::PassOnEmptyListAndNoToken [GOOD] >> TSubgroupPartLayoutTest::CountEffectiveReplicas1of4 |80.4%| [TM] {BAZEL_UPLOAD} ydb/core/base/ut_auth/unittest |80.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/ut_blobstorage/snapshots.cpp |80.4%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/tests/stress/topic/workload/libpy3stress-topic-workload.global.a |80.4%| [AR] {RESULT} $(B)/ydb/tests/stress/topic/workload/libpy3stress-topic-workload.global.a >> TBlobStorageGroupInfoIterTest::IteratorForward [GOOD] >> TBlobStorageGroupInfoIterTest::IteratorBackward [GOOD] >> TBlobStorageHullFresh::SolomonStandCrash [GOOD] >> TBlobStorageHullFreshSegment::IteratorTest >> TBlobStorageHullFresh::SimpleForward [GOOD] >> TBlobStorageHullFresh::SimpleBackwardMiddle [GOOD] >> AuthDatabaseAdmin::FailOnEmptyOwnerAndEmptyToken [GOOD] >> AuthDatabaseAdmin::FailOnEmptyOwnerAndTokenWithEmptyUserSid [GOOD] >> AuthDatabaseAdmin::FailOnEmptyOwnerAndNoToken [GOOD] |80.4%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/groupinfo/ut/unittest >> TBlobStorageGroupInfoIterTest::IteratorBackward [GOOD] |80.4%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/tests/stress/kv/workload_kv |80.4%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/library/yql/providers/pq/provider/ut/ydb-library-yql-providers-pq-provider-ut |80.4%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/tests/stress/kv/workload_kv |80.4%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/library/yql/providers/pq/provider/ut/ydb-library-yql-providers-pq-provider-ut |80.4%| [LD] {RESULT} $(B)/ydb/tests/stress/kv/workload_kv |80.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/ut_blobstorage/snapshots.cpp |80.4%| [LD] {RESULT} $(B)/ydb/library/yql/providers/pq/provider/ut/ydb-library-yql-providers-pq-provider-ut >> TBlobStorageGroupInfoBlobMapTest::CheckCorrectBehaviourWithHashOverlow [GOOD] >> TBlobStorageGroupInfoBlobMapTest::Mirror3dcMapper |80.4%| [TM] {BAZEL_UPLOAD} ydb/core/blobstorage/groupinfo/ut/unittest |80.4%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/blobstorage/ut_blobstorage/ut_move_pdisk/ydb-core-blobstorage-ut_blobstorage-ut_move_pdisk |80.4%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/blobstorage/ut_blobstorage/ut_move_pdisk/ydb-core-blobstorage-ut_blobstorage-ut_move_pdisk |80.4%| [LD] {RESULT} $(B)/ydb/core/blobstorage/ut_blobstorage/ut_move_pdisk/ydb-core-blobstorage-ut_blobstorage-ut_move_pdisk |80.4%| [AR] {BAZEL_UPLOAD} $(B)/ydb/services/deprecated/persqueue_v0/libservices-deprecated-persqueue_v0.a |80.4%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/vdisk/hulldb/fresh/ut/unittest >> TBlobStorageHullFresh::SimpleBackwardMiddle [GOOD] >> TBlobStorageGroupInfoBlobMapTest::BelongsToSubgroupBenchmark >> AuthDatabaseAdmin::FailOnEmptyOwnerAndTokenWithEmptyUserSidAndGroups [GOOD] >> AuthDatabaseAdmin::FailOnOwnerAndEmptyToken [GOOD] >> AuthDatabaseAdmin::FailOnOwnerAndNoToken [GOOD] |80.4%| [PY] {BAZEL_UPLOAD} 
$(B)/ydb/tests/stress/testshard_workload/workload/objcopy_49e50404ca7d8e93240b3617b2.o |80.4%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/tests/library/stability/healthcheck/libpy3library-stability-healthcheck.global.a |80.5%| [AR] {RESULT} $(B)/ydb/tests/library/stability/healthcheck/libpy3library-stability-healthcheck.global.a |80.5%| [TM] {BAZEL_UPLOAD} ydb/core/blobstorage/vdisk/hulldb/fresh/ut/unittest >> AuthTokenAllowed::PassOnEmptyListAndEmptyToken [GOOD] >> AuthTokenAllowed::FailOnListMatchGroupSid [GOOD] |80.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/services/deprecated/persqueue_v0/grpc_pq_read_actor.cpp |80.5%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/blobstorage/ut_blobstorage/ut_balancing/ydb-core-blobstorage-ut_blobstorage-ut_balancing >> TBlobStorageHullFreshSegment::IteratorTest [GOOD] >> TBlobStorageGroupInfoTest::GroupQuorumCheckerOrdinary >> TBlobStorageGroupInfoIterTest::PerRealmIterator [GOOD] |80.5%| [LD] {RESULT} $(B)/ydb/core/blobstorage/ut_blobstorage/ut_balancing/ydb-core-blobstorage-ut_blobstorage-ut_balancing |80.5%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/blobstorage/ut_blobstorage/ut_balancing/ydb-core-blobstorage-ut_blobstorage-ut_balancing >> TBlobStorageGroupInfoIterTest::WalkFailRealms [GOOD] >> TBlobStorageBlocksCacheTest::PutDeepIntoPast [GOOD] |80.5%| [LD] {BAZEL_UPLOAD} $(B)/ydb/core/blobstorage/groupinfo/ut/ydb-core-blobstorage-groupinfo-ut |80.5%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/blobstorage/ut_blobstorage/ut_replication/core-blobstorage-ut_blobstorage-ut_replication |80.5%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/tests/stress/kv_volume/workload/libpy3stress-kv_volume-workload.global.a |80.5%| [AR] {RESULT} $(B)/ydb/tests/stress/kv_volume/workload/libpy3stress-kv_volume-workload.global.a |80.5%| [LD] {RESULT} $(B)/ydb/core/blobstorage/ut_blobstorage/ut_replication/core-blobstorage-ut_blobstorage-ut_replication |80.5%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/blobstorage/ut_blobstorage/ut_replication/core-blobstorage-ut_blobstorage-ut_replication |80.5%| [PY] {BAZEL_UPLOAD} $(B)/ydb/tests/stress/topic_kafka/workload/objcopy_d6a1b41fc15f14862f8ec129ed.o >> TBlobStorageGroupInfoTest::GroupQuorumCheckerOrdinary [GOOD] >> TBlobStorageGroupInfoTest::GroupQuorumCheckerMirror3dc |80.5%| [TM] {asan, default-linux-x86_64, release} ydb/core/base/ut_auth/unittest >> AuthDatabaseAdmin::FailOnEmptyOwnerAndNoToken [GOOD] |80.5%| [LD] {BAZEL_UPLOAD} $(B)/ydb/core/blobstorage/vdisk/hulldb/fresh/ut/ydb-core-blobstorage-vdisk-hulldb-fresh-ut >> TBlobStorageGroupInfoTest::GroupQuorumCheckerMirror3dc [GOOD] |80.5%| [TM] {asan, default-linux-x86_64, release} ydb/core/base/ut_auth/unittest >> AuthTokenAllowed::FailOnListMatchGroupSid [GOOD] |80.5%| [TM] {BAZEL_UPLOAD} ydb/core/base/ut_auth/unittest |80.5%| [TM] {BAZEL_UPLOAD} ydb/core/base/ut_auth/unittest |80.5%| [TM] {asan, default-linux-x86_64, release} ydb/core/base/ut_auth/unittest >> AuthDatabaseAdmin::FailOnOwnerAndNoToken [GOOD] |80.5%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/vdisk/hulldb/cache_block/ut/unittest >> TBlobStorageBlocksCacheTest::PutDeepIntoPast [GOOD] |80.5%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/vdisk/hulldb/fresh/ut/unittest >> TBlobStorageHullFreshSegment::IteratorTest [GOOD] |80.5%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/groupinfo/ut/unittest >> TBlobStorageGroupInfoIterTest::WalkFailRealms [GOOD] |80.5%| [TM] {asan, default-linux-x86_64, release} 
ydb/core/blobstorage/vdisk/hulldb/cache_block/ut/unittest >> TBlobStorageGroupInfoIterTest::Domains [GOOD] >> TBlobStorageGroupInfoIterTest::Indexes [GOOD] |80.6%| [AR] {BAZEL_UPLOAD} $(B)/ydb/tests/stress/kv/workload/libpy3stress-kv-workload.global.a |80.6%| [TM] {BAZEL_UPLOAD} ydb/core/base/ut_auth/unittest |80.5%| [TM] {BAZEL_UPLOAD} ydb/core/blobstorage/groupinfo/ut/unittest |80.5%| [TM] {BAZEL_UPLOAD} ydb/core/blobstorage/vdisk/hulldb/cache_block/ut/unittest |80.6%| [TM] {BAZEL_UPLOAD} ydb/core/blobstorage/vdisk/hulldb/fresh/ut/unittest |80.6%| [TM] {BAZEL_UPLOAD} ydb/core/blobstorage/vdisk/hulldb/cache_block/ut/unittest |80.6%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/ut_vdisk2/unittest |80.6%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/tests/stress/kafka/kafka_streams_test |80.6%| [LD] {RESULT} $(B)/ydb/tests/stress/kafka/kafka_streams_test |80.6%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/tests/stress/kafka/kafka_streams_test |80.6%| [TM] {BAZEL_UPLOAD} ydb/core/blobstorage/ut_vdisk2/unittest |80.6%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/groupinfo/ut/unittest >> TBlobStorageGroupInfoTest::GroupQuorumCheckerMirror3dc [GOOD] |80.6%| [TM] {BAZEL_UPLOAD} ydb/core/blobstorage/groupinfo/ut/unittest |80.6%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/tests/stress/kafka/tests/ydb-tests-stress-kafka-tests |80.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/ut_blobstorage/extra_block_checks.cpp |80.6%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/tests/stress/kafka/tests/ydb-tests-stress-kafka-tests |80.6%| [LD] {RESULT} $(B)/ydb/tests/stress/kafka/tests/ydb-tests-stress-kafka-tests |80.6%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/blobstorage/ut_blobstorage/ut_check_integrity/ut_blobstorage-ut_check_integrity |80.6%| [AR] {BAZEL_UPLOAD} $(B)/ydb/tests/stress/kafka/workload/libpy3stress-kafka-workload.global.a |80.6%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/blobstorage/ut_blobstorage/ut_check_integrity/ut_blobstorage-ut_check_integrity |80.6%| [LD] {RESULT} $(B)/ydb/core/blobstorage/ut_blobstorage/ut_check_integrity/ut_blobstorage-ut_check_integrity |80.6%| [PY] {BAZEL_UPLOAD} $(B)/ydb/tests/stress/log/workload/objcopy_3cad7ffe5c68e883c93b3c3b3a.o |80.6%| [BN] {default-linux-x86_64, release, asan} $(B)/ydb/tests/stability/tool/kafka_workload |80.6%| [BN] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/tests/stability/tool/kafka_workload |80.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/ut_blobstorage/extra_block_checks.cpp >> TIntervalSetTest::IntervalSetTestEmpty [GOOD] >> TIntervalSetTest::IntervalSetTestSpecificAdd [GOOD] >> TIntervalSetTest::IntervalSetTestAdd |80.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/ut_blobstorage/vdisk_restart.cpp |80.6%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/groupinfo/ut/unittest >> TBlobStorageGroupInfoIterTest::Indexes [GOOD] |80.6%| [AR] {BAZEL_UPLOAD} $(B)/ydb/tests/stress/topic_kafka/workload/libpy3stress-topic_kafka-workload.global.a |80.6%| [PY] {BAZEL_UPLOAD} $(B)/ydb/tests/stress/mixedpy/workload/objcopy_ad01b515e6ee8206b6b9fafbc1.o |80.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/ut_blobstorage/vdisk_restart.cpp |80.6%| [TM] {BAZEL_UPLOAD} ydb/core/blobstorage/groupinfo/ut/unittest >> TBlobStorageBlocksCacheTest::Repeat [GOOD] >> TCircularOperationQueueTest::CheckStartAfterStop [GOOD] >> TCircularOperationQueueTest::CheckOnDoneNotExisting [GOOD] >> TCircularOperationQueueTest::BasicRPSCheck [GOOD] >> 
TCircularOperationQueueTest::CheckOnDoneInflight2 [GOOD] >> TCacheTest::TestSizeBasedOverflowCallback [GOOD] >> TCircularOperationQueueTest::CheckWakeupWhenRPSExhausted [GOOD] >> TCacheTest::EnsureNoLeakAfterQ2CacheDtor [GOOD] >> TCacheTest::Test2QCache [GOOD] >> TCircularOperationQueueTest::CheckRemoveNotRunning [GOOD] >> TCircularOperationQueueTest::BasicRPSCheckWithRound [GOOD] >> TCacheTest::TestLruCache [GOOD] >> TCacheTest::TestUpdateItemSize [GOOD] >> TCacheTest::EnsureNoLeakAfterUnboundedCacheOnMapDtor [GOOD] >> TCircularOperationQueueTest::CheckRemoveRunning [GOOD] >> TCircularOperationQueueTest::ShouldNotStartUntilStart [GOOD] >> TCircularOperationQueueTest::CheckOnDoneInflight1 [GOOD] >> TCircularOperationQueueTest::RemoveNonExistingWhenShuffle [GOOD] >> TCircularOperationQueueTest::CheckTimeoutWhenFirstItemRemoved [GOOD] >> TCacheTest::TestUnboundedMapCache [GOOD] >> TCircularOperationQueueTest::CheckTimeout [GOOD] >> TCircularOperationQueueTest::CheckRemoveNotExisting [GOOD] >> TCircularOperationQueueTest::CheckRemoveWaiting [GOOD] >> TCircularOperationQueueTest::CheckWakeupAfterStop [GOOD] >> TCircularOperationQueueTest::CheckWakeupWhenRPSExhausted2 [GOOD] >> TCircularOperationQueueTest::RemoveExistingWhenShuffle [GOOD] >> THyperLogCounterTest::TestGetSet [GOOD] >> THyperLogCounterTest::TestIncrement >> THazardTest::CachedPointers [GOOD] >> THazardTest::AutoProtectedPointers [GOOD] >> TIntervalSetTest::IntervalSetTestAdd [GOOD] >> TIntervalSetTest::IntervalSetTestAddSubtract [GOOD] >> TIntervalSetTest::IntervalSetTestSubtract [GOOD] >> TIntervalSetTest::IntervalSetTestSubtractAgainstReference |80.6%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/tests/stress/mixedpy/workload_mixed |80.6%| [LD] {RESULT} $(B)/ydb/tests/stress/mixedpy/workload_mixed |80.6%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/tests/stress/mixedpy/workload_mixed |80.6%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/tests/stress/testshard_workload/workload_testshard >> THyperLogCounterTest::TestIncrement [GOOD] >> THyperLogCounterTest::TestAddRandom |80.6%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/tests/stress/testshard_workload/workload_testshard |80.6%| [LD] {RESULT} $(B)/ydb/tests/stress/testshard_workload/workload_testshard |80.7%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/ut_vdisk2/unittest |80.7%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/ut_vdisk2/unittest >> TIntervalSetTest::IntervalSetTestSubtractAgainstReference [GOOD] >> TIntervalSetTest::IntervalSetTestAddAgainstReference |80.7%| [TM] {BAZEL_UPLOAD} ydb/core/blobstorage/ut_vdisk2/unittest |80.7%| [TM] {BAZEL_UPLOAD} ydb/core/blobstorage/ut_vdisk2/unittest |80.7%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/tests/stress/topic_kafka/workload_topic_kafka |80.7%| [LD] {RESULT} $(B)/ydb/tests/stress/topic_kafka/workload_topic_kafka >> TIntervalSetTest::IntervalSetTestAddAgainstReference [GOOD] >> TIntervalSetTest::IntervalSetTestIsSubsetOfAgainstReference |80.7%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/tests/stress/topic_kafka/workload_topic_kafka >> THyperLogCounterTest::TestAddRandom [GOOD] >> TIntervalSetTest::IntervalSetTestIsSubsetOfAgainstReference [GOOD] >> THyperLogCounterTest::TestAddFixed >> TIntervalSetTest::IntervalSetTestToStringAgainstReference [GOOD] >> TIntervalSetTest::IntervalSetUnion >> THyperLogCounterTest::TestAddFixed [GOOD] >> THyperLogCounterTest::TestHybridIncrement [GOOD] >> THyperLogCounterTest::TestHybridAdd [GOOD] >> TIntervalSetTest::IntervalMapTestEmpty [GOOD] >> 
TIntervalSetTest::IntervalMapTestSpecificAdd [GOOD] >> TIntervalSetTest::IntervalMapTestAdd |80.7%| [TM] {asan, default-linux-x86_64, release} ydb/core/util/ut/unittest >> TCircularOperationQueueTest::RemoveExistingWhenShuffle [GOOD] |80.7%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/vdisk/hulldb/cache_block/ut/unittest >> TBlobStorageBlocksCacheTest::Repeat [GOOD] |80.7%| [AR] {BAZEL_UPLOAD} $(B)/ydb/tests/stress/mixedpy/workload/libpy3stress-mixedpy-workload.global.a >> TIntervalSetTest::IntervalMapTestAdd [GOOD] >> TIntervalSetTest::IntervalMapTestAddSubtract [GOOD] >> TIntervalSetTest::IntervalMapTestSubtract [GOOD] >> TIntervalSetTest::IntervalMapTestSubtractAgainstReference |80.7%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/ut_vdisk2/unittest |80.7%| [TM] {BAZEL_UPLOAD} ydb/core/util/ut/unittest |80.7%| [AR] {BAZEL_UPLOAD} $(B)/ydb/tests/stress/log/workload/libpy3stress-log-workload.global.a |80.7%| [PY] {BAZEL_UPLOAD} $(B)/ydb/tests/stress/topic/workload/objcopy_456ef6a09159ceaa7dd7dffa59.o |80.7%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/backup/tools/decrypt/decrypt |80.7%| [LD] {RESULT} $(B)/ydb/core/backup/tools/decrypt/decrypt |80.7%| [TM] {BAZEL_UPLOAD} ydb/core/blobstorage/vdisk/hulldb/cache_block/ut/unittest |80.7%| [BN] {default-linux-x86_64, release, asan} $(B)/ydb/tests/stability/tool/topic_kafka_workload |80.7%| [BN] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/tests/stability/tool/topic_kafka_workload >> TIntervalSetTest::IntervalMapTestSubtractAgainstReference [GOOD] >> TIntervalSetTest::IntervalMapTestAddAgainstReference |80.7%| [TM] {BAZEL_UPLOAD} ydb/core/blobstorage/ut_vdisk2/unittest |80.7%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/tests/stress/topic/workload_topic >> TCircularOperationQueueTest::ShouldStartEmpty [GOOD] >> TCircularOperationQueueTest::ShouldStartInflight1 [GOOD] >> TCircularOperationQueueTest::ShouldStartInflight2 [GOOD] >> TCircularOperationQueueTest::ShouldStartInflight3 [GOOD] >> TCircularOperationQueueTest::ShouldStartInflight10 [GOOD] >> TCircularOperationQueueTest::ShouldStartInflight100 [GOOD] >> TCircularOperationQueueTest::ShouldStartInflightEnqueue1 [GOOD] >> TCircularOperationQueueTest::ShouldStartInflightEnqueue2 [GOOD] >> TCircularOperationQueueTest::ShouldStartInflightEnqueue3 [GOOD] >> TCircularOperationQueueTest::ShouldStartInflightEnqueue10 [GOOD] >> TCircularOperationQueueTest::ShouldStartInflightEnqueue100 [GOOD] >> TCircularOperationQueueTest::ShouldScheduleWakeupWhenNothingStarted [GOOD] >> TCircularOperationQueueTest::ShouldScheduleWakeupWhenHasWaitingAndStart [GOOD] >> TCircularOperationQueueTest::UseMinOperationRepeatDelayWhenTimeout [GOOD] >> TCircularOperationQueueTest::ShouldReturnExecTime [GOOD] >> TCircularOperationQueueTest::ShouldTryToStartAnotherOneWhenStartFails [GOOD] >> TCircularOperationQueueTest::ShouldShuffle [GOOD] >> TCircularOperationQueueTest::ShouldTolerateInaccurateTimer [GOOD] >> TCircularQueueTest::Empty [GOOD] >> TCircularQueueTest::ShouldPush [GOOD] >> TCircularQueueTest::ShouldNotPushTwice [GOOD] >> TCircularQueueTest::ShouldNextSingleItem [GOOD] >> TCircularQueueTest::ShouldNextMulti [GOOD] >> TCircularQueueTest::ShouldNotRemoveMissing [GOOD] >> TCircularQueueTest::ShouldGetQueue [GOOD] |80.7%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/tests/stress/topic/workload_topic |80.7%| [LD] {RESULT} $(B)/ydb/tests/stress/topic/workload_topic |80.7%| [PY] {BAZEL_UPLOAD} $(B)/ydb/tests/library/stability/healthcheck/objcopy_75ddb319f6ea5a5e05fb01b29c.o 
|80.7%| [PY] {BAZEL_UPLOAD} $(B)/ydb/tests/stress/kv_volume/workload/objcopy_13cb62373e81699a2d2c9afb86.o >> TIntervalSetTest::IntervalMapTestAddAgainstReference [GOOD] >> TIntervalSetTest::IntervalMapTestIsSubsetOfAgainstReference >> VDiskBalancing::TestRandom_Block42 >> TIntervalSetTest::IntervalMapTestIsSubsetOfAgainstReference [GOOD] >> TIntervalSetTest::IntervalMapTestToStringAgainstReference [GOOD] >> TIntervalSetTest::IntervalMapUnion >> TIntervalSetTest::IntervalSetUnion [GOOD] >> TIntervalSetTest::IntervalSetUnionInplace |80.7%| [AR] {BAZEL_UPLOAD} $(B)/ydb/tests/stress/topic/workload/libpy3stress-topic-workload.global.a >> VDiskTest::HugeBlobRecompaction >> TCowBTreeTest::SeekForwardPermutationsThreadSafe >> TCowBTreeTest::SeekForwardPermutationsInplace [GOOD] |80.7%| [AR] {BAZEL_UPLOAD} $(B)/ydb/tests/library/stability/healthcheck/libpy3library-stability-healthcheck.global.a >> TCowBTreeTest::SeekForwardPermutationsThreadSafe [GOOD] >> TCowBTreeTest::SnapshotCascade [GOOD] >> TCowBTreeTest::SnapshotRollback |80.7%| [AR] {BAZEL_UPLOAD} $(B)/ydb/tests/stress/kv_volume/workload/libpy3stress-kv_volume-workload.global.a |80.7%| [TM] {asan, default-linux-x86_64, release} ydb/core/util/ut/unittest >> TCircularQueueTest::ShouldGetQueue [GOOD] >> TIntervalSetTest::IntervalSetUnionInplace [GOOD] >> TIntervalSetTest::IntervalMapUnionInplaceSelf [GOOD] >> TIntervalSetTest::IntervalSetUnionInplaceSelf [GOOD] >> TIntervalSetTest::IntervalVecIntersection |80.8%| [TM] {BAZEL_UPLOAD} ydb/core/util/ut/unittest |80.8%| [LD] {BAZEL_UPLOAD} $(B)/ydb/tests/tools/s3_bench/ydb_s3_bench |80.8%| [AR] {BAZEL_UPLOAD} $(B)/ydb/tests/stress/testshard_workload/workload/libpy3stress-testshard_workload-workload.global.a >> TIntervalSetTest::IntervalMapUnion [GOOD] >> TIntervalSetTest::IntervalMapUnionInplace >> AddressClassifierTest::TestAddressExtraction [GOOD] >> AddressClassifierTest::TestAddressParsing [GOOD] >> AddressClassifierTest::TestClassfierWithAllIpTypes [GOOD] >> AddressClassifierTest::TestLabeledClassifier [GOOD] >> AddressClassifierTest::TestLabeledClassifierFromNetData [GOOD] >> TBitsTest::TestNaiveClz [GOOD] >> TBTreeTest::Basics [GOOD] >> TBTreeTest::ClearAndReuse [GOOD] >> TBTreeTest::SeekForwardPermutationsInplace |80.8%| [BN] {default-linux-x86_64, release, asan} $(B)/ydb/tests/stability/tool/topic_workload |80.8%| [BN] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/tests/stability/tool/topic_workload >> TBTreeTest::SeekForwardPermutationsInplace [GOOD] >> TBTreeTest::SeekForwardPermutationsThreadSafe [GOOD] >> TBTreeTest::SeekBackwardPermutationsInplace [GOOD] >> TBTreeTest::SeekBackwardPermutationsThreadSafe [GOOD] >> TBTreeTest::RandomInsertInplace >> TIntervalSetTest::IntervalVecIntersection [GOOD] >> TIntervalSetTest::IntervalSetIntersection |80.8%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/ut_vdisk2/unittest |80.8%| [TM] {BAZEL_UPLOAD} ydb/core/blobstorage/ut_vdisk2/unittest |80.8%| [LD] {BAZEL_UPLOAD} $(B)/ydb/core/backup/tools/decrypt/decrypt >> VDiskBalancing::TestDontSendToReadOnlyTest_Block42 >> TLockFreeIntrusiveStackTest::ConcurrentRefCountNeverEmpty >> TIntervalSetTest::IntervalMapUnionInplace [GOOD] >> TIntervalSetTest::IntervalMapIntersection |80.7%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/library/yql/providers/common/http_gateway/ut/ydb-library-yql-providers-common-http_gateway-ut |80.8%| [LD] {RESULT} $(B)/ydb/library/yql/providers/common/http_gateway/ut/ydb-library-yql-providers-common-http_gateway-ut |80.8%| [LD] {default-linux-x86_64, release, asan} 
$(B)/ydb/core/blobstorage/vdisk/huge/ut/ydb-core-blobstorage-vdisk-huge-ut |80.8%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/blobstorage/vdisk/huge/ut/ydb-core-blobstorage-vdisk-huge-ut |80.8%| [LD] {RESULT} $(B)/ydb/core/blobstorage/vdisk/huge/ut/ydb-core-blobstorage-vdisk-huge-ut |80.8%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/tests/stress/simple_queue/simple_queue |80.8%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/tests/stress/simple_queue/simple_queue |80.8%| [LD] {RESULT} $(B)/ydb/tests/stress/simple_queue/simple_queue >> TIntervalSetTest::IntervalSetIntersection [GOOD] >> TIntervalSetTest::IntervalVecIntersectionInplace >> TBlobStorageGroupInfoBlobMapTest::Mirror3dcMapper [GOOD] >> VDiskBalancing::TestStopOneNode_Mirror3dc >> VDiskBalancing::TestStopOneNode_Block42 >> TIntervalSetTest::IntervalVecIntersectionInplace [GOOD] >> TIntervalSetTest::IntervalSetIntersectionInplace |80.8%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/kqp/common/result_set_format/ut/ydb-core-kqp-common-result_set_format-ut |80.8%| [LD] {RESULT} $(B)/ydb/core/kqp/common/result_set_format/ut/ydb-core-kqp-common-result_set_format-ut |80.8%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/kqp/common/result_set_format/ut/ydb-core-kqp-common-result_set_format-ut >> CheckIntegrityBlock42::PlacementOkWithErrors >> TIntervalSetTest::IntervalSetIntersectionInplace [GOOD] >> TIntervalSetTest::IntervalVecIntersectionInplaceSelf [GOOD] >> TIntervalSetTest::IntervalSetIntersectionInplaceSelf [GOOD] >> TIntervalSetTest::IntervalVecDifference >> CheckIntegrityBlock42::PlacementWrongDisks >> TIntervalSetTest::IntervalMapIntersection [GOOD] >> TIntervalSetTest::IntervalMapIntersectionInplace >> VDiskBalancing::TwoPartsOnOneNodeTest_Block42 |80.9%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/tests/stress/kv_volume/workload_keyvalue_volume |80.9%| [LD] {RESULT} $(B)/ydb/tests/stress/kv_volume/workload_keyvalue_volume |80.9%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/tests/stress/kv_volume/workload_keyvalue_volume >> TIntervalSetTest::IntervalVecDifference [GOOD] >> TIntervalSetTest::IntervalSetDifference >> CheckIntegrityMirror3dc::PlacementBlobIsLost >> TIntervalSetTest::IntervalMapIntersectionInplace [GOOD] >> TIntervalSetTest::IntervalMapIntersectionInplaceSelf [GOOD] >> TIntervalSetTest::IntervalMapDifference |80.9%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/blobstorage/vdisk/query/ut/ydb-core-blobstorage-vdisk-query-ut |80.9%| [LD] {RESULT} $(B)/ydb/core/blobstorage/vdisk/query/ut/ydb-core-blobstorage-vdisk-query-ut |80.9%| [LD] {BAZEL_UPLOAD} $(B)/ydb/library/yql/providers/common/http_gateway/ut/ydb-library-yql-providers-common-http_gateway-ut >> VDiskBalancing::TestRandom_Mirror3dc |80.9%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/testlib/actors/ut/ydb-core-testlib-actors-ut |80.9%| [LD] {RESULT} $(B)/ydb/core/testlib/actors/ut/ydb-core-testlib-actors-ut |80.9%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/testlib/actors/ut/ydb-core-testlib-actors-ut ------- [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/groupinfo/ut/unittest >> TBlobStorageGroupInfoBlobMapTest::Mirror3dcMapper [GOOD] Test command err: [0:1:0:3:1]# 173 184 157 167 152 185 195 192 144 [0:1:1:1:1]# 189 195 192 171 157 161 167 155 196 [0:1:3:3:1]# 184 157 182 152 185 157 192 144 189 [0:1:3:4:0]# 148 154 155 158 194 160 156 163 140 [0:1:2:3:2]# 152 177 174 176 154 146 161 170 168 [0:1:1:2:1]# 157 167 152 189 195 192 171 157 161 [0:1:1:0:2]# 158 150 131 167 177 161 177 174 173 [0:1:3:0:1]# 161 155 
171 196 154 167 184 157 182 [0:1:0:3:2]# 174 173 152 146 184 176 168 157 161 [0:1:2:2:0]# 163 140 161 148 162 159 168 178 190 [0:1:0:2:0]# 161 156 163 159 196 148 190 162 168 [0:1:3:2:1]# 152 185 157 192 144 189 161 155 171 [0:1:2:3:1]# 157 182 173 185 157 167 144 189 195 [0:1:3:1:2]# 157 161 170 131 190 158 161 178 167 [0:1:2:0:1]# 155 171 157 154 167 155 157 182 173 [0:1:3:0:2]# 131 190 158 161 178 167 173 152 177 [0:1:2:0:2]# 190 158 150 178 167 177 152 177 174 [0:1:2:4:1]# 154 167 155 157 182 173 185 157 167 [0:1:2:1:2]# 161 170 168 190 158 150 178 167 177 [0:1:2:4:2]# 178 167 177 152 177 174 176 154 146 [0:1:0:2:1]# 167 152 185 195 192 144 157 161 155 [0:1:0:0:0]# 190 162 168 174 148 154 177 158 194 [0:1:3:2:0]# 156 163 140 196 148 162 162 168 178 [0:1:1:0:1]# 171 157 161 167 155 196 182 173 184 [0:1:0:2:2]# 146 184 176 168 157 161 150 131 190 [0:1:1:0:0]# 178 190 162 155 174 148 160 177 158 [0:1:2:3:0]# 194 160 177 163 140 161 148 162 159 [0:1:2:4:0]# 154 155 174 194 160 177 163 140 161 [0:1:1:3:2]# 177 174 173 154 146 184 170 168 157 [0:1:2:1:1]# 144 189 195 155 171 157 154 167 155 [0:1:1:1:0]# 162 159 196 178 190 162 155 174 148 [0:1:1:3:1]# 182 173 184 157 167 152 189 195 192 [0:1:3:4:1]# 196 154 167 184 157 182 152 185 157 [0:1:1:4:2]# 167 177 161 177 174 173 154 146 184 [0:1:0:1:0]# 159 196 148 190 162 168 174 148 154 [0:1:3:4:2]# 161 178 167 173 152 177 184 176 154 [0:1:0:0:1]# 157 161 155 155 196 154 173 184 157 [0:1:1:4:0]# 155 174 148 160 177 158 140 161 156 [0:1:2:1:0]# 148 162 159 168 178 190 154 155 174 [0:1:2:0:0]# 168 178 190 154 155 174 194 160 177 [0:1:3:3:2]# 173 152 177 184 176 154 157 161 170 [0:1:0:4:0]# 174 148 154 177 158 194 161 156 163 [0:1:1:2:0]# 140 161 156 162 159 196 178 190 162 [0:1:0:1:1]# 195 192 144 157 161 155 155 196 154 [0:1:3:0:0]# 162 168 178 148 154 155 158 194 160 [0:1:3:1:1]# 192 144 189 161 155 171 196 154 167 [0:1:0:4:1]# 155 196 154 173 184 157 167 152 185 [0:1:2:2:1]# 185 157 167 144 189 195 155 171 157 [0:1:3:1:0]# 196 148 162 162 168 178 148 154 155 [0:1:2:2:2]# 176 154 146 161 170 168 190 158 150 [0:1:0:3:0]# 177 158 194 161 156 163 159 196 148 [0:1:3:3:0]# 158 194 160 156 163 140 196 148 162 [0:1:0:1:2]# 168 157 161 150 131 190 177 161 178 [0:1:3:2:2]# 184 176 154 157 161 170 131 190 158 [0:1:1:3:0]# 160 177 158 140 161 156 162 159 196 [0:1:1:2:2]# 154 146 184 170 168 157 158 150 131 [0:1:1:4:1]# 167 155 196 182 173 184 157 167 152 [0:1:1:1:2]# 170 168 157 158 150 131 167 177 161 [0:1:0:0:2]# 150 131 190 177 161 178 174 173 152 [0:1:0:4:2]# 177 161 178 174 173 152 146 184 176 mean# 166.6666667 dev# 15.11254078 |80.9%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/mvp/oidc_proxy/bin/mvp_oidc_proxy >> TIntervalSetTest::IntervalSetDifference [GOOD] >> TIntervalSetTest::IntervalVecDifferenceInplaceSelf [GOOD] >> TIntervalSetTest::IntervalSetDifferenceInplaceSelf [GOOD] >> TIntervalSetTest::IntervalSetTestIterator [GOOD] |80.9%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/mvp/oidc_proxy/bin/mvp_oidc_proxy |80.9%| [LD] {RESULT} $(B)/ydb/mvp/oidc_proxy/bin/mvp_oidc_proxy |81.0%| [TM] {BAZEL_UPLOAD} ydb/core/blobstorage/groupinfo/ut/unittest |80.9%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/ut_blobstorage/ut_balancing/unittest |80.9%| [BN] {default-linux-x86_64, release, asan} $(B)/ydb/tests/stability/tool/simple_queue |80.9%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/tests/datashard/select/ydb-tests-datashard-select |80.9%| [LD] {default-linux-x86_64, release, asan} 
$(B)/ydb/core/blobstorage/ut_blobstorage/ut_donor/ydb-core-blobstorage-ut_blobstorage-ut_donor
>> TIntervalSetTest::IntervalMapDifference [GOOD]
>> TIntervalSetTest::IntervalMapDifferenceInplaceSelf [GOOD]
|81.0%| [BN] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/tests/stability/tool/simple_queue
|81.0%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/tests/datashard/select/ydb-tests-datashard-select
|81.0%| [LD] {RESULT} $(B)/ydb/tests/datashard/select/ydb-tests-datashard-select
|81.0%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/blobstorage/ut_blobstorage/ut_donor/ydb-core-blobstorage-ut_blobstorage-ut_donor
|81.0%| [LD] {RESULT} $(B)/ydb/core/blobstorage/ut_blobstorage/ut_donor/ydb-core-blobstorage-ut_blobstorage-ut_donor
|81.0%| [TM] {BAZEL_UPLOAD} ydb/core/blobstorage/ut_blobstorage/ut_balancing/unittest
>> VDiskTest::HugeBlobRecompaction [GOOD]
>> CheckIntegrityBlock42::PlacementOkWithErrors [GOOD]
>> CheckIntegrityBlock42::PlacementWithErrorsOnBlobDisks
|81.0%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/blobstorage/vdisk/repl/ut/ydb-core-blobstorage-vdisk-repl-ut
|81.0%| [LD] {RESULT} $(B)/ydb/core/blobstorage/vdisk/repl/ut/ydb-core-blobstorage-vdisk-repl-ut
|81.0%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/blobstorage/vdisk/repl/ut/ydb-core-blobstorage-vdisk-repl-ut
|81.0%| [LD] {BAZEL_UPLOAD} $(B)/ydb/core/blobstorage/vdisk/query/ut/ydb-core-blobstorage-vdisk-query-ut
|81.0%| [TM] {asan, default-linux-x86_64, release} ydb/core/util/ut/unittest >> TIntervalSetTest::IntervalSetTestIterator [GOOD]
|81.0%| [TM] {BAZEL_UPLOAD} ydb/core/util/ut/unittest
>> CheckIntegrityBlock42::PlacementWrongDisks [GOOD]
>> CheckIntegrityMirror3dc::DataErrorOneCopy
|81.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/ut_blobstorage/osiris.cpp
------- [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/ut_vdisk2/unittest >> VDiskTest::HugeBlobRecompaction [GOOD]
Test command err:
Change MinHugeBlobSize# 512000
Change MinHugeBlobSize# 307200
|81.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/ut_blobstorage/osiris.cpp
|81.0%| [TM] {BAZEL_UPLOAD} ydb/core/blobstorage/ut_vdisk2/unittest
>> TCowBTreeTest::SnapshotRollback [GOOD]
>> TCowBTreeTest::SnapshotRollbackEarlyErase
|81.0%| [TM] {asan, default-linux-x86_64, release} ydb/core/util/ut/unittest >> TIntervalSetTest::IntervalMapDifferenceInplaceSelf [GOOD]
|81.0%| [TM] {BAZEL_UPLOAD} ydb/core/util/ut/unittest
|81.0%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/blobstorage/vdisk/synclog/ut/ydb-core-blobstorage-vdisk-synclog-ut
|81.0%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/blobstorage/vdisk/synclog/ut/ydb-core-blobstorage-vdisk-synclog-ut
|81.0%| [LD] {RESULT} $(B)/ydb/core/blobstorage/vdisk/synclog/ut/ydb-core-blobstorage-vdisk-synclog-ut
|81.1%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/blobstorage/vdisk/hulldb/generic/ut/ydb-core-blobstorage-vdisk-hulldb-generic-ut
|81.1%| [LD] {RESULT} $(B)/ydb/core/blobstorage/vdisk/hulldb/generic/ut/ydb-core-blobstorage-vdisk-hulldb-generic-ut
|81.1%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/library/actors/examples/02_discovery/example_02_discovery
|81.1%| [LD] {RESULT} $(B)/ydb/library/actors/examples/02_discovery/example_02_discovery
|81.1%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/library/actors/core/ut/ydb-library-actors-core-ut
|81.1%| [LD] {RESULT} $(B)/ydb/library/actors/core/ut/ydb-library-actors-core-ut
>> CheckIntegrityMirror3dc::PlacementBlobIsLost [GOOD]
|81.1%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/fq/libs/compute/common/ut/ydb-core-fq-libs-compute-common-ut
|81.1%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/fq/libs/compute/common/ut/ydb-core-fq-libs-compute-common-ut
|81.1%| [LD] {RESULT} $(B)/ydb/core/fq/libs/compute/common/ut/ydb-core-fq-libs-compute-common-ut
>> CheckIntegrityMirror3dc::PlacementDisintegrated
>> TLockFreeIntrusiveStackTest::ConcurrentRefCountNeverEmpty [GOOD]
>> TLogPriorityMuteTests::MuteUntilTest [GOOD]
>> TLogPriorityMuteTests::AtomicMuteUntilTest [GOOD]
>> TLogPriorityMuteTests::UnmuteTest [GOOD]
>> TLogPriorityMuteTests::AtomicUnmuteTest [GOOD]
>> TLogPriorityMuteTests::CheckPriorityWithSetMuteTest [GOOD]
>> TLogPriorityMuteTests::AtomicCheckPriorityWithSetMuteTest [GOOD]
>> TLogPriorityMuteTests::CheckPriorityWithSetMuteDurationTest [GOOD]
>> TLogPriorityMuteTests::AtomicCheckPriorityWithSetMuteDurationTest [GOOD]
>> TOneOneQueueTests::TestSimpleEnqueueDequeue [GOOD]
>> TOneOneQueueTests::CleanInDestructor [GOOD]
>> TOneOneQueueTests::DeleteInDestructor [GOOD]
>> TOneOneQueueTests::ReadIterator [GOOD]
>> TPageMapTest::TestResize
>> CheckIntegrityBlock42::PlacementWithErrorsOnBlobDisks [GOOD]
>> CheckIntegrityBlock42::PlacementStatusUnknown
>> TPageMapTest::TestResize [GOOD]
>> TPageMapTest::TestRandom
|81.2%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/blobstorage/backpressure/ut/ydb-core-blobstorage-backpressure-ut
|81.2%| [LD] {RESULT} $(B)/ydb/core/blobstorage/backpressure/ut/ydb-core-blobstorage-backpressure-ut
>> VDiskBalancing::TestStopOneNode_Block42 [GOOD]
>> KqpFormats_Arrow_Conversion::NestedType_Tuple_DataValue
|81.2%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/tests/olap/column_family/compression/ydb-tests-olap-column_family-compression
|81.2%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/tests/olap/column_family/compression/ydb-tests-olap-column_family-compression
|81.2%| [LD] {RESULT} $(B)/ydb/tests/olap/column_family/compression/ydb-tests-olap-column_family-compression
|81.2%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/tests/stress/statistics_workload/statistics_workload
|81.2%| [LD] {RESULT} $(B)/ydb/tests/stress/statistics_workload/statistics_workload
|81.2%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/tests/stress/statistics_workload/statistics_workload
>> KqpFormats_Arrow_Conversion::NestedType_Tuple_DataValue [GOOD]
>> KqpFormats_Arrow_Conversion::NestedType_Tuple_NestedValue
>> CheckIntegrityBlock42::PlacementBlobIsLost
>> CheckIntegrityMirror3dc::PlacementOkWithErrors
>> KqpFormats_Arrow_Conversion::NestedType_Tuple_NestedValue [GOOD]
>> CheckIntegrityBlock42::DataOk
>> KqpFormats_Arrow_Conversion::NestedType_Tuple_OptionalValue [GOOD]
>> KqpFormats_Arrow_Conversion::NestedType_Tuple_TaggedValue [GOOD]
>> KqpFormats_Arrow_Conversion::NestedType_Tagged_StructValue
>> KqpFormats_Arrow_Conversion::NestedType_Tagged_StructValue [GOOD]
>> KqpFormats_Arrow_Conversion::NestedType_Tagged_TupleValue [GOOD]
>> KqpFormats_Arrow_Conversion::NestedType_Tagged_TaggedValue [GOOD]
>> KqpFormats_Arrow_Conversion::NestedType_Tagged_TaggedOptionalTaggedTaggedValue [GOOD]
|81.2%| [LD] {BAZEL_UPLOAD} $(B)/ydb/library/actors/examples/02_discovery/example_02_discovery
>> TBTreeTest::RandomInsertInplace [GOOD]
>> TBTreeTest::RandomInsertThreadSafe
>> CheckIntegrityMirror3dc::DataErrorOneCopy [GOOD]
>> CheckIntegrityMirror3dc::DataErrorManyCopies
>> VDiskBalancing::TestStopOneNode_Mirror3dc [GOOD]
>> TBlobStorageHullHugeKeeperPersState::SerializeParse
>> TBlobStorageHullHugeKeeperPersState::SerializeParse [GOOD]
>> TChainLayoutBuilder::TestBucketsV2 [GOOD]
|81.2%| [LD] {BAZEL_UPLOAD} $(B)/ydb/core/blobstorage/backpressure/ut/ydb-core-blobstorage-backpressure-ut
|81.2%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/library/actors/interconnect/ut_huge_cluster/ydb-library-actors-interconnect-ut_huge_cluster
|81.2%| [LD] {RESULT} $(B)/ydb/library/actors/interconnect/ut_huge_cluster/ydb-library-actors-interconnect-ut_huge_cluster
|81.2%| [LD] {BAZEL_UPLOAD} $(B)/ydb/library/actors/core/ut/ydb-library-actors-core-ut
------- [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/ut_blobstorage/ut_balancing/unittest >> VDiskBalancing::TestStopOneNode_Block42 [GOOD]
Test command err:
RandomSeed# 15887141742381219943
SEND TEvPut with key [1:1:1:0:0:100:0]
TEvPutResult: TEvPutResult {Id# [1:1:1:0:0:100:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999988}
SEND TEvPut with key [1:1:2:0:0:100:0]
2026-01-08T15:22:58.842304Z 3 00h01m00.010512s :BS_NODE ERROR: {NW42@node_warden_pipe.cpp:60} Handle(TEvTabletPipe::TEvClientDestroyed) ClientId# [3:194:17] ServerId# [1:296:63] TabletId# 72057594037932033 PipeClientId# [3:194:17]
2026-01-08T15:22:58.857109Z 8 00h01m00.010512s :BS_NODE ERROR: {NW42@node_warden_pipe.cpp:60} Handle(TEvTabletPipe::TEvClientDestroyed) ClientId# [8:229:17] ServerId# [1:301:68] TabletId# 72057594037932033 PipeClientId# [8:229:17]
2026-01-08T15:22:58.857294Z 6 00h01m00.010512s :BS_NODE ERROR: {NW42@node_warden_pipe.cpp:60} Handle(TEvTabletPipe::TEvClientDestroyed) ClientId# [6:215:17] ServerId# [1:299:66] TabletId# 72057594037932033 PipeClientId# [6:215:17]
2026-01-08T15:22:58.857437Z 5 00h01m00.010512s :BS_NODE ERROR: {NW42@node_warden_pipe.cpp:60} Handle(TEvTabletPipe::TEvClientDestroyed) ClientId# [5:208:17] ServerId# [1:298:65] TabletId# 72057594037932033 PipeClientId# [5:208:17]
2026-01-08T15:22:58.857608Z 4 00h01m00.010512s :BS_NODE ERROR: {NW42@node_warden_pipe.cpp:60} Handle(TEvTabletPipe::TEvClientDestroyed) ClientId# [4:201:17] ServerId# [1:297:64] TabletId# 72057594037932033 PipeClientId# [4:201:17]
2026-01-08T15:22:58.857741Z 2 00h01m00.010512s :BS_NODE ERROR: {NW42@node_warden_pipe.cpp:60} Handle(TEvTabletPipe::TEvClientDestroyed) ClientId# [2:187:17] ServerId# [1:295:62] TabletId# 72057594037932033 PipeClientId# [2:187:17]
2026-01-08T15:22:58.857844Z 7 00h01m00.010512s :BS_NODE ERROR: {NW42@node_warden_pipe.cpp:60} Handle(TEvTabletPipe::TEvClientDestroyed) ClientId# [7:222:17] ServerId# [1:300:67] TabletId# 72057594037932033 PipeClientId# [7:222:17]
TEvPutResult: TEvPutResult {Id# [1:1:2:0:0:100:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999988}
Start compaction
Finish compaction
|81.2%| [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/common/result_set_format/ut/unittest >> KqpFormats_Arrow_Conversion::NestedType_Tagged_TaggedOptionalTaggedTaggedValue [GOOD]
|81.2%| [TM] {BAZEL_UPLOAD} ydb/core/blobstorage/ut_blobstorage/ut_balancing/unittest
|81.2%| [TM] {BAZEL_UPLOAD} ydb/core/kqp/common/result_set_format/ut/unittest
|81.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/ut_blobstorage/phantom_blobs.cpp
|81.2%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/tests/olap/oom/ydb-tests-olap-oom
|81.2%| [LD] {RESULT} $(B)/ydb/tests/olap/oom/ydb-tests-olap-oom
|81.2%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/tests/olap/oom/ydb-tests-olap-oom
>> KqpFormats_Arrow_Conversion::NestedType_List_OptionalValue
|81.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/ut_blobstorage/phantom_blobs.cpp
|81.3%| [BN] {default-linux-x86_64, release, asan} $(B)/ydb/tests/stability/tool/statistics_workload
>> CheckIntegrityBlock42::PlacementStatusUnknown [GOOD]
>> KqpFormats_Arrow_Conversion::NestedType_List_OptionalValue [GOOD]
>> KqpFormats_Arrow_Conversion::NestedType_List_OptionalVariantValue
|81.3%| [BN] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/tests/stability/tool/statistics_workload
|81.3%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/tests/datashard/s3/ydb-tests-datashard-s3
|81.3%| [LD] {RESULT} $(B)/ydb/tests/datashard/s3/ydb-tests-datashard-s3
|81.3%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/tests/datashard/s3/ydb-tests-datashard-s3
|81.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/ut_vdisk_io/vdisk_io.cpp
------- [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/ut_blobstorage/ut_balancing/unittest >> VDiskBalancing::TestStopOneNode_Mirror3dc [GOOD]
Test command err:
RandomSeed# 4759204574637000558
SEND TEvPut with key [1:1:1:0:0:100:0]
TEvPutResult: TEvPutResult {Id# [1:1:1:0:0:100:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999988}
SEND TEvPut with key [1:1:2:0:0:100:0]
2026-01-08T15:22:58.166067Z 1 00h01m00.010512s :PIPE_SERVER ERROR: [72057594037932033] NodeDisconnected NodeId# 2
TEvPutResult: TEvPutResult {Id# [1:1:2:0:0:100:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999988}
Start compaction
Finish compaction
|81.3%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/vdisk/huge/ut/unittest >> TChainLayoutBuilder::TestBucketsV2 [GOOD]
|81.3%| [TM] {BAZEL_UPLOAD} ydb/core/blobstorage/ut_blobstorage/ut_balancing/unittest
|81.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/ut_vdisk_io/vdisk_io.cpp
|81.3%| [TM] {BAZEL_UPLOAD} ydb/core/blobstorage/vdisk/huge/ut/unittest
>> CheckIntegrityMirror3dc::PlacementDisintegrated [GOOD]
>> CheckIntegrityMirror3dc::DataOk
>> TCowBTreeTest::SnapshotRollbackEarlyErase [GOOD]
>> TCowBTreeTest::ShouldCallDtorsInplace [GOOD]
|81.3%| [LD] {BAZEL_UPLOAD} $(B)/ydb/library/actors/interconnect/ut_huge_cluster/ydb-library-actors-interconnect-ut_huge_cluster
>> TCowBTreeTest::ShouldCallDtorsThreadSafe [GOOD]
>> TEventPriorityQueueTest::TestPriority [GOOD]
>> TFastTlsTest::IterationAfterThreadDeath
>> TFastTlsTest::IterationAfterThreadDeath [GOOD]
>> TFastTlsTest::ManyThreadLocals [GOOD]
>> TFastTlsTest::ManyConcurrentKeys
>> CheckIntegrityBlock42::DataOk [GOOD]
>> CheckIntegrityBlock42::DataOkAdditionalEqualParts
|81.3%| [LD] {BAZEL_UPLOAD} $(B)/ydb/core/blobstorage/vdisk/hulldb/generic/ut/ydb-core-blobstorage-vdisk-hulldb-generic-ut
|81.3%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/tests/functional/serverless/ydb-tests-functional-serverless
|81.3%| [LD] {RESULT} $(B)/ydb/tests/functional/serverless/ydb-tests-functional-serverless
|81.3%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/tests/functional/serverless/ydb-tests-functional-serverless
>> KqpFormats_Arrow_Conversion::NestedType_List_OptionalVariantValue [GOOD]
>> KqpFormats_Arrow_Conversion::NestedType_List_TaggedValue
>> CheckIntegrityMirror3dc::DataErrorManyCopies [GOOD]
>> KqpFormats_Arrow_Conversion::NestedType_List_TaggedValue [GOOD]
>> KqpFormats_Arrow_Conversion::NestedType_Optional_DataValue [GOOD]
>> CheckIntegrityBlock42::PlacementBlobIsLost [GOOD]
>> KqpFormats_Arrow_Conversion::NestedType_Optional_ListValue
>> CheckIntegrityBlock42::PlacementAllOnHandoff
>> CheckIntegrityMirror3dc::PlacementOkWithErrors [GOOD]
>> CheckIntegrityMirror3dc::PlacementOkWithErrorsOnBlobDisks
>> KqpFormats_Arrow_Conversion::NestedType_Optional_ListValue [GOOD]
>> KqpFormats_Arrow_Conversion::NestedType_Optional_DictValue
>> VDiskBalancing::TwoPartsOnOneNodeTest_Block42 [GOOD]
|81.3%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/tests/stress/log/workload_log
|81.3%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/tests/stress/log/workload_log
|81.3%| [LD] {RESULT} $(B)/ydb/tests/stress/log/workload_log
------- [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/ut_blobstorage/ut_check_integrity/unittest >> CheckIntegrityBlock42::PlacementStatusUnknown [GOOD]
Test command err:
RandomSeed# 11680143675247050638
*** PUT BLOB [72075186270680851:57:3905:6:786432:1024:1] TO [82000000:1:0:5:0] FINISHED WITH OK ***
*** PUT BLOB [72075186270680851:57:3905:6:786432:1024:2] TO [82000000:1:0:6:0] FINISHED WITH OK ***
*** PUT BLOB [72075186270680851:57:3905:6:786432:1024:3] TO [82000000:1:0:7:0] FINISHED WITH OK ***
*** PUT BLOB [72075186270680851:57:3905:6:786432:1024:4] TO [82000000:1:0:0:0] FINISHED WITH OK ***
*** PUT BLOB [72075186270680851:57:3905:6:786432:1024:5] TO [82000000:1:0:1:0] FINISHED WITH OK ***
*** PUT BLOB [72075186270680851:57:3905:6:786432:1024:6] TO [82000000:1:0:2:0] FINISHED WITH OK ***
*** PUT BLOB [72075186270680851:57:3905:6:786432:1024:1] TO [82000000:1:0:5:0] FINISHED WITH OK ***
*** PUT BLOB [72075186270680851:57:3905:6:786432:1024:2] TO [82000000:1:0:6:0] FINISHED WITH OK ***
*** PUT BLOB [72075186270680851:57:3905:6:786432:1024:3] TO [82000000:1:0:7:0] FINISHED WITH OK ***
*** PUT BLOB [72075186270680851:57:3905:6:786432:1024:4] TO [82000000:1:0:0:0] FINISHED WITH OK ***
*** PUT BLOB [72075186270680851:57:3905:6:786432:1024:5] TO [82000000:1:0:1:0] FINISHED WITH OK ***
*** PUT BLOB [72075186270680851:57:3905:6:786432:1024:6] TO [82000000:1:0:2:0] FINISHED WITH OK ***
*** PUT BLOB [72075186270680851:57:3905:6:786432:1024:1] TO [82000000:1:0:5:0] FINISHED WITH OK ***
*** PUT BLOB [72075186270680851:57:3905:6:786432:1024:2] TO [82000000:1:0:6:0] FINISHED WITH OK ***
*** PUT BLOB [72075186270680851:57:3905:6:786432:1024:3] TO [82000000:1:0:7:0] FINISHED WITH OK ***
*** PUT BLOB [72075186270680851:57:3905:6:786432:1024:4] TO [82000000:1:0:0:0] FINISHED WITH OK ***
*** PUT BLOB [72075186270680851:57:3905:6:786432:1024:5] TO [82000000:1:0:1:0] FINISHED WITH OK ***
|81.3%| [TM] {BAZEL_UPLOAD} ydb/core/blobstorage/ut_blobstorage/ut_check_integrity/unittest
>> TFastTlsTest::ManyConcurrentKeys [GOOD]
>> TFifoQueueTest::ShouldPushPop [GOOD]
>> TFragmentedBufferTest::TestWriteRead [GOOD]
>> TFragmentedBufferTest::TestOverwriteRead [GOOD]
>> TFragmentedBufferTest::TestIntersectedWriteRead [GOOD]
>> TFragmentedBufferTest::TestIntersectedWriteRead2 [GOOD]
>> TFragmentedBufferTest::TestIntersectedWriteRead3 [GOOD]
>> TFragmentedBufferTest::Test3WriteRead [GOOD]
>> TFragmentedBufferTest::Test5WriteRead [GOOD]
>> TFragmentedBufferTest::TestIsNotMonolith [GOOD]
>> TFragmentedBufferTest::TestGetMonolith [GOOD]
>> TFragmentedBufferTest::TestSetMonolith [GOOD]
>> TFragmentedBufferTest::TestReplaceWithSetMonolith [GOOD]
>> TFragmentedBufferTest::CopyFrom [GOOD]
>> TFragmentedBufferTest::ReadWriteRandom
>> KqpFormats_Arrow_Conversion::NestedType_Optional_DictValue [GOOD]
>> KqpFormats_Arrow_Conversion::NestedType_Optional_OptionalValue [GOOD]
>> KqpFormats_Arrow_Conversion::NestedType_Optional_OptionalVariantValue [GOOD]
|81.3%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/kqp/tools/combiner_perf/bin/combiner_perf
|81.3%| [LD] {RESULT} $(B)/ydb/core/kqp/tools/combiner_perf/bin/combiner_perf
|81.3%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/kqp/tools/combiner_perf/bin/combiner_perf
>> KqpFormats_Arrow_Conversion::DataType_Timestamp
>> CheckIntegrityBlock42::DataOkAdditionalEqualParts [GOOD]
>> CheckIntegrityBlock42::DataErrorSixPartsTwoBroken
------- [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/ut_blobstorage/ut_balancing/unittest >> VDiskBalancing::TwoPartsOnOneNodeTest_Block42 [GOOD]
Test command err:
RandomSeed# 8176913871119114206
SEND TEvPut with key [1:1:1:0:0:100:0]
2026-01-08T15:23:00.231664Z 1 00h01m00.010512s :PIPE_SERVER ERROR: [72057594037932033] NodeDisconnected NodeId# 6
2026-01-08T15:23:00.232214Z 1 00h01m00.010512s :PIPE_SERVER ERROR: [72057594037932033] NodeDisconnected NodeId# 5
TEvPutResult: TEvPutResult {Id# [1:1:1:0:0:100:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999988}
Node 0: 4 Node 1: 5 Node 2: 6 Node 3: 1 Node 4: Node 5: Node 6: 2 Node 7: 3
2026-01-08T15:23:00.477189Z 1 00h01m00.011024s :PIPE_SERVER ERROR: [72057594037932033] NodeDisconnected NodeId# 7
Node 0: 4 Node 1: 5 Node 2: 6 Node 3: 1 2 Node 4: Node 5: 1 Node 6: Node 7: 3
Start compaction 1
Finish compaction 1
|81.3%| [TM] {BAZEL_UPLOAD} ydb/core/blobstorage/ut_blobstorage/ut_balancing/unittest
------- [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/ut_blobstorage/ut_check_integrity/unittest >> CheckIntegrityMirror3dc::DataErrorManyCopies [GOOD]
Test command err:
RandomSeed# 2952621487808892785
*** PUT BLOB [72075186270680851:57:3905:6:786432:1024:3] TO [82000000:1:0:7:0] FINISHED WITH OK ***
*** PUT BLOB [72075186270680851:57:3905:6:786432:1024:4] TO [82000000:1:0:0:0] FINISHED WITH OK ***
*** PUT BLOB [72075186270680851:57:3905:6:786432:1024:5] TO [82000000:1:0:1:0] FINISHED WITH OK ***
*** PUT BLOB [72075186270680851:57:3905:6:786432:1024:6] TO [82000000:1:0:2:0] FINISHED WITH OK ***
*** PUT BLOB [72075186270680851:57:3905:6:786432:1024:1] TO [82000000:1:0:3:0] FINISHED WITH OK ***
*** PUT BLOB [72075186270680851:57:3905:6:786432:1024:2] TO [82000000:1:0:3:0] FINISHED WITH OK ***
*** PUT BLOB [72075186270680851:57:3905:6:786432:1024:1] TO [82000000:1:1:0:0] FINISHED WITH OK ***
*** PUT BLOB [72075186270680851:57:3905:6:786432:1024:2] TO [82000000:1:2:0:0] FINISHED WITH OK ***
*** PUT BLOB [72075186270680851:57:3905:6:786432:1024:3] TO [82000000:1:0:0:0] FINISHED WITH OK ***
Disks: 0: [82000000:1:1:0:0] 1: [82000000:1:2:0:0] 2: [82000000:1:0:0:0] 3: [82000000:1:1:1:0] 4: [82000000:1:2:1:0] 5: [82000000:1:0:1:0] 6: [82000000:1:1:2:0] 7: [82000000:1:2:2:0] 8: [82000000:1:0:2:0]
Layout info: ver0 disks [ 0 1 ], ver1 disks [ 2 ]
ERROR: There are unequal parts
*** PUT BLOB [72075186270680851:57:3905:6:786432:1024:1] TO [82000000:1:1:0:0] FINISHED WITH OK ***
*** PUT BLOB [72075186270680851:57:3905:6:786432:1024:2] TO [82000000:1:2:0:0] FINISHED WITH OK ***
*** PUT BLOB [72075186270680851:57:3905:6:786432:1024:3] TO [82000000:1:0:0:0] FINISHED WITH OK ***
*** PUT BLOB [72075186270680851:57:3905:6:786432:1024:1] TO [82000000:1:1:1:0] FINISHED WITH OK ***
*** PUT BLOB [72075186270680851:57:3905:6:786432:1024:2] TO [82000000:1:2:1:0] FINISHED WITH OK ***
*** PUT BLOB [72075186270680851:57:3905:6:786432:1024:3] TO [82000000:1:0:1:0] FINISHED WITH OK ***
Disks: 0: [82000000:1:1:0:0] 1: [82000000:1:2:0:0] 2: [82000000:1:0:0:0] 3: [82000000:1:1:1:0] 4: [82000000:1:2:1:0] 5: [82000000:1:0:1:0] 6: [82000000:1:1:2:0] 7: [82000000:1:2:2:0] 8: [82000000:1:0:2:0]
Layout info: ver0 disks [ 0 1 2 ], ver1 disks [ 3 4 5 ]
ERROR: There are unequal parts
|81.3%| [TM] {BAZEL_UPLOAD} ydb/core/blobstorage/ut_blobstorage/ut_check_integrity/unittest
|81.3%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/tests/datashard/partitioning/ydb-tests-datashard-partitioning
>> KqpFormats_Arrow_Conversion::DataType_Timestamp [GOOD]
>> KqpFormats_Arrow_Conversion::DataType_TzDate
>> KqpFormats_Arrow_Conversion::DataType_UInt8
>> TQueryResultSizeTrackerTest::SerializeDeserializeMaxPtotobufSize
>> KqpFormats_Arrow_Conversion::DataType_Bool
|81.3%| [LD] {RESULT} $(B)/ydb/tests/datashard/partitioning/ydb-tests-datashard-partitioning
|81.3%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/tests/datashard/partitioning/ydb-tests-datashard-partitioning
|81.3%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/blobstorage/ut_vdisk/ydb-core-blobstorage-ut_vdisk
>> TSubgroupPartLayoutTest::CountEffectiveReplicas1of4 [GOOD]
>> TSubgroupPartLayoutTest::CountEffectiveReplicas2of4
|81.3%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/blobstorage/ut_vdisk/ydb-core-blobstorage-ut_vdisk
|81.3%| [LD] {RESULT} $(B)/ydb/core/blobstorage/ut_vdisk/ydb-core-blobstorage-ut_vdisk
>> KqpFormats_Arrow_Conversion::DataType_UInt8 [GOOD]
>> KqpFormats_Arrow_Conversion::DataType_UInt16
>> KqpFormats_Arrow_Conversion::DataType_Bool [GOOD]
>> KqpFormats_Arrow_Conversion::DataType_Decimal
>> KqpFormats_Arrow_Conversion::DataType_UInt16 [GOOD]
>> KqpFormats_Arrow_Conversion::DataType_UInt32
>> KqpFormats_Arrow_Conversion::DataType_UInt32 [GOOD]
>> KqpFormats_Arrow_Conversion::DataType_UInt64
>> CheckIntegrityBlock42::PlacementAllOnHandoff [GOOD]
>> CheckIntegrityBlock42::PlacementDisintegrated
>> CheckIntegrityMirror3dc::DataOk [GOOD]
>> KqpFormats_Arrow_Conversion::DataType_Decimal [GOOD]
>> KqpFormats_Arrow_Conversion::DataType_DyNumber
|81.4%| [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/common/result_set_format/ut/unittest >> KqpFormats_Arrow_Conversion::NestedType_Optional_OptionalVariantValue [GOOD]
|81.4%| [TM] {BAZEL_UPLOAD} ydb/core/kqp/common/result_set_format/ut/unittest
>> KqpFormats_Arrow_Conversion::DataType_UInt64 [GOOD]
>> KqpFormats_Arrow_Conversion::DataType_TzTimestamp
>> CheckIntegrityMirror3dc::PlacementOkWithErrorsOnBlobDisks [GOOD]
>> CheckIntegrityMirror3of4::PlacementBlobIsLost
>> TBlobStorageReplRecoveryMachine::BasicFunctionality
>> TBTreeTest::RandomInsertThreadSafe [GOOD]
>> TBTreeTest::DuplicateKeysInplace
>> VDiskBalancing::TestDontSendToReadOnlyTest_Block42 [GOOD]
>> KqpFormats_Arrow_Conversion::DataType_TzDate [GOOD]
>> KqpFormats_Arrow_Conversion::DataType_TzDatetime
|81.4%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/library/actors/testlib/ut/ydb-library-actors-testlib-ut
|81.4%| [LD] {RESULT} $(B)/ydb/library/actors/testlib/ut/ydb-library-actors-testlib-ut
>> TBTreeTest::DuplicateKeysInplace [GOOD]
>> TBTreeTest::DuplicateKeysThreadSafe
>> KqpFormats_Arrow_Conversion::DataType_DyNumber [GOOD]
>> KqpFormats_Arrow_Conversion::DataType_Double
>> KqpFormats_Arrow_Conversion::DataType_Double [GOOD]
>> KqpFormats_Arrow_Conversion::DataType_Date
>> KqpFormats_Arrow_Conversion::DataType_Yson
>> CheckIntegrityBlock42::DataErrorSixPartsTwoBroken [GOOD]
>> CheckIntegrityBlock42::DataOkErasureFiveParts
>> KqpFormats_Arrow_Conversion::DataType_Date [GOOD]
>> KqpFormats_Arrow_Conversion::DataType_Datetime
|81.4%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/tests/functional/statistics/ydb-tests-functional-statistics
|81.4%| [LD] {RESULT} $(B)/ydb/tests/functional/statistics/ydb-tests-functional-statistics
|81.4%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/tests/functional/statistics/ydb-tests-functional-statistics
>> TBTreeTest::DuplicateKeysThreadSafe [GOOD]
>> TBTreeTest::ShouldCallDtorsInplace [GOOD]
>> TBTreeTest::ShouldCallDtorsThreadSafe [GOOD]
>> TBTreeTest::Concurrent
>> KqpFormats_Arrow_Conversion::DataType_Datetime [GOOD]
>> KqpFormats_Arrow_Conversion::DataType_Date32
>> KqpFormats_Arrow_Conversion::DataType_Date32 [GOOD]
>> KqpFormats_Arrow_Conversion::DataType_Datetime64 [GOOD]
>> KqpFormats_Arrow_Conversion::DataType_EmptyDict
>> KqpFormats_Arrow_Conversion::DataType_Int8
>> KqpFormats_Arrow_Conversion::DataType_Yson [GOOD]
>> KqpFormats_Arrow_Conversion::NestedType_List_DataValue [GOOD]
>> KqpFormats_Arrow_Conversion::NestedType_List_NestedValue
------- [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/ut_blobstorage/ut_check_integrity/unittest >> CheckIntegrityMirror3dc::DataOk [GOOD]
Test command err:
RandomSeed# 7063142107855249766
*** PUT BLOB [72075186270680851:57:3905:6:786432:1024:1] TO [82000000:1:1:0:0] FINISHED WITH OK ***
*** PUT BLOB [72075186270680851:57:3905:6:786432:1024:2] TO [82000000:1:2:0:0] FINISHED WITH OK ***
*** PUT BLOB [72075186270680851:57:3905:6:786432:1024:3] TO [82000000:1:0:0:0] FINISHED WITH OK ***
Group is disintegrated or has network problems
*** PUT BLOB [72075186270680851:57:3905:6:786432:1024:1] TO [82000000:1:1:0:0] FINISHED WITH OK ***
*** PUT BLOB [72075186270680851:57:3905:6:786432:1024:2] TO [82000000:1:2:0:0] FINISHED WITH OK ***
*** PUT BLOB [72075186270680851:57:3905:6:786432:1024:3] TO [82000000:1:0:0:0] FINISHED WITH OK ***
Disks: 0: [82000000:1:1:0:0] 1: [82000000:1:2:0:0] 2: [82000000:1:0:0:0] 3: [82000000:1:1:1:0] 4: [82000000:1:2:1:0] 5: [82000000:1:0:1:0] 6: [82000000:1:1:2:0] 7: [82000000:1:2:2:0] 8: [82000000:1:0:2:0]
Layout info: ver0 disks [ 0 1 2 ]
>> TBlobStorageReplRecoveryMachine::BasicFunctionality [GOOD]
>> KqpFormats_Arrow_Conversion::DataType_TzTimestamp [GOOD]
>> KqpFormats_Arrow_Conversion::DataType_TzTimestamp64
>> KqpFormats_Arrow_Conversion::DataType_TzDatetime [GOOD]
>> KqpFormats_Arrow_Conversion::DataType_Timestamp64
|81.4%| [TM] {BAZEL_UPLOAD} ydb/core/blobstorage/ut_blobstorage/ut_check_integrity/unittest
>> KqpFormats_Arrow_Conversion::DataType_EmptyDict [GOOD]
>> KqpFormats_Arrow_Conversion::DataType_Timestamp64 [GOOD]
>> KqpFormats_Arrow_Conversion::DataType_TzDate32
------- [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/ut_blobstorage/ut_balancing/unittest >> VDiskBalancing::TestDontSendToReadOnlyTest_Block42 [GOOD]
Test command err:
RandomSeed# 6933996188002709588
SEND TEvPut with key [1:1:1:0:0:100:0]
TEvPutResult: TEvPutResult {Id# [1:1:1:0:0:100:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999988}
Setting VDisk read-only to 1 for position 0
Invoking SetVDiskReadOnly for vdisk [82000000:1:0:0:0]
SEND TEvPut with key [1:1:2:0:0:100:0]
2026-01-08T15:22:58.858481Z 1 00h01m30.060512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) Unavailable in read-only Sender# [1:6351:836]
TEvPutResult: TEvPutResult {Id# [1:1:2:0:0:100:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999988}
Setting VDisk read-only to 0 for position 0
Invoking SetVDiskReadOnly for vdisk [82000000:1:0:0:0]
Start compaction
Finish compaction
|81.4%| [TM] {BAZEL_UPLOAD} ydb/core/blobstorage/ut_blobstorage/ut_balancing/unittest
>> CheckIntegrityBlock42::PlacementDisintegrated [GOOD]
>> CheckIntegrityBlock42::DataStatusUnknown
>> TSubgroupPartLayoutTest::CountEffectiveReplicas2of4 [GOOD]
>> CheckIntegrityMirror3of4::PlacementBlobIsLost [GOOD]
>> TBTreeTest::Concurrent [GOOD]
>> KqpFormats_Arrow_Conversion::DataType_Int8 [GOOD]
>> TBTreeTest::IteratorDestructor [GOOD]
>> TCacheTest::EnsureNoLeakAfterLruCacheDtor [GOOD]
>> CompressionTest::lz4_generator_basic [GOOD]
>> CompressionTest::lz4_generator_deflates [GOOD]
>> FastLookupUniqueList::Stress
>> KqpFormats_Arrow_Conversion::DataType_Int16
|81.4%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/vdisk/repl/ut/unittest >> TBlobStorageReplRecoveryMachine::BasicFunctionality [GOOD]
|81.4%| [TM] {BAZEL_UPLOAD} ydb/core/blobstorage/vdisk/repl/ut/unittest
>> KqpFormats_Arrow_Conversion::DataType_Int16 [GOOD]
>> KqpFormats_Arrow_Conversion::DataType_Int32
|81.4%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/scheme/ut_pg/ydb-core-scheme-ut_pg
|81.4%| [LD] {RESULT} $(B)/ydb/core/scheme/ut_pg/ydb-core-scheme-ut_pg
|81.4%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/scheme/ut_pg/ydb-core-scheme-ut_pg
|81.5%| [LD] {BAZEL_UPLOAD} $(B)/ydb/library/actors/testlib/ut/ydb-library-actors-testlib-ut
>> CheckIntegrityBlock42::DataOkErasureFiveParts [GOOD]
>> KqpFormats_Arrow_Conversion::NestedType_List_NestedValue [GOOD]
>> KqpFormats_Arrow_Conversion::DataType_Int32 [GOOD]
>> KqpFormats_Arrow_Conversion::DataType_Int64
>> KqpFormats_Arrow_Conversion::NestedType_Dict_DataKey
>> KqpFormats_Arrow_Conversion::DataType_TzDate32 [GOOD]
>> KqpFormats_Arrow_Conversion::DataType_Int64 [GOOD]
>> KqpFormats_Arrow_Conversion::DataType_TzDatetime64
>> KqpFormats_Arrow_Conversion::DataType_Float
|81.5%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/util/btree_benchmark/btree_benchmark
|81.5%| [LD] {RESULT} $(B)/ydb/core/util/btree_benchmark/btree_benchmark
>> KqpFormats_Arrow_Conversion::DataType_TzTimestamp64 [GOOD]
>> KqpFormats_Arrow_Conversion::DataType_Utf8
>> KqpFormats_Arrow_Conversion::DataType_Float [GOOD]
>> KqpFormats_Arrow_Conversion::DataType_Interval
|81.5%| [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/common/result_set_format/ut/unittest >> KqpFormats_Arrow_Conversion::DataType_EmptyDict [GOOD]
>> KqpFormats_Arrow_Conversion::DataType_Interval [GOOD]
>> KqpFormats_Arrow_Conversion::NestedType_Dict_DataKey [GOOD]
>> KqpFormats_Arrow_Conversion::NestedType_Dict_NestedKey
>> KqpFormats_Arrow_Conversion::DataType_Interval64
|81.5%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/vdisk/repl/ut/unittest
------- [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/groupinfo/ut/unittest >> TSubgroupPartLayoutTest::CountEffectiveReplicas2of4 [GOOD]
Test command err:
testing erasure none main# 0 main# 1 Checked 2 cases, took 2 us
testing erasure block-4-2 main# 0 main# 1 main# 2 main# 3 main# 4 main# 5 main# 6 main# 7 main# 8 main# 9 main# 10 main# 11 main# 12 main# 13 main# 14 main# 15 main# 16 main# 17 main# 18 main# 19 main# 20 main# 21 main# 22 main# 23 main# 24 main# 25 main# 26 main# 27 main# 28 main# 29 main# 30 main# 31 main# 32 main# 33 main# 34 main# 35 main# 36 main# 37 main# 38 main# 39 main# 40 main# 41 main# 42 main# 43 main# 44 main# 45 main# 46 main# 47 main# 48 main# 49 main# 50 main# 51 main# 52 main# 53 main# 54 main# 55 main# 56 main# 57 main# 58 main# 59 main# 60 main# 61 main# 62 main# 63 Checked 262144 cases, took 774635 us
testing erasure mirror-3-2 main# 0 main# 1 main# 2 main# 3 main# 4 main# 5 main# 6 main# 7 Checked 512 cases, took 85 us
testing erasure block-2-2 main# 0 main# 1 main# 2 main# 3 main# 4 main# 5 main# 6 main# 7 main# 8 main# 9 main# 10 main# 11 main# 12 main# 13 main# 14 main# 15 Checked 4096 cases, took 997 us
testing erasure mirror-3 main# 0 main# 1 main# 2 main# 3 main# 4 main# 5 main# 6 main# 7 Checked 64 cases, took 12 us
testing erasure block-3-2 main# 0 main# 1 main# 2 main# 3 main# 4 main# 5 main# 6 main# 7 main# 8 main# 9 main# 10 main# 11 main# 12 main# 13 main# 14 main# 15 main# 16 main# 17 main# 18 main# 19 main# 20 main# 21 main# 22 main# 23 main# 24 main# 25 main# 26 main# 27 main# 28 main# 29 main# 30 main# 31 Checked 32768 cases, took 13403 us
testing erasure stripe-2-2 main# 0 main# 1 main# 2 main# 3 main# 4 main# 5 main# 6 main# 7 main# 8 main# 9 main# 10 main# 11 main# 12 main# 13 main# 14 main# 15 Checked 4096 cases, took 1042 us
|81.5%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/backpressure/ut/unittest
|81.5%| [TM] {BAZEL_UPLOAD} ydb/core/kqp/common/result_set_format/ut/unittest
|81.5%| [TM] {BAZEL_UPLOAD} ydb/core/blobstorage/vdisk/repl/ut/unittest
>> KqpFormats_Arrow_Conversion::DataType_Utf8 [GOOD]
>> KqpFormats_Arrow_Conversion::DataType_Uuid
>> KqpFormats_Arrow_Conversion::DataType_Interval64 [GOOD]
>> KqpFormats_Arrow_Conversion::DataType_Json
|81.5%| [TM] {BAZEL_UPLOAD} ydb/core/blobstorage/groupinfo/ut/unittest
|81.5%| [TM] {BAZEL_UPLOAD} ydb/core/blobstorage/backpressure/ut/unittest
------- [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/ut_blobstorage/ut_check_integrity/unittest >> CheckIntegrityMirror3of4::PlacementBlobIsLost [GOOD]
Test command err:
RandomSeed# 18001344635456087634
*** PUT BLOB [72075186270680851:57:3905:6:786432:1024:1] TO [82000000:1:1:0:0] FINISHED WITH OK ***
*** PUT BLOB [72075186270680851:57:3905:6:786432:1024:2] TO [82000000:1:2:0:0] FINISHED WITH OK ***
*** PUT BLOB [72075186270680851:57:3905:6:786432:1024:3] TO [82000000:1:0:0:0] FINISHED WITH OK ***
*** PUT BLOB [72075186270680851:57:3905:6:786432:1024:1] TO [82000000:1:1:0:0] FINISHED WITH OK ***
*** PUT BLOB [72075186270680851:57:3905:6:786432:1024:2] TO [82000000:1:2:0:0] FINISHED WITH OK ***
*** PUT BLOB [72075186270680851:57:3905:6:786432:1024:3] TO [82000000:1:0:0:0] FINISHED WITH OK ***
*** PUT BLOB [72075186270680851:57:3905:6:786432:1024:3] TO [82000000:1:0:1:0] FINISHED WITH OK ***
*** PUT BLOB [72075186270680851:57:3905:6:786432:1024:3] TO [82000000:1:0:2:0] FINISHED WITH OK ***
|81.5%| [TM] {BAZEL_UPLOAD} ydb/core/blobstorage/ut_blobstorage/ut_check_integrity/unittest
>> KqpFormats_Arrow_Conversion::DataType_Json [GOOD]
>> KqpFormats_Arrow_Conversion::DataType_EmptyList
>> KqpFormats_Arrow_Conversion::DataType_Uuid [GOOD]
>> KqpFormats_Arrow_Conversion::DataType_Void
>> TFragmentedBufferTest::ReadWriteRandom [GOOD]
>> TQueueBackpressureTest::PerfTrivial
|81.5%| [LD] {BAZEL_UPLOAD} $(B)/ydb/core/util/btree_benchmark/btree_benchmark
------- [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/ut_blobstorage/ut_check_integrity/unittest >> CheckIntegrityBlock42::DataOkErasureFiveParts [GOOD]
Test command err:
RandomSeed# 435000240318970902
*** PUT BLOB [72075186270680851:57:3905:6:786432:1024:1] TO [82000000:1:0:5:0] FINISHED WITH OK ***
*** PUT BLOB [72075186270680851:57:3905:6:786432:1024:2] TO [82000000:1:0:6:0] FINISHED WITH OK ***
*** PUT BLOB [72075186270680851:57:3905:6:786432:1024:3] TO [82000000:1:0:7:0] FINISHED WITH OK ***
*** PUT BLOB [72075186270680851:57:3905:6:786432:1024:4] TO [82000000:1:0:0:0] FINISHED WITH OK ***
*** PUT BLOB [72075186270680851:57:3905:6:786432:1024:5] TO [82000000:1:0:1:0] FINISHED WITH OK ***
*** PUT BLOB [72075186270680851:57:3905:6:786432:1024:6] TO [82000000:1:0:2:0] FINISHED WITH OK ***
Disks: 0: [82000000:1:0:5:0] 1: [82000000:1:0:6:0] 2: [82000000:1:0:7:0] 3: [82000000:1:0:0:0] 4: [82000000:1:0:1:0] 5: [82000000:1:0:2:0] 6: [82000000:1:0:3:0] 7: [82000000:1:0:4:0]
Layout info: part 1: ver0 disks [ 0 ] part 2: ver0 disks [ 1 ] part 3: ver0 disks [ 2 ] part 4: ver0 disks [ 3 ] part 5: ver0 disks [ 4 ] part 6: ver0 disks [ 5 ]
Erasure info: { part 1 disks [ 0 ]; part 2 disks [ 1 ]; part 3 disks [ 2 ]; part 4 disks [ 3 ]; } CHECK part 5 disks [ 4 ] -> OK
{ part 1 disks [ 0 ]; part 2 disks [ 1 ]; part 3 disks [ 2 ]; part 4 disks [ 3 ]; } CHECK part 6 disks [ 5 ] -> OK
*** PUT BLOB [72075186270680851:57:3905:6:786432:1024:1] TO [82000000:1:0:5:0] FINISHED WITH OK ***
*** PUT BLOB [72075186270680851:57:3905:6:786432:1024:2] TO [82000000:1:0:6:0] FINISHED WITH OK ***
*** PUT BLOB [72075186270680851:57:3905:6:786432:1024:3] TO [82000000:1:0:7:0] FINISHED WITH OK ***
*** PUT BLOB [72075186270680851:57:3905:6:786432:1024:4] TO [82000000:1:0:0:0] FINISHED WITH OK ***
*** PUT BLOB [72075186270680851:57:3905:6:786432:1024:5] TO [82000000:1:0:1:0] FINISHED WITH OK ***
*** PUT BLOB [72075186270680851:57:3905:6:786432:1024:6] TO [82000000:1:0:2:0] FINISHED WITH OK ***
*** PUT BLOB [72075186270680851:57:3905:6:786432:1024:1] TO [82000000:1:0:3:0] FINISHED WITH OK ***
*** PUT BLOB [72075186270680851:57:3905:6:786432:1024:2] TO [82000000:1:0:4:0] FINISHED WITH OK ***
Disks: 0: [82000000:1:0:5:0] 1: [82000000:1:0:6:0] 2: [82000000:1:0:7:0] 3: [82000000:1:0:0:0] 4: [82000000:1:0:1:0] 5: [82000000:1:0:2:0] 6: [82000000:1:0:3:0] 7: [82000000:1:0:4:0]
Layout info: part 1: ver0 disks [ 6 0 ] part 2: ver0 disks [ 7 1 ] part 3: ver0 disks [ 2 ] part 4: ver0 disks [ 3 ] part 5: ver0 disks [ 4 ] part 6: ver0 disks [ 5 ]
Erasure info: { part 1 disks [ 6 0 ]; part 2 disks [ 7 1 ]; part 3 disks [ 2 ]; part 4 disks [ 3 ]; } CHECK part 5 disks [ 4 ] -> OK
{ part 1 disks [ 6 0 ]; part 2 disks [ 7 1 ]; part 3 disks [ 2 ]; part 4 disks [ 3 ]; } CHECK part 6 disks [ 5 ] -> OK
*** PUT BLOB [72075186270680851:57:3905:6:786432:1024:1] TO [82000000:1:0:5:0] FINISHED WITH OK ***
*** PUT BLOB [72075186270680851:57:3905:6:786432:1024:2] TO [82000000:1:0:6:0] FINISHED WITH OK ***
*** PUT BLOB [72075186270680851:57:3905:6:786432:1024:3] TO [82000000:1:0:7:0] FINISHED WITH OK ***
*** PUT BLOB [72075186270680851:57:3905:6:786432:1024:4] TO [82000000:1:0:0:0] FINISHED WITH OK ***
*** PUT BLOB [72075186270680851:57:3905:6:786432:1024:5] TO [82000000:1:0:1:0] FINISHED WITH OK ***
*** PUT BLOB [72075186270680851:57:3905:6:786432:1024:6] TO [82000000:1:0:2:0] FINISHED WITH OK ***
Disks: 0: [82000000:1:0:5:0] 1: [82000000:1:0:6:0] 2: [82000000:1:0:7:0] 3: [82000000:1:0:0:0] 4: [82000000:1:0:1:0] 5: [82000000:1:0:2:0] 6: [82000000:1:0:3:0] 7: [82000000:1:0:4:0]
Layout info: part 1: ver0 disks [ 0 ] part 2: ver0 disks [ 1 ] part 3: ver0 disks [ 2 ] part 4: ver0 disks [ 3 ] part 5: ver0 disks [ 4 ] part 6: ver0 disks [ 5 ]
Erasure info: ERROR: There are erasure restore fails
*** PUT BLOB [72075186270680851:57:3905:6:786432:1024:1] TO [82000000:1:0:5:0] FINISHED WITH OK ***
*** PUT BLOB [72075186270680851:57:3905:6:786432:1024:2] TO [82000000:1:0:6:0] FINISHED WITH OK ***
*** PUT BLOB [72075186270680851:57:3905:6:786432:1024:3] TO [82000000:1:0:7:0] FINISHED WITH OK ***
*** PUT BLOB [72075186270680851:57:3905:6:786432:1024:4] TO [82000000:1:0:0:0] FINISHED WITH OK ***
*** PUT BLOB [72075186270680851:57:3905:6:786432:1024:5] TO [82000000:1:0:1:0] FINISHED WITH OK ***
Disks: 0: [82000000:1:0:5:0] 1: [82000000:1:0:6:0] 2: [82000000:1:0:7:0] 3: [82000000:1:0:0:0] 4: [82000000:1:0:1:0] 5: [82000000:1:0:2:0] 6: [82000000:1:0:3:0] 7: [82000000:1:0:4:0]
Layout info: part 1: ver0 disks [ 0 ] part 2: ver0 disks [ 1 ] part 3: ver0 disks [ 2 ] part 4: ver0 disks [ 3 ] part 5: ver0 disks [ 4 ] part 6:
Erasure info: { part 1 disks [ 0 ]; part 2 disks [ 1 ]; part 3 disks [ 2 ]; part 4 disks [ 3 ]; } CHECK part 5 disks [ 4 ] -> OK
|81.5%| [TM] {BAZEL_UPLOAD} ydb/core/blobstorage/ut_blobstorage/ut_check_integrity/unittest
>> KqpFormats_Arrow_Conversion::DataType_EmptyList [GOOD]
>> KqpFormats_Arrow_Conversion::DataType_TzDatetime64 [GOOD]
>> KqpFormats_Arrow_Conversion::DataType_String
>> KqpFormats_Arrow_Conversion::DataType_Void [GOOD]
>> CheckIntegrityBlock42::DataStatusUnknown [GOOD]
>> KqpFormats_Arrow_Conversion::DataType_String [GOOD]
>> KqpFormats_Arrow_Conversion::DataType_JsonDocument
>> KqpFormats_Arrow_Conversion::NestedType_Dict_NestedKey [GOOD]
>> KqpFormats_Arrow_Conversion::NestedType_Dict_OptionalKey
>> TQueueBackpressureTest::PerfInFlight
|81.5%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/backpressure/ut/unittest
|81.5%| [TM] {BAZEL_UPLOAD} ydb/core/blobstorage/backpressure/ut/unittest
|81.5%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/base/ut/ydb-core-base-ut
|81.5%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/base/ut/ydb-core-base-ut
|81.5%| [LD] {RESULT} $(B)/ydb/core/base/ut/ydb-core-base-ut
|81.5%| [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/common/result_set_format/ut/unittest >> KqpFormats_Arrow_Conversion::DataType_EmptyList [GOOD]
>> KqpFormats_Arrow_Conversion::NestedType_Dict_OptionalKey [GOOD]
>> KqpFormats_Arrow_Conversion::NestedType_Dict_TaggedKey
|81.5%| [TM] {BAZEL_UPLOAD} ydb/core/kqp/common/result_set_format/ut/unittest
|81.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/client/server/grpc_server.cpp
------- [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/ut_blobstorage/ut_check_integrity/unittest >> CheckIntegrityBlock42::DataStatusUnknown [GOOD]
Test command err:
RandomSeed# 8598085960483646252
*** PUT BLOB [72075186270680851:57:3905:6:786432:1024:1] TO [82000000:1:0:5:0] FINISHED WITH OK ***
*** PUT BLOB [72075186270680851:57:3905:6:786432:1024:2] TO [82000000:1:0:6:0] FINISHED WITH OK ***
*** PUT BLOB [72075186270680851:57:3905:6:786432:1024:3] TO [82000000:1:0:7:0] FINISHED WITH OK ***
*** PUT BLOB [72075186270680851:57:3905:6:786432:1024:1] TO [82000000:1:0:3:0] FINISHED WITH OK ***
*** PUT BLOB [72075186270680851:57:3905:6:786432:1024:2] TO [82000000:1:0:3:0] FINISHED WITH OK ***
*** PUT BLOB [72075186270680851:57:3905:6:786432:1024:3] TO [82000000:1:0:3:0] FINISHED WITH OK ***
*** PUT BLOB [72075186270680851:57:3905:6:786432:1024:4] TO [82000000:1:0:3:0] FINISHED WITH OK ***
*** PUT BLOB [72075186270680851:57:3905:6:786432:1024:5] TO [82000000:1:0:3:0] FINISHED WITH OK ***
*** PUT BLOB [72075186270680851:57:3905:6:786432:1024:6] TO [82000000:1:0:3:0] FINISHED WITH OK ***
*** PUT BLOB [72075186270680851:57:3905:6:786432:1024:1] TO [82000000:1:0:5:0] FINISHED WITH OK ***
*** PUT BLOB [72075186270680851:57:3905:6:786432:1024:2] TO [82000000:1:0:6:0] FINISHED WITH OK ***
*** PUT BLOB [72075186270680851:57:3905:6:786432:1024:3] TO [82000000:1:0:7:0] FINISHED WITH OK ***
*** PUT BLOB [72075186270680851:57:3905:6:786432:1024:4] TO [82000000:1:0:0:0] FINISHED WITH OK ***
*** PUT BLOB [72075186270680851:57:3905:6:786432:1024:5] TO [82000000:1:0:1:0] FINISHED WITH OK ***
*** PUT BLOB [72075186270680851:57:3905:6:786432:1024:6] TO [82000000:1:0:2:0] FINISHED WITH OK ***
Group is disintegrated or has network problems
*** PUT BLOB [72075186270680851:57:3905:6:786432:1024:1] TO [82000000:1:0:5:0] FINISHED WITH OK ***
*** PUT BLOB [72075186270680851:57:3905:6:786432:1024:2] TO [82000000:1:0:6:0] FINISHED WITH OK ***
*** PUT BLOB [72075186270680851:57:3905:6:786432:1024:3] TO [82000000:1:0:7:0] FINISHED WITH OK ***
*** PUT BLOB [72075186270680851:57:3905:6:786432:1024:4] TO [82000000:1:0:0:0] FINISHED WITH OK ***
*** PUT BLOB [72075186270680851:57:3905:6:786432:1024:5] TO [82000000:1:0:1:0] FINISHED WITH OK ***
Disks: 0: [82000000:1:0:5:0] 1: [82000000:1:0:6:0] 2: [82000000:1:0:7:0] 3: [82000000:1:0:0:0] 4: [82000000:1:0:1:0] 5: [82000000:1:0:2:0] 6: [82000000:1:0:3:0] 7: [82000000:1:0:4:0]
Layout info: part 1: part 2: part 3: ver0 disks [ 2 ] part 4: ver0 disks [ 3 ] part 5: ver0 disks [ 4 ] part 6:
|81.5%| [TM] {asan, default-linux-x86_64, release} ydb/core/util/ut/unittest >> TFragmentedBufferTest::ReadWriteRandom [GOOD]
|81.5%| [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/common/result_set_format/ut/unittest >> KqpFormats_Arrow_Conversion::DataType_Void [GOOD]
>> KqpFormats_Arrow_Conversion::NestedType_Variant_Struct
|81.5%| [TM] {BAZEL_UPLOAD} ydb/core/blobstorage/ut_blobstorage/ut_check_integrity/unittest
|81.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/client/server/grpc_server.cpp
|81.5%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/blobstorage/incrhuge/ut/ydb-core-blobstorage-incrhuge-ut
|81.5%| [TM] {BAZEL_UPLOAD} ydb/core/util/ut/unittest
|81.5%| [LD] {RESULT} $(B)/ydb/core/blobstorage/incrhuge/ut/ydb-core-blobstorage-incrhuge-ut
|81.5%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/blobstorage/incrhuge/ut/ydb-core-blobstorage-incrhuge-ut
|81.5%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/tests/functional/cms/ydb-tests-functional-cms
|81.6%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/blobstorage/ut_blobstorage/ut_vdisk_restart/blobstorage-ut_blobstorage-ut_vdisk_restart
>> KqpFormats_Arrow_Conversion::NestedType_Variant_Struct [GOOD]
>> KqpFormats_Arrow_Conversion::NestedType_Variant_Tuple [GOOD]
>> KqpFormats_Arrow_Conversion::NestedType_Variant_Nested
>> KqpFormats_Arrow_Conversion::NestedType_Dict_TaggedKey [GOOD]
>> KqpFormats_Arrow_Conversion::NestedType_Dict_OptionalVariantKey
>> KqpFormats_Arrow_Conversion::NestedType_Struct_OptionalValue
|81.6%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/tests/functional/cms/ydb-tests-functional-cms
|81.6%| [LD] {RESULT} $(B)/ydb/tests/functional/cms/ydb-tests-functional-cms
|81.6%| [LD] {RESULT} $(B)/ydb/core/blobstorage/ut_blobstorage/ut_vdisk_restart/blobstorage-ut_blobstorage-ut_vdisk_restart
|81.6%| [TM] {BAZEL_UPLOAD} ydb/core/kqp/common/result_set_format/ut/unittest
|81.6%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/blobstorage/ut_blobstorage/ut_vdisk_restart/blobstorage-ut_blobstorage-ut_vdisk_restart
>> TBlobStorageHullHugeChain::HeapAllocSmall [GOOD]
>> TBlobStorageHullHugeHeap::AllocateAllFromOneChunk [GOOD]
>> TChainLayoutBuilder::TestProdConf [GOOD]
>> TChainLayoutBuilder::TestMilestoneId [GOOD]
>> KqpFormats_Arrow_Conversion::NestedType_Variant_Nested [GOOD]
>> KqpFormats_Arrow_Conversion::NestedType_Variant_OverLimit [GOOD]
>> KqpFormats_Arrow_Conversion::NestedType_Variant_Comprehensive
|81.6%| [TS] {asan, default-linux-x86_64, release} ydb/library/actors/testlib/ut/unittest
|81.6%| [TS] {BAZEL_UPLOAD} ydb/library/actors/testlib/ut/unittest
>> KqpFormats_Arrow_Conversion::NestedType_Variant_Comprehensive [GOOD]
>> KqpFormats_Arrow_Conversion::PgType_Bool
>> KqpFormats_Arrow_Conversion::NestedType_Struct_OptionalValue [GOOD]
>> KqpFormats_Arrow_Conversion::NestedType_Struct_TaggedValue [GOOD]
>> KqpFormats_Arrow_Conversion::NestedType_Tagged_DataValue [GOOD]
>> KqpFormats_Arrow_Conversion::NestedType_Tagged_ListValue
>> KqpFormats_Arrow_Conversion::NestedType_Tagged_ListValue [GOOD]
>> KqpFormats_Arrow_Conversion::NestedType_Tagged_DictValue
>> KqpFormats_Arrow_Conversion::PgType_Bool [GOOD]
>> KqpFormats_Arrow_Conversion::PgType_Int8
|81.6%| [TS] {asan, default-linux-x86_64, release} ydb/library/actors/testlib/ut/unittest
|81.6%| [TS] {BAZEL_UPLOAD} ydb/library/actors/testlib/ut/unittest
|81.6%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/vdisk/huge/ut/unittest >> TChainLayoutBuilder::TestMilestoneId [GOOD]
>> KqpFormats_Arrow_Conversion::NestedType_Dict_OptionalVariantKey [GOOD]
|81.6%| [TM] {BAZEL_UPLOAD} ydb/core/blobstorage/vdisk/huge/ut/unittest
|81.6%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/client/server/libcore-client-server.a
>> KqpFormats_Arrow_Conversion::NestedType_Tagged_DictValue [GOOD]
>> KqpFormats_Arrow_Conversion::NestedType_Tagged_OptionalValue [GOOD]
>> KqpFormats_Arrow_Conversion::NestedType_Tagged_OptionalOptionalValue [GOOD]
>> KqpFormats_Arrow_Conversion::NestedType_Tagged_OptionalTaggedValue [GOOD]
>> TQueueBackpressureTest::PerfTrivial [GOOD]
|81.6%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/vdisk/huge/ut/unittest >> TBlobStorageHullHugeHeap::AllocateAllFromOneChunk [GOOD]
|81.6%| [AR] {RESULT} $(B)/ydb/core/client/server/libcore-client-server.a
|81.6%| [TM] {BAZEL_UPLOAD} ydb/core/blobstorage/vdisk/huge/ut/unittest
>> KqpFormats_Arrow_Conversion::PgType_Int8 [GOOD]
>> KqpFormats_Arrow_Conversion::NestedType_Struct_DataValue
>> KqpFormats_Arrow_Conversion::PgType_Text
>> KqpFormats_Arrow_Conversion::NestedType_Struct_DataValue [GOOD]
>> KqpFormats_Arrow_Conversion::NestedType_Struct_NestedValue
>> KqpFormats_Arrow_Conversion::NestedType_Struct_NestedValue [GOOD]
>> KqpFormats_Arrow_Conversion::NestedType_Optional_SingularValue [GOOD]
>> KqpFormats_Arrow_Conversion::NestedType_Optional_StructValue [GOOD]
>> KqpFormats_Arrow_Conversion::NestedType_Optional_TupleValue [GOOD]
>> KqpFormats_Arrow_Conversion::NestedType_Optional_VariantValue [GOOD]
>> KqpFormats_Arrow_Conversion::NestedType_Optional_TaggedValue [GOOD]
>> KqpFormats_Arrow_Conversion::NestedType_Optional_PgValue [GOOD]
|81.6%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/backpressure/ut/unittest >> TQueueBackpressureTest::PerfTrivial [GOOD]
|81.6%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/mvp/meta/bin/mvp_meta
|81.6%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/mvp/meta/bin/mvp_meta
|81.6%| [LD] {RESULT} $(B)/ydb/mvp/meta/bin/mvp_meta
|81.6%| [TM] {BAZEL_UPLOAD} ydb/core/blobstorage/backpressure/ut/unittest
>> PgTest::DumpStringCells
|81.6%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/tools/blobsan/blobsan
|81.6%| [LD] {RESULT} $(B)/ydb/tools/blobsan/blobsan
>> PgTest::DumpStringCells [GOOD]
|81.6%| [TS] {asan, default-linux-x86_64, release} ydb/library/actors/testlib/ut/unittest
|81.6%| [TS] {BAZEL_UPLOAD} ydb/library/actors/testlib/ut/unittest
|81.6%| [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/common/result_set_format/ut/unittest >> KqpFormats_Arrow_Conversion::NestedType_Tagged_OptionalTaggedValue [GOOD]
|81.6%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/client/server/libcore-client-server.a
|81.6%| [TM] {BAZEL_UPLOAD} ydb/core/kqp/common/result_set_format/ut/unittest
>> KqpFormats_Arrow_Conversion::PgType_Text [GOOD]
|81.6%| [TS] {asan, default-linux-x86_64, release} ydb/core/scheme/ut_pg/unittest
|81.6%| [TS] {BAZEL_UPLOAD} ydb/core/scheme/ut_pg/unittest
|81.6%| [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/common/result_set_format/ut/unittest >> KqpFormats_Arrow_Conversion::NestedType_Dict_OptionalVariantKey [GOOD]
>> FastLookupUniqueList::Stress [GOOD]
>> StLog::Basic [GOOD]
>> TBlobStorageHullHugeHeap::AllocateAllReleaseAll [GOOD]
>> TBlobStorageHullHugeHeap::AllocateAllSerializeDeserializeReleaseAll
>> TBlobStorageHullHugeHeap::AllocateAllSerializeDeserializeReleaseAll [GOOD]
|81.6%| [TM] {BAZEL_UPLOAD} ydb/core/kqp/common/result_set_format/ut/unittest
|81.6%| [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/common/result_set_format/ut/unittest >> KqpFormats_Arrow_Conversion::NestedType_Optional_PgValue [GOOD]
|81.6%| [TM] {BAZEL_UPLOAD} ydb/core/kqp/common/result_set_format/ut/unittest
>> TBlobStorageHullHugeChain::AllocFreeAllocTest [GOOD]
>> TBlobStorageHullHugeChain::AllocFreeRestartAllocTest [GOOD]
|81.6%| [TS] {asan, default-linux-x86_64, release} ydb/core/scheme/ut_pg/unittest >> PgTest::DumpStringCells [GOOD]
|81.6%| [TS] {BAZEL_UPLOAD} ydb/core/scheme/ut_pg/unittest
|81.6%| [TS] {asan, default-linux-x86_64, release} ydb/core/scheme/ut_pg/unittest
|81.6%| [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/common/result_set_format/ut/unittest >> KqpFormats_Arrow_Conversion::PgType_Text [GOOD]
|81.6%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/blobstorage/ut_blobstorage/ut_osiris/ydb-core-blobstorage-ut_blobstorage-ut_osiris
>> TMemoryStatsAggregator::Aggregate_Empty [GOOD]
>> TMemoryStatsAggregator::Aggregate_Single [GOOD]
>> TMemoryStatsAggregator::Aggregate_Summarize_ExternalConsumption_DifferentHosts [GOOD]
>> TMemoryStatsAggregator::Aggregate_Summarize_NoExternalConsumption_DifferentHosts [GOOD]
>> TMemoryStatsAggregator::Aggregate_Summarize_ExternalConsumption_OneHost [GOOD]
>> TMemoryStatsAggregator::Aggregate_ExternalConsumption_CollidingHosts [GOOD]
>> PgTest::DumpIntCells
|81.6%| [LD] {RESULT} $(B)/ydb/core/blobstorage/ut_blobstorage/ut_osiris/ydb-core-blobstorage-ut_blobstorage-ut_osiris
|81.6%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/blobstorage/ut_blobstorage/ut_osiris/ydb-core-blobstorage-ut_blobstorage-ut_osiris
|81.6%| [TS] {BAZEL_UPLOAD} ydb/core/scheme/ut_pg/unittest
|81.6%| [TM] {BAZEL_UPLOAD} ydb/core/kqp/common/result_set_format/ut/unittest
|81.6%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/vdisk/huge/ut/unittest >> TBlobStorageHullHugeHeap::AllocateAllSerializeDeserializeReleaseAll [GOOD]
|81.6%| [TM] {BAZEL_UPLOAD} ydb/core/blobstorage/vdisk/huge/ut/unittest
>> TableIndex::CompatibleSecondaryIndex [GOOD]
>> TableIndex::NotCompatibleSecondaryIndex [GOOD]
>> TableIndex::CompatibleVectorIndex [GOOD]
>> TableIndex::NotCompatibleVectorIndex [GOOD]
>> TStateStorageConfigCompareWithOld::TestReplicaActorIdAndSelectionIsSame3
>> PgTest::DumpIntCells [GOOD]
|81.6%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/vdisk/huge/ut/unittest >> TBlobStorageHullHugeChain::AllocFreeRestartAllocTest [GOOD]
|81.6%| [TM] {BAZEL_UPLOAD} ydb/core/blobstorage/vdisk/huge/ut/unittest
|81.6%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/blobstorage/ut_blobstorage/ut_scrub/ydb-core-blobstorage-ut_blobstorage-ut_scrub
|81.6%| [LD] {RESULT} $(B)/ydb/core/blobstorage/ut_blobstorage/ut_scrub/ydb-core-blobstorage-ut_blobstorage-ut_scrub
|81.6%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/blobstorage/ut_blobstorage/ut_scrub/ydb-core-blobstorage-ut_blobstorage-ut_scrub
------- [TM] {asan, default-linux-x86_64, release} ydb/core/base/ut/unittest >> TMemoryStatsAggregator::Aggregate_ExternalConsumption_CollidingHosts [GOOD]
Test command err:
AnonRss: 11 CGroupLimit: 21 MemTotal: 31 MemAvailable: 41 AllocatedMemory: 51 AllocatorCachesMemory: 61 HardLimit: 71 SoftLimit: 81 TargetUtilization: 91 ExternalConsumption: 101 SharedCacheConsumption: 111 SharedCacheLimit: 121 MemTableConsumption: 131 MemTableLimit: 141 QueryExecutionConsumption: 151 QueryExecutionLimit: 161
AnonRss: 11 CGroupLimit: 21 MemTotal: 31 MemAvailable: 41 AllocatedMemory: 51 AllocatorCachesMemory: 61 HardLimit: 71 SoftLimit: 81 TargetUtilization: 91 ExternalConsumption: 101 SharedCacheConsumption: 111 SharedCacheLimit: 121 MemTableConsumption: 131 MemTableLimit: 141 QueryExecutionConsumption: 151 QueryExecutionLimit: 161
AnonRss: 11 CGroupLimit: 21 MemTotal: 31 MemAvailable: 41 AllocatedMemory: 51 AllocatorCachesMemory: 61 HardLimit: 71 SoftLimit: 81 TargetUtilization: 91 ExternalConsumption: 101 SharedCacheConsumption: 111 SharedCacheLimit: 121 MemTableConsumption: 131 MemTableLimit: 141 QueryExecutionConsumption: 151 QueryExecutionLimit: 161
AnonRss: 12 CGroupLimit: 22 MemTotal: 32 MemAvailable: 42 AllocatedMemory: 52 AllocatorCachesMemory: 62 HardLimit: 72 SoftLimit: 82 TargetUtilization: 92 ExternalConsumption: 102 SharedCacheConsumption: 112 SharedCacheLimit: 122 MemTableConsumption: 132 MemTableLimit: 142 QueryExecutionConsumption: 152 QueryExecutionLimit: 162
AnonRss: 13 CGroupLimit: 23 MemTotal: 33 MemAvailable: 43 AllocatedMemory: 53 AllocatorCachesMemory: 63 HardLimit: 73 SoftLimit: 83 TargetUtilization: 93 ExternalConsumption: 103 SharedCacheConsumption: 113 SharedCacheLimit: 123 MemTableConsumption: 133 MemTableLimit: 143 QueryExecutionConsumption: 153 QueryExecutionLimit: 163
AnonRss: 36 CGroupLimit: 66 MemTotal: 96 MemAvailable: 126 AllocatedMemory: 156 AllocatorCachesMemory: 186 HardLimit: 216 SoftLimit: 246 TargetUtilization: 276 ExternalConsumption: 306 SharedCacheConsumption: 336 SharedCacheLimit: 366 MemTableConsumption: 396 MemTableLimit: 426 QueryExecutionConsumption: 456 QueryExecutionLimit: 486
AnonRss: 11 CGroupLimit: 21 MemTotal: 31 MemAvailable: 41 AllocatedMemory: 51 AllocatorCachesMemory: 61 HardLimit: 71 SoftLimit: 81 TargetUtilization: 91 SharedCacheConsumption: 111 SharedCacheLimit: 121 MemTableConsumption: 131 MemTableLimit: 141 QueryExecutionConsumption: 151 QueryExecutionLimit: 161
AnonRss: 12 CGroupLimit: 22 MemTotal: 32 MemAvailable: 42 AllocatedMemory: 52 AllocatorCachesMemory: 62 HardLimit: 72 SoftLimit: 82 TargetUtilization: 92 SharedCacheConsumption: 112 SharedCacheLimit: 122 MemTableConsumption: 132 MemTableLimit: 142 QueryExecutionConsumption: 152 QueryExecutionLimit: 162
AnonRss: 13 CGroupLimit: 23 MemTotal: 33 MemAvailable: 43 AllocatedMemory: 53 AllocatorCachesMemory: 63 HardLimit: 73 SoftLimit: 83 TargetUtilization: 93 SharedCacheConsumption: 113 SharedCacheLimit: 123 MemTableConsumption: 133 MemTableLimit: 143 QueryExecutionConsumption: 153 QueryExecutionLimit: 163
AnonRss: 36 CGroupLimit: 66 MemTotal: 96 MemAvailable: 126 AllocatedMemory: 156 AllocatorCachesMemory: 186 HardLimit: 216 SoftLimit: 246 TargetUtilization: 276 SharedCacheConsumption: 336 SharedCacheLimit: 366 MemTableConsumption: 396 MemTableLimit: 426 QueryExecutionConsumption: 456 QueryExecutionLimit: 486
AnonRss: 11 CGroupLimit: 21 MemTotal: 31 MemAvailable: 41 AllocatedMemory: 51 AllocatorCachesMemory: 61 HardLimit: 71 SoftLimit: 81 TargetUtilization: 91 ExternalConsumption: 101 SharedCacheConsumption: 111 SharedCacheLimit: 121 MemTableConsumption: 131 MemTableLimit: 141 QueryExecutionConsumption: 151 QueryExecutionLimit: 161
AnonRss: 12 CGroupLimit: 22 MemTotal: 32 MemAvailable: 42 AllocatedMemory: 52 AllocatorCachesMemory: 62 HardLimit: 72 SoftLimit: 82 TargetUtilization: 92 ExternalConsumption: 102 SharedCacheConsumption: 112 SharedCacheLimit: 122 MemTableConsumption: 132 MemTableLimit: 142 QueryExecutionConsumption: 152 QueryExecutionLimit: 162
AnonRss: 13 CGroupLimit: 23 MemTotal: 33 MemAvailable: 43 AllocatedMemory: 53 AllocatorCachesMemory: 63 HardLimit: 73 SoftLimit: 83 TargetUtilization: 93 ExternalConsumption: 103 SharedCacheConsumption: 113 SharedCacheLimit: 123 MemTableConsumption: 133 MemTableLimit: 143 QueryExecutionConsumption: 153 QueryExecutionLimit: 163
AnonRss: 36 CGroupLimit: 66 MemTotal: 33 MemAvailable: 43 AllocatedMemory: 156 AllocatorCachesMemory: 186 HardLimit: 73 SoftLimit: 83 TargetUtilization: 93 ExternalConsumption: 80 SharedCacheConsumption: 336 SharedCacheLimit: 366 MemTableConsumption: 396 MemTableLimit: 426 QueryExecutionConsumption: 456 QueryExecutionLimit: 486
AnonRss: 11 CGroupLimit: 21 MemTotal: 31 MemAvailable: 41 AllocatedMemory: 51 AllocatorCachesMemory: 61 HardLimit: 71 SoftLimit: 81 TargetUtilization: 91 ExternalConsumption: 101 SharedCacheConsumption: 111 SharedCacheLimit: 121 MemTableConsumption: 131 MemTableLimit: 141 QueryExecutionConsumption: 151 QueryExecutionLimit: 161
AnonRss: 12 CGroupLimit: 22 MemTotal: 32 MemAvailable: 42 AllocatedMemory: 52 AllocatorCachesMemory: 62 HardLimit: 72 SoftLimit: 82 TargetUtilization: 92 ExternalConsumption: 102 SharedCacheConsumption: 112 SharedCacheLimit: 122 MemTableConsumption: 132 MemTableLimit: 142 QueryExecutionConsumption: 152 QueryExecutionLimit: 162
AnonRss: 13 CGroupLimit: 23 MemTotal: 33 MemAvailable: 43 AllocatedMemory: 53 AllocatorCachesMemory: 63 HardLimit: 73 SoftLimit: 83 TargetUtilization: 93 ExternalConsumption: 103 SharedCacheConsumption: 113 SharedCacheLimit: 123 MemTableConsumption: 133 MemTableLimit: 143 QueryExecutionConsumption: 153 QueryExecutionLimit: 163
AnonRss: 36 CGroupLimit: 66 MemTotal: 65 MemAvailable: 85 AllocatedMemory: 156 AllocatorCachesMemory: 186 HardLimit: 145 SoftLimit: 165 TargetUtilization: 185 ExternalConsumption: 194 SharedCacheConsumption: 336 SharedCacheLimit: 366 MemTableConsumption: 396 MemTableLimit: 426 QueryExecutionConsumption: 456 QueryExecutionLimit: 486
|81.7%| [TM] {BAZEL_UPLOAD} ydb/core/base/ut/unittest
>> NFulltext::Analyze [GOOD]
>> NFulltext::AnalyzeRu [GOOD]
>> NFulltext::AnalyzeInvalid [GOOD]
>> NFulltext::AnalyzeFilterLength [GOOD]
>> NFulltext::AnalyzeFilterLengthRu [GOOD]
>> NFulltext::AnalyzeFilterNgram [GOOD]
>> NFulltext::AnalyzeFilterSnowball [GOOD]
|81.7%| [TS] {asan, default-linux-x86_64, release} ydb/core/scheme/ut_pg/unittest >> PgTest::DumpIntCells [GOOD]
------- [TM] {asan, default-linux-x86_64, release} ydb/core/util/ut/unittest >> StLog::Basic [GOOD]
Test command err:
Producer 0 worked for 0.1560558079 seconds
Producer 1 worked for 0.2113484877 seconds
Consumer 0 worked for 0.5498892495 seconds
Consumer 1 worked for 0.238532322 seconds
Consumer 2 worked for 0.6035549473 seconds
Consumer 3 worked for 0.4461259149 seconds
>> Path::CanonizedStringIsSame1 [GOOD]
>> Path::CanonizedStringIsSame2 [GOOD]
>> Path::Name_EnglishAlphabet [GOOD]
>> Path::Name_RussianAlphabet [GOOD]
>> Path::Name_AllSymbols [GOOD]
>> Path::Name_ExtraSymbols [GOOD]
>> TQueueBackpressureTest::PerfInFlight [GOOD]
|81.7%| [TS] {BAZEL_UPLOAD} ydb/core/scheme/ut_pg/unittest
|81.7%| [TM] {BAZEL_UPLOAD} ydb/core/util/ut/unittest
|81.7%| [LD] {BAZEL_UPLOAD} $(B)/ydb/tools/blobsan/blobsan
|81.7%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/ut_blobstorage/ut_vdisk_restart/unittest
|81.7%| [TM] {BAZEL_UPLOAD} ydb/core/blobstorage/ut_blobstorage/ut_vdisk_restart/unittest
|81.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/driver_lib/run/run.cpp
>> TBlobStorageGroupTypeTest::TestCorrectLayout [GOOD]
>> TBlobStorageGroupTypeTest::OutputInfoAboutErasureSpecies [GOOD]
>> Path::Name_RussianAlphabet_SetLocale_C [GOOD]
>> Path::Name_RussianAlphabet_SetLocale_C_UTF8 [GOOD]
>> Path::Name_WeirdLocale_RegularName [GOOD]
>> Path::Name_WeirdLocale_WeirdName [GOOD]
|81.7%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/ut_blobstorage/ut_vdisk_restart/unittest
|81.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/driver_lib/run/run.cpp
|81.7%| [TM] {BAZEL_UPLOAD} ydb/core/blobstorage/ut_blobstorage/ut_vdisk_restart/unittest
|81.7%| [TM] {asan, default-linux-x86_64, release} ydb/core/base/ut/unittest >> NFulltext::AnalyzeFilterSnowball [GOOD]
|81.7%| [TM] {asan, default-linux-x86_64, release} ydb/core/base/ut/unittest >> Path::Name_ExtraSymbols [GOOD]
|81.7%| [TM] {BAZEL_UPLOAD} ydb/core/base/ut/unittest
>> TPageMapTest::TestRandom [GOOD]
>> TPageMapTest::TestIntrusive [GOOD]
>> TPageMapTest::TestSimplePointer [GOOD]
>> TPageMapTest::TestSharedPointer [GOOD]
>> TPageMapTest::TestSimplePointerFull
|81.7%| [TM] {BAZEL_UPLOAD} ydb/core/base/ut/unittest
|81.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/tx/datashard/datashard_ut_incremental_restore_scan.cpp
|81.7%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/tests/stability/tests/ydb-tests-stability-tests
|81.7%| [LD] {RESULT} $(B)/ydb/tests/stability/tests/ydb-tests-stability-tests
>> TPageMapTest::TestSimplePointerFull [GOOD]
>> TPriorityOperationQueueTest::ShouldStartEmpty [GOOD]
>> TPriorityOperationQueueTest::ShouldNotStartUntilStart [GOOD]
>> TPriorityOperationQueueTest::ShouldStartByPriority [GOOD]
>> TPriorityOperationQueueTest::ShouldStartByPriorityWithRemove [GOOD]
>> TPriorityOperationQueueTest::ShouldReturnExecTimeWhenUpdateRunningPriority [GOOD]
|81.7%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/tests/stability/tests/ydb-tests-stability-tests
|81.7%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/ut_blobstorage/ut_vdisk_restart/unittest
|81.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/tx/datashard/datashard_ut_incremental_restore_scan.cpp
|81.7%| [TM] {BAZEL_UPLOAD} ydb/core/blobstorage/ut_blobstorage/ut_vdisk_restart/unittest
|81.7%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/backpressure/ut/unittest >> TQueueBackpressureTest::PerfInFlight [GOOD]
>> VDiskRestart::Simple [GOOD]
|81.7%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/library/yql/tools/dq/service_node/service_node
|81.7%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/library/yql/tools/dq/service_node/service_node
|81.7%| [TM] {BAZEL_UPLOAD} ydb/core/blobstorage/backpressure/ut/unittest
|81.7%| [LD] {RESULT} $(B)/ydb/library/yql/tools/dq/service_node/service_node
|81.7%| [TM] {asan, default-linux-x86_64, release} ydb/core/base/ut/unittest >> Path::Name_WeirdLocale_WeirdName [GOOD]
|81.7%| [TM] {BAZEL_UPLOAD} ydb/core/base/ut/unittest
|81.7%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/blobstorage/ut_blobstorage/ut_comp_defrag/core-blobstorage-ut_blobstorage-ut_comp_defrag
|81.7%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/blobstorage/ut_blobstorage/ut_comp_defrag/core-blobstorage-ut_blobstorage-ut_comp_defrag
|81.7%| [LD] {RESULT} $(B)/ydb/core/blobstorage/ut_blobstorage/ut_comp_defrag/core-blobstorage-ut_blobstorage-ut_comp_defrag
|81.7%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/ut_blobstorage/ut_vdisk_restart/unittest >> VDiskRestart::Simple [GOOD]
|81.7%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/ut_blobstorage/ut_vdisk_restart/unittest
|81.7%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/ut_blobstorage/ut_vdisk_restart/unittest
|81.7%| [TM] {BAZEL_UPLOAD} ydb/core/blobstorage/ut_blobstorage/ut_vdisk_restart/unittest
|81.7%| [TM] {BAZEL_UPLOAD} ydb/core/blobstorage/ut_blobstorage/ut_vdisk_restart/unittest
|81.7%| [TM] {BAZEL_UPLOAD} ydb/core/blobstorage/ut_blobstorage/ut_vdisk_restart/unittest
|81.7%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/ut_blobstorage/ut_vdisk_restart/unittest
|81.7%| [TM] {BAZEL_UPLOAD} ydb/core/blobstorage/ut_blobstorage/ut_vdisk_restart/unittest
|81.7%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/tablet_flat/ut_pg/ydb-core-tablet_flat-ut_pg
|81.7%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tablet_flat/ut_pg/ydb-core-tablet_flat-ut_pg
|81.7%| [LD] {RESULT} $(B)/ydb/core/tablet_flat/ut_pg/ydb-core-tablet_flat-ut_pg
|81.8%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/ut_blobstorage/ut_vdisk_restart/unittest
|81.8%| [TM] {BAZEL_UPLOAD} ydb/core/blobstorage/ut_blobstorage/ut_vdisk_restart/unittest
|81.8%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/incrhuge/ut/unittest
>> TIncrHugeBasicTest::Defrag
|81.8%| [TM] {BAZEL_UPLOAD} ydb/core/blobstorage/incrhuge/ut/unittest
|81.8%| [TM] {asan, default-linux-x86_64, release} ydb/core/util/ut/unittest >> TPriorityOperationQueueTest::ShouldReturnExecTimeWhenUpdateRunningPriority [GOOD]
|81.8%| [TM] {BAZEL_UPLOAD} ydb/core/util/ut/unittest
|81.8%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/incrhuge/ut/unittest
|81.8%| [TM] {BAZEL_UPLOAD} ydb/core/blobstorage/incrhuge/ut/unittest
>> TStateStorageConfigCompareWithOld::TestReplicaActorIdAndSelectionIsSame3 [GOOD]
>> TStateStorageConfigCompareWithOld::TestReplicaActorIdAndSelectionIsSame4 [GOOD]
|81.8%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/incrhuge/ut/unittest >> TIncrHugeBlobIdDict::Basic [GOOD]
>> TIncrHugeBasicTest::WriteReadDeleteEnum [GOOD]
|81.8%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/blobstorage/ut_blobstorage/ut_phantom_blobs/blobstorage-ut_blobstorage-ut_phantom_blobs
|81.8%| [TM] {BAZEL_UPLOAD} ydb/core/blobstorage/incrhuge/ut/unittest
|81.8%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/blobstorage/ut_blobstorage/ut_phantom_blobs/blobstorage-ut_blobstorage-ut_phantom_blobs
|81.8%| [LD] {RESULT} $(B)/ydb/core/blobstorage/ut_blobstorage/ut_phantom_blobs/blobstorage-ut_blobstorage-ut_phantom_blobs
>> TIncrHugeBasicTest::Recovery [GOOD]
>> TIncrHugeBasicTest::WriteReadDeleteEnumRecover [GOOD]
>> TLogoBlobIdHashTest::SimpleTestWithDifferentTabletId [GOOD]
>> TLogoBlobIdHashTest::SimpleTestWithDifferentSteps [GOOD]
>> TLogoBlobIdHashTest::SimpleTestWithDifferentChannel [GOOD]
>> TLogoBlobTest::LogoBlobParse [GOOD]
>> TLogoBlobTest::LogoBlobCompare [GOOD]
>> TLogoBlobTest::LogoBlobSort [GOOD]
>> TGuardianImpl::FollowerTracker [GOOD]
>> TGuardianImpl::FollowerTrackerDuplicates [GOOD]
>> TLocalDbTest::BackupTaskNameChangedAtLoadTime [GOOD]
>> TLogoBlobIdHashTest::SimpleTest [GOOD]
>> TLogoBlobIdHashTest::SimpleTestPartIdDoesNotMatter [GOOD]
>> TLogoBlobIdHashTest::SimpleTestBlobSizeDoesNotMatter [GOOD]
|81.8%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/incrhuge/ut/unittest
|81.8%| [TM] {BAZEL_UPLOAD} ydb/core/blobstorage/incrhuge/ut/unittest
>> NFulltext::ValidateColumnsMatches [GOOD]
>> NFulltext::ValidateSettings [GOOD]
>> NFulltext::FillSetting [GOOD]
>> NFulltext::FillSettingInvalid [GOOD]
>> NKMeans::ValidateSettings [GOOD]
>> Path::CanonizeOld [GOOD]
>> Path::CanonizeFast [GOOD]
>> TMemoryStatsAggregator::Aggregate_Summarize_NoExternalConsumption_OneHost [GOOD]
>> TMemoryStatsAggregator::Compaction_Single [GOOD]
>> TStateStorageConfig::SameConfigurationTest [GOOD]
>> TStateStorageConfig::Tablet72075186224040026Test [GOOD]
>> TStateStorageConfig::NonDuplicatedNodesTest
|81.8%| [TM] {asan, default-linux-x86_64, release} ydb/core/base/ut/unittest >> TStateStorageConfigCompareWithOld::TestReplicaActorIdAndSelectionIsSame4 [GOOD]
|81.8%| [TM] {BAZEL_UPLOAD} ydb/core/base/ut/unittest
|81.8%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/incrhuge/ut/unittest >> TIncrHugeBlobIdDict::Basic [GOOD]
|81.8%| [TM] {BAZEL_UPLOAD} ydb/core/blobstorage/incrhuge/ut/unittest
|81.8%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/library/yql/tools/dq/worker_node/worker_node
|81.8%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/library/yql/tools/dq/worker_node/worker_node
|81.8%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/incrhuge/ut/unittest >> TIncrHugeBasicTest::WriteReadDeleteEnum [GOOD]
|81.8%| [LD] {RESULT} $(B)/ydb/library/yql/tools/dq/worker_node/worker_node
|81.8%| [TM] {BAZEL_UPLOAD} ydb/core/blobstorage/incrhuge/ut/unittest
|81.8%| [TM] {asan, default-linux-x86_64, release} ydb/core/base/ut/unittest >> TLogoBlobIdHashTest::SimpleTestBlobSizeDoesNotMatter [GOOD]
>> TStateStorageConfig::TestReplicaSelection
|81.8%| [TM] {BAZEL_UPLOAD} ydb/core/base/ut/unittest
|81.8%| [TS] {asan, default-linux-x86_64, release} ydb/core/scheme/ut_pg/unittest
|81.8%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/incrhuge/ut/unittest >> TIncrHugeBasicTest::Recovery [GOOD]
|81.8%| [TS] {BAZEL_UPLOAD} ydb/core/scheme/ut_pg/unittest
|81.8%| [TM] {BAZEL_UPLOAD} ydb/core/blobstorage/incrhuge/ut/unittest
|81.8%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/incrhuge/ut/unittest >> TIncrHugeBasicTest::WriteReadDeleteEnumRecover [GOOD]
|81.8%| [TM] {asan, default-linux-x86_64, release} ydb/core/base/ut/unittest >> TLogoBlobTest::LogoBlobSort [GOOD]
|81.8%| [TM] {BAZEL_UPLOAD} ydb/core/blobstorage/incrhuge/ut/unittest
|81.8%| [TM] {asan, default-linux-x86_64, release} ydb/core/base/ut/unittest >> Path::CanonizeFast [GOOD]
|81.8%| [TM] {BAZEL_UPLOAD} ydb/core/base/ut/unittest
|81.8%| [TM] {BAZEL_UPLOAD} ydb/core/base/ut/unittest
|81.8%| [TS] {asan, default-linux-x86_64, release} ydb/core/scheme/ut_pg/unittest
|81.8%| [TS] {asan, default-linux-x86_64, release} ydb/core/scheme/ut_pg/unittest
|81.8%| [TS] {BAZEL_UPLOAD} ydb/core/scheme/ut_pg/unittest
|81.8%| [TS] {BAZEL_UPLOAD} ydb/core/scheme/ut_pg/unittest
|81.8%| [TS] {asan, default-linux-x86_64, release} ydb/library/actors/testlib/ut/unittest
|81.8%| [TS] {asan, default-linux-x86_64, release} ydb/library/actors/testlib/ut/unittest
>> KqpFormats_Arrow_Conversion::DataType_JsonDocument [GOOD]
>> KqpFormats_Arrow_Conversion::DataType_Null [GOOD]
>> TAwsSignature::SignPayload [GOOD]
|81.8%| [TS] {BAZEL_UPLOAD} ydb/library/actors/testlib/ut/unittest
|81.8%| [TS] {BAZEL_UPLOAD} ydb/library/actors/testlib/ut/unittest
|81.8%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/blobstorage/ut_pdiskfit/ut/ydb-core-blobstorage-ut_pdiskfit-ut
|81.8%| [LD] {RESULT} $(B)/ydb/core/blobstorage/ut_pdiskfit/ut/ydb-core-blobstorage-ut_pdiskfit-ut
|81.8%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/blobstorage/ut_pdiskfit/ut/ydb-core-blobstorage-ut_pdiskfit-ut
|81.9%| [TS] {asan, default-linux-x86_64, release} ydb/library/yql/providers/common/http_gateway/ut/unittest >> TAwsSignature::SignPayload [GOOD]
|81.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/ut_mirror3of4/main.cpp
|81.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/ut_mirror3of4/main.cpp
|81.9%| [TS] {BAZEL_UPLOAD} ydb/library/yql/providers/common/http_gateway/ut/unittest
>> TBlobStorageHullHugeHeap::RecoveryMode [GOOD]
>> TBlobStorageHullHugeHeap::WriteRestore [GOOD]
>> CheckIntegrityBlock42::PlacementOk
>> TBlobStorageHullHugeChain::HeapAllocLargeStandard [GOOD]
>> TBlobStorageHullHugeChain::HeapAllocLargeNonStandard [GOOD]
|81.9%| [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/common/result_set_format/ut/unittest >> KqpFormats_Arrow_Conversion::DataType_Null [GOOD]
|81.9%| [TM] {BAZEL_UPLOAD} ydb/core/kqp/common/result_set_format/ut/unittest
>> TopTest::Test2 [GOOD]
|81.9%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/vdisk/huge/ut/unittest >> TBlobStorageHullHugeHeap::WriteRestore [GOOD]
|81.9%| [TS] {asan, default-linux-x86_64, release} ydb/core/tablet_flat/ut_pg/unittest
|81.9%| [TS] {asan, default-linux-x86_64, release} ydb/core/tablet_flat/ut_pg/unittest
|81.9%| [TS] {asan, default-linux-x86_64, release} ydb/core/tablet_flat/ut_pg/unittest
|81.9%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/vdisk/huge/ut/unittest >> TBlobStorageHullHugeChain::HeapAllocLargeNonStandard [GOOD]
|81.9%| [TS] {asan, default-linux-x86_64, release} ydb/core/tablet_flat/ut_pg/unittest
|81.9%| [TM] {BAZEL_UPLOAD} ydb/core/blobstorage/vdisk/huge/ut/unittest
|81.9%| [TS] {BAZEL_UPLOAD} ydb/core/tablet_flat/ut_pg/unittest
|81.9%| [TS] {BAZEL_UPLOAD} ydb/core/tablet_flat/ut_pg/unittest
|81.9%| [TM] {BAZEL_UPLOAD} ydb/core/blobstorage/vdisk/huge/ut/unittest
|81.9%| [TS] {BAZEL_UPLOAD} ydb/core/tablet_flat/ut_pg/unittest
|81.9%| [TS] {BAZEL_UPLOAD} ydb/core/tablet_flat/ut_pg/unittest
|81.9%| [TS] {asan, default-linux-x86_64, release} ydb/core/tablet_flat/ut_pg/unittest
>> TFlatDatabasePgTest::BasicTypes
|81.9%| [TS] {asan, default-linux-x86_64, release} ydb/core/tablet_flat/ut_pg/unittest
|81.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/ut_blobstorage/discover.cpp
|81.9%| [TS] {asan, default-linux-x86_64, release} ydb/core/tablet_flat/ut_pg/unittest
|81.9%| [TS] {asan, default-linux-x86_64, release} ydb/core/tablet_flat/ut_pg/unittest
|81.9%| [TS] {BAZEL_UPLOAD} ydb/core/tablet_flat/ut_pg/unittest
|81.9%| [TM] {asan, default-linux-x86_64,
release} ydb/core/blobstorage/vdisk/huge/ut/unittest >> TopTest::Test2 [GOOD] |81.9%| [TS] {BAZEL_UPLOAD} ydb/core/tablet_flat/ut_pg/unittest >> TFlatDatabasePgTest::BasicTypes [GOOD] |81.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/ut_blobstorage/discover.cpp |81.9%| [TS] {BAZEL_UPLOAD} ydb/core/tablet_flat/ut_pg/unittest |81.9%| [TS] {BAZEL_UPLOAD} ydb/core/tablet_flat/ut_pg/unittest |81.9%| [TM] {BAZEL_UPLOAD} ydb/core/blobstorage/vdisk/huge/ut/unittest |81.9%| [TS] {asan, default-linux-x86_64, release} ydb/core/tablet_flat/ut_pg/unittest |81.9%| [TS] {asan, default-linux-x86_64, release} ydb/library/actors/testlib/ut/unittest >> CheckIntegrityBlock42::PlacementOk [GOOD] >> CheckIntegrityBlock42::PlacementOkHandoff |81.9%| [TS] {BAZEL_UPLOAD} ydb/core/tablet_flat/ut_pg/unittest |81.9%| [TS] {BAZEL_UPLOAD} ydb/library/actors/testlib/ut/unittest |81.9%| [TS] {asan, default-linux-x86_64, release} ydb/core/tablet_flat/ut_pg/unittest >> TFlatDatabasePgTest::BasicTypes [GOOD] |81.9%| [TS] {BAZEL_UPLOAD} ydb/core/tablet_flat/ut_pg/unittest |81.9%| [TA] $(B)/ydb/core/kqp/common/result_set_format/ut/test-results/unittest/{meta.json ... results_accumulator.log} |81.9%| [TS] {asan, default-linux-x86_64, release} ydb/library/actors/testlib/ut/unittest |81.9%| [TS] {BAZEL_UPLOAD} ydb/library/actors/testlib/ut/unittest |81.9%| [TA] {BAZEL_UPLOAD} $(B)/ydb/core/kqp/common/result_set_format/ut/test-results/unittest/{meta.json ... results_accumulator.log} |81.9%| [TS] {asan, default-linux-x86_64, release} ydb/library/actors/testlib/ut/unittest |81.9%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/public_http/ut/ydb-core-public_http-ut |81.9%| [TS] {BAZEL_UPLOAD} ydb/library/actors/testlib/ut/unittest |81.9%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/public_http/ut/ydb-core-public_http-ut |81.9%| [TA] {RESULT} $(B)/ydb/core/kqp/common/result_set_format/ut/test-results/unittest/{meta.json ... results_accumulator.log} |81.9%| [LD] {RESULT} $(B)/ydb/core/public_http/ut/ydb-core-public_http-ut |81.9%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/schemeshard/ut_ru_calculator/ydb-core-tx-schemeshard-ut_ru_calculator |81.9%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/schemeshard/ut_ru_calculator/ydb-core-tx-schemeshard-ut_ru_calculator |81.9%| [LD] {RESULT} $(B)/ydb/core/tx/schemeshard/ut_ru_calculator/ydb-core-tx-schemeshard-ut_ru_calculator |81.9%| [TA] $(B)/ydb/core/tablet_flat/ut_pg/test-results/unittest/{meta.json ... results_accumulator.log} |81.9%| [TA] {RESULT} $(B)/ydb/core/tablet_flat/ut_pg/test-results/unittest/{meta.json ... results_accumulator.log} >> TBsVDiskGC::GCPutKeepIntoEmptyDB |82.0%| [TA] {BAZEL_UPLOAD} $(B)/ydb/core/tablet_flat/ut_pg/test-results/unittest/{meta.json ... 
results_accumulator.log} >> TBsLocalRecovery::WriteRestartReadHuge |82.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/keyvalue/keyvalue_storage_read_request_ut.cpp >> TBlobStorageHullHugeHeap::BorderValues [GOOD] >> TBlobStorageHullHugeHeap::LockChunks [GOOD] >> TesTTestDecorator::Basic >> CheckIntegrityBlock42::PlacementOkHandoff [GOOD] >> CheckIntegrityBlock42::PlacementMissingParts >> TesTTestDecorator::Basic [GOOD] >> TBsVDiskRangeHuge::Simple3PutRangeGetNothingForwardFresh |82.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/keyvalue/keyvalue_storage_read_request_ut.cpp >> TBsVDiskRangeHuge::Simple3PutRangeGetNothingBackwardFresh >> TBsVDiskOutOfSpace::WriteUntilOrangeZone [GOOD] >> TBsVDiskOutOfSpace::WriteUntilYellowZone >> TBsVDiskRange::Simple3PutRangeGetNothingForwardFresh >> TBsVDiskRepl3::SyncLogTest >> TBsVDiskGC::TGCManyVPutsDelTabletTest >> TBsDbStat::ChaoticParallelWrite_DbStat |82.0%| [TS] {asan, default-linux-x86_64, release} ydb/library/actors/testlib/ut/unittest >> TesTTestDecorator::Basic [GOOD] |82.0%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/vdisk/huge/ut/unittest >> TBlobStorageHullHugeHeap::LockChunks [GOOD] >> TBsVDiskExtreme::SimpleGetFromEmptyDB >> TBsVDiskBadBlobId::PutBlobWithBadId |82.0%| [TS] {BAZEL_UPLOAD} ydb/library/actors/testlib/ut/unittest >> TBsVDiskExtreme::Simple3Put3GetFresh |82.0%| [TM] {BAZEL_UPLOAD} ydb/core/blobstorage/vdisk/huge/ut/unittest |82.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/mind/bscontroller/ut_selfheal/self_heal_actor_ut.cpp |82.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/mind/bscontroller/ut_selfheal/self_heal_actor_ut.cpp >> CheckIntegrityBlock42::PlacementMissingParts [GOOD] >> TBsVDiskManyPutGet::ManyPutGetWaitCompaction >> TBsVDiskExtremeHandoffHuge::SimpleHndPut1SeqGetFresh >> TBsVDiskExtremeHuge::Simple3Put3GetFresh >> TBsLocalRecovery::StartStopNotEmptyDB |82.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/ut_blobstorage/block_race.cpp |82.0%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/ut_blobstorage/block_race.cpp ------- [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/ut_blobstorage/ut_check_integrity/unittest >> CheckIntegrityBlock42::PlacementMissingParts [GOOD] Test command err: RandomSeed# 12706353556972028138 *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:1] TO [82000000:1:0:5:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:2] TO [82000000:1:0:6:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:3] TO [82000000:1:0:7:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:4] TO [82000000:1:0:0:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:5] TO [82000000:1:0:1:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:6] TO [82000000:1:0:2:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:3] TO [82000000:1:0:7:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:4] TO [82000000:1:0:0:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:5] TO [82000000:1:0:1:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:6] TO [82000000:1:0:2:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:1] TO [82000000:1:0:3:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:2] TO [82000000:1:0:4:0] FINISHED WITH OK *** *** PUT BLOB 
[72075186270680851:57:3905:6:786432:1024:3] TO [82000000:1:0:7:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:4] TO [82000000:1:0:0:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:5] TO [82000000:1:0:1:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:6] TO [82000000:1:0:2:0] FINISHED WITH OK *** |82.0%| [TM] {BAZEL_UPLOAD} ydb/core/blobstorage/ut_blobstorage/ut_check_integrity/unittest |82.0%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/blobstorage/ut_blobstorage/ut_cluster_balancing/ut_blobstorage-ut_cluster_balancing |82.0%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/blobstorage/ut_blobstorage/ut_cluster_balancing/ut_blobstorage-ut_cluster_balancing |82.0%| [LD] {RESULT} $(B)/ydb/core/blobstorage/ut_blobstorage/ut_cluster_balancing/ut_blobstorage-ut_cluster_balancing >> TBsVDiskGC::GCPutKeepIntoEmptyDB [GOOD] >> TBsVDiskGC::GCPutBarrierVDisk0NoSync >> TBsVDiskExtreme::Simple3Put1SeqGetAllFresh >> TBlobStorageQueueTest::TMessageLost [GOOD] |82.0%| [TM] {asan, default-linux-x86_64, release} ydb/core/fq/libs/compute/common/ut/unittest >> TBsVDiskRange::Simple3PutRangeGetNothingForwardFresh [GOOD] >> TBsVDiskRange::Simple3PutRangeGetNothingForwardCompaction |82.0%| [TM] {BAZEL_UPLOAD} ydb/core/fq/libs/compute/common/ut/unittest >> FormatTimes::DurationUs [GOOD] >> TBsVDiskRangeHuge::Simple3PutRangeGetNothingForwardFresh [GOOD] >> TBsVDiskRangeHuge::Simple3PutRangeGetNothingForwardCompaction >> TBsVDiskBadBlobId::PutBlobWithBadId [GOOD] >> TBsVDiskBrokenPDisk::WriteUntilDeviceDeath >> TBsVDiskRepl3::SyncLogTest [GOOD] >> THugeMigration::ExtendMap_HugeBlobs >> TBsVDiskRangeHuge::Simple3PutRangeGetNothingBackwardFresh [GOOD] >> TBsVDiskRangeHuge::Simple3PutRangeGetNothingBackwardCompaction >> TBsVDiskExtreme::SimpleGetFromEmptyDB [GOOD] >> TBsVDiskExtremeHandoff::SimpleHnd6Put1SeqGetFresh >> TBsVDiskExtreme::Simple3Put3GetFresh [GOOD] >> TBsVDiskExtreme::Simple3Put3GetCompaction |82.0%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/backpressure/ut/unittest >> TBlobStorageQueueTest::TMessageLost [GOOD] >> TBsVDiskManyPutGet::ManyPutGetWaitCompaction [GOOD] >> TBsVDiskManyPutGet::ManyPutRangeGetFreshIndexOnly |82.0%| [TM] {BAZEL_UPLOAD} ydb/core/blobstorage/backpressure/ut/unittest |82.0%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/columnshard/splitter/ut/ydb-core-tx-columnshard-splitter-ut >> TAwsSignature::SignWithEscaping [GOOD] |82.0%| [TM] {asan, default-linux-x86_64, release} ydb/core/fq/libs/compute/common/ut/unittest >> FormatTimes::DurationUs [GOOD] |82.0%| [LD] {RESULT} $(B)/ydb/core/tx/columnshard/splitter/ut/ydb-core-tx-columnshard-splitter-ut >> FormatTimes::ParseDuration [GOOD] |82.0%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/blobstorage/ut_mirror3of4/ydb-core-blobstorage-ut_mirror3of4 |82.0%| [LD] {RESULT} $(B)/ydb/core/blobstorage/ut_mirror3of4/ydb-core-blobstorage-ut_mirror3of4 |82.0%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/columnshard/splitter/ut/ydb-core-tx-columnshard-splitter-ut |82.0%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/blobstorage/ut_mirror3of4/ydb-core-blobstorage-ut_mirror3of4 >> TBsVDiskExtremeHuge::Simple3Put3GetFresh [GOOD] >> TBsVDiskExtremeHuge::Simple3Put3GetCompaction |82.0%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/library/yql/providers/solomon/actors/ut/ydb-library-yql-providers-solomon-actors-ut |82.1%| [TM] {BAZEL_UPLOAD} ydb/core/fq/libs/compute/common/ut/unittest |82.1%| [LD] 
{BAZEL_UPLOAD, SKIPPED} $(B)/ydb/library/yql/providers/solomon/actors/ut/ydb-library-yql-providers-solomon-actors-ut |82.1%| [LD] {RESULT} $(B)/ydb/library/yql/providers/solomon/actors/ut/ydb-library-yql-providers-solomon-actors-ut |82.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/ut_blobstorage/mirror3dc.cpp >> TBsVDiskExtreme::Simple3Put1SeqGetAllFresh [GOOD] >> TBsVDiskExtreme::Simple3Put1SeqGetAllCompaction >> TBsVDiskExtremeHandoffHuge::SimpleHndPut1SeqGetFresh [GOOD] >> TBsVDiskExtremeHandoffHuge::SimpleHnd2Put1GetFresh |82.1%| [TS] {asan, default-linux-x86_64, release} ydb/library/yql/providers/common/http_gateway/ut/unittest >> TAwsSignature::SignWithEscaping [GOOD] |82.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/ut_blobstorage/mirror3dc.cpp |82.1%| [TM] {asan, default-linux-x86_64, release} ydb/core/fq/libs/compute/common/ut/unittest >> FormatTimes::ParseDuration [GOOD] |82.1%| [TS] {BAZEL_UPLOAD} ydb/library/yql/providers/common/http_gateway/ut/unittest |82.1%| [TM] {BAZEL_UPLOAD} ydb/core/fq/libs/compute/common/ut/unittest >> TDNSGatewaySuite::ShouldResolveHostnameFromDNSDuringInitialization [GOOD] >> TBsVDiskGC::TGCManyVPutsDelTabletTest [GOOD] >> TBsVDiskManyPutGet::ManyPutGet >> TBsVDiskExtremeHandoff::SimpleHnd6Put1SeqGetFresh [GOOD] >> TBsVDiskExtremeHandoff::SimpleHnd6Put1SeqGetCompaction >> TBsVDiskGC::GCPutBarrierVDisk0NoSync [GOOD] >> TBsVDiskGC::GCPutBarrierSync >> TBsVDiskBrokenPDisk::WriteUntilDeviceDeath [GOOD] >> TBsVDiskDefrag::DefragEmptyDB |82.1%| [TS] {asan, default-linux-x86_64, release} ydb/library/yql/providers/common/http_gateway/ut/unittest >> TDNSGatewaySuite::ShouldResolveHostnameFromDNSDuringInitialization [GOOD] |82.1%| [TS] {BAZEL_UPLOAD} ydb/library/yql/providers/common/http_gateway/ut/unittest >> StatsFormat::FullStat [GOOD] |82.1%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/kqp/gateway/ut/ydb-core-kqp-gateway-ut |82.1%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/kqp/gateway/ut/ydb-core-kqp-gateway-ut |82.1%| [LD] {RESULT} $(B)/ydb/core/kqp/gateway/ut/ydb-core-kqp-gateway-ut >> THugeHeapCtxTests::Basic [GOOD] >> TopTest::Test1 [GOOD] >> TBsVDiskRange::Simple3PutRangeGetNothingForwardCompaction [GOOD] >> TBsVDiskRange::Simple3PutRangeGetNothingBackwardFresh >> TBsVDiskRangeHuge::Simple3PutRangeGetNothingForwardCompaction [GOOD] >> TBsVDiskRepl1::ReplProxyData >> Config::IncludeScope [GOOD] |82.1%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/tests/fq/pq_async_io/ut/ydb-tests-fq-pq_async_io-ut |82.1%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/tests/fq/pq_async_io/ut/ydb-tests-fq-pq_async_io-ut |82.1%| [LD] {RESULT} $(B)/ydb/tests/fq/pq_async_io/ut/ydb-tests-fq-pq_async_io-ut |82.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/mind/bscontroller/ut_selfheal/main.cpp >> TBsVDiskRangeHuge::Simple3PutRangeGetNothingBackwardCompaction [GOOD] >> TBsVDiskRangeHuge::Simple3PutRangeGetMiddleForwardFresh |82.1%| [TM] {asan, default-linux-x86_64, release} ydb/core/fq/libs/compute/common/ut/unittest >> StatsFormat::FullStat [GOOD] |82.1%| [TM] {asan, default-linux-x86_64, release} ydb/core/fq/libs/compute/common/ut/unittest >> TBsVDiskExtremeHandoffHuge::SimpleHnd2Put1GetFresh [GOOD] >> TBsVDiskExtremeHuge::Simple3Put1SeqGetAllFresh >> TBsVDiskManyPutGet::ManyPutRangeGetFreshIndexOnly [GOOD] >> TBsVDiskManyPutGet::ManyPutRangeGetCompactionIndexOnly |82.1%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/tests/datashard/parametrized_queries/ydb-tests-datashard-parametrized_queries >> 
Config::ExcludeScope [GOOD] >> TBsVDiskExtreme::Simple3Put3GetCompaction [GOOD] >> TBsVDiskExtreme::Simple3Put1SeqSubsOkFresh |82.1%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/vdisk/huge/ut/unittest >> TopTest::Test1 [GOOD] |82.1%| [TM] {asan, default-linux-x86_64, release} ydb/core/fq/libs/compute/common/ut/unittest >> Config::IncludeScope [GOOD] |82.1%| [LD] {RESULT} $(B)/ydb/tests/datashard/parametrized_queries/ydb-tests-datashard-parametrized_queries |82.1%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/tests/datashard/parametrized_queries/ydb-tests-datashard-parametrized_queries |82.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/mind/bscontroller/ut_selfheal/main.cpp |82.1%| [TM] {BAZEL_UPLOAD} ydb/core/fq/libs/compute/common/ut/unittest |82.2%| [TM] {BAZEL_UPLOAD} ydb/core/fq/libs/compute/common/ut/unittest |82.2%| [TM] {BAZEL_UPLOAD} ydb/core/blobstorage/vdisk/huge/ut/unittest |82.2%| [TM] {BAZEL_UPLOAD} ydb/core/fq/libs/compute/common/ut/unittest >> TBsVDiskExtremeHuge::Simple3Put3GetCompaction [GOOD] >> TBsVDiskExtremeHuge::Simple3Put1SeqSubsOkFresh >> TBsVDiskExtreme::Simple3Put1SeqGetAllCompaction [GOOD] >> TBsVDiskExtreme::Simple3Put1SeqGet2Fresh >> TBsVDiskDefrag::DefragEmptyDB [GOOD] >> TBsVDiskDefrag::Defrag50PercentGarbage |82.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/ut_blobstorage/read_only_vdisk.cpp |82.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/ut_blobstorage/read_only_vdisk.cpp |82.2%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/fq/libs/checkpointing/ut/ydb-core-fq-libs-checkpointing-ut |82.2%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/fq/libs/checkpointing/ut/ydb-core-fq-libs-checkpointing-ut |82.2%| [TM] {asan, default-linux-x86_64, release} ydb/core/fq/libs/compute/common/ut/unittest >> Config::ExcludeScope [GOOD] |82.2%| [LD] {RESULT} $(B)/ydb/core/fq/libs/checkpointing/ut/ydb-core-fq-libs-checkpointing-ut >> THugeMigration::ExtendMap_HugeBlobs [GOOD] >> THugeMigration::ExtendMap_SmallBlobsBecameHuge >> TStateStorageConfig::NonDuplicatedNodesTest [GOOD] >> TStateStorageConfig::DuplicatedNodesTest |82.2%| [TM] {BAZEL_UPLOAD} ydb/core/fq/libs/compute/common/ut/unittest |82.2%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/ut_blobstorage/ut_cluster_balancing/unittest |82.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/ut_blobstorage/restart_pdisk.cpp >> TBsVDiskExtremeHandoff::SimpleHnd6Put1SeqGetCompaction [GOOD] >> TBsVDiskExtremeHandoff::SimpleHnd2Put1GetFresh |82.2%| [TM] {BAZEL_UPLOAD} ydb/core/blobstorage/ut_blobstorage/ut_cluster_balancing/unittest |82.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/ut_blobstorage/restart_pdisk.cpp |82.2%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/scheme_board/ut_subscriber/ydb-core-tx-scheme_board-ut_subscriber |82.2%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/scheme_board/ut_subscriber/ydb-core-tx-scheme_board-ut_subscriber |82.2%| [LD] {RESULT} $(B)/ydb/core/tx/scheme_board/ut_subscriber/ydb-core-tx-scheme_board-ut_subscriber |82.2%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/actorlib_impl/ut/ydb-core-actorlib_impl-ut |82.2%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/actorlib_impl/ut/ydb-core-actorlib_impl-ut |82.2%| [LD] {RESULT} $(B)/ydb/core/actorlib_impl/ut/ydb-core-actorlib_impl-ut |82.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/ut_blobstorage/decommit_3dc.cpp >> TBsVDiskExtremeHuge::Simple3Put1SeqGetAllFresh [GOOD] >> 
TBsVDiskRange::Simple3PutRangeGetNothingBackwardFresh [GOOD] >> TBsVDiskRange::Simple3PutRangeGetNothingBackwardCompaction >> TBsVDiskRepl1::ReplProxyData [GOOD] >> TBsVDiskRepl1::ReplEraseDiskRestore |82.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/ut_blobstorage/decommit_3dc.cpp >> TBsVDiskExtremeHuge::Simple3Put1SeqGetAllCompaction >> TBsVDiskRangeHuge::Simple3PutRangeGetMiddleForwardFresh [GOOD] >> TBsVDiskRangeHuge::Simple3PutRangeGetMiddleForwardCompaction |82.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/ut_blobstorage/bridge_get.cpp >> TBsVDiskExtreme::Simple3Put1SeqSubsOkFresh [GOOD] >> TBsVDiskExtreme::Simple3Put1SeqSubsOkCompaction |82.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/ut_blobstorage/recovery.cpp >> CheckIntegrityMirror3of4::PlacementOk >> CheckIntegrityBlock42::DataErrorAdditionalUnequalParts |82.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/ut_blobstorage/bridge_get.cpp |82.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/ut_blobstorage/recovery.cpp >> ClusterBalancing::ClusterBalancingEvenDistributionNotPossible >> TAwsSignature::Sign [GOOD] >> TAwsSignature::SignCmp [GOOD] |82.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/ut_blobstorage/gc_quorum_3dc.cpp >> TDNSGatewaySuite::ShouldFilterDNSAddressedBasedOnProvidedProtocolIPV6Case [GOOD] |82.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/ut_blobstorage/gc_quorum_3dc.cpp |82.2%| [TA] $(B)/ydb/core/blobstorage/vdisk/huge/ut/test-results/unittest/{meta.json ... results_accumulator.log} >> TBsVDiskExtremeHuge::Simple3Put1SeqSubsOkFresh [GOOD] >> TBsVDiskExtremeHuge::Simple3Put1SeqSubsOkCompaction |82.2%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/tests/functional/script_execution/ydb-tests-functional-script_execution >> TBsVDiskExtreme::Simple3Put1SeqGet2Fresh [GOOD] >> TBsVDiskExtreme::Simple3Put1SeqGet2Compaction |82.2%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/tests/functional/script_execution/ydb-tests-functional-script_execution >> TBsVDiskManyPutGet::ManyPutGet [GOOD] >> TBsVDiskManyPutGet::ManyMultiSinglePutGet |82.2%| [TA] {BAZEL_UPLOAD} $(B)/ydb/core/blobstorage/vdisk/huge/ut/test-results/unittest/{meta.json ... results_accumulator.log} |82.2%| [TS] {asan, default-linux-x86_64, release} ydb/library/yql/providers/common/http_gateway/ut/unittest >> TAwsSignature::SignCmp [GOOD] |82.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/ut_blobstorage/race.cpp >> TQueryResultSizeTrackerTest::SerializeDeserializeMaxPtotobufSize [GOOD] |82.2%| [TA] {RESULT} $(B)/ydb/core/blobstorage/vdisk/huge/ut/test-results/unittest/{meta.json ... 
results_accumulator.log} |82.2%| [LD] {RESULT} $(B)/ydb/tests/functional/script_execution/ydb-tests-functional-script_execution >> TBsVDiskExtremeHandoff::SimpleHnd2Put1GetFresh [GOOD] >> TBsVDiskExtremeHandoff::SimpleHnd2Put1GetCompaction |82.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/ut_blobstorage/race.cpp |82.2%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/scheme_board/ut_replica/ydb-core-tx-scheme_board-ut_replica |82.2%| [LD] {RESULT} $(B)/ydb/core/tx/scheme_board/ut_replica/ydb-core-tx-scheme_board-ut_replica >> CheckIntegrityMirror3of4::PlacementOk [GOOD] >> CheckIntegrityMirror3of4::PlacementMissingParts >> ClusterBalancing::ClusterBalancingEvenDistribution |82.2%| [TS] {BAZEL_UPLOAD} ydb/library/yql/providers/common/http_gateway/ut/unittest |82.2%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/scheme_board/ut_replica/ydb-core-tx-scheme_board-ut_replica |82.2%| [TS] {asan, default-linux-x86_64, release} ydb/library/yql/providers/common/http_gateway/ut/unittest >> TDNSGatewaySuite::ShouldFilterDNSAddressedBasedOnProvidedProtocolIPV6Case [GOOD] >> CheckIntegrityBlock42::DataErrorAdditionalUnequalParts [GOOD] >> CheckIntegrityBlock42::DataErrorSixPartsOneBroken |82.3%| [TS] {BAZEL_UPLOAD} ydb/library/yql/providers/common/http_gateway/ut/unittest |82.3%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/library/actors/core/ut_fat/ydb-library-actors-core-ut_fat |82.3%| [LD] {RESULT} $(B)/ydb/library/actors/core/ut_fat/ydb-library-actors-core-ut_fat >> TBsVDiskGC::GCPutBarrierSync [GOOD] >> TBsVDiskGC::GCPutKeepBarrierSync |82.3%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/ut_blobstorage/ut_cluster_balancing/unittest |82.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/ut_blobstorage/defrag.cpp |82.3%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/vdisk/query/ut/unittest >> TQueryResultSizeTrackerTest::SerializeDeserializeMaxPtotobufSize [GOOD] >> THugeMigration::ExtendMap_SmallBlobsBecameHuge [GOOD] >> THugeMigration::RollbackMap_HugeBlobs >> TBsVDiskExtreme::Simple3Put1SeqSubsOkCompaction [GOOD] >> TBsVDiskExtreme::Simple3Put1SeqSubsErrorFresh |82.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/ut_blobstorage/defrag.cpp |82.3%| [TM] {BAZEL_UPLOAD} ydb/core/blobstorage/ut_blobstorage/ut_cluster_balancing/unittest |82.3%| [TM] {BAZEL_UPLOAD} ydb/core/blobstorage/vdisk/query/ut/unittest |82.3%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/ut_blobstorage/ut_cluster_balancing/unittest |82.3%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/ut_blobstorage/ut_cluster_balancing/unittest |82.3%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/ut_blobstorage/ut_cluster_balancing/unittest >> TBsVDiskRange::Simple3PutRangeGetNothingBackwardCompaction [GOOD] >> TBsVDiskRange::Simple3PutRangeGetMiddleForwardFresh >> TBsVDiskExtremeHuge::Simple3Put1SeqGetAllCompaction [GOOD] >> TBsVDiskExtremeHuge::Simple3Put1SeqGet2Fresh >> TBsVDiskRangeHuge::Simple3PutRangeGetMiddleForwardCompaction [GOOD] >> TBsVDiskRangeHuge::Simple3PutRangeGetMiddleBackwardFresh >> CheckIntegrityMirror3of4::PlacementMissingParts [GOOD] >> CheckIntegrityMirror3of4::PlacementDisintegrated |82.3%| [TM] {BAZEL_UPLOAD} ydb/core/blobstorage/ut_blobstorage/ut_cluster_balancing/unittest |82.3%| [TM] {BAZEL_UPLOAD} ydb/core/blobstorage/ut_blobstorage/ut_cluster_balancing/unittest |82.3%| [LD] {BAZEL_UPLOAD} $(B)/ydb/library/actors/core/ut_fat/ydb-library-actors-core-ut_fat |82.3%| [TM] 
{BAZEL_UPLOAD} ydb/core/blobstorage/ut_blobstorage/ut_cluster_balancing/unittest >> VDiskBalancing::TestStopOneNode_Mirror3dc_HugeBlob >> CheckIntegrityBlock42::DataErrorSixPartsOneBroken [GOOD] >> CheckIntegrityBlock42::DataErrorFivePartsOneBroken >> TBsVDiskExtreme::Simple3Put1SeqGet2Compaction [GOOD] >> TBsVDiskExtreme::Simple3Put1GetMissingPartFresh |82.3%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/ut_blobstorage/ut_cluster_balancing/unittest |82.3%| [TM] {BAZEL_UPLOAD} ydb/core/blobstorage/ut_blobstorage/ut_cluster_balancing/unittest >> TAwsSignature::SignWithTime [GOOD] |82.3%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/cms/console/validators/ut/ydb-core-cms-console-validators-ut |82.3%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/cms/console/validators/ut/ydb-core-cms-console-validators-ut |82.3%| [LD] {RESULT} $(B)/ydb/core/cms/console/validators/ut/ydb-core-cms-console-validators-ut >> TBsVDiskExtremeHuge::Simple3Put1SeqSubsOkCompaction [GOOD] >> TBsVDiskExtremeHuge::Simple3Put1SeqSubsErrorFresh |82.3%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/ut_blobstorage/ut_cluster_balancing/unittest |82.3%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/tools/query_replay_yt/query_replay_yt |82.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/ut_blobstorage/corrupted_reads.cpp |82.3%| [LD] {RESULT} $(B)/ydb/tools/query_replay_yt/query_replay_yt |82.3%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/tools/query_replay_yt/query_replay_yt |82.3%| [TM] {BAZEL_UPLOAD} ydb/core/blobstorage/ut_blobstorage/ut_cluster_balancing/unittest |82.3%| [TS] {asan, default-linux-x86_64, release} ydb/library/yql/providers/common/http_gateway/ut/unittest >> TAwsSignature::SignWithTime [GOOD] >> CheckIntegrityMirror3of4::PlacementDisintegrated [GOOD] >> TBsVDiskExtremeHandoff::SimpleHnd2Put1GetCompaction [GOOD] >> TBsVDiskExtremeHandoffHuge::SimpleHnd2Put1GetCompaction |82.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/ut_blobstorage/corrupted_reads.cpp >> CheckIntegrityBlock42::DataErrorFivePartsOneBroken [GOOD] >> CheckIntegrityBlock42::DataErrorHeavySixPartsWithManyBroken |82.3%| [TS] {BAZEL_UPLOAD} ydb/library/yql/providers/common/http_gateway/ut/unittest |82.3%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/ut_blobstorage/ut_cluster_balancing/unittest >> TBsVDiskExtreme::Simple3Put1SeqSubsErrorFresh [GOOD] >> TBsVDiskExtreme::Simple3Put1SeqSubsErrorCompaction |82.3%| [TM] {BAZEL_UPLOAD} ydb/core/blobstorage/ut_blobstorage/ut_cluster_balancing/unittest >> TestProtocols::TestConnectProtocol >> TInterconnectTest::TestSimplePingPong >> TInterconnectTest::OldFormat |82.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/ut_blobstorage/stop_pdisk.cpp >> TBsVDiskRangeHuge::Simple3PutRangeGetMiddleBackwardFresh [GOOD] >> TBsVDiskRange::Simple3PutRangeGetMiddleForwardFresh [GOOD] >> CheckIntegrityMirror3dc::PlacementOk |82.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/ut_blobstorage/stop_pdisk.cpp ------- [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/ut_blobstorage/ut_check_integrity/unittest >> CheckIntegrityMirror3of4::PlacementDisintegrated [GOOD] Test command err: RandomSeed# 7787236108333245294 *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:1] TO [82000000:1:0:5:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:2] TO [82000000:1:0:6:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:1] TO [82000000:1:0:7:0] 
FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:3] TO [82000000:1:0:1:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:3] TO [82000000:1:0:2:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:1] TO [82000000:1:0:5:0] FINISHED WITH OK *** Group is disintegrated or has network problems >> TInterconnectTest::TestSimplePingPong [GOOD] >> TInterconnectTest::TestSubscribeByFlag >> TBsVDiskExtremeHuge::Simple3Put1SeqGet2Fresh [GOOD] >> TBsVDiskExtremeHuge::Simple3Put1SeqGet2Compaction >> TBsVDiskExtreme::Simple3Put1GetMissingPartFresh [GOOD] >> TBsVDiskExtreme::Simple3Put1GetMissingPartCompaction >> TInterconnectTest::TestBlobEvent220BytesPreSerialized >> TBsVDiskManyPutGet::ManyPutRangeGetCompactionIndexOnly [GOOD] >> TBsVDiskManyPutGet::ManyPutRangeGet2ChannelsIndexOnly |82.3%| [TM] {BAZEL_UPLOAD} ydb/core/blobstorage/ut_blobstorage/ut_check_integrity/unittest >> TInterconnectTest::TestNotifyUndelivered >> TestProtocols::TestConnectProtocol [GOOD] >> TestProtocols::TestHTTPCollected >> TInterconnectTest::OldFormat [GOOD] >> TInterconnectTest::OldFormatSuppressVersionCheckOnNew >> TInterconnectTest::TestSubscribeByFlag [GOOD] >> TInterconnectTest::TestReconnect >> TestProtocols::TestResolveProtocol >> CheckIntegrityBlock42::DataErrorHeavySixPartsWithManyBroken [GOOD] >> TInterconnectTest::TestBlobEvent220BytesPreSerialized [GOOD] >> TInterconnectTest::TestBlobEventDifferentSizes |82.3%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/ut_vdisk/unittest >> TBsVDiskRangeHuge::Simple3PutRangeGetMiddleBackwardFresh [GOOD] |82.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/nodewarden/distconf_ut.cpp |82.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/ut_blobstorage/blob_depot_event_managers.cpp >> TestProtocols::TestHTTPCollected [GOOD] >> TInterconnectTest::TestTraceIdPassThrough >> TInterconnectTest::TestNotifyUndelivered [GOOD] >> TInterconnectTest::TestNotifyUndeliveredOnMissedActor >> TInterconnectTest::TestReconnect [GOOD] >> TInterconnectTest::TestSubscribeAndUnsubsribeByEvent >> TInterconnectTest::OldFormatSuppressVersionCheckOnNew [GOOD] >> TInterconnectTest::OldFormatSuppressVersionCheckOnOld |82.3%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/ut_vdisk/unittest >> TBsVDiskRange::Simple3PutRangeGetMiddleForwardFresh [GOOD] >> TestProtocols::TestResolveProtocol [GOOD] >> TestProtocols::TestHTTPCollectedVerySlow |82.3%| [TM] {BAZEL_UPLOAD} ydb/core/blobstorage/ut_vdisk/unittest |82.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/nodewarden/distconf_ut.cpp |82.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/ut_blobstorage/blob_depot_event_managers.cpp >> TInterconnectTest::TestTraceIdPassThrough [GOOD] >> TInterconnectTest::TestNotifyUndeliveredOnMissedActor [GOOD] >> TInterconnectTest::TestPreSerializedBlobEventUpToMebibytes |82.4%| [TM] {BAZEL_UPLOAD} ydb/core/blobstorage/ut_vdisk/unittest >> TInterconnectTest::TestBlobEventDifferentSizes [GOOD] >> TInterconnectTest::TestBlobEventDifferentSizesPreSerialized >> TInterconnectTest::TestManyEvents >> TInterconnectTest::TestSubscribeAndUnsubsribeByEvent [GOOD] >> TReplicaTest::Unsubscribe >> TBsVDiskExtremeHuge::Simple3Put1SeqSubsErrorFresh [GOOD] >> TBsVDiskExtremeHuge::Simple3Put1SeqSubsErrorCompaction >> CheckIntegrityMirror3dc::PlacementOk [GOOD] >> CheckIntegrityMirror3dc::PlacementOkHandoff >> TInterconnectTest::OldFormatSuppressVersionCheckOnOld [GOOD] >> 
TInterconnectTest::OldFormatSuppressVersionCheck >> TReplicaTest::HandshakeWithStaleGeneration >> TReplicaTest::Commit >> TInterconnectTest::TestPreSerializedBlobEventUpToMebibytes [GOOD] >> TInterconnectTest::TestPingPongThroughSubChannel >> TInterconnectTest::TestBlobEventDifferentSizesPreSerialized [GOOD] >> TInterconnectTest::TestBlobEventDifferentSizesPreSerializedAndRaw >> TBsVDiskManyPutGet::ManyMultiSinglePutGet [GOOD] >> TBsVDiskManyPutGet::ManyMultiPutGet ------- [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/ut_blobstorage/ut_check_integrity/unittest >> CheckIntegrityBlock42::DataErrorHeavySixPartsWithManyBroken [GOOD] Test command err: RandomSeed# 4280419939907778845 *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:1] TO [82000000:1:0:5:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:2] TO [82000000:1:0:6:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:3] TO [82000000:1:0:7:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:4] TO [82000000:1:0:0:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:5] TO [82000000:1:0:1:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:6] TO [82000000:1:0:2:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:1] TO [82000000:1:0:3:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:1] TO [82000000:1:0:4:0] FINISHED WITH OK *** Disks: 0: [82000000:1:0:5:0] 1: [82000000:1:0:6:0] 2: [82000000:1:0:7:0] 3: [82000000:1:0:0:0] 4: [82000000:1:0:1:0] 5: [82000000:1:0:2:0] 6: [82000000:1:0:3:0] 7: [82000000:1:0:4:0] Layout info: part 1: ver0 disks [ 6 ], ver1 disks [ 7 ], ver2 disks [ 0 ] part 2: ver0 disks [ 1 ] part 3: ver0 disks [ 2 ] part 4: ver0 disks [ 3 ] part 5: ver0 disks [ 4 ] part 6: ver0 disks [ 5 ] ERROR: There are unequal parts Erasure info: { part 3 disks [ 2 ]; part 4 disks [ 3 ]; part 5 disks [ 4 ]; part 6 disks [ 5 ]; } CHECK part 1 disks [ 0 ] -> OK { part 3 disks [ 2 ]; part 4 disks [ 3 ]; part 5 disks [ 4 ]; part 6 disks [ 5 ]; } CHECK part 2 disks [ 1 ] -> OK { part 2 disks [ 1 ]; part 4 disks [ 3 ]; part 5 disks [ 4 ]; part 6 disks [ 5 ]; } CHECK part 1 disks [ 0 ] -> OK { part 2 disks [ 1 ]; part 4 disks [ 3 ]; part 5 disks [ 4 ]; part 6 disks [ 5 ]; } CHECK part 3 disks [ 2 ] -> OK { part 2 disks [ 1 ]; part 3 disks [ 2 ]; part 5 disks [ 4 ]; part 6 disks [ 5 ]; } CHECK part 1 disks [ 0 ] -> OK { part 2 disks [ 1 ]; part 3 disks [ 2 ]; part 5 disks [ 4 ]; part 6 disks [ 5 ]; } CHECK part 4 disks [ 3 ] -> OK { part 2 disks [ 1 ]; part 3 disks [ 2 ]; part 4 disks [ 3 ]; part 6 disks [ 5 ]; } CHECK part 1 disks [ 0 ] -> OK { part 2 disks [ 1 ]; part 3 disks [ 2 ]; part 4 disks [ 3 ]; part 6 disks [ 5 ]; } CHECK part 5 disks [ 4 ] -> OK { part 2 disks [ 1 ]; part 3 disks [ 2 ]; part 4 disks [ 3 ]; part 5 disks [ 4 ]; } CHECK part 1 disks [ 0 ] -> OK { part 2 disks [ 1 ]; part 3 disks [ 2 ]; part 4 disks [ 3 ]; part 5 disks [ 4 ]; } CHECK part 6 disks [ 5 ] -> OK { part 1 disks [ 0 ]; part 4 disks [ 3 ]; part 5 disks [ 4 ]; part 6 disks [ 5 ]; } CHECK part 2 disks [ 1 ] -> OK { part 1 disks [ 0 ]; part 4 disks [ 3 ]; part 5 disks [ 4 ]; part 6 disks [ 5 ]; } CHECK part 3 disks [ 2 ] -> OK { part 1 disks [ 0 ]; part 3 disks [ 2 ]; part 5 disks [ 4 ]; part 6 disks [ 5 ]; } CHECK part 2 disks [ 1 ] -> OK { part 1 disks [ 0 ]; part 3 disks [ 2 ]; part 5 disks [ 4 ]; part 6 disks [ 5 ]; } CHECK part 4 disks [ 3 ] -> OK { part 1 
disks [ 0 ]; part 3 disks [ 2 ]; part 4 disks [ 3 ]; part 6 disks [ 5 ]; } CHECK part 2 disks [ 1 ] -> OK { part 1 disks [ 0 ]; part 3 disks [ 2 ]; part 4 disks [ 3 ]; part 6 disks [ 5 ]; } CHECK part 5 disks [ 4 ] -> OK { part 1 disks [ 0 ]; part 3 disks [ 2 ]; part 4 disks [ 3 ]; part 5 disks [ 4 ]; } CHECK part 2 disks [ 1 ] -> OK { part 1 disks [ 0 ]; part 3 disks [ 2 ]; part 4 disks [ 3 ]; part 5 disks [ 4 ]; } CHECK part 6 disks [ 5 ] -> OK { part 1 disks [ 0 ]; part 2 disks [ 1 ]; part 5 disks [ 4 ]; part 6 disks [ 5 ]; } CHECK part 3 disks [ 2 ] -> OK { part 1 disks [ 0 ]; part 2 disks [ 1 ]; part 5 disks [ 4 ]; part 6 disks [ 5 ]; } CHECK part 4 disks [ 3 ] -> OK { part 1 disks [ 0 ]; part 2 disks [ 1 ]; part 4 disks [ 3 ]; part 6 disks [ 5 ]; } CHECK part 3 disks [ 2 ] -> OK { part 1 disks [ 0 ]; part 2 disks [ 1 ]; part 4 disks [ 3 ]; part 6 disks [ 5 ]; } CHECK part 5 disks [ 4 ] -> OK { part 1 disks [ 0 ]; part 2 disks [ 1 ]; part 4 disks [ 3 ]; part 5 disks [ 4 ]; } CHECK part 3 disks [ 2 ] -> OK { part 1 disks [ 0 ]; part 2 disks [ 1 ]; part 4 disks [ 3 ]; part 5 disks [ 4 ]; } CHECK part 6 disks [ 5 ] -> OK { part 1 disks [ 0 ]; part 2 disks [ 1 ]; part 3 disks [ 2 ]; part 6 disks [ 5 ]; } CHECK part 4 disks [ 3 ] -> OK { part 1 disks [ 0 ]; part 2 disks [ 1 ]; part 3 disks [ 2 ]; part 6 disks [ 5 ]; } CHECK part 5 disks [ 4 ] -> OK { part 1 disks [ 0 ]; part 2 disks [ 1 ]; part 3 disks [ 2 ]; part 5 disks [ 4 ]; } CHECK part 4 disks [ 3 ] -> OK { part 1 disks [ 0 ]; part 2 disks [ 1 ]; part 3 disks [ 2 ]; part 5 disks [ 4 ]; } CHECK part 6 disks [ 5 ] -> OK { part 1 disks [ 0 ]; part 2 disks [ 1 ]; part 3 disks [ 2 ]; part 4 disks [ 3 ]; } CHECK part 5 disks [ 4 ] -> OK { part 1 disks [ 0 ]; part 2 disks [ 1 ]; part 3 disks [ 2 ]; part 4 disks [ 3 ]; } CHECK part 6 disks [ 5 ] -> OK ERROR: There are erasure restore fails *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:1] TO [82000000:1:0:5:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:2] TO [82000000:1:0:6:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:3] TO [82000000:1:0:7:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:4] TO [82000000:1:0:0:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:5] TO [82000000:1:0:1:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:6] TO [82000000:1:0:2:0] FINISHED WITH OK *** Disks: 0: [82000000:1:0:5:0] 1: [82000000:1:0:6:0] 2: [82000000:1:0:7:0] 3: [82000000:1:0:0:0] 4: [82000000:1:0:1:0] 5: [82000000:1:0:2:0] 6: [82000000:1:0:3:0] 7: [82000000:1:0:4:0] Layout info: part 1: ver0 disks [ 0 ] part 2: ver0 disks [ 1 ] part 3: ver0 disks [ 2 ] part 4: ver0 disks [ 3 ] part 5: ver0 disks [ 4 ] part 6: ver0 disks [ 5 ] Erasure info: { part 1 disks [ 0 ]; part 2 disks [ 1 ]; part 3 disks [ 2 ]; part 4 disks [ 3 ]; } CHECK part 5 disks [ 4 ] -> OK { part 2 disks [ 1 ]; part 3 disks [ 2 ]; part 4 disks [ 3 ]; part 5 disks [ 4 ]; } CHECK part 1 disks [ 0 ] -> OK { part 1 disks [ 0 ]; part 3 disks [ 2 ]; part 4 disks [ 3 ]; part 5 disks [ 4 ]; } CHECK part 2 disks [ 1 ] -> OK { part 1 disks [ 0 ]; part 2 disks [ 1 ]; part 4 disks [ 3 ]; part 5 disks [ 4 ]; } CHECK part 3 disks [ 2 ] -> OK { part 1 disks [ 0 ]; part 2 disks [ 1 ]; part 3 disks [ 2 ]; part 5 disks [ 4 ]; } CHECK part 4 disks [ 3 ] -> OK { part 1 disks [ 0 ]; part 2 disks [ 1 ]; part 3 disks [ 2 ]; part 4 disks [ 3 ]; } CHECK part 5 disks [ 4 ] -> OK ERROR: There are erasure restore 
fails *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:1] TO [82000000:1:0:5:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:2] TO [82000000:1:0:6:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:3] TO [82000000:1:0:7:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:4] TO [82000000:1:0:0:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:5] TO [82000000:1:0:1:0] FINISHED WITH OK *** Disks: 0: [82000000:1:0:5:0] 1: [82000000:1:0:6:0] 2: [82000000:1:0:7:0] 3: [82000000:1:0:0:0] 4: [82000000:1:0:1:0] 5: [82000000:1:0:2:0] 6: [82000000:1:0:3:0] 7: [82000000:1:0:4:0] Layout info: part 1: ver0 disks [ 0 ] part 2: ver0 disks [ 1 ] part 3: ver0 disks [ 2 ] part 4: ver0 disks [ 3 ] part 5: ver0 disks [ 4 ] part 6: Erasure info: ERROR: There are erasure restore fails *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:1] TO [82000000:1:0:5:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:2] TO [82000000:1:0:6:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:3] TO [82000000:1:0:7:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:4] TO [82000000:1:0:0:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:5] TO [82000000:1:0:1:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:6] TO [82000000:1:0:2:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:1] TO [82000000:1:0:3:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:2] TO [82000000:1:0:3:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:3] TO [82000000:1:0:3:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:4] TO [82000000:1:0:3:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:5] TO [82000000:1:0:3:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:6] TO [82000000:1:0:3:0] FINISHED WITH OK *** Disks: 0: [82000000:1:0:5:0] 1: [82000000:1:0:6:0] 2: [82000000:1:0:7:0] 3: [82000000:1:0:0:0] 4: [82000000:1:0:1:0] 5: [82000000:1:0:2:0] 6: [82000000:1:0:3:0] 7: [82000000:1:0:4:0] Layout info: part 1: ver0 disks [ 6 ], ver1 disks [ 0 ] part 2: ver0 disks [ 6 ], ver1 disks [ 1 ] part 3: ver0 disks [ 6 ], ver1 disks [ 2 ] part 4: ver0 disks [ 3 ], ver1 disks [ 6 ] part 5: ver0 disks [ 4 ], ver1 disks [ 6 ] part 6: ver0 disks [ 5 ], ver1 disks [ 6 ] ERROR: There are unequal parts Erasure info: { part 3 disks [ 6 ]; part 4 disks [ 6 ]; part 5 disks [ 6 ]; part 6 disks [ 6 ]; } CHECK part 1 disks [ 6 ] -> OK { part 3 disks [ 6 ]; part 4 disks [ 6 ]; part 5 disks [ 6 ]; part 6 disks [ 6 ]; } CHECK part 2 disks [ 6 ] -> OK { part 3 disks [ 2 ]; part 4 disks [ 3 ]; part 5 disks [ 4 ]; part 6 disks [ 5 ]; } CHECK part 1 disks [ 0 ] -> OK { part 3 disks [ 2 ]; part 4 disks [ 3 ]; part 5 disks [ 4 ]; part 6 disks [ 5 ]; } CHECK part 2 disks [ 1 ] -> OK { part 2 disks [ 6 ]; part 4 disks [ 6 ]; part 5 disks [ 6 ]; part 6 disks [ 6 ]; } CHECK part 1 disks [ 6 ] -> OK { part 2 disks [ 6 ]; part 4 disks [ 6 ]; part 5 disks [ 6 ]; part 6 disks [ 6 ]; } CHECK part 3 disks [ 6 ] -> OK { part 2 disks [ 1 ]; part 4 disks [ 3 ]; part 5 disks [ 4 ]; part 6 disks [ 5 ]; } CHECK part 1 disks [ 0 ] -> OK { part 2 disks [ 1 ]; part 4 disks [ 3 ]; part 5 disks [ 4 ]; part 6 disks [ 5 ]; } CHECK part 3 disks [ 2 ] -> OK { part 2 disks [ 6 ]; part 3 disks [ 6 ]; part 
5 disks [ 6 ]; part 6 disks [ 6 ]; } CHECK part 1 disks [ 6 ] -> OK { part 2 disks [ 6 ]; part 3 disks [ 6 ]; part 5 disks [ 6 ]; part 6 disks [ 6 ]; } CHECK part 4 disks [ 6 ] -> OK { part 2 disks [ 1 ]; part 3 disks [ 2 ]; part 5 disks [ 4 ]; part 6 disks [ 5 ]; } CHECK part 1 disks [ 0 ] -> OK { part 2 disks [ 1 ]; part 3 disks [ 2 ]; part 5 disks [ 4 ]; part 6 disks [ 5 ]; } CHECK part 4 disks [ 3 ] -> OK { part 2 disks [ 6 ]; part 3 disks [ 6 ]; part 4 disks [ 6 ]; part 6 disks [ 6 ]; } CHECK part 1 disks [ 6 ] -> OK { part 2 disks [ 6 ]; part 3 disks [ 6 ]; part 4 disks [ 6 ]; part 6 disks [ 6 ]; } CHECK part 5 disks [ 6 ] -> OK { part 2 disks [ 1 ]; part 3 disks [ 2 ]; part 4 disks [ 3 ]; part 6 disks [ 5 ]; } CHECK part 1 disks [ 0 ] -> OK { part 2 disks [ 1 ]; part 3 disks [ 2 ]; part 4 disks [ 3 ]; part 6 disks [ 5 ]; } CHECK part 5 disks [ 4 ] -> OK { part 2 disks [ 6 ]; part 3 disks [ 6 ]; part 4 disks [ 6 ]; part 5 disks [ 6 ]; } CHECK part 1 disks [ 6 ] -> OK { part 2 disks [ 6 ]; part 3 disks [ 6 ]; part 4 disks [ 6 ]; part 5 disks [ 6 ]; } CHECK part 6 disks [ 6 ] -> OK { part 2 disks [ 1 ]; part 3 disks [ 2 ]; part 4 disks [ 3 ]; part 5 disks [ 4 ]; } CHECK part 1 disks [ 0 ] -> OK { part 2 disks [ 1 ]; part 3 disks [ 2 ]; part 4 disks [ 3 ]; part 5 disks [ 4 ]; } CHECK part 6 disks [ 5 ] -> OK { part 1 disks [ 6 ]; part 4 disks [ 6 ]; part 5 disks [ 6 ]; part 6 disks [ 6 ]; } CHECK part 2 disks [ 6 ] -> OK { part 1 disks [ 6 ]; part 4 disks [ 6 ]; part 5 disks [ 6 ]; part 6 disks [ 6 ]; } CHECK part 3 disks [ 6 ] -> OK { part 1 disks [ 0 ]; part 4 disks [ 3 ]; part 5 disks [ 4 ]; part 6 disks [ 5 ]; } CHECK part 2 disks [ 1 ] -> OK { part 1 disks [ 0 ]; part 4 disks [ 3 ]; part 5 disks [ 4 ]; part 6 disks [ 5 ]; } CHECK part 3 disks [ 2 ] -> OK { part 1 disks [ 6 ]; part 3 disks [ 6 ]; part 5 disks [ 6 ]; part 6 disks [ 6 ]; } CHECK part 2 disks [ 6 ] -> OK { part 1 disks [ 6 ]; part 3 disks [ 6 ]; part 5 disks [ 6 ]; part 6 disks [ 6 ]; } CHECK part 4 disks [ 6 ] -> OK { part 1 disks [ 0 ]; part 3 disks [ 2 ]; part 5 disks [ 4 ]; part 6 disks [ 5 ]; } CHECK part 2 disks [ 1 ] -> OK { part 1 disks [ 0 ]; part 3 disks [ 2 ]; part 5 disks [ 4 ]; part 6 disks [ 5 ]; } CHECK part 4 disks [ 3 ] -> OK { part 1 disks [ 6 ]; part 3 disks [ 6 ]; part 4 disks [ 6 ]; part 6 disks [ 6 ]; } CHECK part 2 disks [ 6 ] -> OK { part 1 disks [ 6 ]; part 3 disks [ 6 ]; part 4 disks [ 6 ]; part 6 disks [ 6 ]; } CHECK part 5 disks [ 6 ] -> OK { part 1 disks [ 0 ]; part 3 disks [ 2 ]; part 4 disks [ 3 ]; part 6 disks [ 5 ]; } CHECK part 2 disks [ 1 ] -> OK { part 1 disks [ 0 ]; part 3 disks [ 2 ]; part 4 disks [ 3 ]; part 6 disks [ 5 ]; } CHECK part 5 disks [ 4 ] -> OK { part 1 disks [ 6 ]; part 3 disks [ 6 ]; part 4 disks [ 6 ]; part 5 disks [ 6 ]; } CHECK part 2 disks [ 6 ] -> OK { part 1 disks [ 6 ]; part 3 disks [ 6 ]; part 4 disks [ 6 ]; part 5 disks [ 6 ]; } CHECK part 6 disks [ 6 ] -> OK { part 1 disks [ 0 ]; part 3 disks [ 2 ]; part 4 disks [ 3 ]; part 5 disks [ 4 ]; } CHECK part 2 disks [ 1 ] -> OK { part 1 disks [ 0 ]; part 3 disks [ 2 ]; part 4 disks [ 3 ]; part 5 disks [ 4 ]; } CHECK part 6 disks [ 5 ] -> OK { part 1 disks [ 6 ]; part 2 disks [ 6 ]; part 5 disks [ 6 ]; part 6 disks [ 6 ]; } CHECK part 3 disks [ 6 ] -> OK { part 1 disks [ 6 ]; part 2 disks [ 6 ]; part 5 disks [ 6 ]; part 6 disks [ 6 ]; } CHECK part 4 disks [ 6 ] -> OK { part 1 disks [ 0 ]; part 2 disks [ 1 ]; part 5 disks [ 4 ]; part 6 disks [ 5 ]; } CHECK part 3 disks [ 2 ] -> OK { part 1 disks [ 0 ]; part 2 disks [ 
1 ]; part 5 disks [ 4 ]; part 6 disks [ 5 ]; } CHECK part 4 disks [ 3 ] -> OK { part 1 disks [ 6 ]; part 2 disks [ 6 ]; part 4 disks [ 6 ]; part 6 disks [ 6 ]; } CHECK part 3 disks [ 6 ] -> OK { part 1 disks [ 6 ]; part 2 disks [ 6 ]; part 4 disks [ 6 ]; part 6 disks [ 6 ]; } CHECK part 5 disks [ 6 ] -> OK { part 1 disks [ 0 ]; part 2 disks [ 1 ]; part 4 disks [ 3 ]; part 6 disks [ 5 ]; } CHECK part 3 disks [ 2 ] -> OK { part 1 disks [ 0 ]; part 2 disks [ 1 ]; part 4 disks [ 3 ]; part 6 disks [ 5 ]; } CHECK part 5 disks [ 4 ] -> OK { part 1 disks [ 6 ]; part 2 disks [ 6 ]; part 4 disks [ 6 ]; part 5 disks [ 6 ]; } CHECK part 3 disks [ 6 ] -> OK { part 1 disks [ 6 ]; part 2 disks [ 6 ]; part 4 disks [ 6 ]; part 5 disks [ 6 ]; } CHECK part 6 disks [ 6 ] -> OK { part 1 disks [ 0 ]; part 2 disks [ 1 ]; part 4 disks [ 3 ]; part 5 disks [ 4 ]; } CHECK part 3 disks [ 2 ] -> OK { part 1 disks [ 0 ]; part 2 disks [ 1 ]; part 4 disks [ 3 ]; part 5 disks [ 4 ]; } CHECK part 6 disks [ 5 ] -> OK { part 1 disks [ 6 ]; part 2 disks [ 6 ]; part 3 disks [ 6 ]; part 6 disks [ 6 ]; } CHECK part 4 disks [ 6 ] -> OK { part 1 disks [ 6 ]; part 2 disks [ 6 ]; part 3 disks [ 6 ]; part 6 disks [ 6 ]; } CHECK part 5 disks [ 6 ] -> OK { part 1 disks [ 0 ]; part 2 disks [ 1 ]; part 3 disks [ 2 ]; part 6 disks [ 5 ]; } CHECK part 4 disks [ 3 ] -> OK { part 1 disks [ 0 ]; part 2 disks [ 1 ]; part 3 disks [ 2 ]; part 6 disks [ 5 ]; } CHECK part 5 disks [ 4 ] -> OK { part 1 disks [ 6 ]; part 2 disks [ 6 ]; part 3 disks [ 6 ]; part 5 disks [ 6 ]; } CHECK part 4 disks [ 6 ] -> OK { part 1 disks [ 6 ]; part 2 disks [ 6 ]; part 3 disks [ 6 ]; part 5 disks [ 6 ]; } CHECK part 6 disks [ 6 ] -> OK { part 1 disks [ 0 ]; part 2 disks [ 1 ]; part 3 disks [ 2 ]; part 5 disks [ 4 ]; } CHECK part 4 disks [ 3 ] -> OK { part 1 disks [ 0 ]; part 2 disks [ 1 ]; part 3 disks [ 2 ]; part 5 disks [ 4 ]; } CHECK part 6 disks [ 5 ] -> OK { part 1 disks [ 6 ]; part 2 disks [ 6 ]; part 3 disks [ 6 ]; part 4 disks [ 6 ]; } CHECK part 5 disks [ 6 ] -> OK { part 1 disks [ 6 ]; part 2 disks [ 6 ]; part 3 disks [ 6 ]; part 4 disks [ 6 ]; } CHECK part 6 disks [ 6 ] -> OK { part 1 disks [ 0 ]; part 2 disks [ 1 ]; part 3 disks [ 2 ]; part 4 disks [ 3 ]; } CHECK part 5 disks [ 4 ] -> OK { part 1 disks [ 0 ]; part 2 disks [ 1 ]; part 3 disks [ 2 ]; part 4 disks [ 3 ]; } CHECK part 6 disks [ 5 ] -> OK ERROR: There are erasure restore fails |82.4%| [TM] {BAZEL_UPLOAD} ydb/core/blobstorage/ut_blobstorage/ut_check_integrity/unittest >> TReplicaTest::Unsubscribe [GOOD] >> TReplicaTest::UnsubscribeUnknownPath >> THugeMigration::RollbackMap_HugeBlobs [GOOD] >> TMonitoring::ReregisterTest [GOOD] >> TReplicaTest::UnsubscribeUnknownPath [GOOD] >> TReplicaTest::HandshakeWithStaleGeneration [GOOD] >> TReplicaTest::IdempotencyUpdatesAliveSubscriber |82.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/ut_blobstorage/blob_depot_test_functions.cpp |82.4%| [TM] {asan, default-linux-x86_64, release} ydb/core/actorlib_impl/ut/unittest >> TInterconnectTest::TestTraceIdPassThrough [GOOD] |82.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/ut_blobstorage/blob_depot_test_functions.cpp >> TReplicaTest::Commit [GOOD] >> TReplicaTest::AckNotifications >> TBsVDiskExtreme::Simple3Put1SeqSubsErrorCompaction [GOOD] >> TReplicaCombinationTest::UpdatesCombinationsDomainRoot >> TBsVDiskExtremeHandoffHuge::SimpleHnd2Put1GetCompaction [GOOD] >> TInterconnectTest::OldFormatSuppressVersionCheck [GOOD] |82.4%| [TM] {BAZEL_UPLOAD} ydb/core/actorlib_impl/ut/unittest 
>> TInterconnectTest::TestBlobEventDifferentSizesPreSerializedAndRaw [GOOD] >> TInterconnectTest::TestPingPongThroughSubChannel [GOOD] |82.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/ut_blobstorage/gc.cpp |82.3%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/pdisk/blobstorage_pdisk_ut.cpp |82.3%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/control/ut/ydb-core-control-ut ------- [TM] {asan, default-linux-x86_64, release} ydb/core/actorlib_impl/ut/unittest >> TInterconnectTest::TestSubscribeAndUnsubsribeByEvent [GOOD] Test command err: 2026-01-08T15:23:41.799094Z node 5 :INTERCONNECT DEBUG: interconnect_tcp_proxy.h:181: Proxy [5:1:2048] [node 6] ICP77 @207 (null) -> PendingActivation 2026-01-08T15:23:41.799166Z node 5 :INTERCONNECT INFO: interconnect_tcp_proxy.cpp:43: Proxy [5:1:2048] [node 6] ICP01 ready to work 2026-01-08T15:23:41.799858Z node 6 :INTERCONNECT DEBUG: interconnect_tcp_proxy.h:181: Proxy [6:10:2048] [node 5] ICP77 @207 (null) -> PendingActivation 2026-01-08T15:23:41.799907Z node 6 :INTERCONNECT INFO: interconnect_tcp_proxy.cpp:43: Proxy [6:10:2048] [node 5] ICP01 ready to work 2026-01-08T15:23:41.800153Z node 5 :INTERCONNECT DEBUG: interconnect_tcp_proxy.h:181: Proxy [5:1:2048] [node 6] ICP77 @99 PendingActivation -> PendingNodeInfo 2026-01-08T15:23:41.801335Z node 5 :INTERCONNECT DEBUG: interconnect_tcp_proxy.cpp:120: Proxy [5:1:2048] [node 6] ICP02 configured for host ::1:30214 2026-01-08T15:23:41.801464Z node 5 :INTERCONNECT DEBUG: interconnect_tcp_proxy.h:181: Proxy [5:1:2048] [node 6] ICP77 @489 PendingNodeInfo -> PendingConnection 2026-01-08T15:23:41.801848Z node 5 :INTERCONNECT DEBUG: interconnect_handshake.cpp:747: Handshake [5:21:2058] [node 6] ICH01 starting outgoing handshake 2026-01-08T15:23:41.801995Z node 5 :INTERCONNECT DEBUG: interconnect_resolve.cpp:127: ICR04 Host: ::1, RESOLVED address 2026-01-08T15:23:41.802878Z node 5 :INTERCONNECT DEBUG: interconnect_handshake.cpp:751: Handshake [5:21:2058] [node 6] ICH05 connected to peer 2026-01-08T15:23:41.803523Z node 6 :INTERCONNECT DEBUG: interconnect_tcp_server.cpp:104: ICListener: [0:0:0] ICL04 Accepted from: ::1:39832 2026-01-08T15:23:41.803897Z node 6 :INTERCONNECT DEBUG: interconnect_handshake.cpp:1037: Handshake [6:23:2058] [node 0] ICH02 starting incoming handshake 2026-01-08T15:23:41.804790Z node 5 :INTERCONNECT DEBUG: interconnect_handshake.cpp:653: Handshake [5:21:2058] [node 6] ICH07 SendExBlock ExRequest Protocol: 2 ProgramPID: 30655 ProgramStartTime: 2337059923976 Serial: 1981259845 ReceiverNodeId: 6 SenderActorId: "[5:1981259845:0]" SenderHostName: "::1" ReceiverHostName: "::1" UUID: "Cluster for process with id: 30655" ClusterUUIDs { ClusterUUID: "Cluster for process with id: 30655" AcceptUUID: "Cluster for process with id: 30655" } RequestModernFrame: true RequestAuthOnly: false RequestExtendedTraceFmt: true RequestExternalDataChannel: true HandshakeId: "\204\031\347]\223w\360^?\004\007\356\372\033\021\\\r\366a\240s\333a\024\255\374#\246\217\007\252S" RequestXxhash: true RequestXdcShuffle: true 2026-01-08T15:23:41.805264Z node 6 :INTERCONNECT DEBUG: interconnect_handshake.cpp:653: Handshake [6:23:2058] [node 5] ICH07 ReceiveExBlock ExRequest Protocol: 2 ProgramPID: 30655 ProgramStartTime: 2337059923976 Serial: 1981259845 ReceiverNodeId: 6 SenderActorId: "[5:1981259845:0]" SenderHostName: "::1" ReceiverHostName: "::1" UUID: "Cluster for process with id: 30655" ClusterUUIDs { ClusterUUID: "Cluster for process with id: 30655" AcceptUUID: 
"Cluster for process with id: 30655" } RequestModernFrame: true RequestAuthOnly: false RequestExtendedTraceFmt: true RequestExternalDataChannel: true HandshakeId: "\204\031\347]\223w\360^?\004\007\356\372\033\021\\\r\366a\240s\333a\024\255\374#\246\217\007\252S" RequestXxhash: true RequestXdcShuffle: true 2026-01-08T15:23:41.805321Z node 6 :INTERCONNECT WARN: interconnect_handshake.cpp:542: Handshake [6:23:2058] [node 5] ICH09 Neither CompatibilityInfo nor VersionTag of the peer can be validated, accepting by default 2026-01-08T15:23:41.805619Z node 6 :INTERCONNECT DEBUG: interconnect_tcp_proxy.h:181: Proxy [6:10:2048] [node 5] ICP77 @99 PendingActivation -> PendingNodeInfo 2026-01-08T15:23:41.806655Z node 6 :INTERCONNECT DEBUG: interconnect_tcp_proxy.cpp:120: Proxy [6:10:2048] [node 5] ICP02 configured for host ::1:16796 2026-01-08T15:23:41.806708Z node 6 :INTERCONNECT DEBUG: interconnect_tcp_proxy.cpp:263: Proxy [6:10:2048] [node 5] ICP17 incoming handshake (actor [6:23:2058]) 2026-01-08T15:23:41.806764Z node 6 :INTERCONNECT DEBUG: interconnect_tcp_proxy.h:181: Proxy [6:10:2048] [node 5] ICP77 @489 PendingNodeInfo -> PendingConnection 2026-01-08T15:23:41.806824Z node 6 :INTERCONNECT DEBUG: interconnect_tcp_proxy.cpp:211: Proxy [6:10:2048] [node 5] ICP07 issued incoming handshake reply 2026-01-08T15:23:41.806866Z node 6 :INTERCONNECT INFO: interconnect_tcp_proxy.cpp:220: Proxy [6:10:2048] [node 5] ICP08 No active sessions, becoming PendingConnection 2026-01-08T15:23:41.806900Z node 6 :INTERCONNECT DEBUG: interconnect_tcp_proxy.h:181: Proxy [6:10:2048] [node 5] ICP77 @221 PendingConnection -> PendingConnection 2026-01-08T15:23:41.807288Z node 6 :INTERCONNECT DEBUG: interconnect_handshake.cpp:653: Handshake [6:23:2058] [node 5] ICH07 SendExBlock ExReply Success { Protocol: 2 ProgramPID: 30655 ProgramStartTime: 2337073716952 Serial: 2437421578 SenderActorId: "[6:2437421578:0]" ClusterUUIDs { ClusterUUID: "Cluster for process with id: 30655" AcceptUUID: "Cluster for process with id: 30655" } StartEncryption: false UseModernFrame: true AuthOnly: false UseExtendedTraceFmt: true UseExternalDataChannel: true UseXxhash: true UseXdcShuffle: true RdmaErr: "Rdma is not ready on the incomming side" } 2026-01-08T15:23:41.807710Z node 5 :INTERCONNECT DEBUG: interconnect_handshake.cpp:653: Handshake [5:21:2058] [node 6] ICH07 ReceiveExBlock ExReply Success { Protocol: 2 ProgramPID: 30655 ProgramStartTime: 2337073716952 Serial: 2437421578 SenderActorId: "[6:2437421578:0]" ClusterUUIDs { ClusterUUID: "Cluster for process with id: 30655" AcceptUUID: "Cluster for process with id: 30655" } StartEncryption: false UseModernFrame: true AuthOnly: false UseExtendedTraceFmt: true UseExternalDataChannel: true UseXxhash: true UseXdcShuffle: true RdmaErr: "Rdma is not ready on the incomming side" } 2026-01-08T15:23:41.807772Z node 5 :INTERCONNECT WARN: interconnect_handshake.cpp:542: Handshake [5:21:2058] [node 6] ICH09 Neither CompatibilityInfo nor VersionTag of the peer can be validated, accepting by default 2026-01-08T15:23:41.807878Z node 5 :INTERCONNECT DEBUG: interconnect_resolve.cpp:127: ICR04 Host: ::1, RESOLVED address 2026-01-08T15:23:41.808484Z node 5 :INTERCONNECT DEBUG: interconnect_handshake.cpp:653: Handshake [5:21:2058] [node 6] ICH07 SendExBlock ExternalDataChannelParams HandshakeId: "\204\031\347]\223w\360^?\004\007\356\372\033\021\\\r\366a\240s\333a\024\255\374#\246\217\007\252S" 2026-01-08T15:23:41.808594Z node 5 :INTERCONNECT INFO: interconnect_handshake.cpp:414: Handshake [5:21:2058] [node 6] 
ICH04 handshake succeeded 2026-01-08T15:23:41.808891Z node 5 :INTERCONNECT INFO: interconnect_tcp_proxy.cpp:339: Proxy [5:1:2048] [node 6] ICP20 outgoing handshake succeeded 2026-01-08T15:23:41.808934Z node 5 :INTERCONNECT DEBUG: interconnect_tcp_proxy.h:461: Proxy [5:1:2048] [node 6] ICP052 dropped outgoing handshake: [5:21:2058] poison: false 2026-01-08T15:23:41.808978Z node 5 :INTERCONNECT DEBUG: interconnect_tcp_proxy.h:181: Proxy [5:1:2048] [node 6] ICP77 @351 PendingConnection -> StateWork 2026-01-08T15:23:41.809164Z node 5 :INTERCONNECT INFO: interconnect_tcp_proxy.cpp:378: Proxy [5:1:2048] [node 6] ICP22 created new session: [5:25:2048] 2026-01-08T15:23:41.809216Z node 5 :INTERCONNECT_SESSION INFO: interconnect_tcp_session.cpp:278: Session [5:25:2048] [node 6] ICS09 handshake done sender: [5:21:2058] self: [5:1981259845:0] peer: [6:2437421578:0] socket: 24 qp: -1 2026-01-08T15:23:41.809272Z node 5 :INTERCONNECT_SESSION INFO: interconnect_tcp_session.cpp:303: Session [5:25:2048] [node 6] ICS10 traffic start 2026-01-08T15:23:41.809355Z node 5 :INTERCONNECT_SESSION DEBUG: interconnect_tcp_session.cpp:319: Session [5:25:2048] [node 6] ICS11 registering socket in PollerActor 2026-01-08T15:23:41.809405Z node 5 :INTERCONNECT_SESSION DEBUG: interconnect_tcp_session.cpp:972: Session [5:25:2048] [node 6] ICS23 confirm count: 0 2026-01-08T15:23:41.809448Z node 5 :INTERCONNECT_SESSION DEBUG: interconnect_tcp_session.cpp:358: Session [5:25:2048] [node 6] ICS06 rewind SendQueue size# 0 LastConfirmed# 0 NextSerial# 1 2026-01-08T15:23:41.809498Z node 5 :INTERCONNECT_SESSION DEBUG: interconnect_tcp_session.cpp:972: Session [5:25:2048] [node 6] ICS23 confirm count: 0 2026-01-08T15:23:41.809545Z node 5 :INTERCONNECT_SESSION DEBUG: interconnect_tcp_session.cpp:241: Session [5:25:2048] [node 6] ICS04 subscribe for session state for [5:19:2057] 2026-01-08T15:23:41.809654Z node 6 :INTERCONNECT DEBUG: interconnect_tcp_server.cpp:104: ICListener: [0:0:0] ICL04 Accepted from: ::1:39844 2026-01-08T15:23:41.810009Z node 6 :INTERCONNECT DEBUG: interconnect_handshake.cpp:1037: Handshake [6:27:2059] [node 0] ICH02 starting incoming handshake 2026-01-08T15:23:41.810125Z node 5 :INTERCONNECT_SESSION DEBUG: interconnect_tcp_input_session.cpp:252: InputSession [5:26:2048] [node 6] ICIS01 InputSession created 2026-01-08T15:23:41.810793Z node 5 :INTERCONNECT_SESSION DEBUG: interconnect_tcp_input_session.cpp:341: InputSession [5:26:2048] [node 6] ICIS02 ReceiveData called 2026-01-08T15:23:41.810880Z node 5 :INTERCONNECT_SESSION DEBUG: interconnect_tcp_input_session.cpp:967: InputSession [5:26:2048] [node 6] ICIS12 Read recvres# -11 num# 1 err# 2026-01-08T15:23:41.811152Z node 6 :INTERCONNECT INFO: interconnect_handshake.cpp:414: Handshake [6:23:2058] [node 5] ICH04 handshake succeeded 2026-01-08T15:23:41.811312Z node 6 :INTERCONNECT INFO: interconnect_tcp_proxy.cpp:335: Proxy [6:10:2048] [node 5] ICP19 incoming handshake succeeded 2026-01-08T15:23:41.811355Z node 6 :INTERCONNECT DEBUG: interconnect_tcp_proxy.h:446: Proxy [6:10:2048] [node 5] ICP111 dropped incoming handshake: [6:23:2058] poison: false 2026-01-08T15:23:41.811405Z node 6 :INTERCONNECT DEBUG: interconnect_tcp_proxy.h:181: Proxy [6:10:2048] [node 5] ICP77 @351 PendingConnection -> StateWork 2026-01-08T15:23:41.811499Z node 6 :INTERCONNECT INFO: interconnect_tcp_proxy.cpp:378: Proxy [6:10:2048] [node 5] ICP22 created new session: [6:28:2048] 2026-01-08T15:23:41.811552Z node 6 :INTERCONNECT_SESSION INFO: interconnect_tcp_session.cpp:278: Session [6:28:2048] 
[node 5] ICS09 handshake done sender: [6:23:2058] self: [6:2437421578:0] peer: [5:1981259845:0] socket: 25 qp: -1 2026-01-08T15:23:41.811585Z node 6 :INTERCONNECT_SESSION INFO: interconnect_tcp_session.cpp:303: Session [6:28:2048] [node 5] ICS10 traffic start 2026-01-08T15:23:41.811662Z node 6 :INTERCONNECT_SESSION DEBUG: interconnect_tcp_session.cpp:319: Session [6:28:2048] [node 5] ICS11 registering socket in PollerActor 2026-01-08T15:23:41.811723Z node 6 :INTERCONNECT_SESSION DEBUG: interconnect_tcp_session.cpp:972: Session [6:28:2048] [node 5] ICS23 confirm count: 0 2026-01-08T15:23:41.811752Z node 6 :INTERCONNECT_SESSION DEBUG: interconnect_tcp_session.cpp:358: Session [6:28:2048] [node 5] ICS06 rewind SendQueue size# 0 LastConfirmed# 0 NextSerial# 1 2026-01-08T15:23:41.811781Z node 6 :INTERCONNECT_SESSION DEBUG: interconnect_tcp_session.cpp:972: Session [6:28:2048] [node 5] ICS23 confirm count: 0 2026-01-08T15:23:41.811815Z node 5 :INTERCONNECT_SESSION DEBUG: interconnect_tcp_input_session.cpp:341: InputSession [5:26:2048] [node 6] ICIS02 ReceiveData called 2026-01-08T15:23:41.811859Z node 5 :INTERCONNECT_SESSION DEBUG: interconnect_tcp_input_session.cpp:967: InputSession [5:26:2048] [node 6] ICIS12 Read recvres# -11 num# 1 err# 2026-01-08T15:23:41.811933Z node 6 :INTERCONNECT_SESSION DEBUG: interconnect_tcp_input_session.cpp:252: InputSession [6:29:2048] [node 5] ICIS01 InputSession created 2026-01-08T15:23:41.811974Z node 5 :INTERCONNECT_SESSION DEBUG: interconnect_tcp_input_session.cpp:341: InputSession [5:26:2048] [node 6] ICIS02 ReceiveData called 2026-01-08T15:23:41.812003Z node 5 :INTERCONNECT_SESSION DEBUG: interconnect_tcp_input_session.cpp:967: InputSession [5:26:2048] [node 6] ICIS12 Read recvres# -11 num# 1 err# 2026-01-08T15:23:41.812056Z node 6 :INTERCONNECT_SESSION DEBUG: interconnect_tcp_input_session.cpp:341: InputSession [6:29:2048] [node 5] ICIS02 ReceiveData called 2026-01-08T1 ... tcp_proxy.cpp:263: Proxy [5:1:2048] [node 6] ICP17 incoming handshake (actor [5:42:2061]) 2026-01-08T15:23:41.839764Z node 5 :INTERCONNECT DEBUG: interconnect_tcp_proxy.h:446: Proxy [5:1:2048] [node 6] ICP111 dropped incoming handshake: [5:35:2060] poison: true 2026-01-08T15:23:41.839860Z node 5 :INTERCONNECT INFO: interconnect_tcp_proxy.h:481: Proxy [5:1:2048] [node 6] ICP04 terminating current session as we are negotiating a new one 2026-01-08T15:23:41.839922Z node 5 :INTERCONNECT_SESSION INFO: interconnect_tcp_session.cpp:105: Session [5:25:2048] [node 6] ICS01 socket: -1 reason# NewSession 2026-01-08T15:23:41.839982Z node 5 :INTERCONNECT INFO: interconnect_tcp_proxy.cpp:543: Proxy [5:1:2048] [node 6] ICP30 unregister session Session# [5:25:2048] VirtualId# [5:1981259845:0] 2026-01-08T15:23:41.840037Z node 5 :INTERCONNECT DEBUG: interconnect_tcp_proxy.h:181: Proxy [5:1:2048] [node 6] ICP77 @207 StateWork -> PendingActivation 2026-01-08T15:23:41.840122Z node 5 :INTERCONNECT_SESSION DEBUG: interconnect_channel.cpp:461: OutputChannel 0 [node 6] ICOCH89 Notyfying about Undelivered messages! 
NotYetConfirmed size: 1, Queue size: 0 2026-01-08T15:23:41.840187Z node 5 :INTERCONNECT DEBUG: interconnect_tcp_proxy.h:181: Proxy [5:1:2048] [node 6] ICP77 @489 PendingActivation -> PendingConnection 2026-01-08T15:23:41.840245Z node 5 :INTERCONNECT DEBUG: interconnect_tcp_proxy.cpp:211: Proxy [5:1:2048] [node 6] ICP07 issued incoming handshake reply 2026-01-08T15:23:41.840295Z node 5 :INTERCONNECT INFO: interconnect_tcp_proxy.cpp:220: Proxy [5:1:2048] [node 6] ICP08 No active sessions, becoming PendingConnection 2026-01-08T15:23:41.840339Z node 5 :INTERCONNECT DEBUG: interconnect_tcp_proxy.h:181: Proxy [5:1:2048] [node 6] ICP77 @221 PendingConnection -> PendingConnection 2026-01-08T15:23:41.841854Z node 5 :INTERCONNECT DEBUG: interconnect_handshake.cpp:653: Handshake [5:42:2061] [node 6] ICH07 SendExBlock ExReply Success { Protocol: 2 ProgramPID: 30655 ProgramStartTime: 2337059923976 Serial: 1981259846 SenderActorId: "[5:1981259846:0]" ClusterUUIDs { ClusterUUID: "Cluster for process with id: 30655" AcceptUUID: "Cluster for process with id: 30655" } StartEncryption: false UseModernFrame: true AuthOnly: false UseExtendedTraceFmt: true UseExternalDataChannel: true UseXxhash: true UseXdcShuffle: true RdmaErr: "Rdma is not ready on the incomming side" } 2026-01-08T15:23:41.842457Z node 6 :INTERCONNECT DEBUG: interconnect_handshake.cpp:653: Handshake [6:40:2063] [node 5] ICH07 ReceiveExBlock ExReply Success { Protocol: 2 ProgramPID: 30655 ProgramStartTime: 2337059923976 Serial: 1981259846 SenderActorId: "[5:1981259846:0]" ClusterUUIDs { ClusterUUID: "Cluster for process with id: 30655" AcceptUUID: "Cluster for process with id: 30655" } StartEncryption: false UseModernFrame: true AuthOnly: false UseExtendedTraceFmt: true UseExternalDataChannel: true UseXxhash: true UseXdcShuffle: true RdmaErr: "Rdma is not ready on the incomming side" } 2026-01-08T15:23:41.842545Z node 6 :INTERCONNECT WARN: interconnect_handshake.cpp:542: Handshake [6:40:2063] [node 5] ICH09 Neither CompatibilityInfo nor VersionTag of the peer can be validated, accepting by default 2026-01-08T15:23:41.842747Z node 6 :INTERCONNECT DEBUG: interconnect_resolve.cpp:127: ICR04 Host: ::1, RESOLVED address 2026-01-08T15:23:41.843748Z node 5 :INTERCONNECT DEBUG: interconnect_tcp_server.cpp:104: ICListener: [0:0:0] ICL04 Accepted from: ::1:57066 2026-01-08T15:23:41.844210Z node 5 :INTERCONNECT DEBUG: interconnect_handshake.cpp:1037: Handshake [5:44:2062] [node 0] ICH02 starting incoming handshake 2026-01-08T15:23:41.846833Z node 6 :INTERCONNECT DEBUG: interconnect_handshake.cpp:653: Handshake [6:40:2063] [node 5] ICH07 SendExBlock ExternalDataChannelParams HandshakeId: "\364\237\214/\353\363D\013\366\257\026\346\201\\\014\317\305\006\312\262\0217\006\330\351\231\0138\3738@m" 2026-01-08T15:23:41.846986Z node 6 :INTERCONNECT INFO: interconnect_handshake.cpp:414: Handshake [6:40:2063] [node 5] ICH04 handshake succeeded 2026-01-08T15:23:41.847267Z node 6 :INTERCONNECT INFO: interconnect_tcp_proxy.cpp:339: Proxy [6:10:2048] [node 5] ICP20 outgoing handshake succeeded 2026-01-08T15:23:41.847329Z node 6 :INTERCONNECT DEBUG: interconnect_tcp_proxy.h:461: Proxy [6:10:2048] [node 5] ICP052 dropped outgoing handshake: [6:40:2063] poison: false 2026-01-08T15:23:41.847398Z node 6 :INTERCONNECT DEBUG: interconnect_tcp_proxy.h:181: Proxy [6:10:2048] [node 5] ICP77 @351 PendingConnection -> StateWork 2026-01-08T15:23:41.847547Z node 6 :INTERCONNECT INFO: interconnect_tcp_proxy.cpp:378: Proxy [6:10:2048] [node 5] ICP22 created new session: [6:45:2048] 
2026-01-08T15:23:41.847614Z node 6 :INTERCONNECT_SESSION INFO: interconnect_tcp_session.cpp:278: Session [6:45:2048] [node 5] ICS09 handshake done sender: [6:40:2063] self: [6:2437421579:0] peer: [5:1981259846:0] socket: 24 qp: -1 2026-01-08T15:23:41.847694Z node 6 :INTERCONNECT_SESSION INFO: interconnect_tcp_session.cpp:303: Session [6:45:2048] [node 5] ICS10 traffic start 2026-01-08T15:23:41.847795Z node 6 :INTERCONNECT_SESSION DEBUG: interconnect_tcp_session.cpp:319: Session [6:45:2048] [node 5] ICS11 registering socket in PollerActor 2026-01-08T15:23:41.847877Z node 6 :INTERCONNECT_SESSION DEBUG: interconnect_tcp_session.cpp:972: Session [6:45:2048] [node 5] ICS23 confirm count: 0 2026-01-08T15:23:41.847927Z node 6 :INTERCONNECT_SESSION DEBUG: interconnect_tcp_session.cpp:358: Session [6:45:2048] [node 5] ICS06 rewind SendQueue size# 0 LastConfirmed# 0 NextSerial# 1 2026-01-08T15:23:41.847984Z node 6 :INTERCONNECT_SESSION DEBUG: interconnect_tcp_session.cpp:972: Session [6:45:2048] [node 5] ICS23 confirm count: 0 2026-01-08T15:23:41.848046Z node 6 :INTERCONNECT_SESSION DEBUG: interconnect_tcp_session.cpp:164: Session [6:45:2048] [node 5] ICS02 send event from: [6:20:2057] to: [5:19:2057] 2026-01-08T15:23:41.855204Z node 5 :INTERCONNECT INFO: interconnect_handshake.cpp:414: Handshake [5:42:2061] [node 6] ICH04 handshake succeeded 2026-01-08T15:23:41.858652Z node 6 :INTERCONNECT_SESSION DEBUG: interconnect_tcp_input_session.cpp:252: InputSession [6:46:2048] [node 5] ICIS01 InputSession created 2026-01-08T15:23:41.858746Z node 5 :INTERCONNECT INFO: interconnect_tcp_proxy.cpp:335: Proxy [5:1:2048] [node 6] ICP19 incoming handshake succeeded 2026-01-08T15:23:41.858804Z node 5 :INTERCONNECT DEBUG: interconnect_tcp_proxy.h:446: Proxy [5:1:2048] [node 6] ICP111 dropped incoming handshake: [5:42:2061] poison: false 2026-01-08T15:23:41.858855Z node 5 :INTERCONNECT DEBUG: interconnect_tcp_proxy.h:181: Proxy [5:1:2048] [node 6] ICP77 @351 PendingConnection -> StateWork 2026-01-08T15:23:41.858963Z node 5 :INTERCONNECT INFO: interconnect_tcp_proxy.cpp:378: Proxy [5:1:2048] [node 6] ICP22 created new session: [5:47:2048] 2026-01-08T15:23:41.859006Z node 5 :INTERCONNECT_SESSION INFO: interconnect_tcp_session.cpp:278: Session [5:47:2048] [node 6] ICS09 handshake done sender: [5:42:2061] self: [5:1981259846:0] peer: [6:2437421579:0] socket: 25 qp: -1 2026-01-08T15:23:41.859053Z node 5 :INTERCONNECT_SESSION INFO: interconnect_tcp_session.cpp:303: Session [5:47:2048] [node 6] ICS10 traffic start 2026-01-08T15:23:41.859107Z node 5 :INTERCONNECT_SESSION DEBUG: interconnect_tcp_session.cpp:319: Session [5:47:2048] [node 6] ICS11 registering socket in PollerActor 2026-01-08T15:23:41.859154Z node 5 :INTERCONNECT_SESSION DEBUG: interconnect_tcp_session.cpp:972: Session [5:47:2048] [node 6] ICS23 confirm count: 0 2026-01-08T15:23:41.859232Z node 5 :INTERCONNECT_SESSION DEBUG: interconnect_tcp_session.cpp:358: Session [5:47:2048] [node 6] ICS06 rewind SendQueue size# 0 LastConfirmed# 0 NextSerial# 1 2026-01-08T15:23:41.859268Z node 5 :INTERCONNECT_SESSION DEBUG: interconnect_tcp_session.cpp:972: Session [5:47:2048] [node 6] ICS23 confirm count: 0 2026-01-08T15:23:41.859450Z node 6 :INTERCONNECT_SESSION DEBUG: interconnect_tcp_session.cpp:961: Session [6:45:2048] [node 5] ICS22 outgoing packet Serial# 1 Confirm# 0 DataSize# 84 RdmaPayload# 0 InflightDataAmount# 84 RdmaInflightDataAmount# 0 2026-01-08T15:23:41.859559Z node 6 :INTERCONNECT_SESSION DEBUG: interconnect_tcp_session.cpp:972: Session [6:45:2048] [node 5] 
ICS23 confirm count: 0 2026-01-08T15:23:41.859620Z node 5 :INTERCONNECT_SESSION DEBUG: interconnect_tcp_input_session.cpp:252: InputSession [5:48:2048] [node 6] ICIS01 InputSession created 2026-01-08T15:23:41.859653Z node 6 :INTERCONNECT_SESSION DEBUG: interconnect_tcp_input_session.cpp:341: InputSession [6:46:2048] [node 5] ICIS02 ReceiveData called 2026-01-08T15:23:41.859712Z node 6 :INTERCONNECT_SESSION DEBUG: interconnect_tcp_input_session.cpp:967: InputSession [6:46:2048] [node 5] ICIS12 Read recvres# -11 num# 1 err# 2026-01-08T15:23:41.859757Z node 5 :INTERCONNECT_SESSION DEBUG: interconnect_tcp_input_session.cpp:341: InputSession [5:48:2048] [node 6] ICIS02 ReceiveData called 2026-01-08T15:23:41.859809Z node 5 :INTERCONNECT_SESSION DEBUG: interconnect_tcp_input_session.cpp:967: InputSession [5:48:2048] [node 6] ICIS12 Read recvres# 106 num# 1 err# 2026-01-08T15:23:41.859889Z node 5 :INTERCONNECT_SESSION DEBUG: interconnect_tcp_input_session.cpp:967: InputSession [5:48:2048] [node 6] ICIS12 Read recvres# -11 num# 1 err# 2026-01-08T15:23:41.859910Z node 5 :INTERCONNECT_SESSION DEBUG: interconnect_tcp_input_session.cpp:967: InputSession [5:48:2048] [node 6] ICIS12 Read recvres# -11 num# 1 err# 2026-01-08T15:23:41.859953Z node 6 :INTERCONNECT_SESSION DEBUG: interconnect_tcp_input_session.cpp:341: InputSession [6:46:2048] [node 5] ICIS02 ReceiveData called 2026-01-08T15:23:41.859980Z node 6 :INTERCONNECT_SESSION DEBUG: interconnect_tcp_input_session.cpp:967: InputSession [6:46:2048] [node 5] ICIS12 Read recvres# -11 num# 1 err# 2026-01-08T15:23:41.860006Z node 5 :INTERCONNECT_SESSION DEBUG: interconnect_tcp_input_session.cpp:341: InputSession [5:48:2048] [node 6] ICIS02 ReceiveData called 2026-01-08T15:23:41.860025Z node 5 :INTERCONNECT_SESSION DEBUG: interconnect_tcp_input_session.cpp:967: InputSession [5:48:2048] [node 6] ICIS12 Read recvres# -11 num# 1 err# 2026-01-08T15:23:41.860048Z node 6 :INTERCONNECT_SESSION DEBUG: interconnect_tcp_input_session.cpp:341: InputSession [6:46:2048] [node 5] ICIS02 ReceiveData called 2026-01-08T15:23:41.860077Z node 6 :INTERCONNECT_SESSION DEBUG: interconnect_tcp_input_session.cpp:967: InputSession [6:46:2048] [node 5] ICIS12 Read recvres# -11 num# 1 err# 2026-01-08T15:23:41.860107Z node 5 :INTERCONNECT_SESSION DEBUG: interconnect_tcp_input_session.cpp:341: InputSession [5:48:2048] [node 6] ICIS02 ReceiveData called 2026-01-08T15:23:41.860127Z node 5 :INTERCONNECT_SESSION DEBUG: interconnect_tcp_input_session.cpp:967: InputSession [5:48:2048] [node 6] ICIS12 Read recvres# -11 num# 1 err# 2026-01-08T15:23:41.860181Z node 5 :INTERCONNECT_SESSION DEBUG: interconnect_tcp_session.cpp:972: Session [5:47:2048] [node 6] ICS23 confirm count: 0 2026-01-08T15:23:41.860208Z node 5 :INTERCONNECT_SESSION DEBUG: interconnect_tcp_session.cpp:972: Session [5:47:2048] [node 6] ICS23 confirm count: 0 2026-01-08T15:23:41.860249Z node 6 :INTERCONNECT_SESSION DEBUG: interconnect_tcp_session.cpp:972: Session [6:45:2048] [node 5] ICS23 confirm count: 0 2026-01-08T15:23:41.860266Z node 6 :INTERCONNECT_SESSION DEBUG: interconnect_tcp_session.cpp:972: Session [6:45:2048] [node 5] ICS23 confirm count: 0 2026-01-08T15:23:41.860306Z node 6 :INTERCONNECT_SESSION DEBUG: interconnect_tcp_session.cpp:972: Session [6:45:2048] [node 5] ICS23 confirm count: 0 2026-01-08T15:23:41.860340Z node 6 :INTERCONNECT_SESSION DEBUG: interconnect_tcp_session.cpp:972: Session [6:45:2048] [node 5] ICS23 confirm count: 0 2026-01-08T15:23:41.860366Z node 5 :INTERCONNECT_SESSION DEBUG: 
interconnect_tcp_session.cpp:972: Session [5:47:2048] [node 6] ICS23 confirm count: 0 2026-01-08T15:23:41.860385Z node 5 :INTERCONNECT_SESSION DEBUG: interconnect_tcp_session.cpp:972: Session [5:47:2048] [node 6] ICS23 confirm count: 0 |82.4%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/control/ut/ydb-core-control-ut |82.4%| [LD] {RESULT} $(B)/ydb/core/control/ut/ydb-core-control-ut >> TReplicaTest::IdempotencyUpdatesAliveSubscriber [GOOD] >> TReplicaTest::IdempotencyUpdatesVariant2 >> VDiskBalancing::TestStopOneNode_Mirror3dc_HugeBlob [GOOD] >> TReplicaTest::Handshake >> TReplicaTest::AckNotifications [GOOD] >> TReplicaTest::AckNotificationsUponPathRecreation >> TReplicaTest::CommitWithoutHandshake >> TReplicaCombinationTest::UpdatesCombinationsDomainRoot [GOOD] >> TReplicaCombinationTest::UpdatesCombinationsMigratedPath |82.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/ut_blobstorage/gc.cpp |82.4%| [TM] {BAZEL_UPLOAD} ydb/core/actorlib_impl/ut/unittest |82.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/pdisk/blobstorage_pdisk_ut.cpp >> TReplicaTest::IdempotencyUpdatesVariant2 [GOOD] >> TReplicaTest::AckNotificationsUponPathRecreation [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/scheme_board/ut_replica/unittest >> TReplicaTest::UnsubscribeUnknownPath [GOOD] Test command err: 2026-01-08T15:23:42.949845Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:760: [1:7:2054] Handle NKikimrSchemeBoard.TEvHandshake { Owner: 1 Generation: 1 }: sender# [1:8:2055] 2026-01-08T15:23:42.949935Z node 1 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:778: [1:7:2054] Successful handshake: owner# 1, generation# 1 2026-01-08T15:23:42.950056Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1070: [1:7:2054] Handle NKikimrSchemeBoard.TEvSubscribe { Path: path DomainOwnerId: 0 }: sender# [1:9:2056] 2026-01-08T15:23:42.950111Z node 1 :SCHEME_BOARD_REPLICA INFO: replica.cpp:529: [1:7:2054] Upsert description: path# path 2026-01-08T15:23:42.950237Z node 1 :SCHEME_BOARD_REPLICA INFO: replica.cpp:655: [1:7:2054] Subscribe: subscriber# [1:9:2056], path# path, domainOwnerId# 0, capabilities# 2026-01-08T15:23:42.950362Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1070: [1:7:2054] Handle NKikimrSchemeBoard.TEvSubscribe { Path: path DomainOwnerId: 0 }: sender# [1:10:2057] 2026-01-08T15:23:42.950416Z node 1 :SCHEME_BOARD_REPLICA INFO: replica.cpp:655: [1:7:2054] Subscribe: subscriber# [1:10:2057], path# path, domainOwnerId# 0, capabilities# 2026-01-08T15:23:42.950581Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:791: [1:7:2054] Handle NKikimrSchemeBoard.TEvUpdate { Owner: 1 Generation: 1 }: sender# [1:8:2055], cookie# 0, event size# 72 2026-01-08T15:23:42.950622Z node 1 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:831: [1:7:2054] Update description: path# path, pathId# [OwnerId: 1, LocalPathId: 1], deletion# false 2026-01-08T15:23:42.958272Z node 1 :SCHEME_BOARD_REPLICA INFO: replica.cpp:559: [1:7:2054] Upsert description: path# path, pathId# [OwnerId: 1, LocalPathId: 1], pathDescription# {Status StatusSuccess, Path path, PathId [OwnerId: 1, LocalPathId: 1], PathVersion 1, SubdomainPathId , PathAbandonedTenantsSchemeShards size 0, DescribeSchemeResultSerialized size 30} 2026-01-08T15:23:42.969115Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1084: [1:7:2054] Handle NKikimrSchemeBoard.TEvUnsubscribe { Path: path }: sender# [1:9:2056] 2026-01-08T15:23:42.969247Z node 1 :SCHEME_BOARD_REPLICA INFO: replica.cpp:671: [1:7:2054] Unsubscribe: subscriber# [1:9:2056], path# path 
2026-01-08T15:23:42.969375Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:791: [1:7:2054] Handle NKikimrSchemeBoard.TEvUpdate { Owner: 1 Generation: 1 }: sender# [1:8:2055], cookie# 0, event size# 40 2026-01-08T15:23:42.969427Z node 1 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:831: [1:7:2054] Update description: path# path, pathId# [OwnerId: 1, LocalPathId: 1], deletion# true 2026-01-08T15:23:42.969468Z node 1 :SCHEME_BOARD_REPLICA INFO: replica.cpp:584: [1:7:2054] Delete description: path# path, pathId# [OwnerId: 1, LocalPathId: 1] 2026-01-08T15:23:43.258428Z node 2 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1084: [2:7:2054] Handle NKikimrSchemeBoard.TEvUnsubscribe { Path: path }: sender# [2:8:2055] >> TReplicaTest::Handshake [GOOD] >> TReplicaTest::DoubleUnsubscribe >> TBsVDiskExtremeHuge::Simple3Put1SeqGet2Compaction [GOOD] >> TReplicaTest::CommitWithoutHandshake [GOOD] >> TReplicaTest::CommitWithStaleGeneration ------- [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/ut_vdisk/unittest >> TMonitoring::ReregisterTest [GOOD] Test command err: RUN TEST SendData iteration SendData iteration SendData iteration SendData iteration SendData iteration SendData iteration SendData iteration SendData iteration SendData iteration SendData iteration >> NameserviceConfigValidatorTests::TestLongWalleDC [GOOD] >> NameserviceConfigValidatorTests::TestModifyClusterUUID [GOOD] >> NameserviceConfigValidatorTests::TestModifyIdForAddrPort [GOOD] >> NameserviceConfigValidatorTests::TestModifyHost [GOOD] >> TReplicaTest::DoubleUnsubscribe [GOOD] >> TReplicaTest::DoubleDelete |82.4%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/ut_vdisk/unittest >> TBsVDiskExtremeHandoffHuge::SimpleHnd2Put1GetCompaction [GOOD] |82.4%| [TM] {BAZEL_UPLOAD} ydb/core/tx/scheme_board/ut_replica/unittest >> TReplicaCombinationTest::UpdatesCombinationsMigratedPath [GOOD] >> TReplicaCombinationTest::MigratedPathRecreation |82.4%| [TM] {asan, default-linux-x86_64, release} ydb/core/actorlib_impl/ut/unittest >> TInterconnectTest::TestPingPongThroughSubChannel [GOOD] |82.4%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/ut_vdisk/unittest >> TBsVDiskExtreme::Simple3Put1SeqSubsErrorCompaction [GOOD] |82.4%| [TM] {BAZEL_UPLOAD} ydb/core/blobstorage/ut_vdisk/unittest >> CheckIntegrityMirror3dc::PlacementOkHandoff [GOOD] >> CheckIntegrityMirror3dc::PlacementMissingParts >> TReplicaTest::CommitWithStaleGeneration [GOOD] >> TReplicaTest::Delete ------- [TM] {asan, default-linux-x86_64, release} ydb/core/actorlib_impl/ut/unittest >> TInterconnectTest::OldFormatSuppressVersionCheck [GOOD] Test command err: 2026-01-08T15:23:41.798337Z node 4 :INTERCONNECT WARN: interconnect_handshake.cpp:542: Handshake [4:22:2057] [node 3] ICH09 Neither CompatibilityInfo nor VersionTag of the peer can be validated, accepting by default 2026-01-08T15:23:42.439383Z node 5 :INTERCONNECT ERROR: interconnect_tcp_proxy.h:262: Proxy [5:1:2048] [node 6] ICP35 event processing took too much time PendingNodeInfo:262(21073630ns) {Configure:104(21069399ns) {ProcessConfigured:135(608946ns) {StartInitialHandshake:157(605577ns) {DropHandshakes:470(2149ns) {DropIncomingHandshake:442(375ns) DropOutgoingHandshake:457(480ns)} GenerateSessionVirtualId:992(1056ns) PrepareNewSessionHandshake:477(2100ns) {SwitchToState:179(863ns)}}}}} 2026-01-08T15:23:42.450125Z node 5 :INTERCONNECT WARN: interconnect_handshake.cpp:542: Handshake [5:20:2058] [node 6] ICH09 Neither CompatibilityInfo nor VersionTag of the peer can be validated, accepting by 
default 2026-01-08T15:23:43.005184Z node 8 :INTERCONNECT WARN: interconnect_handshake.cpp:542: Handshake [8:22:2057] [node 7] ICH09 Neither CompatibilityInfo nor VersionTag of the peer can be validated, accepting by default 2026-01-08T15:23:43.007720Z node 7 :INTERCONNECT WARN: interconnect_handshake.cpp:542: Handshake [7:20:2058] [node 8] ICH09 Neither CompatibilityInfo nor VersionTag of the peer can be validated, accepting by default >> TRegistryTests::TestLock [GOOD] >> TRegistryTests::TestClasses [GOOD] >> TRegistryTests::TestDisableEnable [GOOD] >> TBsVDiskExtreme::Simple3Put1GetMissingPartCompaction [GOOD] >> TestProtocols::TestHTTPCollectedVerySlow [GOOD] >> TestProtocols::TestHTTPRequest >> TReplicaTest::DoubleDelete [GOOD] |82.4%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/ut_blobstorage/monitoring.cpp ------- [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/ut_blobstorage/ut_balancing/unittest >> VDiskBalancing::TestStopOneNode_Mirror3dc_HugeBlob [GOOD] Test command err: RandomSeed# 5457414421863492511 SEND TEvPut with key [1:1:1:0:0:533504:0] TEvPutResult: TEvPutResult {Id# [1:1:1:0:0:533504:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999988} SEND TEvPut with key [1:1:2:0:0:533504:0] 2026-01-08T15:23:41.231749Z 1 00h01m00.010512s :PIPE_SERVER ERROR: [72057594037932033] NodeDisconnected NodeId# 2 TEvPutResult: TEvPutResult {Id# [1:1:2:0:0:533504:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999988} Start compaction Finish compaction |82.4%| [TM] {BAZEL_UPLOAD} ydb/core/blobstorage/ut_vdisk/unittest |82.4%| [TM] {asan, default-linux-x86_64, release} ydb/core/actorlib_impl/ut/unittest >> TInterconnectTest::TestBlobEventDifferentSizesPreSerializedAndRaw [GOOD] >> TReplicaTest::Merge >> TReplicaCombinationTest::MigratedPathRecreation [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/scheme_board/ut_replica/unittest >> TReplicaTest::IdempotencyUpdatesVariant2 [GOOD] Test command err: 2026-01-08T15:23:43.100833Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:760: [1:7:2054] Handle NKikimrSchemeBoard.TEvHandshake { Owner: 1 Generation: 2 }: sender# [1:8:2055] 2026-01-08T15:23:43.100899Z node 1 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:778: [1:7:2054] Successful handshake: owner# 1, generation# 2 2026-01-08T15:23:43.100977Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:760: [1:7:2054] Handle NKikimrSchemeBoard.TEvHandshake { Owner: 1 Generation: 1 }: sender# [1:8:2055] 2026-01-08T15:23:43.101037Z node 1 :SCHEME_BOARD_REPLICA ERROR: replica.cpp:772: [1:7:2054] Reject handshake from stale populator: sender# [1:8:2055], owner# 1, generation# 1, pending generation# 2 2026-01-08T15:23:43.404256Z node 2 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:760: [2:7:2054] Handle NKikimrSchemeBoard.TEvHandshake { Owner: 1 Generation: 1 }: sender# [2:8:2055] 2026-01-08T15:23:43.404320Z node 2 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:778: [2:7:2054] Successful handshake: owner# 1, generation# 1 2026-01-08T15:23:43.404425Z node 2 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1070: [2:7:2054] Handle NKikimrSchemeBoard.TEvSubscribe { PathId: [OwnerId: 1, LocalPathId: 1] DomainOwnerId: 0 }: sender# [2:9:2056] 2026-01-08T15:23:43.404465Z node 2 :SCHEME_BOARD_REPLICA INFO: replica.cpp:529: [2:7:2054] Upsert description: path# [OwnerId: 1, LocalPathId: 1] 2026-01-08T15:23:43.404598Z node 2 :SCHEME_BOARD_REPLICA INFO: replica.cpp:655: [2:7:2054] Subscribe: subscriber# [2:9:2056], path# [OwnerId: 1, LocalPathId: 1], 
domainOwnerId# 0, capabilities# 2026-01-08T15:23:43.404838Z node 2 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:791: [2:7:2054] Handle NKikimrSchemeBoard.TEvUpdate { Owner: 1 Generation: 1 }: sender# [2:8:2055], cookie# 0, event size# 72 2026-01-08T15:23:43.404876Z node 2 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:831: [2:7:2054] Update description: path# path, pathId# [OwnerId: 1, LocalPathId: 1], deletion# false 2026-01-08T15:23:43.411438Z node 2 :SCHEME_BOARD_REPLICA INFO: replica.cpp:559: [2:7:2054] Upsert description: path# path, pathId# [OwnerId: 1, LocalPathId: 1], pathDescription# {Status StatusSuccess, Path path, PathId [OwnerId: 1, LocalPathId: 1], PathVersion 1, SubdomainPathId , PathAbandonedTenantsSchemeShards size 0, DescribeSchemeResultSerialized size 30} 2026-01-08T15:23:43.411636Z node 2 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:791: [2:7:2054] Handle NKikimrSchemeBoard.TEvUpdate { Owner: 1 Generation: 1 }: sender# [2:8:2055], cookie# 0, event size# 40 2026-01-08T15:23:43.411672Z node 2 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:831: [2:7:2054] Update description: path# path, pathId# [OwnerId: 1, LocalPathId: 1], deletion# true 2026-01-08T15:23:43.411720Z node 2 :SCHEME_BOARD_REPLICA INFO: replica.cpp:584: [2:7:2054] Delete description: path# path, pathId# [OwnerId: 1, LocalPathId: 1] 2026-01-08T15:23:43.411832Z node 2 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:791: [2:7:2054] Handle NKikimrSchemeBoard.TEvUpdate { Owner: 1 Generation: 1 }: sender# [2:8:2055], cookie# 0, event size# 72 2026-01-08T15:23:43.411866Z node 2 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:831: [2:7:2054] Update description: path# path, pathId# [OwnerId: 1, LocalPathId: 1], deletion# false 2026-01-08T15:23:43.411908Z node 2 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:843: [2:7:2054] Path was explicitly deleted, ignoring: path# path, pathId# [OwnerId: 1, LocalPathId: 1] 2026-01-08T15:23:43.411967Z node 2 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:791: [2:7:2054] Handle NKikimrSchemeBoard.TEvUpdate { Owner: 1 Generation: 1 }: sender# [2:8:2055], cookie# 0, event size# 72 2026-01-08T15:23:43.412009Z node 2 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:831: [2:7:2054] Update description: path# path, pathId# [OwnerId: 1, LocalPathId: 2], deletion# false 2026-01-08T15:23:43.412123Z node 2 :SCHEME_BOARD_REPLICA INFO: replica.cpp:559: [2:7:2054] Upsert description: path# path, pathId# [OwnerId: 1, LocalPathId: 2], pathDescription# {Status StatusSuccess, Path path, PathId [OwnerId: 1, LocalPathId: 2], PathVersion 1, SubdomainPathId , PathAbandonedTenantsSchemeShards size 0, DescribeSchemeResultSerialized size 30} 2026-01-08T15:23:43.412255Z node 2 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1070: [2:7:2054] Handle NKikimrSchemeBoard.TEvSubscribe { PathId: [OwnerId: 1, LocalPathId: 2] DomainOwnerId: 0 }: sender# [2:10:2057] 2026-01-08T15:23:43.412314Z node 2 :SCHEME_BOARD_REPLICA INFO: replica.cpp:655: [2:7:2054] Subscribe: subscriber# [2:10:2057], path# [OwnerId: 1, LocalPathId: 2], domainOwnerId# 0, capabilities# 2026-01-08T15:23:43.745985Z node 3 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:760: [3:7:2054] Handle NKikimrSchemeBoard.TEvHandshake { Owner: 1 Generation: 1 }: sender# [3:8:2055] 2026-01-08T15:23:43.746044Z node 3 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:778: [3:7:2054] Successful handshake: owner# 1, generation# 1 2026-01-08T15:23:43.746199Z node 3 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:791: [3:7:2054] Handle NKikimrSchemeBoard.TEvUpdate { Owner: 1 Generation: 1 }: sender# [3:8:2055], cookie# 0, event size# 72 
2026-01-08T15:23:43.746245Z node 3 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:831: [3:7:2054] Update description: path# path, pathId# [OwnerId: 1, LocalPathId: 1], deletion# false 2026-01-08T15:23:43.746309Z node 3 :SCHEME_BOARD_REPLICA INFO: replica.cpp:559: [3:7:2054] Upsert description: path# path, pathId# [OwnerId: 1, LocalPathId: 1], pathDescription# {Status StatusSuccess, Path path, PathId [OwnerId: 1, LocalPathId: 1], PathVersion 1, SubdomainPathId , PathAbandonedTenantsSchemeShards size 0, DescribeSchemeResultSerialized size 30} 2026-01-08T15:23:43.746411Z node 3 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:791: [3:7:2054] Handle NKikimrSchemeBoard.TEvUpdate { Owner: 1 Generation: 1 }: sender# [3:8:2055], cookie# 0, event size# 72 2026-01-08T15:23:43.750022Z node 3 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:831: [3:7:2054] Update description: path# path, pathId# [OwnerId: 1, LocalPathId: 2], deletion# false 2026-01-08T15:23:43.750092Z node 3 :SCHEME_BOARD_REPLICA INFO: replica.cpp:584: [3:7:2054] Delete description: path# path, pathId# [OwnerId: 1, LocalPathId: 1] 2026-01-08T15:23:43.750158Z node 3 :SCHEME_BOARD_REPLICA INFO: replica.cpp:559: [3:7:2054] Upsert description: path# path, pathId# [OwnerId: 1, LocalPathId: 2], pathDescription# {Status StatusSuccess, Path path, PathId [OwnerId: 1, LocalPathId: 2], PathVersion 1, SubdomainPathId , PathAbandonedTenantsSchemeShards size 0, DescribeSchemeResultSerialized size 30} 2026-01-08T15:23:43.750260Z node 3 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:791: [3:7:2054] Handle NKikimrSchemeBoard.TEvUpdate { Owner: 1 Generation: 1 }: sender# [3:8:2055], cookie# 0, event size# 40 2026-01-08T15:23:43.750294Z node 3 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:831: [3:7:2054] Update description: path# path, pathId# [OwnerId: 1, LocalPathId: 2], deletion# true 2026-01-08T15:23:43.750356Z node 3 :SCHEME_BOARD_REPLICA INFO: replica.cpp:584: [3:7:2054] Delete description: path# path, pathId# [OwnerId: 1, LocalPathId: 2] 2026-01-08T15:23:43.750488Z node 3 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:791: [3:7:2054] Handle NKikimrSchemeBoard.TEvUpdate { Owner: 1 Generation: 1 }: sender# [3:8:2055], cookie# 0, event size# 72 2026-01-08T15:23:43.750527Z node 3 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:831: [3:7:2054] Update description: path# path, pathId# [OwnerId: 1, LocalPathId: 1], deletion# false 2026-01-08T15:23:43.750568Z node 3 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:843: [3:7:2054] Path was explicitly deleted, ignoring: path# path, pathId# [OwnerId: 1, LocalPathId: 1] 2026-01-08T15:23:43.750644Z node 3 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:791: [3:7:2054] Handle NKikimrSchemeBoard.TEvUpdate { Owner: 1 Generation: 1 }: sender# [3:8:2055], cookie# 0, event size# 72 2026-01-08T15:23:43.751395Z node 3 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:831: [3:7:2054] Update description: path# path, pathId# [OwnerId: 1, LocalPathId: 2], deletion# false 2026-01-08T15:23:43.751837Z node 3 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:843: [3:7:2054] Path was explicitly deleted, ignoring: path# path, pathId# [OwnerId: 1, LocalPathId: 2] >> TReplicaTest::Delete [GOOD] >> TInterconnectTest::TestManyEvents [GOOD] >> TInterconnectTest::TestCrossConnect >> TReplicaTest::Subscribe |82.4%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/base/ut_board_subscriber/ydb-core-base-ut_board_subscriber |82.4%| [TM] {BAZEL_UPLOAD} ydb/core/actorlib_impl/ut/unittest ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/scheme_board/ut_replica/unittest >> 
TReplicaTest::AckNotificationsUponPathRecreation [GOOD] Test command err: 2026-01-08T15:23:43.156671Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:760: [1:7:2054] Handle NKikimrSchemeBoard.TEvHandshake { Owner: 1 Generation: 1 }: sender# [1:8:2055] 2026-01-08T15:23:43.156738Z node 1 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:778: [1:7:2054] Successful handshake: owner# 1, generation# 1 2026-01-08T15:23:43.156829Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:967: [1:7:2054] Handle NKikimrSchemeBoard.TEvCommitGeneration { Owner: 1 Generation: 1 }: sender# [1:8:2055] 2026-01-08T15:23:43.156881Z node 1 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:994: [1:7:2054] Commit generation: owner# 1, generation# 1 2026-01-08T15:23:43.156928Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:760: [1:7:2054] Handle NKikimrSchemeBoard.TEvHandshake { Owner: 1 Generation: 2 }: sender# [1:8:2055] 2026-01-08T15:23:43.156965Z node 1 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:778: [1:7:2054] Successful handshake: owner# 1, generation# 2 2026-01-08T15:23:43.461549Z node 2 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1070: [2:7:2054] Handle NKikimrSchemeBoard.TEvSubscribe { Path: path DomainOwnerId: 0 }: sender# [2:9:2056] 2026-01-08T15:23:43.461614Z node 2 :SCHEME_BOARD_REPLICA INFO: replica.cpp:529: [2:7:2054] Upsert description: path# path 2026-01-08T15:23:43.461756Z node 2 :SCHEME_BOARD_REPLICA INFO: replica.cpp:655: [2:7:2054] Subscribe: subscriber# [2:9:2056], path# path, domainOwnerId# 0, capabilities# AckNotifications: true 2026-01-08T15:23:43.461895Z node 2 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:760: [2:7:2054] Handle NKikimrSchemeBoard.TEvHandshake { Owner: 1 Generation: 1 }: sender# [2:8:2055] 2026-01-08T15:23:43.461936Z node 2 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:778: [2:7:2054] Successful handshake: owner# 1, generation# 1 2026-01-08T15:23:43.462090Z node 2 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:791: [2:7:2054] Handle NKikimrSchemeBoard.TEvUpdate { Owner: 1 Generation: 1 }: sender# [2:8:2055], cookie# 0, event size# 72 2026-01-08T15:23:43.462124Z node 2 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:831: [2:7:2054] Update description: path# path, pathId# [OwnerId: 1, LocalPathId: 1], deletion# false 2026-01-08T15:23:43.473652Z node 2 :SCHEME_BOARD_REPLICA INFO: replica.cpp:559: [2:7:2054] Upsert description: path# path, pathId# [OwnerId: 1, LocalPathId: 1], pathDescription# {Status StatusSuccess, Path path, PathId [OwnerId: 1, LocalPathId: 1], PathVersion 1, SubdomainPathId , PathAbandonedTenantsSchemeShards size 0, DescribeSchemeResultSerialized size 30} 2026-01-08T15:23:43.473835Z node 2 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1098: [2:7:2054] Handle NKikimrSchemeBoard.TEvNotifyAck { Version: 0 }: sender# [2:9:2056] 2026-01-08T15:23:43.473927Z node 2 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:791: [2:7:2054] Handle NKikimrSchemeBoard.TEvUpdate { Owner: 1 Generation: 1 }: sender# [2:8:2055], cookie# 0, event size# 40 2026-01-08T15:23:43.473961Z node 2 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:831: [2:7:2054] Update description: path# path, pathId# [OwnerId: 1, LocalPathId: 1], deletion# true 2026-01-08T15:23:43.473992Z node 2 :SCHEME_BOARD_REPLICA INFO: replica.cpp:584: [2:7:2054] Delete description: path# path, pathId# [OwnerId: 1, LocalPathId: 1] 2026-01-08T15:23:43.474078Z node 2 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1098: [2:7:2054] Handle NKikimrSchemeBoard.TEvNotifyAck { Version: 1 }: sender# [2:9:2056] 2026-01-08T15:23:43.864141Z node 3 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:760: [3:7:2054] 
Handle NKikimrSchemeBoard.TEvHandshake { Owner: 1 Generation: 1 }: sender# [3:8:2055] 2026-01-08T15:23:43.864206Z node 3 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:778: [3:7:2054] Successful handshake: owner# 1, generation# 1 2026-01-08T15:23:43.864352Z node 3 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:791: [3:7:2054] Handle NKikimrSchemeBoard.TEvUpdate { Owner: 1 Generation: 1 }: sender# [3:8:2055], cookie# 0, event size# 72 2026-01-08T15:23:43.864403Z node 3 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:831: [3:7:2054] Update description: path# path, pathId# [OwnerId: 1, LocalPathId: 1], deletion# false 2026-01-08T15:23:43.864490Z node 3 :SCHEME_BOARD_REPLICA INFO: replica.cpp:559: [3:7:2054] Upsert description: path# path, pathId# [OwnerId: 1, LocalPathId: 1], pathDescription# {Status StatusSuccess, Path path, PathId [OwnerId: 1, LocalPathId: 1], PathVersion 2, SubdomainPathId , PathAbandonedTenantsSchemeShards size 0, DescribeSchemeResultSerialized size 30} 2026-01-08T15:23:43.864601Z node 3 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1070: [3:7:2054] Handle NKikimrSchemeBoard.TEvSubscribe { Path: path DomainOwnerId: 0 }: sender# [3:9:2056] 2026-01-08T15:23:43.864699Z node 3 :SCHEME_BOARD_REPLICA INFO: replica.cpp:655: [3:7:2054] Subscribe: subscriber# [3:9:2056], path# path, domainOwnerId# 0, capabilities# AckNotifications: true 2026-01-08T15:23:43.864809Z node 3 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:791: [3:7:2054] Handle NKikimrSchemeBoard.TEvUpdate { Owner: 1 Generation: 1 }: sender# [3:8:2055], cookie# 0, event size# 72 2026-01-08T15:23:43.864843Z node 3 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:831: [3:7:2054] Update description: path# path, pathId# [OwnerId: 1, LocalPathId: 1], deletion# false 2026-01-08T15:23:43.864900Z node 3 :SCHEME_BOARD_REPLICA INFO: replica.cpp:559: [3:7:2054] Upsert description: path# path, pathId# [OwnerId: 1, LocalPathId: 1], pathDescription# {Status StatusSuccess, Path path, PathId [OwnerId: 1, LocalPathId: 1], PathVersion 3, SubdomainPathId , PathAbandonedTenantsSchemeShards size 0, DescribeSchemeResultSerialized size 30} 2026-01-08T15:23:43.865061Z node 3 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:791: [3:7:2054] Handle NKikimrSchemeBoard.TEvUpdate { Owner: 1 Generation: 1 }: sender# [3:8:2055], cookie# 0, event size# 72 2026-01-08T15:23:43.865099Z node 3 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:831: [3:7:2054] Update description: path# path, pathId# [OwnerId: 1, LocalPathId: 2], deletion# false 2026-01-08T15:23:43.865152Z node 3 :SCHEME_BOARD_REPLICA INFO: replica.cpp:584: [3:7:2054] Delete description: path# path, pathId# [OwnerId: 1, LocalPathId: 1] 2026-01-08T15:23:43.865238Z node 3 :SCHEME_BOARD_REPLICA INFO: replica.cpp:529: [3:7:2054] Upsert description: path# path 2026-01-08T15:23:43.865331Z node 3 :SCHEME_BOARD_REPLICA INFO: replica.cpp:655: [3:7:2054] Subscribe: subscriber# [3:9:2056], path# path, domainOwnerId# 0, capabilities# AckNotifications: true 2026-01-08T15:23:43.865399Z node 3 :SCHEME_BOARD_REPLICA INFO: replica.cpp:559: [3:7:2054] Upsert description: path# path, pathId# [OwnerId: 1, LocalPathId: 2], pathDescription# {Status StatusSuccess, Path path, PathId [OwnerId: 1, LocalPathId: 2], PathVersion 1, SubdomainPathId , PathAbandonedTenantsSchemeShards size 0, DescribeSchemeResultSerialized size 30} 2026-01-08T15:23:43.865500Z node 3 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1098: [3:7:2054] Handle NKikimrSchemeBoard.TEvNotifyAck { Version: 3 }: sender# [3:9:2056] |82.4%| [LD] {RESULT} 
$(B)/ydb/core/base/ut_board_subscriber/ydb-core-base-ut_board_subscriber >> TReplicaTest::Merge [GOOD] >> TReplicaTest::IdempotencyUpdatesWithoutSubscribers >> TBsLocalRecovery::WriteRestartReadHuge [GOOD] >> TBsLocalRecovery::WriteRestartReadHugeIncreased |82.4%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/base/ut_board_subscriber/ydb-core-base-ut_board_subscriber |82.4%| [TM] {BAZEL_UPLOAD} ydb/core/blobstorage/ut_vdisk/unittest >> TestProtocols::TestHTTPRequest [GOOD] >> TBsVDiskExtremeHuge::Simple3Put1SeqSubsErrorCompaction [GOOD] >> TReplicaTest::Subscribe [GOOD] >> ClusterBalancing::ClusterBalancingEvenDistributionNotPossible [GOOD] >> TReplicaTest::SubscribeUnknownPath >> BootstrapTabletsValidatorTests::TestNoNodeForTablet [GOOD] >> BootstrapTabletsValidatorTests::TestRequiredTablet [GOOD] >> BootstrapTabletsValidatorTests::TestImportantTablet |82.4%| [TM] {asan, default-linux-x86_64, release} ydb/core/cms/console/validators/ut/unittest >> NameserviceConfigValidatorTests::TestModifyHost [GOOD] |82.4%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/ut_vdisk/unittest >> TBsVDiskExtremeHuge::Simple3Put1SeqGet2Compaction [GOOD] |82.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/ut_blobstorage/monitoring.cpp |82.4%| [TM] {BAZEL_UPLOAD} ydb/core/actorlib_impl/ut/unittest >> TReplicaTest::IdempotencyUpdatesWithoutSubscribers [GOOD] >> TReplicaTest::StrongNotificationAfterCommit >> NameserviceConfigValidatorTests::TestModifyIdForHostPort [GOOD] >> NameserviceConfigValidatorTests::TestModifyIdForResolveHostPort [GOOD] >> NameserviceConfigValidatorTests::TestModifyResolveHost [GOOD] >> NameserviceConfigValidatorTests::TestModifyPort [GOOD] >> BootstrapTabletsValidatorTests::TestImportantTablet [GOOD] >> BootstrapTabletsValidatorTests::TestCompactionBroker [GOOD] >> ResourceBrokerConfigValidatorTests::TestMinConfig [GOOD] >> ResourceBrokerConfigValidatorTests::TestRepeatedQueueName [GOOD] >> ResourceBrokerConfigValidatorTests::TestNoDefaultQueue [GOOD] >> ResourceBrokerConfigValidatorTests::TestNoUnknownTask [GOOD] |82.4%| [TM] {BAZEL_UPLOAD} ydb/core/blobstorage/ut_blobstorage/ut_balancing/unittest >> TReplicaTest::SubscribeUnknownPath [GOOD] >> TReplicaTest::SyncVersion |82.4%| [TM] {BAZEL_UPLOAD} ydb/core/actorlib_impl/ut/unittest |82.4%| [TM] {asan, default-linux-x86_64, release} ydb/core/cms/console/validators/ut/unittest >> TRegistryTests::TestDisableEnable [GOOD] |82.4%| [TM] {BAZEL_UPLOAD} ydb/core/tx/scheme_board/ut_replica/unittest >> TReplicaTest::StrongNotificationAfterCommit [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/scheme_board/ut_replica/unittest >> TReplicaTest::DoubleDelete [GOOD] Test command err: 2026-01-08T15:23:43.892374Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:760: [1:7:2054] Handle NKikimrSchemeBoard.TEvHandshake { Owner: 1 Generation: 1 }: sender# [1:8:2055] 2026-01-08T15:23:43.892471Z node 1 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:778: [1:7:2054] Successful handshake: owner# 1, generation# 1 2026-01-08T15:23:44.203651Z node 2 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:760: [2:7:2054] Handle NKikimrSchemeBoard.TEvHandshake { Owner: 1 Generation: 1 }: sender# [2:8:2055] 2026-01-08T15:23:44.203731Z node 2 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:778: [2:7:2054] Successful handshake: owner# 1, generation# 1 2026-01-08T15:23:44.203915Z node 2 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:791: [2:7:2054] Handle NKikimrSchemeBoard.TEvUpdate { Owner: 1 Generation: 1 }: sender# [2:8:2055], cookie# 0, event size# 72 
2026-01-08T15:23:44.203960Z node 2 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:831: [2:7:2054] Update description: path# path, pathId# [OwnerId: 1, LocalPathId: 1], deletion# false 2026-01-08T15:23:44.210823Z node 2 :SCHEME_BOARD_REPLICA INFO: replica.cpp:559: [2:7:2054] Upsert description: path# path, pathId# [OwnerId: 1, LocalPathId: 1], pathDescription# {Status StatusSuccess, Path path, PathId [OwnerId: 1, LocalPathId: 1], PathVersion 1, SubdomainPathId , PathAbandonedTenantsSchemeShards size 0, DescribeSchemeResultSerialized size 30} 2026-01-08T15:23:44.210996Z node 2 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1070: [2:7:2054] Handle NKikimrSchemeBoard.TEvSubscribe { Path: path DomainOwnerId: 0 }: sender# [2:8:2055] 2026-01-08T15:23:44.211078Z node 2 :SCHEME_BOARD_REPLICA INFO: replica.cpp:655: [2:7:2054] Subscribe: subscriber# [2:8:2055], path# path, domainOwnerId# 0, capabilities# 2026-01-08T15:23:44.211195Z node 2 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1084: [2:7:2054] Handle NKikimrSchemeBoard.TEvUnsubscribe { Path: path }: sender# [2:8:2055] 2026-01-08T15:23:44.211269Z node 2 :SCHEME_BOARD_REPLICA INFO: replica.cpp:671: [2:7:2054] Unsubscribe: subscriber# [2:8:2055], path# path 2026-01-08T15:23:44.211343Z node 2 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1084: [2:7:2054] Handle NKikimrSchemeBoard.TEvUnsubscribe { Path: path }: sender# [2:8:2055] 2026-01-08T15:23:44.499693Z node 3 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:760: [3:7:2054] Handle NKikimrSchemeBoard.TEvHandshake { Owner: 1 Generation: 1 }: sender# [3:8:2055] 2026-01-08T15:23:44.499773Z node 3 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:778: [3:7:2054] Successful handshake: owner# 1, generation# 1 2026-01-08T15:23:44.499935Z node 3 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1070: [3:7:2054] Handle NKikimrSchemeBoard.TEvSubscribe { Path: path DomainOwnerId: 0 }: sender# [3:9:2056] 2026-01-08T15:23:44.499983Z node 3 :SCHEME_BOARD_REPLICA INFO: replica.cpp:529: [3:7:2054] Upsert description: path# path 2026-01-08T15:23:44.500057Z node 3 :SCHEME_BOARD_REPLICA INFO: replica.cpp:655: [3:7:2054] Subscribe: subscriber# [3:9:2056], path# path, domainOwnerId# 0, capabilities# 2026-01-08T15:23:44.500187Z node 3 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:791: [3:7:2054] Handle NKikimrSchemeBoard.TEvUpdate { Owner: 1 Generation: 1 }: sender# [3:8:2055], cookie# 0, event size# 72 2026-01-08T15:23:44.500221Z node 3 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:831: [3:7:2054] Update description: path# path, pathId# [OwnerId: 1, LocalPathId: 1], deletion# false 2026-01-08T15:23:44.500277Z node 3 :SCHEME_BOARD_REPLICA INFO: replica.cpp:559: [3:7:2054] Upsert description: path# path, pathId# [OwnerId: 1, LocalPathId: 1], pathDescription# {Status StatusSuccess, Path path, PathId [OwnerId: 1, LocalPathId: 1], PathVersion 1, SubdomainPathId , PathAbandonedTenantsSchemeShards size 0, DescribeSchemeResultSerialized size 30} 2026-01-08T15:23:44.500425Z node 3 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:791: [3:7:2054] Handle NKikimrSchemeBoard.TEvUpdate { Owner: 1 Generation: 1 }: sender# [3:8:2055], cookie# 0, event size# 40 2026-01-08T15:23:44.500462Z node 3 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:831: [3:7:2054] Update description: path# path, pathId# [OwnerId: 1, LocalPathId: 1], deletion# true 2026-01-08T15:23:44.500495Z node 3 :SCHEME_BOARD_REPLICA INFO: replica.cpp:584: [3:7:2054] Delete description: path# path, pathId# [OwnerId: 1, LocalPathId: 1] 2026-01-08T15:23:44.500636Z node 3 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1070: [3:7:2054] Handle 
NKikimrSchemeBoard.TEvSubscribe { Path: path DomainOwnerId: 0 }: sender# [3:10:2057] 2026-01-08T15:23:44.500696Z node 3 :SCHEME_BOARD_REPLICA INFO: replica.cpp:655: [3:7:2054] Subscribe: subscriber# [3:10:2057], path# path, domainOwnerId# 0, capabilities# 2026-01-08T15:23:44.500795Z node 3 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:791: [3:7:2054] Handle NKikimrSchemeBoard.TEvUpdate { Owner: 1 Generation: 1 }: sender# [3:8:2055], cookie# 0, event size# 40 2026-01-08T15:23:44.500829Z node 3 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:831: [3:7:2054] Update description: path# path, pathId# [OwnerId: 1, LocalPathId: 1], deletion# true |82.4%| [TM] {BAZEL_UPLOAD} ydb/core/tx/scheme_board/ut_replica/unittest |82.4%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/ut_vdisk/unittest >> TBsVDiskExtreme::Simple3Put1GetMissingPartCompaction [GOOD] >> TReplicaTest::SyncVersion [GOOD] |82.4%| [TM] {BAZEL_UPLOAD} ydb/core/cms/console/validators/ut/unittest >> BootstrapTabletsValidatorTests::TestUnknownNodeForTablet [GOOD] >> NameserviceConfigValidatorTests::TestAddNewNode [GOOD] >> NameserviceConfigValidatorTests::TestDuplicatingHostPort [GOOD] >> NameserviceConfigValidatorTests::TestDuplicatingAddrPort [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/scheme_board/ut_replica/unittest >> TReplicaTest::Delete [GOOD] Test command err: 2026-01-08T15:23:44.010389Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:967: [1:7:2054] Handle NKikimrSchemeBoard.TEvCommitGeneration { Owner: 1 Generation: 1 }: sender# [1:8:2055] 2026-01-08T15:23:44.010452Z node 1 :SCHEME_BOARD_REPLICA ERROR: replica.cpp:978: [1:7:2054] Reject commit from unknown populator: sender# [1:8:2055], owner# 1, generation# 1 2026-01-08T15:23:44.010506Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:760: [1:7:2054] Handle NKikimrSchemeBoard.TEvHandshake { Owner: 1 Generation: 1 }: sender# [1:8:2055] 2026-01-08T15:23:44.010543Z node 1 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:778: [1:7:2054] Successful handshake: owner# 1, generation# 1 2026-01-08T15:23:44.299588Z node 2 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:760: [2:7:2054] Handle NKikimrSchemeBoard.TEvHandshake { Owner: 1 Generation: 0 }: sender# [2:8:2055] 2026-01-08T15:23:44.299686Z node 2 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:778: [2:7:2054] Successful handshake: owner# 1, generation# 0 2026-01-08T15:23:44.299774Z node 2 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:760: [2:7:2054] Handle NKikimrSchemeBoard.TEvHandshake { Owner: 1 Generation: 1 }: sender# [2:9:2056] 2026-01-08T15:23:44.299813Z node 2 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:778: [2:7:2054] Successful handshake: owner# 1, generation# 1 2026-01-08T15:23:44.299904Z node 2 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:967: [2:7:2054] Handle NKikimrSchemeBoard.TEvCommitGeneration { Owner: 1 Generation: 1 }: sender# [2:9:2056] 2026-01-08T15:23:44.299938Z node 2 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:994: [2:7:2054] Commit generation: owner# 1, generation# 1 2026-01-08T15:23:44.300013Z node 2 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:967: [2:7:2054] Handle NKikimrSchemeBoard.TEvCommitGeneration { Owner: 1 Generation: 0 }: sender# [2:8:2055] 2026-01-08T15:23:44.300060Z node 2 :SCHEME_BOARD_REPLICA ERROR: replica.cpp:988: [2:7:2054] Reject commit from stale populator: sender# [2:8:2055], owner# 1, generation# 0, pending generation# 1 2026-01-08T15:23:44.300108Z node 2 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:760: [2:7:2054] Handle NKikimrSchemeBoard.TEvHandshake { Owner: 1 Generation: 2 }: sender# 
[2:8:2055] 2026-01-08T15:23:44.300138Z node 2 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:778: [2:7:2054] Successful handshake: owner# 1, generation# 2 2026-01-08T15:23:44.667944Z node 3 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:760: [3:7:2054] Handle NKikimrSchemeBoard.TEvHandshake { Owner: 1 Generation: 1 }: sender# [3:8:2055] 2026-01-08T15:23:44.668012Z node 3 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:778: [3:7:2054] Successful handshake: owner# 1, generation# 1 2026-01-08T15:23:44.668167Z node 3 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:791: [3:7:2054] Handle NKikimrSchemeBoard.TEvUpdate { Owner: 1 Generation: 1 }: sender# [3:8:2055], cookie# 0, event size# 72 2026-01-08T15:23:44.668208Z node 3 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:831: [3:7:2054] Update description: path# path, pathId# [OwnerId: 42, LocalPathId: 1], deletion# false 2026-01-08T15:23:44.674977Z node 3 :SCHEME_BOARD_REPLICA INFO: replica.cpp:559: [3:7:2054] Upsert description: path# path, pathId# [OwnerId: 42, LocalPathId: 1], pathDescription# {Status StatusSuccess, Path path, PathId [OwnerId: 42, LocalPathId: 1], PathVersion 1, SubdomainPathId , PathAbandonedTenantsSchemeShards size 0, DescribeSchemeResultSerialized size 30} 2026-01-08T15:23:44.675175Z node 3 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1070: [3:7:2054] Handle NKikimrSchemeBoard.TEvSubscribe { Path: path DomainOwnerId: 0 }: sender# [3:9:2056] 2026-01-08T15:23:44.675266Z node 3 :SCHEME_BOARD_REPLICA INFO: replica.cpp:655: [3:7:2054] Subscribe: subscriber# [3:9:2056], path# path, domainOwnerId# 0, capabilities# 2026-01-08T15:23:44.675402Z node 3 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1070: [3:7:2054] Handle NKikimrSchemeBoard.TEvSubscribe { PathId: [OwnerId: 42, LocalPathId: 1] DomainOwnerId: 0 }: sender# [3:10:2057] 2026-01-08T15:23:44.675495Z node 3 :SCHEME_BOARD_REPLICA INFO: replica.cpp:655: [3:7:2054] Subscribe: subscriber# [3:10:2057], path# [OwnerId: 42, LocalPathId: 1], domainOwnerId# 0, capabilities# 2026-01-08T15:23:44.675626Z node 3 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:791: [3:7:2054] Handle NKikimrSchemeBoard.TEvUpdate { Owner: 1 Generation: 1 }: sender# [3:8:2055], cookie# 0, event size# 40 2026-01-08T15:23:44.675666Z node 3 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:831: [3:7:2054] Update description: path# path, pathId# [OwnerId: 42, LocalPathId: 1], deletion# true 2026-01-08T15:23:44.675718Z node 3 :SCHEME_BOARD_REPLICA INFO: replica.cpp:584: [3:7:2054] Delete description: path# path, pathId# [OwnerId: 42, LocalPathId: 1] 2026-01-08T15:23:44.675855Z node 3 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1070: [3:7:2054] Handle NKikimrSchemeBoard.TEvSubscribe { Path: path DomainOwnerId: 0 }: sender# [3:11:2058] 2026-01-08T15:23:44.675901Z node 3 :SCHEME_BOARD_REPLICA INFO: replica.cpp:655: [3:7:2054] Subscribe: subscriber# [3:11:2058], path# path, domainOwnerId# 0, capabilities# 2026-01-08T15:23:44.676009Z node 3 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1070: [3:7:2054] Handle NKikimrSchemeBoard.TEvSubscribe { PathId: [OwnerId: 42, LocalPathId: 1] DomainOwnerId: 0 }: sender# [3:12:2059] 2026-01-08T15:23:44.676058Z node 3 :SCHEME_BOARD_REPLICA INFO: replica.cpp:655: [3:7:2054] Subscribe: subscriber# [3:12:2059], path# [OwnerId: 42, LocalPathId: 1], domainOwnerId# 0, capabilities# 2026-01-08T15:23:44.676188Z node 3 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1070: [3:7:2054] Handle NKikimrSchemeBoard.TEvSubscribe { Path: path DomainOwnerId: 0 }: sender# [3:13:2060] 2026-01-08T15:23:44.676255Z node 3 :SCHEME_BOARD_REPLICA INFO: replica.cpp:655: 
[3:7:2054] Subscribe: subscriber# [3:13:2060], path# path, domainOwnerId# 0, capabilities# |82.4%| [TM] {BAZEL_UPLOAD} ydb/core/blobstorage/ut_vdisk/unittest >> TRegistryTests::TestAddGet [GOOD] >> TRegistryTests::TestCheckConfig [GOOD] >> ResourceBrokerConfigValidatorTests::TestZeroQueueWeight [GOOD] >> ResourceBrokerConfigValidatorTests::TestZeroDefaultDuration [GOOD] >> CheckIntegrityMirror3dc::PlacementMissingParts [GOOD] >> NameserviceConfigValidatorTests::TestEmptyConfig [GOOD] >> NameserviceConfigValidatorTests::TestDuplicatingId [GOOD] >> NameserviceConfigValidatorTests::TestDuplicatingResolveHostPort [GOOD] >> NameserviceConfigValidatorTests::TestEmptyAddresses [GOOD] |82.4%| [TM] {BAZEL_UPLOAD} ydb/core/cms/console/validators/ut/unittest |82.4%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/ut_vdisk/unittest >> TBsVDiskExtremeHuge::Simple3Put1SeqSubsErrorCompaction [GOOD] |82.5%| [TM] {asan, default-linux-x86_64, release} ydb/core/cms/console/validators/ut/unittest >> NameserviceConfigValidatorTests::TestModifyPort [GOOD] >> TBsVDiskManyPutGet::ManyMultiPutGet [GOOD] >> TBsVDiskManyPutGet::ManyMultiPutGetWithLargeBatch |82.5%| [TM] {BAZEL_UPLOAD} ydb/core/tx/scheme_board/ut_replica/unittest ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/scheme_board/ut_replica/unittest >> TReplicaCombinationTest::MigratedPathRecreation [GOOD] Test command err: 2026-01-08T15:23:43.545345Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:760: [1:7:2054] Handle NKikimrSchemeBoard.TEvHandshake { Owner: 800 Generation: 1 }: sender# [1:8:2055] 2026-01-08T15:23:43.545431Z node 1 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:778: [1:7:2054] Successful handshake: owner# 800, generation# 1 2026-01-08T15:23:43.545562Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:967: [1:7:2054] Handle NKikimrSchemeBoard.TEvCommitGeneration { Owner: 800 Generation: 1 }: sender# [1:8:2055] 2026-01-08T15:23:43.545608Z node 1 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:994: [1:7:2054] Commit generation: owner# 800, generation# 1 2026-01-08T15:23:43.545727Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:760: [1:7:2054] Handle NKikimrSchemeBoard.TEvHandshake { Owner: 800 Generation: 1 }: sender# [1:9:2056] 2026-01-08T15:23:43.545768Z node 1 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:778: [1:7:2054] Successful handshake: owner# 800, generation# 1 2026-01-08T15:23:43.545854Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:967: [1:7:2054] Handle NKikimrSchemeBoard.TEvCommitGeneration { Owner: 800 Generation: 1 }: sender# [1:9:2056] 2026-01-08T15:23:43.545898Z node 1 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:994: [1:7:2054] Commit generation: owner# 800, generation# 1 2026-01-08T15:23:43.546123Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:791: [1:7:2054] Handle NKikimrSchemeBoard.TEvUpdate { Owner: 800 Generation: 1 }: sender# [1:8:2055], cookie# 0, event size# 103 2026-01-08T15:23:43.546194Z node 1 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:831: [1:7:2054] Update description: path# /Root/Tenant, pathId# [OwnerId: 800, LocalPathId: 2], deletion# false 2026-01-08T15:23:43.553927Z node 1 :SCHEME_BOARD_REPLICA INFO: replica.cpp:559: [1:7:2054] Upsert description: path# /Root/Tenant, pathId# [OwnerId: 800, LocalPathId: 2], pathDescription# {Status StatusSuccess, Path /Root/Tenant, PathId [OwnerId: 800, LocalPathId: 2], PathVersion 1, SubdomainPathId [OwnerId: 800, LocalPathId: 2], PathAbandonedTenantsSchemeShards size 0, DescribeSchemeResultSerialized size 60} 2026-01-08T15:23:43.554182Z node 1 
:SCHEME_BOARD_REPLICA DEBUG: replica.cpp:791: [1:7:2054] Handle NKikimrSchemeBoard.TEvUpdate { Owner: 800 Generation: 1 }: sender# [1:9:2056], cookie# 0, event size# 103 2026-01-08T15:23:43.554249Z node 1 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:831: [1:7:2054] Update description: path# /Root/Tenant, pathId# [OwnerId: 800, LocalPathId: 2], deletion# false 2026-01-08T15:23:43.554413Z node 1 :SCHEME_BOARD_REPLICA INFO: replica.cpp:559: [1:7:2054] Upsert description: path# /Root/Tenant, pathId# [OwnerId: 800, LocalPathId: 2], pathDescription# {Status StatusSuccess, Path /Root/Tenant, PathId [OwnerId: 800, LocalPathId: 2], PathVersion 1, SubdomainPathId [OwnerId: 800, LocalPathId: 2], PathAbandonedTenantsSchemeShards size 0, DescribeSchemeResultSerialized size 60} 2026-01-08T15:23:43.554568Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1070: [1:7:2054] Handle NKikimrSchemeBoard.TEvSubscribe { Path: /Root/Tenant DomainOwnerId: 0 }: sender# [1:10:2057] 2026-01-08T15:23:43.554674Z node 1 :SCHEME_BOARD_REPLICA INFO: replica.cpp:655: [1:7:2054] Subscribe: subscriber# [1:10:2057], path# /Root/Tenant, domainOwnerId# 0, capabilities# =========== Path: "/Root/Tenant" PathDescription { Self { PathVersion: 1 } DomainDescription { DomainKey { SchemeShard: 800 PathId: 2 } } } PathId: 2 PathOwnerId: 800 =========== Path: "/Root/Tenant" PathDescription { Self { PathVersion: 1 } DomainDescription { DomainKey { SchemeShard: 800 PathId: 2 } } } PathId: 2 PathOwnerId: 800 2026-01-08T15:23:43.622218Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:760: [1:11:2058] Handle NKikimrSchemeBoard.TEvHandshake { Owner: 800 Generation: 1 }: sender# [1:12:2059] 2026-01-08T15:23:43.622278Z node 1 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:778: [1:11:2058] Successful handshake: owner# 800, generation# 1 2026-01-08T15:23:43.622380Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:967: [1:11:2058] Handle NKikimrSchemeBoard.TEvCommitGeneration { Owner: 800 Generation: 1 }: sender# [1:12:2059] 2026-01-08T15:23:43.622437Z node 1 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:994: [1:11:2058] Commit generation: owner# 800, generation# 1 2026-01-08T15:23:43.622522Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:760: [1:11:2058] Handle NKikimrSchemeBoard.TEvHandshake { Owner: 900 Generation: 1 }: sender# [1:13:2060] 2026-01-08T15:23:43.622560Z node 1 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:778: [1:11:2058] Successful handshake: owner# 900, generation# 1 2026-01-08T15:23:43.622656Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:967: [1:11:2058] Handle NKikimrSchemeBoard.TEvCommitGeneration { Owner: 900 Generation: 1 }: sender# [1:13:2060] 2026-01-08T15:23:43.622690Z node 1 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:994: [1:11:2058] Commit generation: owner# 900, generation# 1 2026-01-08T15:23:43.622794Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:791: [1:11:2058] Handle NKikimrSchemeBoard.TEvUpdate { Owner: 800 Generation: 1 }: sender# [1:12:2059], cookie# 0, event size# 103 2026-01-08T15:23:43.622838Z node 1 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:831: [1:11:2058] Update description: path# /Root/Tenant, pathId# [OwnerId: 800, LocalPathId: 2], deletion# false 2026-01-08T15:23:43.622899Z node 1 :SCHEME_BOARD_REPLICA INFO: replica.cpp:559: [1:11:2058] Upsert description: path# /Root/Tenant, pathId# [OwnerId: 800, LocalPathId: 2], pathDescription# {Status StatusSuccess, Path /Root/Tenant, PathId [OwnerId: 800, LocalPathId: 2], PathVersion 1, SubdomainPathId [OwnerId: 800, LocalPathId: 2], PathAbandonedTenantsSchemeShards size 0, 
DescribeSchemeResultSerialized size 60} 2026-01-08T15:23:43.623009Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:791: [1:11:2058] Handle NKikimrSchemeBoard.TEvUpdate { Owner: 900 Generation: 1 }: sender# [1:13:2060], cookie# 0, event size# 103 2026-01-08T15:23:43.623050Z node 1 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:831: [1:11:2058] Update description: path# /Root/Tenant, pathId# [OwnerId: 900, LocalPathId: 1], deletion# false 2026-01-08T15:23:43.623095Z node 1 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:893: [1:11:2058] Replace GSS by TSS description: path# /Root/Tenant, pathId# [OwnerId: 900, LocalPathId: 1], domainId# [OwnerId: 800, LocalPathId: 2], curPathId# [OwnerId: 800, LocalPathId: 2], curDomainId# [OwnerId: 800, LocalPathId: 2] 2026-01-08T15:23:43.623172Z node 1 :SCHEME_BOARD_REPLICA INFO: replica.cpp:559: [1:11:2058] Upsert description: path# /Root/Tenant, pathId# [OwnerId: 900, LocalPathId: 1], pathDescription# {Status StatusSuccess, Path /Root/Tenant, PathId [OwnerId: 900, LocalPathId: 1], PathVersion 1, SubdomainPathId [OwnerId: 800, LocalPathId: 2], PathAbandonedTenantsSchemeShards size 0, DescribeSchemeResultSerialized size 60} 2026-01-08T15:23:43.623282Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1070: [1:11:2058] Handle NKikimrSchemeBoard.TEvSubscribe { Path: /Root/Tenant DomainOwnerId: 0 }: sender# [1:14:2061] 2026-01-08T15:23:43.623334Z node 1 :SCHEME_BOARD_REPLICA INFO: replica.cpp:655: [1:11:2058] Subscribe: subscriber# [1:14:2061], path# /Root/Tenant, domainOwnerId# 0, capabilities# =========== Path: "/Root/Tenant" PathDescription { Self { PathVersion: 1 } DomainDescription { DomainKey { SchemeShard: 800 PathId: 2 } } } PathId: 2 PathOwnerId: 800 =========== Path: "/Root/Tenant" PathDescription { Self { PathVersion: 1 } DomainDescription { DomainKey { SchemeShard: 800 PathId: 2 } } } PathId: 1 PathOwnerId: 900 2026-01-08T15:23:43.623728Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:760: [1:15:2062] Handle NKikimrSchemeBoard.TEvHandshake { Owner: 800 Generation: 1 }: sender# [1:16:2063] 2026-01-08T15:23:43.623764Z node 1 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:778: [1:15:2062] Successful handshake: owner# 800, generation# 1 2026-01-08T15:23:43.623824Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:967: [1:15:2062] Handle NKikimrSchemeBoard.TEvCommitGeneration { Owner: 800 Generation: 1 }: sender# [1:16:2063] 2026-01-08T15:23:43.623868Z node 1 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:994: [1:15:2062] Commit generation: owner# 800, generation# 1 2026-01-08T15:23:43.623926Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:760: [1:15:2062] Handle NKikimrSchemeBoard.TEvHandshake { Owner: 800 Generation: 1 }: sender# [1:17:2064] 2026-01-08T15:23:43.623950Z node 1 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:778: [1:15:2062] Successful handshake: owner# 800, generation# 1 2026-01-08T15:23:43.623998Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:967: [1:15:2062] Handle NKikimrSchemeBoard.TEvCommitGeneration { Owner: 800 Generation: 1 }: sender# [1:17:2064] 2026-01-08T15:23:43.624045Z node 1 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:994: [1:15:2062] Commit generation: owner# 800, generation# 1 2026-01-08T15:23:43.624115Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:791: [1:15:2062] Handle NKikimrSchemeBoard.TEvUpdate { Owner: 800 Generation: 1 }: sender# [1:16:2063], cookie# 0, event size# 103 2026-01-08T15:23:43.624146Z node 1 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:831: [1:15:2062] Update description: path# /Root/Tenant, pathId# [OwnerId: 800, LocalPathId: 
2], deletion# false 2026-01-08T15:23:43.624195Z node 1 :SCHEME_BOARD_REPLICA INFO: replica.cpp:559: [1:15:2062] Upsert description: path# /Root/Tenant, pathId# [OwnerId: 800, LocalPathId: 2], pathDescription# {Status StatusSuccess, Path /Root/Tenant, PathId [OwnerId: 800, LocalPathId: 2], PathVersion 1, SubdomainPathId [OwnerId: 800, LocalPathId: 2], PathAbandonedTenantsSchemeShards size 0, DescribeSchemeResultSerialized size 60} 2026-01-08T15:23:43.624298Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:791: [1:15:2062] Handle NKikimrSchemeBoard.TEvUpdate { Owner: 800 Generation: 1 }: sender# [1:17:2064], cookie# 0, event size# 103 2026-01-08T15:23:43.624329Z node 1 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:831: [1:15:2062] Update description: path# /Root/Tenant, pathId# [OwnerId: 800, LocalPathId: 2], deletion# false 2026-01-08T15:23:43.624372Z node 1 :SCHEME_BOARD_REPLICA INFO: replica.cpp:559: [1:15:2062] Upsert description: path# /Root/Tenant, pathId# [OwnerId: 800, LocalPathId: 2], pathDescription# {Status StatusSuccess, Path /Root/Tenant, PathId [OwnerId: 800, LocalPathId: 2], PathVersion 2, SubdomainPathId [OwnerId: 800, LocalPathId: 2], PathAbandonedTenantsSchemeShards size 0, DescribeSchemeResultSerialized size 60} 2026-01-08T15:23:43.624453Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1070: [1:15:2062] Handle NKikimrSchemeBoard.TEvSubscribe { Path: /Root/Tenant DomainOwnerId: 0 }: sender# [1:18:2065] 2026-01-08T15:23:43.624490Z node 1 :SCHEME_BOARD_REPLICA INFO: replica.cpp:655: [1:15:2062] Subscribe: subscriber# [1:18:2065], path# /Root/Tenant, domainOwnerId# 0, capabilities# =========== Path: "/Root/Tenant" PathDescription { Self { PathVersion: 1 } DomainDescription { DomainKey { SchemeShard: 800 PathId: 2 } } } PathId: 2 PathOwnerId: 800 =========== Path: "/Root/Tenant" PathDescription { Self { PathVersion: 2 } DomainDescription { DomainKey { SchemeShard: 800 PathId: 2 } } } PathId: 2 PathOwnerId: 800 2026-01-08T15:23:43.624844Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:760: [1:19:2066] Handle NKikimrSchemeBoard.TEvHandshake { Owner: 800 Generation: 1 }: sender# [1:20:2067] 2026-01-08T15:23:43.624894Z node 1 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:778: [1:19:2066] Successful handshake: owner# 800, generation# 1 2026-01-08T15:23:43.624948Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:967: [1:19:2066] Handle NKikimrSchemeBoard.TEvCommitGeneration { Owner: 800 Generation: 1 }: sender# [1:20:2067] 2026-01-08T15:23:43.624973Z node 1 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:994: [1:19:2066] Commit generation: owner# 800, generation# 1 2026-01-08T15:23:43.625022Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:760: [1:19:2066] Handle NKikimrSchemeBoard.TEvHandshake { Owner: 900 Gener ... 
DomainKey { SchemeShard: 800 PathId: 333 } } } PathId: 9 PathOwnerId: 910
=========== super id == DomainId: [OwnerId: 800, LocalPathId: 333] IsDeletion: 1 PathId: [OwnerId: 910, LocalPathId: 9] Versions: 18446744073709551615
=========== WIN ==/Root/Tenant/table_inside PathID: [OwnerId: 0, LocalPathId: 0] deleted: 1 version: 0 domainId: [OwnerId: 0, LocalPathId: 0]
2026-01-08T15:23:44.283659Z node 2 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:760: [2:399:2446] Handle NKikimrSchemeBoard.TEvHandshake { Owner: 910 Generation: 1 }: sender# [2:400:2447]
2026-01-08T15:23:44.283699Z node 2 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:778: [2:399:2446] Successful handshake: owner# 910, generation# 1
2026-01-08T15:23:44.283740Z node 2 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:967: [2:399:2446] Handle NKikimrSchemeBoard.TEvCommitGeneration { Owner: 910 Generation: 1 }: sender# [2:400:2447]
2026-01-08T15:23:44.283774Z node 2 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:994: [2:399:2446] Commit generation: owner# 910, generation# 1
2026-01-08T15:23:44.283816Z node 2 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:760: [2:399:2446] Handle NKikimrSchemeBoard.TEvHandshake { Owner: 910 Generation: 1 }: sender# [2:401:2448]
2026-01-08T15:23:44.283836Z node 2 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:778: [2:399:2446] Successful handshake: owner# 910, generation# 1
2026-01-08T15:23:44.283874Z node 2 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:967: [2:399:2446] Handle NKikimrSchemeBoard.TEvCommitGeneration { Owner: 910 Generation: 1 }: sender# [2:401:2448]
2026-01-08T15:23:44.283894Z node 2 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:994: [2:399:2446] Commit generation: owner# 910, generation# 1
2026-01-08T15:23:44.283944Z node 2 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:791: [2:399:2446] Handle NKikimrSchemeBoard.TEvUpdate { Owner: 910 Generation: 1 }: sender# [2:400:2447], cookie# 0, event size# 64
2026-01-08T15:23:44.283967Z node 2 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:831: [2:399:2446] Update description: path# /Root/Tenant/table_inside, pathId# [OwnerId: 910, LocalPathId: 9], deletion# true
2026-01-08T15:23:44.283987Z node 2 :SCHEME_BOARD_REPLICA INFO: replica.cpp:529: [2:399:2446] Upsert description: path# [OwnerId: 910, LocalPathId: 9]
2026-01-08T15:23:44.284053Z node 2 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:791: [2:399:2446] Handle NKikimrSchemeBoard.TEvUpdate { Owner: 910 Generation: 1 }: sender# [2:401:2448], cookie# 0, event size# 130
2026-01-08T15:23:44.284078Z node 2 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:831: [2:399:2446] Update description: path# /Root/Tenant/table_inside, pathId# [OwnerId: 910, LocalPathId: 9], deletion# false
2026-01-08T15:23:44.284110Z node 2 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:843: [2:399:2446] Path was explicitly deleted, ignoring: path# /Root/Tenant/table_inside, pathId# [OwnerId: 910, LocalPathId: 9]
2026-01-08T15:23:44.284181Z node 2 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1070: [2:399:2446] Handle NKikimrSchemeBoard.TEvSubscribe { Path: /Root/Tenant/table_inside DomainOwnerId: 0 }: sender# [2:402:2449]
2026-01-08T15:23:44.284213Z node 2 :SCHEME_BOARD_REPLICA INFO: replica.cpp:529: [2:399:2446] Upsert description: path# /Root/Tenant/table_inside
2026-01-08T15:23:44.284248Z node 2 :SCHEME_BOARD_REPLICA INFO: replica.cpp:655: [2:399:2446] Subscribe: subscriber# [2:402:2449], path# /Root/Tenant/table_inside, domainOwnerId# 0, capabilities#
=========== Left ==Path: "/Root/Tenant/table_inside" PathDescription { Self { PathVersion: 18446744073709551615 } DomainDescription { DomainKey { SchemeShard: 800 PathId: 333 } } } PathId: 9 PathOwnerId: 910
=========== Right ==Path: "/Root/Tenant/table_inside" PathDescription { Self { PathVersion: 2 } DomainDescription { DomainKey { SchemeShard: 800 PathId: 333 } } } PathId: 9 PathOwnerId: 910
=========== super id == DomainId: [OwnerId: 800, LocalPathId: 333] IsDeletion: 1 PathId: [OwnerId: 910, LocalPathId: 9] Versions: 18446744073709551615
=========== WIN ==/Root/Tenant/table_inside PathID: [OwnerId: 0, LocalPathId: 0] deleted: 1 version: 0 domainId: [OwnerId: 0, LocalPathId: 0]
2026-01-08T15:23:44.286000Z node 2 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:760: [2:403:2450] Handle NKikimrSchemeBoard.TEvHandshake { Owner: 910 Generation: 1 }: sender# [2:404:2451]
2026-01-08T15:23:44.286044Z node 2 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:778: [2:403:2450] Successful handshake: owner# 910, generation# 1
2026-01-08T15:23:44.286083Z node 2 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:967: [2:403:2450] Handle NKikimrSchemeBoard.TEvCommitGeneration { Owner: 910 Generation: 1 }: sender# [2:404:2451]
2026-01-08T15:23:44.286111Z node 2 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:994: [2:403:2450] Commit generation: owner# 910, generation# 1
2026-01-08T15:23:44.286155Z node 2 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:760: [2:403:2450] Handle NKikimrSchemeBoard.TEvHandshake { Owner: 910 Generation: 1 }: sender# [2:405:2452]
2026-01-08T15:23:44.286179Z node 2 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:778: [2:403:2450] Successful handshake: owner# 910, generation# 1
2026-01-08T15:23:44.286221Z node 2 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:967: [2:403:2450] Handle NKikimrSchemeBoard.TEvCommitGeneration { Owner: 910 Generation: 1 }: sender# [2:405:2452]
2026-01-08T15:23:44.286246Z node 2 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:994: [2:403:2450] Commit generation: owner# 910, generation# 1
2026-01-08T15:23:44.286344Z node 2 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:791: [2:403:2450] Handle NKikimrSchemeBoard.TEvUpdate { Owner: 910 Generation: 1 }: sender# [2:404:2451], cookie# 0, event size# 64
2026-01-08T15:23:44.286374Z node 2 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:831: [2:403:2450] Update description: path# /Root/Tenant/table_inside, pathId# [OwnerId: 910, LocalPathId: 9], deletion# true
2026-01-08T15:23:44.286401Z node 2 :SCHEME_BOARD_REPLICA INFO: replica.cpp:529: [2:403:2450] Upsert description: path# [OwnerId: 910, LocalPathId: 9]
2026-01-08T15:23:44.286469Z node 2 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:791: [2:403:2450] Handle NKikimrSchemeBoard.TEvUpdate { Owner: 910 Generation: 1 }: sender# [2:405:2452], cookie# 0, event size# 64
2026-01-08T15:23:44.286499Z node 2 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:831: [2:403:2450] Update description: path# /Root/Tenant/table_inside, pathId# [OwnerId: 910, LocalPathId: 9], deletion# true
2026-01-08T15:23:44.286558Z node 2 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1070: [2:403:2450] Handle NKikimrSchemeBoard.TEvSubscribe { Path: /Root/Tenant/table_inside DomainOwnerId: 0 }: sender# [2:406:2453]
2026-01-08T15:23:44.286586Z node 2 :SCHEME_BOARD_REPLICA INFO: replica.cpp:529: [2:403:2450] Upsert description: path# /Root/Tenant/table_inside
2026-01-08T15:23:44.286658Z node 2 :SCHEME_BOARD_REPLICA INFO: replica.cpp:655: [2:403:2450] Subscribe: subscriber# [2:406:2453], path# /Root/Tenant/table_inside, domainOwnerId# 0, capabilities#
=========== Left ==Path: "/Root/Tenant/table_inside" PathDescription { Self { PathVersion: 18446744073709551615 } DomainDescription { DomainKey { SchemeShard: 800 PathId: 333 } } } PathId: 9 PathOwnerId: 910
=========== Right ==Path: "/Root/Tenant/table_inside" PathDescription { Self { PathVersion: 18446744073709551615 } DomainDescription { DomainKey { SchemeShard: 800 PathId: 333 } } } PathId: 9 PathOwnerId: 910
=========== super id == DomainId: [OwnerId: 800, LocalPathId: 333] IsDeletion: 1 PathId: [OwnerId: 910, LocalPathId: 9] Versions: 18446744073709551615
=========== WIN ==/Root/Tenant/table_inside PathID: [OwnerId: 0, LocalPathId: 0] deleted: 1 version: 0 domainId: [OwnerId: 0, LocalPathId: 0]
2026-01-08T15:23:44.571421Z node 3 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:760: [3:7:2054] Handle NKikimrSchemeBoard.TEvHandshake { Owner: 800 Generation: 1 }: sender# [3:8:2055]
2026-01-08T15:23:44.571506Z node 3 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:778: [3:7:2054] Successful handshake: owner# 800, generation# 1
2026-01-08T15:23:44.571618Z node 3 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:967: [3:7:2054] Handle NKikimrSchemeBoard.TEvCommitGeneration { Owner: 800 Generation: 1 }: sender# [3:8:2055]
2026-01-08T15:23:44.571674Z node 3 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:994: [3:7:2054] Commit generation: owner# 800, generation# 1
2026-01-08T15:23:44.571773Z node 3 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:760: [3:7:2054] Handle NKikimrSchemeBoard.TEvHandshake { Owner: 900 Generation: 1 }: sender# [3:9:2056]
2026-01-08T15:23:44.571816Z node 3 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:778: [3:7:2054] Successful handshake: owner# 900, generation# 1
2026-01-08T15:23:44.571885Z node 3 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:967: [3:7:2054] Handle NKikimrSchemeBoard.TEvCommitGeneration { Owner: 900 Generation: 1 }: sender# [3:9:2056]
2026-01-08T15:23:44.571925Z node 3 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:994: [3:7:2054] Commit generation: owner# 900, generation# 1
2026-01-08T15:23:44.572074Z node 3 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:791: [3:7:2054] Handle NKikimrSchemeBoard.TEvUpdate { Owner: 800 Generation: 1 }: sender# [3:8:2055], cookie# 0, event size# 118
2026-01-08T15:23:44.572122Z node 3 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:831: [3:7:2054] Update description: path# /root/db/dir_inside, pathId# [OwnerId: 800, LocalPathId: 1111], deletion# false
2026-01-08T15:23:44.572353Z node 3 :SCHEME_BOARD_REPLICA INFO: replica.cpp:559: [3:7:2054] Upsert description: path# /root/db/dir_inside, pathId# [OwnerId: 800, LocalPathId: 1111], pathDescription# {Status StatusSuccess, Path /root/db/dir_inside, PathId [OwnerId: 800, LocalPathId: 1111], PathVersion 1, SubdomainPathId [OwnerId: 800, LocalPathId: 1], PathAbandonedTenantsSchemeShards size 0, DescribeSchemeResultSerialized size 67}
2026-01-08T15:23:44.572625Z node 3 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:791: [3:7:2054] Handle NKikimrSchemeBoard.TEvUpdate { Owner: 900 Generation: 1 }: sender# [3:9:2056], cookie# 0, event size# 117
2026-01-08T15:23:44.572677Z node 3 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:831: [3:7:2054] Update description: path# /root/db/dir_inside, pathId# [OwnerId: 900, LocalPathId: 11], deletion# false
2026-01-08T15:23:44.572731Z node 3 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:893: [3:7:2054] Update description by newest path from tenant schemeshard: path# /root/db/dir_inside, pathId# [OwnerId: 900, LocalPathId: 11], domainId# [OwnerId: 800, LocalPathId: 1], curPathId# [OwnerId: 800, LocalPathId: 1111], curDomainId# [OwnerId: 800, LocalPathId: 1]
2026-01-08T15:23:44.572775Z node 3 :SCHEME_BOARD_REPLICA INFO: replica.cpp:584: [3:7:2054] Delete description: path#
/root/db/dir_inside, pathId# [OwnerId: 800, LocalPathId: 1111] 2026-01-08T15:23:44.572839Z node 3 :SCHEME_BOARD_REPLICA INFO: replica.cpp:559: [3:7:2054] Upsert description: path# /root/db/dir_inside, pathId# [OwnerId: 900, LocalPathId: 11], pathDescription# {Status StatusSuccess, Path /root/db/dir_inside, PathId [OwnerId: 900, LocalPathId: 11], PathVersion 1, SubdomainPathId [OwnerId: 800, LocalPathId: 1], PathAbandonedTenantsSchemeShards size 0, DescribeSchemeResultSerialized size 67} 2026-01-08T15:23:44.572950Z node 3 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1070: [3:7:2054] Handle NKikimrSchemeBoard.TEvSubscribe { Path: /root/db/dir_inside DomainOwnerId: 0 }: sender# [3:10:2057] 2026-01-08T15:23:44.573017Z node 3 :SCHEME_BOARD_REPLICA INFO: replica.cpp:655: [3:7:2054] Subscribe: subscriber# [3:10:2057], path# /root/db/dir_inside, domainOwnerId# 0, capabilities# =========== Path: "/root/db/dir_inside" PathDescription { Self { PathVersion: 1 } DomainDescription { DomainKey { SchemeShard: 800 PathId: 1 } } } PathId: 1111 PathOwnerId: 800 =========== Path: "/root/db/dir_inside" PathDescription { Self { PathVersion: 1 } DomainDescription { DomainKey { SchemeShard: 800 PathId: 1 } } } PathId: 11 PathOwnerId: 900 =========== DomainId: [OwnerId: 800, LocalPathId: 1] IsDeletion: 0 PathId: [OwnerId: 900, LocalPathId: 11] Versions: 1 >> ResourceBrokerConfigValidatorTests::TestRepeatedTaskName [GOOD] >> ResourceBrokerConfigValidatorTests::TestUnknownQueue [GOOD] >> ResourceBrokerConfigValidatorTests::TestUnlimitedResource [GOOD] >> ResourceBrokerConfigValidatorTests::TestUnusedQueue [GOOD] |82.4%| [TM] {asan, default-linux-x86_64, release} ydb/core/cms/console/validators/ut/unittest >> ResourceBrokerConfigValidatorTests::TestNoUnknownTask [GOOD] |82.4%| [TM] {BAZEL_UPLOAD} ydb/core/blobstorage/ut_vdisk/unittest ------- [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/ut_blobstorage/ut_cluster_balancing/unittest >> ClusterBalancing::ClusterBalancingEvenDistributionNotPossible [GOOD] Test command err: RandomSeed# 13115784096699913249 2026-01-08T15:23:40.069699Z 5 00h01m01.003072s :BS_SYNCER ERROR: PDiskId# 1001 VDISK[82000000:_:0:4:0]: (2181038080) TVDiskGuidRecoveryActor: DECISION: [Decision# LostData SubsequentFailure# 0] 2026-01-08T15:23:40.072035Z 5 00h01m01.003072s :BS_SYNCER ERROR: PDiskId# 1001 VDISK[82000000:_:0:4:0]: (2181038080) TVDiskGuidRecoveryActor: FINISH: [Decision# LostData Guid# 6863255405824628876] 2026-01-08T15:23:40.090497Z 5 00h01m01.003072s :BS_SYNCER ERROR: PDiskId# 1001 VDISK[82000000:_:0:4:0]: (2181038080) THullOsirisActor: FINISH: BlobsResurrected# 0 PartsResurrected# 0 2026-01-08T15:23:40.207319Z 4 00h01m02.003584s :BS_SYNCER ERROR: PDiskId# 1001 VDISK[82000001:_:0:3:0]: (2181038081) TVDiskGuidRecoveryActor: DECISION: [Decision# LostData SubsequentFailure# 0] 2026-01-08T15:23:40.209546Z 4 00h01m02.003584s :BS_SYNCER ERROR: PDiskId# 1001 VDISK[82000001:_:0:3:0]: (2181038081) TVDiskGuidRecoveryActor: FINISH: [Decision# LostData Guid# 16964695480389794514] 2026-01-08T15:23:40.230754Z 4 00h01m02.003584s :BS_SYNCER ERROR: PDiskId# 1001 VDISK[82000001:_:0:3:0]: (2181038081) THullOsirisActor: FINISH: BlobsResurrected# 0 PartsResurrected# 0 2026-01-08T15:23:40.321836Z 2 00h01m03.004096s :BS_SYNCER ERROR: PDiskId# 1001 VDISK[82000002:_:0:1:0]: (2181038082) TVDiskGuidRecoveryActor: DECISION: [Decision# LostData SubsequentFailure# 0] 2026-01-08T15:23:40.324187Z 2 00h01m03.004096s :BS_SYNCER ERROR: PDiskId# 1001 VDISK[82000002:_:0:1:0]: (2181038082) 
TVDiskGuidRecoveryActor: FINISH: [Decision# LostData Guid# 11518725439118163608] 2026-01-08T15:23:40.337083Z 2 00h01m03.004096s :BS_SYNCER ERROR: PDiskId# 1001 VDISK[82000002:_:0:1:0]: (2181038082) THullOsirisActor: FINISH: BlobsResurrected# 0 PartsResurrected# 0 2026-01-08T15:23:40.892865Z 8 00h01m16.004608s :BS_SYNCER ERROR: PDiskId# 1001 VDISK[82000000:_:0:7:0]: (2181038080) TVDiskGuidRecoveryActor: DECISION: [Decision# LostData SubsequentFailure# 0] 2026-01-08T15:23:40.894249Z 8 00h01m16.004608s :BS_SYNCER ERROR: PDiskId# 1001 VDISK[82000000:_:0:7:0]: (2181038080) TVDiskGuidRecoveryActor: FINISH: [Decision# LostData Guid# 10928324931547373305] 2026-01-08T15:23:40.914001Z 8 00h01m16.004608s :BS_SYNCER ERROR: PDiskId# 1001 VDISK[82000000:_:0:7:0]: (2181038080) THullOsirisActor: FINISH: BlobsResurrected# 0 PartsResurrected# 0 2026-01-08T15:23:41.036549Z 3 00h01m17.005120s :BS_SYNCER ERROR: PDiskId# 1001 VDISK[82000001:_:0:2:0]: (2181038081) TVDiskGuidRecoveryActor: DECISION: [Decision# LostData SubsequentFailure# 0] 2026-01-08T15:23:41.038758Z 3 00h01m17.005120s :BS_SYNCER ERROR: PDiskId# 1001 VDISK[82000001:_:0:2:0]: (2181038081) TVDiskGuidRecoveryActor: FINISH: [Decision# LostData Guid# 12897547105984454779] 2026-01-08T15:23:41.101889Z 3 00h01m17.005120s :BS_SYNCER ERROR: PDiskId# 1001 VDISK[82000001:_:0:2:0]: (2181038081) THullOsirisActor: FINISH: BlobsResurrected# 0 PartsResurrected# 0 2026-01-08T15:23:41.215491Z 6 00h01m18.005632s :BS_SYNCER ERROR: PDiskId# 1001 VDISK[82000002:_:0:5:0]: (2181038082) TVDiskGuidRecoveryActor: DECISION: [Decision# LostData SubsequentFailure# 0] 2026-01-08T15:23:41.217748Z 6 00h01m18.005632s :BS_SYNCER ERROR: PDiskId# 1001 VDISK[82000002:_:0:5:0]: (2181038082) TVDiskGuidRecoveryActor: FINISH: [Decision# LostData Guid# 4587493077378161624] 2026-01-08T15:23:41.230738Z 6 00h01m18.005632s :BS_SYNCER ERROR: PDiskId# 1001 VDISK[82000002:_:0:5:0]: (2181038082) THullOsirisActor: FINISH: BlobsResurrected# 0 PartsResurrected# 0 2026-01-08T15:23:41.940454Z 1 00h01m31.006144s :BS_SYNCER ERROR: PDiskId# 1001 VDISK[82000000:_:0:0:0]: (2181038080) TVDiskGuidRecoveryActor: DECISION: [Decision# LostData SubsequentFailure# 0] 2026-01-08T15:23:41.942926Z 1 00h01m31.006144s :BS_SYNCER ERROR: PDiskId# 1001 VDISK[82000000:_:0:0:0]: (2181038080) TVDiskGuidRecoveryActor: FINISH: [Decision# LostData Guid# 13252731567025137484] 2026-01-08T15:23:41.954841Z 1 00h01m31.006144s :BS_SYNCER ERROR: PDiskId# 1001 VDISK[82000000:_:0:0:0]: (2181038080) THullOsirisActor: FINISH: BlobsResurrected# 0 PartsResurrected# 0 2026-01-08T15:23:42.105914Z 7 00h01m32.006656s :BS_SYNCER ERROR: PDiskId# 1001 VDISK[82000001:_:0:6:0]: (2181038081) TVDiskGuidRecoveryActor: DECISION: [Decision# LostData SubsequentFailure# 0] 2026-01-08T15:23:42.108175Z 7 00h01m32.006656s :BS_SYNCER ERROR: PDiskId# 1001 VDISK[82000001:_:0:6:0]: (2181038081) TVDiskGuidRecoveryActor: FINISH: [Decision# LostData Guid# 4660981726531708870] 2026-01-08T15:23:42.134119Z 7 00h01m32.006656s :BS_SYNCER ERROR: PDiskId# 1001 VDISK[82000001:_:0:6:0]: (2181038081) THullOsirisActor: FINISH: BlobsResurrected# 0 PartsResurrected# 0 |82.5%| [TM] {asan, default-linux-x86_64, release} ydb/core/actorlib_impl/ut/unittest >> TestProtocols::TestHTTPRequest [GOOD] |82.5%| [TM] {BAZEL_UPLOAD} ydb/core/tx/scheme_board/ut_replica/unittest |82.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/ut_blobstorage/statestorage_2_ring_groups.cpp >> NameserviceConfigValidatorTests::TestRemoveTooMany >> 
TStateStorageConfig::DuplicatedNodesTest [GOOD] >> NameserviceConfigValidatorTests::TestRemoveTooMany [GOOD] >> ResourceBrokerConfigValidatorTests::TestEmptyConfig [GOOD] >> ResourceBrokerConfigValidatorTests::TestEmptyQueueName [GOOD] >> ResourceBrokerConfigValidatorTests::TestEmptyTaskName [GOOD] |82.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/ut_blobstorage/statestorage.cpp |82.5%| [TM] {asan, default-linux-x86_64, release} ydb/core/cms/console/validators/ut/unittest >> BootstrapTabletsValidatorTests::TestCompactionBroker [GOOD] |82.5%| [TM] {BAZEL_UPLOAD} ydb/core/blobstorage/ut_vdisk/unittest |82.5%| [TM] {asan, default-linux-x86_64, release} ydb/core/cms/console/validators/ut/unittest >> NameserviceConfigValidatorTests::TestDuplicatingAddrPort [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/scheme_board/ut_replica/unittest >> TReplicaTest::StrongNotificationAfterCommit [GOOD] Test command err: 2026-01-08T15:23:44.855277Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1070: [1:7:2054] Handle NKikimrSchemeBoard.TEvSubscribe { Path: path DomainOwnerId: 0 }: sender# [1:9:2056] 2026-01-08T15:23:44.855370Z node 1 :SCHEME_BOARD_REPLICA INFO: replica.cpp:529: [1:7:2054] Upsert description: path# path 2026-01-08T15:23:44.855489Z node 1 :SCHEME_BOARD_REPLICA INFO: replica.cpp:655: [1:7:2054] Subscribe: subscriber# [1:9:2056], path# path, domainOwnerId# 0, capabilities# 2026-01-08T15:23:44.855628Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1070: [1:7:2054] Handle NKikimrSchemeBoard.TEvSubscribe { PathId: [OwnerId: 1, LocalPathId: 1] DomainOwnerId: 0 }: sender# [1:10:2057] 2026-01-08T15:23:44.855681Z node 1 :SCHEME_BOARD_REPLICA INFO: replica.cpp:529: [1:7:2054] Upsert description: path# [OwnerId: 1, LocalPathId: 1] 2026-01-08T15:23:44.855756Z node 1 :SCHEME_BOARD_REPLICA INFO: replica.cpp:655: [1:7:2054] Subscribe: subscriber# [1:10:2057], path# [OwnerId: 1, LocalPathId: 1], domainOwnerId# 0, capabilities# 2026-01-08T15:23:44.855917Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:760: [1:7:2054] Handle NKikimrSchemeBoard.TEvHandshake { Owner: 1 Generation: 1 }: sender# [1:8:2055] 2026-01-08T15:23:44.855965Z node 1 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:778: [1:7:2054] Successful handshake: owner# 1, generation# 1 2026-01-08T15:23:44.856112Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:791: [1:7:2054] Handle NKikimrSchemeBoard.TEvUpdate { Owner: 1 Generation: 1 }: sender# [1:8:2055], cookie# 0, event size# 72 2026-01-08T15:23:44.856156Z node 1 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:831: [1:7:2054] Update description: path# path, pathId# [OwnerId: 1, LocalPathId: 1], deletion# false 2026-01-08T15:23:44.864033Z node 1 :SCHEME_BOARD_REPLICA INFO: replica.cpp:559: [1:7:2054] Upsert description: path# path, pathId# [OwnerId: 1, LocalPathId: 1], pathDescription# {Status StatusSuccess, Path path, PathId [OwnerId: 1, LocalPathId: 1], PathVersion 1, SubdomainPathId , PathAbandonedTenantsSchemeShards size 0, DescribeSchemeResultSerialized size 30} 2026-01-08T15:23:44.864402Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:791: [1:7:2054] Handle NKikimrSchemeBoard.TEvUpdate { Owner: 1 Generation: 1 }: sender# [1:8:2055], cookie# 0, event size# 40 2026-01-08T15:23:44.864455Z node 1 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:831: [1:7:2054] Update description: path# path, pathId# [OwnerId: 1, LocalPathId: 1], deletion# true 2026-01-08T15:23:44.864559Z node 1 :SCHEME_BOARD_REPLICA INFO: replica.cpp:584: [1:7:2054] Delete description: path# 
path, pathId# [OwnerId: 1, LocalPathId: 1] 2026-01-08T15:23:45.172369Z node 2 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:760: [2:7:2054] Handle NKikimrSchemeBoard.TEvHandshake { Owner: 1 Generation: 1 }: sender# [2:8:2055] 2026-01-08T15:23:45.172543Z node 2 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:778: [2:7:2054] Successful handshake: owner# 1, generation# 1 2026-01-08T15:23:45.172695Z node 2 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1070: [2:7:2054] Handle NKikimrSchemeBoard.TEvSubscribe { PathId: [OwnerId: 1, LocalPathId: 1] DomainOwnerId: 0 }: sender# [2:9:2056] 2026-01-08T15:23:45.172741Z node 2 :SCHEME_BOARD_REPLICA INFO: replica.cpp:529: [2:7:2054] Upsert description: path# [OwnerId: 1, LocalPathId: 1] 2026-01-08T15:23:45.172825Z node 2 :SCHEME_BOARD_REPLICA INFO: replica.cpp:655: [2:7:2054] Subscribe: subscriber# [2:9:2056], path# [OwnerId: 1, LocalPathId: 1], domainOwnerId# 0, capabilities# 2026-01-08T15:23:45.173007Z node 2 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:791: [2:7:2054] Handle NKikimrSchemeBoard.TEvUpdate { Owner: 1 Generation: 1 }: sender# [2:8:2055], cookie# 0, event size# 72 2026-01-08T15:23:45.173114Z node 2 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:831: [2:7:2054] Update description: path# path, pathId# [OwnerId: 1, LocalPathId: 1], deletion# false 2026-01-08T15:23:45.173189Z node 2 :SCHEME_BOARD_REPLICA INFO: replica.cpp:559: [2:7:2054] Upsert description: path# path, pathId# [OwnerId: 1, LocalPathId: 1], pathDescription# {Status StatusSuccess, Path path, PathId [OwnerId: 1, LocalPathId: 1], PathVersion 1, SubdomainPathId , PathAbandonedTenantsSchemeShards size 0, DescribeSchemeResultSerialized size 30} 2026-01-08T15:23:45.173346Z node 2 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:791: [2:7:2054] Handle NKikimrSchemeBoard.TEvUpdate { Owner: 1 Generation: 1 }: sender# [2:8:2055], cookie# 0, event size# 40 2026-01-08T15:23:45.173387Z node 2 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:831: [2:7:2054] Update description: path# path, pathId# [OwnerId: 1, LocalPathId: 1], deletion# true 2026-01-08T15:23:45.173426Z node 2 :SCHEME_BOARD_REPLICA INFO: replica.cpp:584: [2:7:2054] Delete description: path# path, pathId# [OwnerId: 1, LocalPathId: 1] 2026-01-08T15:23:45.173513Z node 2 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1084: [2:7:2054] Handle NKikimrSchemeBoard.TEvUnsubscribe { PathId: [OwnerId: 1, LocalPathId: 1] }: sender# [2:9:2056] 2026-01-08T15:23:45.173574Z node 2 :SCHEME_BOARD_REPLICA INFO: replica.cpp:671: [2:7:2054] Unsubscribe: subscriber# [2:9:2056], path# [OwnerId: 1, LocalPathId: 1] 2026-01-08T15:23:45.173686Z node 2 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:791: [2:7:2054] Handle NKikimrSchemeBoard.TEvUpdate { Owner: 1 Generation: 1 }: sender# [2:8:2055], cookie# 0, event size# 72 2026-01-08T15:23:45.173746Z node 2 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:831: [2:7:2054] Update description: path# path, pathId# [OwnerId: 1, LocalPathId: 1], deletion# false 2026-01-08T15:23:45.173819Z node 2 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:843: [2:7:2054] Path was explicitly deleted, ignoring: path# path, pathId# [OwnerId: 1, LocalPathId: 1] 2026-01-08T15:23:45.173920Z node 2 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:791: [2:7:2054] Handle NKikimrSchemeBoard.TEvUpdate { Owner: 1 Generation: 1 }: sender# [2:8:2055], cookie# 0, event size# 72 2026-01-08T15:23:45.173964Z node 2 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:831: [2:7:2054] Update description: path# path, pathId# [OwnerId: 1, LocalPathId: 2], deletion# false 2026-01-08T15:23:45.174028Z node 2 :SCHEME_BOARD_REPLICA 
INFO: replica.cpp:559: [2:7:2054] Upsert description: path# path, pathId# [OwnerId: 1, LocalPathId: 2], pathDescription# {Status StatusSuccess, Path path, PathId [OwnerId: 1, LocalPathId: 2], PathVersion 1, SubdomainPathId , PathAbandonedTenantsSchemeShards size 0, DescribeSchemeResultSerialized size 30} 2026-01-08T15:23:45.174149Z node 2 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1070: [2:7:2054] Handle NKikimrSchemeBoard.TEvSubscribe { PathId: [OwnerId: 1, LocalPathId: 2] DomainOwnerId: 0 }: sender# [2:10:2057] 2026-01-08T15:23:45.174215Z node 2 :SCHEME_BOARD_REPLICA INFO: replica.cpp:655: [2:7:2054] Subscribe: subscriber# [2:10:2057], path# [OwnerId: 1, LocalPathId: 2], domainOwnerId# 0, capabilities# 2026-01-08T15:23:45.476278Z node 3 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1070: [3:7:2054] Handle NKikimrSchemeBoard.TEvSubscribe { Path: path DomainOwnerId: 1 }: sender# [3:9:2056] 2026-01-08T15:23:45.476381Z node 3 :SCHEME_BOARD_REPLICA INFO: replica.cpp:529: [3:7:2054] Upsert description: path# path 2026-01-08T15:23:45.476456Z node 3 :SCHEME_BOARD_REPLICA INFO: replica.cpp:655: [3:7:2054] Subscribe: subscriber# [3:9:2056], path# path, domainOwnerId# 1, capabilities# 2026-01-08T15:23:45.476598Z node 3 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:760: [3:7:2054] Handle NKikimrSchemeBoard.TEvHandshake { Owner: 1 Generation: 1 }: sender# [3:8:2055] 2026-01-08T15:23:45.476642Z node 3 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:778: [3:7:2054] Successful handshake: owner# 1, generation# 1 2026-01-08T15:23:45.476726Z node 3 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:967: [3:7:2054] Handle NKikimrSchemeBoard.TEvCommitGeneration { Owner: 1 Generation: 1 }: sender# [3:8:2055] 2026-01-08T15:23:45.476764Z node 3 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:994: [3:7:2054] Commit generation: owner# 1, generation# 1 2026-01-08T15:23:45.476874Z node 3 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1006: [3:7:2054] Handle NKikimr::NSchemeBoard::TReplica::TEvPrivate::TEvSendStrongNotifications { Owner: 1 } |82.5%| [TM] {asan, default-linux-x86_64, release} ydb/core/cms/console/validators/ut/unittest >> ResourceBrokerConfigValidatorTests::TestZeroDefaultDuration [GOOD] >> TReplicaTest::UpdateWithoutHandshake ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/scheme_board/ut_replica/unittest >> TReplicaTest::SyncVersion [GOOD] Test command err: 2026-01-08T15:23:45.004428Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:760: [1:7:2054] Handle NKikimrSchemeBoard.TEvHandshake { Owner: 1 Generation: 1 }: sender# [1:8:2055] 2026-01-08T15:23:45.004527Z node 1 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:778: [1:7:2054] Successful handshake: owner# 1, generation# 1 2026-01-08T15:23:45.004714Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:791: [1:7:2054] Handle NKikimrSchemeBoard.TEvUpdate { Owner: 1 Generation: 1 }: sender# [1:8:2055], cookie# 0, event size# 72 2026-01-08T15:23:45.004769Z node 1 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:831: [1:7:2054] Update description: path# path, pathId# [OwnerId: 1, LocalPathId: 1], deletion# false 2026-01-08T15:23:45.016521Z node 1 :SCHEME_BOARD_REPLICA INFO: replica.cpp:559: [1:7:2054] Upsert description: path# path, pathId# [OwnerId: 1, LocalPathId: 1], pathDescription# {Status StatusSuccess, Path path, PathId [OwnerId: 1, LocalPathId: 1], PathVersion 1, SubdomainPathId , PathAbandonedTenantsSchemeShards size 0, DescribeSchemeResultSerialized size 30} 2026-01-08T15:23:45.016750Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1070: [1:7:2054] Handle 
NKikimrSchemeBoard.TEvSubscribe { Path: path DomainOwnerId: 0 }: sender# [1:8:2055] 2026-01-08T15:23:45.016855Z node 1 :SCHEME_BOARD_REPLICA INFO: replica.cpp:655: [1:7:2054] Subscribe: subscriber# [1:8:2055], path# path, domainOwnerId# 0, capabilities# 2026-01-08T15:23:45.017020Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:791: [1:7:2054] Handle NKikimrSchemeBoard.TEvUpdate { Owner: 1 Generation: 1 }: sender# [1:8:2055], cookie# 0, event size# 40 2026-01-08T15:23:45.017066Z node 1 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:831: [1:7:2054] Update description: path# path, pathId# [OwnerId: 1, LocalPathId: 1], deletion# true 2026-01-08T15:23:45.017123Z node 1 :SCHEME_BOARD_REPLICA INFO: replica.cpp:584: [1:7:2054] Delete description: path# path, pathId# [OwnerId: 1, LocalPathId: 1] 2026-01-08T15:23:45.307887Z node 2 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1070: [2:7:2054] Handle NKikimrSchemeBoard.TEvSubscribe { Path: path DomainOwnerId: 0 }: sender# [2:8:2055] 2026-01-08T15:23:45.307991Z node 2 :SCHEME_BOARD_REPLICA INFO: replica.cpp:529: [2:7:2054] Upsert description: path# path 2026-01-08T15:23:45.308089Z node 2 :SCHEME_BOARD_REPLICA INFO: replica.cpp:655: [2:7:2054] Subscribe: subscriber# [2:8:2055], path# path, domainOwnerId# 0, capabilities# 2026-01-08T15:23:45.615395Z node 3 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:760: [3:7:2054] Handle NKikimrSchemeBoard.TEvHandshake { Owner: 1 Generation: 1 }: sender# [3:8:2055] 2026-01-08T15:23:45.615498Z node 3 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:778: [3:7:2054] Successful handshake: owner# 1, generation# 1 2026-01-08T15:23:45.615660Z node 3 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:791: [3:7:2054] Handle NKikimrSchemeBoard.TEvUpdate { Owner: 1 Generation: 1 }: sender# [3:8:2055], cookie# 0, event size# 76 2026-01-08T15:23:45.615721Z node 3 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:831: [3:7:2054] Update description: path# path, pathId# [OwnerId: 1, LocalPathId: 1], deletion# false 2026-01-08T15:23:45.615790Z node 3 :SCHEME_BOARD_REPLICA INFO: replica.cpp:559: [3:7:2054] Upsert description: path# path, pathId# [OwnerId: 1, LocalPathId: 1], pathDescription# {Status StatusSuccess, Path path, PathId [OwnerId: 1, LocalPathId: 1], PathVersion 100500, SubdomainPathId , PathAbandonedTenantsSchemeShards size 0, DescribeSchemeResultSerialized size 32} 2026-01-08T15:23:45.615880Z node 3 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1070: [3:7:2054] Handle NKikimrSchemeBoard.TEvSubscribe { Path: path DomainOwnerId: 0 }: sender# [3:8:2055] 2026-01-08T15:23:45.615947Z node 3 :SCHEME_BOARD_REPLICA INFO: replica.cpp:655: [3:7:2054] Subscribe: subscriber# [3:8:2055], path# path, domainOwnerId# 0, capabilities# 2026-01-08T15:23:45.616046Z node 3 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1137: [3:7:2054] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: path }: sender# [3:8:2055], cookie# 1 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/ut_blobstorage/ut_check_integrity/unittest >> CheckIntegrityMirror3dc::PlacementMissingParts [GOOD] Test command err: RandomSeed# 6124443126899701023 *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:1] TO [82000000:1:1:0:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:2] TO [82000000:1:2:0:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:3] TO [82000000:1:0:0:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:1] TO [82000000:1:1:1:0] FINISHED WITH OK *** *** PUT BLOB 
[72075186270680851:57:3905:6:786432:1024:2] TO [82000000:1:2:1:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:3] TO [82000000:1:0:1:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:1] TO [82000000:1:1:0:0] FINISHED WITH OK *** >> TInterconnectTest::TestBlobEvent |82.5%| [TM] {BAZEL_UPLOAD} ydb/core/tx/scheme_board/ut_replica/unittest |82.5%| [TM] {BAZEL_UPLOAD} ydb/core/cms/console/validators/ut/unittest |82.5%| [TM] {asan, default-linux-x86_64, release} ydb/core/cms/console/validators/ut/unittest >> NameserviceConfigValidatorTests::TestEmptyAddresses [GOOD] |82.5%| [TM] {BAZEL_UPLOAD} ydb/core/cms/console/validators/ut/unittest |82.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/ut_blobstorage/backpressure.cpp >> TInterconnectTest::TestConnectAndDisconnect >> TBsVDiskManyPutGet::ManyPutRangeGet2ChannelsIndexOnly [GOOD] >> TActorActivity::Basic [GOOD] >> TBsVDiskManyPutGetCheckSize::ManyPutGetCheckSize >> ActorBootstrapped::TestBootstrapped >> TReplicaTest::UpdateWithoutHandshake [GOOD] >> TReplicaTest::UpdateWithStaleGeneration |82.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/ut_blobstorage/statestorage_2_ring_groups.cpp |82.5%| [TM] {BAZEL_UPLOAD} ydb/core/blobstorage/ut_blobstorage/ut_cluster_balancing/unittest |82.5%| [TM] {BAZEL_UPLOAD} ydb/core/actorlib_impl/ut/unittest |82.5%| [TM] {asan, default-linux-x86_64, release} ydb/core/cms/console/validators/ut/unittest >> ResourceBrokerConfigValidatorTests::TestUnusedQueue [GOOD] >> ActorBootstrapped::TestBootstrapped [GOOD] >> ActorBootstrapped::TestBootstrappedParent |82.5%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/apps/pgwire/pgwire |82.5%| [TM] {asan, default-linux-x86_64, release} ydb/core/cms/console/validators/ut/unittest >> ResourceBrokerConfigValidatorTests::TestEmptyTaskName [GOOD] |82.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/ut_blobstorage/statestorage.cpp |82.5%| [LD] {RESULT} $(B)/ydb/apps/pgwire/pgwire >> ActorBootstrapped::TestBootstrappedParent [GOOD] >> TActorTracker::Basic >> IcbAsActorTests::TestHttpGetResponse [GOOD] |82.5%| [TM] {asan, default-linux-x86_64, release} ydb/core/control/ut/unittest |82.5%| [TM] {asan, default-linux-x86_64, release} ydb/core/control/ut/unittest |82.5%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/apps/pgwire/pgwire >> TReplicaTest::UpdateWithStaleGeneration [GOOD] >> TInterconnectTest::TestBlobEvent [GOOD] >> TInterconnectTest::TestBlobEvent220Bytes >> IcbAsActorTests::TestHttpPostReaction |82.5%| [TM] {BAZEL_UPLOAD} ydb/core/cms/console/validators/ut/unittest |82.5%| [TM] {asan, default-linux-x86_64, release} ydb/core/control/ut/unittest |82.5%| [TM] {BAZEL_UPLOAD} ydb/core/tx/scheme_board/ut_replica/unittest |82.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/ut_blobstorage/read_only_pdisk.cpp |82.5%| [TM] {BAZEL_UPLOAD} ydb/core/cms/console/validators/ut/unittest >> IcbAsActorTests::TestHttpPostReaction [GOOD] >> TActorTracker::Basic [GOOD] |82.5%| [TM] {asan, default-linux-x86_64, release} ydb/core/control/ut/unittest |82.5%| [TM] {BAZEL_UPLOAD} ydb/core/tx/scheme_board/ut_replica/unittest >> TInterconnectTest::TestConnectAndDisconnect [GOOD] >> TInterconnectTest::TestBlobEventPreSerialized |82.5%| [TM] {BAZEL_UPLOAD} ydb/core/blobstorage/ut_blobstorage/ut_check_integrity/unittest |82.5%| [TM] {BAZEL_UPLOAD} ydb/core/cms/console/validators/ut/unittest |82.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/ut_blobstorage/backpressure.cpp |82.6%| [CC] 
{default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/ut_blobstorage/bsc_cache.cpp |82.6%| [TM] {BAZEL_UPLOAD} ydb/core/cms/console/validators/ut/unittest |82.6%| [TM] {BAZEL_UPLOAD} ydb/core/cms/console/validators/ut/unittest >> TInterconnectTest::TestBlobEvent220Bytes [GOOD] >> TInterconnectTest::TestAddressResolve |82.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/ut_blobstorage/ds_proxy_lwtrace.cpp |82.6%| [TM] {BAZEL_UPLOAD} ydb/core/cms/console/validators/ut/unittest ------- [TM] {asan, default-linux-x86_64, release} ydb/core/base/ut/unittest >> TStateStorageConfig::DuplicatedNodesTest [GOOD] Test command err: AnonRss: 11 CGroupLimit: 21 MemTotal: 31 MemAvailable: 41 AllocatedMemory: 51 AllocatorCachesMemory: 61 HardLimit: 71 SoftLimit: 81 TargetUtilization: 91 ExternalConsumption: 101 SharedCacheConsumption: 111 SharedCacheLimit: 121 MemTableConsumption: 131 MemTableLimit: 141 QueryExecutionConsumption: 151 QueryExecutionLimit: 161 AnonRss: 12 CGroupLimit: 22 MemTotal: 32 MemAvailable: 42 AllocatedMemory: 52 AllocatorCachesMemory: 62 HardLimit: 72 SoftLimit: 82 TargetUtilization: 92 ExternalConsumption: 102 SharedCacheConsumption: 112 SharedCacheLimit: 122 MemTableConsumption: 132 MemTableLimit: 142 QueryExecutionConsumption: 152 QueryExecutionLimit: 162 AnonRss: 13 CGroupLimit: 23 MemTotal: 33 MemAvailable: 43 AllocatedMemory: 53 AllocatorCachesMemory: 63 HardLimit: 73 SoftLimit: 83 TargetUtilization: 93 ExternalConsumption: 103 SharedCacheConsumption: 113 SharedCacheLimit: 123 MemTableConsumption: 133 MemTableLimit: 143 QueryExecutionConsumption: 153 QueryExecutionLimit: 163 AnonRss: 36 CGroupLimit: 66 MemTotal: 96 MemAvailable: 126 AllocatedMemory: 156 AllocatorCachesMemory: 186 HardLimit: 216 SoftLimit: 246 TargetUtilization: 276 SharedCacheConsumption: 336 SharedCacheLimit: 366 MemTableConsumption: 396 MemTableLimit: 426 QueryExecutionConsumption: 456 QueryExecutionLimit: 486 |82.6%| [TM] {asan, default-linux-x86_64, release} ydb/core/control/ut/unittest |82.6%| [TM] {asan, default-linux-x86_64, release} ydb/core/control/ut/unittest >> TInterconnectTest::TestBlobEventPreSerialized [GOOD] >> TInterconnectTest::TestBlobEventUpToMebibytes |82.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/ut_blobstorage/read_only_pdisk.cpp |82.6%| [TM] {BAZEL_UPLOAD} ydb/core/control/ut/unittest |82.6%| [TM] {asan, default-linux-x86_64, release} ydb/core/control/ut/unittest >> TBsVDiskManyPutGet::ManyMultiPutGetWithLargeBatch [GOOD] |82.6%| [TM] {BAZEL_UPLOAD} ydb/core/control/ut/unittest |82.6%| [TM] {BAZEL_UPLOAD} ydb/core/control/ut/unittest |82.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/ut_blobstorage/bsc_cache.cpp |82.6%| [TM] {asan, default-linux-x86_64, release} ydb/core/control/ut/unittest >> IcbAsActorTests::TestHttpGetResponse [GOOD] |82.6%| [TM] {asan, default-linux-x86_64, release} ydb/core/control/ut/unittest >> TInterconnectTest::TestAddressResolve [GOOD] >> TInterconnectTest::OldNbs >> TBoardSubscriber2DCTest::DropByDisconnect >> TSubscriberCombinationsTest::CombinationsRootDomain |82.6%| [TM] {BAZEL_UPLOAD} ydb/core/control/ut/unittest ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/scheme_board/ut_replica/unittest >> TReplicaTest::UpdateWithStaleGeneration [GOOD] Test command err: 2026-01-08T15:23:47.132692Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:791: [1:7:2054] Handle NKikimrSchemeBoard.TEvUpdate { Owner: 1 Generation: 1 }: sender# [1:8:2055], cookie# 0, event size# 72 2026-01-08T15:23:47.132781Z node 1 
:SCHEME_BOARD_REPLICA ERROR: replica.cpp:806: [1:7:2054] Reject update from unknown populator: sender# [1:8:2055], owner# 1, generation# 1 2026-01-08T15:23:47.132896Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1070: [1:7:2054] Handle NKikimrSchemeBoard.TEvSubscribe { Path: path DomainOwnerId: 0 }: sender# [1:8:2055] 2026-01-08T15:23:47.132939Z node 1 :SCHEME_BOARD_REPLICA INFO: replica.cpp:529: [1:7:2054] Upsert description: path# path 2026-01-08T15:23:47.133086Z node 1 :SCHEME_BOARD_REPLICA INFO: replica.cpp:655: [1:7:2054] Subscribe: subscriber# [1:8:2055], path# path, domainOwnerId# 0, capabilities# 2026-01-08T15:23:47.133251Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1084: [1:7:2054] Handle NKikimrSchemeBoard.TEvUnsubscribe { Path: path }: sender# [1:8:2055] 2026-01-08T15:23:47.133337Z node 1 :SCHEME_BOARD_REPLICA INFO: replica.cpp:671: [1:7:2054] Unsubscribe: subscriber# [1:8:2055], path# path 2026-01-08T15:23:47.133421Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1070: [1:7:2054] Handle NKikimrSchemeBoard.TEvSubscribe { PathId: [OwnerId: 1, LocalPathId: 1] DomainOwnerId: 0 }: sender# [1:8:2055] 2026-01-08T15:23:47.133473Z node 1 :SCHEME_BOARD_REPLICA INFO: replica.cpp:529: [1:7:2054] Upsert description: path# [OwnerId: 1, LocalPathId: 1] 2026-01-08T15:23:47.133637Z node 1 :SCHEME_BOARD_REPLICA INFO: replica.cpp:655: [1:7:2054] Subscribe: subscriber# [1:8:2055], path# [OwnerId: 1, LocalPathId: 1], domainOwnerId# 0, capabilities# 2026-01-08T15:23:47.133744Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1084: [1:7:2054] Handle NKikimrSchemeBoard.TEvUnsubscribe { PathId: [OwnerId: 1, LocalPathId: 1] }: sender# [1:8:2055] 2026-01-08T15:23:47.133806Z node 1 :SCHEME_BOARD_REPLICA INFO: replica.cpp:671: [1:7:2054] Unsubscribe: subscriber# [1:8:2055], path# [OwnerId: 1, LocalPathId: 1] 2026-01-08T15:23:47.453271Z node 2 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:760: [2:7:2054] Handle NKikimrSchemeBoard.TEvHandshake { Owner: 1 Generation: 1 }: sender# [2:8:2055] 2026-01-08T15:23:47.453350Z node 2 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:778: [2:7:2054] Successful handshake: owner# 1, generation# 1 2026-01-08T15:23:47.453520Z node 2 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:791: [2:7:2054] Handle NKikimrSchemeBoard.TEvUpdate { Owner: 1 Generation: 0 }: sender# [2:8:2055], cookie# 0, event size# 72 2026-01-08T15:23:47.453951Z node 2 :SCHEME_BOARD_REPLICA ERROR: replica.cpp:814: [2:7:2054] Reject update from stale populator: sender# [2:8:2055], owner# 1, generation# 0, pending generation# 1 2026-01-08T15:23:47.454069Z node 2 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1070: [2:7:2054] Handle NKikimrSchemeBoard.TEvSubscribe { Path: path DomainOwnerId: 0 }: sender# [2:8:2055] 2026-01-08T15:23:47.454112Z node 2 :SCHEME_BOARD_REPLICA INFO: replica.cpp:529: [2:7:2054] Upsert description: path# path 2026-01-08T15:23:47.454194Z node 2 :SCHEME_BOARD_REPLICA INFO: replica.cpp:655: [2:7:2054] Subscribe: subscriber# [2:8:2055], path# path, domainOwnerId# 0, capabilities# 2026-01-08T15:23:47.454359Z node 2 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1084: [2:7:2054] Handle NKikimrSchemeBoard.TEvUnsubscribe { Path: path }: sender# [2:8:2055] 2026-01-08T15:23:47.454419Z node 2 :SCHEME_BOARD_REPLICA INFO: replica.cpp:671: [2:7:2054] Unsubscribe: subscriber# [2:8:2055], path# path 2026-01-08T15:23:47.454497Z node 2 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1070: [2:7:2054] Handle NKikimrSchemeBoard.TEvSubscribe { PathId: [OwnerId: 1, LocalPathId: 1] DomainOwnerId: 0 }: sender# [2:8:2055] 
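
The two ERROR records above ("Reject update from unknown populator" and "Reject update from stale populator ... generation# 0, pending generation# 1") show the replica's generation gate: a populator must first handshake, and only updates carrying at least the pending generation are applied. The following minimal, self-contained C++ sketch models that check; the names (TReplicaModel, AcceptUpdate) are hypothetical illustrations and do not reproduce the actual replica.cpp API.

    #include <cstdint>
    #include <iostream>
    #include <map>

    struct TReplicaModel {
        std::map<uint64_t, uint64_t> Generations; // pending generation per owner (populator)

        void Handshake(uint64_t owner, uint64_t generation) {
            Generations[owner] = generation; // "Successful handshake: owner# ..., generation# ..."
        }

        bool AcceptUpdate(uint64_t owner, uint64_t generation) const {
            auto it = Generations.find(owner);
            if (it == Generations.end()) {
                std::cout << "Reject update from unknown populator: owner# " << owner << "\n";
                return false;
            }
            if (generation < it->second) {
                std::cout << "Reject update from stale populator: owner# " << owner
                          << ", generation# " << generation
                          << ", pending generation# " << it->second << "\n";
                return false;
            }
            return true; // update would be applied
        }
    };

    int main() {
        TReplicaModel replica;
        replica.AcceptUpdate(1, 1); // no handshake yet: unknown populator
        replica.Handshake(1, 1);    // owner 1 announces generation 1
        replica.AcceptUpdate(1, 0); // generation 0 < pending 1: stale
        replica.AcceptUpdate(1, 1); // accepted
    }
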
2026-01-08T15:23:47.454537Z node 2 :SCHEME_BOARD_REPLICA INFO: replica.cpp:529: [2:7:2054] Upsert description: path# [OwnerId: 1, LocalPathId: 1] 2026-01-08T15:23:47.454610Z node 2 :SCHEME_BOARD_REPLICA INFO: replica.cpp:655: [2:7:2054] Subscribe: subscriber# [2:8:2055], path# [OwnerId: 1, LocalPathId: 1], domainOwnerId# 0, capabilities# 2026-01-08T15:23:47.454716Z node 2 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1084: [2:7:2054] Handle NKikimrSchemeBoard.TEvUnsubscribe { PathId: [OwnerId: 1, LocalPathId: 1] }: sender# [2:8:2055] 2026-01-08T15:23:47.454766Z node 2 :SCHEME_BOARD_REPLICA INFO: replica.cpp:671: [2:7:2054] Unsubscribe: subscriber# [2:8:2055], path# [OwnerId: 1, LocalPathId: 1] |82.6%| [TM] {asan, default-linux-x86_64, release} ydb/core/control/ut/unittest >> IcbAsActorTests::TestHttpPostReaction [GOOD] >> TBoardSubscriber2DCTest::NotAvailableByShutdown >> TBoardSubscriberTest::ReconnectReplica >> TInterconnectTest::TestBlobEventUpToMebibytes [GOOD] >> TInterconnectTest::TestBlobEventsThroughSubChannels >> TBoardSubscriber2DCTest::ManySubscribersManyPublisher >> TBoardSubscriber2DCTest::SimpleSubscriber >> TBoardSubscriberTest::SimpleSubscriber |82.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/ut_blobstorage/ds_proxy_lwtrace.cpp ------- [TM] {asan, default-linux-x86_64, release} ydb/core/actorlib_impl/ut/unittest >> TActorTracker::Basic [GOOD] Test command err: ASYNC_DESTROYER >> TBoardSubscriber2DCTest::ReconnectReplica >> TBoardSubscriberTest::ManySubscribersManyPublisher >> TBoardSubscriberTest::SimpleSubscriber [GOOD] |82.6%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/ut_vdisk/unittest >> TBsVDiskManyPutGet::ManyMultiPutGetWithLargeBatch [GOOD] |82.6%| [TM] {BAZEL_UPLOAD} ydb/core/control/ut/unittest |82.6%| [TM] {BAZEL_UPLOAD} ydb/core/control/ut/unittest >> TInterconnectTest::TestBlobEventsThroughSubChannels [GOOD] >> TSubscriberSyncQuorumTest::TwoRingGroups |82.6%| [TM] {BAZEL_UPLOAD} ydb/core/base/ut/unittest >> TBoardSubscriberTest::ReconnectReplica [GOOD] >> TBoardSubscriberTest::DropByDisconnect >> TSubscriberSinglePathUpdateTest::OneRingGroup >> TSubscriberSinglePathUpdateTest::TwoRingGroups >> TBoardSubscriberTest::NotAvailableByShutdown |82.6%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/tools/stress_tool/ydb_stress_tool |82.6%| [TM] {BAZEL_UPLOAD} ydb/core/control/ut/unittest |82.6%| [TM] {BAZEL_UPLOAD} ydb/core/control/ut/unittest >> TInterconnectTest::OldNbs [GOOD] |82.6%| [TM] {BAZEL_UPLOAD} ydb/core/control/ut/unittest |82.6%| [LD] {RESULT} $(B)/ydb/tools/stress_tool/ydb_stress_tool |82.6%| [TM] {BAZEL_UPLOAD} ydb/core/control/ut/unittest |82.6%| [TM] {BAZEL_UPLOAD} ydb/core/tx/scheme_board/ut_replica/unittest |82.6%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/tools/stress_tool/ydb_stress_tool |82.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/ut_blobstorage/get_block.cpp >> TSubscriberTest::StrongNotificationAfterCommit >> TBoardSubscriber2DCTest::DropByDisconnect [GOOD] >> TSubscriberSyncQuorumTest::TwoRingGroups [GOOD] >> TSubscriberTest::Boot >> TSubscriberTest::NotifyDelete >> TSubscriberSinglePathUpdateTest::TwoRingGroups [GOOD] |82.6%| [TM] {BAZEL_UPLOAD} ydb/core/actorlib_impl/ut/unittest >> TSubscriberSinglePathUpdateTest::OneRingGroup [GOOD] |82.6%| [TM] {BAZEL_UPLOAD} ydb/core/blobstorage/ut_vdisk/unittest >> TSubscriberSinglePathUpdateTest::ReplicaConfigMismatch >> TSubscriberSinglePathUpdateTest::OneDisconnectedRingGroup >> 
TDNSGatewaySuite::ShouldFilterDNSAddressedBasedOnProvidedProtocolIPV4Case [GOOD] >> ClusterBalancing::ClusterBalancingEvenDistribution [GOOD] >> FormatTimes::DurationMs [GOOD] |82.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/ut_blobstorage/counting_events.cpp |82.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/ut_blobstorage/get_block.cpp |82.6%| [TM] {asan, default-linux-x86_64, release} ydb/core/actorlib_impl/ut/unittest >> TInterconnectTest::TestBlobEventsThroughSubChannels [GOOD] >> TSubscriberTest::NotifyUpdate >> TBoardSubscriberTest::ManySubscribersManyPublisher [GOOD] |82.6%| [TM] {asan, default-linux-x86_64, release} ydb/core/base/ut_board_subscriber/unittest >> TBoardSubscriberTest::ReconnectReplica [GOOD] >> TBoardSubscriberTest::NotAvailableByShutdown [GOOD] |82.6%| [TM] {asan, default-linux-x86_64, release} ydb/core/base/ut_board_subscriber/unittest >> TBoardSubscriberTest::SimpleSubscriber [GOOD] |82.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/ut_blobstorage/counting_events.cpp >> TSubscriberTest::SyncPartial >> TBoardSubscriber2DCTest::NotAvailableByShutdown [GOOD] >> TBlobStorageHullWriteSst::LogoBlobOneSstOneIndexWithSmallWriteBlocks [GOOD] >> TBlobStorageHullWriteSst::LogoBlobOneSstOneIndexPartOutbound [GOOD] >> TSubscriberSinglePathUpdateTest::OneDisconnectedRingGroup [GOOD] >> TSubscriberSinglePathUpdateTest::OneWriteOnlyRingGroup >> TSubscriberSinglePathUpdateTest::ReplicaConfigMismatch [GOOD] >> TBlobStorageHullSstIt::TestSeekExactAndNext [GOOD] >> TSubscriberSyncQuorumTest::OneDisconnectedRingGroup >> TBlobStorageHullSstIt::TestSeekBefore [GOOD] >> TSubscriberSyncQuorumTest::OneRingGroup >> TBoardSubscriber2DCTest::ManySubscribersManyPublisher [GOOD] >> TBoardSubscriber2DCTest::SimpleSubscriber [GOOD] >> TSubscriberTest::Boot [GOOD] >> TBoardSubscriberTest::DropByDisconnect [GOOD] >> TSubscriberSyncQuorumTest::ReplicaConfigMismatch >> TBlobStorageHullSstIt::TestSeekAfterAndPrev [GOOD] >> TBlobStorageHullWriteSst::LogoBlobOneSstOneIndex [GOOD] >> TBlobStorageHullWriteSst::LogoBlobOneSstMultiIndexPartOutbound [GOOD] |82.6%| [TM] {asan, default-linux-x86_64, release} ydb/core/fq/libs/compute/common/ut/unittest >> TSubscriberTest::StrongNotificationAfterCommit [GOOD] >> TSubscriberTest::Sync >> TSubscriberTest::NotifyDelete [GOOD] >> TSubscriberTest::InvalidNotification >> TBlobStorageHullOrderedSstsIt::TestSeekToFirst [GOOD] >> TBlobStorageHullOrderedSstsIt::TestSeekToLast [GOOD] >> TBlobStorageHullOrderedSstsIt::TestSeekAfterAndPrev >> TBlobStorageHullOrderedSstsIt::TestSeekAfterAndPrev [GOOD] >> TBoardSubscriber2DCTest::ReconnectReplica [GOOD] >> TSubscriberSyncQuorumTest::OneDisconnectedRingGroup [GOOD] >> TSubscriberSinglePathUpdateTest::OneWriteOnlyRingGroup [GOOD] >> TSubscriberSyncQuorumTest::OneRingGroup [GOOD] >> TSubscriberSyncQuorumTest::OneSynchronizedRingGroup >> TBlobStorageHullWriteSst::BlockOneSstOneIndex [GOOD] >> TSubscriberTest::NotifyUpdate [GOOD] >> TBlobStorageHullWriteSst::BlockOneSstMultiIndex >> TSubscriberTest::ReconnectOnFailure >> TBlobStorageHullSstIt::TestSeekToFirst [GOOD] >> TBlobStorageHullSstIt::TestSeekExactAndPrev [GOOD] >> TBlobStorageHullSstIt::TestSeekNotExactBefore [GOOD] >> TSubscriberSyncQuorumTest::ReplicaConfigMismatch [GOOD] >> TSubscriberSyncQuorumTest::ReconfigurationWithDelayedSyncRequest |82.6%| [TM] {asan, default-linux-x86_64, release} ydb/core/actorlib_impl/ut/unittest >> TInterconnectTest::OldNbs [GOOD] |82.6%| [TM] {asan, default-linux-x86_64, release} 
ydb/core/fq/libs/compute/common/ut/unittest >> FormatTimes::DurationMs [GOOD] |82.6%| [TM] {BAZEL_UPLOAD} ydb/core/actorlib_impl/ut/unittest |82.6%| [TM] {asan, default-linux-x86_64, release} ydb/core/base/ut_board_subscriber/unittest >> TBoardSubscriber2DCTest::DropByDisconnect [GOOD] |82.6%| [TS] {asan, default-linux-x86_64, release} ydb/library/yql/providers/common/http_gateway/ut/unittest >> TDNSGatewaySuite::ShouldFilterDNSAddressedBasedOnProvidedProtocolIPV4Case [GOOD] >> TBlobStorageHullWriteSst::BlockOneSstMultiIndex [GOOD] >> TSubscriberTest::SyncPartial [GOOD] >> TSubscriberTest::SyncWithOutdatedReplica >> TBlobStorageHullSstIt::TestSstIndexSeekAndIterate [GOOD] >> TBlobStorageHullWriteSst::BlockMultiSstOneIndex >> TBlobStorageHullWriteSst::BlockMultiSstOneIndex [GOOD] >> TSubscriberTest::Sync [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/ut_blobstorage/ut_cluster_balancing/unittest >> ClusterBalancing::ClusterBalancingEvenDistribution [GOOD] Test command err: RandomSeed# 10335244258865843599 2026-01-08T15:23:45.078699Z 3 00h01m01.003072s :BS_SYNCER ERROR: PDiskId# 1002 VDISK[82000001:_:0:2:0]: (2181038081) TVDiskGuidRecoveryActor: DECISION: [Decision# LostData SubsequentFailure# 0] 2026-01-08T15:23:45.080918Z 3 00h01m01.003072s :BS_SYNCER ERROR: PDiskId# 1002 VDISK[82000001:_:0:2:0]: (2181038081) TVDiskGuidRecoveryActor: FINISH: [Decision# LostData Guid# 9850088865298996957] 2026-01-08T15:23:45.104381Z 3 00h01m01.003072s :BS_SYNCER ERROR: PDiskId# 1002 VDISK[82000001:_:0:2:0]: (2181038081) THullOsirisActor: FINISH: BlobsResurrected# 0 PartsResurrected# 0 2026-01-08T15:23:45.237366Z 8 00h01m02.003584s :BS_SYNCER ERROR: PDiskId# 1002 VDISK[82000002:_:0:7:0]: (2181038082) TVDiskGuidRecoveryActor: DECISION: [Decision# LostData SubsequentFailure# 0] 2026-01-08T15:23:45.239523Z 8 00h01m02.003584s :BS_SYNCER ERROR: PDiskId# 1002 VDISK[82000002:_:0:7:0]: (2181038082) TVDiskGuidRecoveryActor: FINISH: [Decision# LostData Guid# 11705760854198144021] 2026-01-08T15:23:45.260103Z 8 00h01m02.003584s :BS_SYNCER ERROR: PDiskId# 1002 VDISK[82000002:_:0:7:0]: (2181038082) THullOsirisActor: FINISH: BlobsResurrected# 0 PartsResurrected# 0 2026-01-08T15:23:45.354636Z 6 00h01m03.004096s :BS_SYNCER ERROR: PDiskId# 1002 VDISK[82000003:_:0:5:0]: (2181038083) TVDiskGuidRecoveryActor: DECISION: [Decision# LostData SubsequentFailure# 0] 2026-01-08T15:23:45.356691Z 6 00h01m03.004096s :BS_SYNCER ERROR: PDiskId# 1002 VDISK[82000003:_:0:5:0]: (2181038083) TVDiskGuidRecoveryActor: FINISH: [Decision# LostData Guid# 1662467100668547072] 2026-01-08T15:23:45.369469Z 6 00h01m03.004096s :BS_SYNCER ERROR: PDiskId# 1002 VDISK[82000003:_:0:5:0]: (2181038083) THullOsirisActor: FINISH: BlobsResurrected# 0 PartsResurrected# 0 2026-01-08T15:23:45.522715Z 2 00h01m04.004608s :BS_SYNCER ERROR: PDiskId# 1002 VDISK[82000000:_:0:1:0]: (2181038080) TVDiskGuidRecoveryActor: DECISION: [Decision# LostData SubsequentFailure# 0] 2026-01-08T15:23:45.524791Z 2 00h01m04.004608s :BS_SYNCER ERROR: PDiskId# 1002 VDISK[82000000:_:0:1:0]: (2181038080) TVDiskGuidRecoveryActor: FINISH: [Decision# LostData Guid# 16233760969558983166] 2026-01-08T15:23:45.538865Z 2 00h01m04.004608s :BS_SYNCER ERROR: PDiskId# 1002 VDISK[82000000:_:0:1:0]: (2181038080) THullOsirisActor: FINISH: BlobsResurrected# 0 PartsResurrected# 0 2026-01-08T15:23:46.089376Z 7 00h01m16.005120s :BS_SYNCER ERROR: PDiskId# 1002 VDISK[82000001:_:0:6:0]: (2181038081) TVDiskGuidRecoveryActor: DECISION: [Decision# LostData SubsequentFailure# 0] 
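------- [NOTE] Editor's sketch, not part of the log. The ClusterBalancing block that starts above and continues below repeats one BS_SYNCER pattern per VDisk: a DECISION record, a FINISH record carrying a freshly recovered "Guid# N", and a THullOsirisActor FINISH with resurrection counters. When eyeballing long runs like this one, it can help to pull those fields out mechanically; the helper below does that with plain std::regex and is purely illustrative (the field layout is inferred from these records, not from any documented format).

#include <iostream>
#include <optional>
#include <regex>
#include <string>

// Extracts PDiskId, the VDISK id, and (if present) the recovered guid
// from one BS_SYNCER record. Throwaway log-analysis helper, not YDB code.
struct TSyncerRecord {
    std::string PDiskId;
    std::string VDiskId;
    std::optional<unsigned long long> Guid;
};

std::optional<TSyncerRecord> ParseSyncerRecord(const std::string& line) {
    static const std::regex re(
        R"(PDiskId#\s*(\d+)\s+VDISK\[([^\]]+)\].*?(?:Guid#\s*(\d+))?\])");
    std::smatch m;
    if (!std::regex_search(line, m, re))
        return std::nullopt;
    TSyncerRecord rec{m[1].str(), m[2].str(), std::nullopt};
    if (m[3].matched)
        rec.Guid = std::stoull(m[3].str());   // only FINISH records carry a guid
    return rec;
}

int main() {
    std::string line =
        "PDiskId# 1002 VDISK[82000001:_:0:2:0]: (2181038081) "
        "TVDiskGuidRecoveryActor: FINISH: [Decision# LostData "
        "Guid# 9850088865298996957]";
    if (auto rec = ParseSyncerRecord(line)) {
        std::cout << rec->PDiskId << ' ' << rec->VDiskId;
        if (rec->Guid)
            std::cout << ' ' << *rec->Guid;
        std::cout << '\n';
    }
}

End of note; the ClusterBalancing records resume below. -------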
2026-01-08T15:23:46.091511Z 7 00h01m16.005120s :BS_SYNCER ERROR: PDiskId# 1002 VDISK[82000001:_:0:6:0]: (2181038081) TVDiskGuidRecoveryActor: FINISH: [Decision# LostData Guid# 7973984314100049541] 2026-01-08T15:23:46.101688Z 7 00h01m16.005120s :BS_SYNCER ERROR: PDiskId# 1002 VDISK[82000001:_:0:6:0]: (2181038081) THullOsirisActor: FINISH: BlobsResurrected# 0 PartsResurrected# 0 2026-01-08T15:23:46.214125Z 3 00h01m17.005632s :BS_SYNCER ERROR: PDiskId# 1003 VDISK[82000002:_:0:2:0]: (2181038082) TVDiskGuidRecoveryActor: DECISION: [Decision# LostData SubsequentFailure# 0] 2026-01-08T15:23:46.216159Z 3 00h01m17.005632s :BS_SYNCER ERROR: PDiskId# 1003 VDISK[82000002:_:0:2:0]: (2181038082) TVDiskGuidRecoveryActor: FINISH: [Decision# LostData Guid# 10248485317405572405] 2026-01-08T15:23:46.235324Z 3 00h01m17.005632s :BS_SYNCER ERROR: PDiskId# 1003 VDISK[82000002:_:0:2:0]: (2181038082) THullOsirisActor: FINISH: BlobsResurrected# 0 PartsResurrected# 0 2026-01-08T15:23:46.330162Z 5 00h01m18.006144s :BS_SYNCER ERROR: PDiskId# 1002 VDISK[82000003:_:0:4:0]: (2181038083) TVDiskGuidRecoveryActor: DECISION: [Decision# LostData SubsequentFailure# 0] 2026-01-08T15:23:46.332163Z 5 00h01m18.006144s :BS_SYNCER ERROR: PDiskId# 1002 VDISK[82000003:_:0:4:0]: (2181038083) TVDiskGuidRecoveryActor: FINISH: [Decision# LostData Guid# 17859050164586675018] 2026-01-08T15:23:46.347794Z 5 00h01m18.006144s :BS_SYNCER ERROR: PDiskId# 1002 VDISK[82000003:_:0:4:0]: (2181038083) THullOsirisActor: FINISH: BlobsResurrected# 0 PartsResurrected# 0 2026-01-08T15:23:46.500548Z 1 00h01m19.006656s :BS_SYNCER ERROR: PDiskId# 1002 VDISK[82000000:_:0:0:0]: (2181038080) TVDiskGuidRecoveryActor: DECISION: [Decision# LostData SubsequentFailure# 0] 2026-01-08T15:23:46.502614Z 1 00h01m19.006656s :BS_SYNCER ERROR: PDiskId# 1002 VDISK[82000000:_:0:0:0]: (2181038080) TVDiskGuidRecoveryActor: FINISH: [Decision# LostData Guid# 9777321515021416381] 2026-01-08T15:23:46.517359Z 1 00h01m19.006656s :BS_SYNCER ERROR: PDiskId# 1002 VDISK[82000000:_:0:0:0]: (2181038080) THullOsirisActor: FINISH: BlobsResurrected# 0 PartsResurrected# 0 2026-01-08T15:23:47.166523Z 2 00h01m31.007168s :BS_SYNCER ERROR: PDiskId# 1003 VDISK[82000001:_:0:1:0]: (2181038081) TVDiskGuidRecoveryActor: DECISION: [Decision# LostData SubsequentFailure# 0] 2026-01-08T15:23:47.168769Z 2 00h01m31.007168s :BS_SYNCER ERROR: PDiskId# 1003 VDISK[82000001:_:0:1:0]: (2181038081) TVDiskGuidRecoveryActor: FINISH: [Decision# LostData Guid# 10736429263331498944] 2026-01-08T15:23:47.181457Z 2 00h01m31.007168s :BS_SYNCER ERROR: PDiskId# 1003 VDISK[82000001:_:0:1:0]: (2181038081) THullOsirisActor: FINISH: BlobsResurrected# 0 PartsResurrected# 0 2026-01-08T15:23:47.309042Z 5 00h01m32.007680s :BS_SYNCER ERROR: PDiskId# 1003 VDISK[82000002:_:0:4:0]: (2181038082) TVDiskGuidRecoveryActor: DECISION: [Decision# LostData SubsequentFailure# 0] 2026-01-08T15:23:47.310826Z 5 00h01m32.007680s :BS_SYNCER ERROR: PDiskId# 1003 VDISK[82000002:_:0:4:0]: (2181038082) TVDiskGuidRecoveryActor: FINISH: [Decision# LostData Guid# 9525195957329818446] 2026-01-08T15:23:47.347959Z 5 00h01m32.007680s :BS_SYNCER ERROR: PDiskId# 1003 VDISK[82000002:_:0:4:0]: (2181038082) THullOsirisActor: FINISH: BlobsResurrected# 0 PartsResurrected# 0 2026-01-08T15:23:47.491680Z 1 00h01m33.008192s :BS_SYNCER ERROR: PDiskId# 1003 VDISK[82000003:_:0:0:0]: (2181038083) TVDiskGuidRecoveryActor: DECISION: [Decision# LostData SubsequentFailure# 0] 2026-01-08T15:23:47.493991Z 1 00h01m33.008192s :BS_SYNCER ERROR: PDiskId# 1003 VDISK[82000003:_:0:0:0]: 
(2181038083) TVDiskGuidRecoveryActor: FINISH: [Decision# LostData Guid# 16800164151748499204] 2026-01-08T15:23:47.507698Z 1 00h01m33.008192s :BS_SYNCER ERROR: PDiskId# 1003 VDISK[82000003:_:0:0:0]: (2181038083) THullOsirisActor: FINISH: BlobsResurrected# 0 PartsResurrected# 0 2026-01-08T15:23:47.735017Z 4 00h01m34.008704s :BS_SYNCER ERROR: PDiskId# 1002 VDISK[82000000:_:0:3:0]: (2181038080) TVDiskGuidRecoveryActor: DECISION: [Decision# LostData SubsequentFailure# 0] 2026-01-08T15:23:47.737370Z 4 00h01m34.008704s :BS_SYNCER ERROR: PDiskId# 1002 VDISK[82000000:_:0:3:0]: (2181038080) TVDiskGuidRecoveryActor: FINISH: [Decision# LostData Guid# 13159566534186243929] 2026-01-08T15:23:47.756535Z 4 00h01m34.008704s :BS_SYNCER ERROR: PDiskId# 1002 VDISK[82000000:_:0:3:0]: (2181038080) THullOsirisActor: FINISH: BlobsResurrected# 0 PartsResurrected# 0 2026-01-08T15:23:48.470171Z 4 00h01m46.009216s :BS_SYNCER ERROR: PDiskId# 1003 VDISK[82000001:_:0:3:0]: (2181038081) TVDiskGuidRecoveryActor: DECISION: [Decision# LostData SubsequentFailure# 0] 2026-01-08T15:23:48.472634Z 4 00h01m46.009216s :BS_SYNCER ERROR: PDiskId# 1003 VDISK[82000001:_:0:3:0]: (2181038081) TVDiskGuidRecoveryActor: FINISH: [Decision# LostData Guid# 8406335068927104423] 2026-01-08T15:23:48.485944Z 4 00h01m46.009216s :BS_SYNCER ERROR: PDiskId# 1003 VDISK[82000001:_:0:3:0]: (2181038081) THullOsirisActor: FINISH: BlobsResurrected# 0 PartsResurrected# 0 2026-01-08T15:23:48.625244Z 6 00h01m47.009728s :BS_SYNCER ERROR: PDiskId# 1003 VDISK[82000002:_:0:5:0]: (2181038082) TVDiskGuidRecoveryActor: DECISION: [Decision# LostData SubsequentFailure# 0] 2026-01-08T15:23:48.627909Z 6 00h01m47.009728s :BS_SYNCER ERROR: PDiskId# 1003 VDISK[82000002:_:0:5:0]: (2181038082) TVDiskGuidRecoveryActor: FINISH: [Decision# LostData Guid# 4644093650577079784] 2026-01-08T15:23:48.652935Z 6 00h01m47.009728s :BS_SYNCER ERROR: PDiskId# 1003 VDISK[82000002:_:0:5:0]: (2181038082) THullOsirisActor: FINISH: BlobsResurrected# 0 PartsResurrected# 0 2026-01-08T15:23:48.771773Z 8 00h01m48.010240s :BS_SYNCER ERROR: PDiskId# 1003 VDISK[82000003:_:0:7:0]: (2181038083) TVDiskGuidRecoveryActor: DECISION: [Decision# LostData SubsequentFailure# 0] 2026-01-08T15:23:48.774173Z 8 00h01m48.010240s :BS_SYNCER ERROR: PDiskId# 1003 VDISK[82000003:_:0:7:0]: (2181038083) TVDiskGuidRecoveryActor: FINISH: [Decision# LostData Guid# 6148197817324493980] 2026-01-08T15:23:48.789429Z 8 00h01m48.010240s :BS_SYNCER ERROR: PDiskId# 1003 VDISK[82000003:_:0:7:0]: (2181038083) THullOsirisActor: FINISH: BlobsResurrected# 0 PartsResurrected# 0 2026-01-08T15:23:48.959649Z 7 00h01m49.010752s :BS_SYNCER ERROR: PDiskId# 1003 VDISK[82000000:_:0:6:0]: (2181038080) TVDiskGuidRecoveryActor: DECISION: [Decision# LostData SubsequentFailure# 0] 2026-01-08T15:23:48.962074Z 7 00h01m49.010752s :BS_SYNCER ERROR: PDiskId# 1003 VDISK[82000000:_:0:6:0]: (2181038080) TVDiskGuidRecoveryActor: FINISH: [Decision# LostData Guid# 5289614590826139421] 2026-01-08T15:23:48.977045Z 7 00h01m49.010752s :BS_SYNCER ERROR: PDiskId# 1003 VDISK[82000000:_:0:6:0]: (2181038080) THullOsirisActor: FINISH: BlobsResurrected# 0 PartsResurrected# 0 |82.6%| [TM] {BAZEL_UPLOAD} ydb/core/base/ut_board_subscriber/unittest |82.6%| [TM] {BAZEL_UPLOAD} ydb/core/fq/libs/compute/common/ut/unittest |82.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/ut_blobstorage/acceleration.cpp >> TSubscriberSyncQuorumTest::OneSynchronizedRingGroup [GOOD] >> TSubscriberSyncQuorumTest::OneWriteOnlyRingGroup >> TSubscriberTest::InvalidNotification 
[GOOD] >> TSubscriberSyncQuorumTest::ReconfigurationWithDelayedSyncRequest [GOOD] >> TSubscriberSyncQuorumTest::ReconfigurationWithCurrentSyncRequest >> TBsVDiskGC::GCPutKeepBarrierSync [GOOD] >> TBsVDiskGC::GCPutManyBarriersNoSync |82.6%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/vdisk/hulldb/generic/ut/unittest >> TBlobStorageHullWriteSst::LogoBlobOneSstOneIndexPartOutbound [GOOD] |82.6%| [TM] {BAZEL_UPLOAD} ydb/core/base/ut_board_subscriber/unittest >> RunLengthCodec::Random32 [GOOD] >> TBsVDiskRepl1::ReplEraseDiskRestore [GOOD] >> TSubscriberTest::ReconnectOnFailure [GOOD] >> TSubscriberSyncQuorumTest::OneWriteOnlyRingGroup [GOOD] |82.6%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/ut_blobstorage/get.cpp |82.6%| [TM] {asan, default-linux-x86_64, release} ydb/core/base/ut_board_subscriber/unittest >> TBoardSubscriberTest::DropByDisconnect [GOOD] |82.6%| [TM] {asan, default-linux-x86_64, release} ydb/core/base/ut_board_subscriber/unittest >> TBoardSubscriberTest::NotAvailableByShutdown [GOOD] |82.6%| [TM] {asan, default-linux-x86_64, release} ydb/core/base/ut_board_subscriber/unittest >> TBoardSubscriberTest::ManySubscribersManyPublisher [GOOD] |82.6%| [TM] {asan, default-linux-x86_64, release} ydb/core/base/ut_board_subscriber/unittest >> TBoardSubscriber2DCTest::ManySubscribersManyPublisher [GOOD] |82.6%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/vdisk/hulldb/generic/ut/unittest >> TBlobStorageHullSstIt::TestSeekAfterAndPrev [GOOD] |82.6%| [TM] {asan, default-linux-x86_64, release} ydb/core/base/ut_board_subscriber/unittest >> TBoardSubscriber2DCTest::NotAvailableByShutdown [GOOD] >> RunLengthCodec::Random64 >> TBsVDiskRepl1::ReadOnly >> TSubscriberSyncQuorumTest::ReconfigurationWithCurrentSyncRequest [GOOD] |82.6%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/vdisk/hulldb/generic/ut/unittest >> TBlobStorageHullWriteSst::LogoBlobOneSstMultiIndexPartOutbound [GOOD] |82.6%| [TM] {asan, default-linux-x86_64, release} ydb/core/base/ut_board_subscriber/unittest >> TBoardSubscriber2DCTest::SimpleSubscriber [GOOD] |82.6%| [TA] $(B)/ydb/core/control/ut/test-results/unittest/{meta.json ... results_accumulator.log} >> TSubscriberTest::SyncWithOutdatedReplica [GOOD] >> TBlobStorageSyncLogMem::FilledIn1PutAfterSnapshot [GOOD] >> TBlobStorageSyncLogMem::ManyLogoBlobsPerf >> RunLengthCodec::Random64 [GOOD] >> SemiSortedDeltaAndVarLengthCodec::BasicTest32 [GOOD] >> SemiSortedDeltaAndVarLengthCodec::BasicTest64 [GOOD] |82.6%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/vdisk/hulldb/generic/ut/unittest >> TBlobStorageHullOrderedSstsIt::TestSeekAfterAndPrev [GOOD] |82.6%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/vdisk/hulldb/generic/ut/unittest >> TBlobStorageHullWriteSst::BlockOneSstMultiIndex [GOOD] |82.6%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/vdisk/hulldb/generic/ut/unittest >> TBlobStorageHullWriteSst::BlockMultiSstOneIndex [GOOD] |82.6%| [TM] {asan, default-linux-x86_64, release} ydb/core/base/ut_board_subscriber/unittest >> TBoardSubscriber2DCTest::ReconnectReplica [GOOD] |82.6%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/vdisk/hulldb/generic/ut/unittest >> TBlobStorageHullSstIt::TestSeekNotExactBefore [GOOD] |82.6%| [TM] {BAZEL_UPLOAD} ydb/core/actorlib_impl/ut/unittest ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/scheme_board/ut_subscriber/unittest >> TSubscriberTest::Boot [GOOD] Test command err: ... 
waiting for initial path lookups 2026-01-08T15:23:49.668949Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:1023: [main][1:28:2075][TestPath] Handle {EvResolveReplicasList ReplicaGroups: [{Replicas: [[1:24339059:0], [1:1099535966835:0], [1:2199047594611:0]] WriteOnly: 0 State: 0}, {Replicas: [[1:3298559222387:0], [1:4398070850163:0], [1:5497582477939:0]] WriteOnly: 0 State: 1}]} 2026-01-08T15:23:49.670896Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:359: [replica][1:35:2075][TestPath] Handle NKikimrSchemeBoard.TEvNotify { Path: TestPath Version: 0 }: sender# [1:4:2051] 2026-01-08T15:23:49.670983Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:359: [replica][1:36:2075][TestPath] Handle NKikimrSchemeBoard.TEvNotify { Path: TestPath Version: 0 }: sender# [1:5:2052] 2026-01-08T15:23:49.671016Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:359: [replica][1:37:2075][TestPath] Handle NKikimrSchemeBoard.TEvNotify { Path: TestPath Version: 0 }: sender# [1:6:2053] 2026-01-08T15:23:49.671044Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:359: [replica][1:38:2075][TestPath] Handle NKikimrSchemeBoard.TEvNotify { Path: TestPath Version: 0 }: sender# [1:13:2060] 2026-01-08T15:23:49.671078Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:359: [replica][1:39:2075][TestPath] Handle NKikimrSchemeBoard.TEvNotify { Path: TestPath Version: 0 }: sender# [1:14:2061] 2026-01-08T15:23:49.671104Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:359: [replica][1:40:2075][TestPath] Handle NKikimrSchemeBoard.TEvNotify { Path: TestPath Version: 0 }: sender# [1:15:2062] ... blocking NKikimr::NSchemeBoard::NInternalEvents::TEvNotifyBuilder from SCHEME_BOARD_SUBSCRIBER_PROXY_ACTOR to SCHEME_BOARD_SUBSCRIBER_ACTOR cookie 0 ... blocking NKikimr::NSchemeBoard::NInternalEvents::TEvNotifyBuilder from SCHEME_BOARD_SUBSCRIBER_PROXY_ACTOR to SCHEME_BOARD_SUBSCRIBER_ACTOR cookie 0 ... blocking NKikimr::NSchemeBoard::NInternalEvents::TEvNotifyBuilder from SCHEME_BOARD_SUBSCRIBER_PROXY_ACTOR to SCHEME_BOARD_SUBSCRIBER_ACTOR cookie 0 ... blocking NKikimr::NSchemeBoard::NInternalEvents::TEvNotifyBuilder from SCHEME_BOARD_SUBSCRIBER_PROXY_ACTOR to SCHEME_BOARD_SUBSCRIBER_ACTOR cookie 0 ... blocking NKikimr::NSchemeBoard::NInternalEvents::TEvNotifyBuilder from SCHEME_BOARD_SUBSCRIBER_PROXY_ACTOR to SCHEME_BOARD_SUBSCRIBER_ACTOR cookie 0 ... blocking NKikimr::NSchemeBoard::NInternalEvents::TEvNotifyBuilder from SCHEME_BOARD_SUBSCRIBER_PROXY_ACTOR to SCHEME_BOARD_SUBSCRIBER_ACTOR cookie 0 ... waiting for initial path lookups (done) ... unblocking NKikimr::NSchemeBoard::NInternalEvents::TEvNotifyBuilder from SCHEME_BOARD_SUBSCRIBER_PROXY_ACTOR to SCHEME_BOARD_SUBSCRIBER_ACTOR ... unblocking NKikimr::NSchemeBoard::NInternalEvents::TEvNotifyBuilder from SCHEME_BOARD_SUBSCRIBER_PROXY_ACTOR to SCHEME_BOARD_SUBSCRIBER_ACTOR ... unblocking NKikimr::NSchemeBoard::NInternalEvents::TEvNotifyBuilder from SCHEME_BOARD_SUBSCRIBER_PROXY_ACTOR to SCHEME_BOARD_SUBSCRIBER_ACTOR ... unblocking NKikimr::NSchemeBoard::NInternalEvents::TEvNotifyBuilder from SCHEME_BOARD_SUBSCRIBER_PROXY_ACTOR to SCHEME_BOARD_SUBSCRIBER_ACTOR ... unblocking NKikimr::NSchemeBoard::NInternalEvents::TEvNotifyBuilder from SCHEME_BOARD_SUBSCRIBER_PROXY_ACTOR to SCHEME_BOARD_SUBSCRIBER_ACTOR ... 
unblocking NKikimr::NSchemeBoard::NInternalEvents::TEvNotifyBuilder from SCHEME_BOARD_SUBSCRIBER_PROXY_ACTOR to SCHEME_BOARD_SUBSCRIBER_ACTOR Poisoning replica: [1:2199047594611:0] Poisoning replica: [1:5497582477939:0] 2026-01-08T15:23:49.671399Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:889: [main][1:28:2075][TestPath] Handle NKikimr::NSchemeBoard::NInternalEvents::TEvSyncRequest: sender# [1:27:2074], cookie# 12345 2026-01-08T15:23:49.671467Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:814: [main][1:28:2075][TestPath] Handle NKikimrSchemeBoard.TEvNotify { Path: TestPath Version: 0 }: sender# [1:29:2075] 2026-01-08T15:23:49.671572Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:814: [main][1:28:2075][TestPath] Handle NKikimrSchemeBoard.TEvNotify { Path: TestPath Version: 0 }: sender# [1:30:2075] 2026-01-08T15:23:49.671601Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:814: [main][1:28:2075][TestPath] Handle NKikimrSchemeBoard.TEvNotify { Path: TestPath Version: 0 }: sender# [1:31:2075] 2026-01-08T15:23:49.671625Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:814: [main][1:28:2075][TestPath] Handle NKikimrSchemeBoard.TEvNotify { Path: TestPath Version: 0 }: sender# [1:32:2075] 2026-01-08T15:23:49.671662Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:814: [main][1:28:2075][TestPath] Handle NKikimrSchemeBoard.TEvNotify { Path: TestPath Version: 0 }: sender# [1:33:2075] 2026-01-08T15:23:49.671724Z node 1 :SCHEME_BOARD_SUBSCRIBER NOTICE: subscriber.cpp:849: [main][1:28:2075][TestPath] Set up state: owner# [1:27:2074], state# { Deleted: 1 Strong: 0 Version: DomainId: AbandonedSchemeShards: there are 0 elements } 2026-01-08T15:23:49.671776Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:814: [main][1:28:2075][TestPath] Handle NKikimrSchemeBoard.TEvNotify { Path: TestPath Version: 0 }: sender# [1:34:2075] 2026-01-08T15:23:49.671822Z node 1 :SCHEME_BOARD_SUBSCRIBER INFO: subscriber.cpp:867: [main][1:28:2075][TestPath] Ignore empty state: owner# [1:27:2074], state# { Deleted: 1 Strong: 0 Version: DomainId: AbandonedSchemeShards: there are 0 elements }, other state# { Deleted: 1 Strong: 0 Version: DomainId: AbandonedSchemeShards: there are 0 elements } 2026-01-08T15:23:49.671931Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:375: [replica][1:35:2075][TestPath] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: TestPath }: sender# [1:29:2075], cookie# 12345 2026-01-08T15:23:49.671981Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:375: [replica][1:36:2075][TestPath] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: TestPath }: sender# [1:30:2075], cookie# 12345 2026-01-08T15:23:49.672012Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:375: [replica][1:37:2075][TestPath] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: TestPath }: sender# [1:31:2075], cookie# 12345 2026-01-08T15:23:49.672055Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:375: [replica][1:38:2075][TestPath] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: TestPath }: sender# [1:32:2075], cookie# 12345 2026-01-08T15:23:49.672086Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:375: [replica][1:39:2075][TestPath] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: TestPath }: sender# [1:33:2075], cookie# 12345 2026-01-08T15:23:49.672128Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:375: [replica][1:40:2075][TestPath] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: TestPath }: sender# [1:34:2075], 
cookie# 12345 2026-01-08T15:23:49.672221Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:384: [replica][1:35:2075][TestPath] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 0 Partial: 0 Cluster State: { } }: sender# [1:4:2051], cookie# 12345 2026-01-08T15:23:49.672261Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:384: [replica][1:36:2075][TestPath] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 0 Partial: 0 Cluster State: { } }: sender# [1:5:2052], cookie# 12345 2026-01-08T15:23:49.672326Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:384: [replica][1:38:2075][TestPath] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 0 Partial: 0 Cluster State: { } }: sender# [1:13:2060], cookie# 12345 2026-01-08T15:23:49.672351Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:384: [replica][1:39:2075][TestPath] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 0 Partial: 0 Cluster State: { } }: sender# [1:14:2061], cookie# 12345 2026-01-08T15:23:49.672462Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:919: [main][1:28:2075][TestPath] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 0 Partial: 0 Cluster State: { } }: sender# [1:29:2075], cookie# 12345 2026-01-08T15:23:49.672506Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:986: [main][1:28:2075][TestPath] Sync is in progress: cookie# 12345, ring group# 0, size# 3, half# 1, successes# 1, failures# 0 2026-01-08T15:23:49.672553Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:919: [main][1:28:2075][TestPath] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 0 Partial: 0 Cluster State: { } }: sender# [1:30:2075], cookie# 12345 2026-01-08T15:23:49.672587Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:999: [main][1:28:2075][TestPath] Sync is done in the ring group: cookie# 12345, ring group# 0, size# 3, half# 1, successes# 2, failures# 0, partial# 0 2026-01-08T15:23:49.672615Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:986: [main][1:28:2075][TestPath] Sync is in progress: cookie# 12345, ring group# 1, size# 3, half# 1, successes# 0, failures# 0 2026-01-08T15:23:49.672686Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:919: [main][1:28:2075][TestPath] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 0 Partial: 1 Cluster State: { } }: sender# [1:31:2075], cookie# 12345 2026-01-08T15:23:49.672706Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:999: [main][1:28:2075][TestPath] Sync is done in the ring group: cookie# 12345, ring group# 0, size# 3, half# 1, successes# 2, failures# 1, partial# 0 2026-01-08T15:23:49.672731Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:986: [main][1:28:2075][TestPath] Sync is in progress: cookie# 12345, ring group# 1, size# 3, half# 1, successes# 0, failures# 0 2026-01-08T15:23:49.672757Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:814: [main][1:28:2075][TestPath] Handle NKikimrSchemeBoard.TEvNotify { Path: TestPath Version: 0 }: sender# [1:31:2075] 2026-01-08T15:23:49.672787Z node 1 :SCHEME_BOARD_SUBSCRIBER INFO: subscriber.cpp:867: [main][1:28:2075][TestPath] Ignore empty state: owner# [1:27:2074], state# { Deleted: 1 Strong: 0 Version: DomainId: AbandonedSchemeShards: there are 0 elements }, other state# { Deleted: 1 Strong: 0 Version: DomainId: AbandonedSchemeShards: there are 0 elements } 2026-01-08T15:23:49.672817Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:919: [main][1:28:2075][TestPath] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { 
Version: 0 Partial: 0 Cluster State: { } }: sender# [1:32:2075], cookie# 12345 2026-01-08T15:23:49.672844Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:999: [main][1:28:2075][TestPath] Sync is done in the ring group: cookie# 12345, ring group# 0, size# 3, half# 1, successes# 2, failures# 1, partial# 0 2026-01-08T15:23:49.672863Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:986: [main][1:28:2075][TestPath] Sync is in progress: cookie# 12345, ring group# 1, size# 3, half# 1, successes# 1, failures# 0 2026-01-08T15:23:49.672895Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:919: [main][1:28:2075][TestPath] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 0 Partial: 0 Cluster State: { } }: sender# [1:33:2075], cookie# 12345 2026-01-08T15:23:49.672917Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:999: [main][1:28:2075][TestPath] Sync is done in the ring group: cookie# 12345, ring group# 0, size# 3, half# 1, successes# 2, failures# 1, partial# 0 2026-01-08T15:23:49.672954Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:999: [main][1:28:2075][TestPath] Sync is done in the ring group: cookie# 12345, ring group# 1, size# 3, half# 1, successes# 2, failures# 0, partial# 0 2026-01-08T15:23:49.672987Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:919: [main][1:28:2075][TestPath] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 0 Partial: 1 Cluster State: { } }: sender# [1:34:2075], cookie# 12345 2026-01-08T15:23:49.673008Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:925: [main][1:28:2075][TestPath] Sync cookie mismatch: sender# [1:34:2075], cookie# 12345, current cookie# 0 2026-01-08T15:23:49.673060Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:814: [main][1:28:2075][TestPath] Handle NKikimrSchemeBoard.TEvNotify { Path: TestPath Version: 0 }: sender# [1:34:2075] 2026-01-08T15:23:49.673088Z node 1 :SCHEME_BOARD_SUBSCRIBER INFO: subscriber.cpp:867: [main][1:28:2075][TestPath] Ignore empty state: owner# [1:27:2074], state# { Deleted: 1 Strong: 0 Version: DomainId: AbandonedSchemeShards: there are 0 elements }, other state# { Deleted: 1 Strong: 0 Version: DomainId: AbandonedSchemeShards: there are 0 elements } Poisoning replica: [1:4398070850163:0] whose ring group state is: 1 2026-01-08T15:23:49.673175Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:889: [main][1:28:2075][TestPath] Handle NKikimr::NSchemeBoard::NInternalEvents::TEvSyncRequest: sender# [1:27:2074], cookie# 12346 2026-01-08T15:23:49.673319Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:375: [replica][1:35:2075][TestPath] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: TestPath }: sender# [1:29:2075], cookie# 12346 2026-01-08T15:23:49.673393Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:375: [replica][1:36:2075][TestPath] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: TestPath }: sender# [1:30:2075], cookie# 12346 2026-01-08T15:23:49.673480Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:919: [main][1:28:2075][TestPath] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 0 Partial: 1 Cluster State: { } }: sender# [1:31:2075], cookie# 12346 2026-01-08T15:23:49.673532Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:986: [main][1:28:2075][TestPath] Sync is in progress: cookie# 12346, ring group# 0, size# 3, half# 1, successes# 0, failures# 1 2026-01-08T15:23:49.673589Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:375: [replica][1:38:2075][TestPath] Handle 
NKikimrSchemeBoard.TEvSyncVersionRequest { Path: TestPath }: sender# [1:32:2075], cookie# 12346 2026-01-08T15:23:49.673622Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:375: [replica][1:39:2075][TestPath] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: TestPath }: sender# [1:33:2075], cookie# 12346 2026-01-08T15:23:49.673663Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:919: [main][1:28:2075][TestPath] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 0 Partial: 1 Cluster State: { } }: sender# [1:34:2075], cookie# 12346 2026-01-08T15:23:49.673686Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:986: [main][1:28:2075][TestPath] Sync is in progress: cookie# 12346, ring group# 0, size# 3, half# 1, successes# 0, failures# 1 2026-01-08T15:23:49.673724Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:384: [replica][1:35:2075][TestPath] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 0 Partial: 0 Cluster State: { } }: sender# [1:4:2051], cookie# 12346 2026-01-08T15:23:49.673780Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:384: [replica][1:36:2075][TestPath] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 0 Partial: 0 Cluster State: { } }: sender# [1:5:2052], cookie# 12346 2026-01-08T15:23:49.673805Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:384: [replica][1:38:2075][TestPath] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 0 Partial: 0 Cluster State: { } }: sender# [1:13:2060], cookie# 12346 2026-01-08T15:23:49.673883Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:919: [main][1:28:2075][TestPath] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 0 Partial: 0 Cluster State: { } }: sender# [1:29:2075], cookie# 12346 2026-01-08T15:23:49.673902Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:986: [main][1:28:2075][TestPath] Sync is in progress: cookie# 12346, ring group# 0, size# 3, half# 1, successes# 1, failures# 1 2026-01-08T15:23:49.673922Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:919: [main][1:28:2075][TestPath] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 0 Partial: 0 Cluster State: { } }: sender# [1:30:2075], cookie# 12346 2026-01-08T15:23:49.673958Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:999: [main][1:28:2075][TestPath] Sync is done in the ring group: cookie# 12346, ring group# 0, size# 3, half# 1, successes# 2, failures# 1, partial# 0 2026-01-08T15:23:49.673984Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:986: [main][1:28:2075][TestPath] Sync is in progress: cookie# 12346, ring group# 1, size# 3, half# 1, successes# 0, failures# 1 2026-01-08T15:23:49.674008Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:919: [main][1:28:2075][TestPath] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 0 Partial: 0 Cluster State: { } }: sender# [1:32:2075], cookie# 12346 2026-01-08T15:23:49.674038Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:999: [main][1:28:2075][TestPath] Sync is done in the ring group: cookie# 12346, ring group# 0, size# 3, half# 1, successes# 2, failures# 1, partial# 0 2026-01-08T15:23:49.674088Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:986: [main][1:28:2075][TestPath] Sync is in progress: cookie# 12346, ring group# 1, size# 3, half# 1, successes# 1, failures# 1 2026-01-08T15:23:49.674115Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:919: [main][1:28:2075][TestPath] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 0 Partial: 1 Cluster State: { } }: 
sender# [1:33:2075], cookie# 12346 2026-01-08T15:23:49.674134Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:999: [main][1:28:2075][TestPath] Sync is done in the ring group: cookie# 12346, ring group# 0, size# 3, half# 1, successes# 2, failures# 1, partial# 0 2026-01-08T15:23:49.674159Z node 1 :SCHEME_BOARD_SUBSCRIBER NOTICE: subscriber.cpp:1001: [main][1:28:2075][TestPath] Sync is done in the ring group: cookie# 12346, ring group# 1, size# 3, half# 1, successes# 1, failures# 2, partial# 1 2026-01-08T15:23:49.674186Z node 1 :SCHEME_BOARD_SUBSCRIBER WARN: subscriber.cpp:1006: [main][1:28:2075][TestPath] Sync is incomplete in one of the ring groups: cookie# 12346 2026-01-08T15:23:49.674231Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:814: [main][1:28:2075][TestPath] Handle NKikimrSchemeBoard.TEvNotify { Path: TestPath Version: 0 }: sender# [1:33:2075] 2026-01-08T15:23:49.674283Z node 1 :SCHEME_BOARD_SUBSCRIBER INFO: subscriber.cpp:867: [main][1:28:2075][TestPath] Ignore empty state: owner# [1:27:2074], state# { Deleted: 1 Strong: 0 Version: DomainId: AbandonedSchemeShards: there are 0 elements }, other state# { Deleted: 1 Strong: 0 Version: DomainId: AbandonedSchemeShards: there are 0 elements } 2026-01-08T15:23:49.976384Z node 2 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:1023: [main][2:36:2066][path] Handle {EvResolveReplicasList ReplicaGroups: [{Replicas: [[2:24339059:0], [2:1099535966835:0], [2:2199047594611:0]] WriteOnly: 0 State: 0}]} 2026-01-08T15:23:49.977083Z node 2 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:359: [replica][2:40:2066][path] Handle NKikimrSchemeBoard.TEvNotify { Path: path Version: 0 }: sender# [2:3:2050] 2026-01-08T15:23:49.977191Z node 2 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:359: [replica][2:41:2066][path] Handle NKikimrSchemeBoard.TEvNotify { Path: path Version: 0 }: sender# [2:6:2053] 2026-01-08T15:23:49.977240Z node 2 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:359: [replica][2:42:2066][path] Handle NKikimrSchemeBoard.TEvNotify { Path: path Version: 0 }: sender# [2:9:2056] 2026-01-08T15:23:49.977301Z node 2 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:814: [main][2:36:2066][path] Handle NKikimrSchemeBoard.TEvNotify { Path: path Version: 0 }: sender# [2:37:2066] 2026-01-08T15:23:49.977403Z node 2 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:814: [main][2:36:2066][path] Handle NKikimrSchemeBoard.TEvNotify { Path: path Version: 0 }: sender# [2:38:2066] 2026-01-08T15:23:49.977460Z node 2 :SCHEME_BOARD_SUBSCRIBER NOTICE: subscriber.cpp:849: [main][2:36:2066][path] Set up state: owner# [2:35:2065], state# { Deleted: 1 Strong: 0 Version: DomainId: AbandonedSchemeShards: there are 0 elements } 2026-01-08T15:23:49.977523Z node 2 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:814: [main][2:36:2066][path] Handle NKikimrSchemeBoard.TEvNotify { Path: path Version: 0 }: sender# [2:39:2066] 2026-01-08T15:23:49.977594Z node 2 :SCHEME_BOARD_SUBSCRIBER INFO: subscriber.cpp:867: [main][2:36:2066][path] Ignore empty state: owner# [2:35:2065], state# { Deleted: 1 Strong: 0 Version: DomainId: AbandonedSchemeShards: there are 0 elements }, other state# { Deleted: 1 Strong: 0 Version: DomainId: AbandonedSchemeShards: there are 0 elements } ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/scheme_board/ut_subscriber/unittest >> TSubscriberSinglePathUpdateTest::OneWriteOnlyRingGroup [GOOD] Test command err: ... 
waiting for initial path lookups 2026-01-08T15:23:49.762099Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:1023: [main][1:19:2066][TestPath] Handle {EvResolveReplicasList ReplicaGroups: [{Replicas: [[1:24339059:0], [1:1099535966835:0], [1:2199047594611:0]] WriteOnly: 0 State: 0}]} 2026-01-08T15:23:49.763785Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:359: [replica][1:23:2066][TestPath] Handle NKikimrSchemeBoard.TEvNotify { Path: TestPath Version: 0 }: sender# [1:4:2051] 2026-01-08T15:23:49.763850Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:359: [replica][1:24:2066][TestPath] Handle NKikimrSchemeBoard.TEvNotify { Path: TestPath Version: 0 }: sender# [1:5:2052] 2026-01-08T15:23:49.763896Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:359: [replica][1:25:2066][TestPath] Handle NKikimrSchemeBoard.TEvNotify { Path: TestPath Version: 0 }: sender# [1:6:2053] ... blocking NKikimr::NSchemeBoard::NInternalEvents::TEvNotifyBuilder from SCHEME_BOARD_SUBSCRIBER_PROXY_ACTOR to SCHEME_BOARD_SUBSCRIBER_ACTOR cookie 0 ... blocking NKikimr::NSchemeBoard::NInternalEvents::TEvNotifyBuilder from SCHEME_BOARD_SUBSCRIBER_PROXY_ACTOR to SCHEME_BOARD_SUBSCRIBER_ACTOR cookie 0 ... blocking NKikimr::NSchemeBoard::NInternalEvents::TEvNotifyBuilder from SCHEME_BOARD_SUBSCRIBER_PROXY_ACTOR to SCHEME_BOARD_SUBSCRIBER_ACTOR cookie 0 ... waiting for initial path lookups (done) ... unblocking NKikimr::NSchemeBoard::NInternalEvents::TEvNotifyBuilder from SCHEME_BOARD_SUBSCRIBER_PROXY_ACTOR to SCHEME_BOARD_SUBSCRIBER_ACTOR ... unblocking NKikimr::NSchemeBoard::NInternalEvents::TEvNotifyBuilder from SCHEME_BOARD_SUBSCRIBER_PROXY_ACTOR to SCHEME_BOARD_SUBSCRIBER_ACTOR ... unblocking NKikimr::NSchemeBoard::NInternalEvents::TEvNotifyBuilder from SCHEME_BOARD_SUBSCRIBER_PROXY_ACTOR to SCHEME_BOARD_SUBSCRIBER_ACTOR 2026-01-08T15:23:49.764059Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:814: [main][1:19:2066][TestPath] Handle NKikimrSchemeBoard.TEvNotify { Path: TestPath Version: 0 }: sender# [1:20:2066] 2026-01-08T15:23:49.764147Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:814: [main][1:19:2066][TestPath] Handle NKikimrSchemeBoard.TEvNotify { Path: TestPath Version: 0 }: sender# [1:21:2066] 2026-01-08T15:23:49.764189Z node 1 :SCHEME_BOARD_SUBSCRIBER NOTICE: subscriber.cpp:849: [main][1:19:2066][TestPath] Set up state: owner# [1:18:2065], state# { Deleted: 1 Strong: 0 Version: DomainId: AbandonedSchemeShards: there are 0 elements } 2026-01-08T15:23:49.764226Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:814: [main][1:19:2066][TestPath] Handle NKikimrSchemeBoard.TEvNotify { Path: TestPath Version: 0 }: sender# [1:22:2066] 2026-01-08T15:23:49.764255Z node 1 :SCHEME_BOARD_SUBSCRIBER INFO: subscriber.cpp:867: [main][1:19:2066][TestPath] Ignore empty state: owner# [1:18:2065], state# { Deleted: 1 Strong: 0 Version: DomainId: AbandonedSchemeShards: there are 0 elements }, other state# { Deleted: 1 Strong: 0 Version: DomainId: AbandonedSchemeShards: there are 0 elements } Sending path update to replica: [1:24339059:0] 2026-01-08T15:23:49.764486Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:359: [replica][1:23:2066][TestPath] Handle NKikimrSchemeBoard.TEvNotify { Path: TestPath PathId: [OwnerId: 1, LocalPathId: 1] Version: 1 }: sender# [1:4:2051] 2026-01-08T15:23:49.764534Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:814: [main][1:19:2066][TestPath] Handle NKikimrSchemeBoard.TEvNotify { Path: TestPath PathId: [OwnerId: 1, LocalPathId: 1] Version: 1 }: 
sender# [1:20:2066] 2026-01-08T15:23:49.764570Z node 1 :SCHEME_BOARD_SUBSCRIBER NOTICE: subscriber.cpp:855: [main][1:19:2066][TestPath] Update to strong state: owner# [1:18:2065], state# { Deleted: 1 Strong: 0 Version: DomainId: AbandonedSchemeShards: there are 0 elements }, new state# { Deleted: 0 Strong: 1 Version: (PathId: [OwnerId: 1, LocalPathId: 1], Version: 1) DomainId: AbandonedSchemeShards: there are 0 elements } Sending path update to replica: [1:1099535966835:0] 2026-01-08T15:23:49.764780Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:359: [replica][1:24:2066][TestPath] Handle NKikimrSchemeBoard.TEvNotify { Path: TestPath PathId: [OwnerId: 1, LocalPathId: 1] Version: 2 }: sender# [1:5:2052] 2026-01-08T15:23:49.764816Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:814: [main][1:19:2066][TestPath] Handle NKikimrSchemeBoard.TEvNotify { Path: TestPath PathId: [OwnerId: 1, LocalPathId: 1] Version: 2 }: sender# [1:21:2066] 2026-01-08T15:23:49.764858Z node 1 :SCHEME_BOARD_SUBSCRIBER NOTICE: subscriber.cpp:855: [main][1:19:2066][TestPath] Path was updated to new version: owner# [1:18:2065], state# { Deleted: 0 Strong: 1 Version: (PathId: [OwnerId: 1, LocalPathId: 1], Version: 1) DomainId: AbandonedSchemeShards: there are 0 elements }, new state# { Deleted: 0 Strong: 1 Version: (PathId: [OwnerId: 1, LocalPathId: 1], Version: 2) DomainId: AbandonedSchemeShards: there are 0 elements } Sending path update to replica: [1:2199047594611:0] 2026-01-08T15:23:49.764973Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:359: [replica][1:25:2066][TestPath] Handle NKikimrSchemeBoard.TEvNotify { Path: TestPath PathId: [OwnerId: 1, LocalPathId: 1] Version: 3 }: sender# [1:6:2053] 2026-01-08T15:23:49.765005Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:814: [main][1:19:2066][TestPath] Handle NKikimrSchemeBoard.TEvNotify { Path: TestPath PathId: [OwnerId: 1, LocalPathId: 1] Version: 3 }: sender# [1:22:2066] 2026-01-08T15:23:49.765042Z node 1 :SCHEME_BOARD_SUBSCRIBER NOTICE: subscriber.cpp:855: [main][1:19:2066][TestPath] Path was updated to new version: owner# [1:18:2065], state# { Deleted: 0 Strong: 1 Version: (PathId: [OwnerId: 1, LocalPathId: 1], Version: 2) DomainId: AbandonedSchemeShards: there are 0 elements }, new state# { Deleted: 0 Strong: 1 Version: (PathId: [OwnerId: 1, LocalPathId: 1], Version: 3) DomainId: AbandonedSchemeShards: there are 0 elements } ... waiting for initial path lookups 2026-01-08T15:23:50.050706Z node 2 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:1023: [main][2:28:2075][TestPath] Handle {EvResolveReplicasList ReplicaGroups: [{Replicas: [[2:24339059:0], [2:1099535966835:0], [2:2199047594611:0]] WriteOnly: 0 State: 0}]} 2026-01-08T15:23:50.051462Z node 2 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:359: [replica][2:32:2075][TestPath] Handle NKikimrSchemeBoard.TEvNotify { Path: TestPath Version: 0 }: sender# [2:4:2051] 2026-01-08T15:23:50.051542Z node 2 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:359: [replica][2:33:2075][TestPath] Handle NKikimrSchemeBoard.TEvNotify { Path: TestPath Version: 0 }: sender# [2:5:2052] 2026-01-08T15:23:50.051589Z node 2 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:359: [replica][2:34:2075][TestPath] Handle NKikimrSchemeBoard.TEvNotify { Path: TestPath Version: 0 }: sender# [2:6:2053] ... blocking NKikimr::NSchemeBoard::NInternalEvents::TEvNotifyBuilder from SCHEME_BOARD_SUBSCRIBER_PROXY_ACTOR to SCHEME_BOARD_SUBSCRIBER_ACTOR cookie 0 ... 
blocking NKikimr::NSchemeBoard::NInternalEvents::TEvNotifyBuilder from SCHEME_BOARD_SUBSCRIBER_PROXY_ACTOR to SCHEME_BOARD_SUBSCRIBER_ACTOR cookie 0 ... blocking NKikimr::NSchemeBoard::NInternalEvents::TEvNotifyBuilder from SCHEME_BOARD_SUBSCRIBER_PROXY_ACTOR to SCHEME_BOARD_SUBSCRIBER_ACTOR cookie 0 ... waiting for initial path lookups (done) ... unblocking NKikimr::NSchemeBoard::NInternalEvents::TEvNotifyBuilder from SCHEME_BOARD_SUBSCRIBER_PROXY_ACTOR to SCHEME_BOARD_SUBSCRIBER_ACTOR ... unblocking NKikimr::NSchemeBoard::NInternalEvents::TEvNotifyBuilder from SCHEME_BOARD_SUBSCRIBER_PROXY_ACTOR to SCHEME_BOARD_SUBSCRIBER_ACTOR ... unblocking NKikimr::NSchemeBoard::NInternalEvents::TEvNotifyBuilder from SCHEME_BOARD_SUBSCRIBER_PROXY_ACTOR to SCHEME_BOARD_SUBSCRIBER_ACTOR 2026-01-08T15:23:50.051817Z node 2 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:814: [main][2:28:2075][TestPath] Handle NKikimrSchemeBoard.TEvNotify { Path: TestPath Version: 0 }: sender# [2:29:2075] 2026-01-08T15:23:50.051943Z node 2 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:814: [main][2:28:2075][TestPath] Handle NKikimrSchemeBoard.TEvNotify { Path: TestPath Version: 0 }: sender# [2:30:2075] 2026-01-08T15:23:50.052016Z node 2 :SCHEME_BOARD_SUBSCRIBER NOTICE: subscriber.cpp:849: [main][2:28:2075][TestPath] Set up state: owner# [2:27:2074], state# { Deleted: 1 Strong: 0 Version: DomainId: AbandonedSchemeShards: there are 0 elements } 2026-01-08T15:23:50.052063Z node 2 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:814: [main][2:28:2075][TestPath] Handle NKikimrSchemeBoard.TEvNotify { Path: TestPath Version: 0 }: sender# [2:31:2075] 2026-01-08T15:23:50.052105Z node 2 :SCHEME_BOARD_SUBSCRIBER INFO: subscriber.cpp:867: [main][2:28:2075][TestPath] Ignore empty state: owner# [2:27:2074], state# { Deleted: 1 Strong: 0 Version: DomainId: AbandonedSchemeShards: there are 0 elements }, other state# { Deleted: 1 Strong: 0 Version: DomainId: AbandonedSchemeShards: there are 0 elements } Sending path update to replica: [2:24339059:0] 2026-01-08T15:23:50.052411Z node 2 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:359: [replica][2:32:2075][TestPath] Handle NKikimrSchemeBoard.TEvNotify { Path: TestPath PathId: [OwnerId: 1, LocalPathId: 1] Version: 1 }: sender# [2:4:2051] 2026-01-08T15:23:50.052489Z node 2 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:814: [main][2:28:2075][TestPath] Handle NKikimrSchemeBoard.TEvNotify { Path: TestPath PathId: [OwnerId: 1, LocalPathId: 1] Version: 1 }: sender# [2:29:2075] 2026-01-08T15:23:50.052548Z node 2 :SCHEME_BOARD_SUBSCRIBER NOTICE: subscriber.cpp:855: [main][2:28:2075][TestPath] Update to strong state: owner# [2:27:2074], state# { Deleted: 1 Strong: 0 Version: DomainId: AbandonedSchemeShards: there are 0 elements }, new state# { Deleted: 0 Strong: 1 Version: (PathId: [OwnerId: 1, LocalPathId: 1], Version: 1) DomainId: AbandonedSchemeShards: there are 0 elements } Sending path update to replica: [2:1099535966835:0] 2026-01-08T15:23:50.052854Z node 2 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:359: [replica][2:33:2075][TestPath] Handle NKikimrSchemeBoard.TEvNotify { Path: TestPath PathId: [OwnerId: 1, LocalPathId: 1] Version: 2 }: sender# [2:5:2052] 2026-01-08T15:23:50.052912Z node 2 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:814: [main][2:28:2075][TestPath] Handle NKikimrSchemeBoard.TEvNotify { Path: TestPath PathId: [OwnerId: 1, LocalPathId: 1] Version: 2 }: sender# [2:30:2075] 2026-01-08T15:23:50.052967Z node 2 :SCHEME_BOARD_SUBSCRIBER NOTICE: subscriber.cpp:855: 
[main][2:28:2075][TestPath] Path was updated to new version: owner# [2:27:2074], state# { Deleted: 0 Strong: 1 Version: (PathId: [OwnerId: 1, LocalPathId: 1], Version: 1) DomainId: AbandonedSchemeShards: there are 0 elements }, new state# { Deleted: 0 Strong: 1 Version: (PathId: [OwnerId: 1, LocalPathId: 1], Version: 2) DomainId: AbandonedSchemeShards: there are 0 elements } Sending path update to replica: [2:2199047594611:0] 2026-01-08T15:23:50.053264Z node 2 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:359: [replica][2:34:2075][TestPath] Handle NKikimrSchemeBoard.TEvNotify { Path: TestPath PathId: [OwnerId: 1, LocalPathId: 1] Version: 3 }: sender# [2:6:2053] 2026-01-08T15:23:50.053325Z node 2 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:814: [main][2:28:2075][TestPath] Handle NKikimrSchemeBoard.TEvNotify { Path: TestPath PathId: [OwnerId: 1, LocalPathId: 1] Version: 3 }: sender# [2:31:2075] 2026-01-08T15:23:50.053375Z node 2 :SCHEME_BOARD_SUBSCRIBER NOTICE: subscriber.cpp:855: [main][2:28:2075][TestPath] Path was updated to new version: owner# [2:27:2074], state# { Deleted: 0 Strong: 1 Version: (PathId: [OwnerId: 1, LocalPathId: 1], Version: 2) DomainId: AbandonedSchemeShards: there are 0 elements }, new state# { Deleted: 0 Strong: 1 Version: (PathId: [OwnerId: 1, LocalPathId: 1], Version: 3) DomainId: AbandonedSchemeShards: there are 0 elements } Sending path update to replica: [2:3298559222387:0] Sending path update to replica: [2:4398070850163:0] Sending path update to replica: [2:5497582477939:0] ... waiting for initial path lookups 2026-01-08T15:23:50.365602Z node 3 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:1023: [main][3:28:2075][TestPath] Handle {EvResolveReplicasList ReplicaGroups: [{Replicas: [[3:24339059:0], [3:1099535966835:0], [3:2199047594611:0]] WriteOnly: 0 State: 0}, {Replicas: [[3:3298559222387:0], [3:4398070850163:0], [3:5497582477939:0]] WriteOnly: 1 State: 0}]} 2026-01-08T15:23:50.366393Z node 3 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:359: [replica][3:32:2075][TestPath] Handle NKikimrSchemeBoard.TEvNotify { Path: TestPath Version: 0 }: sender# [3:4:2051] 2026-01-08T15:23:50.366467Z node 3 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:359: [replica][3:33:2075][TestPath] Handle NKikimrSchemeBoard.TEvNotify { Path: TestPath Version: 0 }: sender# [3:5:2052] 2026-01-08T15:23:50.366509Z node 3 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:359: [replica][3:34:2075][TestPath] Handle NKikimrSchemeBoard.TEvNotify { Path: TestPath Version: 0 }: sender# [3:6:2053] ... blocking NKikimr::NSchemeBoard::NInternalEvents::TEvNotifyBuilder from SCHEME_BOARD_SUBSCRIBER_PROXY_ACTOR to SCHEME_BOARD_SUBSCRIBER_ACTOR cookie 0 ... blocking NKikimr::NSchemeBoard::NInternalEvents::TEvNotifyBuilder from SCHEME_BOARD_SUBSCRIBER_PROXY_ACTOR to SCHEME_BOARD_SUBSCRIBER_ACTOR cookie 0 ... blocking NKikimr::NSchemeBoard::NInternalEvents::TEvNotifyBuilder from SCHEME_BOARD_SUBSCRIBER_PROXY_ACTOR to SCHEME_BOARD_SUBSCRIBER_ACTOR cookie 0 ... waiting for initial path lookups (done) ... unblocking NKikimr::NSchemeBoard::NInternalEvents::TEvNotifyBuilder from SCHEME_BOARD_SUBSCRIBER_PROXY_ACTOR to SCHEME_BOARD_SUBSCRIBER_ACTOR ... unblocking NKikimr::NSchemeBoard::NInternalEvents::TEvNotifyBuilder from SCHEME_BOARD_SUBSCRIBER_PROXY_ACTOR to SCHEME_BOARD_SUBSCRIBER_ACTOR ... 
unblocking NKikimr::NSchemeBoard::NInternalEvents::TEvNotifyBuilder from SCHEME_BOARD_SUBSCRIBER_PROXY_ACTOR to SCHEME_BOARD_SUBSCRIBER_ACTOR 2026-01-08T15:23:50.366757Z node 3 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:814: [main][3:28:2075][TestPath] Handle NKikimrSchemeBoard.TEvNotify { Path: TestPath Version: 0 }: sender# [3:29:2075] 2026-01-08T15:23:50.366870Z node 3 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:814: [main][3:28:2075][TestPath] Handle NKikimrSchemeBoard.TEvNotify { Path: TestPath Version: 0 }: sender# [3:30:2075] 2026-01-08T15:23:50.366937Z node 3 :SCHEME_BOARD_SUBSCRIBER NOTICE: subscriber.cpp:849: [main][3:28:2075][TestPath] Set up state: owner# [3:27:2074], state# { Deleted: 1 Strong: 0 Version: DomainId: AbandonedSchemeShards: there are 0 elements } 2026-01-08T15:23:50.366985Z node 3 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:814: [main][3:28:2075][TestPath] Handle NKikimrSchemeBoard.TEvNotify { Path: TestPath Version: 0 }: sender# [3:31:2075] 2026-01-08T15:23:50.367033Z node 3 :SCHEME_BOARD_SUBSCRIBER INFO: subscriber.cpp:867: [main][3:28:2075][TestPath] Ignore empty state: owner# [3:27:2074], state# { Deleted: 1 Strong: 0 Version: DomainId: AbandonedSchemeShards: there are 0 elements }, other state# { Deleted: 1 Strong: 0 Version: DomainId: AbandonedSchemeShards: there are 0 elements } Sending path update to replica: [3:24339059:0] 2026-01-08T15:23:50.367321Z node 3 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:359: [replica][3:32:2075][TestPath] Handle NKikimrSchemeBoard.TEvNotify { Path: TestPath PathId: [OwnerId: 1, LocalPathId: 1] Version: 1 }: sender# [3:4:2051] 2026-01-08T15:23:50.367404Z node 3 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:814: [main][3:28:2075][TestPath] Handle NKikimrSchemeBoard.TEvNotify { Path: TestPath PathId: [OwnerId: 1, LocalPathId: 1] Version: 1 }: sender# [3:29:2075] 2026-01-08T15:23:50.367476Z node 3 :SCHEME_BOARD_SUBSCRIBER NOTICE: subscriber.cpp:855: [main][3:28:2075][TestPath] Update to strong state: owner# [3:27:2074], state# { Deleted: 1 Strong: 0 Version: DomainId: AbandonedSchemeShards: there are 0 elements }, new state# { Deleted: 0 Strong: 1 Version: (PathId: [OwnerId: 1, LocalPathId: 1], Version: 1) DomainId: AbandonedSchemeShards: there are 0 elements } Sending path update to replica: [3:1099535966835:0] 2026-01-08T15:23:50.367827Z node 3 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:359: [replica][3:33:2075][TestPath] Handle NKikimrSchemeBoard.TEvNotify { Path: TestPath PathId: [OwnerId: 1, LocalPathId: 1] Version: 2 }: sender# [3:5:2052] 2026-01-08T15:23:50.367901Z node 3 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:814: [main][3:28:2075][TestPath] Handle NKikimrSchemeBoard.TEvNotify { Path: TestPath PathId: [OwnerId: 1, LocalPathId: 1] Version: 2 }: sender# [3:30:2075] 2026-01-08T15:23:50.367968Z node 3 :SCHEME_BOARD_SUBSCRIBER NOTICE: subscriber.cpp:855: [main][3:28:2075][TestPath] Path was updated to new version: owner# [3:27:2074], state# { Deleted: 0 Strong: 1 Version: (PathId: [OwnerId: 1, LocalPathId: 1], Version: 1) DomainId: AbandonedSchemeShards: there are 0 elements }, new state# { Deleted: 0 Strong: 1 Version: (PathId: [OwnerId: 1, LocalPathId: 1], Version: 2) DomainId: AbandonedSchemeShards: there are 0 elements } Sending path update to replica: [3:2199047594611:0] 2026-01-08T15:23:50.368242Z node 3 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:359: [replica][3:34:2075][TestPath] Handle NKikimrSchemeBoard.TEvNotify { Path: TestPath PathId: [OwnerId: 1, LocalPathId: 1] Version: 3 }: 
sender# [3:6:2053] 2026-01-08T15:23:50.368310Z node 3 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:814: [main][3:28:2075][TestPath] Handle NKikimrSchemeBoard.TEvNotify { Path: TestPath PathId: [OwnerId: 1, LocalPathId: 1] Version: 3 }: sender# [3:31:2075] 2026-01-08T15:23:50.368366Z node 3 :SCHEME_BOARD_SUBSCRIBER NOTICE: subscriber.cpp:855: [main][3:28:2075][TestPath] Path was updated to new version: owner# [3:27:2074], state# { Deleted: 0 Strong: 1 Version: (PathId: [OwnerId: 1, LocalPathId: 1], Version: 2) DomainId: AbandonedSchemeShards: there are 0 elements }, new state# { Deleted: 0 Strong: 1 Version: (PathId: [OwnerId: 1, LocalPathId: 1], Version: 3) DomainId: AbandonedSchemeShards: there are 0 elements } Sending path update to replica: [3:3298559222387:0] Sending path update to replica: [3:4398070850163:0] Sending path update to replica: [3:5497582477939:0] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/scheme_board/ut_subscriber/unittest >> TSubscriberSyncQuorumTest::OneDisconnectedRingGroup [GOOD] Test command err: ... waiting for initial path lookups 2026-01-08T15:23:49.737247Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:1023: [main][1:28:2075][TestPath] Handle {EvResolveReplicasList ReplicaGroups: [{Replicas: [[1:24339059:0], [1:1099535966835:0], [1:2199047594611:0]] WriteOnly: 0 State: 0}, {Replicas: [[1:3298559222387:0], [1:4398070850163:0], [1:5497582477939:0]] WriteOnly: 0 State: 1}]} 2026-01-08T15:23:49.741107Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:359: [replica][1:35:2075][TestPath] Handle NKikimrSchemeBoard.TEvNotify { Path: TestPath Version: 0 }: sender# [1:4:2051] 2026-01-08T15:23:49.741268Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:359: [replica][1:36:2075][TestPath] Handle NKikimrSchemeBoard.TEvNotify { Path: TestPath Version: 0 }: sender# [1:5:2052] 2026-01-08T15:23:49.741318Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:359: [replica][1:37:2075][TestPath] Handle NKikimrSchemeBoard.TEvNotify { Path: TestPath Version: 0 }: sender# [1:6:2053] 2026-01-08T15:23:49.741362Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:359: [replica][1:38:2075][TestPath] Handle NKikimrSchemeBoard.TEvNotify { Path: TestPath Version: 0 }: sender# [1:13:2060] 2026-01-08T15:23:49.741411Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:359: [replica][1:39:2075][TestPath] Handle NKikimrSchemeBoard.TEvNotify { Path: TestPath Version: 0 }: sender# [1:14:2061] 2026-01-08T15:23:49.741451Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:359: [replica][1:40:2075][TestPath] Handle NKikimrSchemeBoard.TEvNotify { Path: TestPath Version: 0 }: sender# [1:15:2062] ... blocking NKikimr::NSchemeBoard::NInternalEvents::TEvNotifyBuilder from SCHEME_BOARD_SUBSCRIBER_PROXY_ACTOR to SCHEME_BOARD_SUBSCRIBER_ACTOR cookie 0 ... blocking NKikimr::NSchemeBoard::NInternalEvents::TEvNotifyBuilder from SCHEME_BOARD_SUBSCRIBER_PROXY_ACTOR to SCHEME_BOARD_SUBSCRIBER_ACTOR cookie 0 ... blocking NKikimr::NSchemeBoard::NInternalEvents::TEvNotifyBuilder from SCHEME_BOARD_SUBSCRIBER_PROXY_ACTOR to SCHEME_BOARD_SUBSCRIBER_ACTOR cookie 0 ... blocking NKikimr::NSchemeBoard::NInternalEvents::TEvNotifyBuilder from SCHEME_BOARD_SUBSCRIBER_PROXY_ACTOR to SCHEME_BOARD_SUBSCRIBER_ACTOR cookie 0 ... blocking NKikimr::NSchemeBoard::NInternalEvents::TEvNotifyBuilder from SCHEME_BOARD_SUBSCRIBER_PROXY_ACTOR to SCHEME_BOARD_SUBSCRIBER_ACTOR cookie 0 ... 
blocking NKikimr::NSchemeBoard::NInternalEvents::TEvNotifyBuilder from SCHEME_BOARD_SUBSCRIBER_PROXY_ACTOR to SCHEME_BOARD_SUBSCRIBER_ACTOR cookie 0 ... waiting for initial path lookups (done) ... unblocking NKikimr::NSchemeBoard::NInternalEvents::TEvNotifyBuilder from SCHEME_BOARD_SUBSCRIBER_PROXY_ACTOR to SCHEME_BOARD_SUBSCRIBER_ACTOR ... unblocking NKikimr::NSchemeBoard::NInternalEvents::TEvNotifyBuilder from SCHEME_BOARD_SUBSCRIBER_PROXY_ACTOR to SCHEME_BOARD_SUBSCRIBER_ACTOR ... unblocking NKikimr::NSchemeBoard::NInternalEvents::TEvNotifyBuilder from SCHEME_BOARD_SUBSCRIBER_PROXY_ACTOR to SCHEME_BOARD_SUBSCRIBER_ACTOR ... unblocking NKikimr::NSchemeBoard::NInternalEvents::TEvNotifyBuilder from SCHEME_BOARD_SUBSCRIBER_PROXY_ACTOR to SCHEME_BOARD_SUBSCRIBER_ACTOR ... unblocking NKikimr::NSchemeBoard::NInternalEvents::TEvNotifyBuilder from SCHEME_BOARD_SUBSCRIBER_PROXY_ACTOR to SCHEME_BOARD_SUBSCRIBER_ACTOR ... unblocking NKikimr::NSchemeBoard::NInternalEvents::TEvNotifyBuilder from SCHEME_BOARD_SUBSCRIBER_PROXY_ACTOR to SCHEME_BOARD_SUBSCRIBER_ACTOR 2026-01-08T15:23:49.741915Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:814: [main][1:28:2075][TestPath] Handle NKikimrSchemeBoard.TEvNotify { Path: TestPath Version: 0 }: sender# [1:29:2075] 2026-01-08T15:23:49.742043Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:814: [main][1:28:2075][TestPath] Handle NKikimrSchemeBoard.TEvNotify { Path: TestPath Version: 0 }: sender# [1:30:2075] 2026-01-08T15:23:49.742100Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:814: [main][1:28:2075][TestPath] Handle NKikimrSchemeBoard.TEvNotify { Path: TestPath Version: 0 }: sender# [1:31:2075] 2026-01-08T15:23:49.742141Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:814: [main][1:28:2075][TestPath] Handle NKikimrSchemeBoard.TEvNotify { Path: TestPath Version: 0 }: sender# [1:32:2075] 2026-01-08T15:23:49.742178Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:814: [main][1:28:2075][TestPath] Handle NKikimrSchemeBoard.TEvNotify { Path: TestPath Version: 0 }: sender# [1:33:2075] 2026-01-08T15:23:49.742244Z node 1 :SCHEME_BOARD_SUBSCRIBER NOTICE: subscriber.cpp:849: [main][1:28:2075][TestPath] Set up state: owner# [1:27:2074], state# { Deleted: 1 Strong: 0 Version: DomainId: AbandonedSchemeShards: there are 0 elements } 2026-01-08T15:23:49.742341Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:814: [main][1:28:2075][TestPath] Handle NKikimrSchemeBoard.TEvNotify { Path: TestPath Version: 0 }: sender# [1:34:2075] 2026-01-08T15:23:49.742396Z node 1 :SCHEME_BOARD_SUBSCRIBER INFO: subscriber.cpp:867: [main][1:28:2075][TestPath] Ignore empty state: owner# [1:27:2074], state# { Deleted: 1 Strong: 0 Version: DomainId: AbandonedSchemeShards: there are 0 elements }, other state# { Deleted: 1 Strong: 0 Version: DomainId: AbandonedSchemeShards: there are 0 elements } Sending path update to replica: [1:24339059:0] 2026-01-08T15:23:49.742784Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:359: [replica][1:35:2075][TestPath] Handle NKikimrSchemeBoard.TEvNotify { Path: TestPath PathId: [OwnerId: 1, LocalPathId: 1] Version: 1 }: sender# [1:4:2051] 2026-01-08T15:23:49.742878Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:814: [main][1:28:2075][TestPath] Handle NKikimrSchemeBoard.TEvNotify { Path: TestPath PathId: [OwnerId: 1, LocalPathId: 1] Version: 1 }: sender# [1:29:2075] 2026-01-08T15:23:49.742943Z node 1 :SCHEME_BOARD_SUBSCRIBER NOTICE: subscriber.cpp:855: [main][1:28:2075][TestPath] Update to strong state: owner# 
[1:27:2074], state# { Deleted: 1 Strong: 0 Version: DomainId: AbandonedSchemeShards: there are 0 elements }, new state# { Deleted: 0 Strong: 1 Version: (PathId: [OwnerId: 1, LocalPathId: 1], Version: 1) DomainId: AbandonedSchemeShards: there are 0 elements } Sending path update to replica: [1:1099535966835:0] 2026-01-08T15:23:49.743248Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:359: [replica][1:36:2075][TestPath] Handle NKikimrSchemeBoard.TEvNotify { Path: TestPath PathId: [OwnerId: 1, LocalPathId: 1] Version: 2 }: sender# [1:5:2052] 2026-01-08T15:23:49.743325Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:814: [main][1:28:2075][TestPath] Handle NKikimrSchemeBoard.TEvNotify { Path: TestPath PathId: [OwnerId: 1, LocalPathId: 1] Version: 2 }: sender# [1:30:2075] 2026-01-08T15:23:49.743399Z node 1 :SCHEME_BOARD_SUBSCRIBER NOTICE: subscriber.cpp:855: [main][1:28:2075][TestPath] Path was updated to new version: owner# [1:27:2074], state# { Deleted: 0 Strong: 1 Version: (PathId: [OwnerId: 1, LocalPathId: 1], Version: 1) DomainId: AbandonedSchemeShards: there are 0 elements }, new state# { Deleted: 0 Strong: 1 Version: (PathId: [OwnerId: 1, LocalPathId: 1], Version: 2) DomainId: AbandonedSchemeShards: there are 0 elements } Sending path update to replica: [1:2199047594611:0] 2026-01-08T15:23:49.743633Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:359: [replica][1:37:2075][TestPath] Handle NKikimrSchemeBoard.TEvNotify { Path: TestPath PathId: [OwnerId: 1, LocalPathId: 1] Version: 3 }: sender# [1:6:2053] 2026-01-08T15:23:49.743691Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:814: [main][1:28:2075][TestPath] Handle NKikimrSchemeBoard.TEvNotify { Path: TestPath PathId: [OwnerId: 1, LocalPathId: 1] Version: 3 }: sender# [1:31:2075] 2026-01-08T15:23:49.743740Z node 1 :SCHEME_BOARD_SUBSCRIBER NOTICE: subscriber.cpp:855: [main][1:28:2075][TestPath] Path was updated to new version: owner# [1:27:2074], state# { Deleted: 0 Strong: 1 Version: (PathId: [OwnerId: 1, LocalPathId: 1], Version: 2) DomainId: AbandonedSchemeShards: there are 0 elements }, new state# { Deleted: 0 Strong: 1 Version: (PathId: [OwnerId: 1, LocalPathId: 1], Version: 3) DomainId: AbandonedSchemeShards: there are 0 elements } Sending path update to replica: [1:3298559222387:0] 2026-01-08T15:23:49.743961Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:359: [replica][1:38:2075][TestPath] Handle NKikimrSchemeBoard.TEvNotify { Path: TestPath PathId: [OwnerId: 1, LocalPathId: 1] Version: 4 }: sender# [1:13:2060] 2026-01-08T15:23:49.744021Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:814: [main][1:28:2075][TestPath] Handle NKikimrSchemeBoard.TEvNotify { Path: TestPath PathId: [OwnerId: 1, LocalPathId: 1] Version: 4 }: sender# [1:32:2075] 2026-01-08T15:23:49.744131Z node 1 :SCHEME_BOARD_SUBSCRIBER NOTICE: subscriber.cpp:855: [main][1:28:2075][TestPath] Path was updated to new version: owner# [1:27:2074], state# { Deleted: 0 Strong: 1 Version: (PathId: [OwnerId: 1, LocalPathId: 1], Version: 3) DomainId: AbandonedSchemeShards: there are 0 elements }, new state# { Deleted: 0 Strong: 1 Version: (PathId: [OwnerId: 1, LocalPathId: 1], Version: 4) DomainId: AbandonedSchemeShards: there are 0 elements } Sending path update to replica: [1:4398070850163:0] 2026-01-08T15:23:49.744407Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:359: [replica][1:39:2075][TestPath] Handle NKikimrSchemeBoard.TEvNotify { Path: TestPath PathId: [OwnerId: 1, LocalPathId: 1] Version: 5 }: sender# [1:14:2061] 
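Annotation: the "Set up state" / "Update to strong state" / "Path was updated to new version" / "Ignore empty state" entries above show the subscriber replacing its cached path state only on the first strong notification and, after that, only when a strictly newer version arrives. A minimal sketch of that rule, assuming illustrative type and function names rather than the real NKikimr::NSchemeBoard declarations:

    // Hypothetical reconstruction of the version-monotonic update rule
    // visible in the trace; not the actual subscriber.cpp types.
    #include <cstdint>
    #include <iostream>
    #include <optional>

    struct TPathState {
        bool Deleted = true;             // no description known yet
        bool Strong = false;             // backed by a real replica answer
        std::optional<uint64_t> Version; // empty for the initial weak state
    };

    // Returns true if the incoming notification replaced the cached state.
    bool ApplyNotify(TPathState& current, const TPathState& incoming) {
        // A weak, versionless notification never overwrites anything:
        // the subscriber logs "Ignore empty state" for it.
        if (!incoming.Strong && !incoming.Version) {
            return false;
        }
        // First strong answer: logged as "Update to strong state".
        if (!current.Strong) {
            current = incoming;
            return true;
        }
        // Later answers must carry a strictly newer version:
        // logged as "Path was updated to new version".
        if (incoming.Version && current.Version &&
            *incoming.Version > *current.Version) {
            current = incoming;
            return true;
        }
        return false; // stale/equal version: "Path was already updated"
    }

    int main() {
        TPathState state;                     // { Deleted: 1 Strong: 0 }
        ApplyNotify(state, {false, true, 1}); // -> strong, Version: 1
        std::cout << ApplyNotify(state, {false, true, 2}) << '\n'; // 1
        std::cout << ApplyNotify(state, {false, true, 2}) << '\n'; // 0
    }

Under these assumptions, the run of notifications with Version: 1 through Version: 6 above corresponds to one "Update to strong state" followed by five successful "Path was updated to new version" applications.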
2026-01-08T15:23:49.744482Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:814: [main][1:28:2075][TestPath] Handle NKikimrSchemeBoard.TEvNotify { Path: TestPath PathId: [OwnerId: 1, LocalPathId: 1] Version: 5 }: sender# [1:33:2075] 2026-01-08T15:23:49.744535Z node 1 :SCHEME_BOARD_SUBSCRIBER NOTICE: subscriber.cpp:855: [main][1:28:2075][TestPath] Path was updated to new version: owner# [1:27:2074], state# { Deleted: 0 Strong: 1 Version: (PathId: [OwnerId: 1, LocalPathId: 1], Version: 4) DomainId: AbandonedSchemeShards: there are 0 elements }, new state# { Deleted: 0 Strong: 1 Version: (PathId: [OwnerId: 1, LocalPathId: 1], Version: 5) DomainId: AbandonedSchemeShards: there are 0 elements } Sending path update to replica: [1:5497582477939:0] 2026-01-08T15:23:49.744776Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:359: [replica][1:40:2075][TestPath] Handle NKikimrSchemeBoard.TEvNotify { Path: TestPath PathId: [OwnerId: 1, LocalPathId: 1] Version: 6 }: sender# [1:15:2062] 2026-01-08T15:23:49.744834Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:814: [main][1:28:2075][TestPath] Handle NKikimrSchemeBoard.TEvNotify { Path: TestPath PathId: [OwnerId: 1, LocalPathId: 1] Version: 6 }: sender# [1:34:2075] 2026-01-08T15:23:49.744881Z node 1 :SCHEME_BOARD_SUBSCRIBER NOTICE: subscriber.cpp:855: [main][1:28:2075][TestPath] Path was updated to new version: owner# [1:27:2074], state# { Deleted: 0 Strong: 1 Version: (PathId: [OwnerId: 1, LocalPathId: 1], Version: 5) DomainId: AbandonedSchemeShards: there are 0 elements }, new state# { Deleted: 0 Strong: 1 Version: (PathId: [OwnerId: 1, LocalPathId: 1], Version: 6) DomainId: AbandonedSchemeShards: there are 0 elements } ... waiting for initial path lookups 2026-01-08T15:23:50.012484Z node 2 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:1023: [main][2:19:2066][TestPath] Handle {EvResolveReplicasList ReplicaGroups: [{Replicas: [[2:24339059:0], [2:1099535966835:0], [2:2199047594611:0]] WriteOnly: 0 State: 0}]} 2026-01-08T15:23:50.012872Z node 2 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1070: [2:4:2051] Handle NKikimrSchemeBoard.TEvSubscribe { Path: TestPath DomainOwnerId: 1 }: sender# [2:23:2066] 2026-01-08T15:23:50.012916Z node 2 :SCHEME_BOARD_REPLICA INFO: replica.cpp:529: [2:4:2051] Upsert description: path# TestPath 2026-01-0 ... 
tion: 1 }: sender# [2:18:2065], cookie# 0, event size# 80 2026-01-08T15:23:50.020244Z node 2 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:831: [2:4:2051] Update description: path# TestPath, pathId# [OwnerId: 1, LocalPathId: 1], deletion# false 2026-01-08T15:23:50.020302Z node 2 :SCHEME_BOARD_REPLICA INFO: replica.cpp:559: [2:4:2051] Upsert description: path# TestPath, pathId# [OwnerId: 1, LocalPathId: 1], pathDescription# {Status StatusSuccess, Path TestPath, PathId [OwnerId: 1, LocalPathId: 1], PathVersion 2, SubdomainPathId , PathAbandonedTenantsSchemeShards size 0, DescribeSchemeResultSerialized size 34} 2026-01-08T15:23:50.020407Z node 2 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:359: [replica][2:23:2066][TestPath] Handle NKikimrSchemeBoard.TEvNotify { Path: TestPath PathId: [OwnerId: 1, LocalPathId: 1] Version: 2 }: sender# [2:4:2051] 2026-01-08T15:23:50.020504Z node 2 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:520: [proxy][2:20:2066][TestPath] Cluster state mismatch in replica notification: sender# [2:23:2066], subscriber cluster state# {Generation: 0, GUID: 0}, replica cluster state# {Generation: 1 Guid: 0} 2026-01-08T15:23:50.020559Z node 2 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1098: [2:4:2051] Handle NKikimrSchemeBoard.TEvNotifyAck { Version: 2 }: sender# [2:23:2066] 2026-01-08T15:23:50.020642Z node 2 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1084: [2:4:2051] Handle NKikimrSchemeBoard.TEvUnsubscribe { Path: TestPath }: sender# [2:23:2066] 2026-01-08T15:23:50.020685Z node 2 :SCHEME_BOARD_REPLICA INFO: replica.cpp:671: [2:4:2051] Unsubscribe: subscriber# [2:23:2066], path# TestPath 2026-01-08T15:23:50.020748Z node 2 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:814: [main][2:19:2066][TestPath] Handle NKikimrSchemeBoard.TEvNotify { Path: TestPath Version: 0 }: sender# [2:20:2066] 2026-01-08T15:23:50.020797Z node 2 :SCHEME_BOARD_SUBSCRIBER INFO: subscriber.cpp:867: [main][2:19:2066][TestPath] Ignore empty state: owner# [2:18:2065], state# { Deleted: 0 Strong: 1 Version: (PathId: [OwnerId: 1, LocalPathId: 1], Version: 1) DomainId: AbandonedSchemeShards: there are 0 elements }, other state# { Deleted: 1 Strong: 0 Version: DomainId: AbandonedSchemeShards: there are 0 elements } ... waiting for initial path lookups 2026-01-08T15:23:50.372202Z node 3 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:1023: [main][3:28:2075][TestPath] Handle {EvResolveReplicasList ReplicaGroups: [{Replicas: [[3:24339059:0], [3:1099535966835:0], [3:2199047594611:0]] WriteOnly: 0 State: 0}]} 2026-01-08T15:23:50.372893Z node 3 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:359: [replica][3:32:2075][TestPath] Handle NKikimrSchemeBoard.TEvNotify { Path: TestPath Version: 0 }: sender# [3:4:2051] 2026-01-08T15:23:50.372963Z node 3 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:359: [replica][3:33:2075][TestPath] Handle NKikimrSchemeBoard.TEvNotify { Path: TestPath Version: 0 }: sender# [3:5:2052] 2026-01-08T15:23:50.373000Z node 3 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:359: [replica][3:34:2075][TestPath] Handle NKikimrSchemeBoard.TEvNotify { Path: TestPath Version: 0 }: sender# [3:6:2053] ... blocking NKikimr::NSchemeBoard::NInternalEvents::TEvNotifyBuilder from SCHEME_BOARD_SUBSCRIBER_PROXY_ACTOR to SCHEME_BOARD_SUBSCRIBER_ACTOR cookie 0 ... blocking NKikimr::NSchemeBoard::NInternalEvents::TEvNotifyBuilder from SCHEME_BOARD_SUBSCRIBER_PROXY_ACTOR to SCHEME_BOARD_SUBSCRIBER_ACTOR cookie 0 ... 
blocking NKikimr::NSchemeBoard::NInternalEvents::TEvNotifyBuilder from SCHEME_BOARD_SUBSCRIBER_PROXY_ACTOR to SCHEME_BOARD_SUBSCRIBER_ACTOR cookie 0 ... waiting for initial path lookups (done) ... unblocking NKikimr::NSchemeBoard::NInternalEvents::TEvNotifyBuilder from SCHEME_BOARD_SUBSCRIBER_PROXY_ACTOR to SCHEME_BOARD_SUBSCRIBER_ACTOR ... unblocking NKikimr::NSchemeBoard::NInternalEvents::TEvNotifyBuilder from SCHEME_BOARD_SUBSCRIBER_PROXY_ACTOR to SCHEME_BOARD_SUBSCRIBER_ACTOR ... unblocking NKikimr::NSchemeBoard::NInternalEvents::TEvNotifyBuilder from SCHEME_BOARD_SUBSCRIBER_PROXY_ACTOR to SCHEME_BOARD_SUBSCRIBER_ACTOR Poisoning replica: [3:2199047594611:0] Poisoning replica: [3:3298559222387:0] Poisoning replica: [3:4398070850163:0] Poisoning replica: [3:5497582477939:0] 2026-01-08T15:23:50.373258Z node 3 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:889: [main][3:28:2075][TestPath] Handle NKikimr::NSchemeBoard::NInternalEvents::TEvSyncRequest: sender# [3:27:2074], cookie# 12345 2026-01-08T15:23:50.373349Z node 3 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:814: [main][3:28:2075][TestPath] Handle NKikimrSchemeBoard.TEvNotify { Path: TestPath Version: 0 }: sender# [3:29:2075] 2026-01-08T15:23:50.373486Z node 3 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:814: [main][3:28:2075][TestPath] Handle NKikimrSchemeBoard.TEvNotify { Path: TestPath Version: 0 }: sender# [3:30:2075] 2026-01-08T15:23:50.373585Z node 3 :SCHEME_BOARD_SUBSCRIBER NOTICE: subscriber.cpp:849: [main][3:28:2075][TestPath] Set up state: owner# [3:27:2074], state# { Deleted: 1 Strong: 0 Version: DomainId: AbandonedSchemeShards: there are 0 elements } 2026-01-08T15:23:50.373667Z node 3 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:814: [main][3:28:2075][TestPath] Handle NKikimrSchemeBoard.TEvNotify { Path: TestPath Version: 0 }: sender# [3:31:2075] 2026-01-08T15:23:50.373713Z node 3 :SCHEME_BOARD_SUBSCRIBER INFO: subscriber.cpp:867: [main][3:28:2075][TestPath] Ignore empty state: owner# [3:27:2074], state# { Deleted: 1 Strong: 0 Version: DomainId: AbandonedSchemeShards: there are 0 elements }, other state# { Deleted: 1 Strong: 0 Version: DomainId: AbandonedSchemeShards: there are 0 elements } 2026-01-08T15:23:50.373822Z node 3 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:375: [replica][3:32:2075][TestPath] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: TestPath }: sender# [3:29:2075], cookie# 12345 2026-01-08T15:23:50.373894Z node 3 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:375: [replica][3:33:2075][TestPath] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: TestPath }: sender# [3:30:2075], cookie# 12345 2026-01-08T15:23:50.373947Z node 3 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:375: [replica][3:34:2075][TestPath] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: TestPath }: sender# [3:31:2075], cookie# 12345 2026-01-08T15:23:50.374025Z node 3 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:384: [replica][3:32:2075][TestPath] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 0 Partial: 0 Cluster State: { } }: sender# [3:4:2051], cookie# 12345 2026-01-08T15:23:50.374073Z node 3 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:384: [replica][3:33:2075][TestPath] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 0 Partial: 0 Cluster State: { } }: sender# [3:5:2052], cookie# 12345 2026-01-08T15:23:50.374218Z node 3 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:919: [main][3:28:2075][TestPath] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 0 Partial: 0 Cluster 
State: { } }: sender# [3:29:2075], cookie# 12345 2026-01-08T15:23:50.374261Z node 3 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:986: [main][3:28:2075][TestPath] Sync is in progress: cookie# 12345, ring group# 0, size# 3, half# 1, successes# 1, failures# 0 2026-01-08T15:23:50.374365Z node 3 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:919: [main][3:28:2075][TestPath] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 0 Partial: 0 Cluster State: { } }: sender# [3:30:2075], cookie# 12345 2026-01-08T15:23:50.374403Z node 3 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:999: [main][3:28:2075][TestPath] Sync is done in the ring group: cookie# 12345, ring group# 0, size# 3, half# 1, successes# 2, failures# 0, partial# 0 2026-01-08T15:23:50.374457Z node 3 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:919: [main][3:28:2075][TestPath] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 0 Partial: 1 Cluster State: { } }: sender# [3:31:2075], cookie# 12345 2026-01-08T15:23:50.374491Z node 3 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:925: [main][3:28:2075][TestPath] Sync cookie mismatch: sender# [3:31:2075], cookie# 12345, current cookie# 0 2026-01-08T15:23:50.374528Z node 3 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:814: [main][3:28:2075][TestPath] Handle NKikimrSchemeBoard.TEvNotify { Path: TestPath Version: 0 }: sender# [3:31:2075] 2026-01-08T15:23:50.374569Z node 3 :SCHEME_BOARD_SUBSCRIBER INFO: subscriber.cpp:867: [main][3:28:2075][TestPath] Ignore empty state: owner# [3:27:2074], state# { Deleted: 1 Strong: 0 Version: DomainId: AbandonedSchemeShards: there are 0 elements }, other state# { Deleted: 1 Strong: 0 Version: DomainId: AbandonedSchemeShards: there are 0 elements } Poisoning replica: [3:1099535966835:0] whose ring group state is: 0 2026-01-08T15:23:50.374709Z node 3 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:889: [main][3:28:2075][TestPath] Handle NKikimr::NSchemeBoard::NInternalEvents::TEvSyncRequest: sender# [3:27:2074], cookie# 12346 2026-01-08T15:23:50.374866Z node 3 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:375: [replica][3:32:2075][TestPath] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: TestPath }: sender# [3:29:2075], cookie# 12346 2026-01-08T15:23:50.374924Z node 3 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:375: [replica][3:33:2075][TestPath] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: TestPath }: sender# [3:30:2075], cookie# 12346 2026-01-08T15:23:50.374985Z node 3 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:919: [main][3:28:2075][TestPath] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 0 Partial: 1 Cluster State: { } }: sender# [3:31:2075], cookie# 12346 2026-01-08T15:23:50.375023Z node 3 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:986: [main][3:28:2075][TestPath] Sync is in progress: cookie# 12346, ring group# 0, size# 3, half# 1, successes# 0, failures# 1 2026-01-08T15:23:50.375069Z node 3 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:384: [replica][3:32:2075][TestPath] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 0 Partial: 0 Cluster State: { } }: sender# [3:4:2051], cookie# 12346 2026-01-08T15:23:50.375158Z node 3 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:919: [main][3:28:2075][TestPath] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 0 Partial: 0 Cluster State: { } }: sender# [3:29:2075], cookie# 12346 2026-01-08T15:23:50.375195Z node 3 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:986: [main][3:28:2075][TestPath] Sync is in progress: cookie# 12346, ring 
group# 0, size# 3, half# 1, successes# 1, failures# 1 2026-01-08T15:23:50.375237Z node 3 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:919: [main][3:28:2075][TestPath] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 0 Partial: 1 Cluster State: { } }: sender# [3:30:2075], cookie# 12346 2026-01-08T15:23:50.375285Z node 3 :SCHEME_BOARD_SUBSCRIBER NOTICE: subscriber.cpp:1001: [main][3:28:2075][TestPath] Sync is done in the ring group: cookie# 12346, ring group# 0, size# 3, half# 1, successes# 1, failures# 2, partial# 1 2026-01-08T15:23:50.375331Z node 3 :SCHEME_BOARD_SUBSCRIBER WARN: subscriber.cpp:1006: [main][3:28:2075][TestPath] Sync is incomplete in one of the ring groups: cookie# 12346 2026-01-08T15:23:50.375400Z node 3 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:814: [main][3:28:2075][TestPath] Handle NKikimrSchemeBoard.TEvNotify { Path: TestPath Version: 0 }: sender# [3:30:2075] 2026-01-08T15:23:50.375458Z node 3 :SCHEME_BOARD_SUBSCRIBER INFO: subscriber.cpp:867: [main][3:28:2075][TestPath] Ignore empty state: owner# [3:27:2074], state# { Deleted: 1 Strong: 0 Version: DomainId: AbandonedSchemeShards: there are 0 elements }, other state# { Deleted: 1 Strong: 0 Version: DomainId: AbandonedSchemeShards: there are 0 elements } |82.6%| [TM] {BAZEL_UPLOAD} ydb/core/fq/libs/compute/common/ut/unittest |82.6%| [TS] {BAZEL_UPLOAD} ydb/library/yql/providers/common/http_gateway/ut/unittest ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/scheme_board/ut_subscriber/unittest >> TSubscriberTest::InvalidNotification [GOOD] Test command err: 2026-01-08T15:23:50.067267Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:1023: [main][1:37:2067][path] Handle {EvResolveReplicasList ReplicaGroups: [{Replicas: [[1:24339059:0], [1:1099535966835:0], [1:2199047594611:0]] WriteOnly: 0 State: 0}]} 2026-01-08T15:23:50.070182Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:359: [replica][1:41:2067][path] Handle NKikimrSchemeBoard.TEvNotify { Path: path PathId: [OwnerId: 1, LocalPathId: 1] Version: 1 }: sender# [1:3:2050] 2026-01-08T15:23:50.070346Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:359: [replica][1:42:2067][path] Handle NKikimrSchemeBoard.TEvNotify { Path: path PathId: [OwnerId: 1, LocalPathId: 1] Version: 1 }: sender# [1:6:2053] 2026-01-08T15:23:50.070406Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:359: [replica][1:43:2067][path] Handle NKikimrSchemeBoard.TEvNotify { Path: path PathId: [OwnerId: 1, LocalPathId: 1] Version: 1 }: sender# [1:9:2056] 2026-01-08T15:23:50.070504Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:814: [main][1:37:2067][path] Handle NKikimrSchemeBoard.TEvNotify { Path: path PathId: [OwnerId: 1, LocalPathId: 1] Version: 1 }: sender# [1:38:2067] 2026-01-08T15:23:50.070565Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:814: [main][1:37:2067][path] Handle NKikimrSchemeBoard.TEvNotify { Path: path PathId: [OwnerId: 1, LocalPathId: 1] Version: 1 }: sender# [1:39:2067] 2026-01-08T15:23:50.070620Z node 1 :SCHEME_BOARD_SUBSCRIBER NOTICE: subscriber.cpp:849: [main][1:37:2067][path] Set up state: owner# [1:35:2065], state# { Deleted: 0 Strong: 1 Version: (PathId: [OwnerId: 1, LocalPathId: 1], Version: 1) DomainId: AbandonedSchemeShards: there are 0 elements } 2026-01-08T15:23:50.070710Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:814: [main][1:37:2067][path] Handle NKikimrSchemeBoard.TEvNotify { Path: path PathId: [OwnerId: 1, LocalPathId: 1] Version: 1 }: sender# [1:40:2067] 2026-01-08T15:23:50.070772Z node 
1 :SCHEME_BOARD_SUBSCRIBER INFO: subscriber.cpp:867: [main][1:37:2067][path] Path was already updated: owner# [1:35:2065], state# { Deleted: 0 Strong: 1 Version: (PathId: [OwnerId: 1, LocalPathId: 1], Version: 1) DomainId: AbandonedSchemeShards: there are 0 elements }, other state# { Deleted: 0 Strong: 1 Version: (PathId: [OwnerId: 1, LocalPathId: 1], Version: 1) DomainId: AbandonedSchemeShards: there are 0 elements } 2026-01-08T15:23:50.071165Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:359: [replica][1:41:2067][path] Handle NKikimrSchemeBoard.TEvNotify { Path: path PathId: [OwnerId: 1, LocalPathId: 1] Version: 18446744073709551615 }: sender# [1:3:2050] 2026-01-08T15:23:50.071239Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:359: [replica][1:42:2067][path] Handle NKikimrSchemeBoard.TEvNotify { Path: path PathId: [OwnerId: 1, LocalPathId: 1] Version: 18446744073709551615 }: sender# [1:6:2053] 2026-01-08T15:23:50.071309Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:814: [main][1:37:2067][path] Handle NKikimrSchemeBoard.TEvNotify { Path: path PathId: [OwnerId: 1, LocalPathId: 1] Version: 18446744073709551615 }: sender# [1:38:2067] 2026-01-08T15:23:50.071389Z node 1 :SCHEME_BOARD_SUBSCRIBER NOTICE: subscriber.cpp:855: [main][1:37:2067][path] Path was updated to new version: owner# [1:35:2065], state# { Deleted: 0 Strong: 1 Version: (PathId: [OwnerId: 1, LocalPathId: 1], Version: 1) DomainId: AbandonedSchemeShards: there are 0 elements }, new state# { Deleted: 1 Strong: 1 Version: (PathId: [OwnerId: 1, LocalPathId: 1], Version: 18446744073709551615) DomainId: AbandonedSchemeShards: there are 0 elements } 2026-01-08T15:23:50.071476Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:814: [main][1:37:2067][path] Handle NKikimrSchemeBoard.TEvNotify { Path: path PathId: [OwnerId: 1, LocalPathId: 1] Version: 18446744073709551615 }: sender# [1:39:2067] 2026-01-08T15:23:50.071643Z node 1 :SCHEME_BOARD_SUBSCRIBER INFO: subscriber.cpp:867: [main][1:37:2067][path] Path was already updated: owner# [1:35:2065], state# { Deleted: 1 Strong: 1 Version: (PathId: [OwnerId: 1, LocalPathId: 1], Version: 18446744073709551615) DomainId: AbandonedSchemeShards: there are 0 elements }, other state# { Deleted: 1 Strong: 1 Version: (PathId: [OwnerId: 1, LocalPathId: 1], Version: 18446744073709551615) DomainId: AbandonedSchemeShards: there are 0 elements } 2026-01-08T15:23:50.549544Z node 3 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:1023: [main][3:36:2066][path] Handle {EvResolveReplicasList ReplicaGroups: [{Replicas: [[3:24339059:0], [3:1099535966835:0], [3:2199047594611:0]] WriteOnly: 0 State: 0}]} 2026-01-08T15:23:50.550272Z node 3 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:359: [replica][3:40:2066][path] Handle NKikimrSchemeBoard.TEvNotify { Path: path Version: 0 }: sender# [3:3:2050] 2026-01-08T15:23:50.550366Z node 3 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:359: [replica][3:41:2066][path] Handle NKikimrSchemeBoard.TEvNotify { Path: path Version: 0 }: sender# [3:6:2053] 2026-01-08T15:23:50.550409Z node 3 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:359: [replica][3:42:2066][path] Handle NKikimrSchemeBoard.TEvNotify { Path: path Version: 0 }: sender# [3:9:2056] 2026-01-08T15:23:50.550477Z node 3 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:814: [main][3:36:2066][path] Handle NKikimrSchemeBoard.TEvNotify { Path: path Version: 0 }: sender# [3:37:2066] 2026-01-08T15:23:50.550548Z node 3 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:814: [main][3:36:2066][path] Handle 
NKikimrSchemeBoard.TEvNotify { Path: path Version: 0 }: sender# [3:38:2066] 2026-01-08T15:23:50.550618Z node 3 :SCHEME_BOARD_SUBSCRIBER NOTICE: subscriber.cpp:849: [main][3:36:2066][path] Set up state: owner# [3:35:2065], state# { Deleted: 1 Strong: 0 Version: DomainId: AbandonedSchemeShards: there are 0 elements } 2026-01-08T15:23:50.550685Z node 3 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:814: [main][3:36:2066][path] Handle NKikimrSchemeBoard.TEvNotify { Path: path Version: 0 }: sender# [3:39:2066] 2026-01-08T15:23:50.550743Z node 3 :SCHEME_BOARD_SUBSCRIBER INFO: subscriber.cpp:867: [main][3:36:2066][path] Ignore empty state: owner# [3:35:2065], state# { Deleted: 1 Strong: 0 Version: DomainId: AbandonedSchemeShards: there are 0 elements }, other state# { Deleted: 1 Strong: 0 Version: DomainId: AbandonedSchemeShards: there are 0 elements } 2026-01-08T15:23:50.550888Z node 3 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:814: [main][3:36:2066][path] Handle NKikimrSchemeBoard.TEvNotify { PathId: [OwnerId: 1, LocalPathId: 1] Version: 0 }: sender# [3:35:2065] 2026-01-08T15:23:50.550931Z node 3 :SCHEME_BOARD_SUBSCRIBER ERROR: subscriber.cpp:818: [main][3:36:2066][path] Suspicious NKikimrSchemeBoard.TEvNotify { PathId: [OwnerId: 1, LocalPathId: 1] Version: 0 }: sender# [3:35:2065] >> TDNSGatewaySuite::ShouldFilterDNSAddressedBasedOnProvidedProtocolANYCase [GOOD] >> BSCMovePDisk::PDiskMove_ErasureNone >> TBsVDiskGC::GCPutManyBarriersNoSync [GOOD] |82.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/ut_blobstorage/acceleration.cpp >> TBsVDiskGC::TGCManyVPutsCompactGCAllTest |82.6%| [TM] {BAZEL_UPLOAD} ydb/core/base/ut_board_subscriber/unittest ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/scheme_board/ut_subscriber/unittest >> TSubscriberTest::Sync [GOOD] Test command err: 2026-01-08T15:23:49.987406Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:1023: [main][1:36:2066][path] Handle {EvResolveReplicasList ReplicaGroups: [{Replicas: [[1:24339059:0], [1:1099535966835:0], [1:2199047594611:0]] WriteOnly: 0 State: 0}]} 2026-01-08T15:23:49.989079Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:359: [replica][1:40:2066][path] Handle NKikimrSchemeBoard.TEvNotify { Path: path Version: 0 }: sender# [1:3:2050] 2026-01-08T15:23:49.989160Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:359: [replica][1:41:2066][path] Handle NKikimrSchemeBoard.TEvNotify { Path: path Version: 0 }: sender# [1:6:2053] 2026-01-08T15:23:49.989187Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:359: [replica][1:42:2066][path] Handle NKikimrSchemeBoard.TEvNotify { Path: path Version: 0 }: sender# [1:9:2056] 2026-01-08T15:23:49.989234Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:814: [main][1:36:2066][path] Handle NKikimrSchemeBoard.TEvNotify { Path: path Version: 0 }: sender# [1:37:2066] 2026-01-08T15:23:49.989279Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:814: [main][1:36:2066][path] Handle NKikimrSchemeBoard.TEvNotify { Path: path Version: 0 }: sender# [1:38:2066] 2026-01-08T15:23:49.989310Z node 1 :SCHEME_BOARD_SUBSCRIBER NOTICE: subscriber.cpp:849: [main][1:36:2066][path] Set up state: owner# [1:35:2065], state# { Deleted: 1 Strong: 0 Version: DomainId: AbandonedSchemeShards: there are 0 elements } 2026-01-08T15:23:49.989346Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:814: [main][1:36:2066][path] Handle NKikimrSchemeBoard.TEvNotify { Path: path Version: 0 }: sender# [1:39:2066] 2026-01-08T15:23:49.989375Z node 1 :SCHEME_BOARD_SUBSCRIBER INFO: 
subscriber.cpp:867: [main][1:36:2066][path] Ignore empty state: owner# [1:35:2065], state# { Deleted: 1 Strong: 0 Version: DomainId: AbandonedSchemeShards: there are 0 elements }, other state# { Deleted: 1 Strong: 0 Version: DomainId: AbandonedSchemeShards: there are 0 elements } 2026-01-08T15:23:49.989627Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:359: [replica][1:40:2066][path] Handle NKikimrSchemeBoard.TEvNotify { Path: path Version: 0 }: sender# [1:3:2050] 2026-01-08T15:23:49.989695Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:814: [main][1:36:2066][path] Handle NKikimrSchemeBoard.TEvNotify { Path: path Version: 0 }: sender# [1:37:2066] 2026-01-08T15:23:49.989739Z node 1 :SCHEME_BOARD_SUBSCRIBER NOTICE: subscriber.cpp:855: [main][1:36:2066][path] Update to strong state: owner# [1:35:2065], state# { Deleted: 1 Strong: 0 Version: DomainId: AbandonedSchemeShards: there are 0 elements }, new state# { Deleted: 1 Strong: 1 Version: DomainId: AbandonedSchemeShards: there are 0 elements } 2026-01-08T15:23:49.989908Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:359: [replica][1:41:2066][path] Handle NKikimrSchemeBoard.TEvNotify { Path: path Version: 0 }: sender# [1:6:2053] 2026-01-08T15:23:49.989978Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:814: [main][1:36:2066][path] Handle NKikimrSchemeBoard.TEvNotify { Path: path Version: 0 }: sender# [1:38:2066] 2026-01-08T15:23:49.990022Z node 1 :SCHEME_BOARD_SUBSCRIBER INFO: subscriber.cpp:867: [main][1:36:2066][path] Ignore empty state: owner# [1:35:2065], state# { Deleted: 1 Strong: 1 Version: DomainId: AbandonedSchemeShards: there are 0 elements }, other state# { Deleted: 1 Strong: 1 Version: DomainId: AbandonedSchemeShards: there are 0 elements } 2026-01-08T15:23:50.508021Z node 3 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:1023: [main][3:37:2067][path] Handle {EvResolveReplicasList ReplicaGroups: [{Replicas: [[3:24339059:0], [3:1099535966835:0], [3:2199047594611:0]] WriteOnly: 0 State: 0}]} 2026-01-08T15:23:50.508598Z node 3 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:359: [replica][3:41:2067][path] Handle NKikimrSchemeBoard.TEvNotify { Path: path PathId: [OwnerId: 1, LocalPathId: 1] Version: 1 }: sender# [3:3:2050] 2026-01-08T15:23:50.508658Z node 3 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:359: [replica][3:42:2067][path] Handle NKikimrSchemeBoard.TEvNotify { Path: path PathId: [OwnerId: 1, LocalPathId: 1] Version: 1 }: sender# [3:6:2053] 2026-01-08T15:23:50.508694Z node 3 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:359: [replica][3:43:2067][path] Handle NKikimrSchemeBoard.TEvNotify { Path: path PathId: [OwnerId: 1, LocalPathId: 1] Version: 1 }: sender# [3:9:2056] 2026-01-08T15:23:50.508757Z node 3 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:814: [main][3:37:2067][path] Handle NKikimrSchemeBoard.TEvNotify { Path: path PathId: [OwnerId: 1, LocalPathId: 1] Version: 1 }: sender# [3:38:2067] 2026-01-08T15:23:50.508803Z node 3 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:814: [main][3:37:2067][path] Handle NKikimrSchemeBoard.TEvNotify { Path: path PathId: [OwnerId: 1, LocalPathId: 1] Version: 1 }: sender# [3:39:2067] 2026-01-08T15:23:50.508856Z node 3 :SCHEME_BOARD_SUBSCRIBER NOTICE: subscriber.cpp:849: [main][3:37:2067][path] Set up state: owner# [3:35:2065], state# { Deleted: 0 Strong: 1 Version: (PathId: [OwnerId: 1, LocalPathId: 1], Version: 1) DomainId: AbandonedSchemeShards: there are 0 elements } 2026-01-08T15:23:50.508980Z node 3 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:814: 
[main][3:37:2067][path] Handle NKikimrSchemeBoard.TEvNotify { Path: path PathId: [OwnerId: 1, LocalPathId: 1] Version: 1 }: sender# [3:40:2067] 2026-01-08T15:23:50.509041Z node 3 :SCHEME_BOARD_SUBSCRIBER INFO: subscriber.cpp:867: [main][3:37:2067][path] Path was already updated: owner# [3:35:2065], state# { Deleted: 0 Strong: 1 Version: (PathId: [OwnerId: 1, LocalPathId: 1], Version: 1) DomainId: AbandonedSchemeShards: there are 0 elements }, other state# { Deleted: 0 Strong: 1 Version: (PathId: [OwnerId: 1, LocalPathId: 1], Version: 1) DomainId: AbandonedSchemeShards: there are 0 elements } 2026-01-08T15:23:50.509147Z node 3 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:889: [main][3:37:2067][path] Handle NKikimr::NSchemeBoard::NInternalEvents::TEvSyncRequest: sender# [3:35:2065], cookie# 1 2026-01-08T15:23:50.509227Z node 3 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:375: [replica][3:41:2067][path] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: path }: sender# [3:38:2067], cookie# 1 2026-01-08T15:23:50.509291Z node 3 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:375: [replica][3:42:2067][path] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: path }: sender# [3:39:2067], cookie# 1 2026-01-08T15:23:50.509327Z node 3 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:375: [replica][3:43:2067][path] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: path }: sender# [3:40:2067], cookie# 1 2026-01-08T15:23:50.509452Z node 3 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:384: [replica][3:41:2067][path] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 1 Partial: 0 Cluster State: { } }: sender# [3:3:2050], cookie# 1 2026-01-08T15:23:50.509502Z node 3 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:384: [replica][3:42:2067][path] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 1 Partial: 0 Cluster State: { } }: sender# [3:6:2053], cookie# 1 2026-01-08T15:23:50.509529Z node 3 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:384: [replica][3:43:2067][path] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 1 Partial: 0 Cluster State: { } }: sender# [3:9:2056], cookie# 1 2026-01-08T15:23:50.509600Z node 3 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:919: [main][3:37:2067][path] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 1 Partial: 0 Cluster State: { } }: sender# [3:38:2067], cookie# 1 2026-01-08T15:23:50.509642Z node 3 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:986: [main][3:37:2067][path] Sync is in progress: cookie# 1, ring group# 0, size# 3, half# 1, successes# 1, failures# 0 2026-01-08T15:23:50.509679Z node 3 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:919: [main][3:37:2067][path] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 1 Partial: 0 Cluster State: { } }: sender# [3:39:2067], cookie# 1 2026-01-08T15:23:50.509717Z node 3 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:999: [main][3:37:2067][path] Sync is done in the ring group: cookie# 1, ring group# 0, size# 3, half# 1, successes# 2, failures# 0, partial# 0 2026-01-08T15:23:50.509774Z node 3 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:919: [main][3:37:2067][path] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 1 Partial: 0 Cluster State: { } }: sender# [3:40:2067], cookie# 1 2026-01-08T15:23:50.509797Z node 3 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:925: [main][3:37:2067][path] Sync cookie mismatch: sender# [3:40:2067], cookie# 1, current cookie# 0 |82.6%| [CC] {default-linux-x86_64, release, asan} 
$(S)/ydb/core/blobstorage/ut_blobstorage/deadlines.cpp ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/scheme_board/ut_subscriber/unittest >> TSubscriberTest::ReconnectOnFailure [GOOD] Test command err: 2026-01-08T15:23:50.237739Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:1023: [main][1:36:2066][path] Handle {EvResolveReplicasList ReplicaGroups: [{Replicas: [[1:24339059:0], [1:1099535966835:0], [1:2199047594611:0]] WriteOnly: 0 State: 0}]} 2026-01-08T15:23:50.239514Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:359: [replica][1:40:2066][path] Handle NKikimrSchemeBoard.TEvNotify { Path: path Version: 0 }: sender# [1:3:2050] 2026-01-08T15:23:50.239611Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:359: [replica][1:41:2066][path] Handle NKikimrSchemeBoard.TEvNotify { Path: path Version: 0 }: sender# [1:6:2053] 2026-01-08T15:23:50.239646Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:359: [replica][1:42:2066][path] Handle NKikimrSchemeBoard.TEvNotify { Path: path Version: 0 }: sender# [1:9:2056] 2026-01-08T15:23:50.239698Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:814: [main][1:36:2066][path] Handle NKikimrSchemeBoard.TEvNotify { Path: path Version: 0 }: sender# [1:37:2066] 2026-01-08T15:23:50.239762Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:814: [main][1:36:2066][path] Handle NKikimrSchemeBoard.TEvNotify { Path: path Version: 0 }: sender# [1:38:2066] 2026-01-08T15:23:50.239794Z node 1 :SCHEME_BOARD_SUBSCRIBER NOTICE: subscriber.cpp:849: [main][1:36:2066][path] Set up state: owner# [1:35:2065], state# { Deleted: 1 Strong: 0 Version: DomainId: AbandonedSchemeShards: there are 0 elements } 2026-01-08T15:23:50.239836Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:814: [main][1:36:2066][path] Handle NKikimrSchemeBoard.TEvNotify { Path: path Version: 0 }: sender# [1:39:2066] 2026-01-08T15:23:50.239873Z node 1 :SCHEME_BOARD_SUBSCRIBER INFO: subscriber.cpp:867: [main][1:36:2066][path] Ignore empty state: owner# [1:35:2065], state# { Deleted: 1 Strong: 0 Version: DomainId: AbandonedSchemeShards: there are 0 elements }, other state# { Deleted: 1 Strong: 0 Version: DomainId: AbandonedSchemeShards: there are 0 elements } 2026-01-08T15:23:50.240299Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:359: [replica][1:40:2066][path] Handle NKikimrSchemeBoard.TEvNotify { Path: path PathId: [OwnerId: 1, LocalPathId: 1] Version: 1 }: sender# [1:3:2050] 2026-01-08T15:23:50.240356Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:814: [main][1:36:2066][path] Handle NKikimrSchemeBoard.TEvNotify { Path: path PathId: [OwnerId: 1, LocalPathId: 1] Version: 1 }: sender# [1:37:2066] 2026-01-08T15:23:50.240409Z node 1 :SCHEME_BOARD_SUBSCRIBER NOTICE: subscriber.cpp:855: [main][1:36:2066][path] Update to strong state: owner# [1:35:2065], state# { Deleted: 1 Strong: 0 Version: DomainId: AbandonedSchemeShards: there are 0 elements }, new state# { Deleted: 0 Strong: 1 Version: (PathId: [OwnerId: 1, LocalPathId: 1], Version: 1) DomainId: AbandonedSchemeShards: there are 0 elements } 2026-01-08T15:23:50.738332Z node 4 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:1023: [main][4:36:2066][path] Handle {EvResolveReplicasList ReplicaGroups: [{Replicas: [[3:24339059:0], [3:1099535966835:0], [3:2199047594611:0]] WriteOnly: 0 State: 0}]} 2026-01-08T15:23:50.739749Z node 4 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:359: [replica][4:40:2066][path] Handle NKikimrSchemeBoard.TEvNotify { Path: path Version: 0 }: sender# [3:3:2050] 
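Annotation: the "Sync is in progress" / "Sync is done in the ring group" counters in the sync traces above (size# 3, half# 1, successes#, failures#) follow a simple per-ring-group majority rule. A hedged reconstruction of that arithmetic, where TQuorumTracker is a hypothetical name rather than the actual subscriber.cpp type:

    // Sketch of the quorum counters printed by the subscriber: a sync
    // finishes successfully once more than half of the ring group
    // answered in full, and finishes as partial once more than half
    // failed, since a majority is then impossible.
    #include <cstddef>
    #include <iostream>

    struct TQuorumTracker {
        size_t Size = 0;      // replicas in the ring group (size#)
        size_t Successes = 0; // full TEvSyncVersionResponse replies
        size_t Failures = 0;  // Partial: 1 replies (replica unreachable)

        size_t Half() const { return Size / 2; }           // half#
        bool Done() const { return Successes > Half(); }   // majority ok
        bool Partial() const { return Failures > Half(); } // incomplete
        bool Finished() const { return Done() || Partial(); }
    };

    int main() {
        TQuorumTracker g{3};            // size# 3, half# 1
        g.Successes = 2;                // successes# 2, failures# 0
        std::cout << g.Done() << '\n';  // 1 -> "Sync is done", partial# 0

        TQuorumTracker h{3};
        h.Successes = 1;                // successes# 1
        h.Failures = 2;                 // failures# 2
        std::cout << h.Partial() << '\n'; // 1 -> partial# 1, incomplete
    }

With size# 3 and half# 1, two full responses complete the sync (cookie# 12345 above), while two failures complete it as partial and trigger the "Sync is incomplete in one of the ring groups" warning (cookie# 12346).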
2026-01-08T15:23:50.739839Z node 4 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:359: [replica][4:41:2066][path] Handle NKikimrSchemeBoard.TEvNotify { Path: path Version: 0 }: sender# [3:6:2053] 2026-01-08T15:23:50.739911Z node 4 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:359: [replica][4:42:2066][path] Handle NKikimrSchemeBoard.TEvNotify { Path: path Version: 0 }: sender# [3:9:2056] 2026-01-08T15:23:50.740044Z node 4 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:814: [main][4:36:2066][path] Handle NKikimrSchemeBoard.TEvNotify { Path: path Version: 0 }: sender# [4:37:2066] 2026-01-08T15:23:50.740110Z node 4 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:814: [main][4:36:2066][path] Handle NKikimrSchemeBoard.TEvNotify { Path: path Version: 0 }: sender# [4:38:2066] 2026-01-08T15:23:50.740160Z node 4 :SCHEME_BOARD_SUBSCRIBER NOTICE: subscriber.cpp:849: [main][4:36:2066][path] Set up state: owner# [4:35:2065], state# { Deleted: 1 Strong: 0 Version: DomainId: AbandonedSchemeShards: there are 0 elements } 2026-01-08T15:23:50.740233Z node 4 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:814: [main][4:36:2066][path] Handle NKikimrSchemeBoard.TEvNotify { Path: path Version: 0 }: sender# [4:39:2066] 2026-01-08T15:23:50.740284Z node 4 :SCHEME_BOARD_SUBSCRIBER INFO: subscriber.cpp:867: [main][4:36:2066][path] Ignore empty state: owner# [4:35:2065], state# { Deleted: 1 Strong: 0 Version: DomainId: AbandonedSchemeShards: there are 0 elements }, other state# { Deleted: 1 Strong: 0 Version: DomainId: AbandonedSchemeShards: there are 0 elements } 2026-01-08T15:23:50.740765Z node 4 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:814: [main][4:36:2066][path] Handle NKikimrSchemeBoard.TEvNotify { Path: path Version: 0 }: sender# [4:37:2066] 2026-01-08T15:23:50.740828Z node 4 :SCHEME_BOARD_SUBSCRIBER INFO: subscriber.cpp:867: [main][4:36:2066][path] Ignore empty state: owner# [4:35:2065], state# { Deleted: 1 Strong: 0 Version: DomainId: AbandonedSchemeShards: there are 0 elements }, other state# { Deleted: 1 Strong: 0 Version: DomainId: AbandonedSchemeShards: there are 0 elements } 2026-01-08T15:23:50.740873Z node 4 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:814: [main][4:36:2066][path] Handle NKikimrSchemeBoard.TEvNotify { Path: path Version: 0 }: sender# [4:38:2066] 2026-01-08T15:23:50.741001Z node 4 :SCHEME_BOARD_SUBSCRIBER INFO: subscriber.cpp:867: [main][4:36:2066][path] Ignore empty state: owner# [4:35:2065], state# { Deleted: 1 Strong: 0 Version: DomainId: AbandonedSchemeShards: there are 0 elements }, other state# { Deleted: 1 Strong: 0 Version: DomainId: AbandonedSchemeShards: there are 0 elements } 2026-01-08T15:23:50.741080Z node 4 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:814: [main][4:36:2066][path] Handle NKikimrSchemeBoard.TEvNotify { Path: path Version: 0 }: sender# [4:39:2066] 2026-01-08T15:23:50.741136Z node 4 :SCHEME_BOARD_SUBSCRIBER INFO: subscriber.cpp:867: [main][4:36:2066][path] Ignore empty state: owner# [4:35:2065], state# { Deleted: 1 Strong: 0 Version: DomainId: AbandonedSchemeShards: there are 0 elements }, other state# { Deleted: 1 Strong: 0 Version: DomainId: AbandonedSchemeShards: there are 0 elements } 2026-01-08T15:23:50.752494Z node 4 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:359: [replica][4:47:2066][path] Handle NKikimrSchemeBoard.TEvNotify { Path: path Version: 0 }: sender# [3:3:2050] 2026-01-08T15:23:50.752641Z node 4 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:814: [main][4:36:2066][path] Handle NKikimrSchemeBoard.TEvNotify { Path: path Version: 0 }: 
sender# [4:37:2066] 2026-01-08T15:23:50.752723Z node 4 :SCHEME_BOARD_SUBSCRIBER INFO: subscriber.cpp:867: [main][4:36:2066][path] Ignore empty state: owner# [4:35:2065], state# { Deleted: 1 Strong: 0 Version: DomainId: AbandonedSchemeShards: there are 0 elements }, other state# { Deleted: 1 Strong: 0 Version: DomainId: AbandonedSchemeShards: there are 0 elements } 2026-01-08T15:23:50.752861Z node 4 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:359: [replica][4:48:2066][path] Handle NKikimrSchemeBoard.TEvNotify { Path: path Version: 0 }: sender# [3:6:2053] 2026-01-08T15:23:50.752909Z node 4 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:359: [replica][4:49:2066][path] Handle NKikimrSchemeBoard.TEvNotify { Path: path Version: 0 }: sender# [3:9:2056] 2026-01-08T15:23:50.752969Z node 4 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:814: [main][4:36:2066][path] Handle NKikimrSchemeBoard.TEvNotify { Path: path Version: 0 }: sender# [4:38:2066] 2026-01-08T15:23:50.753020Z node 4 :SCHEME_BOARD_SUBSCRIBER INFO: subscriber.cpp:867: [main][4:36:2066][path] Ignore empty state: owner# [4:35:2065], state# { Deleted: 1 Strong: 0 Version: DomainId: AbandonedSchemeShards: there are 0 elements }, other state# { Deleted: 1 Strong: 0 Version: DomainId: AbandonedSchemeShards: there are 0 elements } 2026-01-08T15:23:50.753124Z node 4 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:814: [main][4:36:2066][path] Handle NKikimrSchemeBoard.TEvNotify { Path: path Version: 0 }: sender# [4:39:2066] 2026-01-08T15:23:50.753159Z node 4 :SCHEME_BOARD_SUBSCRIBER INFO: subscriber.cpp:867: [main][4:36:2066][path] Ignore empty state: owner# [4:35:2065], state# { Deleted: 1 Strong: 0 Version: DomainId: AbandonedSchemeShards: there are 0 elements }, other state# { Deleted: 1 Strong: 0 Version: DomainId: AbandonedSchemeShards: there are 0 elements } 2026-01-08T15:23:50.753648Z node 4 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:359: [replica][4:47:2066][path] Handle NKikimrSchemeBoard.TEvNotify { Path: path PathId: [OwnerId: 1, LocalPathId: 1] Version: 1 }: sender# [3:3:2050] 2026-01-08T15:23:50.753721Z node 4 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:814: [main][4:36:2066][path] Handle NKikimrSchemeBoard.TEvNotify { Path: path PathId: [OwnerId: 1, LocalPathId: 1] Version: 1 }: sender# [4:37:2066] 2026-01-08T15:23:50.753784Z node 4 :SCHEME_BOARD_SUBSCRIBER NOTICE: subscriber.cpp:855: [main][4:36:2066][path] Update to strong state: owner# [4:35:2065], state# { Deleted: 1 Strong: 0 Version: DomainId: AbandonedSchemeShards: there are 0 elements }, new state# { Deleted: 0 Strong: 1 Version: (PathId: [OwnerId: 1, LocalPathId: 1], Version: 1) DomainId: AbandonedSchemeShards: there are 0 elements } |82.6%| [TA] {RESULT} $(B)/ydb/core/control/ut/test-results/unittest/{meta.json ... results_accumulator.log} >> VDiskBalancing::TwoPartsOnOneNodeTest_Block42_HugeBlob |82.6%| [TM] {BAZEL_UPLOAD} ydb/core/blobstorage/vdisk/hulldb/generic/ut/unittest |82.6%| [TM] {BAZEL_UPLOAD} ydb/core/blobstorage/ut_blobstorage/ut_cluster_balancing/unittest |82.6%| [TA] $(B)/ydb/core/blobstorage/ut_blobstorage/ut_check_integrity/test-results/unittest/{meta.json ... results_accumulator.log} |82.6%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/vdisk/synclog/ut/unittest >> SemiSortedDeltaAndVarLengthCodec::BasicTest64 [GOOD] |82.6%| [TA] $(B)/ydb/core/cms/console/validators/ut/test-results/unittest/{meta.json ... 
results_accumulator.log} >> TBlobStorageSyncLogDsk::SeveralChunks [GOOD] >> TBlobStorageSyncLogDsk::OverlappingPages_OnePageIndexed [GOOD] >> TBlobStorageSyncLogDsk::OverlappingPages_SeveralPagesIndexed [GOOD] >> TBlobStorageSyncLogDsk::TrimLog [GOOD] >> CodecsTest::Basic [GOOD] >> CodecsTest::NaturalNumbersAndZero >> TBsVDiskRepl1::ReadOnly [GOOD] >> SemiSortedDeltaAndVarLengthCodec::Random32 >> CodecsTest::NaturalNumbersAndZero [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/scheme_board/ut_subscriber/unittest >> TSubscriberTest::SyncWithOutdatedReplica [GOOD] Test command err: 2026-01-08T15:23:50.368116Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:1023: [main][1:36:2066][path] Handle {EvResolveReplicasList ReplicaGroups: [{Replicas: [[1:24339059:0], [1:1099535966835:0], [1:2199047594611:0]] WriteOnly: 0 State: 0}]} 2026-01-08T15:23:50.370722Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:359: [replica][1:40:2066][path] Handle NKikimrSchemeBoard.TEvNotify { Path: path Version: 0 }: sender# [1:3:2050] 2026-01-08T15:23:50.370856Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:359: [replica][1:41:2066][path] Handle NKikimrSchemeBoard.TEvNotify { Path: path Version: 0 }: sender# [1:6:2053] 2026-01-08T15:23:50.370905Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:359: [replica][1:42:2066][path] Handle NKikimrSchemeBoard.TEvNotify { Path: path Version: 0 }: sender# [1:9:2056] 2026-01-08T15:23:50.370976Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:814: [main][1:36:2066][path] Handle NKikimrSchemeBoard.TEvNotify { Path: path Version: 0 }: sender# [1:37:2066] 2026-01-08T15:23:50.371039Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:814: [main][1:36:2066][path] Handle NKikimrSchemeBoard.TEvNotify { Path: path Version: 0 }: sender# [1:38:2066] 2026-01-08T15:23:50.371083Z node 1 :SCHEME_BOARD_SUBSCRIBER NOTICE: subscriber.cpp:849: [main][1:36:2066][path] Set up state: owner# [1:35:2065], state# { Deleted: 1 Strong: 0 Version: DomainId: AbandonedSchemeShards: there are 0 elements } 2026-01-08T15:23:50.371139Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:814: [main][1:36:2066][path] Handle NKikimrSchemeBoard.TEvNotify { Path: path Version: 0 }: sender# [1:39:2066] 2026-01-08T15:23:50.371184Z node 1 :SCHEME_BOARD_SUBSCRIBER INFO: subscriber.cpp:867: [main][1:36:2066][path] Ignore empty state: owner# [1:35:2065], state# { Deleted: 1 Strong: 0 Version: DomainId: AbandonedSchemeShards: there are 0 elements }, other state# { Deleted: 1 Strong: 0 Version: DomainId: AbandonedSchemeShards: there are 0 elements } 2026-01-08T15:23:50.371404Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:889: [main][1:36:2066][path] Handle NKikimr::NSchemeBoard::NInternalEvents::TEvSyncRequest: sender# [1:35:2065], cookie# 1 2026-01-08T15:23:50.371540Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:375: [replica][1:40:2066][path] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: path }: sender# [1:37:2066], cookie# 1 2026-01-08T15:23:50.371590Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:375: [replica][1:41:2066][path] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: path }: sender# [1:38:2066], cookie# 1 2026-01-08T15:23:50.371646Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:375: [replica][1:42:2066][path] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: path }: sender# [1:39:2066], cookie# 1 2026-01-08T15:23:50.371776Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:384: 
[replica][1:41:2066][path] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 0 Partial: 0 Cluster State: { } }: sender# [1:6:2053], cookie# 1 2026-01-08T15:23:50.371815Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:384: [replica][1:42:2066][path] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 0 Partial: 0 Cluster State: { } }: sender# [1:9:2056], cookie# 1 2026-01-08T15:23:50.371895Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:919: [main][1:36:2066][path] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 0 Partial: 1 Cluster State: { } }: sender# [1:37:2066], cookie# 1 2026-01-08T15:23:50.371941Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:986: [main][1:36:2066][path] Sync is in progress: cookie# 1, ring group# 0, size# 3, half# 1, successes# 0, failures# 1 2026-01-08T15:23:50.371986Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:814: [main][1:36:2066][path] Handle NKikimrSchemeBoard.TEvNotify { Path: path Version: 0 }: sender# [1:37:2066] 2026-01-08T15:23:50.372032Z node 1 :SCHEME_BOARD_SUBSCRIBER INFO: subscriber.cpp:867: [main][1:36:2066][path] Ignore empty state: owner# [1:35:2065], state# { Deleted: 1 Strong: 0 Version: DomainId: AbandonedSchemeShards: there are 0 elements }, other state# { Deleted: 1 Strong: 0 Version: DomainId: AbandonedSchemeShards: there are 0 elements } 2026-01-08T15:23:50.372096Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:919: [main][1:36:2066][path] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 0 Partial: 0 Cluster State: { } }: sender# [1:38:2066], cookie# 1 2026-01-08T15:23:50.372125Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:986: [main][1:36:2066][path] Sync is in progress: cookie# 1, ring group# 0, size# 3, half# 1, successes# 1, failures# 1 2026-01-08T15:23:50.372157Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:919: [main][1:36:2066][path] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 0 Partial: 0 Cluster State: { } }: sender# [1:39:2066], cookie# 1 2026-01-08T15:23:50.372194Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:999: [main][1:36:2066][path] Sync is done in the ring group: cookie# 1, ring group# 0, size# 3, half# 1, successes# 2, failures# 1, partial# 0 2026-01-08T15:23:50.372311Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:889: [main][1:36:2066][path] Handle NKikimr::NSchemeBoard::NInternalEvents::TEvSyncRequest: sender# [1:35:2065], cookie# 2 2026-01-08T15:23:50.372413Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:919: [main][1:36:2066][path] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 0 Partial: 1 Cluster State: { } }: sender# [1:37:2066], cookie# 2 2026-01-08T15:23:50.372442Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:986: [main][1:36:2066][path] Sync is in progress: cookie# 2, ring group# 0, size# 3, half# 1, successes# 0, failures# 1 2026-01-08T15:23:50.372476Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:375: [replica][1:41:2066][path] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: path }: sender# [1:38:2066], cookie# 2 2026-01-08T15:23:50.372532Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:375: [replica][1:42:2066][path] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: path }: sender# [1:39:2066], cookie# 2 2026-01-08T15:23:50.372605Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:384: [replica][1:42:2066][path] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 0 Partial: 0 Cluster State: { } }: sender# 
[1:9:2056], cookie# 2 2026-01-08T15:23:50.372718Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:919: [main][1:36:2066][path] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 0 Partial: 1 Cluster State: { } }: sender# [1:38:2066], cookie# 2 2026-01-08T15:23:50.372750Z node 1 :SCHEME_BOARD_SUBSCRIBER NOTICE: subscriber.cpp:1001: [main][1:36:2066][path] Sync is done in the ring group: cookie# 2, ring group# 0, size# 3, half# 1, successes# 0, failures# 2, partial# 1 2026-01-08T15:23:50.372783Z node 1 :SCHEME_BOARD_SUBSCRIBER WARN: subscriber.cpp:1006: [main][1:36:2066][path] Sync is incomplete in one of the ring groups: cookie# 2 2026-01-08T15:23:50.372878Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:814: [main][1:36:2066][path] Handle NKikimrSchemeBoard.TEvNotify { Path: path Version: 0 }: sender# [1:38:2066] 2026-01-08T15:23:50.372926Z node 1 :SCHEME_BOARD_SUBSCRIBER INFO: subscriber.cpp:867: [main][1:36:2066][path] Ignore empty state: owner# [1:35:2065], state# { Deleted: 1 Strong: 0 Version: DomainId: AbandonedSchemeShards: there are 0 elements }, other state# { Deleted: 1 Strong: 0 Version: DomainId: AbandonedSchemeShards: there are 0 elements } 2026-01-08T15:23:50.372971Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:919: [main][1:36:2066][path] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 0 Partial: 0 Cluster State: { } }: sender# [1:39:2066], cookie# 2 2026-01-08T15:23:50.372995Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:925: [main][1:36:2066][path] Sync cookie mismatch: sender# [1:39:2066], cookie# 2, current cookie# 0 2026-01-08T15:23:50.373083Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:889: [main][1:36:2066][path] Handle NKikimr::NSchemeBoard::NInternalEvents::TEvSyncRequest: sender# [1:35:2065], cookie# 3 2026-01-08T15:23:50.373189Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:919: [main][1:36:2066][path] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 0 Partial: 1 Cluster State: { } }: sender# [1:37:2066], cookie# 3 2026-01-08T15:23:50.373228Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:986: [main][1:36:2066][path] Sync is in progress: cookie# 3, ring group# 0, size# 3, half# 1, successes# 0, failures# 1 2026-01-08T15:23:50.373273Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:919: [main][1:36:2066][path] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 0 Partial: 1 Cluster State: { } }: sender# [1:38:2066], cookie# 3 2026-01-08T15:23:50.373314Z node 1 :SCHEME_BOARD_SUBSCRIBER NOTICE: subscriber.cpp:1001: [main][1:36:2066][path] Sync is done in the ring group: cookie# 3, ring group# 0, size# 3, half# 1, successes# 0, failures# 2, partial# 1 2026-01-08T15:23:50.373342Z node 1 :SCHEME_BOARD_SUBSCRIBER WARN: subscriber.cpp:1006: [main][1:36:2066][path] Sync is incomplete in one of the ring groups: cookie# 3 2026-01-08T15:23:50.373385Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:375: [replica][1:42:2066][path] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: path }: sender# [1:39:2066], cookie# 3 2026-01-08T15:23:50.373509Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:919: [main][1:36:2066][path] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 0 Partial: 1 Cluster State: { } }: sender# [1:39:2066], cookie# 3 2026-01-08T15:23:50.373541Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:925: [main][1:36:2066][path] Sync cookie mismatch: sender# [1:39:2066], cookie# 3, current cookie# 0 2026-01-08T15:23:50.373599Z node 1 
:SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:814: [main][1:36:2066][path] Handle NKikimrSchemeBoard.TEvNotify { Path: path Version: 0 }: sender# [1:39:2066] 2026-01-08T15:23:50.373641Z node 1 :SCHEME_BOARD_SUBSCRIBER INFO: subscriber.cpp:867: [main][1:36:2066][path] Ignore empty state: owner# [1:35:2065], state# { Deleted: 1 Strong: 0 Version: DomainId: AbandonedSchemeShards: there are 0 elements }, other state# { Deleted: 1 Strong: 0 Version: DomainId: AbandonedSchemeShards: there are 0 elements } 2026-01-08T15:23:50.874173Z node 3 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:1023: [main][3:37:2067][path] Handle {EvResolveReplicasList ReplicaGroups: [{Replicas: [[3:24339059:0], [3:1099535966835:0], [3:2199047594611:0]] WriteOnly: 0 State: 0}]} 2026-01-08T15:23:50.874989Z node 3 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:359: [replica][3:41:2067][path] Handle NKikimrSchemeBoard.TEvNotify { Path: path PathId: [OwnerId: 1, LocalPathId: 2] Version: 2 }: sender# [3:3:2050] 2026-01-08T15:23:50.875087Z node 3 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:359: [replica][3:42:2067][path] Handle NKikimrSchemeBoard.TEvNotify { Path: path PathId: [OwnerId: 2, LocalPathId: 2] Version: 1 }: sender# [3:6:2053] 2026-01-08T15:23:50.875138Z node 3 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:359: [replica][3:43:2067][path] Handle NKikimrSchemeBoard.TEvNotify { Path: path PathId: [OwnerId: 2, LocalPathId: 2] Version: 1 }: sender# [3:9:2056] 2026-01-08T15:23:50.875229Z node 3 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:814: [main][3:37:2067][path] Handle NKikimrSchemeBoard.TEvNotify { Path: path PathId: [OwnerId: 1, LocalPathId: 2] Version: 2 }: sender# [3:38:2067] 2026-01-08T15:23:50.875299Z node 3 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:814: [main][3:37:2067][path] Handle NKikimrSchemeBoard.TEvNotify { Path: path PathId: [OwnerId: 2, LocalPathId: 2] Version: 1 }: sender# [3:39:2067] 2026-01-08T15:23:50.875369Z node 3 :SCHEME_BOARD_SUBSCRIBER NOTICE: subscriber.cpp:849: [main][3:37:2067][path] Set up state: owner# [3:35:2065], state# { Deleted: 0 Strong: 1 Version: (PathId: [OwnerId: 2, LocalPathId: 2], Version: 1) DomainId: [OwnerId: 2, LocalPathId: 1] AbandonedSchemeShards: there are 0 elements } 2026-01-08T15:23:50.875500Z node 3 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:814: [main][3:37:2067][path] Handle NKikimrSchemeBoard.TEvNotify { Path: path PathId: [OwnerId: 2, LocalPathId: 2] Version: 1 }: sender# [3:40:2067] 2026-01-08T15:23:50.875571Z node 3 :SCHEME_BOARD_SUBSCRIBER INFO: subscriber.cpp:867: [main][3:37:2067][path] Path was already updated: owner# [3:35:2065], state# { Deleted: 0 Strong: 1 Version: (PathId: [OwnerId: 2, LocalPathId: 2], Version: 1) DomainId: [OwnerId: 2, LocalPathId: 1] AbandonedSchemeShards: there are 0 elements }, other state# { Deleted: 0 Strong: 1 Version: (PathId: [OwnerId: 2, LocalPathId: 2], Version: 1) DomainId: [OwnerId: 2, LocalPathId: 1] AbandonedSchemeShards: there are 0 elements } 2026-01-08T15:23:50.875721Z node 3 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:889: [main][3:37:2067][path] Handle NKikimr::NSchemeBoard::NInternalEvents::TEvSyncRequest: sender# [3:35:2065], cookie# 1 2026-01-08T15:23:50.875854Z node 3 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:375: [replica][3:41:2067][path] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: path }: sender# [3:38:2067], cookie# 1 2026-01-08T15:23:50.875939Z node 3 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:375: [replica][3:42:2067][path] Handle 
NKikimrSchemeBoard.TEvSyncVersionRequest { Path: path }: sender# [3:39:2067], cookie# 1 2026-01-08T15:23:50.876002Z node 3 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:375: [replica][3:43:2067][path] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: path }: sender# [3:40:2067], cookie# 1 2026-01-08T15:23:50.876091Z node 3 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:384: [replica][3:41:2067][path] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 Cluster State: { } }: sender# [3:3:2050], cookie# 1 2026-01-08T15:23:50.876149Z node 3 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:384: [replica][3:42:2067][path] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 1 Partial: 0 Cluster State: { } }: sender# [3:6:2053], cookie# 1 2026-01-08T15:23:50.876190Z node 3 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:384: [replica][3:43:2067][path] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 1 Partial: 0 Cluster State: { } }: sender# [3:9:2056], cookie# 1 2026-01-08T15:23:50.876258Z node 3 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:919: [main][3:37:2067][path] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 Cluster State: { } }: sender# [3:38:2067], cookie# 1 2026-01-08T15:23:50.876323Z node 3 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:986: [main][3:37:2067][path] Sync is in progress: cookie# 1, ring group# 0, size# 3, half# 1, successes# 1, failures# 0 2026-01-08T15:23:50.876377Z node 3 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:919: [main][3:37:2067][path] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 1 Partial: 0 Cluster State: { } }: sender# [3:39:2067], cookie# 1 2026-01-08T15:23:50.876416Z node 3 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:999: [main][3:37:2067][path] Sync is done in the ring group: cookie# 1, ring group# 0, size# 3, half# 1, successes# 2, failures# 0, partial# 0 2026-01-08T15:23:50.876475Z node 3 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:919: [main][3:37:2067][path] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 1 Partial: 0 Cluster State: { } }: sender# [3:40:2067], cookie# 1 2026-01-08T15:23:50.876501Z node 3 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:925: [main][3:37:2067][path] Sync cookie mismatch: sender# [3:40:2067], cookie# 1, current cookie# 0 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/scheme_board/ut_subscriber/unittest >> TSubscriberSyncQuorumTest::OneWriteOnlyRingGroup [GOOD] Test command err: ... waiting for initial path lookups 2026-01-08T15:23:50.381638Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:1023: [main][1:19:2066][TestPath] Handle {EvResolveReplicasList ReplicaGroups: [{Replicas: [[1:24339059:0], [1:1099535966835:0], [1:2199047594611:0]] WriteOnly: 0 State: 0}]} 2026-01-08T15:23:50.384290Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:359: [replica][1:23:2066][TestPath] Handle NKikimrSchemeBoard.TEvNotify { Path: TestPath Version: 0 }: sender# [1:4:2051] 2026-01-08T15:23:50.384403Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:359: [replica][1:24:2066][TestPath] Handle NKikimrSchemeBoard.TEvNotify { Path: TestPath Version: 0 }: sender# [1:5:2052] 2026-01-08T15:23:50.384473Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:359: [replica][1:25:2066][TestPath] Handle NKikimrSchemeBoard.TEvNotify { Path: TestPath Version: 0 }: sender# [1:6:2053] ... 
blocking NKikimr::NSchemeBoard::NInternalEvents::TEvNotifyBuilder from SCHEME_BOARD_SUBSCRIBER_PROXY_ACTOR to SCHEME_BOARD_SUBSCRIBER_ACTOR cookie 0 ... blocking NKikimr::NSchemeBoard::NInternalEvents::TEvNotifyBuilder from SCHEME_BOARD_SUBSCRIBER_PROXY_ACTOR to SCHEME_BOARD_SUBSCRIBER_ACTOR cookie 0 ... blocking NKikimr::NSchemeBoard::NInternalEvents::TEvNotifyBuilder from SCHEME_BOARD_SUBSCRIBER_PROXY_ACTOR to SCHEME_BOARD_SUBSCRIBER_ACTOR cookie 0 ... waiting for initial path lookups (done) ... unblocking NKikimr::NSchemeBoard::NInternalEvents::TEvNotifyBuilder from SCHEME_BOARD_SUBSCRIBER_PROXY_ACTOR to SCHEME_BOARD_SUBSCRIBER_ACTOR ... unblocking NKikimr::NSchemeBoard::NInternalEvents::TEvNotifyBuilder from SCHEME_BOARD_SUBSCRIBER_PROXY_ACTOR to SCHEME_BOARD_SUBSCRIBER_ACTOR ... unblocking NKikimr::NSchemeBoard::NInternalEvents::TEvNotifyBuilder from SCHEME_BOARD_SUBSCRIBER_PROXY_ACTOR to SCHEME_BOARD_SUBSCRIBER_ACTOR Poisoning replica: [1:2199047594611:0] 2026-01-08T15:23:50.384750Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:889: [main][1:19:2066][TestPath] Handle NKikimr::NSchemeBoard::NInternalEvents::TEvSyncRequest: sender# [1:18:2065], cookie# 12345 2026-01-08T15:23:50.384837Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:814: [main][1:19:2066][TestPath] Handle NKikimrSchemeBoard.TEvNotify { Path: TestPath Version: 0 }: sender# [1:20:2066] 2026-01-08T15:23:50.384977Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:814: [main][1:19:2066][TestPath] Handle NKikimrSchemeBoard.TEvNotify { Path: TestPath Version: 0 }: sender# [1:21:2066] 2026-01-08T15:23:50.385048Z node 1 :SCHEME_BOARD_SUBSCRIBER NOTICE: subscriber.cpp:849: [main][1:19:2066][TestPath] Set up state: owner# [1:18:2065], state# { Deleted: 1 Strong: 0 Version: DomainId: AbandonedSchemeShards: there are 0 elements } 2026-01-08T15:23:50.385111Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:814: [main][1:19:2066][TestPath] Handle NKikimrSchemeBoard.TEvNotify { Path: TestPath Version: 0 }: sender# [1:22:2066] 2026-01-08T15:23:50.385164Z node 1 :SCHEME_BOARD_SUBSCRIBER INFO: subscriber.cpp:867: [main][1:19:2066][TestPath] Ignore empty state: owner# [1:18:2065], state# { Deleted: 1 Strong: 0 Version: DomainId: AbandonedSchemeShards: there are 0 elements }, other state# { Deleted: 1 Strong: 0 Version: DomainId: AbandonedSchemeShards: there are 0 elements } 2026-01-08T15:23:50.385284Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:375: [replica][1:23:2066][TestPath] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: TestPath }: sender# [1:20:2066], cookie# 12345 2026-01-08T15:23:50.385357Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:375: [replica][1:24:2066][TestPath] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: TestPath }: sender# [1:21:2066], cookie# 12345 2026-01-08T15:23:50.385403Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:375: [replica][1:25:2066][TestPath] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: TestPath }: sender# [1:22:2066], cookie# 12345 2026-01-08T15:23:50.385512Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:384: [replica][1:23:2066][TestPath] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 0 Partial: 0 Cluster State: { } }: sender# [1:4:2051], cookie# 12345 2026-01-08T15:23:50.385597Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:384: [replica][1:24:2066][TestPath] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 0 Partial: 0 Cluster State: { } }: sender# [1:5:2052], cookie# 12345 
2026-01-08T15:23:50.385744Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:919: [main][1:19:2066][TestPath] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 0 Partial: 0 Cluster State: { } }: sender# [1:20:2066], cookie# 12345 2026-01-08T15:23:50.385802Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:986: [main][1:19:2066][TestPath] Sync is in progress: cookie# 12345, ring group# 0, size# 3, half# 1, successes# 1, failures# 0 2026-01-08T15:23:50.385867Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:919: [main][1:19:2066][TestPath] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 0 Partial: 0 Cluster State: { } }: sender# [1:21:2066], cookie# 12345 2026-01-08T15:23:50.385910Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:999: [main][1:19:2066][TestPath] Sync is done in the ring group: cookie# 12345, ring group# 0, size# 3, half# 1, successes# 2, failures# 0, partial# 0 2026-01-08T15:23:50.385971Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:919: [main][1:19:2066][TestPath] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 0 Partial: 1 Cluster State: { } }: sender# [1:22:2066], cookie# 12345 2026-01-08T15:23:50.385998Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:925: [main][1:19:2066][TestPath] Sync cookie mismatch: sender# [1:22:2066], cookie# 12345, current cookie# 0 2026-01-08T15:23:50.386037Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:814: [main][1:19:2066][TestPath] Handle NKikimrSchemeBoard.TEvNotify { Path: TestPath Version: 0 }: sender# [1:22:2066] 2026-01-08T15:23:50.386099Z node 1 :SCHEME_BOARD_SUBSCRIBER INFO: subscriber.cpp:867: [main][1:19:2066][TestPath] Ignore empty state: owner# [1:18:2065], state# { Deleted: 1 Strong: 0 Version: DomainId: AbandonedSchemeShards: there are 0 elements }, other state# { Deleted: 1 Strong: 0 Version: DomainId: AbandonedSchemeShards: there are 0 elements } Poisoning replica: [1:1099535966835:0] whose ring group state is: 0 2026-01-08T15:23:50.386232Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:889: [main][1:19:2066][TestPath] Handle NKikimr::NSchemeBoard::NInternalEvents::TEvSyncRequest: sender# [1:18:2065], cookie# 12346 2026-01-08T15:23:50.386394Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:375: [replica][1:23:2066][TestPath] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: TestPath }: sender# [1:20:2066], cookie# 12346 2026-01-08T15:23:50.386461Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:375: [replica][1:24:2066][TestPath] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: TestPath }: sender# [1:21:2066], cookie# 12346 2026-01-08T15:23:50.386545Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:919: [main][1:19:2066][TestPath] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 0 Partial: 1 Cluster State: { } }: sender# [1:22:2066], cookie# 12346 2026-01-08T15:23:50.386587Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:986: [main][1:19:2066][TestPath] Sync is in progress: cookie# 12346, ring group# 0, size# 3, half# 1, successes# 0, failures# 1 2026-01-08T15:23:50.386672Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:384: [replica][1:23:2066][TestPath] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 0 Partial: 0 Cluster State: { } }: sender# [1:4:2051], cookie# 12346 2026-01-08T15:23:50.386805Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:919: [main][1:19:2066][TestPath] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 0 Partial: 0 Cluster State: { } 
}: sender# [1:20:2066], cookie# 12346 2026-01-08T15:23:50.386838Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:986: [main][1:19:2066][TestPath] Sync is in progress: cookie# 12346, ring group# 0, size# 3, half# 1, successes# 1, failures# 1 2026-01-08T15:23:50.386876Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:919: [main][1:19:2066][TestPath] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 0 Partial: 1 Cluster State: { } }: sender# [1:21:2066], cookie# 12346 2026-01-08T15:23:50.386912Z node 1 :SCHEME_BOARD_SUBSCRIBER NOTICE: subscriber.cpp:1001: [main][1:19:2066][TestPath] Sync is done in the ring group: cookie# 12346, ring group# 0, size# 3, half# 1, successes# 1, failures# 2, partial# 1 2026-01-08T15:23:50.386945Z node 1 :SCHEME_BOARD_SUBSCRIBER WARN: subscriber.cpp:1006: [main][1:19:2066][TestPath] Sync is incomplete in one of the ring groups: cookie# 12346 2026-01-08T15:23:50.387019Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:814: [main][1:19:2066][TestPath] Handle NKikimrSchemeBoard.TEvNotify { Path: TestPath Version: 0 }: sender# [1:21:2066] 2026-01-08T15:23:50.387075Z node 1 :SCHEME_BOARD_SUBSCRIBER INFO: subscriber.cpp:867: [main][1:19:2066][TestPath] Ignore empty state: owner# [1:18:2065], state# { Deleted: 1 Strong: 0 Version: DomainId: AbandonedSchemeShards: there are 0 elements }, other state# { Deleted: 1 Strong: 0 Version: DomainId: AbandonedSchemeShards: there are 0 elements } ... waiting for initial path lookups 2026-01-08T15:23:50.674191Z node 2 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:1023: [main][2:28:2075][TestPath] Handle {EvResolveReplicasList ReplicaGroups: [{Replicas: [[2:24339059:0], [2:1099535966835:0], [2:2199047594611:0]] WriteOnly: 0 State: 0}, {Replicas: [[2:3298559222387:0], [2:4398070850163:0], [2:5497582477939:0]] WriteOnly: 0 State: 1}]} 2026-01-08T15:23:50.675094Z node 2 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:359: [replica][2:35:2075][TestPath] Handle NKikimrSchemeBoard.TEvNotify { Path: TestPath Version: 0 }: sender# [2:4:2051] 2026-01-08T15:23:50.675154Z node 2 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:359: [replica][2:36:2075][TestPath] Handle NKikimrSchemeBoard.TEvNotify { Path: TestPath Version: 0 }: sender# [2:5:2052] 2026-01-08T15:23:50.675189Z node 2 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:359: [replica][2:37:2075][TestPath] Handle NKikimrSchemeBoard.TEvNotify { Path: TestPath Version: 0 }: sender# [2:6:2053] 2026-01-08T15:23:50.675213Z node 2 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:359: [replica][2:38:2075][TestPath] Handle NKikimrSchemeBoard.TEvNotify { Path: TestPath Version: 0 }: sender# [2:13:2060] 2026-01-08T15:23:50.675237Z node 2 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:359: [replica][2:39:2075][TestPath] Handle NKikimrSchemeBoard.TEvNotify { Path: TestPath Version: 0 }: sender# [2:14:2061] 2026-01-08T15:23:50.675279Z node 2 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:359: [replica][2:40:2075][TestPath] Handle NKikimrSchemeBoard.TEvNotify { Path: TestPath Version: 0 }: sender# [2:15:2062] ... blocking NKikimr::NSchemeBoard::NInternalEvents::TEvNotifyBuilder from SCHEME_BOARD_SUBSCRIBER_PROXY_ACTOR to SCHEME_BOARD_SUBSCRIBER_ACTOR cookie 0 ... blocking NKikimr::NSchemeBoard::NInternalEvents::TEvNotifyBuilder from SCHEME_BOARD_SUBSCRIBER_PROXY_ACTOR to SCHEME_BOARD_SUBSCRIBER_ACTOR cookie 0 ... blocking NKikimr::NSchemeBoard::NInternalEvents::TEvNotifyBuilder from SCHEME_BOARD_SUBSCRIBER_PROXY_ACTOR to SCHEME_BOARD_SUBSCRIBER_ACTOR cookie 0 ... 
blocking NKikimr::NSchemeBoard::NInternalEvents::TEvNotifyBuilder from SCHEME_BOARD_SUBSCRIBER_PROXY_ACTOR to SCHEME_BOARD_SUBSCRIBER_ACTOR cookie 0 ... blocking NKikimr::NSchemeBoard::NInternalEvents::TEvNotifyBuilder from SCHEME_BOARD_SUBSCRIBER_PROXY_ACTOR to SCHEME_BOARD_SUBSCRIBER_ACTOR cookie ... ses# 0, failures# 1 2026-01-08T15:23:50.678638Z node 2 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:919: [main][2:28:2075][TestPath] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 0 Partial: 0 Cluster State: { } }: sender# [2:32:2075], cookie# 12346 2026-01-08T15:23:50.678671Z node 2 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:999: [main][2:28:2075][TestPath] Sync is done in the ring group: cookie# 12346, ring group# 0, size# 3, half# 1, successes# 2, failures# 1, partial# 0 2026-01-08T15:23:50.678693Z node 2 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:986: [main][2:28:2075][TestPath] Sync is in progress: cookie# 12346, ring group# 1, size# 3, half# 1, successes# 1, failures# 1 2026-01-08T15:23:50.678731Z node 2 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:919: [main][2:28:2075][TestPath] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 0 Partial: 1 Cluster State: { } }: sender# [2:33:2075], cookie# 12346 2026-01-08T15:23:50.678764Z node 2 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:999: [main][2:28:2075][TestPath] Sync is done in the ring group: cookie# 12346, ring group# 0, size# 3, half# 1, successes# 2, failures# 1, partial# 0 2026-01-08T15:23:50.678791Z node 2 :SCHEME_BOARD_SUBSCRIBER NOTICE: subscriber.cpp:1001: [main][2:28:2075][TestPath] Sync is done in the ring group: cookie# 12346, ring group# 1, size# 3, half# 1, successes# 1, failures# 2, partial# 1 2026-01-08T15:23:50.678849Z node 2 :SCHEME_BOARD_SUBSCRIBER WARN: subscriber.cpp:1006: [main][2:28:2075][TestPath] Sync is incomplete in one of the ring groups: cookie# 12346 2026-01-08T15:23:50.678925Z node 2 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:814: [main][2:28:2075][TestPath] Handle NKikimrSchemeBoard.TEvNotify { Path: TestPath Version: 0 }: sender# [2:33:2075] 2026-01-08T15:23:50.678976Z node 2 :SCHEME_BOARD_SUBSCRIBER INFO: subscriber.cpp:867: [main][2:28:2075][TestPath] Ignore empty state: owner# [2:27:2074], state# { Deleted: 1 Strong: 0 Version: DomainId: AbandonedSchemeShards: there are 0 elements }, other state# { Deleted: 1 Strong: 0 Version: DomainId: AbandonedSchemeShards: there are 0 elements } ... waiting for initial path lookups 2026-01-08T15:23:50.944538Z node 3 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:1023: [main][3:28:2075][TestPath] Handle {EvResolveReplicasList ReplicaGroups: [{Replicas: [[3:24339059:0], [3:1099535966835:0], [3:2199047594611:0]] WriteOnly: 0 State: 0}, {Replicas: [[3:3298559222387:0], [3:4398070850163:0], [3:5497582477939:0]] WriteOnly: 1 State: 0}]} 2026-01-08T15:23:50.945028Z node 3 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:359: [replica][3:32:2075][TestPath] Handle NKikimrSchemeBoard.TEvNotify { Path: TestPath Version: 0 }: sender# [3:4:2051] 2026-01-08T15:23:50.945072Z node 3 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:359: [replica][3:33:2075][TestPath] Handle NKikimrSchemeBoard.TEvNotify { Path: TestPath Version: 0 }: sender# [3:5:2052] 2026-01-08T15:23:50.945103Z node 3 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:359: [replica][3:34:2075][TestPath] Handle NKikimrSchemeBoard.TEvNotify { Path: TestPath Version: 0 }: sender# [3:6:2053] ... 
blocking NKikimr::NSchemeBoard::NInternalEvents::TEvNotifyBuilder from SCHEME_BOARD_SUBSCRIBER_PROXY_ACTOR to SCHEME_BOARD_SUBSCRIBER_ACTOR cookie 0 ... blocking NKikimr::NSchemeBoard::NInternalEvents::TEvNotifyBuilder from SCHEME_BOARD_SUBSCRIBER_PROXY_ACTOR to SCHEME_BOARD_SUBSCRIBER_ACTOR cookie 0 ... blocking NKikimr::NSchemeBoard::NInternalEvents::TEvNotifyBuilder from SCHEME_BOARD_SUBSCRIBER_PROXY_ACTOR to SCHEME_BOARD_SUBSCRIBER_ACTOR cookie 0 ... waiting for initial path lookups (done) ... unblocking NKikimr::NSchemeBoard::NInternalEvents::TEvNotifyBuilder from SCHEME_BOARD_SUBSCRIBER_PROXY_ACTOR to SCHEME_BOARD_SUBSCRIBER_ACTOR ... unblocking NKikimr::NSchemeBoard::NInternalEvents::TEvNotifyBuilder from SCHEME_BOARD_SUBSCRIBER_PROXY_ACTOR to SCHEME_BOARD_SUBSCRIBER_ACTOR ... unblocking NKikimr::NSchemeBoard::NInternalEvents::TEvNotifyBuilder from SCHEME_BOARD_SUBSCRIBER_PROXY_ACTOR to SCHEME_BOARD_SUBSCRIBER_ACTOR Poisoning replica: [3:2199047594611:0] Poisoning replica: [3:3298559222387:0] Poisoning replica: [3:4398070850163:0] Poisoning replica: [3:5497582477939:0] 2026-01-08T15:23:50.945309Z node 3 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:889: [main][3:28:2075][TestPath] Handle NKikimr::NSchemeBoard::NInternalEvents::TEvSyncRequest: sender# [3:27:2074], cookie# 12345 2026-01-08T15:23:50.945383Z node 3 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:814: [main][3:28:2075][TestPath] Handle NKikimrSchemeBoard.TEvNotify { Path: TestPath Version: 0 }: sender# [3:29:2075] 2026-01-08T15:23:50.945453Z node 3 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:814: [main][3:28:2075][TestPath] Handle NKikimrSchemeBoard.TEvNotify { Path: TestPath Version: 0 }: sender# [3:30:2075] 2026-01-08T15:23:50.945526Z node 3 :SCHEME_BOARD_SUBSCRIBER NOTICE: subscriber.cpp:849: [main][3:28:2075][TestPath] Set up state: owner# [3:27:2074], state# { Deleted: 1 Strong: 0 Version: DomainId: AbandonedSchemeShards: there are 0 elements } 2026-01-08T15:23:50.945587Z node 3 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:814: [main][3:28:2075][TestPath] Handle NKikimrSchemeBoard.TEvNotify { Path: TestPath Version: 0 }: sender# [3:31:2075] 2026-01-08T15:23:50.945617Z node 3 :SCHEME_BOARD_SUBSCRIBER INFO: subscriber.cpp:867: [main][3:28:2075][TestPath] Ignore empty state: owner# [3:27:2074], state# { Deleted: 1 Strong: 0 Version: DomainId: AbandonedSchemeShards: there are 0 elements }, other state# { Deleted: 1 Strong: 0 Version: DomainId: AbandonedSchemeShards: there are 0 elements } 2026-01-08T15:23:50.945680Z node 3 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:375: [replica][3:32:2075][TestPath] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: TestPath }: sender# [3:29:2075], cookie# 12345 2026-01-08T15:23:50.945729Z node 3 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:375: [replica][3:33:2075][TestPath] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: TestPath }: sender# [3:30:2075], cookie# 12345 2026-01-08T15:23:50.945775Z node 3 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:375: [replica][3:34:2075][TestPath] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: TestPath }: sender# [3:31:2075], cookie# 12345 2026-01-08T15:23:50.945841Z node 3 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:384: [replica][3:32:2075][TestPath] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 0 Partial: 0 Cluster State: { } }: sender# [3:4:2051], cookie# 12345 2026-01-08T15:23:50.945883Z node 3 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:384: [replica][3:33:2075][TestPath] Handle 
NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 0 Partial: 0 Cluster State: { } }: sender# [3:5:2052], cookie# 12345 2026-01-08T15:23:50.945981Z node 3 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:919: [main][3:28:2075][TestPath] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 0 Partial: 0 Cluster State: { } }: sender# [3:29:2075], cookie# 12345 2026-01-08T15:23:50.946015Z node 3 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:986: [main][3:28:2075][TestPath] Sync is in progress: cookie# 12345, ring group# 0, size# 3, half# 1, successes# 1, failures# 0 2026-01-08T15:23:50.946060Z node 3 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:919: [main][3:28:2075][TestPath] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 0 Partial: 0 Cluster State: { } }: sender# [3:30:2075], cookie# 12345 2026-01-08T15:23:50.946089Z node 3 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:999: [main][3:28:2075][TestPath] Sync is done in the ring group: cookie# 12345, ring group# 0, size# 3, half# 1, successes# 2, failures# 0, partial# 0 2026-01-08T15:23:50.946130Z node 3 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:919: [main][3:28:2075][TestPath] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 0 Partial: 1 Cluster State: { } }: sender# [3:31:2075], cookie# 12345 2026-01-08T15:23:50.946153Z node 3 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:925: [main][3:28:2075][TestPath] Sync cookie mismatch: sender# [3:31:2075], cookie# 12345, current cookie# 0 2026-01-08T15:23:50.946188Z node 3 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:814: [main][3:28:2075][TestPath] Handle NKikimrSchemeBoard.TEvNotify { Path: TestPath Version: 0 }: sender# [3:31:2075] 2026-01-08T15:23:50.946217Z node 3 :SCHEME_BOARD_SUBSCRIBER INFO: subscriber.cpp:867: [main][3:28:2075][TestPath] Ignore empty state: owner# [3:27:2074], state# { Deleted: 1 Strong: 0 Version: DomainId: AbandonedSchemeShards: there are 0 elements }, other state# { Deleted: 1 Strong: 0 Version: DomainId: AbandonedSchemeShards: there are 0 elements } Poisoning replica: [3:24339059:0] whose ring group state is: 0 2026-01-08T15:23:50.946344Z node 3 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:889: [main][3:28:2075][TestPath] Handle NKikimr::NSchemeBoard::NInternalEvents::TEvSyncRequest: sender# [3:27:2074], cookie# 12346 2026-01-08T15:23:50.946452Z node 3 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:375: [replica][3:32:2075][TestPath] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: TestPath }: sender# [3:29:2075], cookie# 12346 2026-01-08T15:23:50.946501Z node 3 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:375: [replica][3:33:2075][TestPath] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: TestPath }: sender# [3:30:2075], cookie# 12346 2026-01-08T15:23:50.946552Z node 3 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:919: [main][3:28:2075][TestPath] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 0 Partial: 1 Cluster State: { } }: sender# [3:31:2075], cookie# 12346 2026-01-08T15:23:50.946583Z node 3 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:986: [main][3:28:2075][TestPath] Sync is in progress: cookie# 12346, ring group# 0, size# 3, half# 1, successes# 0, failures# 1 2026-01-08T15:23:50.946648Z node 3 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:384: [replica][3:33:2075][TestPath] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 0 Partial: 0 Cluster State: { } }: sender# [3:5:2052], cookie# 12346 2026-01-08T15:23:50.946691Z node 3 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:919: 
[main][3:28:2075][TestPath] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 0 Partial: 1 Cluster State: { } }: sender# [3:29:2075], cookie# 12346 2026-01-08T15:23:50.946713Z node 3 :SCHEME_BOARD_SUBSCRIBER NOTICE: subscriber.cpp:1001: [main][3:28:2075][TestPath] Sync is done in the ring group: cookie# 12346, ring group# 0, size# 3, half# 1, successes# 0, failures# 2, partial# 1 2026-01-08T15:23:50.946744Z node 3 :SCHEME_BOARD_SUBSCRIBER WARN: subscriber.cpp:1006: [main][3:28:2075][TestPath] Sync is incomplete in one of the ring groups: cookie# 12346 2026-01-08T15:23:50.946795Z node 3 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:814: [main][3:28:2075][TestPath] Handle NKikimrSchemeBoard.TEvNotify { Path: TestPath Version: 0 }: sender# [3:29:2075] 2026-01-08T15:23:50.946834Z node 3 :SCHEME_BOARD_SUBSCRIBER INFO: subscriber.cpp:867: [main][3:28:2075][TestPath] Ignore empty state: owner# [3:27:2074], state# { Deleted: 1 Strong: 0 Version: DomainId: AbandonedSchemeShards: there are 0 elements }, other state# { Deleted: 1 Strong: 0 Version: DomainId: AbandonedSchemeShards: there are 0 elements } 2026-01-08T15:23:50.946863Z node 3 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:919: [main][3:28:2075][TestPath] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 0 Partial: 0 Cluster State: { } }: sender# [3:30:2075], cookie# 12346 2026-01-08T15:23:50.946897Z node 3 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:925: [main][3:28:2075][TestPath] Sync cookie mismatch: sender# [3:30:2075], cookie# 12346, current cookie# 0 >> CodecsTest::LargeAndRepeated [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/scheme_board/ut_subscriber/unittest >> TSubscriberSyncQuorumTest::ReconfigurationWithCurrentSyncRequest [GOOD] >> NaiveFragmentWriterTest::Basic [GOOD] Test command err: ... 
waiting for initial path lookups 2026-01-08T15:23:50.408856Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:1023: [main][1:19:2066][TestPath] Handle {EvResolveReplicasList ReplicaGroups: [{Replicas: [[1:24339059:0], [1:1099535966835:0], [1:2199047594611:0]] WriteOnly: 0 State: 0}]} 2026-01-08T15:23:50.410690Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1070: [1:4:2051] Handle NKikimrSchemeBoard.TEvSubscribe { Path: TestPath DomainOwnerId: 1 }: sender# [1:23:2066] 2026-01-08T15:23:50.410747Z node 1 :SCHEME_BOARD_REPLICA INFO: replica.cpp:529: [1:4:2051] Upsert description: path# TestPath 2026-01-08T15:23:50.410850Z node 1 :SCHEME_BOARD_REPLICA INFO: replica.cpp:655: [1:4:2051] Subscribe: subscriber# [1:23:2066], path# TestPath, domainOwnerId# 1, capabilities# AckNotifications: true 2026-01-08T15:23:50.411023Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1070: [1:5:2052] Handle NKikimrSchemeBoard.TEvSubscribe { Path: TestPath DomainOwnerId: 1 }: sender# [1:24:2066] 2026-01-08T15:23:50.411048Z node 1 :SCHEME_BOARD_REPLICA INFO: replica.cpp:529: [1:5:2052] Upsert description: path# TestPath 2026-01-08T15:23:50.411095Z node 1 :SCHEME_BOARD_REPLICA INFO: replica.cpp:655: [1:5:2052] Subscribe: subscriber# [1:24:2066], path# TestPath, domainOwnerId# 1, capabilities# AckNotifications: true 2026-01-08T15:23:50.411214Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1070: [1:6:2053] Handle NKikimrSchemeBoard.TEvSubscribe { Path: TestPath DomainOwnerId: 1 }: sender# [1:25:2066] 2026-01-08T15:23:50.411251Z node 1 :SCHEME_BOARD_REPLICA INFO: replica.cpp:529: [1:6:2053] Upsert description: path# TestPath 2026-01-08T15:23:50.411288Z node 1 :SCHEME_BOARD_REPLICA INFO: replica.cpp:655: [1:6:2053] Subscribe: subscriber# [1:25:2066], path# TestPath, domainOwnerId# 1, capabilities# AckNotifications: true 2026-01-08T15:23:50.411345Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:359: [replica][1:23:2066][TestPath] Handle NKikimrSchemeBoard.TEvNotify { Path: TestPath Version: 0 }: sender# [1:4:2051] 2026-01-08T15:23:50.411396Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1098: [1:4:2051] Handle NKikimrSchemeBoard.TEvNotifyAck { Version: 0 }: sender# [1:23:2066] 2026-01-08T15:23:50.411446Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:359: [replica][1:24:2066][TestPath] Handle NKikimrSchemeBoard.TEvNotify { Path: TestPath Version: 0 }: sender# [1:5:2052] 2026-01-08T15:23:50.411489Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1098: [1:5:2052] Handle NKikimrSchemeBoard.TEvNotifyAck { Version: 0 }: sender# [1:24:2066] 2026-01-08T15:23:50.411528Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:359: [replica][1:25:2066][TestPath] Handle NKikimrSchemeBoard.TEvNotify { Path: TestPath Version: 0 }: sender# [1:6:2053] 2026-01-08T15:23:50.411571Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1098: [1:6:2053] Handle NKikimrSchemeBoard.TEvNotifyAck { Version: 0 }: sender# [1:25:2066] ... blocking NKikimr::NSchemeBoard::NInternalEvents::TEvNotifyBuilder from SCHEME_BOARD_SUBSCRIBER_PROXY_ACTOR to SCHEME_BOARD_SUBSCRIBER_ACTOR cookie 0 ... blocking NKikimr::NSchemeBoard::NInternalEvents::TEvNotifyBuilder from SCHEME_BOARD_SUBSCRIBER_PROXY_ACTOR to SCHEME_BOARD_SUBSCRIBER_ACTOR cookie 0 ... blocking NKikimr::NSchemeBoard::NInternalEvents::TEvNotifyBuilder from SCHEME_BOARD_SUBSCRIBER_PROXY_ACTOR to SCHEME_BOARD_SUBSCRIBER_ACTOR cookie 0 ... waiting for initial path lookups (done) ... 
unblocking NKikimr::NSchemeBoard::NInternalEvents::TEvNotifyBuilder from SCHEME_BOARD_SUBSCRIBER_PROXY_ACTOR to SCHEME_BOARD_SUBSCRIBER_ACTOR ... unblocking NKikimr::NSchemeBoard::NInternalEvents::TEvNotifyBuilder from SCHEME_BOARD_SUBSCRIBER_PROXY_ACTOR to SCHEME_BOARD_SUBSCRIBER_ACTOR ... unblocking NKikimr::NSchemeBoard::NInternalEvents::TEvNotifyBuilder from SCHEME_BOARD_SUBSCRIBER_PROXY_ACTOR to SCHEME_BOARD_SUBSCRIBER_ACTOR Poisoning replica: [1:2199047594611:0] 2026-01-08T15:23:50.411861Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:889: [main][1:19:2066][TestPath] Handle NKikimr::NSchemeBoard::NInternalEvents::TEvSyncRequest: sender# [1:18:2065], cookie# 12345 2026-01-08T15:23:50.411955Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:814: [main][1:19:2066][TestPath] Handle NKikimrSchemeBoard.TEvNotify { Path: TestPath Version: 0 }: sender# [1:20:2066] 2026-01-08T15:23:50.412130Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:814: [main][1:19:2066][TestPath] Handle NKikimrSchemeBoard.TEvNotify { Path: TestPath Version: 0 }: sender# [1:21:2066] 2026-01-08T15:23:50.412204Z node 1 :SCHEME_BOARD_SUBSCRIBER NOTICE: subscriber.cpp:849: [main][1:19:2066][TestPath] Set up state: owner# [1:18:2065], state# { Deleted: 1 Strong: 0 Version: DomainId: AbandonedSchemeShards: there are 0 elements } 2026-01-08T15:23:50.412263Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:814: [main][1:19:2066][TestPath] Handle NKikimrSchemeBoard.TEvNotify { Path: TestPath Version: 0 }: sender# [1:22:2066] 2026-01-08T15:23:50.412304Z node 1 :SCHEME_BOARD_SUBSCRIBER INFO: subscriber.cpp:867: [main][1:19:2066][TestPath] Ignore empty state: owner# [1:18:2065], state# { Deleted: 1 Strong: 0 Version: DomainId: AbandonedSchemeShards: there are 0 elements }, other state# { Deleted: 1 Strong: 0 Version: DomainId: AbandonedSchemeShards: there are 0 elements } 2026-01-08T15:23:50.412405Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:375: [replica][1:23:2066][TestPath] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: TestPath }: sender# [1:20:2066], cookie# 12345 2026-01-08T15:23:50.412463Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1137: [1:4:2051] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: TestPath }: sender# [1:23:2066], cookie# 12345 2026-01-08T15:23:50.412548Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:375: [replica][1:24:2066][TestPath] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: TestPath }: sender# [1:21:2066], cookie# 12345 2026-01-08T15:23:50.412586Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1137: [1:5:2052] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: TestPath }: sender# [1:24:2066], cookie# 12345 2026-01-08T15:23:50.412630Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:375: [replica][1:25:2066][TestPath] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: TestPath }: sender# [1:22:2066], cookie# 12345 2026-01-08T15:23:50.412702Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:384: [replica][1:23:2066][TestPath] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 0 Partial: 0 Cluster State: { } }: sender# [1:4:2051], cookie# 12345 2026-01-08T15:23:50.412754Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:384: [replica][1:24:2066][TestPath] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 0 Partial: 0 Cluster State: { } }: sender# [1:5:2052], cookie# 12345 2026-01-08T15:23:50.412895Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:919: [main][1:19:2066][TestPath] 
Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 0 Partial: 0 Cluster State: { } }: sender# [1:20:2066], cookie# 12345 2026-01-08T15:23:50.412940Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:986: [main][1:19:2066][TestPath] Sync is in progress: cookie# 12345, ring group# 0, size# 3, half# 1, successes# 1, failures# 0 2026-01-08T15:23:50.413011Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:919: [main][1:19:2066][TestPath] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 0 Partial: 0 Cluster State: { } }: sender# [1:21:2066], cookie# 12345 2026-01-08T15:23:50.413059Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:999: [main][1:19:2066][TestPath] Sync is done in the ring group: cookie# 12345, ring group# 0, size# 3, half# 1, successes# 2, failures# 0, partial# 0 2026-01-08T15:23:50.413119Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:919: [main][1:19:2066][TestPath] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 0 Partial: 1 Cluster State: { } }: sender# [1:22:2066], cookie# 12345 2026-01-08T15:23:50.413151Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:925: [main][1:19:2066][TestPath] Sync cookie mismatch: sender# [1:22:2066], cookie# 12345, current cookie# 0 2026-01-08T15:23:50.413199Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:814: [main][1:19:2066][TestPath] Handle NKikimrSchemeBoard.TEvNotify { Path: TestPath Version: 0 }: sender# [1:22:2066] 2026-01-08T15:23:50.413245Z node 1 :SCHEME_BOARD_SUBSCRIBER INFO: subscriber.cpp:867: [main][1:19:2066][TestPath] Ignore empty state: owner# [1:18:2065], state# { Deleted: 1 Strong: 0 Version: DomainId: AbandonedSchemeShards: there are 0 elements }, other state# { Deleted: 1 Strong: 0 Version: DomainId: AbandonedSchemeShards: there are 0 elements } Updating cluster state generation on replica: [1:24339059:0] 2026-01-08T15:23:50.413363Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:889: [main][1:19:2066][TestPath] Handle NKikimr::NSchemeBoard::NInternalEvents::TEvSyncRequest: sender# [1:18:2065], cookie# 12346 2026-01-08T15:23:50.413481Z node 1 :SCHEME_BOARD_REPLICA INFO: replica.cpp:1240: [1:4:2051] Handle {EvUpdateGroupConfig GroupConfig: empty BoardConfig: empty SchemeBoardConfig: {RingGroups# [0:{NToSelect# 3 Rings# [0:{[[1:24339059:0]]} 1:{[[1:1099535966835:0]]} 2:{[[1:2199047594611:0]]}}] StateStorageVersion# 0 CompatibleVersions# [] ClusterStateGeneration# 1 ClusterStateGuid# 0}} 2026-01-08T15:23:50.413630Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:375: [replica][1:23:2066][TestPath] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: TestPath }: sender# [1:20:2066], cookie# 12346 2026-01-08T15:23:50.413688Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1137: [1:4:2051] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: TestPath }: sender# [1:23:2066], cookie# 12346 2026-01-08T15:23:50.413810Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:375: [replica][1:24:2066][TestPath] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: TestPath }: sender# [1:21:2066], cookie# 12346 2026-01-08T15:23:50.413843Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1137: [1:5:2052] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: TestPath }: sender# [1:24:2066], cookie# 12346 2026-01-08T15:23:50.413897Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:919: [main][1:19:2066][TestPath] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 0 Partial: 1 Cluster State: { } }: sender# [1:22:2066], cookie# 12346 
2026-01-08T15:23:50.413939Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:986: [main][1:19:2066][TestPath] Sync is in progress: cookie# 12346, ring group# 0, size# 3, half# 1, successes# 0, failures# 1 2026-01-08T15:23:50.414031Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:384: [replica][1:23:2066][TestPath] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 0 Partial: 0 Cluster State: { Generation: 1 Guid: 0 } }: sender# [1:4:2051], cookie# 12346 2026-01-08T15:23:50.414075Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:384: [replica][1:24:2066][TestPath] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 0 Partial: 0 Cluster State: { } }: sender# [1:5:2052], cookie# 12346 2026-01-08T15:23:50.414155Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:919: [main][1:19:2066][TestPath] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 0 Partial: 0 Cluster State: { Generation: 1 Guid: 0 } }: sender# [1:20:2066], cookie# 12346 2026-01-08T15:23:50.414247Z node 1 :SCHEME_BOARD_SUBSCRIBER INFO: subscriber.cpp:952: [main][1:19:2066][TestPath] Cluster State mismatch in sync version response: sender# [1:20:2066], cookie# 12346, subscriber cluster state# {Generation: 0, GUID: 0}, replica cluster state# {Generation: 1 Guid: 0} 2026-01-08T15:23:50.414303Z node 1 :SCHEME_BOARD_S ... [TestPath] Handle {EvResolveReplicasList ReplicaGroups: [{Replicas: [[3:24339059:0], [3:1099535966835:0], [3:2199047594611:0]] WriteOnly: 0 State: 0}]} 2026-01-08T15:23:51.055164Z node 3 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:359: [replica][3:23:2066][TestPath] Handle NKikimrSchemeBoard.TEvNotify { Path: TestPath Version: 0 }: sender# [3:4:2051] 2026-01-08T15:23:51.055235Z node 3 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:359: [replica][3:24:2066][TestPath] Handle NKikimrSchemeBoard.TEvNotify { Path: TestPath Version: 0 }: sender# [3:5:2052] 2026-01-08T15:23:51.055331Z node 3 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:359: [replica][3:25:2066][TestPath] Handle NKikimrSchemeBoard.TEvNotify { Path: TestPath Version: 0 }: sender# [3:6:2053] ... blocking NKikimr::NSchemeBoard::NInternalEvents::TEvNotifyBuilder from SCHEME_BOARD_SUBSCRIBER_PROXY_ACTOR to SCHEME_BOARD_SUBSCRIBER_ACTOR cookie 0 ... blocking NKikimr::NSchemeBoard::NInternalEvents::TEvNotifyBuilder from SCHEME_BOARD_SUBSCRIBER_PROXY_ACTOR to SCHEME_BOARD_SUBSCRIBER_ACTOR cookie 0 ... blocking NKikimr::NSchemeBoard::NInternalEvents::TEvNotifyBuilder from SCHEME_BOARD_SUBSCRIBER_PROXY_ACTOR to SCHEME_BOARD_SUBSCRIBER_ACTOR cookie 0 ... waiting for initial path lookups (done) ... unblocking NKikimr::NSchemeBoard::NInternalEvents::TEvNotifyBuilder from SCHEME_BOARD_SUBSCRIBER_PROXY_ACTOR to SCHEME_BOARD_SUBSCRIBER_ACTOR ... unblocking NKikimr::NSchemeBoard::NInternalEvents::TEvNotifyBuilder from SCHEME_BOARD_SUBSCRIBER_PROXY_ACTOR to SCHEME_BOARD_SUBSCRIBER_ACTOR ... unblocking NKikimr::NSchemeBoard::NInternalEvents::TEvNotifyBuilder from SCHEME_BOARD_SUBSCRIBER_PROXY_ACTOR to SCHEME_BOARD_SUBSCRIBER_ACTOR 2026-01-08T15:23:51.055593Z node 3 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:889: [main][3:19:2066][TestPath] Handle NKikimr::NSchemeBoard::NInternalEvents::TEvSyncRequest: sender# [3:18:2065], cookie# 12345 ... 
waiting for some sync responses 2026-01-08T15:23:51.055665Z node 3 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:814: [main][3:19:2066][TestPath] Handle NKikimrSchemeBoard.TEvNotify { Path: TestPath Version: 0 }: sender# [3:20:2066] 2026-01-08T15:23:51.055731Z node 3 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:814: [main][3:19:2066][TestPath] Handle NKikimrSchemeBoard.TEvNotify { Path: TestPath Version: 0 }: sender# [3:21:2066] 2026-01-08T15:23:51.055788Z node 3 :SCHEME_BOARD_SUBSCRIBER NOTICE: subscriber.cpp:849: [main][3:19:2066][TestPath] Set up state: owner# [3:18:2065], state# { Deleted: 1 Strong: 0 Version: DomainId: AbandonedSchemeShards: there are 0 elements } 2026-01-08T15:23:51.055839Z node 3 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:814: [main][3:19:2066][TestPath] Handle NKikimrSchemeBoard.TEvNotify { Path: TestPath Version: 0 }: sender# [3:22:2066] 2026-01-08T15:23:51.055898Z node 3 :SCHEME_BOARD_SUBSCRIBER INFO: subscriber.cpp:867: [main][3:19:2066][TestPath] Ignore empty state: owner# [3:18:2065], state# { Deleted: 1 Strong: 0 Version: DomainId: AbandonedSchemeShards: there are 0 elements }, other state# { Deleted: 1 Strong: 0 Version: DomainId: AbandonedSchemeShards: there are 0 elements } 2026-01-08T15:23:51.055987Z node 3 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:375: [replica][3:23:2066][TestPath] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: TestPath }: sender# [3:20:2066], cookie# 12345 2026-01-08T15:23:51.056064Z node 3 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:375: [replica][3:24:2066][TestPath] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: TestPath }: sender# [3:21:2066], cookie# 12345 2026-01-08T15:23:51.056109Z node 3 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:375: [replica][3:25:2066][TestPath] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: TestPath }: sender# [3:22:2066], cookie# 12345 2026-01-08T15:23:51.056193Z node 3 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:384: [replica][3:23:2066][TestPath] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 0 Partial: 0 Cluster State: { } }: sender# [3:4:2051], cookie# 12345 2026-01-08T15:23:51.056238Z node 3 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:384: [replica][3:24:2066][TestPath] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 0 Partial: 0 Cluster State: { } }: sender# [3:5:2052], cookie# 12345 2026-01-08T15:23:51.056276Z node 3 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:384: [replica][3:25:2066][TestPath] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 0 Partial: 0 Cluster State: { } }: sender# [3:6:2053], cookie# 12345 ... blocking NKikimr::NSchemeBoard::NInternalEvents::TEvSyncVersionResponse from SCHEME_BOARD_SUBSCRIBER_PROXY_ACTOR to SCHEME_BOARD_SUBSCRIBER_ACTOR cookie 12345 ... blocking NKikimr::NSchemeBoard::NInternalEvents::TEvSyncVersionResponse from SCHEME_BOARD_SUBSCRIBER_PROXY_ACTOR to SCHEME_BOARD_SUBSCRIBER_ACTOR cookie 12345 ... blocking NKikimr::NSchemeBoard::NInternalEvents::TEvSyncVersionResponse from SCHEME_BOARD_SUBSCRIBER_PROXY_ACTOR to SCHEME_BOARD_SUBSCRIBER_ACTOR cookie 12345 ... waiting for some sync responses (done) ... 
unblocking NKikimr::NSchemeBoard::NInternalEvents::TEvSyncVersionResponse from SCHEME_BOARD_SUBSCRIBER_PROXY_ACTOR to SCHEME_BOARD_SUBSCRIBER_ACTOR 2026-01-08T15:23:51.056566Z node 3 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:919: [main][3:19:2066][TestPath] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 0 Partial: 0 Cluster State: { } }: sender# [3:20:2066], cookie# 12345 2026-01-08T15:23:51.056618Z node 3 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:986: [main][3:19:2066][TestPath] Sync is in progress: cookie# 12345, ring group# 0, size# 3, half# 1, successes# 1, failures# 0 2026-01-08T15:23:51.056736Z node 3 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:1023: [main][3:19:2066][TestPath] Handle {EvResolveReplicasList ReplicaGroups: [{Replicas: [[3:24339059:0], [3:1099535966835:0], [3:2199047594611:0]] WriteOnly: 0 State: 0}]} 2026-01-08T15:23:51.056775Z node 3 :SCHEME_BOARD_SUBSCRIBER INFO: subscriber.cpp:1041: [main][3:19:2066][TestPath] Delay current sync request: 12345 ... unblocking NKikimr::NSchemeBoard::NInternalEvents::TEvSyncVersionResponse from SCHEME_BOARD_SUBSCRIBER_PROXY_ACTOR to SCHEME_BOARD_SUBSCRIBER_ACTOR ... unblocking NKikimr::NSchemeBoard::NInternalEvents::TEvSyncVersionResponse from SCHEME_BOARD_SUBSCRIBER_PROXY_ACTOR to SCHEME_BOARD_SUBSCRIBER_ACTOR 2026-01-08T15:23:51.057229Z node 3 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:919: [main][3:19:2066][TestPath] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 0 Partial: 0 Cluster State: { } }: sender# [3:21:2066], cookie# 12345 2026-01-08T15:23:51.057286Z node 3 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:925: [main][3:19:2066][TestPath] Sync cookie mismatch: sender# [3:21:2066], cookie# 12345, current cookie# 0 2026-01-08T15:23:51.057337Z node 3 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:919: [main][3:19:2066][TestPath] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 0 Partial: 0 Cluster State: { } }: sender# [3:22:2066], cookie# 12345 2026-01-08T15:23:51.057361Z node 3 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:925: [main][3:19:2066][TestPath] Sync cookie mismatch: sender# [3:22:2066], cookie# 12345, current cookie# 0 2026-01-08T15:23:51.057852Z node 3 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:359: [replica][3:30:2066][TestPath] Handle NKikimrSchemeBoard.TEvNotify { Path: TestPath Version: 0 }: sender# [3:4:2051] 2026-01-08T15:23:51.057916Z node 3 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:359: [replica][3:31:2066][TestPath] Handle NKikimrSchemeBoard.TEvNotify { Path: TestPath Version: 0 }: sender# [3:5:2052] 2026-01-08T15:23:51.057979Z node 3 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:359: [replica][3:32:2066][TestPath] Handle NKikimrSchemeBoard.TEvNotify { Path: TestPath Version: 0 }: sender# [3:6:2053] 2026-01-08T15:23:51.058040Z node 3 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:814: [main][3:19:2066][TestPath] Handle NKikimrSchemeBoard.TEvNotify { Path: TestPath Version: 0 }: sender# [3:27:2066] 2026-01-08T15:23:51.058095Z node 3 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:814: [main][3:19:2066][TestPath] Handle NKikimrSchemeBoard.TEvNotify { Path: TestPath Version: 0 }: sender# [3:28:2066] 2026-01-08T15:23:51.058167Z node 3 :SCHEME_BOARD_SUBSCRIBER NOTICE: subscriber.cpp:849: [main][3:19:2066][TestPath] Set up state: owner# [3:18:2065], state# { Deleted: 1 Strong: 0 Version: DomainId: AbandonedSchemeShards: there are 0 elements } 2026-01-08T15:23:51.058231Z node 3 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:814: 
[main][3:19:2066][TestPath] Handle NKikimrSchemeBoard.TEvNotify { Path: TestPath Version: 0 }: sender# [3:29:2066] 2026-01-08T15:23:51.058272Z node 3 :SCHEME_BOARD_SUBSCRIBER INFO: subscriber.cpp:867: [main][3:19:2066][TestPath] Ignore empty state: owner# [3:18:2065], state# { Deleted: 1 Strong: 0 Version: DomainId: AbandonedSchemeShards: there are 0 elements }, other state# { Deleted: 1 Strong: 0 Version: DomainId: AbandonedSchemeShards: there are 0 elements } 2026-01-08T15:23:51.058372Z node 3 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:375: [replica][3:30:2066][TestPath] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: TestPath }: sender# [3:27:2066], cookie# 12345 2026-01-08T15:23:51.058473Z node 3 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:375: [replica][3:31:2066][TestPath] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: TestPath }: sender# [3:28:2066], cookie# 12345 2026-01-08T15:23:51.058529Z node 3 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:375: [replica][3:32:2066][TestPath] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: TestPath }: sender# [3:29:2066], cookie# 12345 2026-01-08T15:23:51.058587Z node 3 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:384: [replica][3:30:2066][TestPath] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 0 Partial: 0 Cluster State: { } }: sender# [3:4:2051], cookie# 12345 2026-01-08T15:23:51.058634Z node 3 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:384: [replica][3:31:2066][TestPath] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 0 Partial: 0 Cluster State: { } }: sender# [3:5:2052], cookie# 12345 2026-01-08T15:23:51.058689Z node 3 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:384: [replica][3:32:2066][TestPath] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 0 Partial: 0 Cluster State: { } }: sender# [3:6:2053], cookie# 12345 2026-01-08T15:23:51.058776Z node 3 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:919: [main][3:19:2066][TestPath] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 0 Partial: 0 Cluster State: { } }: sender# [3:27:2066], cookie# 12345 2026-01-08T15:23:51.058817Z node 3 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:986: [main][3:19:2066][TestPath] Sync is in progress: cookie# 12345, ring group# 0, size# 3, half# 1, successes# 1, failures# 0 2026-01-08T15:23:51.058874Z node 3 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:919: [main][3:19:2066][TestPath] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 0 Partial: 0 Cluster State: { } }: sender# [3:28:2066], cookie# 12345 2026-01-08T15:23:51.058916Z node 3 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:999: [main][3:19:2066][TestPath] Sync is done in the ring group: cookie# 12345, ring group# 0, size# 3, half# 1, successes# 2, failures# 0, partial# 0 2026-01-08T15:23:51.058969Z node 3 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:919: [main][3:19:2066][TestPath] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 0 Partial: 0 Cluster State: { } }: sender# [3:29:2066], cookie# 12345 2026-01-08T15:23:51.058997Z node 3 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:925: [main][3:19:2066][TestPath] Sync cookie mismatch: sender# [3:29:2066], cookie# 12345, current cookie# 0 >> NaiveFragmentWriterTest::Long |82.6%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/ut_blobstorage/get.cpp |82.6%| [TM] {BAZEL_UPLOAD} ydb/core/base/ut_board_subscriber/unittest |82.6%| [TM] {BAZEL_UPLOAD} ydb/core/base/ut_board_subscriber/unittest >> TBlobStorageSyncLogData::SerializeParseEmpty1_Proto [GOOD] 
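The quorum arithmetic printed by the subscriber traces above ("size# 3, half# 1, successes# …, failures# …") can be read directly off the counters: a sync round completes once more than half of a ring group's replicas return full (Partial: 0) responses, and completes as partial once more than half have failed. Below is a minimal standalone sketch of that decision under those assumptions; the names (TRingGroupQuorum, Feed, ESyncState) are illustrative stand-ins, not the actual subscriber.cpp API.

```cpp
#include <cstdint>
#include <initializer_list>
#include <iostream>

// Illustrative reconstruction of the per-ring-group bookkeeping behind the
// "Sync is in progress" / "Sync is done in the ring group" lines above.
// Hypothetical names; this is not the actual subscriber.cpp code.
enum class ESyncState { InProgress, Done, DonePartial };

struct TRingGroupQuorum {
    uint32_t Size = 3;       // replicas in the ring group (size# in the log)
    uint32_t Successes = 0;  // full responses received (Partial: 0)
    uint32_t Failures = 0;   // partial responses received (Partial: 1)

    uint32_t Half() const { return Size / 2; }  // half# in the log: 3 / 2 == 1

    // Count one TEvSyncVersionResponse and decide the outcome. A strict
    // majority of successes completes the sync; once a majority of replicas
    // has failed, the round completes as partial, which the subscriber logs
    // as "Sync is incomplete in one of the ring groups".
    ESyncState Feed(bool partial) {
        partial ? ++Failures : ++Successes;
        if (Successes > Half()) {
            return ESyncState::Done;        // e.g. successes# 2, partial# 0
        }
        if (Failures > Half()) {
            return ESyncState::DonePartial; // e.g. failures# 2, partial# 1
        }
        return ESyncState::InProgress;
    }
};

int main() {
    // Replay the responses for cookie# 12346 from the node 1 trace above:
    // partial, full, partial -> the round completes as partial.
    TRingGroupQuorum quorum;
    for (bool partial : {true, false, true}) {
        switch (quorum.Feed(partial)) {
        case ESyncState::InProgress:
            std::cout << "in progress: successes# " << quorum.Successes
                      << ", failures# " << quorum.Failures << "\n";
            break;
        case ESyncState::Done:
            std::cout << "done\n";
            break;
        case ESyncState::DonePartial:
            std::cout << "done, partial# 1\n";
            break;
        }
    }
    return 0;
}
```

This also explains the "Sync cookie mismatch: … current cookie# 0" lines: once a round is decided, the subscriber appears to reset its current cookie, so late responses carrying the finished round's cookie are discarded rather than counted.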
>> TBlobStorageSyncLogData::SerializeParseEmpty2_Proto [GOOD] >> SemiSortedDeltaCodec::Random32 >> BSCMovePDisk::PDiskMove_ErasureNone [GOOD] >> SemiSortedDeltaAndVarLengthCodec::Random32 [GOOD] >> SemiSortedDeltaAndVarLengthCodec::Random64 >> SemiSortedDeltaCodec::Random32 [GOOD] >> SemiSortedDeltaCodec::Random64 >> TBlobStorageSyncLogKeeper::CutLog_EntryPointNewFormat [GOOD] >> TBlobStorageSyncLogMem::EmptyMemRecLog [GOOD] >> TBlobStorageSyncLogMem::FilledIn1 [GOOD] >> TBlobStorageSyncLogMem::EmptyMemRecLogPutAfterSnapshot [GOOD] |82.6%| [TM] {BAZEL_UPLOAD} ydb/core/base/ut_board_subscriber/unittest |82.6%| [TM] {BAZEL_UPLOAD} ydb/core/blobstorage/vdisk/hulldb/generic/ut/unittest |82.6%| [TM] {BAZEL_UPLOAD} ydb/core/base/ut_board_subscriber/unittest |82.6%| [TM] {BAZEL_UPLOAD} ydb/core/base/ut_board_subscriber/unittest |82.6%| [TM] {BAZEL_UPLOAD} ydb/core/base/ut_board_subscriber/unittest >> TAwsSignature::SignWithCanonization [GOOD] >> TBlobStorageSyncLogDsk::AddByOne [GOOD] >> SemiSortedDeltaCodec::Random64 [GOOD] >> TBlobStorageSyncLogDsk::AddFive [GOOD] >> TBlobStorageSyncLogDsk::ComplicatedSerializeWithOverlapping [GOOD] >> TBlobStorageSyncLogDsk::DeleteChunks |82.6%| [TS] {asan, default-linux-x86_64, release} ydb/library/yql/providers/common/http_gateway/ut/unittest >> TDNSGatewaySuite::ShouldFilterDNSAddressedBasedOnProvidedProtocolANYCase [GOOD] |82.6%| [TM] {BAZEL_UPLOAD} ydb/core/blobstorage/vdisk/hulldb/generic/ut/unittest >> SemiSortedDeltaAndVarLengthCodec::Random64 [GOOD] >> SemiSortedDeltaCodec::BasicTest32 [GOOD] >> SemiSortedDeltaCodec::BasicTest64 [GOOD] >> TBlobStorageSyncLogDsk::DeleteChunks [GOOD] >> NaiveFragmentWriterTest::Long [GOOD] >> ReorderCodecTest::Basic [GOOD] >> RunLengthCodec::BasicTest32 [GOOD] >> RunLengthCodec::BasicTest64 [GOOD] |82.6%| [TM] {BAZEL_UPLOAD} ydb/core/blobstorage/vdisk/hulldb/generic/ut/unittest |82.6%| [TA] {BAZEL_UPLOAD} $(B)/ydb/core/control/ut/test-results/unittest/{meta.json ... 
results_accumulator.log} |82.6%| [TM] {BAZEL_UPLOAD} ydb/core/blobstorage/vdisk/hulldb/generic/ut/unittest |82.6%| [TM] {BAZEL_UPLOAD} ydb/core/blobstorage/vdisk/hulldb/generic/ut/unittest |82.6%| [TM] {BAZEL_UPLOAD} ydb/core/base/ut_board_subscriber/unittest |82.7%| [TM] {BAZEL_UPLOAD} ydb/core/tx/scheme_board/ut_subscriber/unittest |82.7%| [TM] {BAZEL_UPLOAD} ydb/core/tx/scheme_board/ut_subscriber/unittest |82.7%| [TM] {BAZEL_UPLOAD} ydb/core/tx/scheme_board/ut_subscriber/unittest |82.7%| [TM] {BAZEL_UPLOAD} ydb/core/blobstorage/vdisk/hulldb/generic/ut/unittest |82.7%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/vdisk/synclog/ut/unittest >> TBlobStorageSyncLogDsk::TrimLog [GOOD] >> HullReplWriteSst::Basic |82.7%| [TM] {BAZEL_UPLOAD} ydb/core/tx/scheme_board/ut_subscriber/unittest ------- [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/ut_vdisk/unittest >> TBsVDiskRepl1::ReadOnly [GOOD] Test command err: 2026-01-08T15:23:47.885380Z :BS_SYNCER ERROR: guid_recovery.cpp:714: PDiskId# 4 VDISK[0:_:0:1:1]: (0) TVDiskGuidRecoveryActor: DECISION: [Decision# LostData SubsequentFailure# 0] 2026-01-08T15:23:47.951032Z :BS_SYNCER ERROR: guid_recovery.cpp:767: PDiskId# 4 VDISK[0:_:0:1:1]: (0) TVDiskGuidRecoveryActor: FINISH: [Decision# LostData Guid# 9968663172900961394] 2026-01-08T15:23:48.983547Z :BS_SYNCER ERROR: blobstorage_osiris.cpp:203: PDiskId# 4 VDISK[0:_:0:1:1]: (0) THullOsirisActor: FINISH: BlobsResurrected# 0 PartsResurrected# 0 |82.7%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/ut_blobstorage/deadlines.cpp |82.7%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/vdisk/synclog/ut/unittest >> NaiveFragmentWriterTest::Basic [GOOD] |82.7%| [TM] {BAZEL_UPLOAD} ydb/core/tx/scheme_board/ut_subscriber/unittest >> Donor::CheckOnlineReadRequestToDonor >> Donor::ContinueWithFaultyDonor |82.7%| [TM] {BAZEL_UPLOAD} ydb/core/tx/scheme_board/ut_subscriber/unittest >> TActorTest::TestCreateChildActor [GOOD] >> TActorTest::TestBlockEvents >> TActorTest::TestSendEvent [GOOD] >> TActorTest::TestSendAfterDelay >> TActorTest::TestBlockEvents [GOOD] >> TActorTest::TestScheduleEvent [GOOD] >> TActorTest::TestScheduleReaction |82.7%| [TM] {BAZEL_UPLOAD} ydb/core/blobstorage/vdisk/synclog/ut/unittest |82.7%| [TM] {BAZEL_UPLOAD} ydb/core/tx/scheme_board/ut_subscriber/unittest >> TActorTest::TestWaitForFirstEvent >> Donor::MultipleEvicts >> TActorTest::TestScheduleReaction [GOOD] >> TBsVDiskGC::TGCManyVPutsCompactGCAllTest [GOOD] >> TActorTest::TestSendAfterDelay [GOOD] >> TBsVDiskDefrag::Defrag50PercentGarbage [GOOD] >> TBsVDiskExtreme::Simple3Put1GetMissingKeyFresh >> TActorTest::TestWaitForFirstEvent [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/ut_blobstorage/ut_move_pdisk/unittest >> BSCMovePDisk::PDiskMove_ErasureNone [GOOD] Test command err: RandomSeed# 6636533697621484771 |82.6%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/vdisk/synclog/ut/unittest >> TBlobStorageSyncLogDsk::DeleteChunks [GOOD] |82.7%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/vdisk/repl/ut/unittest |82.7%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/vdisk/repl/ut/unittest |82.7%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/vdisk/synclog/ut/unittest >> SemiSortedDeltaCodec::BasicTest64 [GOOD] |82.7%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/vdisk/repl/ut/unittest |82.7%| [TM] {asan, default-linux-x86_64, release} 
ydb/core/blobstorage/vdisk/synclog/ut/unittest >> SemiSortedDeltaCodec::Random64 [GOOD] |82.7%| [TM] {BAZEL_UPLOAD} ydb/core/tx/scheme_board/ut_subscriber/unittest |82.7%| [TM] {BAZEL_UPLOAD} ydb/core/tx/scheme_board/ut_subscriber/unittest |82.7%| [TS] {asan, default-linux-x86_64, release} ydb/library/yql/providers/common/http_gateway/ut/unittest >> TAwsSignature::SignWithCanonization [GOOD] |82.7%| [TA] {BAZEL_UPLOAD} $(B)/ydb/core/blobstorage/ut_blobstorage/ut_check_integrity/test-results/unittest/{meta.json ... results_accumulator.log} |82.7%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/vdisk/synclog/ut/unittest >> TBlobStorageSyncLogMem::EmptyMemRecLogPutAfterSnapshot [GOOD] |82.7%| [TA] {BAZEL_UPLOAD} $(B)/ydb/core/cms/console/validators/ut/test-results/unittest/{meta.json ... results_accumulator.log} >> TBsLocalRecovery::WriteRestartReadHugeIncreased [GOOD] >> TBsLocalRecovery::WriteRestartReadHugeDecreased >> Donor::ConsistentWritesWhenSwitchingToDonorMode >> TActorTest::TestSendFromAnotherThread >> TopicNameConverterTest::LegacyStyleDoubleName [GOOD] >> TopicNameConverterTest::NoTopicName [GOOD] >> TBlobStorageIngress::IngressPartsWeMustHaveLocally [GOOD] >> TBlobStorageIngress::IngressLocalParts [GOOD] >> TIntervalSetTest::IntervalVecTestEmpty [GOOD] >> TBlobStorageIngress::IngressPrintDistribution [GOOD] >> TIntervalSetTest::IntervalVecTestAdd >> TIntervalSetTest::IntervalVecTestSpecificAdd [GOOD] >> TBlobStorageIngress::Ingress [GOOD] >> TBlobStorageIngress::IngressCacheMirror3 [GOOD] >> TBlobStorageIngress::IngressCache4Plus2 [GOOD] >> TIntervalSetTest::IntervalVecTestAdd [GOOD] >> TIntervalSetTest::IntervalVecTestAddSubtract [GOOD] >> TIntervalSetTest::IntervalVecTestSubtract [GOOD] >> TIntervalSetTest::IntervalVecTestSubtractAgainstReference |82.7%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/ut_blobstorage/ut_donor/unittest |82.7%| [TS] {BAZEL_UPLOAD} ydb/library/yql/providers/common/http_gateway/ut/unittest |82.7%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/vdisk/synclog/ut/unittest >> RunLengthCodec::BasicTest64 [GOOD] >> TIntervalSetTest::IntervalVecTestSubtractAgainstReference [GOOD] >> TIntervalSetTest::IntervalVecTestAddAgainstReference [GOOD] >> TIntervalSetTest::IntervalVecTestIsSubsetOfAgainstReference >> TIntervalSetTest::IntervalVecTestIsSubsetOfAgainstReference [GOOD] >> TIntervalSetTest::IntervalVecTestToStringAgainstReference [GOOD] >> TIntervalSetTest::IntervalVecUnion |82.7%| [TA] $(B)/ydb/core/base/ut_board_subscriber/test-results/unittest/{meta.json ... results_accumulator.log} |82.7%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/ut_blobstorage/ut_donor/unittest |82.7%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/ut_blobstorage/ut_donor/unittest >> TIntervalSetTest::IntervalVecUnion [GOOD] >> TIntervalSetTest::IntervalVecUnionInplace |82.7%| [TM] {asan, default-linux-x86_64, release} ydb/core/testlib/actors/ut/unittest >> TActorTest::TestScheduleReaction [GOOD] >> TIntervalSetTest::IntervalVecUnionInplace [GOOD] >> TIntervalSetTest::IntervalVecUnionInplaceSelf [GOOD] |82.7%| [TA] {RESULT} $(B)/ydb/core/blobstorage/ut_blobstorage/ut_check_integrity/test-results/unittest/{meta.json ... 
results_accumulator.log} |82.7%| [TM] {asan, default-linux-x86_64, release} ydb/core/testlib/actors/ut/unittest >> TActorTest::TestSendAfterDelay [GOOD] >> TIntrusiveFixedHashSetTest::TestEmptyFind [GOOD] >> TIntrusiveFixedHashSetTest::TestPushFindClear [GOOD] >> TIntrusiveHeapTest::TestEmpty [GOOD] >> TIntrusiveHeapTest::TestAddRemove [GOOD] >> TIntrusiveHeapTest::TestUpdateNoChange [GOOD] >> TIntrusiveHeapTest::TestUpdateIncrease [GOOD] >> TIntrusiveHeapTest::TestUpdateDecrease [GOOD] >> TIntrusiveStackTest::TestEmptyPop [GOOD] >> TIntrusiveStackTest::TestPushPop [GOOD] >> TLockFreeIntrusiveStackTest::ConcurrentRefCountHeavyContention >> TBlobStorageSyncLogMem::ManyLogoBlobsPerf [GOOD] >> TBlobStorageSyncLogMem::ManyLogoBlobsBuildSwapSnapshot [GOOD] >> VarLengthIntCodec::BasicTest32 [GOOD] >> TDNSGatewaySuite::ShouldUsePreviouslyKnownResolutionIfDNSIsNotResponding [GOOD] >> TQueryResultSizeTrackerTest::CheckWithoutQueryResult [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/testlib/actors/ut/unittest >> TActorTest::TestBlockEvents [GOOD] Test command err: ... waiting for blocked 3 events ... blocking NKikimr::NTestSuiteTActorTest::TTestCaseTestBlockEvents::Execute_(NUnitTest::TTestContext&)::TEvTrigger from NKikimr::NTestSuiteTActorTest::TTestCaseTestBlockEvents::Execute_(NUnitTest::TTestContext&)::TSourceActor to NKikimr::NTestSuiteTActorTest::TTestCaseTestBlockEvents::Execute_(NUnitTest::TTestContext&)::TTargetActor cookie 0 ... blocking NKikimr::NTestSuiteTActorTest::TTestCaseTestBlockEvents::Execute_(NUnitTest::TTestContext&)::TEvTrigger from NKikimr::NTestSuiteTActorTest::TTestCaseTestBlockEvents::Execute_(NUnitTest::TTestContext&)::TSourceActor to NKikimr::NTestSuiteTActorTest::TTestCaseTestBlockEvents::Execute_(NUnitTest::TTestContext&)::TTargetActor cookie 0 ... blocking NKikimr::NTestSuiteTActorTest::TTestCaseTestBlockEvents::Execute_(NUnitTest::TTestContext&)::TEvTrigger from NKikimr::NTestSuiteTActorTest::TTestCaseTestBlockEvents::Execute_(NUnitTest::TTestContext&)::TSourceActor to NKikimr::NTestSuiteTActorTest::TTestCaseTestBlockEvents::Execute_(NUnitTest::TTestContext&)::TTargetActor cookie 0 ... waiting for blocked 3 events (done) ... unblocking NKikimr::NTestSuiteTActorTest::TTestCaseTestBlockEvents::Execute_(NUnitTest::TTestContext&)::TEvTrigger from NKikimr::NTestSuiteTActorTest::TTestCaseTestBlockEvents::Execute_(NUnitTest::TTestContext&)::TSourceActor to NKikimr::NTestSuiteTActorTest::TTestCaseTestBlockEvents::Execute_(NUnitTest::TTestContext&)::TTargetActor ... unblocking NKikimr::NTestSuiteTActorTest::TTestCaseTestBlockEvents::Execute_(NUnitTest::TTestContext&)::TEvTrigger from NKikimr::NTestSuiteTActorTest::TTestCaseTestBlockEvents::Execute_(NUnitTest::TTestContext&)::TSourceActor to NKikimr::NTestSuiteTActorTest::TTestCaseTestBlockEvents::Execute_(NUnitTest::TTestContext&)::TTargetActor ... waiting for blocked 1 more event ... blocking NKikimr::NTestSuiteTActorTest::TTestCaseTestBlockEvents::Execute_(NUnitTest::TTestContext&)::TEvTrigger from NKikimr::NTestSuiteTActorTest::TTestCaseTestBlockEvents::Execute_(NUnitTest::TTestContext&)::TSourceActor to NKikimr::NTestSuiteTActorTest::TTestCaseTestBlockEvents::Execute_(NUnitTest::TTestContext&)::TTargetActor cookie 0 ... waiting for blocked 1 more event (done) ... waiting for processed 2 more events ... waiting for processed 2 more events (done) ... 
unblocking NKikimr::NTestSuiteTActorTest::TTestCaseTestBlockEvents::Execute_(NUnitTest::TTestContext&)::TEvTrigger from NKikimr::NTestSuiteTActorTest::TTestCaseTestBlockEvents::Execute_(NUnitTest::TTestContext&)::TSourceActor to NKikimr::NTestSuiteTActorTest::TTestCaseTestBlockEvents::Execute_(NUnitTest::TTestContext&)::TTargetActor ... unblocking NKikimr::NTestSuiteTActorTest::TTestCaseTestBlockEvents::Execute_(NUnitTest::TTestContext&)::TEvTrigger from NKikimr::NTestSuiteTActorTest::TTestCaseTestBlockEvents::Execute_(NUnitTest::TTestContext&)::TSourceActor to NKikimr::NTestSuiteTActorTest::TTestCaseTestBlockEvents::Execute_(NUnitTest::TTestContext&)::TTargetActor ... waiting for processed 3 more events ... waiting for processed 3 more events (done) ------- [TM] {asan, default-linux-x86_64, release} ydb/core/testlib/actors/ut/unittest >> TActorTest::TestWaitForFirstEvent [GOOD] Test command err: ... waiting for NKikimr::NTestSuiteTActorTest::TTestCaseTestWaitForFirstEvent::Execute_(NUnitTest::TTestContext&)::TEvTrigger ... waiting for NKikimr::NTestSuiteTActorTest::TTestCaseTestWaitForFirstEvent::Execute_(NUnitTest::TTestContext&)::TEvTrigger (done) ... waiting for NKikimr::NTestSuiteTActorTest::TTestCaseTestWaitForFirstEvent::Execute_(NUnitTest::TTestContext&)::TEvTrigger ... waiting for NKikimr::NTestSuiteTActorTest::TTestCaseTestWaitForFirstEvent::Execute_(NUnitTest::TTestContext&)::TEvTrigger (done) |82.7%| [TM] {BAZEL_UPLOAD} ydb/core/blobstorage/vdisk/synclog/ut/unittest |82.7%| [TM] {BAZEL_UPLOAD} ydb/core/blobstorage/ut_vdisk/unittest |82.7%| [TM] {BAZEL_UPLOAD} ydb/core/blobstorage/vdisk/synclog/ut/unittest |82.7%| [TS] {asan, default-linux-x86_64, release} ydb/library/persqueue/topic_parser/ut/unittest >> TopicNameConverterTest::NoTopicName [GOOD] |82.7%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/vdisk/ingress/ut/unittest >> TBlobStorageIngress::IngressCache4Plus2 [GOOD] |82.7%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/vdisk/ingress/ut/unittest >> TBlobStorageIngress::IngressPrintDistribution [GOOD] |82.7%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/blobstorage/ut_blobstorage/ut_group_reconfiguration/ut_group_reconfiguration >> TActorTest::TestSendFromAnotherThread [GOOD] |82.7%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/ut_blobstorage/ut_move_pdisk/unittest >> VDiskBalancing::TwoPartsOnOneNodeTest_Block42_HugeBlob [GOOD] >> Donor::CheckOnlineReadRequestToDonor [GOOD] |82.7%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/ut_vdisk/unittest >> TBsVDiskGC::TGCManyVPutsCompactGCAllTest [GOOD] |82.7%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/vdisk/query/ut/unittest |82.7%| [LD] {RESULT} $(B)/ydb/core/blobstorage/ut_blobstorage/ut_group_reconfiguration/ut_group_reconfiguration >> VDiskTest::HugeBlobWrite |82.7%| [TM] {BAZEL_UPLOAD} ydb/core/blobstorage/vdisk/synclog/ut/unittest |82.7%| [TM] {BAZEL_UPLOAD} ydb/core/blobstorage/ut_blobstorage/ut_move_pdisk/unittest |82.7%| [TM] {BAZEL_UPLOAD} ydb/core/blobstorage/vdisk/synclog/ut/unittest |82.7%| [TM] {BAZEL_UPLOAD} ydb/core/blobstorage/vdisk/repl/ut/unittest |82.7%| [TM] {BAZEL_UPLOAD} ydb/core/blobstorage/vdisk/repl/ut/unittest >> TBlobStorageIngressMatrix::VectorTestMinus [GOOD] >> TBlobStorageIngressMatrix::VectorTestIterator3 [GOOD] |82.7%| [TA] $(B)/ydb/core/blobstorage/ut_blobstorage/ut_cluster_balancing/test-results/unittest/{meta.json ... 
results_accumulator.log} |82.7%| [TS] {BAZEL_UPLOAD} ydb/library/yql/providers/common/http_gateway/ut/unittest |82.7%| [TM] {BAZEL_UPLOAD} ydb/core/blobstorage/vdisk/repl/ut/unittest >> TBlobStorageIngressMatrix::VectorTestEmpty [GOOD] >> TBlobStorageIngressMatrix::VectorTestBitwiseComplement2 [GOOD] |82.7%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/vdisk/query/ut/unittest |82.7%| [TM] {BAZEL_UPLOAD} ydb/core/blobstorage/vdisk/synclog/ut/unittest >> TBsVDiskExtreme::Simple3Put1GetMissingKeyFresh [GOOD] >> TBsVDiskExtreme::Simple3Put1GetMissingKeyCompaction >> BSCMovePDisk::PDiskMove_Mirror3dc |82.7%| [TM] {BAZEL_UPLOAD} ydb/core/blobstorage/vdisk/synclog/ut/unittest |82.7%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/blobstorage/ut_blobstorage/ut_group_reconfiguration/ut_group_reconfiguration |82.7%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/vdisk/query/ut/unittest >> TQueryResultSizeTrackerTest::CheckWithoutQueryResult [GOOD] |82.7%| [TS] {asan, default-linux-x86_64, release} ydb/library/yql/providers/common/http_gateway/ut/unittest >> TDNSGatewaySuite::ShouldUsePreviouslyKnownResolutionIfDNSIsNotResponding [GOOD] |82.7%| [TM] {BAZEL_UPLOAD} ydb/core/blobstorage/ut_blobstorage/ut_donor/unittest >> TBlobStorageBlocksCacheTest::DeepInFlight [GOOD] |82.7%| [TM] {BAZEL_UPLOAD} ydb/core/blobstorage/vdisk/synclog/ut/unittest |82.7%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/ut_blobstorage/ut_move_pdisk/unittest |82.7%| [TM] {BAZEL_UPLOAD} ydb/core/blobstorage/ut_blobstorage/ut_donor/unittest |82.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/blobstorage/nodewarden/blobstorage_node_warden_ut.cpp >> TBlobStorageIngressMatrix::VectorTestIterator1 [GOOD] >> TBlobStorageIngressMatrix::VectorTestIterator2 [GOOD] |82.7%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/vdisk/synclog/ut/unittest >> VarLengthIntCodec::BasicTest32 [GOOD] |82.7%| [TM] {asan, default-linux-x86_64, release} ydb/core/testlib/actors/ut/unittest >> TActorTest::TestSendFromAnotherThread [GOOD] >> TBlobStorageIngressMatrix::VectorTest [GOOD] >> TBlobStorageIngressMatrix::VectorTestBitsBefore1 [GOOD] >> TBlobStorageIngressMatrix::ShiftedMainBitVec [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/ut_blobstorage/ut_balancing/unittest >> VDiskBalancing::TwoPartsOnOneNodeTest_Block42_HugeBlob [GOOD] Test command err: RandomSeed# 15483419980913735912 SEND TEvPut with key [1:1:1:0:0:3201024:0] 2026-01-08T15:23:53.415757Z 1 00h01m00.010512s :PIPE_SERVER ERROR: [72057594037932033] NodeDisconnected NodeId# 6 2026-01-08T15:23:53.416237Z 1 00h01m00.010512s :PIPE_SERVER ERROR: [72057594037932033] NodeDisconnected NodeId# 5 TEvPutResult: TEvPutResult {Id# [1:1:1:0:0:3201024:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999988} Node 0: 4 Node 1: 5 Node 2: 6 Node 3: 1 Node 4: Node 5: Node 6: 2 Node 7: 3 2026-01-08T15:23:53.502922Z 1 00h01m00.011024s :PIPE_SERVER ERROR: [72057594037932033] NodeDisconnected NodeId# 7 Node 0: 4 Node 1: 5 Node 2: 6 Node 3: 1 2 Node 4: Node 5: 1 Node 6: Node 7: 3 Start compaction 1 Finish compaction 1 |82.7%| [TA] {BAZEL_UPLOAD} $(B)/ydb/core/base/ut_board_subscriber/test-results/unittest/{meta.json ... 
results_accumulator.log} |82.7%| [TM] {BAZEL_UPLOAD} ydb/core/blobstorage/ut_blobstorage/ut_donor/unittest ------- [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/ut_blobstorage/ut_donor/unittest >> Donor::CheckOnlineReadRequestToDonor [GOOD] Test command err: RandomSeed# 659857530223101864 2026-01-08T15:23:54.776723Z 7 00h01m11.311024s :BS_SYNCER ERROR: PDiskId# 1001 VDISK[82000000:_:0:6:0]: (2181038080) TVDiskGuidRecoveryActor: DECISION: [Decision# LostData SubsequentFailure# 0] 2026-01-08T15:23:54.778233Z 7 00h01m11.311024s :BS_SYNCER ERROR: PDiskId# 1001 VDISK[82000000:_:0:6:0]: (2181038080) TVDiskGuidRecoveryActor: FINISH: [Decision# LostData Guid# 8312269161081931800] 2026-01-08T15:23:54.797282Z 7 00h01m11.311024s :BS_SYNCER ERROR: PDiskId# 1001 VDISK[82000000:_:0:6:0]: (2181038080) THullOsirisActor: RESURRECT: id# [1:1:0:0:0:2097152:1] 2026-01-08T15:23:54.797483Z 7 00h01m11.311024s :BS_SYNCER ERROR: PDiskId# 1001 VDISK[82000000:_:0:6:0]: (2181038080) THullOsirisActor: FINISH: BlobsResurrected# 1 PartsResurrected# 1 >> Donor::ContinueWithFaultyDonor [GOOD] |82.7%| [TA] {RESULT} $(B)/ydb/core/base/ut_board_subscriber/test-results/unittest/{meta.json ... results_accumulator.log} |82.7%| [TA] {RESULT} $(B)/ydb/core/blobstorage/ut_blobstorage/ut_cluster_balancing/test-results/unittest/{meta.json ... results_accumulator.log} |82.7%| [TA] {RESULT} $(B)/ydb/core/cms/console/validators/ut/test-results/unittest/{meta.json ... results_accumulator.log} |82.7%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/ut_blobstorage/ut_move_pdisk/unittest >> BSCMovePDisk::PDiskMove_Block42 |82.7%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/ut_vdisk2/unittest |82.7%| [TM] {BAZEL_UPLOAD} ydb/core/testlib/actors/ut/unittest >> TSubgroupPartLayoutTest::CountEffectiveReplicas3of4 |82.7%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/ut_blobstorage/ut_move_pdisk/unittest |82.7%| [TM] {BAZEL_UPLOAD} ydb/core/testlib/actors/ut/unittest |82.7%| [TM] {BAZEL_UPLOAD} ydb/core/testlib/actors/ut/unittest |82.7%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/vdisk/ingress/ut/unittest >> TBlobStorageIngressMatrix::VectorTestIterator3 [GOOD] >> Donor::MultipleEvicts [GOOD] >> TBlobStorageGroupInfoTest::TestBelongsToSubgroup |82.7%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/blobstorage/ut_blobstorage/ut_restart_pdisk/blobstorage-ut_blobstorage-ut_restart_pdisk |82.7%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/ut_blobstorage/ut_move_pdisk/unittest |82.7%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/ut_blobstorage/ut_move_pdisk/unittest |82.7%| [TM] {BAZEL_UPLOAD} ydb/core/testlib/actors/ut/unittest |82.7%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/vdisk/ingress/ut/unittest >> TBlobStorageIngressMatrix::VectorTestBitwiseComplement2 [GOOD] |82.7%| [TS] {BAZEL_UPLOAD} ydb/library/persqueue/topic_parser/ut/unittest >> TFreshAppendixTest::IterateForwardIncluding [GOOD] >> TFreshAppendixTest::IterateForwardExcluding [GOOD] >> TBlobStorageBlocksCacheTest::MultipleTables [GOOD] |82.7%| [TM] {BAZEL_UPLOAD} ydb/core/blobstorage/vdisk/ingress/ut/unittest |82.7%| [TM] {BAZEL_UPLOAD} ydb/core/blobstorage/vdisk/ingress/ut/unittest |82.7%| [LD] {RESULT} $(B)/ydb/core/blobstorage/ut_blobstorage/ut_restart_pdisk/blobstorage-ut_blobstorage-ut_restart_pdisk >> TBlobStorageGroupInfoTest::TestBelongsToSubgroup [GOOD] |82.7%| [TM] {BAZEL_UPLOAD} 
ydb/core/blobstorage/ut_blobstorage/ut_move_pdisk/unittest |82.7%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/vdisk/hulldb/cache_block/ut/unittest >> TBlobStorageBlocksCacheTest::DeepInFlight [GOOD] >> TBlobStorageGroupInfoTest::SubgroupPartLayout |82.7%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/vdisk/ingress/ut/unittest >> TBlobStorageIngressMatrix::VectorTestIterator2 [GOOD] |82.8%| [TM] {BAZEL_UPLOAD} ydb/core/blobstorage/ut_vdisk/unittest >> TBsVDiskExtreme::Simple3Put1GetMissingKeyCompaction [GOOD] |82.8%| [TM] {BAZEL_UPLOAD} ydb/core/blobstorage/vdisk/query/ut/unittest |82.8%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/ut_vdisk2/unittest >> TBlobStorageBlocksCacheTest::PutIntoPast [GOOD] |82.8%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/vdisk/ingress/ut/unittest >> TBlobStorageIngressMatrix::ShiftedMainBitVec [GOOD] |82.8%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/blobstorage/ut_blobstorage/ut_restart_pdisk/blobstorage-ut_blobstorage-ut_restart_pdisk >> DiscoveryConverterTest::DiscoveryConverter [GOOD] >> DiscoveryConverterTest::EmptyModern [GOOD] |82.8%| [TM] {BAZEL_UPLOAD} ydb/core/blobstorage/vdisk/query/ut/unittest >> TFreshAppendixTest::IterateForwardAll [GOOD] >> TFreshAppendixTest::IterateBackwardIncluding [GOOD] |82.8%| [TA] {BAZEL_UPLOAD} $(B)/ydb/core/blobstorage/ut_blobstorage/ut_cluster_balancing/test-results/unittest/{meta.json ... results_accumulator.log} ------- [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/ut_blobstorage/ut_donor/unittest >> Donor::ContinueWithFaultyDonor [GOOD] Test command err: RandomSeed# 2969513840464907077 2026-01-08T15:23:55.467359Z 1 00h01m14.361024s :BS_SYNCER ERROR: PDiskId# 1001 VDISK[82000000:_:0:0:0]: (2181038080) TVDiskGuidRecoveryActor: DECISION: [Decision# LostData SubsequentFailure# 0] 2026-01-08T15:23:55.469518Z 1 00h01m14.361024s :BS_SYNCER ERROR: PDiskId# 1001 VDISK[82000000:_:0:0:0]: (2181038080) TVDiskGuidRecoveryActor: FINISH: [Decision# LostData Guid# 8037817213466535768] 2026-01-08T15:23:55.489670Z 1 00h01m14.361024s :BS_SYNCER ERROR: PDiskId# 1001 VDISK[82000000:_:0:0:0]: (2181038080) THullOsirisActor: FINISH: BlobsResurrected# 0 PartsResurrected# 0 >> TBlobStorageIngress::IngressCreateFromRepl [GOOD] >> TBlobStorageIngress::IngressGetMainReplica [GOOD] >> TBlobStorageIngress::IngressHandoffPartsDelete [GOOD] |82.8%| [TM] {BAZEL_UPLOAD} ydb/core/blobstorage/vdisk/query/ut/unittest |82.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/blobstorage/nodewarden/blobstorage_node_warden_ut.cpp |82.8%| [TS] {BAZEL_UPLOAD} ydb/library/yql/providers/common/http_gateway/ut/unittest |82.8%| [TM] {BAZEL_UPLOAD} ydb/core/blobstorage/ut_blobstorage/ut_move_pdisk/unittest |82.8%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/vdisk/hulldb/fresh/ut/unittest >> TFreshAppendixTest::IterateForwardExcluding [GOOD] |82.8%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/vdisk/hulldb/cache_block/ut/unittest >> TBlobStorageBlocksCacheTest::MultipleTables [GOOD] >> TBlobStorageIngress::BarrierIngressQuorumBasicMirror3_4_2 [GOOD] >> TBlobStorageIngress::BarrierIngressQuorumBasic4Plus2_8_1 [GOOD] >> TBlobStorageIngress::BarrierIngressQuorumMirror3 [GOOD] |82.8%| [TM] {BAZEL_UPLOAD} ydb/core/blobstorage/vdisk/synclog/ut/unittest |82.8%| [TM] {BAZEL_UPLOAD} ydb/core/testlib/actors/ut/unittest |82.8%| [TM] {BAZEL_UPLOAD} ydb/core/blobstorage/ut_blobstorage/ut_balancing/unittest >> TBlobStorageIngressMatrix::VectorTestBitwiseAnd 
[GOOD] >> TBlobStorageIngressMatrix::VectorTestBitwiseComplement1 [GOOD] >> TBlobStorageIngressMatrix::VectorTestBitsBefore2 [GOOD] |82.8%| [TM] {BAZEL_UPLOAD} ydb/core/blobstorage/ut_blobstorage/ut_donor/unittest |82.8%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/vdisk/hulldb/cache_block/ut/unittest >> TBlobStorageBlocksCacheTest::PutIntoPast [GOOD] |82.8%| [TM] {BAZEL_UPLOAD} ydb/core/blobstorage/ut_blobstorage/ut_move_pdisk/unittest |82.8%| [TM] {BAZEL_UPLOAD} ydb/core/blobstorage/ut_vdisk2/unittest |82.8%| [TM] {BAZEL_UPLOAD} ydb/core/blobstorage/ut_blobstorage/ut_move_pdisk/unittest |82.8%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/vdisk/hulldb/cache_block/ut/unittest |82.8%| [TM] {BAZEL_UPLOAD} ydb/core/blobstorage/vdisk/ingress/ut/unittest
------- [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/ut_blobstorage/ut_donor/unittest >> Donor::MultipleEvicts [GOOD]
Test command err: RandomSeed# 8090101130346862328
0 donors:
2026-01-08T15:23:55.753629Z 26 00h00m20.011024s :BS_SYNCER ERROR: PDiskId# 1000 VDISK[82000000:_:2:1:0]: (2181038080) TVDiskGuidRecoveryActor: DECISION: [Decision# LostData SubsequentFailure# 0]
2026-01-08T15:23:55.754938Z 26 00h00m20.011024s :BS_SYNCER ERROR: PDiskId# 1000 VDISK[82000000:_:2:1:0]: (2181038080) TVDiskGuidRecoveryActor: FINISH: [Decision# LostData Guid# 5453879258792559246]
2026-01-08T15:23:55.787497Z 26 00h00m20.011024s :BS_SYNCER ERROR: PDiskId# 1000 VDISK[82000000:_:2:1:0]: (2181038080) THullOsirisActor: FINISH: BlobsResurrected# 0 PartsResurrected# 0
1 donors: 24:1000
2026-01-08T15:23:55.910584Z 24 00h00m20.012048s :BS_SYNCER ERROR: PDiskId# 1000 VDISK[82000000:_:2:1:0]: (2181038080) TVDiskGuidRecoveryActor: DECISION: [Decision# LostData SubsequentFailure# 0]
2026-01-08T15:23:55.911834Z 24 00h00m20.012048s :BS_SYNCER ERROR: PDiskId# 1000 VDISK[82000000:_:2:1:0]: (2181038080) TVDiskGuidRecoveryActor: FINISH: [Decision# LostData Guid# 5453879258792559246]
2026-01-08T15:23:55.933014Z 24 00h00m20.012048s :BS_SYNCER ERROR: PDiskId# 1000 VDISK[82000000:_:2:1:0]: (2181038080) THullOsirisActor: FINISH: BlobsResurrected# 0 PartsResurrected# 0
1 donors: 26:1000
2026-01-08T15:23:56.027435Z 26 00h00m20.013072s :BS_SYNCER ERROR: PDiskId# 1000 VDISK[82000000:_:2:1:0]: (2181038080) TVDiskGuidRecoveryActor: DECISION: [Decision# LostData SubsequentFailure# 0]
2026-01-08T15:23:56.028456Z 26 00h00m20.013072s :BS_SYNCER ERROR: PDiskId# 1000 VDISK[82000000:_:2:1:0]: (2181038080) TVDiskGuidRecoveryActor: FINISH: [Decision# LostData Guid# 5453879258792559246]
2026-01-08T15:23:56.042905Z 26 00h00m20.013072s :BS_SYNCER ERROR: PDiskId# 1000 VDISK[82000000:_:2:1:0]: (2181038080) THullOsirisActor: FINISH: BlobsResurrected# 0 PartsResurrected# 0
1 donors: 24:1000
2026-01-08T15:23:56.113240Z 24 00h00m20.014096s :BS_SYNCER ERROR: PDiskId# 1000 VDISK[82000000:_:2:1:0]: (2181038080) TVDiskGuidRecoveryActor: DECISION: [Decision# LostData SubsequentFailure# 0]
2026-01-08T15:23:56.114204Z 24 00h00m20.014096s :BS_SYNCER ERROR: PDiskId# 1000 VDISK[82000000:_:2:1:0]: (2181038080) TVDiskGuidRecoveryActor: FINISH: [Decision# LostData Guid# 5453879258792559246]
2026-01-08T15:23:56.128418Z 24 00h00m20.014096s :BS_SYNCER ERROR: PDiskId# 1000 VDISK[82000000:_:2:1:0]: (2181038080) THullOsirisActor: FINISH: BlobsResurrected# 0 PartsResurrected# 0
1 donors: 26:1000
2026-01-08T15:23:56.206720Z 26 00h00m20.015120s :BS_SYNCER ERROR: PDiskId# 1000 VDISK[82000000:_:2:1:0]: (2181038080) TVDiskGuidRecoveryActor: DECISION: [Decision# LostData SubsequentFailure# 0]
2026-01-08T15:23:56.207942Z 26 00h00m20.015120s :BS_SYNCER ERROR: PDiskId# 1000 VDISK[82000000:_:2:1:0]: (2181038080) TVDiskGuidRecoveryActor: FINISH: [Decision# LostData Guid# 5453879258792559246]
2026-01-08T15:23:56.222699Z 26 00h00m20.015120s :BS_SYNCER ERROR: PDiskId# 1000 VDISK[82000000:_:2:1:0]: (2181038080) THullOsirisActor: FINISH: BlobsResurrected# 0 PartsResurrected# 0
1 donors: 24:1000
2026-01-08T15:23:56.313619Z 24 00h00m20.016144s :BS_SYNCER ERROR: PDiskId# 1000 VDISK[82000000:_:2:1:0]: (2181038080) TVDiskGuidRecoveryActor: DECISION: [Decision# LostData SubsequentFailure# 0]
2026-01-08T15:23:56.314984Z 24 00h00m20.016144s :BS_SYNCER ERROR: PDiskId# 1000 VDISK[82000000:_:2:1:0]: (2181038080) TVDiskGuidRecoveryActor: FINISH: [Decision# LostData Guid# 5453879258792559246]
2026-01-08T15:23:56.331303Z 24 00h00m20.016144s :BS_SYNCER ERROR: PDiskId# 1000 VDISK[82000000:_:2:1:0]: (2181038080) THullOsirisActor: FINISH: BlobsResurrected# 0 PartsResurrected# 0
1 donors: 26:1000
2026-01-08T15:23:56.424789Z 26 00h00m20.017168s :BS_SYNCER ERROR: PDiskId# 1000 VDISK[82000000:_:2:1:0]: (2181038080) TVDiskGuidRecoveryActor: DECISION: [Decision# LostData SubsequentFailure# 0]
2026-01-08T15:23:56.426086Z 26 00h00m20.017168s :BS_SYNCER ERROR: PDiskId# 1000 VDISK[82000000:_:2:1:0]: (2181038080) TVDiskGuidRecoveryActor: FINISH: [Decision# LostData Guid# 5453879258792559246]
2026-01-08T15:23:56.441512Z 26 00h00m20.017168s :BS_SYNCER ERROR: PDiskId# 1000 VDISK[82000000:_:2:1:0]: (2181038080) THullOsirisActor: FINISH: BlobsResurrected# 0 PartsResurrected# 0
1 donors: 24:1000
2026-01-08T15:23:56.533429Z 24 00h00m20.018192s :BS_SYNCER ERROR: PDiskId# 1000 VDISK[82000000:_:2:1:0]: (2181038080) TVDiskGuidRecoveryActor: DECISION: [Decision# LostData SubsequentFailure# 0]
2026-01-08T15:23:56.534660Z 24 00h00m20.018192s :BS_SYNCER ERROR: PDiskId# 1000 VDISK[82000000:_:2:1:0]: (2181038080) TVDiskGuidRecoveryActor: FINISH: [Decision# LostData Guid# 5453879258792559246]
2026-01-08T15:23:56.550066Z 24 00h00m20.018192s :BS_SYNCER ERROR: PDiskId# 1000 VDISK[82000000:_:2:1:0]: (2181038080) THullOsirisActor: FINISH: BlobsResurrected# 0 PartsResurrected# 0
1 donors: 26:1000
2026-01-08T15:23:56.644246Z 26 00h00m20.019216s :BS_SYNCER ERROR: PDiskId# 1000 VDISK[82000000:_:2:1:0]: (2181038080) TVDiskGuidRecoveryActor: DECISION: [Decision# LostData SubsequentFailure# 0]
2026-01-08T15:23:56.645436Z 26 00h00m20.019216s :BS_SYNCER ERROR: PDiskId# 1000 VDISK[82000000:_:2:1:0]: (2181038080) TVDiskGuidRecoveryActor: FINISH: [Decision# LostData Guid# 5453879258792559246]
2026-01-08T15:23:56.660939Z 26 00h00m20.019216s :BS_SYNCER ERROR: PDiskId# 1000 VDISK[82000000:_:2:1:0]: (2181038080) THullOsirisActor: FINISH: BlobsResurrected# 0 PartsResurrected# 0
1 donors: 24:1000
|82.8%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/vdisk/hulldb/cache_block/ut/unittest |82.8%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/vdisk/hulldb/cache_block/ut/unittest |82.9%| [TM] {BAZEL_UPLOAD} ydb/core/blobstorage/ut_blobstorage/ut_move_pdisk/unittest |82.9%| [TM] {BAZEL_UPLOAD} ydb/core/blobstorage/ut_blobstorage/ut_move_pdisk/unittest |82.9%| [TM] {BAZEL_UPLOAD} ydb/core/blobstorage/vdisk/ingress/ut/unittest >> TLockFreeIntrusiveStackTest::ConcurrentRefCountHeavyContention [GOOD] >> TLockFreeIntrusiveStackTest::ConcurrentAutoNeverEmpty |82.9%| [TM] {BAZEL_UPLOAD} ydb/core/blobstorage/vdisk/ingress/ut/unittest |82.9%| [TM] {BAZEL_UPLOAD} ydb/core/blobstorage/vdisk/ingress/ut/unittest
|82.9%| [TM] {BAZEL_UPLOAD} ydb/core/blobstorage/vdisk/hulldb/cache_block/ut/unittest |82.9%| [TS] {asan, default-linux-x86_64, release} ydb/library/persqueue/topic_parser/ut/unittest >> DiscoveryConverterTest::EmptyModern [GOOD] |82.9%| [TM] {BAZEL_UPLOAD} ydb/core/blobstorage/ut_vdisk2/unittest |82.9%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/vdisk/hulldb/fresh/ut/unittest >> TFreshAppendixTest::IterateBackwardIncluding [GOOD] |82.9%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/vdisk/ingress/ut/unittest >> TBlobStorageIngress::IngressHandoffPartsDelete [GOOD] >> TBlobStorageHullFreshSegment::PerfAppendix |82.9%| [TA] $(B)/ydb/library/yql/providers/common/http_gateway/ut/test-results/unittest/{meta.json ... results_accumulator.log} |82.9%| [TM] {BAZEL_UPLOAD} ydb/core/blobstorage/vdisk/hulldb/fresh/ut/unittest |82.9%| [TM] {BAZEL_UPLOAD} ydb/core/blobstorage/ut_blobstorage/ut_donor/unittest |82.9%| [TM] {BAZEL_UPLOAD} ydb/core/blobstorage/vdisk/hulldb/cache_block/ut/unittest >> TCircularQueueTest::ShouldRemove [GOOD] >> TCircularQueueTest::ShouldRemoveCurrent [GOOD] >> TCircularQueueTest::ShouldRemoveCurrentLast [GOOD] >> TConcurrentRWHashTest::TEmptyGetTest [GOOD] >> TConcurrentRWHashTest::TInsertTest [GOOD] >> TConcurrentRWHashTest::TInsertIfAbsentTest [GOOD] >> TConcurrentRWHashTest::TInsertIfAbsentTestFunc [GOOD] >> TConcurrentRWHashTest::TRemoveTest [GOOD] >> TConcurrentRWHashTest::TEraseTest [GOOD] >> TCowBTreeTest::Empty [GOOD] >> TCowBTreeTest::Basics [GOOD] >> TCowBTreeTest::ClearAndReuse >> TCowBTreeTest::ClearAndReuse [GOOD] >> TCowBTreeTest::SeekBackwardPermutationsInplace [GOOD] >> TCowBTreeTest::SeekBackwardPermutationsThreadSafe |82.9%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/vdisk/ingress/ut/unittest >> TBlobStorageIngress::BarrierIngressQuorumMirror3 [GOOD] >> TCowBTreeTest::SeekBackwardPermutationsThreadSafe [GOOD] >> TCowBTreeTest::RandomInsertInplace |82.9%| [TM] {BAZEL_UPLOAD} ydb/core/blobstorage/vdisk/hulldb/cache_block/ut/unittest |82.9%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/vdisk/ingress/ut/unittest >> TBlobStorageIngressMatrix::VectorTestBitsBefore2 [GOOD] |82.9%| [TM] {BAZEL_UPLOAD} ydb/core/blobstorage/vdisk/hulldb/cache_block/ut/unittest |82.9%| [TM] {BAZEL_UPLOAD} ydb/core/blobstorage/vdisk/hulldb/cache_block/ut/unittest |82.9%| [TM] {BAZEL_UPLOAD} ydb/core/blobstorage/ut_blobstorage/ut_donor/unittest |82.9%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/ut_vdisk2/unittest |82.9%| [TM] {BAZEL_UPLOAD} ydb/core/blobstorage/vdisk/hulldb/cache_block/ut/unittest >> TSTreeTest::Basic [GOOD] |83.0%| [TS] {BAZEL_UPLOAD} ydb/library/persqueue/topic_parser/ut/unittest >> TSVecTest::Basic [GOOD] >> TBlobStorageIngressMatrix::MatrixTest [GOOD] >> TBlobStorageIngressMatrix::ShiftedBitVecBase [GOOD] >> TBlobStorageIngressMatrix::ShiftedHandoffBitVec [GOOD] |83.0%| [TM] {BAZEL_UPLOAD} ydb/core/blobstorage/vdisk/ingress/ut/unittest |83.0%| [TM] {BAZEL_UPLOAD} ydb/core/blobstorage/vdisk/hulldb/fresh/ut/unittest |83.0%| [TA] {BAZEL_UPLOAD} $(B)/ydb/library/yql/providers/common/http_gateway/ut/test-results/unittest/{meta.json ... 
results_accumulator.log} >> TBlobStorageHullFresh::SimpleBackWardEnd2Times [GOOD] >> TBlobStorageHullFresh::Perf >> BSCRestartPDisk::RestartNotAllowed |83.0%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/mind/bscontroller/ut_selfheal/ydb-core-mind-bscontroller-ut_selfheal |83.0%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/blobstorage/ut_blobstorage/ut_read_only_vdisk/ut_blobstorage-ut_read_only_vdisk >> TBlobStorageHullFresh::AppendixPerf |83.0%| [LD] {RESULT} $(B)/ydb/core/mind/bscontroller/ut_selfheal/ydb-core-mind-bscontroller-ut_selfheal |83.0%| [TM] {BAZEL_UPLOAD} ydb/core/blobstorage/vdisk/ingress/ut/unittest |83.0%| [LD] {RESULT} $(B)/ydb/core/blobstorage/ut_blobstorage/ut_read_only_vdisk/ut_blobstorage-ut_read_only_vdisk >> TBsLocalRecovery::WriteRestartReadHugeDecreased [GOOD] >> TBsOther1::PoisonPill >> TBlobStorageGroupInfoIterTest::IteratorForwardAndBackward |83.0%| [TM] {BAZEL_UPLOAD} ydb/core/blobstorage/vdisk/ingress/ut/unittest |83.0%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/mind/bscontroller/ut_selfheal/ydb-core-mind-bscontroller-ut_selfheal |83.0%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/blobstorage/ut_blobstorage/ut_read_only_vdisk/ut_blobstorage-ut_read_only_vdisk |83.0%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/vdisk/hulldb/compstrat/ut/unittest |83.0%| [TA] {RESULT} $(B)/ydb/library/yql/providers/common/http_gateway/ut/test-results/unittest/{meta.json ... results_accumulator.log} |83.0%| [TM] {BAZEL_UPLOAD} ydb/core/blobstorage/ut_vdisk2/unittest >> TFreshAppendixTest::IterateBackwardAll [GOOD] >> TFreshAppendixTest::IterateBackwardExcluding [GOOD] >> TBlobStorageGroupInfoIterTest::IteratorForwardAndBackward [GOOD] >> TBlobStorageGroupInfoIterTest::PerFailDomainRange [GOOD] |83.0%| [TM] {BAZEL_UPLOAD} ydb/core/blobstorage/vdisk/hulldb/compstrat/ut/unittest |83.1%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/vdisk/hulldb/fresh/ut/unittest >> TSVecTest::Basic [GOOD] |83.1%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/ut_blobstorage/ut_restart_pdisk/unittest |83.1%| [TM] {BAZEL_UPLOAD} ydb/core/blobstorage/vdisk/hulldb/fresh/ut/unittest |83.1%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/vdisk/ingress/ut/unittest >> TBlobStorageIngressMatrix::ShiftedHandoffBitVec [GOOD] >> BSCRestartPDisk::RestartBrokenDiskInBrokenGroup >> BSCRestartPDisk::RestartGoodDiskInBrokenGroupNotAllowed >> BSCRestartPDisk::RestartOneByOneWithReconnects |83.1%| [TM] {BAZEL_UPLOAD} ydb/core/blobstorage/ut_blobstorage/ut_restart_pdisk/unittest
------- [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/ut_vdisk/unittest >> TBsVDiskExtreme::Simple3Put1GetMissingKeyCompaction [GOOD]
Test command err: 2026-01-08T15:23:28.119034Z :BS_VDISK_PUT ERROR: blobstorage_skeleton.cpp:572: PDiskId# 1 VDISK[0:_:0:0:0]: (0) TEvVPut: TabletID cannot be empty; id# [0:1:10:0:0:10:1] Marker# BSVS43
2026-01-08T15:23:31.610792Z :BS_VDISK_OTHER ERROR: vdisk_context.h:143: PDiskId# 1 VDISK[0:_:0:0:0]: (0) CheckPDiskResponse: Recoverable error from PDisk: Status# 'CORRUPTED' StatusFlags# 'None' ErrorReason# 'PDiskId# 1 TEvLog error because PDisk State# Error, there is a terminal internal error in PDisk. Did you check EvYardInit result? Marker# BSY07 StateErrorReason# PDisk is in StateError, reason# Received TEvYardControl::Brake'
2026-01-08T15:23:31.610921Z :BS_SKELETON ERROR: blobstorage_skeletonfront.cpp:1755: PDiskId# 1 VDISK[0:_:0:0:0]: (0) SkeletonFront: got TEvPDiskErrorStateChange;State# NoWrites, PDiskError# PDiskId# 1 TEvLog error because PDisk State# Error, there is a terminal internal error in PDisk. Did you check EvYardInit result? Marker# BSY07 StateErrorReason# PDisk is in StateError, reason# Received TEvYardControl::Brake Marker# BSVSF03
=================================================================
==26225==ERROR: LeakSanitizer: detected memory leaks
Direct leak of 160 byte(s) in 1 object(s) allocated from:
    #0 0x000002b7f21d in operator new(unsigned long) /-S/contrib/libs/clang20-rt/lib/asan/asan_new_delete.cpp:86:3
    #1 0x0000075c2693 in NKikimr::NPDisk::TPDisk::LogFlush(NKikimr::NPDisk::TCompletionAction*, TVector>*, NKikimr::NPDisk::TReqId, NWilson::TTraceId*) /-S/ydb/core/blobstorage/pdisk/blobstorage_pdisk_impl_log.cpp:1151:50
    #2 0x0000075b9e5e in NKikimr::NPDisk::TPDisk::ProcessLogWriteBatch(TVector>, TVector>) /-S/ydb/core/blobstorage/pdisk/blobstorage_pdisk_impl_log.cpp:923:5
    #3 0x0000075b7b52 in NKikimr::NPDisk::TPDisk::ProcessLogWriteQueue() /-S/ydb/core/blobstorage/pdisk/blobstorage_pdisk_impl_log.cpp:874:9
    #4 0x0000072c5371 in NKikimr::NPDisk::TPDisk::Update() /-S/ydb/core/blobstorage/pdisk/blobstorage_pdisk_impl.cpp:4009:13
    #5 0x0000072d18f5 in Exec /-S/ydb/core/blobstorage/pdisk/blobstorage_pdisk_thread.h:33:19
    #6 0x0000072d18f5 in NKikimr::NPDisk::TPDiskThread::ThreadProc(void*) /-S/ydb/core/blobstorage/pdisk/blobstorage_pdisk_thread.h:27:44
    #7 0x000002e9e374 in (anonymous namespace)::TPosixThread::ThreadProxy(void*) /-S/util/system/thread.cpp:245:20
    #8 0x000002b42d36 in asan_thread_start(void*) /-S/contrib/libs/clang20-rt/lib/asan/asan_interceptors.cpp:239:28
Indirect leak of 4176 byte(s) in 8 object(s) allocated from:
    #0 0x000002b7f21d in operator new(unsigned long) /-S/contrib/libs/clang20-rt/lib/asan/asan_new_delete.cpp:86:3
    #1 0x000002ca9db8 in __libcpp_operator_new /-S/contrib/libs/cxxsupp/libcxx/include/new:271:10
    #2 0x000002ca9db8 in __libcpp_allocate /-S/contrib/libs/cxxsupp/libcxx/include/new:295:10
    #3 0x000002ca9db8 in allocate /-S/contrib/libs/cxxsupp/libcxx/include/__memory/allocator.h:103:32
    #4 0x000002ca9db8 in __allocate_at_least > /-S/contrib/libs/cxxsupp/libcxx/include/__memory/allocate_at_least.h:41:19
    #5 0x000002ca9db8 in __grow_by_and_replace /-S/contrib/libs/cxxsupp/libcxx/include/string:2526:23
    #6 0x000002ca9db8 in std::__y1::basic_string, std::__y1::allocator>::append(char const*, unsigned long) /-S/contrib/libs/cxxsupp/libcxx/include/string:2885:5
    #7 0x0000038

15447 in Write /-S/util/stream/output.h:74:13
    #8 0x000003815447 in google::protobuf::io::TOutputStreamProxy::Write(void const*, int) /-S/contrib/libs/protobuf/src/google/protobuf/messagext.cc:92:17
    #9 0x0000037d49fd in WriteBuffer /-S/contrib/libs/protobuf/src/google/protobuf/io/zero_copy_stream_impl_lite.cc:400:24
    #10 0x0000037d49fd in google::protobuf::io::CopyingOutputStreamAdaptor::~CopyingOutputStreamAdaptor() /-S/contrib/libs/protobuf/src/google/protobuf/io/zero_copy_stream_impl_lite.cc:313:3
    #11 0x0000037fa14f in google::protobuf::Message::SerializeToArcadiaStream(IOutputStream*) const /-S/contrib/libs/protobuf/src/google/protobuf/message.cc:211:3
    #12 0x0000086d453f in NKikimr::TSyncerDataSerializer::Serialize() const /-S/ydb/core/blobstorage/vdisk/syncer/blobstorage_syncer_data.cpp:247:15
    #13 0x0000089ae67a in Serialize /-S/ydb/core/blobstorage/vdisk/syncer/blobstorage_syncer_committer.cpp:90:24
    #14 0x0000089ae67a in NKikimr::TSyncerCommitter::GenerateCommit(NActors::TActorContext const&) /-S/ydb/core/blobstorage/vdisk/syncer/blobstorage_syncer_committer.cpp:146:40
    #15 0x0000089adbab in NKikimr::TSyncerCommitter::Handle(TAutoPtr, TDelete>&, NActors::TActorContext const&) /-S/ydb/core/blobstorage/vdisk/syncer/blobstorage_syncer_committer.cpp:194:17
    #16 0x0000089ad06e in NKikimr::TSyncerCommitter::StateFunc(TAutoPtr&) /-S/ydb/core/blobstorage/vdisk/syncer/blobstorage_syncer_committer.cpp:244:9
    #17 0x000004144397 in NActors::IActor::Receive(TAutoPtr&) /-S/ydb/library/actors/core/actor.cpp:350:17
    #18 0x0000042e6df1 in NActors::TExecutorThread::Execute(NActors::TMailbox*, bool) /-S/ydb/library/actors/core/executor_thread.cpp:267:28
    #19 0x0000042f0a16 in NActors::TExecutorThread::ProcessExecutorPool()::$_0::operator()(NActors::TMailbox*, bool) const /-S/ydb/library/actors/core/executor_thread.cpp:455:39
    #20 0x0000042effcd in NActors::TExecutorThread::ProcessExecutorPool() /-S/ydb/library/actors/core/executor_thread.cpp:507:13
    #21 0x0000042f203e in NActors::TExecutorThread::ThreadProc() /-S/ydb/library/actors/core/executor_thread.cpp:533:9
    #22 0x000002e9e374 in (anonymous namespace)::TPosixThread::ThreadProxy(void*) /-S/util/system/thread.cpp:245:20
    #23 0x000002b42d36 in asan_thread_start(void*) /-S/contrib/libs/clang20-rt/lib/asan/asan_interceptors.cpp:239:28
Indirect leak of 3200 byte(s) in 8 object(s) allocated from:
    #0 0x000002b7f21d in operator new(unsigned long) /-S/contrib/libs/clang20-rt/lib/asan/asan_new_delete.cpp:86:3
    #1 0x0000072cbf83 in NKikimr::NPDisk::TReqCreator::CreateLogWrite(NKikimr::NPDisk::TEvLog&, NActors::TActorId const&, double&, NWilson::TTraceId) /-S/ydb/core/blobstorage/pdisk/blobstorage_pdisk_req_creator.h:247:27
    #2 0x000007d4ebc6 in NKikimr::NPDisk::TPDiskActor::Handle(TAutoPtr, TDelete>&) /-S/ydb/core/blobstorage/pdisk/blobstorage_pdisk_actor.cpp:927:48
    #3 0x000007d4e32f in NKikimr::NPDisk::TPDiskActor::StateOnline(TAutoPtr&) /-S/ydb/core/blobstorage/pdisk/blobstorage_pdisk_actor.cpp:1507:5
    #4 0x000004144397 in NActors::IActor::Receive(TAutoPtr&) /-S/ydb/library/actors/core/actor.cpp:350:17
    #5 0x0000042e6df1 in NActors::TExecutorThread::Execute(NActors::TMailbox*, bool) /-S/ydb/library/actors/core/executor_thread.cpp:267:28
    #6 0x0000042f0a16 in NActors::TExecutorThread::ProcessExecutorPool()::$_0::operator()(NActors::TMailbox*, bool) const /-S/ydb/library/actors/core/executor_thread.cpp:455:39
    #7 0x0000042effcd in NActors::TExecutorThread::ProcessExecutorPool() /-S/ydb/library/actors/core/executor_thread.cpp:507:13
    #8 0x0000042f203e in NActors::TExecutorThread::ThreadProc() /-S/ydb/library/actors/core/executor_thread.cpp:533:9
    #9 0x000002e9e374 in (anonymous namespace)::TPosixThread::ThreadProxy(void*) /-S/util/system/thread.cpp:245:20
    #10 0x000002b42d36 in asan_thread_start(void*) /-S/contrib/libs/clang20-rt/lib/asan/asan_interceptors.cpp:239:28
Indirect leak of 448 byte(s) in 8 object(s) allocated from:
    #0 0x000002b7f21d in operator new(unsigned long) /-S/contrib/libs/clang20-rt/lib/asan/asan_new_delete.cpp:86:3
    #1 0x0000075c0a87 in NKikimr::NPDisk::TPDisk::LogWrite(NKikimr::NPDisk::TLogWrite&, TVector>&) /-S/ydb/core/blobstorage/pdisk/blobstorage_pdisk_impl_log.cpp:1120:24
    #2 0x0000075b8913 in NKikimr::NPDisk::TPDisk::ProcessLogWriteBatch(TVector>, TVector>) /-S/ydb/core/blobstorage/pdisk/blobstorage_pdisk_impl_log.cpp:905:13
    #3 0x0000075b7b52 in NKikimr::NPDisk::TPDisk::ProcessLogWriteQueue() /-S/ydb/core/blobstorage/pdisk/blobstorage_pdisk_impl_log.cpp:874:9
    #4 0x0000072c5371 in NKikimr::NPDisk::TPDisk::Update() /-S/ydb/core/blobstorage/pdisk/blobstorage_pdisk_impl.cpp:4009:13
    #5 0x0000072d18f5 in Exec /-S/ydb/core/blobstorage/pdisk/blobstorage_pdisk_thread.h:33:19
    #6 0x0000072d18f5 in NKikimr::NPDisk::TPDiskThread::ThreadProc(void*) /-S/ydb/core/blobstorage/pdisk/blobstorage_pdisk_thread.h:27:44
    #7 0x000002e9e374 in (anonymous namespace)::TPosixThread::ThreadProxy(void*) /-S/util/system/thread.cpp:245:20
    #8 0x000002b42d36 in asan_thread_start(void*) /-S/contrib/libs/clang20-rt/lib/asan/asan_interceptors.cpp:239:28
Indirect leak of 320 byte(s) in 8 object(s) allocated from:
    #0 0x000002b7f21d in operator new(unsigned long) /-S/contrib/libs/clang20-rt/lib/asan/asan_new_delete.cpp:86:3
    #1 0x000007253e46 in NKikimr::NPDisk::TOwnerData::Reset(bool) /-S/ydb/core/blobstorage/pdisk/blobstorage_pdisk_state.h:253:58
    #2 0x000007250348 in NKikimr::NPDisk::TPDisk::YardInitFinish(NKikimr::NPDisk::TYardInit&) /-S/ydb/core/blobstorage/pdisk/blobstorage_pdisk_impl.cpp:2064:19
    #3 0x0000072b8daa in NKikimr::NPDisk::TPDisk::ProcessYardInitSet() /-S/ydb/core/blobstorage/pdisk/blobstorage_pdisk_impl.cpp:3724:17
    #4 0x0000072c5535 in NKikimr::NPDisk::TPDisk::Update() /-S/ydb/core/blobstorage/pdisk/blobstorage_pdisk_impl.cpp:4015:5
    #5 0x0000072d18f5 in Exec /-S/ydb/core/blobstorage/pdisk/blobstorage_pdisk_thread.h:33:19
    #6 0x0000072d18f5 in NKikimr::NPDisk::TPDiskThread::ThreadProc(void*) /-S/ydb/core/blobstorage/pdisk/blobstorage_pdisk_thread.h:27:44
    #7 0x000002e9e374 in (anonymous namespace)::TPosixThread::ThreadProxy(void*) /-S/util/system/thread.cpp:245:20
    #8 0x000002b42d36 in asan_thread_start(void*) /-S/contrib/libs/clang20-rt/lib/asan/asan_interceptors.cpp:239:28
Indirect leak of 256 byte(s) in 8 object(s) allocated from:
    #0 0x000002b7f21d in operator new(unsigned long) /-S/contrib/libs/clang20-rt/lib/asan/asan_new_delete.cpp:86:3
    #1 0x00000293bc10 in TIntrusivePtr, std::__y1::allocator>>, TStringPtrOps, std::__y1::allocator>>>> TBasicString>::Construct, std::__y1::allocator>>&>(TStdString, std::__y1::allocator>>&) /-S/util/generic/string.h:212:17
    #2 0x00000293baed in TBasicString>::Clone() /-S/util/generic/string.h:233:9
    #3 0x000002da31dc in Detach /-S/util/generic/string.h:381:13
    #4 0x000002da31dc in MutRef /-S/util/generic/string.h:254:9
    #5 0x000002da31dc in append /-S/util/generic/string.h:804:9
    #6 0x000002da31dc in TStringOutput::DoWrite(void const*, unsigned long) /-S/util/stream/str.cpp:39:9
    #7 0x0000086d4530 in Write /-S/util/stream/output.h:74:13
    #8 0x0000086d4530 in NKikimr::TSyncerDataSerializer::Serialize() const /-S/ydb/core/blobstorage/vdisk/syncer/blobstorage_syncer_data.cpp:246:13
    #9 0x0000089ae67a in Serialize /-S/ydb/core/blobstorage/vdisk/syncer/blobstorage_syncer_committer.cpp:90:24
    #10 0x0000089ae67a in NKikimr::TSyncerCommitter::GenerateCommit(NActors::TActorContext const&) /-S/ydb/core/blobstorage/vdisk/syncer/blobstorage_syncer_committer.cpp:146:40
    #11 0x0000089adbab in NKikimr::TSyncerCommitter::Handle(TAutoPtr, TDelete>&, NActors::TActorContext const&) /-S/ydb/core/blobstorage/vdisk/syncer/blobstorage_syncer_committer.cpp:194:17
    #12 0x0000089ad06e in NKikimr::TSyncerCommitter::StateFunc(TAutoPtr&) /-S/ydb/core/blobstorage/vdisk/syncer/blobstorage_syncer_committer.cpp:244:9
    #13 0x000004144397 in NActors::IActor::Receive(TAutoPtr&) /-S/ydb/library/actors/core/actor.cpp:350:17
    #14 0x0000042e6df1 in NActors::TExecutorThread::Execute(NActors::TMailbox*, bool) /-S/ydb/library/actors/core/executor_thread.cpp:267:28
    #15 0x0000042f0a16 in NActors::TExecutorThread::ProcessExecutorPool()::$_0::operator()(NActors::TMailbox*, bool) const /-S/ydb/library/actors/core/executor_thread.cpp:455:39
    #16 0x0000042effcd in NActors::TExecutorThread::ProcessExecutorPool() /-S/ydb/library/actors/core/executor_thread.cpp:507:13
    #17 0x0000042f203e in NActors::TExecutorThread::ThreadProc() /-S/ydb/library/actors/core/executor_thread.cpp:533:9
    #18 0x000002e9e374 in (anonymous namespace)::TPosixThread::ThreadProxy(void*) /-S/util/system/thread.cpp:245:20
    #19 0x000002b42d36 in asan_thread_start(void*) /-S/contrib/libs/clang20-rt/lib/asan/asan_interceptors.cpp:239:28
Indirect leak of 216 byte(s) in 1 object(s) allocated from:
    #0 0x000002b7f21d in operator new(unsigned long) /-S/contrib/libs/clang20-rt/lib/asan/asan_new_delete.cpp:86:3
    #1 0x0000075b9bb7 in MakeHolder >, TVector >, TVector >, void> /-S/util/generic/ptr.h:386:23
    #2 0x0000075b9bb7 in NKikimr::NPDisk::TPDisk::ProcessLogWriteBatch(TVector>, TVector>) /-S/ydb/core/blobstorage/pdisk/blobstorage_pdisk_impl_log.cpp:921:18
    #3 0x0000075b7b52 in NKikimr::NPDisk::TPDisk::ProcessLogWriteQueue() /-S/ydb/core/blobstorage/pdisk/blobstorage_pdisk_impl_log.cpp:874:9
    #4 0x0000072c5371 in NKikimr::NPDisk::TPDisk::Update() /-S/ydb/core/blobstorage/pdisk/blobstorage_pdisk_impl.cpp:4009:13
    #5 0x0000072d18f5 in Exec /-S/ydb/core/blobstorage/pdisk/blobstorage_pdisk_thread.h:33:19
    #6 0x0000072d18f5 in NKikimr::NPDisk::TPDiskThread::ThreadProc(void*) /-S/ydb/core/blobstorage/pdisk/blobstorage_pdisk_thread.h:27:44
    #7 0x000002e9e374 in (anonymous namespace)::TPosixThread::ThreadProxy(void*) /-S/util/system/thread.cpp:245:20
    #8 0x000002b42d36 in asan_thread_start(void*) /-S/contrib/libs/clang20-rt/lib/asan/asan_interceptors.cpp:239:28
Indirect leak of 192 byte(s) in 8 object(s) allocated from:
    #0 0x000002b7f21d in operator new(unsigned long) /-S/contrib/libs/clang20-rt/lib/asan/asan_new_delete.cpp:86:3
    #1 0x0000072f8a63 in __libcpp_operator_new /-S/contrib/libs/cxxsupp/libcxx/include/new:271:10
    #2 0x0000072f8a63 in __libcpp_allocate /-S/contrib/libs/cxxsupp/libcxx/include/new:295:10
    #3 0x0000072f8a63 in allocate /-S/contrib/libs/cxxsupp/libcxx/include/__memory/allocator.h:103:32
    #4 0x0000072f8a63 in __allocate_at_least > /-S/contrib/libs/cxxsupp/libcxx/include/__memory/allocate_at_least.h:41:19
    #5 0x0000072f8a63 in __split_buffer /-S/contrib/libs/cxxsupp/libcxx/include/__split_buffer:354:25
    #6 0x0000072f8a63 in NKikimr::NPDisk::TEvLogResult::TRecord* std::__y1::vector>::__push_back_slow_path(NKikimr::NPDisk::TEvLogResult::TRecord&&) /-S/contrib/libs/cxxsupp/libcxx/include/vector:1541:47
    #7 0x0000075c0d2c in push_back /-S/contrib/libs/cxxsupp/libcxx/include/vector:1569:13
    #8 0x0000075c0d2c in NKikimr::NPDisk::TPDisk::LogWrite(NKikimr::NPDisk::TLogWrite&, TVector>&) /-S/ydb/core/blobstorage/pdisk/blobstorage_pdisk_impl_log.cpp:1123:27
    #9 0x0000075b8913 in NKikimr::NPDisk::TPDisk::ProcessLogWriteBatch(TVector>, TVector>) /-S/ydb/core/blobstorage/pdisk/blobstorage_pdisk_impl_log.cpp:905:13
    #10 0x0000075b7b52 in NKikimr::NPDisk::TPDisk::ProcessLogWriteQueue() /-S/ydb/core/blobstorage/pdisk/blobstorage_pdisk_impl_log.cpp:874:9
    #11 0x0000072c5371 in NKikimr::NPDisk::TPDisk::Update() /-S/ydb/core/blobstorage/pdisk/blobstorage_pdisk_impl.cpp:4009:13
    #12 0x0000072d18f5 in Exec /-S/ydb/core/blobstorage/pdisk/blobstorage_pdisk_thread.h:33:19
    #13 0x0000072d18f5 in NKikimr::NPDisk::TPDiskThread::ThreadProc(void*) /-S/ydb/core/blobstorage/pdisk/blobstorage_pdisk_thread.h:27:44
    #14 0x000002e9e374 in (anonymous namespace)::TPosixThread::ThreadProxy(void*) /-S/util/system/thread.cpp:245:20
    #15 0x000002b42d36 in asan_thread_start(void*) /-S/contrib/libs/clang20-rt/lib/asan/asan_interceptors.cpp:239:28
Indirect leak of 64 byte(s) in 1 object(s) allocated from:
    #0 0x000002b7f21d in operator new(unsigned long) /-S/contrib/libs/clang20-rt/lib/asan/asan_new_delete.cpp:86:3
    #1 0x0000075b707a in __libcpp_operator_new /-S/contrib/libs/cxxsupp/libcxx/include/new:271:10
    #2 0x0000075b707a in __libcpp_allocate /-S/contrib/libs/cxxsupp/libcxx/include/new:295:10
    #3 0x0000075b707a in allocate /-S/contrib/libs/cxxsupp/libcxx/include/__memory/allocator.h:103:32
    #4 0x0000075b707a in __allocate_at_least > /-S/contrib/libs/cxxsupp/libcxx/include/__memory/allocate_at_least.h:41:19
    #5 0x0000075b707a in __split_buffer /-S/contrib/libs/cxxsupp/libcxx/include/__split_buffer:354:25
    #6 0x0000075b707a in reserve /-S/contrib/libs/cxxsupp/libcxx/include/vector:1511:49
    #7 0x0000075b707a in NKikimr::NPDisk::TPDisk::ProcessLogWriteQueue() /-S/ydb/core/blobstorage/pdisk/blobstorage_pdisk_impl_log.cpp:858:17
    #8 0x0000072c5371 in NKikimr::NPDisk::TPDisk::Update() /-S/ydb/core/blobstorage/pdisk/blobstorage_pdisk_impl.cpp:4009:13
    #9 0x0000072d18f5 in Exec /-S/ydb/core/blobstorage/pdisk/blobstorage_pdisk_thread.h:33:19
    #10 0x0000072d18f5 in NKikimr::NPDisk::TPDiskThread::ThreadProc(void*) /-S/ydb/core/blobstorage/pdisk/blobstorage_pdisk_thread.h:27:44
    #11 0x000002e9e374 in (anonymous namespace)::TPosixThread::ThreadProxy(void*) /-S/util/system/thread.cpp:245:20
    #12 0x000002b42d36 in asan_thread_start(void*) /-S/contrib/libs/clang20-rt/lib/asan/asan_interceptors.cpp:239:28
Indirect leak of 64 byte(s) in 1 object(s) allocated from:
    #0 0x000002b7f21d in operator new(unsigned long) /-S/contrib/libs/clang20-rt/lib/asan/asan_new_delete.cpp:86:3
    #1 0x0000075b6fd5 in __libcpp_operator_new /-S/contrib/libs/cxxsupp/libcxx/include/new:271:10
    #2 0x0000075b6fd5 in __libcpp_allocate /-S/contrib/libs/cxxsupp/libcxx/include/new:295:10
    #3 0x0000075b6fd5 in allocate /-S/contrib/libs/cxxsupp/libcxx/include/__memory/allocator.h:103:32
    #4 0x0000075b6fd5 in __allocate_at_least > /-S/contrib/libs/cxxsupp/libcxx/include/__memory/allocate_at_least.h:41:19
    #5 0x0000075b6fd5 in __split_buffer /-S/contrib/libs/cxxsupp/libcxx/include/__split_buffer:354:25
    #6 0x0000075b6fd5 in reserve /-S/contrib/libs/cxxsupp/libcxx/include/vector:1511:49
    #7 0x0000075b6fd5 in NKikimr::NPDisk::TPDisk::ProcessLogWriteQueue() /-S/ydb/core/blobstorage/pdisk/blobstorage_pdisk_impl_log.cpp:856:19
    #8 0x0000072c5371 in NKikimr::NPDisk::TPDisk::Update() /-S/ydb/core/blobstorage/pdisk/blobstorage_pdisk_impl.cpp:4009:13
    #9 0x0000072d18f5 in Exec /-S/ydb/core/blobstorage/pdisk/blobstorage_pdisk_thread.h:33:19
    #10 0x0000072d18f5 in NKikimr::NPDisk::TPDiskThread::ThreadProc(void*) /-S/ydb/core/blobstorage/pdisk/blobstorage_pdisk_thread.h:27:44
    #11 0x000002e9e374 in (anonymous namespace)::TPosixThread::ThreadProxy(void*) /-S/util/system/thread.cpp:245:20
    #12 0x000002b42d36 in asan_thread_start(void*) /-S/contrib/libs/clang20-rt/lib/asan/asan_interceptors.cpp:239:28
SUMMARY: AddressSanitizer: 9096 byte(s) leaked in 52 allocation(s).
|83.1%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/vdisk/hulldb/fresh/ut/unittest >> TFreshAppendixTest::IterateBackwardExcluding [GOOD] |83.1%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/groupinfo/ut/unittest >> TBlobStorageGroupInfoIterTest::PerFailDomainRange [GOOD] >> BSCRestartPDisk::RestartOneByOne >> TopicNameConverterTest::Paths [GOOD] >> TopicNameConverterTest::PathFromDiscoveryConverter [GOOD] |83.1%| [TM] {BAZEL_UPLOAD} ydb/core/blobstorage/vdisk/ingress/ut/unittest |83.1%| [TM] {BAZEL_UPLOAD} ydb/core/blobstorage/ut_vdisk/unittest |83.1%| [TM] {BAZEL_UPLOAD} ydb/core/blobstorage/vdisk/hulldb/fresh/ut/unittest >> TBlobStorageBlocksCacheTest::LegacyAndModern [GOOD] |83.1%| [TM] {BAZEL_UPLOAD} ydb/core/blobstorage/groupinfo/ut/unittest |83.1%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/ut_blobstorage/ut_restart_pdisk/unittest |83.1%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/ut_blobstorage/ut_restart_pdisk/unittest |83.1%| [TM] {BAZEL_UPLOAD} ydb/core/blobstorage/ut_blobstorage/ut_restart_pdisk/unittest |83.1%| [TM] {BAZEL_UPLOAD} ydb/core/blobstorage/ut_blobstorage/ut_restart_pdisk/unittest |83.1%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/vdisk/hulldb/compstrat/ut/unittest |83.1%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/vdisk/hulldb/compstrat/ut/unittest |83.1%| [TM] {BAZEL_UPLOAD} ydb/core/blobstorage/vdisk/hulldb/compstrat/ut/unittest >> TBlobStorageHullFresh::Perf [GOOD] |83.1%| [TM] {BAZEL_UPLOAD} ydb/core/blobstorage/vdisk/hulldb/compstrat/ut/unittest |83.1%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/vdisk/hulldb/compstrat/ut/unittest |83.1%| [TS] {asan, default-linux-x86_64, release} ydb/library/persqueue/topic_parser/ut/unittest >> TopicNameConverterTest::PathFromDiscoveryConverter [GOOD] |83.1%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/ut_blobstorage/ut_restart_pdisk/unittest >> BSCRestartPDisk::RestartBrokenDiskInBrokenGroup [GOOD] |83.1%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/ut_blobstorage/ut_restart_pdisk/unittest |83.1%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/vdisk/hulldb/cache_block/ut/unittest >> TBlobStorageBlocksCacheTest::LegacyAndModern [GOOD] |83.1%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/vdisk/hulldb/compstrat/ut/unittest >> BSCRestartPDisk::RestartGoodDiskInBrokenGroupNotAllowed [GOOD] >> TCowBTreeTest::RandomInsertInplace [GOOD] >> TCowBTreeTest::RandomInsertThreadSafe |83.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/client/server/msgbus_server_pq_metarequest_ut.cpp |83.1%| [TM] {BAZEL_UPLOAD} ydb/core/blobstorage/vdisk/hulldb/compstrat/ut/unittest |83.1%| [TS] {BAZEL_UPLOAD} ydb/library/persqueue/topic_parser/ut/unittest >> TReplicaTest::Update >> VDiskBalancing::TestStopOneNode_Block42_HugeBlob >> TLockFreeIntrusiveStackTest::ConcurrentAutoNeverEmpty [GOOD] >> TLockFreeIntrusiveStackTest::ConcurrentAutoHeavyContention |83.1%| [TM] {BAZEL_UPLOAD} ydb/core/blobstorage/ut_blobstorage/ut_restart_pdisk/unittest |83.1%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/vdisk/hulldb/fresh/ut/unittest >> TBlobStorageHullFresh::Perf [GOOD] |83.1%| [TM] {BAZEL_UPLOAD} ydb/core/blobstorage/ut_blobstorage/ut_restart_pdisk/unittest |83.1%| [TM] {BAZEL_UPLOAD} ydb/core/blobstorage/vdisk/hulldb/cache_block/ut/unittest |83.1%| [TM] {BAZEL_UPLOAD} 
ydb/core/blobstorage/vdisk/hulldb/compstrat/ut/unittest |83.1%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/vdisk/hulldb/compstrat/ut/unittest >> TReplicaTest::Update [GOOD] >> TReplicaTest::UnsubscribeWithoutSubscribe >> BSCMovePDisk::PDiskMove_Block42 [GOOD] |83.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/client/server/msgbus_server_pq_metarequest_ut.cpp |83.1%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/vdisk/hulldb/compstrat/ut/unittest |83.1%| [TM] {BAZEL_UPLOAD} ydb/core/blobstorage/vdisk/hulldb/fresh/ut/unittest >> TBlobStorageHullFresh::SimpleBackwardEnd [GOOD] >> TBlobStorageHullFresh::SimpleBackWardMiddle2Times [GOOD] |83.1%| [TM] {BAZEL_UPLOAD} ydb/core/blobstorage/vdisk/hulldb/compstrat/ut/unittest |83.2%| [TM] {BAZEL_UPLOAD} ydb/core/blobstorage/vdisk/hulldb/compstrat/ut/unittest >> TBlobStorageCompStrat::Test1 >> TReplicaTest::UnsubscribeWithoutSubscribe [GOOD]
------- [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/ut_blobstorage/ut_restart_pdisk/unittest >> BSCRestartPDisk::RestartBrokenDiskInBrokenGroup [GOOD]
Test command err: RandomSeed# 11121774591044898739
2026-01-08T15:24:03.980820Z 1 00h00m30.010512s :BS_VDISK_OTHER ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) CheckPDiskResponse: Recoverable error from PDisk: Status# 'CORRUPTED' StatusFlags# 'None' ErrorReason# 'Some error reason'
2026-01-08T15:24:03.980944Z 2 00h00m30.010512s :BS_VDISK_OTHER ERROR: PDiskId# 1000 VDISK[82000000:_:0:1:0]: (2181038080) CheckPDiskResponse: Recoverable error from PDisk: Status# 'CORRUPTED' StatusFlags# 'None' ErrorReason# 'Some error reason'
2026-01-08T15:24:03.980984Z 3 00h00m30.010512s :BS_VDISK_OTHER ERROR: PDiskId# 1000 VDISK[82000000:_:0:2:0]: (2181038080) CheckPDiskResponse: Recoverable error from PDisk: Status# 'CORRUPTED' StatusFlags# 'None' ErrorReason# 'Some error reason'
2026-01-08T15:24:03.981017Z 4 00h00m30.010512s :BS_VDISK_OTHER ERROR: PDiskId# 1000 VDISK[82000000:_:0:3:0]: (2181038080) CheckPDiskResponse: Recoverable error from PDisk: Status# 'CORRUPTED' StatusFlags# 'None' ErrorReason# 'Some error reason'
2026-01-08T15:24:03.981051Z 5 00h00m30.010512s :BS_VDISK_OTHER ERROR: PDiskId# 1000 VDISK[82000000:_:0:4:0]: (2181038080) CheckPDiskResponse: Recoverable error from PDisk: Status# 'CORRUPTED' StatusFlags# 'None' ErrorReason# 'Some error reason'
2026-01-08T15:24:03.981092Z 6 00h00m30.010512s :BS_VDISK_OTHER ERROR: PDiskId# 1000 VDISK[82000000:_:0:5:0]: (2181038080) CheckPDiskResponse: Recoverable error from PDisk: Status# 'CORRUPTED' StatusFlags# 'None' ErrorReason# 'Some error reason'
2026-01-08T15:24:03.981126Z 7 00h00m30.010512s :BS_VDISK_OTHER ERROR: PDiskId# 1000 VDISK[82000000:_:0:6:0]: (2181038080) CheckPDiskResponse: Recoverable error from PDisk: Status# 'CORRUPTED' StatusFlags# 'None' ErrorReason# 'Some error reason'
2026-01-08T15:24:03.981159Z 8 00h00m30.010512s :BS_VDISK_OTHER ERROR: PDiskId# 1000 VDISK[82000000:_:0:7:0]: (2181038080) CheckPDiskResponse: Recoverable error from PDisk: Status# 'CORRUPTED' StatusFlags# 'None' ErrorReason# 'Some error reason'
2026-01-08T15:24:03.982340Z 1 00h00m30.010512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) SkeletonFront: got TEvPDiskErrorStateChange;State# NoWrites, PDiskError# Some error reason Marker# BSVSF03
2026-01-08T15:24:03.982419Z 2 00h00m30.010512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:1:0]: (2181038080) SkeletonFront: got TEvPDiskErrorStateChange;State# NoWrites, PDiskError# Some error reason Marker# BSVSF03
2026-01-08T15:24:03.982486Z 3 00h00m30.010512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:2:0]: (2181038080) SkeletonFront: got TEvPDiskErrorStateChange;State# NoWrites, PDiskError# Some error reason Marker# BSVSF03
2026-01-08T15:24:03.982536Z 4 00h00m30.010512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:3:0]: (2181038080) SkeletonFront: got TEvPDiskErrorStateChange;State# NoWrites, PDiskError# Some error reason Marker# BSVSF03
2026-01-08T15:24:03.982583Z 5 00h00m30.010512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:4:0]: (2181038080) SkeletonFront: got TEvPDiskErrorStateChange;State# NoWrites, PDiskError# Some error reason Marker# BSVSF03
2026-01-08T15:24:03.982644Z 6 00h00m30.010512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:5:0]: (2181038080) SkeletonFront: got TEvPDiskErrorStateChange;State# NoWrites, PDiskError# Some error reason Marker# BSVSF03
2026-01-08T15:24:03.982690Z 7 00h00m30.010512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:6:0]: (2181038080) SkeletonFront: got TEvPDiskErrorStateChange;State# NoWrites, PDiskError# Some error reason Marker# BSVSF03
2026-01-08T15:24:03.982736Z 8 00h00m30.010512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:7:0]: (2181038080) SkeletonFront: got TEvPDiskErrorStateChange;State# NoWrites, PDiskError# Some error reason Marker# BSVSF03
2026-01-08T15:24:03.982811Z 1 00h00m30.010512s :BS_VDISK_OTHER ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) CheckPDiskResponse: Recoverable error from PDisk: Status# 'CORRUPTED' StatusFlags# 'None' ErrorReason# 'Some error reason'
2026-01-08T15:24:03.982864Z 6 00h00m30.010512s :BS_VDISK_OTHER ERROR: PDiskId# 1000 VDISK[82000000:_:0:5:0]: (2181038080) CheckPDiskResponse: Recoverable error from PDisk: Status# 'CORRUPTED' StatusFlags# 'None' ErrorReason# 'Some error reason'
2026-01-08T15:24:03.982907Z 7 00h00m30.010512s :BS_VDISK_OTHER ERROR: PDiskId# 1000 VDISK[82000000:_:0:6:0]: (2181038080) CheckPDiskResponse: Recoverable error from PDisk: Status# 'CORRUPTED' StatusFlags# 'None' ErrorReason# 'Some error reason'
2026-01-08T15:24:03.982944Z 8 00h00m30.010512s :BS_VDISK_OTHER ERROR: PDiskId# 1000 VDISK[82000000:_:0:7:0]: (2181038080) CheckPDiskResponse: Recoverable error from PDisk: Status# 'CORRUPTED' StatusFlags# 'None' ErrorReason# 'Some error reason'
2026-01-08T15:24:03.982978Z 2 00h00m30.010512s :BS_VDISK_OTHER ERROR: PDiskId# 1000 VDISK[82000000:_:0:1:0]: (2181038080) CheckPDiskResponse: Recoverable error from PDisk: Status# 'CORRUPTED' StatusFlags# 'None' ErrorReason# 'Some error reason'
2026-01-08T15:24:03.983013Z 3 00h00m30.010512s :BS_VDISK_OTHER ERROR: PDiskId# 1000 VDISK[82000000:_:0:2:0]: (2181038080) CheckPDiskResponse: Recoverable error from PDisk: Status# 'CORRUPTED' StatusFlags# 'None' ErrorReason# 'Some error reason'
2026-01-08T15:24:03.983058Z 4 00h00m30.010512s :BS_VDISK_OTHER ERROR: PDiskId# 1000 VDISK[82000000:_:0:3:0]: (2181038080) CheckPDiskResponse: Recoverable error from PDisk: Status# 'CORRUPTED' StatusFlags# 'None' ErrorReason# 'Some error reason'
2026-01-08T15:24:03.983097Z 5 00h00m30.010512s :BS_VDISK_OTHER ERROR: PDiskId# 1000 VDISK[82000000:_:0:4:0]: (2181038080) CheckPDiskResponse: Recoverable error from PDisk: Status# 'CORRUPTED' StatusFlags# 'None' ErrorReason# 'Some error reason'
2026-01-08T15:24:03.986276Z 1 00h00m30.010512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) SkeletonFront: got TEvPDiskErrorStateChange;State# NoWrites, PDiskError# Some error reason Marker# BSVSF03
2026-01-08T15:24:03.986360Z 6 00h00m30.010512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:5:0]: (2181038080) SkeletonFront: got TEvPDiskErrorStateChange;State# NoWrites, PDiskError# Some error reason Marker# BSVSF03
2026-01-08T15:24:03.986405Z 7 00h00m30.010512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:6:0]: (2181038080) SkeletonFront: got TEvPDiskErrorStateChange;State# NoWrites, PDiskError# Some error reason Marker# BSVSF03
2026-01-08T15:24:03.986446Z 8 00h00m30.010512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:7:0]: (2181038080) SkeletonFront: got TEvPDiskErrorStateChange;State# NoWrites, PDiskError# Some error reason Marker# BSVSF03
2026-01-08T15:24:03.986488Z 2 00h00m30.010512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:1:0]: (2181038080) SkeletonFront: got TEvPDiskErrorStateChange;State# NoWrites, PDiskError# Some error reason Marker# BSVSF03
2026-01-08T15:24:03.986553Z 3 00h00m30.010512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:2:0]: (2181038080) SkeletonFront: got TEvPDiskErrorStateChange;State# NoWrites, PDiskError# Some error reason Marker# BSVSF03
2026-01-08T15:24:03.986597Z 4 00h00m30.010512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:3:0]: (2181038080) SkeletonFront: got TEvPDiskErrorStateChange;State# NoWrites, PDiskError# Some error reason Marker# BSVSF03
2026-01-08T15:24:03.986641Z 5 00h00m30.010512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:4:0]: (2181038080) SkeletonFront: got TEvPDiskErrorStateChange;State# NoWrites, PDiskError# Some error reason Marker# BSVSF03
|83.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/driver_lib/run/kikimr_services_initializers.cpp |83.1%| [TM] {BAZEL_UPLOAD} ydb/core/blobstorage/ut_blobstorage/ut_restart_pdisk/unittest |83.1%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/vdisk/hulldb/compstrat/ut/unittest |83.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/driver_lib/run/kikimr_services_initializers.cpp |83.2%| [TM] {BAZEL_UPLOAD} ydb/core/blobstorage/vdisk/hulldb/compstrat/ut/unittest
------- [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/ut_blobstorage/ut_restart_pdisk/unittest >> BSCRestartPDisk::RestartGoodDiskInBrokenGroupNotAllowed [GOOD]
Test command err: RandomSeed# 18141554055761697728
2026-01-08T15:24:04.117933Z 1 00h00m30.010512s :BS_VDISK_OTHER ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) CheckPDiskResponse: Recoverable error from PDisk: Status# 'CORRUPTED' StatusFlags# 'None' ErrorReason# 'Some error reason'
2026-01-08T15:24:04.118047Z 2 00h00m30.010512s :BS_VDISK_OTHER ERROR: PDiskId# 1000 VDISK[82000000:_:0:1:0]: (2181038080) CheckPDiskResponse: Recoverable error from PDisk: Status# 'CORRUPTED' StatusFlags# 'None' ErrorReason# 'Some error reason'
2026-01-08T15:24:04.118109Z 3 00h00m30.010512s :BS_VDISK_OTHER ERROR: PDiskId# 1000 VDISK[82000000:_:0:2:0]: (2181038080) CheckPDiskResponse: Recoverable error from PDisk: Status# 'CORRUPTED' StatusFlags# 'None' ErrorReason# 'Some error reason'
2026-01-08T15:24:04.118175Z 4 00h00m30.010512s :BS_VDISK_OTHER ERROR: PDiskId# 1000 VDISK[82000000:_:0:3:0]: (2181038080) CheckPDiskResponse: Recoverable error from PDisk: Status# 'CORRUPTED' StatusFlags# 'None' ErrorReason# 'Some error reason'
2026-01-08T15:24:04.118218Z 5 00h00m30.010512s :BS_VDISK_OTHER ERROR: PDiskId# 1000 VDISK[82000000:_:0:4:0]: (2181038080) CheckPDiskResponse: Recoverable error from PDisk: Status# 'CORRUPTED' StatusFlags# 'None' ErrorReason# 'Some error reason'
2026-01-08T15:24:04.118254Z 6 
00h00m30.010512s :BS_VDISK_OTHER ERROR: PDiskId# 1000 VDISK[82000000:_:0:5:0]: (2181038080) CheckPDiskResponse: Recoverable error from PDisk: Status# 'CORRUPTED' StatusFlags# 'None' ErrorReason# 'Some error reason' 2026-01-08T15:24:04.118290Z 7 00h00m30.010512s :BS_VDISK_OTHER ERROR: PDiskId# 1000 VDISK[82000000:_:0:6:0]: (2181038080) CheckPDiskResponse: Recoverable error from PDisk: Status# 'CORRUPTED' StatusFlags# 'None' ErrorReason# 'Some error reason' 2026-01-08T15:24:04.119810Z 1 00h00m30.010512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) SkeletonFront: got TEvPDiskErrorStateChange;State# NoWrites, PDiskError# Some error reason Marker# BSVSF03 2026-01-08T15:24:04.119896Z 2 00h00m30.010512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:1:0]: (2181038080) SkeletonFront: got TEvPDiskErrorStateChange;State# NoWrites, PDiskError# Some error reason Marker# BSVSF03 2026-01-08T15:24:04.119955Z 3 00h00m30.010512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:2:0]: (2181038080) SkeletonFront: got TEvPDiskErrorStateChange;State# NoWrites, PDiskError# Some error reason Marker# BSVSF03 2026-01-08T15:24:04.120012Z 4 00h00m30.010512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:3:0]: (2181038080) SkeletonFront: got TEvPDiskErrorStateChange;State# NoWrites, PDiskError# Some error reason Marker# BSVSF03 2026-01-08T15:24:04.120060Z 5 00h00m30.010512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:4:0]: (2181038080) SkeletonFront: got TEvPDiskErrorStateChange;State# NoWrites, PDiskError# Some error reason Marker# BSVSF03 2026-01-08T15:24:04.120107Z 6 00h00m30.010512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:5:0]: (2181038080) SkeletonFront: got TEvPDiskErrorStateChange;State# NoWrites, PDiskError# Some error reason Marker# BSVSF03 2026-01-08T15:24:04.120161Z 7 00h00m30.010512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:6:0]: (2181038080) SkeletonFront: got TEvPDiskErrorStateChange;State# NoWrites, PDiskError# Some error reason Marker# BSVSF03 2026-01-08T15:24:04.120235Z 1 00h00m30.010512s :BS_VDISK_OTHER ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) CheckPDiskResponse: Recoverable error from PDisk: Status# 'CORRUPTED' StatusFlags# 'None' ErrorReason# 'Some error reason' 2026-01-08T15:24:04.120299Z 6 00h00m30.010512s :BS_VDISK_OTHER ERROR: PDiskId# 1000 VDISK[82000000:_:0:5:0]: (2181038080) CheckPDiskResponse: Recoverable error from PDisk: Status# 'CORRUPTED' StatusFlags# 'None' ErrorReason# 'Some error reason' 2026-01-08T15:24:04.120364Z 7 00h00m30.010512s :BS_VDISK_OTHER ERROR: PDiskId# 1000 VDISK[82000000:_:0:6:0]: (2181038080) CheckPDiskResponse: Recoverable error from PDisk: Status# 'CORRUPTED' StatusFlags# 'None' ErrorReason# 'Some error reason' 2026-01-08T15:24:04.120438Z 2 00h00m30.010512s :BS_VDISK_OTHER ERROR: PDiskId# 1000 VDISK[82000000:_:0:1:0]: (2181038080) CheckPDiskResponse: Recoverable error from PDisk: Status# 'CORRUPTED' StatusFlags# 'None' ErrorReason# 'Some error reason' 2026-01-08T15:24:04.120474Z 3 00h00m30.010512s :BS_VDISK_OTHER ERROR: PDiskId# 1000 VDISK[82000000:_:0:2:0]: (2181038080) CheckPDiskResponse: Recoverable error from PDisk: Status# 'CORRUPTED' StatusFlags# 'None' ErrorReason# 'Some error reason' 2026-01-08T15:24:04.120533Z 4 00h00m30.010512s :BS_VDISK_OTHER ERROR: PDiskId# 1000 VDISK[82000000:_:0:3:0]: (2181038080) CheckPDiskResponse: Recoverable error from PDisk: Status# 'CORRUPTED' StatusFlags# 'None' ErrorReason# 'Some error reason' 2026-01-08T15:24:04.120572Z 5 
00h00m30.010512s :BS_VDISK_OTHER ERROR: PDiskId# 1000 VDISK[82000000:_:0:4:0]: (2181038080) CheckPDiskResponse: Recoverable error from PDisk: Status# 'CORRUPTED' StatusFlags# 'None' ErrorReason# 'Some error reason' 2026-01-08T15:24:04.127881Z 1 00h00m30.010512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) SkeletonFront: got TEvPDiskErrorStateChange;State# NoWrites, PDiskError# Some error reason Marker# BSVSF03 2026-01-08T15:24:04.127986Z 6 00h00m30.010512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:5:0]: (2181038080) SkeletonFront: got TEvPDiskErrorStateChange;State# NoWrites, PDiskError# Some error reason Marker# BSVSF03 2026-01-08T15:24:04.128042Z 7 00h00m30.010512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:6:0]: (2181038080) SkeletonFront: got TEvPDiskErrorStateChange;State# NoWrites, PDiskError# Some error reason Marker# BSVSF03 2026-01-08T15:24:04.128118Z 2 00h00m30.010512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:1:0]: (2181038080) SkeletonFront: got TEvPDiskErrorStateChange;State# NoWrites, PDiskError# Some error reason Marker# BSVSF03 2026-01-08T15:24:04.128172Z 3 00h00m30.010512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:2:0]: (2181038080) SkeletonFront: got TEvPDiskErrorStateChange;State# NoWrites, PDiskError# Some error reason Marker# BSVSF03 2026-01-08T15:24:04.128226Z 4 00h00m30.010512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:3:0]: (2181038080) SkeletonFront: got TEvPDiskErrorStateChange;State# NoWrites, PDiskError# Some error reason Marker# BSVSF03 2026-01-08T15:24:04.128279Z 5 00h00m30.010512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:4:0]: (2181038080) SkeletonFront: got TEvPDiskErrorStateChange;State# NoWrites, PDiskError# Some error reason Marker# BSVSF03 |83.1%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/vdisk/hulldb/fresh/ut/unittest >> TBlobStorageHullFresh::SimpleBackWardMiddle2Times [GOOD] |83.2%| [TM] {BAZEL_UPLOAD} ydb/core/blobstorage/ut_blobstorage/ut_restart_pdisk/unittest >> TBlobStorageCompStrat::Test1 [GOOD] |83.1%| [TM] {BAZEL_UPLOAD} ydb/core/blobstorage/vdisk/hulldb/fresh/ut/unittest |83.2%| [TA] $(B)/ydb/core/blobstorage/vdisk/hulldb/cache_block/ut/test-results/unittest/{meta.json ... results_accumulator.log} |83.2%| [TA] $(B)/ydb/core/blobstorage/vdisk/ingress/ut/test-results/unittest/{meta.json ... results_accumulator.log} |83.2%| [TM] {asan, default-linux-x86_64, release} ydb/core/mind/bscontroller/ut_selfheal/unittest >> BsControllerTest::TestLocalSelfHeal >> BSCMovePDisk::PDiskMove_Mirror3dc3Nodes |83.2%| [TA] {BAZEL_UPLOAD} $(B)/ydb/core/blobstorage/vdisk/ingress/ut/test-results/unittest/{meta.json ... results_accumulator.log} |83.2%| [TA] {BAZEL_UPLOAD} $(B)/ydb/core/blobstorage/vdisk/hulldb/cache_block/ut/test-results/unittest/{meta.json ... results_accumulator.log} |83.2%| [TM] {BAZEL_UPLOAD} ydb/core/mind/bscontroller/ut_selfheal/unittest |83.2%| [TA] {RESULT} $(B)/ydb/core/blobstorage/vdisk/hulldb/cache_block/ut/test-results/unittest/{meta.json ... 
results_accumulator.log} ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/scheme_board/ut_replica/unittest >> TReplicaTest::UnsubscribeWithoutSubscribe [GOOD] Test command err: 2026-01-08T15:24:04.960849Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:760: [1:7:2054] Handle NKikimrSchemeBoard.TEvHandshake { Owner: 1 Generation: 1 }: sender# [1:8:2055] 2026-01-08T15:24:04.960911Z node 1 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:778: [1:7:2054] Successful handshake: owner# 1, generation# 1 2026-01-08T15:24:04.961082Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:791: [1:7:2054] Handle NKikimrSchemeBoard.TEvUpdate { Owner: 1 Generation: 1 }: sender# [1:8:2055], cookie# 0, event size# 72 2026-01-08T15:24:04.961121Z node 1 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:831: [1:7:2054] Update description: path# path, pathId# [OwnerId: 1, LocalPathId: 1], deletion# false 2026-01-08T15:24:04.974175Z node 1 :SCHEME_BOARD_REPLICA INFO: replica.cpp:559: [1:7:2054] Upsert description: path# path, pathId# [OwnerId: 1, LocalPathId: 1], pathDescription# {Status StatusSuccess, Path path, PathId [OwnerId: 1, LocalPathId: 1], PathVersion 1, SubdomainPathId , PathAbandonedTenantsSchemeShards size 0, DescribeSchemeResultSerialized size 30} 2026-01-08T15:24:04.974349Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1070: [1:7:2054] Handle NKikimrSchemeBoard.TEvSubscribe { Path: path DomainOwnerId: 0 }: sender# [1:8:2055] 2026-01-08T15:24:04.974432Z node 1 :SCHEME_BOARD_REPLICA INFO: replica.cpp:655: [1:7:2054] Subscribe: subscriber# [1:8:2055], path# path, domainOwnerId# 0, capabilities# 2026-01-08T15:24:04.974553Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1084: [1:7:2054] Handle NKikimrSchemeBoard.TEvUnsubscribe { Path: path }: sender# [1:8:2055] 2026-01-08T15:24:04.974597Z node 1 :SCHEME_BOARD_REPLICA INFO: replica.cpp:671: [1:7:2054] Unsubscribe: subscriber# [1:8:2055], path# path 2026-01-08T15:24:04.974656Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1070: [1:7:2054] Handle NKikimrSchemeBoard.TEvSubscribe { PathId: [OwnerId: 1, LocalPathId: 1] DomainOwnerId: 0 }: sender# [1:8:2055] 2026-01-08T15:24:04.974705Z node 1 :SCHEME_BOARD_REPLICA INFO: replica.cpp:655: [1:7:2054] Subscribe: subscriber# [1:8:2055], path# [OwnerId: 1, LocalPathId: 1], domainOwnerId# 0, capabilities# 2026-01-08T15:24:04.974794Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1084: [1:7:2054] Handle NKikimrSchemeBoard.TEvUnsubscribe { PathId: [OwnerId: 1, LocalPathId: 1] }: sender# [1:8:2055] 2026-01-08T15:24:04.974835Z node 1 :SCHEME_BOARD_REPLICA INFO: replica.cpp:671: [1:7:2054] Unsubscribe: subscriber# [1:8:2055], path# [OwnerId: 1, LocalPathId: 1] 2026-01-08T15:24:05.406816Z node 2 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:760: [2:7:2054] Handle NKikimrSchemeBoard.TEvHandshake { Owner: 1 Generation: 1 }: sender# [2:8:2055] 2026-01-08T15:24:05.406879Z node 2 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:778: [2:7:2054] Successful handshake: owner# 1, generation# 1 2026-01-08T15:24:05.406984Z node 2 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:791: [2:7:2054] Handle NKikimrSchemeBoard.TEvUpdate { Owner: 1 Generation: 1 }: sender# [2:8:2055], cookie# 0, event size# 72 2026-01-08T15:24:05.407027Z node 2 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:831: [2:7:2054] Update description: path# path, pathId# [OwnerId: 1, LocalPathId: 1], deletion# false 2026-01-08T15:24:05.407086Z node 2 :SCHEME_BOARD_REPLICA INFO: replica.cpp:559: [2:7:2054] Upsert description: path# path, pathId# [OwnerId: 1, LocalPathId: 1], pathDescription# 
{Status StatusSuccess, Path path, PathId [OwnerId: 1, LocalPathId: 1], PathVersion 1, SubdomainPathId , PathAbandonedTenantsSchemeShards size 0, DescribeSchemeResultSerialized size 30} 2026-01-08T15:24:05.407162Z node 2 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1084: [2:7:2054] Handle NKikimrSchemeBoard.TEvUnsubscribe { Path: path }: sender# [2:8:2055] >> ReadOnlyVDisk::TestGarbageCollect |83.2%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/ut_blobstorage/ut_read_only_vdisk/unittest ------- [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/ut_blobstorage/ut_move_pdisk/unittest >> BSCMovePDisk::PDiskMove_Block42 [GOOD] Test command err: RandomSeed# 13975581410677566229 |83.2%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/ut_blobstorage/ut_read_only_vdisk/unittest |83.2%| [TM] {BAZEL_UPLOAD} ydb/core/tx/scheme_board/ut_replica/unittest |83.2%| [TM] {BAZEL_UPLOAD} ydb/core/blobstorage/ut_blobstorage/ut_read_only_vdisk/unittest |83.2%| [TM] {BAZEL_UPLOAD} ydb/core/blobstorage/ut_blobstorage/ut_read_only_vdisk/unittest |83.2%| [TM] {BAZEL_UPLOAD} ydb/core/blobstorage/ut_blobstorage/ut_move_pdisk/unittest |83.2%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/vdisk/hulldb/compstrat/ut/unittest >> TBlobStorageCompStrat::Test1 [GOOD] |83.2%| [TM] {BAZEL_UPLOAD} ydb/core/blobstorage/vdisk/hulldb/compstrat/ut/unittest >> BSCMovePDisk::PDiskMove_Mirror3dc [GOOD] |83.2%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/ut_blobstorage/ut_read_only_vdisk/unittest |83.2%| [TA] {RESULT} $(B)/ydb/core/blobstorage/vdisk/ingress/ut/test-results/unittest/{meta.json ... results_accumulator.log} |83.2%| [TM] {BAZEL_UPLOAD} ydb/core/blobstorage/ut_blobstorage/ut_read_only_vdisk/unittest >> BsControllerTest::SelfHealBlock4Plus2 >> ReadOnlyVDisk::TestDiscover >> BsControllerTest::SelfHealMirror3dc >> SchemeRanges::RangesBorders [GOOD] >> TypesProto::Decimal22 [GOOD] >> SelfHealActorTest::SingleErrorDisk >> SelfHealActorTest::SingleErrorDisk [GOOD] >> ReadOnlyVDisk::TestReads >> BsControllerTest::DecommitRejected >> ReadOnlyVDisk::TestGetWithMustRestoreFirst >> BsControllerTest::TestLocalBrokenRelocation >> ReadOnlyVDisk::TestSync >> ReadOnlyVDisk::TestStorageLoad |83.3%| [TM] {asan, default-linux-x86_64, release} ydb/core/mind/bscontroller/ut_selfheal/unittest |83.3%| [TM] {BAZEL_UPLOAD} ydb/core/mind/bscontroller/ut_selfheal/unittest >> SysViewQueryHistory::ScanQueryHistoryMerge [GOOD] |83.3%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/driver_lib/run/librun.a |83.3%| [AR] {RESULT} $(B)/ydb/core/driver_lib/run/librun.a |83.3%| [TS] {asan, default-linux-x86_64, release} ydb/core/scheme/ut/unittest >> TypesProto::Decimal22 [GOOD] |83.3%| [TS] {BAZEL_UPLOAD} ydb/core/scheme/ut/unittest >> TInterconnectTest::TestCrossConnect [GOOD] >> TInterconnectTest::TestManyEventsWithReconnect |83.3%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/driver_lib/run/librun.a |83.3%| [TM] {asan, default-linux-x86_64, release} ydb/core/mind/bscontroller/ut_selfheal/unittest >> SelfHealActorTest::SingleErrorDisk [GOOD] >> SelfHealActorTest::NoMoreThanOneReplicating >> SysViewQueryHistory::AddDedupRandom [GOOD] >> DiscoveryConverterTest::FullLegacyNames [GOOD] >> DiscoveryConverterTest::FirstClass [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/ut_blobstorage/ut_move_pdisk/unittest >> BSCMovePDisk::PDiskMove_Mirror3dc [GOOD] Test command err: RandomSeed# 210924299902870170 >> BsControllerTest::DecommitRejected [GOOD] >> 
TBsOther1::PoisonPill [GOOD] >> TBsOther1::ChaoticParallelWrite >> TLockFreeIntrusiveStackTest::ConcurrentAutoHeavyContention [GOOD] >> DiscoveryConverterTest::FullLegacyPath [GOOD] >> DiscoveryConverterTest::FullLegacyNamesWithRootDatabase [GOOD] >> SelfHealActorTest::NoMoreThanOneReplicating [GOOD] |83.3%| [TM] {asan, default-linux-x86_64, release} ydb/core/sys_view/service/ut/unittest >> SysViewQueryHistory::ScanQueryHistoryMerge [GOOD] |83.3%| [TM] {BAZEL_UPLOAD} ydb/core/mind/bscontroller/ut_selfheal/unittest |83.3%| [TM] {BAZEL_UPLOAD} ydb/core/blobstorage/ut_blobstorage/ut_move_pdisk/unittest |83.3%| [TA] $(B)/ydb/core/tx/scheme_board/ut_replica/test-results/unittest/{meta.json ... results_accumulator.log} |83.3%| [TM] {BAZEL_UPLOAD} ydb/core/sys_view/service/ut/unittest |83.3%| [TA] {BAZEL_UPLOAD} $(B)/ydb/core/tx/scheme_board/ut_replica/test-results/unittest/{meta.json ... results_accumulator.log} |83.3%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/vdisk/anubis_osiris/ut/unittest |83.3%| [TS] {asan, default-linux-x86_64, release} ydb/library/persqueue/topic_parser/ut/unittest >> DiscoveryConverterTest::FullLegacyNamesWithRootDatabase [GOOD] >> TopicNameConverterForCPTest::BadLegacyTopics [GOOD] >> TopicNameConverterForCPTest::BadModernTopics [GOOD] >> TInterconnectTest::TestManyEventsWithReconnect [GOOD] >> TInterconnectTest::TestEventWithPayloadSerialization |83.3%| [TM] {BAZEL_UPLOAD} ydb/core/blobstorage/vdisk/anubis_osiris/ut/unittest >> VDiskBalancing::TestStopOneNode_Block42_HugeBlob [GOOD] |83.3%| [TS] {BAZEL_UPLOAD} ydb/library/persqueue/topic_parser/ut/unittest |83.3%| [TS] {asan, default-linux-x86_64, release} ydb/library/persqueue/topic_parser/ut/unittest >> DiscoveryConverterTest::FirstClass [GOOD] |83.3%| [TM] {asan, default-linux-x86_64, release} ydb/core/sys_view/service/ut/unittest >> SysViewQueryHistory::AddDedupRandom [GOOD] |83.3%| [TM] {asan, default-linux-x86_64, release} ydb/core/mind/bscontroller/ut_selfheal/unittest >> SelfHealActorTest::NoMoreThanOneReplicating [GOOD] |83.3%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/vdisk/anubis_osiris/ut/unittest |83.3%| [TM] {asan, default-linux-x86_64, release} ydb/core/util/ut/unittest >> TLockFreeIntrusiveStackTest::ConcurrentAutoHeavyContention [GOOD] >> TBlobStorageHullFreshSegment::PerfAppendix [GOOD] >> BsControllerTest::TestLocalSelfHeal [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/mind/bscontroller/ut_selfheal/unittest >> BsControllerTest::DecommitRejected [GOOD] >> TBlobStorageHullFreshSegment::PerfSkipList Test command err: 2026-01-08T15:24:09.081723Z 1 00h00m00.000000s :BS_NODE DEBUG: [1] Bootstrap 2026-01-08T15:24:09.081797Z 1 00h00m00.000000s :BS_NODE DEBUG: [1] Connect 2026-01-08T15:24:09.081904Z 2 00h00m00.000000s :BS_NODE DEBUG: [2] Bootstrap 2026-01-08T15:24:09.081938Z 2 00h00m00.000000s :BS_NODE DEBUG: [2] Connect 2026-01-08T15:24:09.082045Z 3 00h00m00.000000s :BS_NODE DEBUG: [3] Bootstrap 2026-01-08T15:24:09.082108Z 3 00h00m00.000000s :BS_NODE DEBUG: [3] Connect 2026-01-08T15:24:09.082170Z 4 00h00m00.000000s :BS_NODE DEBUG: [4] Bootstrap 2026-01-08T15:24:09.082197Z 4 00h00m00.000000s :BS_NODE DEBUG: [4] Connect 2026-01-08T15:24:09.084616Z 5 00h00m00.000000s :BS_NODE DEBUG: [5] Bootstrap 2026-01-08T15:24:09.084666Z 5 00h00m00.000000s :BS_NODE DEBUG: [5] Connect 2026-01-08T15:24:09.084740Z 6 00h00m00.000000s :BS_NODE DEBUG: [6] Bootstrap 2026-01-08T15:24:09.084770Z 6 00h00m00.000000s :BS_NODE DEBUG: [6] Connect 2026-01-08T15:24:09.084856Z 
7 00h00m00.000000s :BS_NODE DEBUG: [7] Bootstrap 2026-01-08T15:24:09.084893Z 7 00h00m00.000000s :BS_NODE DEBUG: [7] Connect 2026-01-08T15:24:09.084940Z 8 00h00m00.000000s :BS_NODE DEBUG: [8] Bootstrap 2026-01-08T15:24:09.084967Z 8 00h00m00.000000s :BS_NODE DEBUG: [8] Connect 2026-01-08T15:24:09.085074Z 9 00h00m00.000000s :BS_NODE DEBUG: [9] Bootstrap 2026-01-08T15:24:09.085276Z 9 00h00m00.000000s :BS_NODE DEBUG: [9] Connect 2026-01-08T15:24:09.085353Z 10 00h00m00.000000s :BS_NODE DEBUG: [10] Bootstrap 2026-01-08T15:24:09.085385Z 10 00h00m00.000000s :BS_NODE DEBUG: [10] Connect 2026-01-08T15:24:09.085449Z 11 00h00m00.000000s :BS_NODE DEBUG: [11] Bootstrap 2026-01-08T15:24:09.085477Z 11 00h00m00.000000s :BS_NODE DEBUG: [11] Connect 2026-01-08T15:24:09.085612Z 12 00h00m00.000000s :BS_NODE DEBUG: [12] Bootstrap 2026-01-08T15:24:09.085644Z 12 00h00m00.000000s :BS_NODE DEBUG: [12] Connect 2026-01-08T15:24:09.085711Z 13 00h00m00.000000s :BS_NODE DEBUG: [13] Bootstrap 2026-01-08T15:24:09.085740Z 13 00h00m00.000000s :BS_NODE DEBUG: [13] Connect 2026-01-08T15:24:09.085807Z 14 00h00m00.000000s :BS_NODE DEBUG: [14] Bootstrap 2026-01-08T15:24:09.085840Z 14 00h00m00.000000s :BS_NODE DEBUG: [14] Connect 2026-01-08T15:24:09.085888Z 15 00h00m00.000000s :BS_NODE DEBUG: [15] Bootstrap 2026-01-08T15:24:09.085914Z 15 00h00m00.000000s :BS_NODE DEBUG: [15] Connect 2026-01-08T15:24:09.128277Z 1 00h00m00.000000s :BS_NODE DEBUG: [1] ClientConnected Sender# [1:514:38] Status# ERROR ClientId# [1:514:38] ServerId# [0:0:0] PipeClient# [1:514:38] 2026-01-08T15:24:09.128977Z 2 00h00m00.000000s :BS_NODE DEBUG: [2] ClientConnected Sender# [2:515:20] Status# ERROR ClientId# [2:515:20] ServerId# [0:0:0] PipeClient# [2:515:20] 2026-01-08T15:24:09.129037Z 3 00h00m00.000000s :BS_NODE DEBUG: [3] ClientConnected Sender# [3:516:20] Status# ERROR ClientId# [3:516:20] ServerId# [0:0:0] PipeClient# [3:516:20] 2026-01-08T15:24:09.129077Z 4 00h00m00.000000s :BS_NODE DEBUG: [4] ClientConnected Sender# [4:517:20] Status# ERROR ClientId# [4:517:20] ServerId# [0:0:0] PipeClient# [4:517:20] 2026-01-08T15:24:09.129117Z 5 00h00m00.000000s :BS_NODE DEBUG: [5] ClientConnected Sender# [5:518:20] Status# ERROR ClientId# [5:518:20] ServerId# [0:0:0] PipeClient# [5:518:20] 2026-01-08T15:24:09.129154Z 6 00h00m00.000000s :BS_NODE DEBUG: [6] ClientConnected Sender# [6:519:20] Status# ERROR ClientId# [6:519:20] ServerId# [0:0:0] PipeClient# [6:519:20] 2026-01-08T15:24:09.129221Z 7 00h00m00.000000s :BS_NODE DEBUG: [7] ClientConnected Sender# [7:520:20] Status# ERROR ClientId# [7:520:20] ServerId# [0:0:0] PipeClient# [7:520:20] 2026-01-08T15:24:09.129263Z 8 00h00m00.000000s :BS_NODE DEBUG: [8] ClientConnected Sender# [8:521:20] Status# ERROR ClientId# [8:521:20] ServerId# [0:0:0] PipeClient# [8:521:20] 2026-01-08T15:24:09.129304Z 9 00h00m00.000000s :BS_NODE DEBUG: [9] ClientConnected Sender# [9:522:20] Status# ERROR ClientId# [9:522:20] ServerId# [0:0:0] PipeClient# [9:522:20] 2026-01-08T15:24:09.129345Z 10 00h00m00.000000s :BS_NODE DEBUG: [10] ClientConnected Sender# [10:523:20] Status# ERROR ClientId# [10:523:20] ServerId# [0:0:0] PipeClient# [10:523:20] 2026-01-08T15:24:09.129410Z 11 00h00m00.000000s :BS_NODE DEBUG: [11] ClientConnected Sender# [11:524:20] Status# ERROR ClientId# [11:524:20] ServerId# [0:0:0] PipeClient# [11:524:20] 2026-01-08T15:24:09.129467Z 12 00h00m00.000000s :BS_NODE DEBUG: [12] ClientConnected Sender# [12:525:20] Status# ERROR ClientId# [12:525:20] ServerId# [0:0:0] PipeClient# [12:525:20] 2026-01-08T15:24:09.129521Z 13 
00h00m00.000000s :BS_NODE DEBUG: [13] ClientConnected Sender# [13:526:20] Status# ERROR ClientId# [13:526:20] ServerId# [0:0:0] PipeClient# [13:526:20] 2026-01-08T15:24:09.129561Z 14 00h00m00.000000s :BS_NODE DEBUG: [14] ClientConnected Sender# [14:527:20] Status# ERROR ClientId# [14:527:20] ServerId# [0:0:0] PipeClient# [14:527:20] 2026-01-08T15:24:09.129602Z 15 00h00m00.000000s :BS_NODE DEBUG: [15] ClientConnected Sender# [15:528:20] Status# ERROR ClientId# [15:528:20] ServerId# [0:0:0] PipeClient# [15:528:20] 2026-01-08T15:24:09.239133Z 1 00h00m00.100000s :BS_NODE DEBUG: [1] Connect 2026-01-08T15:24:09.239216Z 2 00h00m00.100000s :BS_NODE DEBUG: [2] Connect 2026-01-08T15:24:09.239253Z 3 00h00m00.100000s :BS_NODE DEBUG: [3] Connect 2026-01-08T15:24:09.239287Z 4 00h00m00.100000s :BS_NODE DEBUG: [4] Connect 2026-01-08T15:24:09.239357Z 5 00h00m00.100000s :BS_NODE DEBUG: [5] Connect 2026-01-08T15:24:09.239389Z 6 00h00m00.100000s :BS_NODE DEBUG: [6] Connect 2026-01-08T15:24:09.239433Z 7 00h00m00.100000s :BS_NODE DEBUG: [7] Connect 2026-01-08T15:24:09.239467Z 8 00h00m00.100000s :BS_NODE DEBUG: [8] Connect 2026-01-08T15:24:09.239540Z 9 00h00m00.100000s :BS_NODE DEBUG: [9] Connect 2026-01-08T15:24:09.239577Z 10 00h00m00.100000s :BS_NODE DEBUG: [10] Connect 2026-01-08T15:24:09.239632Z 11 00h00m00.100000s :BS_NODE DEBUG: [11] Connect 2026-01-08T15:24:09.239670Z 12 00h00m00.100000s :BS_NODE DEBUG: [12] Connect 2026-01-08T15:24:09.239705Z 13 00h00m00.100000s :BS_NODE DEBUG: [13] Connect 2026-01-08T15:24:09.239744Z 14 00h00m00.100000s :BS_NODE DEBUG: [14] Connect 2026-01-08T15:24:09.239788Z 15 00h00m00.100000s :BS_NODE DEBUG: [15] Connect 2026-01-08T15:24:09.251165Z 1 00h00m00.100000s :BS_NODE DEBUG: [1] ClientConnected Sender# [1:589:66] Status# OK ClientId# [1:589:66] ServerId# [1:618:67] PipeClient# [1:589:66] 2026-01-08T15:24:09.251239Z 1 00h00m00.100000s :BS_NODE DEBUG: [1] State switched from 0 to 1 2026-01-08T15:24:09.268542Z 2 00h00m00.100000s :BS_NODE DEBUG: [2] ClientConnected Sender# [2:590:21] Status# OK ClientId# [2:590:21] ServerId# [1:619:68] PipeClient# [2:590:21] 2026-01-08T15:24:09.268592Z 2 00h00m00.100000s :BS_NODE DEBUG: [2] State switched from 0 to 1 2026-01-08T15:24:09.268642Z 3 00h00m00.100000s :BS_NODE DEBUG: [3] ClientConnected Sender# [3:591:21] Status# OK ClientId# [3:591:21] ServerId# [1:620:69] PipeClient# [3:591:21] 2026-01-08T15:24:09.268686Z 3 00h00m00.100000s :BS_NODE DEBUG: [3] State switched from 0 to 1 2026-01-08T15:24:09.268732Z 4 00h00m00.100000s :BS_NODE DEBUG: [4] ClientConnected Sender# [4:592:21] Status# OK ClientId# [4:592:21] ServerId# [1:621:70] PipeClient# [4:592:21] 2026-01-08T15:24:09.268767Z 4 00h00m00.100000s :BS_NODE DEBUG: [4] State switched from 0 to 1 2026-01-08T15:24:09.268801Z 5 00h00m00.100000s :BS_NODE DEBUG: [5] ClientConnected Sender# [5:593:21] Status# OK ClientId# [5:593:21] ServerId# [1:622:71] PipeClient# [5:593:21] 2026-01-08T15:24:09.268822Z 5 00h00m00.100000s :BS_NODE DEBUG: [5] State switched from 0 to 1 2026-01-08T15:24:09.268855Z 6 00h00m00.100000s :BS_NODE DEBUG: [6] ClientConnected Sender# [6:594:21] Status# OK ClientId# [6:594:21] ServerId# [1:623:72] PipeClient# [6:594:21] 2026-01-08T15:24:09.268877Z 6 00h00m00.100000s :BS_NODE DEBUG: [6] State switched from 0 to 1 2026-01-08T15:24:09.268910Z 7 00h00m00.100000s :BS_NODE DEBUG: [7] ClientConnected Sender# [7:595:21] Status# OK ClientId# [7:595:21] ServerId# [1:624:73] PipeClient# [7:595:21] 2026-01-08T15:24:09.268947Z 7 00h00m00.100000s :BS_NODE DEBUG: [7] State switched from 0 
to 1 2026-01-08T15:24:09.269002Z 8 00h00m00.100000s :BS_NODE DEBUG: [8] ClientConnected Sender# [8:596:21] Status# OK ClientId# [8:596:21] ServerId# [1:625:74] PipeClient# [8:596:21] 2026-01-08T15:24:09.269031Z 8 00h00m00.100000s :BS_NODE DEBUG: [8] State switched from 0 to 1 2026-01-08T15:24:09.269064Z 9 00h00m00.100000s :BS_NODE DEBUG: [9] ClientConnected Sender# [9:597:21] Status# OK ClientId# [9:597:21] ServerId# [1:626:75] PipeClient# [9:597:21] 2026-01-08T15:24:09.269087Z 9 00h00m00.100000s :BS_NODE DEBUG: [9] State switched from 0 to 1 2026-01-08T15:24:09.269122Z 10 00h00m00.100000s :BS_NODE DEBUG: [10] ClientConnected Sender# [10:598:21] Status# OK ClientId# [10:598:21] ServerId# [1:627:76] PipeClient# [10:598:21] 2026-01-08T15:24:09.269144Z 10 00h00m00.100000s :BS_NODE DEBUG: [10] State switched from 0 to 1 2026-01-08T15:24:09.269178Z 11 00h00m00.100000s :BS_NODE DEBUG: [11] ClientConnected Sender# [11:599:21] Status# OK ClientId# [11:599:21] ServerId# [1:628:77] PipeClient# [11:599:21] 2026-01-08T15:24:09.269218Z 11 00h00m00.100000s :BS_NODE DEBUG: [11] State switched from 0 to 1 2026-01-08T15:24:09.269261Z 12 00h00m00.100000s :BS_NODE DEBUG: [12] ClientConnected Sender# [12:600:21] Status# OK ClientId# [12:600:21] ServerId# [1:629:78] PipeClient# [12:600:21] 2026-01-08T15:24:09.269285Z 12 00h00m00.100000s :BS_NODE DEBUG: [12] State switched from 0 to 1 2026-01-08T15:24:09.269323Z 13 00h00m00.100000s :BS_NODE DEBUG: [13] ClientConnected Sender# [13:601:21] Status# OK ClientId# [13:601:21] ServerId# [1:630:79] PipeClient# [13:601:21] 2026-01-08T15:24:09.269356Z 13 00h00m00.100000s :BS_NODE DEBUG: [13] State switched from 0 to 1 2026-01-08T15:24:09.269396Z 14 00h00m00.100000s :BS_NODE DEBUG: [14] ClientConnected Sender# [14:602:21] Status# OK ClientId# [14:602:21] ServerId# [1:631:80] PipeClient# [14:602:21] 2026-01-08T15:24:09.269434Z 14 00h00m00.100000s :BS_NODE DEBUG: [14] State switched from 0 to 1 2026-01-08T15:24:09.269469Z 15 00h00m00.100000s :BS_NODE DEBUG: [15] ClientConnected Sender# [15:603:21] Status# OK ClientId# [15:603:21] ServerId# [1:632:81] PipeClient# [15:603:21] 2026-01-08T15:24:09.269492Z 15 00h00m00.100000s :BS_NODE DEBUG: [15] State switched from 0 to 1 2026-01-08T15:24:09.272842Z 1 00h00m00.100512s :BS_NODE DEBUG: [1] NodeServiceSetUpdate 2026-01-08T15:24:09.272919Z 1 00h00m00.100512s :BS_NODE DEBUG: [1] VDiskId# [80000000:1:0:0:0] PDiskId# 1000 VSlotId# 1000 created 2026-01-08T15:24:09.321884Z 1 00h00m00.100512s :BS_NODE DEBUG: [1] VDiskId# [80000000:1:0:0:0] status changed to INIT_PENDING 2026-01-08T15:24:09.323196Z 2 00h00m00.100512s :BS_NODE DEBUG: [2] NodeServiceSetUpdate 2026-01-08T15:24:09.323266Z 2 00h00m00.100512s :BS_NODE DEBUG: [2] VDiskId# [80000000:1:0:1:0] PDiskId# 1000 VSlotId# 1000 created 2026-01-08T15:24:09.323350Z 2 00h00m00.100512s :BS_NODE DEBUG: [2] VDiskId# [80000000:1:0:1:0] status changed to INIT_PENDING 2026-01-08T15:24:09.323493Z 3 00h00m00.100512s :BS_NODE DEBUG: [3] NodeServiceSetUpdate 2026-01-08T15:24:09.323539Z 3 00h00m00.100512s :BS_NODE DEBUG: [3] VDiskId# [80000000:1:0:2:0] PDiskId# 1000 VSlotId# 1000 created 2026-01-08T15:24:09.323610Z 3 00h00m00.100512s :BS_NODE DEBUG: [3] VDiskId# [80000000:1:0:2:0] status changed to INIT_PENDING 2026-01-08T15:24:09.323736Z 4 00h00m00.100512s :BS_NODE DEBUG: [4] NodeServiceSetUpdate 2026-01-08T15:24:09.323786Z 4 00h00m00.100512s :BS_NODE DEBUG: [4] VDiskId# [80000000:1:1:0:0] PDiskId# 1000 VSlotId# 1000 created 2026-01-08T15:24:09.323860Z 4 00h00m00.100512s :BS_NODE DEBUG: [4] VDiskId# 
[80000000:1:1:0:0] status changed to INIT_PENDING 2026-01-08T15:24:09.323968Z 5 00h00m00.100512s :BS_NODE DEBUG: [5] NodeServiceSetUpdate 2026-01-08T15:24:09.324004Z 5 00h00m00.100512s :BS_NODE DEBUG: [5] VDiskId# [80000000:1:1:1:0] PDiskId# 1000 VSlotId# 1000 created 2026-01-08T15:24:09.324045Z 5 00h00m00.100512s :BS_NODE DEBUG: [5] VDiskId# [80000000:1:1:1:0] status changed to INIT_PENDING 2026-01-08T1 ... 0:3:2:2:0] Ready RequiresReassignment Decommitted}] GroupId# 2147483648 2026-01-08T15:24:09.939497Z 2 00h01m16.946512s :BS_NODE DEBUG: [2] VDiskId# [80000001:1:2:1:0] status changed to READY 2026-01-08T15:24:09.939935Z 1 00h01m16.946512s :BS_SELFHEAL INFO: {BSSH11@self_heal.cpp:719} group can't be reassigned right now [{[80000000:3:0:0:0] Ready},{[80000000:3:0:1:0] Ready},{[80000000:3:0:2:0] Ready},{[80000000:3:1:0:0] Ready},{[80000000:3:1:1:0] Ready},{[80000000:3:1:2:0] Ready},{[80000000:3:2:0:0] NotReady},{[80000000:3:2:1:0] NotReady},{[80000000:3:2:2:0] Ready RequiresReassignment Decommitted}] GroupId# 2147483648 2026-01-08T15:24:09.940298Z 1 00h01m20.000000s :BS_SELFHEAL INFO: {BSSH11@self_heal.cpp:719} group can't be reassigned right now [{[80000000:3:0:0:0] Ready},{[80000000:3:0:1:0] Ready},{[80000000:3:0:2:0] Ready},{[80000000:3:1:0:0] Ready},{[80000000:3:1:1:0] Ready},{[80000000:3:1:2:0] Ready},{[80000000:3:2:0:0] NotReady},{[80000000:3:2:1:0] NotReady},{[80000000:3:2:2:0] Ready RequiresReassignment Decommitted}] GroupId# 2147483648 2026-01-08T15:24:09.940455Z 13 00h01m22.068512s :BS_NODE DEBUG: [13] VDiskId# [80000001:1:1:0:0] status changed to READY 2026-01-08T15:24:09.940947Z 1 00h01m22.068512s :BS_SELFHEAL INFO: {BSSH11@self_heal.cpp:719} group can't be reassigned right now [{[80000000:3:0:0:0] Ready},{[80000000:3:0:1:0] Ready},{[80000000:3:0:2:0] Ready},{[80000000:3:1:0:0] Ready},{[80000000:3:1:1:0] Ready},{[80000000:3:1:2:0] Ready},{[80000000:3:2:0:0] NotReady},{[80000000:3:2:1:0] NotReady},{[80000000:3:2:2:0] Ready RequiresReassignment Decommitted}] GroupId# 2147483648 2026-01-08T15:24:09.941170Z 3 00h01m22.234512s :BS_NODE DEBUG: [3] VDiskId# [80000001:1:2:2:0] status changed to READY 2026-01-08T15:24:09.941638Z 1 00h01m22.234512s :BS_SELFHEAL INFO: {BSSH11@self_heal.cpp:719} group can't be reassigned right now [{[80000000:3:0:0:0] Ready},{[80000000:3:0:1:0] Ready},{[80000000:3:0:2:0] Ready},{[80000000:3:1:0:0] Ready},{[80000000:3:1:1:0] Ready},{[80000000:3:1:2:0] Ready},{[80000000:3:2:0:0] NotReady},{[80000000:3:2:1:0] NotReady},{[80000000:3:2:2:0] Ready RequiresReassignment Decommitted}] GroupId# 2147483648 2026-01-08T15:24:09.941878Z 12 00h01m24.946512s :BS_NODE DEBUG: [12] VDiskId# [80000001:1:0:2:0] status changed to READY 2026-01-08T15:24:09.942228Z 1 00h01m24.946512s :BS_SELFHEAL INFO: {BSSH11@self_heal.cpp:719} group can't be reassigned right now [{[80000000:3:0:0:0] Ready},{[80000000:3:0:1:0] Ready},{[80000000:3:0:2:0] Ready},{[80000000:3:1:0:0] Ready},{[80000000:3:1:1:0] Ready},{[80000000:3:1:2:0] Ready},{[80000000:3:2:0:0] NotReady},{[80000000:3:2:1:0] NotReady},{[80000000:3:2:2:0] Ready RequiresReassignment Decommitted}] GroupId# 2147483648 2026-01-08T15:24:09.942725Z 1 00h01m25.900512s :BS_SELFHEAL INFO: {BSSH11@self_heal.cpp:719} group can't be reassigned right now [{[80000000:3:0:0:0] Ready},{[80000000:3:0:1:0] Ready},{[80000000:3:0:2:0] Ready},{[80000000:3:1:0:0] Ready},{[80000000:3:1:1:0] Ready},{[80000000:3:1:2:0] Ready},{[80000000:3:2:0:0] NotReady},{[80000000:3:2:1:0] NotReady},{[80000000:3:2:2:0] Ready RequiresReassignment Decommitted}] GroupId# 
2147483648 2026-01-08T15:24:09.942957Z 13 00h01m26.757024s :BS_NODE DEBUG: [13] VDiskId# [80000000:3:2:0:0] status changed to READY 2026-01-08T15:24:09.943443Z 1 00h01m26.757024s :BS_SELFHEAL INFO: {BSSH11@self_heal.cpp:719} group can't be reassigned right now [{[80000000:3:0:0:0] Ready},{[80000000:3:0:1:0] Ready},{[80000000:3:0:2:0] Ready},{[80000000:3:1:0:0] Ready},{[80000000:3:1:1:0] Ready},{[80000000:3:1:2:0] Ready},{[80000000:3:2:0:0] NotReady},{[80000000:3:2:1:0] NotReady},{[80000000:3:2:2:0] Ready RequiresReassignment Decommitted}] GroupId# 2147483648 2026-01-08T15:24:09.944237Z 7 00h01m26.757536s :BS_NODE DEBUG: [7] NodeServiceSetUpdate 2026-01-08T15:24:09.944301Z 7 00h01m26.757536s :BS_NODE DEBUG: [7] VDiskId# [80000000:1:2:0:0] destroyed 2026-01-08T15:24:09.944521Z 15 00h01m27.837512s :BS_NODE DEBUG: [15] VDiskId# [80000001:1:1:2:0] status changed to READY 2026-01-08T15:24:09.944856Z 1 00h01m27.837512s :BS_SELFHEAL INFO: {BSSH11@self_heal.cpp:719} group can't be reassigned right now [{[80000000:3:0:0:0] Ready},{[80000000:3:0:1:0] Ready},{[80000000:3:0:2:0] Ready},{[80000000:3:1:0:0] Ready},{[80000000:3:1:1:0] Ready},{[80000000:3:1:2:0] Ready},{[80000000:3:2:0:0] NotReady},{[80000000:3:2:1:0] NotReady},{[80000000:3:2:2:0] Ready RequiresReassignment Decommitted}] GroupId# 2147483648 2026-01-08T15:24:09.945353Z 1 00h01m30.000000s :BS_SELFHEAL INFO: {BSSH11@self_heal.cpp:719} group can't be reassigned right now [{[80000000:3:0:0:0] Ready},{[80000000:3:0:1:0] Ready},{[80000000:3:0:2:0] Ready},{[80000000:3:1:0:0] Ready},{[80000000:3:1:1:0] Ready},{[80000000:3:1:2:0] Ready},{[80000000:3:2:0:0] NotReady},{[80000000:3:2:1:0] NotReady},{[80000000:3:2:2:0] Ready RequiresReassignment Decommitted}] GroupId# 2147483648 2026-01-08T15:24:09.945920Z 1 00h01m31.946512s :BS_SELFHEAL INFO: {BSSH11@self_heal.cpp:719} group can't be reassigned right now [{[80000000:3:0:0:0] Ready},{[80000000:3:0:1:0] Ready},{[80000000:3:0:2:0] Ready},{[80000000:3:1:0:0] Ready},{[80000000:3:1:1:0] Ready},{[80000000:3:1:2:0] Ready},{[80000000:3:2:0:0] NotReady},{[80000000:3:2:1:0] NotReady},{[80000000:3:2:2:0] Ready RequiresReassignment Decommitted}] GroupId# 2147483648 2026-01-08T15:24:09.946141Z 14 00h01m32.934536s :BS_NODE DEBUG: [14] VDiskId# [80000000:3:2:1:0] status changed to READY 2026-01-08T15:24:09.946777Z 1 00h01m32.934536s :BS_SELFHEAL DEBUG: {BSSH01@self_heal.cpp:76} Reassigner starting GroupId# 2147483648 2026-01-08T15:24:09.947647Z 1 00h01m32.934536s :BS_SELFHEAL DEBUG: {BSSH03@self_heal.cpp:116} Reassigner TEvVStatusResult GroupId# 2147483648 Status# OK JoinedGroup# true Replicated# true 2026-01-08T15:24:09.947706Z 1 00h01m32.934536s :BS_SELFHEAL DEBUG: {BSSH02@self_heal.cpp:101} Reassigner ProcessVDiskReply GroupId# 2147483648 VDiskId# [80000000:3:0:0:0] DiskIsOk# true 2026-01-08T15:24:09.948042Z 1 00h01m32.934536s :BS_SELFHEAL DEBUG: {BSSH03@self_heal.cpp:116} Reassigner TEvVStatusResult GroupId# 2147483648 Status# OK JoinedGroup# true Replicated# true 2026-01-08T15:24:09.948086Z 1 00h01m32.934536s :BS_SELFHEAL DEBUG: {BSSH02@self_heal.cpp:101} Reassigner ProcessVDiskReply GroupId# 2147483648 VDiskId# [80000000:3:0:1:0] DiskIsOk# true 2026-01-08T15:24:09.948128Z 1 00h01m32.934536s :BS_SELFHEAL DEBUG: {BSSH03@self_heal.cpp:116} Reassigner TEvVStatusResult GroupId# 2147483648 Status# OK JoinedGroup# true Replicated# true 2026-01-08T15:24:09.948162Z 1 00h01m32.934536s :BS_SELFHEAL DEBUG: {BSSH02@self_heal.cpp:101} Reassigner ProcessVDiskReply GroupId# 2147483648 VDiskId# [80000000:3:0:2:0] DiskIsOk# true 
2026-01-08T15:24:09.948198Z 1 00h01m32.934536s :BS_SELFHEAL DEBUG: {BSSH03@self_heal.cpp:116} Reassigner TEvVStatusResult GroupId# 2147483648 Status# OK JoinedGroup# true Replicated# true 2026-01-08T15:24:09.948250Z 1 00h01m32.934536s :BS_SELFHEAL DEBUG: {BSSH02@self_heal.cpp:101} Reassigner ProcessVDiskReply GroupId# 2147483648 VDiskId# [80000000:3:1:0:0] DiskIsOk# true 2026-01-08T15:24:09.948294Z 1 00h01m32.934536s :BS_SELFHEAL DEBUG: {BSSH03@self_heal.cpp:116} Reassigner TEvVStatusResult GroupId# 2147483648 Status# OK JoinedGroup# true Replicated# true 2026-01-08T15:24:09.948344Z 1 00h01m32.934536s :BS_SELFHEAL DEBUG: {BSSH02@self_heal.cpp:101} Reassigner ProcessVDiskReply GroupId# 2147483648 VDiskId# [80000000:3:1:1:0] DiskIsOk# true 2026-01-08T15:24:09.948385Z 1 00h01m32.934536s :BS_SELFHEAL DEBUG: {BSSH03@self_heal.cpp:116} Reassigner TEvVStatusResult GroupId# 2147483648 Status# OK JoinedGroup# true Replicated# true 2026-01-08T15:24:09.948430Z 1 00h01m32.934536s :BS_SELFHEAL DEBUG: {BSSH02@self_heal.cpp:101} Reassigner ProcessVDiskReply GroupId# 2147483648 VDiskId# [80000000:3:1:2:0] DiskIsOk# true 2026-01-08T15:24:09.948471Z 1 00h01m32.934536s :BS_SELFHEAL DEBUG: {BSSH03@self_heal.cpp:116} Reassigner TEvVStatusResult GroupId# 2147483648 Status# OK JoinedGroup# true Replicated# true 2026-01-08T15:24:09.948506Z 1 00h01m32.934536s :BS_SELFHEAL DEBUG: {BSSH02@self_heal.cpp:101} Reassigner ProcessVDiskReply GroupId# 2147483648 VDiskId# [80000000:3:2:0:0] DiskIsOk# true 2026-01-08T15:24:09.948543Z 1 00h01m32.934536s :BS_SELFHEAL DEBUG: {BSSH03@self_heal.cpp:116} Reassigner TEvVStatusResult GroupId# 2147483648 Status# OK JoinedGroup# true Replicated# true 2026-01-08T15:24:09.948598Z 1 00h01m32.934536s :BS_SELFHEAL DEBUG: {BSSH02@self_heal.cpp:101} Reassigner ProcessVDiskReply GroupId# 2147483648 VDiskId# [80000000:3:2:1:0] DiskIsOk# true 2026-01-08T15:24:09.951591Z 1 00h01m32.935048s :BS_NODE DEBUG: [1] NodeServiceSetUpdate 2026-01-08T15:24:09.951689Z 1 00h01m32.935048s :BS_NODE DEBUG: [1] VDiskId# [80000000:3:0:0:0] -> [80000000:4:0:0:0] 2026-01-08T15:24:09.952378Z 1 00h01m32.935048s :BS_SELFHEAL INFO: {BSSH09@self_heal.cpp:214} Reassigner succeeded GroupId# 2147483648 Items# [80000000:3:2:2:0]: 9:1000:1000 -> 15:1000:1001 ConfigTxSeqNo# 23 2026-01-08T15:24:09.952435Z 1 00h01m32.935048s :BS_SELFHEAL DEBUG: {BSSH08@self_heal.cpp:225} Reassigner finished GroupId# 2147483648 Success# true 2026-01-08T15:24:09.952591Z 8 00h01m32.935048s :BS_NODE DEBUG: [8] NodeServiceSetUpdate 2026-01-08T15:24:09.952644Z 8 00h01m32.935048s :BS_NODE DEBUG: [8] VDiskId# [80000000:2:2:1:0] destroyed 2026-01-08T15:24:09.952771Z 2 00h01m32.935048s :BS_NODE DEBUG: [2] NodeServiceSetUpdate 2026-01-08T15:24:09.952832Z 2 00h01m32.935048s :BS_NODE DEBUG: [2] VDiskId# [80000000:3:0:1:0] -> [80000000:4:0:1:0] 2026-01-08T15:24:09.952945Z 3 00h01m32.935048s :BS_NODE DEBUG: [3] NodeServiceSetUpdate 2026-01-08T15:24:09.952995Z 3 00h01m32.935048s :BS_NODE DEBUG: [3] VDiskId# [80000000:3:0:2:0] -> [80000000:4:0:2:0] 2026-01-08T15:24:09.953109Z 4 00h01m32.935048s :BS_NODE DEBUG: [4] NodeServiceSetUpdate 2026-01-08T15:24:09.953169Z 4 00h01m32.935048s :BS_NODE DEBUG: [4] VDiskId# [80000000:3:1:0:0] -> [80000000:4:1:0:0] 2026-01-08T15:24:09.953260Z 5 00h01m32.935048s :BS_NODE DEBUG: [5] NodeServiceSetUpdate 2026-01-08T15:24:09.953324Z 5 00h01m32.935048s :BS_NODE DEBUG: [5] VDiskId# [80000000:3:1:1:0] -> [80000000:4:1:1:0] 2026-01-08T15:24:09.953445Z 6 00h01m32.935048s :BS_NODE DEBUG: [6] NodeServiceSetUpdate 
2026-01-08T15:24:09.953548Z 6 00h01m32.935048s :BS_NODE DEBUG: [6] VDiskId# [80000000:3:1:2:0] -> [80000000:4:1:2:0] 2026-01-08T15:24:09.953632Z 9 00h01m32.935048s :BS_NODE DEBUG: [9] NodeServiceSetUpdate 2026-01-08T15:24:09.953726Z 13 00h01m32.935048s :BS_NODE DEBUG: [13] NodeServiceSetUpdate 2026-01-08T15:24:09.953785Z 13 00h01m32.935048s :BS_NODE DEBUG: [13] VDiskId# [80000000:3:2:0:0] -> [80000000:4:2:0:0] 2026-01-08T15:24:09.953865Z 14 00h01m32.935048s :BS_NODE DEBUG: [14] NodeServiceSetUpdate 2026-01-08T15:24:09.953908Z 14 00h01m32.935048s :BS_NODE DEBUG: [14] VDiskId# [80000000:3:2:1:0] -> [80000000:4:2:1:0] 2026-01-08T15:24:09.954002Z 15 00h01m32.935048s :BS_NODE DEBUG: [15] NodeServiceSetUpdate 2026-01-08T15:24:09.954046Z 15 00h01m32.935048s :BS_NODE DEBUG: [15] VDiskId# [80000000:4:2:2:0] PDiskId# 1000 VSlotId# 1001 created 2026-01-08T15:24:09.954122Z 15 00h01m32.935048s :BS_NODE DEBUG: [15] VDiskId# [80000000:4:2:2:0] status changed to INIT_PENDING 2026-01-08T15:24:09.955437Z 11 00h01m33.796512s :BS_NODE DEBUG: [11] VDiskId# [80000001:1:0:1:0] status changed to READY 2026-01-08T15:24:09.955916Z 15 00h01m33.939048s :BS_NODE DEBUG: [15] VDiskId# [80000000:4:2:2:0] status changed to REPLICATING 2026-01-08T15:24:09.956675Z 1 00h01m36.340512s :BS_NODE DEBUG: [1] VDiskId# [80000001:1:2:0:0] status changed to READY 2026-01-08T15:24:09.957109Z 14 00h01m36.947512s :BS_NODE DEBUG: [14] VDiskId# [80000001:1:1:1:0] status changed to READY 2026-01-08T15:24:09.962893Z 15 00h02m00.233048s :BS_NODE DEBUG: [15] VDiskId# [80000000:4:2:2:0] status changed to READY 2026-01-08T15:24:09.963945Z 9 00h02m00.233560s :BS_NODE DEBUG: [9] NodeServiceSetUpdate 2026-01-08T15:24:09.964010Z 9 00h02m00.233560s :BS_NODE DEBUG: [9] VDiskId# [80000000:3:2:2:0] destroyed |83.3%| [TM] {BAZEL_UPLOAD} ydb/core/sys_view/service/ut/unittest |83.3%| [TS] {BAZEL_UPLOAD} ydb/library/persqueue/topic_parser/ut/unittest >> TInterconnectTest::TestEventWithPayloadSerialization [GOOD] |83.3%| [TS] {asan, default-linux-x86_64, release} ydb/library/persqueue/topic_parser/ut/unittest >> TopicNameConverterForCPTest::BadModernTopics [GOOD] |83.3%| [TM] {BAZEL_UPLOAD} ydb/core/mind/bscontroller/ut_selfheal/unittest |83.3%| [TM] {BAZEL_UPLOAD} ydb/core/blobstorage/vdisk/anubis_osiris/ut/unittest ------- [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/ut_blobstorage/ut_balancing/unittest >> VDiskBalancing::TestStopOneNode_Block42_HugeBlob [GOOD] Test command err: RandomSeed# 5434202187580942490 SEND TEvPut with key [1:1:1:0:0:3201024:0] TEvPutResult: TEvPutResult {Id# [1:1:1:0:0:3201024:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999988} SEND TEvPut with key [1:1:2:0:0:3201024:0] 2026-01-08T15:24:07.413982Z 3 00h01m00.010512s :BS_NODE ERROR: {NW42@node_warden_pipe.cpp:60} Handle(TEvTabletPipe::TEvClientDestroyed) ClientId# [3:194:17] ServerId# [1:296:63] TabletId# 72057594037932033 PipeClientId# [3:194:17] 2026-01-08T15:24:07.414152Z 8 00h01m00.010512s :BS_NODE ERROR: {NW42@node_warden_pipe.cpp:60} Handle(TEvTabletPipe::TEvClientDestroyed) ClientId# [8:229:17] ServerId# [1:301:68] TabletId# 72057594037932033 PipeClientId# [8:229:17] 2026-01-08T15:24:07.414259Z 6 00h01m00.010512s :BS_NODE ERROR: {NW42@node_warden_pipe.cpp:60} Handle(TEvTabletPipe::TEvClientDestroyed) ClientId# [6:215:17] ServerId# [1:299:66] TabletId# 72057594037932033 PipeClientId# [6:215:17] 2026-01-08T15:24:07.414387Z 5 00h01m00.010512s :BS_NODE ERROR: {NW42@node_warden_pipe.cpp:60} Handle(TEvTabletPipe::TEvClientDestroyed) ClientId# 
[5:208:17] ServerId# [1:298:65] TabletId# 72057594037932033 PipeClientId# [5:208:17] 2026-01-08T15:24:07.414491Z 4 00h01m00.010512s :BS_NODE ERROR: {NW42@node_warden_pipe.cpp:60} Handle(TEvTabletPipe::TEvClientDestroyed) ClientId# [4:201:17] ServerId# [1:297:64] TabletId# 72057594037932033 PipeClientId# [4:201:17] 2026-01-08T15:24:07.414581Z 2 00h01m00.010512s :BS_NODE ERROR: {NW42@node_warden_pipe.cpp:60} Handle(TEvTabletPipe::TEvClientDestroyed) ClientId# [2:187:17] ServerId# [1:295:62] TabletId# 72057594037932033 PipeClientId# [2:187:17] 2026-01-08T15:24:07.414721Z 7 00h01m00.010512s :BS_NODE ERROR: {NW42@node_warden_pipe.cpp:60} Handle(TEvTabletPipe::TEvClientDestroyed) ClientId# [7:222:17] ServerId# [1:300:67] TabletId# 72057594037932033 PipeClientId# [7:222:17] TEvPutResult: TEvPutResult {Id# [1:1:2:0:0:3201024:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999988} Start compaction Finish compaction |83.3%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/vdisk/anubis_osiris/ut/unittest |83.3%| [TM] {BAZEL_UPLOAD} ydb/core/util/ut/unittest >> DiscoveryConverterTest::AccountDatabase [GOOD] >> DiscoveryConverterTest::CmWay [GOOD] |83.4%| [TA] {RESULT} $(B)/ydb/core/tx/scheme_board/ut_replica/test-results/unittest/{meta.json ... results_accumulator.log} |83.4%| [TM] {BAZEL_UPLOAD} ydb/core/mind/bscontroller/ut_selfheal/unittest |83.4%| [TS] {BAZEL_UPLOAD} ydb/library/persqueue/topic_parser/ut/unittest |83.4%| [TM] {BAZEL_UPLOAD} ydb/core/blobstorage/vdisk/anubis_osiris/ut/unittest |83.4%| [TM] {BAZEL_UPLOAD} ydb/core/blobstorage/ut_blobstorage/ut_balancing/unittest |83.4%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/ut_blobstorage/ut_vdisk_restart/unittest |83.4%| [TS] {asan, default-linux-x86_64, release} ydb/library/persqueue/topic_parser/ut/unittest >> DiscoveryConverterTest::CmWay [GOOD] |83.4%| [TM] {BAZEL_UPLOAD} ydb/core/blobstorage/ut_blobstorage/ut_vdisk_restart/unittest |83.4%| [TS] {BAZEL_UPLOAD} ydb/library/persqueue/topic_parser/ut/unittest |83.4%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/vdisk/anubis_osiris/ut/unittest >> TCowBTreeTest::RandomInsertThreadSafe [GOOD] >> TCowBTreeTest::MultipleSnapshots >> SysViewQueryHistory::StableMerge [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/actorlib_impl/ut/unittest >> TInterconnectTest::TestEventWithPayloadSerialization [GOOD] Test command err: Starting iteration 0 Starting iteration 1 Starting iteration 2 Starting iteration 3 Starting iteration 4 Starting iteration 5 Starting iteration 6 Starting iteration 7 Starting iteration 8 Starting iteration 9 Starting iteration 10 Starting iteration 11 Starting iteration 12 Starting iteration 13 Starting iteration 14 Starting iteration 15 Starting iteration 16 Starting iteration 17 Starting iteration 18 Starting iteration 19 Starting iteration 20 Starting iteration 21 Starting iteration 22 Starting iteration 23 Starting iteration 24 Starting iteration 25 Starting iteration 26 Starting iteration 27 Starting iteration 28 Starting iteration 29 Starting iteration 30 Starting iteration 31 Starting iteration 32 Starting iteration 33 Starting iteration 34 Starting iteration 35 Starting iteration 36 Starting iteration 37 Starting iteration 38 Starting iteration 39 Starting iteration 40 Starting iteration 41 Starting iteration 42 Starting iteration 43 Starting iteration 44 Starting iteration 45 Starting iteration 46 Starting iteration 47 Starting iteration 48 Starting iteration 49 0 0 0 1 0 3 0 7 0 15 0 31 0 
63 0 127 0 255 0 511 0 1023 0 2047 0 4095 0 8191 0 16383 0 32767 0 65535 1 0 1 1 1 3 1 7 1 15 1 31 1 63 1 127 1 255 1 511 1 1023 1 2047 1 4095 1 8191 1 16383 1 32767 1 65535 3 0 3 1 3 3 3 7 3 15 3 31 3 63 3 127 3 255 3 511 3 1023 3 2047 3 4095 3 8191 3 16383 3 32767 3 65535 7 0 7 1 7 3 7 7 7 15 7 31 7 63 7 127 7 255 7 511 7 1023 7 2047 7 4095 7 8191 7 16383 7 32767 7 65535 15 0 15 1 15 3 15 7 15 15 15 31 15 63 15 127 15 255 15 511 15 1023 15 2047 15 4095 15 8191 15 16383 15 32767 15 65535 31 0 31 1 31 3 31 7 31 15 31 31 31 63 31 127 31 255 31 511 31 1023 31 2047 31 4095 31 8191 31 16383 31 32767 31 65535 63 0 63 1 63 3 63 7 63 15 63 31 63 63 63 127 63 255 63 511 63 1023 63 2047 63 4095 63 8191 63 16383 63 32767 63 65535 127 0 127 1 127 3 127 7 127 15 127 31 127 63 127 127 127 255 127 511 127 1023 127 2047 127 4095 127 8191 127 16383 127 32767 127 65535 255 0 255 1 255 3 255 7 255 15 255 31 255 63 255 127 255 255 255 511 255 1023 255 2047 255 4095 255 8191 255 16383 255 32767 255 65535 511 0 511 1 511 3 511 7 511 15 511 31 511 63 511 127 511 255 511 511 511 1023 511 2047 511 4095 511 8191 511 16383 511 32767 511 65535 1023 0 1023 1 1023 3 1023 7 1023 15 1023 31 1023 63 1023 127 1023 255 1023 511 1023 1023 1023 2047 1023 4095 1023 8191 1023 16383 1023 32767 1023 65535 2047 0 2047 1 2047 3 2047 7 2047 15 2047 31 2047 63 2047 127 2047 255 2047 511 2047 1023 2047 2047 2047 4095 2047 8191 2047 16383 2047 32767 2047 65535 4095 0 4095 1 4095 3 4095 7 4095 15 4095 31 4095 63 4095 127 4095 255 4095 511 4095 1023 4095 2047 4095 4095 4095 8191 4095 16383 4095 32767 4095 65535 8191 0 8191 1 8191 3 8191 7 8191 15 8191 31 8191 63 8191 127 8191 255 8191 511 8191 1023 8191 2047 8191 4095 8191 8191 8191 16383 8191 32767 8191 65535 16383 0 16383 1 16383 3 16383 7 16383 15 16383 31 16383 63 16383 127 16383 255 16383 511 16383 1023 16383 2047 16383 4095 16383 8191 16383 16383 16383 32767 16383 65535 32767 0 32767 1 32767 3 32767 7 32767 15 32767 31 32767 63 32767 127 32767 255 32767 511 32767 1023 32767 2047 32767 4095 32767 8191 32767 16383 32767 32767 32767 65535 65535 0 65535 1 65535 3 65535 7 65535 15 65535 31 65535 63 65535 127 65535 255 65535 511 65535 1023 65535 2047 65535 4095 65535 8191 65535 16383 65535 32767 65535 65535 |83.4%| [TM] {BAZEL_UPLOAD} ydb/core/blobstorage/vdisk/anubis_osiris/ut/unittest ------- [TM] {asan, default-linux-x86_64, release} ydb/core/mind/bscontroller/ut_selfheal/unittest >> BsControllerTest::TestLocalSelfHeal [GOOD] Test command err: 2026-01-08T15:24:06.889934Z 1 00h00m00.000000s :BS_NODE DEBUG: [1] Bootstrap 2026-01-08T15:24:06.889989Z 1 00h00m00.000000s :BS_NODE DEBUG: [1] Connect 2026-01-08T15:24:06.890089Z 2 00h00m00.000000s :BS_NODE DEBUG: [2] Bootstrap 2026-01-08T15:24:06.890115Z 2 00h00m00.000000s :BS_NODE DEBUG: [2] Connect 2026-01-08T15:24:06.890156Z 3 00h00m00.000000s :BS_NODE DEBUG: [3] Bootstrap 2026-01-08T15:24:06.890179Z 3 00h00m00.000000s :BS_NODE DEBUG: [3] Connect 2026-01-08T15:24:06.890215Z 4 00h00m00.000000s :BS_NODE DEBUG: [4] Bootstrap 2026-01-08T15:24:06.890237Z 4 00h00m00.000000s :BS_NODE DEBUG: [4] Connect 2026-01-08T15:24:06.890274Z 5 00h00m00.000000s :BS_NODE DEBUG: [5] Bootstrap 2026-01-08T15:24:06.890296Z 5 00h00m00.000000s :BS_NODE DEBUG: [5] Connect 2026-01-08T15:24:06.890345Z 6 00h00m00.000000s :BS_NODE DEBUG: [6] Bootstrap 2026-01-08T15:24:06.890371Z 6 00h00m00.000000s :BS_NODE DEBUG: [6] Connect 2026-01-08T15:24:06.890402Z 7 00h00m00.000000s :BS_NODE DEBUG: [7] Bootstrap 2026-01-08T15:24:06.890433Z 7 00h00m00.000000s :BS_NODE DEBUG: [7] 
Connect 2026-01-08T15:24:06.890484Z 8 00h00m00.000000s :BS_NODE DEBUG: [8] Bootstrap 2026-01-08T15:24:06.890509Z 8 00h00m00.000000s :BS_NODE DEBUG: [8] Connect 2026-01-08T15:24:06.890551Z 9 00h00m00.000000s :BS_NODE DEBUG: [9] Bootstrap 2026-01-08T15:24:06.890573Z 9 00h00m00.000000s :BS_NODE DEBUG: [9] Connect 2026-01-08T15:24:06.890605Z 10 00h00m00.000000s :BS_NODE DEBUG: [10] Bootstrap 2026-01-08T15:24:06.890626Z 10 00h00m00.000000s :BS_NODE DEBUG: [10] Connect 2026-01-08T15:24:06.890711Z 11 00h00m00.000000s :BS_NODE DEBUG: [11] Bootstrap 2026-01-08T15:24:06.890733Z 11 00h00m00.000000s :BS_NODE DEBUG: [11] Connect 2026-01-08T15:24:06.890765Z 12 00h00m00.000000s :BS_NODE DEBUG: [12] Bootstrap 2026-01-08T15:24:06.890796Z 12 00h00m00.000000s :BS_NODE DEBUG: [12] Connect 2026-01-08T15:24:06.890831Z 13 00h00m00.000000s :BS_NODE DEBUG: [13] Bootstrap 2026-01-08T15:24:06.890850Z 13 00h00m00.000000s :BS_NODE DEBUG: [13] Connect 2026-01-08T15:24:06.890889Z 14 00h00m00.000000s :BS_NODE DEBUG: [14] Bootstrap 2026-01-08T15:24:06.890909Z 14 00h00m00.000000s :BS_NODE DEBUG: [14] Connect 2026-01-08T15:24:06.890942Z 15 00h00m00.000000s :BS_NODE DEBUG: [15] Bootstrap 2026-01-08T15:24:06.890976Z 15 00h00m00.000000s :BS_NODE DEBUG: [15] Connect 2026-01-08T15:24:06.891011Z 16 00h00m00.000000s :BS_NODE DEBUG: [16] Bootstrap 2026-01-08T15:24:06.891031Z 16 00h00m00.000000s :BS_NODE DEBUG: [16] Connect 2026-01-08T15:24:06.891071Z 17 00h00m00.000000s :BS_NODE DEBUG: [17] Bootstrap 2026-01-08T15:24:06.891091Z 17 00h00m00.000000s :BS_NODE DEBUG: [17] Connect 2026-01-08T15:24:06.891121Z 18 00h00m00.000000s :BS_NODE DEBUG: [18] Bootstrap 2026-01-08T15:24:06.891140Z 18 00h00m00.000000s :BS_NODE DEBUG: [18] Connect 2026-01-08T15:24:06.891169Z 19 00h00m00.000000s :BS_NODE DEBUG: [19] Bootstrap 2026-01-08T15:24:06.891190Z 19 00h00m00.000000s :BS_NODE DEBUG: [19] Connect 2026-01-08T15:24:06.891225Z 20 00h00m00.000000s :BS_NODE DEBUG: [20] Bootstrap 2026-01-08T15:24:06.891245Z 20 00h00m00.000000s :BS_NODE DEBUG: [20] Connect 2026-01-08T15:24:06.891282Z 21 00h00m00.000000s :BS_NODE DEBUG: [21] Bootstrap 2026-01-08T15:24:06.891303Z 21 00h00m00.000000s :BS_NODE DEBUG: [21] Connect 2026-01-08T15:24:06.891334Z 22 00h00m00.000000s :BS_NODE DEBUG: [22] Bootstrap 2026-01-08T15:24:06.891354Z 22 00h00m00.000000s :BS_NODE DEBUG: [22] Connect 2026-01-08T15:24:06.891401Z 23 00h00m00.000000s :BS_NODE DEBUG: [23] Bootstrap 2026-01-08T15:24:06.891438Z 23 00h00m00.000000s :BS_NODE DEBUG: [23] Connect 2026-01-08T15:24:06.891476Z 24 00h00m00.000000s :BS_NODE DEBUG: [24] Bootstrap 2026-01-08T15:24:06.891497Z 24 00h00m00.000000s :BS_NODE DEBUG: [24] Connect 2026-01-08T15:24:06.891528Z 25 00h00m00.000000s :BS_NODE DEBUG: [25] Bootstrap 2026-01-08T15:24:06.891552Z 25 00h00m00.000000s :BS_NODE DEBUG: [25] Connect 2026-01-08T15:24:06.891586Z 26 00h00m00.000000s :BS_NODE DEBUG: [26] Bootstrap 2026-01-08T15:24:06.891609Z 26 00h00m00.000000s :BS_NODE DEBUG: [26] Connect 2026-01-08T15:24:06.891656Z 27 00h00m00.000000s :BS_NODE DEBUG: [27] Bootstrap 2026-01-08T15:24:06.891681Z 27 00h00m00.000000s :BS_NODE DEBUG: [27] Connect 2026-01-08T15:24:06.891728Z 28 00h00m00.000000s :BS_NODE DEBUG: [28] Bootstrap 2026-01-08T15:24:06.891749Z 28 00h00m00.000000s :BS_NODE DEBUG: [28] Connect 2026-01-08T15:24:06.891782Z 29 00h00m00.000000s :BS_NODE DEBUG: [29] Bootstrap 2026-01-08T15:24:06.891805Z 29 00h00m00.000000s :BS_NODE DEBUG: [29] Connect 2026-01-08T15:24:06.891852Z 30 00h00m00.000000s :BS_NODE DEBUG: [30] Bootstrap 2026-01-08T15:24:06.891877Z 30 
00h00m00.000000s :BS_NODE DEBUG: [30] Connect 2026-01-08T15:24:06.891915Z 31 00h00m00.000000s :BS_NODE DEBUG: [31] Bootstrap 2026-01-08T15:24:06.891948Z 31 00h00m00.000000s :BS_NODE DEBUG: [31] Connect 2026-01-08T15:24:06.891985Z 32 00h00m00.000000s :BS_NODE DEBUG: [32] Bootstrap 2026-01-08T15:24:06.892007Z 32 00h00m00.000000s :BS_NODE DEBUG: [32] Connect 2026-01-08T15:24:06.892044Z 33 00h00m00.000000s :BS_NODE DEBUG: [33] Bootstrap 2026-01-08T15:24:06.892069Z 33 00h00m00.000000s :BS_NODE DEBUG: [33] Connect 2026-01-08T15:24:06.892125Z 34 00h00m00.000000s :BS_NODE DEBUG: [34] Bootstrap 2026-01-08T15:24:06.892147Z 34 00h00m00.000000s :BS_NODE DEBUG: [34] Connect 2026-01-08T15:24:06.892177Z 35 00h00m00.000000s :BS_NODE DEBUG: [35] Bootstrap 2026-01-08T15:24:06.892201Z 35 00h00m00.000000s :BS_NODE DEBUG: [35] Connect 2026-01-08T15:24:06.892250Z 36 00h00m00.000000s :BS_NODE DEBUG: [36] Bootstrap 2026-01-08T15:24:06.892272Z 36 00h00m00.000000s :BS_NODE DEBUG: [36] Connect 2026-01-08T15:24:06.913345Z 1 00h00m00.000000s :BS_NODE DEBUG: [1] ClientConnected Sender# [1:2719:59] Status# ERROR ClientId# [1:2719:59] ServerId# [0:0:0] PipeClient# [1:2719:59] 2026-01-08T15:24:06.914836Z 2 00h00m00.000000s :BS_NODE DEBUG: [2] ClientConnected Sender# [2:2720:41] Status# ERROR ClientId# [2:2720:41] ServerId# [0:0:0] PipeClient# [2:2720:41] 2026-01-08T15:24:06.914898Z 3 00h00m00.000000s :BS_NODE DEBUG: [3] ClientConnected Sender# [3:2721:41] Status# ERROR ClientId# [3:2721:41] ServerId# [0:0:0] PipeClient# [3:2721:41] 2026-01-08T15:24:06.914945Z 4 00h00m00.000000s :BS_NODE DEBUG: [4] ClientConnected Sender# [4:2722:41] Status# ERROR ClientId# [4:2722:41] ServerId# [0:0:0] PipeClient# [4:2722:41] 2026-01-08T15:24:06.914991Z 5 00h00m00.000000s :BS_NODE DEBUG: [5] ClientConnected Sender# [5:2723:41] Status# ERROR ClientId# [5:2723:41] ServerId# [0:0:0] PipeClient# [5:2723:41] 2026-01-08T15:24:06.915032Z 6 00h00m00.000000s :BS_NODE DEBUG: [6] ClientConnected Sender# [6:2724:41] Status# ERROR ClientId# [6:2724:41] ServerId# [0:0:0] PipeClient# [6:2724:41] 2026-01-08T15:24:06.915072Z 7 00h00m00.000000s :BS_NODE DEBUG: [7] ClientConnected Sender# [7:2725:41] Status# ERROR ClientId# [7:2725:41] ServerId# [0:0:0] PipeClient# [7:2725:41] 2026-01-08T15:24:06.915113Z 8 00h00m00.000000s :BS_NODE DEBUG: [8] ClientConnected Sender# [8:2726:41] Status# ERROR ClientId# [8:2726:41] ServerId# [0:0:0] PipeClient# [8:2726:41] 2026-01-08T15:24:06.915157Z 9 00h00m00.000000s :BS_NODE DEBUG: [9] ClientConnected Sender# [9:2727:41] Status# ERROR ClientId# [9:2727:41] ServerId# [0:0:0] PipeClient# [9:2727:41] 2026-01-08T15:24:06.915198Z 10 00h00m00.000000s :BS_NODE DEBUG: [10] ClientConnected Sender# [10:2728:41] Status# ERROR ClientId# [10:2728:41] ServerId# [0:0:0] PipeClient# [10:2728:41] 2026-01-08T15:24:06.915239Z 11 00h00m00.000000s :BS_NODE DEBUG: [11] ClientConnected Sender# [11:2729:41] Status# ERROR ClientId# [11:2729:41] ServerId# [0:0:0] PipeClient# [11:2729:41] 2026-01-08T15:24:06.915273Z 12 00h00m00.000000s :BS_NODE DEBUG: [12] ClientConnected Sender# [12:2730:41] Status# ERROR ClientId# [12:2730:41] ServerId# [0:0:0] PipeClient# [12:2730:41] 2026-01-08T15:24:06.915319Z 13 00h00m00.000000s :BS_NODE DEBUG: [13] ClientConnected Sender# [13:2731:41] Status# ERROR ClientId# [13:2731:41] ServerId# [0:0:0] PipeClient# [13:2731:41] 2026-01-08T15:24:06.915403Z 14 00h00m00.000000s :BS_NODE DEBUG: [14] ClientConnected Sender# [14:2732:41] Status# ERROR ClientId# [14:2732:41] ServerId# [0:0:0] PipeClient# [14:2732:41] 
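A note on the two clocks above: the "00h00m00.000000s" stamps are simulated time, separate from the wall-clock timestamps. The test harness advances a virtual clock from one scheduled event to the next instead of sleeping, which is why minutes of self-heal activity elapse within milliseconds of wall time. A toy sketch of that idea (an illustration of the general technique only, not YDB's actual test runtime):

    #include <cstdint>
    #include <cstdio>
    #include <functional>
    #include <queue>
    #include <vector>

    // An event scheduled at a point in virtual time.
    struct Event {
        uint64_t atUs;                         // virtual time, microseconds
        std::function<void(uint64_t)> fn;      // callback receives "now"
        bool operator>(const Event& o) const { return atUs > o.atUs; }
    };

    int main() {
        // Min-heap ordered by virtual time: pop events in time order.
        std::priority_queue<Event, std::vector<Event>, std::greater<Event>> q;
        auto say = [](const char* what) {
            return [what](uint64_t now) {
                std::printf("%10lluus: %s\n", (unsigned long long)now, what);
            };
        };
        q.push({0,         say("node bootstrap")});
        q.push({300104608, say("self-heal reassign (00h05m00.104608s)")});
        q.push({308148608, say("replacement replica READY")});
        while (!q.empty()) {                   // runs instantly; no sleeping
            Event e = q.top();
            q.pop();
            e.fn(e.atUs);                      // "advance" the clock to atUs
        }
    }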
2026-01-08T15:24:06.915443Z 15 00h00m00.000000s :BS_NODE DEBUG: [15] ClientConnected Sender# [15:2733:41] Status# ERROR ClientId# [15:2733:41] ServerId# [0:0:0] PipeClient# [15:2733:41] 2026-01-08T15:24:06.915491Z 16 00h00m00.000000s :BS_NODE DEBUG: [16] ClientConnected Sender# [16:2734:41] Status# ERROR ClientId# [16:2734:41] ServerId# [0:0:0] PipeClient# [16:2734:41] 2026-01-08T15:24:06.915536Z 17 00h00m00.000000s :BS_NODE DEBUG: [17] ClientConnected Sender# [17:2735:41] Status# ERROR ClientId# [17:2735:41] ServerId# [0:0:0] PipeClient# [17:2735:41] 2026-01-08T15:24:06.915584Z 18 00h00m00.000000s :BS_NODE DEBUG: [18] ClientConnected Sender# [18:2736:41] Status# ERROR ClientId# [18:2736:41] ServerId# [0:0:0] PipeClient# [18:2736:41] 2026-01-08T15:24:06.915632Z 19 00h00m00.000000s :BS_NODE DEBUG: [19] ClientConnected Sender# [19:2737:41] Status# ERROR ClientId# [19:2737:41] ServerId# [0:0:0] PipeClient# [19:2737:41] 2026-01-08T15:24:06.915668Z 20 00h00m00.000000s :BS_NODE DEBUG: [20] ClientConnected Sender# [20:2738:41] Status# ERROR ClientId# [20:2738:41] ServerId# [0:0:0] PipeClient# [20:2738:41] 2026-01-08T15:24:06.915704Z 21 00h00m00.000000s :BS_NODE DEBUG: [21] ClientConnected Sender# [21:2739:41] Status# ERROR ClientId# [21:2739:41] ServerId# [0:0:0] PipeClient# [21:2739:41] 2026-01-08T15:24:06.915760Z 22 00h00m00.000000s :BS_NODE DEBUG: [22] ClientConnected Sender# [22:2740:41] Status# ERROR ClientId# [22:2740:41] ServerId# [0:0:0] PipeClient# [22:2740:41] 2026-01-08T15:24:06.915805Z 23 00h00m00.000000s :BS_NODE DEBUG: [23] ClientConnected Sender# [23:2741:41] Status# ERROR ClientId# [23:2741:41] ServerId# [0:0:0] PipeClient# [23:2741:41] 2026-01-08T15:24:06.915838Z 24 00h00m00.000000s :BS_NODE DEBUG: [24] ClientConnected Sender# [24:2742:41] Status# ERROR ClientId# [24:2742:41] ServerId# [0:0:0] PipeClient# [24:2742:41] 2026-01-08T15:24:06.915872Z 25 00h00m00.000000s :BS_NODE DEBUG: [25] ClientConnected Sender# [25:2743:41] Status# ERROR ClientId# [25:2743:41] ServerId# [0:0:0] PipeClient# [25:2743:41] 2026-01-08T15:24:06.915911Z 26 00h00m00.000000s :BS_NODE DEBUG: [26] ClientConnected Sender# [26:2744:41] Status# ERROR ClientId# [26:2744:41] ServerId# [0:0:0] PipeClient# [26:2744:41] 2026-01-08T15:24:06.915965Z 27 00h00m00.000000s :BS_NODE DEBUG: [27] ClientConnected Sender# [27:2745:41] Status# ERROR ClientId# [27:2745:41] ServerId# [0:0:0] PipeClient# [27:2745:41] 2026-01-08T15:24:06.916007Z 28 00h00m00.000000s :BS_NODE DEBUG: [28] ClientConnected Sender# [28:2746:41] Status# ERROR ClientId# [28:2746:41] ServerId# [0:0:0] PipeClient# [28:2746:41] 2026-01-08T15:24:06.916064Z 29 00h00m00.000000s :BS_NODE DEBUG: [29] ClientConnected Sender# [29:2747:41] Status# ERROR ClientId# [29:2747:41] ServerId# [0:0:0] PipeClient# [29:2747:41] 2026-01-08T15:24:06.916125Z 30 00h00m00.000000s :BS_NODE DEBUG: [30] ClientConnected Sender# [30:2748:41] Status# ERROR ClientId# [30:2748:41] ServerId# [0:0:0] PipeClient# [30:2748:41] 2026-01-08T15:24:06.916178Z 31 00h00m00.000000s :BS_NODE DEBUG: [31] ClientConnected Sender# [31:2749:41] Status# ERROR ClientId# [31:2749:41] ServerId# [0:0:0] PipeClient# [31:2749:41] 2026-01-08T15:24:06.916220Z 32 00h00m00.000000s :BS_NODE DEBUG: [32] ClientConnected Sender# [32:2750:41] Status# ERROR ClientId# [32:2750:41] ServerId# [0:0:0] PipeClient# [32:2750:41] 2026-01-08T15:24:06.916259Z 33 00h00m00.000000s :BS_NODE DEBUG: [33] ClientConnected Sender# [33:2751:41] Status# ERROR ClientId# [33:2751:41] ServerId# [0:0:0] PipeClient# [33:2751:41] 
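Each entry in these dumps follows one fixed layout: wall-clock timestamp, node id, virtual time, component tag, severity, then the message. When slicing such CI logs by node or by simulated time, a small parser helps; a minimal sketch, assuming the layout holds for every entry (the field names are my own labels, not identifiers from the YDB sources):

    #include <cstdint>
    #include <cstdio>
    #include <optional>
    #include <string>

    struct LogEntry {
        std::string wallClock;   // "2026-01-08T15:24:06.915443Z"
        int         nodeId;      // 15
        uint64_t    virtualUs;   // "00h00m00.000000s" as microseconds
        std::string component;   // "BS_NODE"
        std::string severity;    // "DEBUG"
        std::string message;     // everything after "DEBUG:"
    };

    // "00h05m00.104608s" -> 300104608 microseconds.
    static std::optional<uint64_t> ParseVirtualTime(const std::string& s) {
        unsigned h = 0, m = 0;
        double sec = 0;
        if (std::sscanf(s.c_str(), "%uh%um%lfs", &h, &m, &sec) != 3)
            return std::nullopt;
        return (uint64_t)((h * 60ull + m) * 60ull * 1000000ull + sec * 1e6 + 0.5);
    }

    static std::optional<LogEntry> ParseLine(const std::string& line) {
        char wall[64], vt[64], comp[64], sev[32];
        int node = 0, consumed = 0;
        if (std::sscanf(line.c_str(), "%63s %d %63s :%63[^ ] %31[^:]:%n",
                        wall, &node, vt, comp, sev, &consumed) != 5)
            return std::nullopt;
        auto us = ParseVirtualTime(vt);
        if (!us)
            return std::nullopt;
        LogEntry e{wall, node, *us, comp, sev, line.substr(consumed)};
        return e;
    }

    int main() {
        auto e = ParseLine("2026-01-08T15:24:06.915443Z 15 00h00m00.000000s "
                           ":BS_NODE DEBUG: [15] Connect");
        if (e)
            std::printf("node=%d component=%s virtual=%lluus\n", e->nodeId,
                        e->component.c_str(), (unsigned long long)e->virtualUs);
    }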
2026-01-08T15:24:06.916300Z 34 00h00m00.000000s :BS_NODE DEBUG: [34] ClientConnected Sender# [34:2752:41] Status# ERROR ClientId# [34:2752:41] ServerId# [0:0:0] PipeClient# [34:2752:41] 2026-01-08T15:24:06.916357Z 35 00h00m00.000000s :BS_NODE DEBUG: [35] ClientConnected Sender# [35:2753:41] Status# ERROR ClientId# [35:2753:41 ... icated# true 2026-01-08T15:24:10.357652Z 1 00h05m00.104096s :BS_SELFHEAL DEBUG: {BSSH02@self_heal.cpp:101} Reassigner ProcessVDiskReply GroupId# 2147483670 VDiskId# [80000016:1:2:1:0] DiskIsOk# true 2026-01-08T15:24:10.357688Z 1 00h05m00.104096s :BS_SELFHEAL DEBUG: {BSSH03@self_heal.cpp:116} Reassigner TEvVStatusResult GroupId# 2147483670 Status# OK JoinedGroup# true Replicated# true 2026-01-08T15:24:10.357719Z 1 00h05m00.104096s :BS_SELFHEAL DEBUG: {BSSH02@self_heal.cpp:101} Reassigner ProcessVDiskReply GroupId# 2147483670 VDiskId# [80000016:1:2:2:0] DiskIsOk# true 2026-01-08T15:24:10.364059Z 1 00h05m00.104608s :BS_SELFHEAL INFO: {BSSH09@self_heal.cpp:214} Reassigner succeeded GroupId# 2147483670 Items# [80000016:1:1:1:0]: 23:1000:1001 -> 23:1001:1010 ConfigTxSeqNo# 48 2026-01-08T15:24:10.364108Z 1 00h05m00.104608s :BS_SELFHEAL DEBUG: {BSSH08@self_heal.cpp:225} Reassigner finished GroupId# 2147483670 Success# true 2026-01-08T15:24:10.364268Z 35 00h05m00.104608s :BS_NODE DEBUG: [35] NodeServiceSetUpdate 2026-01-08T15:24:10.364330Z 35 00h05m00.104608s :BS_NODE DEBUG: [35] VDiskId# [80000016:1:2:1:0] -> [80000016:2:2:1:0] 2026-01-08T15:24:10.364441Z 2 00h05m00.104608s :BS_NODE DEBUG: [2] NodeServiceSetUpdate 2026-01-08T15:24:10.364496Z 2 00h05m00.104608s :BS_NODE DEBUG: [2] VDiskId# [80000016:1:0:2:0] -> [80000016:2:0:2:0] 2026-01-08T15:24:10.364582Z 20 00h05m00.104608s :BS_NODE DEBUG: [20] NodeServiceSetUpdate 2026-01-08T15:24:10.364626Z 20 00h05m00.104608s :BS_NODE DEBUG: [20] VDiskId# [80000016:1:1:0:0] -> [80000016:2:1:0:0] 2026-01-08T15:24:10.364744Z 23 00h05m00.104608s :BS_NODE DEBUG: [23] NodeServiceSetUpdate 2026-01-08T15:24:10.364799Z 23 00h05m00.104608s :BS_NODE DEBUG: [23] VDiskId# [80000016:2:1:1:0] PDiskId# 1001 VSlotId# 1010 created 2026-01-08T15:24:10.364886Z 23 00h05m00.104608s :BS_NODE DEBUG: [23] VDiskId# [80000016:2:1:1:0] status changed to INIT_PENDING 2026-01-08T15:24:10.365010Z 8 00h05m00.104608s :BS_NODE DEBUG: [8] NodeServiceSetUpdate 2026-01-08T15:24:10.365070Z 8 00h05m00.104608s :BS_NODE DEBUG: [8] VDiskId# [80000016:1:0:0:0] -> [80000016:2:0:0:0] 2026-01-08T15:24:10.365170Z 26 00h05m00.104608s :BS_NODE DEBUG: [26] NodeServiceSetUpdate 2026-01-08T15:24:10.365216Z 26 00h05m00.104608s :BS_NODE DEBUG: [26] VDiskId# [80000016:1:2:2:0] -> [80000016:2:2:2:0] 2026-01-08T15:24:10.365310Z 11 00h05m00.104608s :BS_NODE DEBUG: [11] NodeServiceSetUpdate 2026-01-08T15:24:10.365357Z 11 00h05m00.104608s :BS_NODE DEBUG: [11] VDiskId# [80000016:1:0:1:0] -> [80000016:2:0:1:0] 2026-01-08T15:24:10.365455Z 14 00h05m00.104608s :BS_NODE DEBUG: [14] NodeServiceSetUpdate 2026-01-08T15:24:10.365497Z 14 00h05m00.104608s :BS_NODE DEBUG: [14] VDiskId# [80000016:1:1:2:0] -> [80000016:2:1:2:0] 2026-01-08T15:24:10.365600Z 32 00h05m00.104608s :BS_NODE DEBUG: [32] NodeServiceSetUpdate 2026-01-08T15:24:10.365661Z 32 00h05m00.104608s :BS_NODE DEBUG: [32] VDiskId# [80000016:1:2:0:0] -> [80000016:2:2:0:0] 2026-01-08T15:24:10.365975Z 1 00h05m00.104608s :BS_SELFHEAL DEBUG: {BSSH01@self_heal.cpp:76} Reassigner starting GroupId# 2147483654 2026-01-08T15:24:10.367054Z 1 00h05m00.104608s :BS_SELFHEAL DEBUG: {BSSH03@self_heal.cpp:116} Reassigner TEvVStatusResult GroupId# 2147483654 
Status# OK JoinedGroup# true Replicated# true 2026-01-08T15:24:10.367120Z 1 00h05m00.104608s :BS_SELFHEAL DEBUG: {BSSH02@self_heal.cpp:101} Reassigner ProcessVDiskReply GroupId# 2147483654 VDiskId# [80000006:1:0:0:0] DiskIsOk# true 2026-01-08T15:24:10.367165Z 1 00h05m00.104608s :BS_SELFHEAL DEBUG: {BSSH03@self_heal.cpp:116} Reassigner TEvVStatusResult GroupId# 2147483654 Status# OK JoinedGroup# true Replicated# true 2026-01-08T15:24:10.367195Z 1 00h05m00.104608s :BS_SELFHEAL DEBUG: {BSSH02@self_heal.cpp:101} Reassigner ProcessVDiskReply GroupId# 2147483654 VDiskId# [80000006:1:0:1:0] DiskIsOk# true 2026-01-08T15:24:10.367237Z 1 00h05m00.104608s :BS_SELFHEAL DEBUG: {BSSH03@self_heal.cpp:116} Reassigner TEvVStatusResult GroupId# 2147483654 Status# OK JoinedGroup# true Replicated# true 2026-01-08T15:24:10.367276Z 1 00h05m00.104608s :BS_SELFHEAL DEBUG: {BSSH02@self_heal.cpp:101} Reassigner ProcessVDiskReply GroupId# 2147483654 VDiskId# [80000006:1:0:2:0] DiskIsOk# true 2026-01-08T15:24:10.367309Z 1 00h05m00.104608s :BS_SELFHEAL DEBUG: {BSSH03@self_heal.cpp:116} Reassigner TEvVStatusResult GroupId# 2147483654 Status# OK JoinedGroup# true Replicated# true 2026-01-08T15:24:10.367338Z 1 00h05m00.104608s :BS_SELFHEAL DEBUG: {BSSH02@self_heal.cpp:101} Reassigner ProcessVDiskReply GroupId# 2147483654 VDiskId# [80000006:1:1:0:0] DiskIsOk# true 2026-01-08T15:24:10.367367Z 1 00h05m00.104608s :BS_SELFHEAL DEBUG: {BSSH03@self_heal.cpp:116} Reassigner TEvVStatusResult GroupId# 2147483654 Status# OK JoinedGroup# true Replicated# true 2026-01-08T15:24:10.367422Z 1 00h05m00.104608s :BS_SELFHEAL DEBUG: {BSSH02@self_heal.cpp:101} Reassigner ProcessVDiskReply GroupId# 2147483654 VDiskId# [80000006:1:1:2:0] DiskIsOk# true 2026-01-08T15:24:10.367460Z 1 00h05m00.104608s :BS_SELFHEAL DEBUG: {BSSH03@self_heal.cpp:116} Reassigner TEvVStatusResult GroupId# 2147483654 Status# OK JoinedGroup# true Replicated# true 2026-01-08T15:24:10.367491Z 1 00h05m00.104608s :BS_SELFHEAL DEBUG: {BSSH02@self_heal.cpp:101} Reassigner ProcessVDiskReply GroupId# 2147483654 VDiskId# [80000006:1:2:0:0] DiskIsOk# true 2026-01-08T15:24:10.367520Z 1 00h05m00.104608s :BS_SELFHEAL DEBUG: {BSSH03@self_heal.cpp:116} Reassigner TEvVStatusResult GroupId# 2147483654 Status# OK JoinedGroup# true Replicated# true 2026-01-08T15:24:10.367547Z 1 00h05m00.104608s :BS_SELFHEAL DEBUG: {BSSH02@self_heal.cpp:101} Reassigner ProcessVDiskReply GroupId# 2147483654 VDiskId# [80000006:1:2:1:0] DiskIsOk# true 2026-01-08T15:24:10.367576Z 1 00h05m00.104608s :BS_SELFHEAL DEBUG: {BSSH03@self_heal.cpp:116} Reassigner TEvVStatusResult GroupId# 2147483654 Status# OK JoinedGroup# true Replicated# true 2026-01-08T15:24:10.367619Z 1 00h05m00.104608s :BS_SELFHEAL DEBUG: {BSSH02@self_heal.cpp:101} Reassigner ProcessVDiskReply GroupId# 2147483654 VDiskId# [80000006:1:2:2:0] DiskIsOk# true 2026-01-08T15:24:10.373764Z 1 00h05m00.105120s :BS_SELFHEAL INFO: {BSSH09@self_heal.cpp:214} Reassigner succeeded GroupId# 2147483654 Items# [80000006:1:1:1:0]: 23:1000:1000 -> 23:1002:1010 ConfigTxSeqNo# 49 2026-01-08T15:24:10.373814Z 1 00h05m00.105120s :BS_SELFHEAL DEBUG: {BSSH08@self_heal.cpp:225} Reassigner finished GroupId# 2147483654 Success# true 2026-01-08T15:24:10.373988Z 35 00h05m00.105120s :BS_NODE DEBUG: [35] NodeServiceSetUpdate 2026-01-08T15:24:10.374066Z 35 00h05m00.105120s :BS_NODE DEBUG: [35] VDiskId# [80000006:1:2:1:0] -> [80000006:2:2:1:0] 2026-01-08T15:24:10.374191Z 2 00h05m00.105120s :BS_NODE DEBUG: [2] NodeServiceSetUpdate 2026-01-08T15:24:10.374243Z 2 00h05m00.105120s 
:BS_NODE DEBUG: [2] VDiskId# [80000006:1:0:2:0] -> [80000006:2:0:2:0] 2026-01-08T15:24:10.374360Z 20 00h05m00.105120s :BS_NODE DEBUG: [20] NodeServiceSetUpdate 2026-01-08T15:24:10.374408Z 20 00h05m00.105120s :BS_NODE DEBUG: [20] VDiskId# [80000006:1:1:0:0] -> [80000006:2:1:0:0] 2026-01-08T15:24:10.374535Z 23 00h05m00.105120s :BS_NODE DEBUG: [23] NodeServiceSetUpdate 2026-01-08T15:24:10.374594Z 23 00h05m00.105120s :BS_NODE DEBUG: [23] VDiskId# [80000006:2:1:1:0] PDiskId# 1002 VSlotId# 1010 created 2026-01-08T15:24:10.374667Z 23 00h05m00.105120s :BS_NODE DEBUG: [23] VDiskId# [80000006:2:1:1:0] status changed to INIT_PENDING 2026-01-08T15:24:10.374778Z 8 00h05m00.105120s :BS_NODE DEBUG: [8] NodeServiceSetUpdate 2026-01-08T15:24:10.374828Z 8 00h05m00.105120s :BS_NODE DEBUG: [8] VDiskId# [80000006:1:0:0:0] -> [80000006:2:0:0:0] 2026-01-08T15:24:10.374912Z 26 00h05m00.105120s :BS_NODE DEBUG: [26] NodeServiceSetUpdate 2026-01-08T15:24:10.374963Z 26 00h05m00.105120s :BS_NODE DEBUG: [26] VDiskId# [80000006:1:2:2:0] -> [80000006:2:2:2:0] 2026-01-08T15:24:10.375089Z 11 00h05m00.105120s :BS_NODE DEBUG: [11] NodeServiceSetUpdate 2026-01-08T15:24:10.375195Z 11 00h05m00.105120s :BS_NODE DEBUG: [11] VDiskId# [80000006:1:0:1:0] -> [80000006:2:0:1:0] 2026-01-08T15:24:10.375291Z 14 00h05m00.105120s :BS_NODE DEBUG: [14] NodeServiceSetUpdate 2026-01-08T15:24:10.375335Z 14 00h05m00.105120s :BS_NODE DEBUG: [14] VDiskId# [80000006:1:1:2:0] -> [80000006:2:1:2:0] 2026-01-08T15:24:10.375429Z 32 00h05m00.105120s :BS_NODE DEBUG: [32] NodeServiceSetUpdate 2026-01-08T15:24:10.375486Z 32 00h05m00.105120s :BS_NODE DEBUG: [32] VDiskId# [80000006:1:2:0:0] -> [80000006:2:2:0:0] 2026-01-08T15:24:10.376750Z 23 00h05m01.707608s :BS_NODE DEBUG: [23] VDiskId# [80000016:2:1:1:0] status changed to REPLICATING 2026-01-08T15:24:10.377327Z 23 00h05m02.634048s :BS_NODE DEBUG: [23] VDiskId# [80000066:2:1:1:0] status changed to REPLICATING 2026-01-08T15:24:10.378142Z 23 00h05m03.189584s :BS_NODE DEBUG: [23] VDiskId# [80000036:2:1:1:0] status changed to REPLICATING 2026-01-08T15:24:10.379065Z 23 00h05m04.018560s :BS_NODE DEBUG: [23] VDiskId# [80000056:2:1:1:0] status changed to REPLICATING 2026-01-08T15:24:10.379833Z 23 00h05m04.245096s :BS_NODE DEBUG: [23] VDiskId# [80000026:2:1:1:0] status changed to REPLICATING 2026-01-08T15:24:10.380628Z 23 00h05m04.267536s :BS_NODE DEBUG: [23] VDiskId# [80000076:2:1:1:0] status changed to REPLICATING 2026-01-08T15:24:10.381496Z 23 00h05m04.622120s :BS_NODE DEBUG: [23] VDiskId# [80000006:2:1:1:0] status changed to REPLICATING 2026-01-08T15:24:10.383288Z 23 00h05m05.139072s :BS_NODE DEBUG: [23] VDiskId# [80000046:2:1:1:0] status changed to REPLICATING 2026-01-08T15:24:10.384233Z 23 00h05m08.148608s :BS_NODE DEBUG: [23] VDiskId# [80000016:2:1:1:0] status changed to READY 2026-01-08T15:24:10.385713Z 23 00h05m08.149120s :BS_NODE DEBUG: [23] NodeServiceSetUpdate 2026-01-08T15:24:10.385775Z 23 00h05m08.149120s :BS_NODE DEBUG: [23] VDiskId# [80000016:1:1:1:0] destroyed 2026-01-08T15:24:10.387048Z 23 00h05m18.471048s :BS_NODE DEBUG: [23] VDiskId# [80000066:2:1:1:0] status changed to READY 2026-01-08T15:24:10.388644Z 23 00h05m18.471560s :BS_NODE DEBUG: [23] NodeServiceSetUpdate 2026-01-08T15:24:10.388702Z 23 00h05m18.471560s :BS_NODE DEBUG: [23] VDiskId# [80000066:1:1:1:0] destroyed 2026-01-08T15:24:10.388855Z 23 00h05m19.497536s :BS_NODE DEBUG: [23] VDiskId# [80000076:2:1:1:0] status changed to READY 2026-01-08T15:24:10.390903Z 23 00h05m19.498048s :BS_NODE DEBUG: [23] NodeServiceSetUpdate 
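The trace above is the self-heal happy path in miniature: the reassigner creates a replacement slot (INIT_PENDING), the new replica pulls data (REPLICATING), reaches READY, and only then is the old replica destroyed. A simplified model of that lifecycle, with illustrative names rather than YDB's actual types:

    #include <cassert>
    #include <cstdio>

    enum class SlotState { InitPending, Replicating, Ready };

    struct ReplacedSlot {
        SlotState state = SlotState::InitPending;  // created by the reassigner
        bool oldReplicaDestroyed = false;

        void OnStartedReplication() {
            assert(state == SlotState::InitPending);
            state = SlotState::Replicating;
        }
        void OnReplicationDone() {
            assert(state == SlotState::Replicating);
            state = SlotState::Ready;
            oldReplicaDestroyed = true;  // safe only once data is restored
        }
    };

    int main() {
        ReplacedSlot s;            // e.g. [80000006:2:1:1:0] created
        s.OnStartedReplication();  // "status changed to REPLICATING"
        s.OnReplicationDone();     // "status changed to READY"
        std::printf("old replica destroyed: %s\n",
                    s.oldReplicaDestroyed ? "yes" : "no");
    }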
2026-01-08T15:24:10.390969Z 23 00h05m19.498048s :BS_NODE DEBUG: [23] VDiskId# [80000076:1:1:1:0] destroyed 2026-01-08T15:24:10.391499Z 23 00h05m21.304584s :BS_NODE DEBUG: [23] VDiskId# [80000036:2:1:1:0] status changed to READY 2026-01-08T15:24:10.393115Z 23 00h05m21.305096s :BS_NODE DEBUG: [23] NodeServiceSetUpdate 2026-01-08T15:24:10.393174Z 23 00h05m21.305096s :BS_NODE DEBUG: [23] VDiskId# [80000036:1:1:1:0] destroyed 2026-01-08T15:24:10.394553Z 23 00h05m31.274560s :BS_NODE DEBUG: [23] VDiskId# [80000056:2:1:1:0] status changed to READY 2026-01-08T15:24:10.396243Z 23 00h05m31.275072s :BS_NODE DEBUG: [23] NodeServiceSetUpdate 2026-01-08T15:24:10.396299Z 23 00h05m31.275072s :BS_NODE DEBUG: [23] VDiskId# [80000056:1:1:1:0] destroyed 2026-01-08T15:24:10.396500Z 23 00h05m31.298120s :BS_NODE DEBUG: [23] VDiskId# [80000006:2:1:1:0] status changed to READY 2026-01-08T15:24:10.398192Z 23 00h05m31.298632s :BS_NODE DEBUG: [23] NodeServiceSetUpdate 2026-01-08T15:24:10.398245Z 23 00h05m31.298632s :BS_NODE DEBUG: [23] VDiskId# [80000006:1:1:1:0] destroyed 2026-01-08T15:24:10.398775Z 23 00h05m33.779072s :BS_NODE DEBUG: [23] VDiskId# [80000046:2:1:1:0] status changed to READY 2026-01-08T15:24:10.400540Z 23 00h05m33.779584s :BS_NODE DEBUG: [23] NodeServiceSetUpdate 2026-01-08T15:24:10.400593Z 23 00h05m33.779584s :BS_NODE DEBUG: [23] VDiskId# [80000046:1:1:1:0] destroyed 2026-01-08T15:24:10.401835Z 23 00h05m38.765096s :BS_NODE DEBUG: [23] VDiskId# [80000026:2:1:1:0] status changed to READY 2026-01-08T15:24:10.403754Z 23 00h05m38.765608s :BS_NODE DEBUG: [23] NodeServiceSetUpdate 2026-01-08T15:24:10.403812Z 23 00h05m38.765608s :BS_NODE DEBUG: [23] VDiskId# [80000026:1:1:1:0] destroyed |83.4%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/vdisk/anubis_osiris/ut/unittest >> TBlobStorageAnubisAlgo::Mirror3 [GOOD] >> TopicNameConverterForCPTest::CorrectLegacyTopics [GOOD] >> TopicNameConverterForCPTest::CorrectModernTopics [GOOD] |83.4%| [TM] {BAZEL_UPLOAD} ydb/core/blobstorage/vdisk/anubis_osiris/ut/unittest |83.4%| [TM] {BAZEL_UPLOAD} ydb/core/mind/bscontroller/ut_selfheal/unittest |83.4%| [TM] {BAZEL_UPLOAD} ydb/core/actorlib_impl/ut/unittest |83.4%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/vdisk/anubis_osiris/ut/unittest |83.4%| [TM] {BAZEL_UPLOAD} ydb/core/blobstorage/vdisk/anubis_osiris/ut/unittest |83.4%| [TM] {asan, default-linux-x86_64, release} ydb/core/sys_view/service/ut/unittest >> SysViewQueryHistory::StableMerge [GOOD] |83.4%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/vdisk/hulldb/compstrat/ut/unittest |83.4%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/tools/stress_tool/ut/ydb-tools-stress_tool-ut >> DiscoveryConverterTest::MinimalName [GOOD] >> DiscoveryConverterTest::WithLogbrokerPath [GOOD] >> DSProxyStrategyTest::Restore_block42 >> TBlobStorageHullFreshSegment::PerfSkipList [GOOD] |83.4%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/vdisk/anubis_osiris/ut/unittest |83.4%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/vdisk/anubis_osiris/ut/unittest |83.4%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/tools/stress_tool/ut/ydb-tools-stress_tool-ut |83.4%| [LD] {RESULT} $(B)/ydb/tools/stress_tool/ut/ydb-tools-stress_tool-ut |83.4%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/vdisk/anubis_osiris/ut/unittest >> TBlobStorageAnubisAlgo::Mirror3 [GOOD] |83.4%| [TM] {BAZEL_UPLOAD} ydb/core/blobstorage/vdisk/hulldb/compstrat/ut/unittest |83.4%| [TM] {BAZEL_UPLOAD} 
ydb/core/sys_view/service/ut/unittest |83.4%| [TM] {BAZEL_UPLOAD} ydb/core/blobstorage/vdisk/anubis_osiris/ut/unittest |83.4%| [TM] {BAZEL_UPLOAD} ydb/core/blobstorage/vdisk/anubis_osiris/ut/unittest |83.4%| [TM] {BAZEL_UPLOAD} ydb/core/blobstorage/vdisk/anubis_osiris/ut/unittest |83.4%| [TS] {asan, default-linux-x86_64, release} ydb/library/persqueue/topic_parser/ut/unittest >> DiscoveryConverterTest::WithLogbrokerPath [GOOD] |83.4%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/dsproxy/ut_strategy/unittest |83.4%| [TS] {BAZEL_UPLOAD} ydb/library/persqueue/topic_parser/ut/unittest |83.4%| [TM] {BAZEL_UPLOAD} ydb/core/blobstorage/dsproxy/ut_strategy/unittest |83.4%| [TS] {asan, default-linux-x86_64, release} ydb/library/persqueue/topic_parser/ut/unittest >> TopicNameConverterForCPTest::CorrectModernTopics [GOOD] |83.4%| [TS] {BAZEL_UPLOAD} ydb/library/persqueue/topic_parser/ut/unittest |83.4%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/dsproxy/ut_strategy/unittest |83.4%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/vdisk/hulldb/fresh/ut/unittest >> TBlobStorageHullFreshSegment::PerfSkipList [GOOD] |83.4%| [TM] {BAZEL_UPLOAD} ydb/core/blobstorage/dsproxy/ut_strategy/unittest >> BsControllerTest::TestLocalBrokenRelocation [GOOD] |83.4%| [TM] {BAZEL_UPLOAD} ydb/core/blobstorage/vdisk/hulldb/fresh/ut/unittest |83.4%| [TA] $(B)/ydb/core/actorlib_impl/ut/test-results/unittest/{meta.json ... results_accumulator.log} >> ReadOnlyVDisk::TestGetWithMustRestoreFirst [GOOD] >> BSCMovePDisk::PDiskMove_Mirror3dc3Nodes [GOOD] |83.4%| [TA] {BAZEL_UPLOAD} $(B)/ydb/core/actorlib_impl/ut/test-results/unittest/{meta.json ... results_accumulator.log} |83.4%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/blobstorage/ut_blobstorage/ut_bridge/ydb-core-blobstorage-ut_blobstorage-ut_bridge |83.4%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/blobstorage/ut_blobstorage/ut_bridge/ydb-core-blobstorage-ut_blobstorage-ut_bridge |83.4%| [TM] {asan, default-linux-x86_64, release} ydb/core/mind/bscontroller/ut_selfheal/unittest |83.4%| [TS] {asan, default-linux-x86_64, release} ydb/core/scheme/ut_pg/unittest |83.4%| [TM] {BAZEL_UPLOAD} ydb/core/mind/bscontroller/ut_selfheal/unittest >> ReadOnlyVDisk::TestDiscover [GOOD] |83.4%| [TS] {BAZEL_UPLOAD} ydb/core/scheme/ut_pg/unittest |83.4%| [TA] {RESULT} $(B)/ydb/core/actorlib_impl/ut/test-results/unittest/{meta.json ... results_accumulator.log} |83.4%| [LD] {RESULT} $(B)/ydb/core/blobstorage/ut_blobstorage/ut_bridge/ydb-core-blobstorage-ut_blobstorage-ut_bridge >> ReadOnlyVDisk::TestWrites >> DSProxyStrategyTest::Restore_mirror3dc |83.5%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/dsproxy/ut_strategy/unittest |83.4%| [TA] $(B)/ydb/core/blobstorage/vdisk/hulldb/compstrat/ut/test-results/unittest/{meta.json ... 
results_accumulator.log} |83.4%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/dsproxy/ut_strategy/unittest ------- [TM] {asan, default-linux-x86_64, release} ydb/core/mind/bscontroller/ut_selfheal/unittest >> BsControllerTest::TestLocalBrokenRelocation [GOOD] Test command err: 2026-01-08T15:24:09.713725Z 1 00h00m00.000000s :BS_NODE DEBUG: [1] Bootstrap 2026-01-08T15:24:09.713771Z 1 00h00m00.000000s :BS_NODE DEBUG: [1] Connect 2026-01-08T15:24:09.713852Z 2 00h00m00.000000s :BS_NODE DEBUG: [2] Bootstrap 2026-01-08T15:24:09.713874Z 2 00h00m00.000000s :BS_NODE DEBUG: [2] Connect 2026-01-08T15:24:09.713911Z 3 00h00m00.000000s :BS_NODE DEBUG: [3] Bootstrap 2026-01-08T15:24:09.713931Z 3 00h00m00.000000s :BS_NODE DEBUG: [3] Connect 2026-01-08T15:24:09.713964Z 4 00h00m00.000000s :BS_NODE DEBUG: [4] Bootstrap 2026-01-08T15:24:09.713983Z 4 00h00m00.000000s :BS_NODE DEBUG: [4] Connect 2026-01-08T15:24:09.714013Z 5 00h00m00.000000s :BS_NODE DEBUG: [5] Bootstrap 2026-01-08T15:24:09.714032Z 5 00h00m00.000000s :BS_NODE DEBUG: [5] Connect 2026-01-08T15:24:09.714064Z 6 00h00m00.000000s :BS_NODE DEBUG: [6] Bootstrap 2026-01-08T15:24:09.714082Z 6 00h00m00.000000s :BS_NODE DEBUG: [6] Connect 2026-01-08T15:24:09.714119Z 7 00h00m00.000000s :BS_NODE DEBUG: [7] Bootstrap 2026-01-08T15:24:09.714151Z 7 00h00m00.000000s :BS_NODE DEBUG: [7] Connect 2026-01-08T15:24:09.714189Z 8 00h00m00.000000s :BS_NODE DEBUG: [8] Bootstrap 2026-01-08T15:24:09.714211Z 8 00h00m00.000000s :BS_NODE DEBUG: [8] Connect 2026-01-08T15:24:09.714255Z 9 00h00m00.000000s :BS_NODE DEBUG: [9] Bootstrap 2026-01-08T15:24:09.714276Z 9 00h00m00.000000s :BS_NODE DEBUG: [9] Connect 2026-01-08T15:24:09.714306Z 10 00h00m00.000000s :BS_NODE DEBUG: [10] Bootstrap 2026-01-08T15:24:09.718387Z 10 00h00m00.000000s :BS_NODE DEBUG: [10] Connect 2026-01-08T15:24:09.718463Z 11 00h00m00.000000s :BS_NODE DEBUG: [11] Bootstrap 2026-01-08T15:24:09.718486Z 11 00h00m00.000000s :BS_NODE DEBUG: [11] Connect 2026-01-08T15:24:09.718571Z 12 00h00m00.000000s :BS_NODE DEBUG: [12] Bootstrap 2026-01-08T15:24:09.718603Z 12 00h00m00.000000s :BS_NODE DEBUG: [12] Connect 2026-01-08T15:24:09.718642Z 13 00h00m00.000000s :BS_NODE DEBUG: [13] Bootstrap 2026-01-08T15:24:09.718663Z 13 00h00m00.000000s :BS_NODE DEBUG: [13] Connect 2026-01-08T15:24:09.718702Z 14 00h00m00.000000s :BS_NODE DEBUG: [14] Bootstrap 2026-01-08T15:24:09.718723Z 14 00h00m00.000000s :BS_NODE DEBUG: [14] Connect 2026-01-08T15:24:09.718768Z 15 00h00m00.000000s :BS_NODE DEBUG: [15] Bootstrap 2026-01-08T15:24:09.718797Z 15 00h00m00.000000s :BS_NODE DEBUG: [15] Connect 2026-01-08T15:24:09.718833Z 16 00h00m00.000000s :BS_NODE DEBUG: [16] Bootstrap 2026-01-08T15:24:09.718852Z 16 00h00m00.000000s :BS_NODE DEBUG: [16] Connect 2026-01-08T15:24:09.718890Z 17 00h00m00.000000s :BS_NODE DEBUG: [17] Bootstrap 2026-01-08T15:24:09.718909Z 17 00h00m00.000000s :BS_NODE DEBUG: [17] Connect 2026-01-08T15:24:09.718943Z 18 00h00m00.000000s :BS_NODE DEBUG: [18] Bootstrap 2026-01-08T15:24:09.718962Z 18 00h00m00.000000s :BS_NODE DEBUG: [18] Connect 2026-01-08T15:24:09.718992Z 19 00h00m00.000000s :BS_NODE DEBUG: [19] Bootstrap 2026-01-08T15:24:09.719011Z 19 00h00m00.000000s :BS_NODE DEBUG: [19] Connect 2026-01-08T15:24:09.719043Z 20 00h00m00.000000s :BS_NODE DEBUG: [20] Bootstrap 2026-01-08T15:24:09.719063Z 20 00h00m00.000000s :BS_NODE DEBUG: [20] Connect 2026-01-08T15:24:09.719103Z 21 00h00m00.000000s :BS_NODE DEBUG: [21] Bootstrap 2026-01-08T15:24:09.719124Z 21 00h00m00.000000s :BS_NODE DEBUG: [21] Connect 
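A note on the five-field bracketed ids such as [80000016:1:2:1:0] that recur throughout these self-heal traces: the first field is the group id in hex (0x80000016 is 2147483670, matching "GroupId# 2147483670" elsewhere in this log), and my reading of the remaining fields is group generation, fail realm, fail domain and vdisk index. On that reading, a transition like [80000001:2:2:2:0] -> [80000001:3:2:2:0] keeps the slot position and bumps only the generation, i.e. the group was reconfigured. A small parser under that assumption:

    #include <cstdio>

    // Field names reflect my reading of the printed format, not YDB's
    // definitions.
    struct VDiskId {
        unsigned groupId, generation, failRealm, failDomain, vdisk;
    };

    static bool Parse(const char* s, VDiskId* out) {
        return std::sscanf(s, "[%x:%u:%u:%u:%u]", &out->groupId,
                           &out->generation, &out->failRealm,
                           &out->failDomain, &out->vdisk) == 5;
    }

    // True when only the generation advanced: a group reconfiguration.
    static bool IsGenerationBump(const VDiskId& a, const VDiskId& b) {
        return a.groupId == b.groupId && b.generation == a.generation + 1 &&
               a.failRealm == b.failRealm && a.failDomain == b.failDomain &&
               a.vdisk == b.vdisk;
    }

    int main() {
        VDiskId a{}, b{};
        if (Parse("[80000001:2:2:2:0]", &a) && Parse("[80000001:3:2:2:0]", &b))
            std::printf("generation bump: %s\n",
                        IsGenerationBump(a, b) ? "yes" : "no");
    }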
2026-01-08T15:24:09.719155Z 22 00h00m00.000000s :BS_NODE DEBUG: [22] Bootstrap 2026-01-08T15:24:09.719176Z 22 00h00m00.000000s :BS_NODE DEBUG: [22] Connect 2026-01-08T15:24:09.719221Z 23 00h00m00.000000s :BS_NODE DEBUG: [23] Bootstrap 2026-01-08T15:24:09.719253Z 23 00h00m00.000000s :BS_NODE DEBUG: [23] Connect 2026-01-08T15:24:09.719291Z 24 00h00m00.000000s :BS_NODE DEBUG: [24] Bootstrap 2026-01-08T15:24:09.719311Z 24 00h00m00.000000s :BS_NODE DEBUG: [24] Connect 2026-01-08T15:24:09.719342Z 25 00h00m00.000000s :BS_NODE DEBUG: [25] Bootstrap 2026-01-08T15:24:09.719365Z 25 00h00m00.000000s :BS_NODE DEBUG: [25] Connect 2026-01-08T15:24:09.719398Z 26 00h00m00.000000s :BS_NODE DEBUG: [26] Bootstrap 2026-01-08T15:24:09.719419Z 26 00h00m00.000000s :BS_NODE DEBUG: [26] Connect 2026-01-08T15:24:09.719448Z 27 00h00m00.000000s :BS_NODE DEBUG: [27] Bootstrap 2026-01-08T15:24:09.719469Z 27 00h00m00.000000s :BS_NODE DEBUG: [27] Connect 2026-01-08T15:24:09.719511Z 28 00h00m00.000000s :BS_NODE DEBUG: [28] Bootstrap 2026-01-08T15:24:09.719533Z 28 00h00m00.000000s :BS_NODE DEBUG: [28] Connect 2026-01-08T15:24:09.719570Z 29 00h00m00.000000s :BS_NODE DEBUG: [29] Bootstrap 2026-01-08T15:24:09.719590Z 29 00h00m00.000000s :BS_NODE DEBUG: [29] Connect 2026-01-08T15:24:09.719630Z 30 00h00m00.000000s :BS_NODE DEBUG: [30] Bootstrap 2026-01-08T15:24:09.719653Z 30 00h00m00.000000s :BS_NODE DEBUG: [30] Connect 2026-01-08T15:24:09.719693Z 31 00h00m00.000000s :BS_NODE DEBUG: [31] Bootstrap 2026-01-08T15:24:09.719724Z 31 00h00m00.000000s :BS_NODE DEBUG: [31] Connect 2026-01-08T15:24:09.719759Z 32 00h00m00.000000s :BS_NODE DEBUG: [32] Bootstrap 2026-01-08T15:24:09.719780Z 32 00h00m00.000000s :BS_NODE DEBUG: [32] Connect 2026-01-08T15:24:09.719810Z 33 00h00m00.000000s :BS_NODE DEBUG: [33] Bootstrap 2026-01-08T15:24:09.719841Z 33 00h00m00.000000s :BS_NODE DEBUG: [33] Connect 2026-01-08T15:24:09.719883Z 34 00h00m00.000000s :BS_NODE DEBUG: [34] Bootstrap 2026-01-08T15:24:09.719903Z 34 00h00m00.000000s :BS_NODE DEBUG: [34] Connect 2026-01-08T15:24:09.719932Z 35 00h00m00.000000s :BS_NODE DEBUG: [35] Bootstrap 2026-01-08T15:24:09.719955Z 35 00h00m00.000000s :BS_NODE DEBUG: [35] Connect 2026-01-08T15:24:09.719998Z 36 00h00m00.000000s :BS_NODE DEBUG: [36] Bootstrap 2026-01-08T15:24:09.720017Z 36 00h00m00.000000s :BS_NODE DEBUG: [36] Connect 2026-01-08T15:24:09.763880Z 1 00h00m00.000000s :BS_NODE DEBUG: [1] ClientConnected Sender# [1:2719:59] Status# ERROR ClientId# [1:2719:59] ServerId# [0:0:0] PipeClient# [1:2719:59] 2026-01-08T15:24:09.765178Z 2 00h00m00.000000s :BS_NODE DEBUG: [2] ClientConnected Sender# [2:2720:41] Status# ERROR ClientId# [2:2720:41] ServerId# [0:0:0] PipeClient# [2:2720:41] 2026-01-08T15:24:09.765226Z 3 00h00m00.000000s :BS_NODE DEBUG: [3] ClientConnected Sender# [3:2721:41] Status# ERROR ClientId# [3:2721:41] ServerId# [0:0:0] PipeClient# [3:2721:41] 2026-01-08T15:24:09.765263Z 4 00h00m00.000000s :BS_NODE DEBUG: [4] ClientConnected Sender# [4:2722:41] Status# ERROR ClientId# [4:2722:41] ServerId# [0:0:0] PipeClient# [4:2722:41] 2026-01-08T15:24:09.765301Z 5 00h00m00.000000s :BS_NODE DEBUG: [5] ClientConnected Sender# [5:2723:41] Status# ERROR ClientId# [5:2723:41] ServerId# [0:0:0] PipeClient# [5:2723:41] 2026-01-08T15:24:09.765333Z 6 00h00m00.000000s :BS_NODE DEBUG: [6] ClientConnected Sender# [6:2724:41] Status# ERROR ClientId# [6:2724:41] ServerId# [0:0:0] PipeClient# [6:2724:41] 2026-01-08T15:24:09.765365Z 7 00h00m00.000000s :BS_NODE DEBUG: [7] ClientConnected Sender# [7:2725:41] Status# ERROR ClientId# 
[7:2725:41] ServerId# [0:0:0] PipeClient# [7:2725:41] 2026-01-08T15:24:09.765398Z 8 00h00m00.000000s :BS_NODE DEBUG: [8] ClientConnected Sender# [8:2726:41] Status# ERROR ClientId# [8:2726:41] ServerId# [0:0:0] PipeClient# [8:2726:41] 2026-01-08T15:24:09.765447Z 9 00h00m00.000000s :BS_NODE DEBUG: [9] ClientConnected Sender# [9:2727:41] Status# ERROR ClientId# [9:2727:41] ServerId# [0:0:0] PipeClient# [9:2727:41] 2026-01-08T15:24:09.765485Z 10 00h00m00.000000s :BS_NODE DEBUG: [10] ClientConnected Sender# [10:2728:41] Status# ERROR ClientId# [10:2728:41] ServerId# [0:0:0] PipeClient# [10:2728:41] 2026-01-08T15:24:09.765521Z 11 00h00m00.000000s :BS_NODE DEBUG: [11] ClientConnected Sender# [11:2729:41] Status# ERROR ClientId# [11:2729:41] ServerId# [0:0:0] PipeClient# [11:2729:41] 2026-01-08T15:24:09.765555Z 12 00h00m00.000000s :BS_NODE DEBUG: [12] ClientConnected Sender# [12:2730:41] Status# ERROR ClientId# [12:2730:41] ServerId# [0:0:0] PipeClient# [12:2730:41] 2026-01-08T15:24:09.765600Z 13 00h00m00.000000s :BS_NODE DEBUG: [13] ClientConnected Sender# [13:2731:41] Status# ERROR ClientId# [13:2731:41] ServerId# [0:0:0] PipeClient# [13:2731:41] 2026-01-08T15:24:09.765666Z 14 00h00m00.000000s :BS_NODE DEBUG: [14] ClientConnected Sender# [14:2732:41] Status# ERROR ClientId# [14:2732:41] ServerId# [0:0:0] PipeClient# [14:2732:41] 2026-01-08T15:24:09.765704Z 15 00h00m00.000000s :BS_NODE DEBUG: [15] ClientConnected Sender# [15:2733:41] Status# ERROR ClientId# [15:2733:41] ServerId# [0:0:0] PipeClient# [15:2733:41] 2026-01-08T15:24:09.765748Z 16 00h00m00.000000s :BS_NODE DEBUG: [16] ClientConnected Sender# [16:2734:41] Status# ERROR ClientId# [16:2734:41] ServerId# [0:0:0] PipeClient# [16:2734:41] 2026-01-08T15:24:09.765790Z 17 00h00m00.000000s :BS_NODE DEBUG: [17] ClientConnected Sender# [17:2735:41] Status# ERROR ClientId# [17:2735:41] ServerId# [0:0:0] PipeClient# [17:2735:41] 2026-01-08T15:24:09.765832Z 18 00h00m00.000000s :BS_NODE DEBUG: [18] ClientConnected Sender# [18:2736:41] Status# ERROR ClientId# [18:2736:41] ServerId# [0:0:0] PipeClient# [18:2736:41] 2026-01-08T15:24:09.765874Z 19 00h00m00.000000s :BS_NODE DEBUG: [19] ClientConnected Sender# [19:2737:41] Status# ERROR ClientId# [19:2737:41] ServerId# [0:0:0] PipeClient# [19:2737:41] 2026-01-08T15:24:09.765909Z 20 00h00m00.000000s :BS_NODE DEBUG: [20] ClientConnected Sender# [20:2738:41] Status# ERROR ClientId# [20:2738:41] ServerId# [0:0:0] PipeClient# [20:2738:41] 2026-01-08T15:24:09.765952Z 21 00h00m00.000000s :BS_NODE DEBUG: [21] ClientConnected Sender# [21:2739:41] Status# ERROR ClientId# [21:2739:41] ServerId# [0:0:0] PipeClient# [21:2739:41] 2026-01-08T15:24:09.766005Z 22 00h00m00.000000s :BS_NODE DEBUG: [22] ClientConnected Sender# [22:2740:41] Status# ERROR ClientId# [22:2740:41] ServerId# [0:0:0] PipeClient# [22:2740:41] 2026-01-08T15:24:09.766046Z 23 00h00m00.000000s :BS_NODE DEBUG: [23] ClientConnected Sender# [23:2741:41] Status# ERROR ClientId# [23:2741:41] ServerId# [0:0:0] PipeClient# [23:2741:41] 2026-01-08T15:24:09.766082Z 24 00h00m00.000000s :BS_NODE DEBUG: [24] ClientConnected Sender# [24:2742:41] Status# ERROR ClientId# [24:2742:41] ServerId# [0:0:0] PipeClient# [24:2742:41] 2026-01-08T15:24:09.766118Z 25 00h00m00.000000s :BS_NODE DEBUG: [25] ClientConnected Sender# [25:2743:41] Status# ERROR ClientId# [25:2743:41] ServerId# [0:0:0] PipeClient# [25:2743:41] 2026-01-08T15:24:09.766155Z 26 00h00m00.000000s :BS_NODE DEBUG: [26] ClientConnected Sender# [26:2744:41] Status# ERROR ClientId# [26:2744:41] ServerId# [0:0:0] 
PipeClient# [26:2744:41] 2026-01-08T15:24:09.766201Z 27 00h00m00.000000s :BS_NODE DEBUG: [27] ClientConnected Sender# [27:2745:41] Status# ERROR ClientId# [27:2745:41] ServerId# [0:0:0] PipeClient# [27:2745:41] 2026-01-08T15:24:09.766248Z 28 00h00m00.000000s :BS_NODE DEBUG: [28] ClientConnected Sender# [28:2746:41] Status# ERROR ClientId# [28:2746:41] ServerId# [0:0:0] PipeClient# [28:2746:41] 2026-01-08T15:24:09.766294Z 29 00h00m00.000000s :BS_NODE DEBUG: [29] ClientConnected Sender# [29:2747:41] Status# ERROR ClientId# [29:2747:41] ServerId# [0:0:0] PipeClient# [29:2747:41] 2026-01-08T15:24:09.773328Z 30 00h00m00.000000s :BS_NODE DEBUG: [30] ClientConnected Sender# [30:2748:41] Status# ERROR ClientId# [30:2748:41] ServerId# [0:0:0] PipeClient# [30:2748:41] 2026-01-08T15:24:09.773387Z 31 00h00m00.000000s :BS_NODE DEBUG: [31] ClientConnected Sender# [31:2749:41] Status# ERROR ClientId# [31:2749:41] ServerId# [0:0:0] PipeClient# [31:2749:41] 2026-01-08T15:24:09.773502Z 32 00h00m00.000000s :BS_NODE DEBUG: [32] ClientConnected Sender# [32:2750:41] Status# ERROR ClientId# [32:2750:41] ServerId# [0:0:0] PipeClient# [32:2750:41] 2026-01-08T15:24:09.773540Z 33 00h00m00.000000s :BS_NODE DEBUG: [33] ClientConnected Sender# [33:2751:41] Status# ERROR ClientId# [33:2751:41] ServerId# [0:0:0] PipeClient# [33:2751:41] 2026-01-08T15:24:09.773576Z 34 00h00m00.000000s :BS_NODE DEBUG: [34] ClientConnected Sender# [34:2752:41] Status# ERROR ClientId# [34:2752:41] ServerId# [0:0:0] PipeClient# [34:2752:41] 2026-01-08T15:24:09.773658Z 35 00h00m00.000000s :BS_NODE DEBUG: [35] ClientConnected Sender# [35:2753:41] Status# ERROR ClientId# [35:2753:41 ... 25m00.102560s :BS_NODE DEBUG: [28] VDiskId# [80000001:2:2:2:0] -> [80000001:3:2:2:0] 2026-01-08T15:24:15.195818Z 28 01h25m00.102560s :BS_NODE DEBUG: [28] VDiskId# [80000021:2:2:2:0] -> [80000021:3:2:2:0] 2026-01-08T15:24:15.195873Z 28 01h25m00.102560s :BS_NODE DEBUG: [28] VDiskId# [80000031:2:2:2:0] -> [80000031:3:2:2:0] 2026-01-08T15:24:15.195916Z 28 01h25m00.102560s :BS_NODE DEBUG: [28] VDiskId# [80000051:2:2:2:0] -> [80000051:3:2:2:0] 2026-01-08T15:24:15.195966Z 28 01h25m00.102560s :BS_NODE DEBUG: [28] VDiskId# [80000061:2:2:2:0] -> [80000061:3:2:2:0] 2026-01-08T15:24:15.196597Z 13 01h25m00.102560s :BS_NODE DEBUG: [13] NodeServiceSetUpdate 2026-01-08T15:24:15.196648Z 13 01h25m00.102560s :BS_NODE DEBUG: [13] VDiskId# [80000010:2:1:0:0] -> [80000010:3:1:0:0] 2026-01-08T15:24:15.196692Z 13 01h25m00.102560s :BS_NODE DEBUG: [13] VDiskId# [80000040:2:1:0:0] -> [80000040:3:1:0:0] 2026-01-08T15:24:15.196748Z 13 01h25m00.102560s :BS_NODE DEBUG: [13] VDiskId# [80000070:2:1:0:0] -> [80000070:3:1:0:0] 2026-01-08T15:24:15.196791Z 13 01h25m00.102560s :BS_NODE DEBUG: [13] VDiskId# [80000001:2:1:1:0] -> [80000001:3:1:1:0] 2026-01-08T15:24:15.196829Z 13 01h25m00.102560s :BS_NODE DEBUG: [13] VDiskId# [80000021:2:1:1:0] -> [80000021:3:1:1:0] 2026-01-08T15:24:15.196878Z 13 01h25m00.102560s :BS_NODE DEBUG: [13] VDiskId# [80000031:2:1:1:0] -> [80000031:3:1:1:0] 2026-01-08T15:24:15.196919Z 13 01h25m00.102560s :BS_NODE DEBUG: [13] VDiskId# [80000051:2:1:1:0] -> [80000051:3:1:1:0] 2026-01-08T15:24:15.196968Z 13 01h25m00.102560s :BS_NODE DEBUG: [13] VDiskId# [80000061:2:1:1:0] -> [80000061:3:1:1:0] 2026-01-08T15:24:15.197009Z 13 01h25m00.102560s :BS_NODE DEBUG: [13] VDiskId# [80000002:1:1:2:0] -> [80000002:2:1:2:0] 2026-01-08T15:24:15.197048Z 13 01h25m00.102560s :BS_NODE DEBUG: [13] VDiskId# [80000012:1:1:2:0] -> [80000012:2:1:2:0] 2026-01-08T15:24:15.197091Z 13 01h25m00.102560s 
:BS_NODE DEBUG: [13] VDiskId# [80000022:1:1:2:0] -> [80000022:2:1:2:0] 2026-01-08T15:24:15.197142Z 13 01h25m00.102560s :BS_NODE DEBUG: [13] VDiskId# [80000032:1:1:2:0] -> [80000032:2:1:2:0] 2026-01-08T15:24:15.197188Z 13 01h25m00.102560s :BS_NODE DEBUG: [13] VDiskId# [80000042:1:1:2:0] -> [80000042:2:1:2:0] 2026-01-08T15:24:15.197226Z 13 01h25m00.102560s :BS_NODE DEBUG: [13] VDiskId# [80000052:1:1:2:0] -> [80000052:2:1:2:0] 2026-01-08T15:24:15.197264Z 13 01h25m00.102560s :BS_NODE DEBUG: [13] VDiskId# [80000062:1:1:2:0] -> [80000062:2:1:2:0] 2026-01-08T15:24:15.197304Z 13 01h25m00.102560s :BS_NODE DEBUG: [13] VDiskId# [80000072:1:1:2:0] -> [80000072:2:1:2:0] 2026-01-08T15:24:15.197966Z 31 01h25m00.102560s :BS_NODE DEBUG: [31] NodeServiceSetUpdate 2026-01-08T15:24:15.198019Z 31 01h25m00.102560s :BS_NODE DEBUG: [31] VDiskId# [80000010:2:2:2:0] -> [80000010:3:2:2:0] 2026-01-08T15:24:15.198068Z 31 01h25m00.102560s :BS_NODE DEBUG: [31] VDiskId# [80000040:2:2:2:0] -> [80000040:3:2:2:0] 2026-01-08T15:24:15.198112Z 31 01h25m00.102560s :BS_NODE DEBUG: [31] VDiskId# [80000070:2:2:2:0] -> [80000070:3:2:2:0] 2026-01-08T15:24:15.198160Z 31 01h25m00.102560s :BS_NODE DEBUG: [31] VDiskId# [80000002:1:2:0:0] -> [80000002:2:2:0:0] 2026-01-08T15:24:15.198204Z 31 01h25m00.102560s :BS_NODE DEBUG: [31] VDiskId# [80000012:1:2:0:0] -> [80000012:2:2:0:0] 2026-01-08T15:24:15.198245Z 31 01h25m00.102560s :BS_NODE DEBUG: [31] VDiskId# [80000022:1:2:0:0] -> [80000022:2:2:0:0] 2026-01-08T15:24:15.198283Z 31 01h25m00.102560s :BS_NODE DEBUG: [31] VDiskId# [80000032:1:2:0:0] -> [80000032:2:2:0:0] 2026-01-08T15:24:15.202459Z 31 01h25m00.102560s :BS_NODE DEBUG: [31] VDiskId# [80000042:1:2:0:0] -> [80000042:2:2:0:0] 2026-01-08T15:24:15.202564Z 31 01h25m00.102560s :BS_NODE DEBUG: [31] VDiskId# [80000052:1:2:0:0] -> [80000052:2:2:0:0] 2026-01-08T15:24:15.202613Z 31 01h25m00.102560s :BS_NODE DEBUG: [31] VDiskId# [80000062:1:2:0:0] -> [80000062:2:2:0:0] 2026-01-08T15:24:15.202678Z 31 01h25m00.102560s :BS_NODE DEBUG: [31] VDiskId# [80000072:1:2:0:0] -> [80000072:2:2:0:0] 2026-01-08T15:24:15.203248Z 16 01h25m00.102560s :BS_NODE DEBUG: [16] NodeServiceSetUpdate 2026-01-08T15:24:15.203305Z 16 01h25m00.102560s :BS_NODE DEBUG: [16] VDiskId# [80000010:2:1:1:0] -> [80000010:3:1:1:0] 2026-01-08T15:24:15.203349Z 16 01h25m00.102560s :BS_NODE DEBUG: [16] VDiskId# [80000040:2:1:1:0] -> [80000040:3:1:1:0] 2026-01-08T15:24:15.203416Z 16 01h25m00.102560s :BS_NODE DEBUG: [16] VDiskId# [80000070:2:1:1:0] -> [80000070:3:1:1:0] 2026-01-08T15:24:15.203481Z 16 01h25m00.102560s :BS_NODE DEBUG: [16] VDiskId# [80000001:2:1:2:0] -> [80000001:3:1:2:0] 2026-01-08T15:24:15.203524Z 16 01h25m00.102560s :BS_NODE DEBUG: [16] VDiskId# [80000021:2:1:2:0] -> [80000021:3:1:2:0] 2026-01-08T15:24:15.203561Z 16 01h25m00.102560s :BS_NODE DEBUG: [16] VDiskId# [80000031:2:1:2:0] -> [80000031:3:1:2:0] 2026-01-08T15:24:15.203605Z 16 01h25m00.102560s :BS_NODE DEBUG: [16] VDiskId# [80000051:2:1:2:0] -> [80000051:3:1:2:0] 2026-01-08T15:24:15.203654Z 16 01h25m00.102560s :BS_NODE DEBUG: [16] VDiskId# [80000061:2:1:2:0] -> [80000061:3:1:2:0] 2026-01-08T15:24:15.211771Z 5 01h25m01.181560s :BS_NODE DEBUG: [5] VDiskId# [80000072:2:0:2:0] status changed to REPLICATING 2026-01-08T15:24:15.212290Z 7 01h25m01.327560s :BS_NODE DEBUG: [7] VDiskId# [80000051:3:0:1:0] status changed to REPLICATING 2026-01-08T15:24:15.212727Z 2 01h25m01.387560s :BS_NODE DEBUG: [2] VDiskId# [80000042:2:0:2:0] status changed to REPLICATING 2026-01-08T15:24:15.213119Z 7 01h25m01.821560s :BS_NODE DEBUG: [7] 
VDiskId# [80000001:3:0:1:0] status changed to REPLICATING 2026-01-08T15:24:15.213478Z 10 01h25m01.924560s :BS_NODE DEBUG: [10] VDiskId# [80000070:3:0:0:0] status changed to REPLICATING 2026-01-08T15:24:15.213904Z 10 01h25m02.080560s :BS_NODE DEBUG: [10] VDiskId# [80000040:3:0:0:0] status changed to REPLICATING 2026-01-08T15:24:15.214891Z 10 01h25m02.109560s :BS_NODE DEBUG: [10] VDiskId# [80000010:3:0:0:0] status changed to REPLICATING 2026-01-08T15:24:15.215442Z 4 01h25m03.949560s :BS_NODE DEBUG: [4] VDiskId# [80000012:2:0:2:0] status changed to REPLICATING 2026-01-08T15:24:15.215902Z 4 01h25m04.044560s :BS_NODE DEBUG: [4] VDiskId# [80000002:2:0:2:0] status changed to REPLICATING 2026-01-08T15:24:15.216329Z 5 01h25m04.598560s :BS_NODE DEBUG: [5] VDiskId# [80000052:2:0:2:0] status changed to REPLICATING 2026-01-08T15:24:15.216814Z 2 01h25m04.601560s :BS_NODE DEBUG: [2] VDiskId# [80000062:2:0:2:0] status changed to REPLICATING 2026-01-08T15:24:15.217266Z 8 01h25m04.639560s :BS_NODE DEBUG: [8] VDiskId# [80000061:3:0:1:0] status changed to REPLICATING 2026-01-08T15:24:15.217751Z 4 01h25m04.857560s :BS_NODE DEBUG: [4] VDiskId# [80000032:2:0:2:0] status changed to REPLICATING 2026-01-08T15:24:15.225673Z 7 01h25m05.107560s :BS_NODE DEBUG: [7] VDiskId# [80000021:3:0:1:0] status changed to REPLICATING 2026-01-08T15:24:15.226403Z 7 01h25m05.208560s :BS_NODE DEBUG: [7] VDiskId# [80000031:3:0:1:0] status changed to REPLICATING 2026-01-08T15:24:15.226981Z 4 01h25m06.031560s :BS_NODE DEBUG: [4] VDiskId# [80000022:2:0:2:0] status changed to REPLICATING 2026-01-08T15:24:15.227494Z 10 01h25m09.469560s :BS_NODE DEBUG: [10] VDiskId# [80000010:3:0:0:0] status changed to READY 2026-01-08T15:24:15.228633Z 1 01h25m09.470072s :BS_NODE DEBUG: [1] NodeServiceSetUpdate 2026-01-08T15:24:15.228704Z 1 01h25m09.470072s :BS_NODE DEBUG: [1] VDiskId# [80000010:2:0:0:0] destroyed 2026-01-08T15:24:15.229229Z 7 01h25m11.164560s :BS_NODE DEBUG: [7] VDiskId# [80000031:3:0:1:0] status changed to READY 2026-01-08T15:24:15.230205Z 1 01h25m11.165072s :BS_NODE DEBUG: [1] NodeServiceSetUpdate 2026-01-08T15:24:15.230277Z 1 01h25m11.165072s :BS_NODE DEBUG: [1] VDiskId# [80000031:2:0:1:0] destroyed 2026-01-08T15:24:15.252040Z 4 01h25m12.483560s :BS_NODE DEBUG: [4] VDiskId# [80000032:2:0:2:0] status changed to READY 2026-01-08T15:24:15.253319Z 1 01h25m12.484072s :BS_NODE DEBUG: [1] NodeServiceSetUpdate 2026-01-08T15:24:15.253397Z 1 01h25m12.484072s :BS_NODE DEBUG: [1] VDiskId# [80000032:1:0:2:0] destroyed 2026-01-08T15:24:15.253529Z 5 01h25m12.534560s :BS_NODE DEBUG: [5] VDiskId# [80000052:2:0:2:0] status changed to READY 2026-01-08T15:24:15.254423Z 1 01h25m12.535072s :BS_NODE DEBUG: [1] NodeServiceSetUpdate 2026-01-08T15:24:15.254475Z 1 01h25m12.535072s :BS_NODE DEBUG: [1] VDiskId# [80000052:1:0:2:0] destroyed 2026-01-08T15:24:15.254636Z 7 01h25m13.379560s :BS_NODE DEBUG: [7] VDiskId# [80000021:3:0:1:0] status changed to READY 2026-01-08T15:24:15.255393Z 1 01h25m13.380072s :BS_NODE DEBUG: [1] NodeServiceSetUpdate 2026-01-08T15:24:15.255459Z 1 01h25m13.380072s :BS_NODE DEBUG: [1] VDiskId# [80000021:2:0:1:0] destroyed 2026-01-08T15:24:15.255548Z 2 01h25m13.524560s :BS_NODE DEBUG: [2] VDiskId# [80000062:2:0:2:0] status changed to READY 2026-01-08T15:24:15.256289Z 1 01h25m13.525072s :BS_NODE DEBUG: [1] NodeServiceSetUpdate 2026-01-08T15:24:15.256337Z 1 01h25m13.525072s :BS_NODE DEBUG: [1] VDiskId# [80000062:1:0:2:0] destroyed 2026-01-08T15:24:15.257325Z 5 01h25m16.520560s :BS_NODE DEBUG: [5] VDiskId# [80000072:2:0:2:0] status changed to 
READY 2026-01-08T15:24:15.258137Z 1 01h25m16.521072s :BS_NODE DEBUG: [1] NodeServiceSetUpdate 2026-01-08T15:24:15.258188Z 1 01h25m16.521072s :BS_NODE DEBUG: [1] VDiskId# [80000072:1:0:2:0] destroyed 2026-01-08T15:24:15.258290Z 8 01h25m17.276560s :BS_NODE DEBUG: [8] VDiskId# [80000061:3:0:1:0] status changed to READY 2026-01-08T15:24:15.266164Z 1 01h25m17.277072s :BS_NODE DEBUG: [1] NodeServiceSetUpdate 2026-01-08T15:24:15.266243Z 1 01h25m17.277072s :BS_NODE DEBUG: [1] VDiskId# [80000061:2:0:1:0] destroyed 2026-01-08T15:24:15.266810Z 4 01h25m23.916560s :BS_NODE DEBUG: [4] VDiskId# [80000012:2:0:2:0] status changed to READY 2026-01-08T15:24:15.267727Z 1 01h25m23.917072s :BS_NODE DEBUG: [1] NodeServiceSetUpdate 2026-01-08T15:24:15.267787Z 1 01h25m23.917072s :BS_NODE DEBUG: [1] VDiskId# [80000012:1:0:2:0] destroyed 2026-01-08T15:24:15.268185Z 7 01h25m24.484560s :BS_NODE DEBUG: [7] VDiskId# [80000001:3:0:1:0] status changed to READY 2026-01-08T15:24:15.269011Z 1 01h25m24.485072s :BS_NODE DEBUG: [1] NodeServiceSetUpdate 2026-01-08T15:24:15.269057Z 1 01h25m24.485072s :BS_NODE DEBUG: [1] VDiskId# [80000001:2:0:1:0] destroyed 2026-01-08T15:24:15.269509Z 4 01h25m26.028560s :BS_NODE DEBUG: [4] VDiskId# [80000022:2:0:2:0] status changed to READY 2026-01-08T15:24:15.275434Z 1 01h25m26.029072s :BS_NODE DEBUG: [1] NodeServiceSetUpdate 2026-01-08T15:24:15.275499Z 1 01h25m26.029072s :BS_NODE DEBUG: [1] VDiskId# [80000022:1:0:2:0] destroyed 2026-01-08T15:24:15.276791Z 2 01h25m28.882560s :BS_NODE DEBUG: [2] VDiskId# [80000042:2:0:2:0] status changed to READY 2026-01-08T15:24:15.277780Z 1 01h25m28.883072s :BS_NODE DEBUG: [1] NodeServiceSetUpdate 2026-01-08T15:24:15.277840Z 1 01h25m28.883072s :BS_NODE DEBUG: [1] VDiskId# [80000042:1:0:2:0] destroyed 2026-01-08T15:24:15.286392Z 7 01h25m31.135560s :BS_NODE DEBUG: [7] VDiskId# [80000051:3:0:1:0] status changed to READY 2026-01-08T15:24:15.287631Z 1 01h25m31.136072s :BS_NODE DEBUG: [1] NodeServiceSetUpdate 2026-01-08T15:24:15.287689Z 1 01h25m31.136072s :BS_NODE DEBUG: [1] VDiskId# [80000051:2:0:1:0] destroyed 2026-01-08T15:24:15.287815Z 10 01h25m31.272560s :BS_NODE DEBUG: [10] VDiskId# [80000040:3:0:0:0] status changed to READY 2026-01-08T15:24:15.288646Z 1 01h25m31.273072s :BS_NODE DEBUG: [1] NodeServiceSetUpdate 2026-01-08T15:24:15.288720Z 1 01h25m31.273072s :BS_NODE DEBUG: [1] VDiskId# [80000040:2:0:0:0] destroyed 2026-01-08T15:24:15.289066Z 10 01h25m31.624560s :BS_NODE DEBUG: [10] VDiskId# [80000070:3:0:0:0] status changed to READY 2026-01-08T15:24:15.289888Z 1 01h25m31.625072s :BS_NODE DEBUG: [1] NodeServiceSetUpdate 2026-01-08T15:24:15.289943Z 1 01h25m31.625072s :BS_NODE DEBUG: [1] VDiskId# [80000070:2:0:0:0] destroyed 2026-01-08T15:24:15.299001Z 4 01h25m33.162560s :BS_NODE DEBUG: [4] VDiskId# [80000002:2:0:2:0] status changed to READY 2026-01-08T15:24:15.300178Z 1 01h25m33.163072s :BS_NODE DEBUG: [1] NodeServiceSetUpdate 2026-01-08T15:24:15.300239Z 1 01h25m33.163072s :BS_NODE DEBUG: [1] VDiskId# [80000002:1:0:2:0] destroyed |83.4%| [TS] {asan, default-linux-x86_64, release} ydb/core/scheme/ut_pg/unittest |83.4%| [TM] {BAZEL_UPLOAD} ydb/core/blobstorage/dsproxy/ut_strategy/unittest |83.4%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/dsproxy/ut_strategy/unittest |83.4%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/incrhuge/ut/unittest |83.4%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/blobstorage/ut_blobstorage/ut_blob_depot/ydb-core-blobstorage-ut_blobstorage-ut_blob_depot |83.4%| [LD] 
{default-linux-x86_64, release, asan} $(B)/ydb/core/wrappers/ut/ydb-core-wrappers-ut |83.5%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/blobstorage/ut_blobstorage/ut_blob_depot/ydb-core-blobstorage-ut_blobstorage-ut_blob_depot >> TBsVDiskRange::Simple3PutRangeGetAllForwardFresh |83.4%| [TA] $(B)/ydb/library/persqueue/topic_parser/ut/test-results/unittest/{meta.json ... results_accumulator.log} |83.5%| [LD] {RESULT} $(B)/ydb/core/blobstorage/ut_blobstorage/ut_blob_depot/ydb-core-blobstorage-ut_blobstorage-ut_blob_depot |83.5%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/wrappers/ut/ydb-core-wrappers-ut |83.5%| [TM] {BAZEL_UPLOAD} ydb/core/mind/bscontroller/ut_selfheal/unittest >> TBsVDiskRangeHuge::Simple3PutRangeGetAllForwardFresh |83.4%| [TM] {BAZEL_UPLOAD} ydb/core/blobstorage/dsproxy/ut_strategy/unittest |83.4%| [TS] {BAZEL_UPLOAD} ydb/core/scheme/ut_pg/unittest ------- [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/ut_blobstorage/ut_read_only_vdisk/unittest >> ReadOnlyVDisk::TestDiscover [GOOD] Test command err: RandomSeed# 11873627041364305642 SEND TEvPut with key [1:1:0:0:0:131072:0] TEvPutResult: TEvPutResult {Id# [1:1:0:0:0:131072:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999988} SEND TEvPut with key [1:1:1:0:0:32768:0] TEvPutResult: TEvPutResult {Id# [1:1:1:0:0:32768:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999988} SEND TEvPut with key [1:1:2:0:0:131072:0] TEvPutResult: TEvPutResult {Id# [1:1:2:0:0:131072:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999988} === Read all 3 blob(s) === SEND TEvGet with key [1:1:0:0:0:131072:0] TEvGetResult: TEvGetResult {Status# OK ResponseSz# 1 {[1:1:0:0:0:131072:0] OK Size# 131072 RequestedSize# 131072}} SEND TEvGet with key [1:1:1:0:0:32768:0] TEvGetResult: TEvGetResult {Status# OK ResponseSz# 1 {[1:1:1:0:0:32768:0] OK Size# 32768 RequestedSize# 32768}} SEND TEvGet with key [1:1:2:0:0:131072:0] TEvGetResult: TEvGetResult {Status# OK ResponseSz# 1 {[1:1:2:0:0:131072:0] OK Size# 131072 RequestedSize# 131072}} Setting VDisk read-only to 1 for position 0 Invoking SetVDiskReadOnly for vdisk [82000000:1:0:0:0] SEND TEvPut with key [1:1:3:0:0:32768:0] 2026-01-08T15:24:10.878927Z 1 00h01m30.060512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) Unavailable in read-only Sender# [1:5324:704] TEvPutResult: TEvPutResult {Id# [1:1:3:0:0:32768:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999988} Setting VDisk read-only to 1 for position 1 Invoking SetVDiskReadOnly for vdisk [82000000:1:0:1:0] SEND TEvPut with key [1:1:4:0:0:131072:0] 2026-01-08T15:24:11.330555Z 1 00h02m00.100000s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) Unavailable in read-only Sender# [1:5324:704] 2026-01-08T15:24:11.331941Z 2 00h02m00.100000s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:1:0]: (2181038080) Unavailable in read-only Sender# [1:5331:711] TEvPutResult: TEvPutResult {Id# [1:1:4:0:0:131072:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999988} Setting VDisk read-only to 1 for position 2 Invoking SetVDiskReadOnly for vdisk [82000000:1:0:2:0] SEND TEvPut with key [1:1:5:0:0:32768:0] 2026-01-08T15:24:11.583476Z 3 00h02m30.110512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:2:0]: (2181038080) Unavailable in read-only Sender# [1:5338:718] 2026-01-08T15:24:11.584430Z 1 00h02m30.110512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) Unavailable in read-only Sender# [1:5324:704] 
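Worth pausing on the arithmetic this test exercises. The group appears to be an 8-disk block-4+2 erasure group (the part-situation dumps list order numbers 0..7 with six part slots each), so a put must land 6 parts (4 data + 2 parity) on distinct disks: with one or two replicas read-only the earlier puts still succeed, but the third read-only replica below leaves only 5 writable disks and the put fails with EBS_DISINTEGRATED. A deliberately simplified availability check that reproduces this count; the real TRestoreStrategy logic in dsproxy is considerably more involved:

    #include <cstdio>

    constexpr int kDisks = 8;   // fail domains in the group
    constexpr int kParts = 6;   // block-4+2: 4 data + 2 parity parts

    // A put needs kParts distinct writable disks.
    static bool CanWrite(int readOnlyDisks) {
        return kDisks - readOnlyDisks >= kParts;
    }

    int main() {
        for (int ro = 0; ro <= 3; ++ro)
            std::printf("%d read-only disk(s): put %s\n", ro,
                        CanWrite(ro) ? "succeeds" : "fails");
    }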
2026-01-08T15:24:11.585115Z 2 00h02m30.110512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:1:0]: (2181038080) Unavailable in read-only Sender# [1:5331:711] 2026-01-08T15:24:11.585428Z 1 00h02m30.110512s :BS_PROXY_PUT ERROR: [ff477a6b0d9e42da] Result# TEvPutResult {Id# [1:1:5:0:0:32768:0] Status# ERROR StatusFlags# { } ErrorReason# "TRestoreStrategy saw optimisticState# EBS_DISINTEGRATED GroupId# 2181038080 BlobId# [1:1:5:0:0:32768:0] Reported ErrorReasons# [ { OrderNumber# 2 VDiskId# [82000000:1:0:2:0] NodeId# 3 ErrorReasons# [ "VDisk is in read-only mode", ] } { OrderNumber# 0 VDiskId# [82000000:1:0:0:0] NodeId# 1 ErrorReasons# [ "VDisk is in read-only mode", ] } { OrderNumber# 1 VDiskId# [82000000:1:0:1:0] NodeId# 2 ErrorReasons# [ "VDisk is in read-only mode", ] } ] Part situations# [ { OrderNumber# 2 Situations# EUUUUU } { OrderNumber# 3 Situations# UPUUUU } { OrderNumber# 4 Situations# UUPUUU } { OrderNumber# 5 Situations# UUUPUU } { OrderNumber# 6 Situations# UUUUPU } { OrderNumber# 7 Situations# UUUUUP } { OrderNumber# 0 Situations# EUUUUU } { OrderNumber# 1 Situations# EUUUUU } ] " ApproximateFreeSpaceShare# 0.999988} GroupId# 2181038080 Marker# BPP12 2026-01-08T15:24:11.585741Z 1 00h02m30.110512s :BS_PROXY_PUT ERROR: {BPP72@dsproxy_put.cpp:483} Query history GroupId# 2181038080 HandleClass# TabletLog Tactic# Default History# THistory { Entries# [ TEvVPut{ TimestampMs# 0.392 sample PartId# [1:1:5:0:0:32768:6] QueryCount# 1 VDiskId# [82000000:1:0:7:0] NodeId# 8 } TEvVPut{ TimestampMs# 0.393 sample PartId# [1:1:5:0:0:32768:5] QueryCount# 1 VDiskId# [82000000:1:0:6:0] NodeId# 7 } TEvVPut{ TimestampMs# 0.393 sample PartId# [1:1:5:0:0:32768:4] QueryCount# 1 VDiskId# [82000000:1:0:5:0] NodeId# 6 } TEvVPut{ TimestampMs# 0.393 sample PartId# [1:1:5:0:0:32768:3] QueryCount# 1 VDiskId# [82000000:1:0:4:0] NodeId# 5 } TEvVPut{ TimestampMs# 0.394 sample PartId# [1:1:5:0:0:32768:2] QueryCount# 1 VDiskId# [82000000:1:0:3:0] NodeId# 4 } TEvVPut{ TimestampMs# 0.394 sample PartId# [1:1:5:0:0:32768:1] QueryCount# 1 VDiskId# [82000000:1:0:2:0] NodeId# 3 } TEvVPutResult{ TimestampMs# 1.446 VDiskId# [82000000:1:0:2:0] NodeId# 3 Status# ERROR ErrorReason# "VDisk is in read-only mode" } TEvVPut{ TimestampMs# 1.503 sample PartId# [1:1:5:0:0:32768:1] QueryCount# 1 VDiskId# [82000000:1:0:0:0] NodeId# 1 } TEvVPutResult{ TimestampMs# 2.114 VDiskId# [82000000:1:0:0:0] NodeId# 1 Status# ERROR ErrorReason# "VDisk is in read-only mode" } TEvVPut{ TimestampMs# 2.173 sample PartId# [1:1:5:0:0:32768:1] QueryCount# 1 VDiskId# [82000000:1:0:1:0] NodeId# 2 } TEvVPutResult{ TimestampMs# 2.395 VDiskId# [82000000:1:0:7:0] NodeId# 8 Status# OK } TEvVPutResult{ TimestampMs# 2.415 VDiskId# [82000000:1:0:6:0] NodeId# 7 Status# OK } TEvVPutResult{ TimestampMs# 2.427 VDiskId# [82000000:1:0:5:0] NodeId# 6 Status# OK } TEvVPutResult{ TimestampMs# 2.443 VDiskId# [82000000:1:0:4:0] NodeId# 5 Status# OK } TEvVPutResult{ TimestampMs# 2.468 VDiskId# [82000000:1:0:3:0] NodeId# 4 Status# OK } TEvVPutResult{ TimestampMs# 2.633 VDiskId# [82000000:1:0:1:0] NodeId# 2 Status# ERROR ErrorReason# "VDisk is in read-only mode" } ] } TEvPutResult: TEvPutResult {Id# [1:1:5:0:0:32768:0] Status# ERROR StatusFlags# { } ErrorReason# "TRestoreStrategy saw optimisticState# EBS_DISINTEGRATED GroupId# 2181038080 BlobId# [1:1:5:0:0:32768:0] Reported ErrorReasons# [ { OrderNumber# 2 VDiskId# [82000000:1:0:2:0] NodeId# 3 ErrorReasons# [ "VDisk is in read-only mode", ] } { OrderNumber# 0 VDiskId# [82000000:1:0:0:0] NodeId# 1 ErrorReasons# [ "VDisk is 
in read-only mode", ] } { OrderNumber# 1 VDiskId# [82000000:1:0:1:0] NodeId# 2 ErrorReasons# [ "VDisk is in read-only mode", ] } ] Part situations# [ { OrderNumber# 2 Situations# EUUUUU } { OrderNumber# 3 Situations# UPUUUU } { OrderNumber# 4 Situations# UUPUUU } { OrderNumber# 5 Situations# UUUPUU } { OrderNumber# 6 Situations# UUUUPU } { OrderNumber# 7 Situations# UUUUUP } { OrderNumber# 0 Situations# EUUUUU } { OrderNumber# 1 Situations# EUUUUU } ] " ApproximateFreeSpaceShare# 0.999988} === Putting VDisk #3 to read-only === Setting VDisk read-only to 1 for position 3 Invoking SetVDiskReadOnly for vdisk [82000000:1:0:3:0] === Read all 6 blob(s) === SEND TEvGet with key [1:1:0:0:0:131072:0] TEvGetResult: TEvGetResult {Status# OK ResponseSz# 1 {[1:1:0:0:0:131072:0] OK Size# 131072 RequestedSize# 131072}} SEND TEvGet with key [1:1:1:0:0:32768:0] TEvGetResult: TEvGetResult {Status# OK ResponseSz# 1 {[1:1:1:0:0:32768:0] OK Size# 32768 RequestedSize# 32768}} SEND TEvGet with key [1:1:2:0:0:131072:0] TEvGetResult: TEvGetResult {Status# OK ResponseSz# 1 {[1:1:2:0:0:131072:0] OK Size# 131072 RequestedSize# 131072}} SEND TEvGet with key [1:1:3:0:0:32768:0] TEvGetResult: TEvGetResult {Status# OK ResponseSz# 1 {[1:1:3:0:0:32768:0] OK Size# 32768 RequestedSize# 32768}} SEND TEvGet with key [1:1:4:0:0:131072:0] TEvGetResult: TEvGetResult {Status# OK ResponseSz# 1 {[1:1:4:0:0:131072:0] OK Size# 131072 RequestedSize# 131072}} SEND TEvGet with key [1:1:5:0:0:32768:0] TEvGetResult: TEvGetResult {Status# OK ResponseSz# 1 {[1:1:5:0:0:32768:0] OK Size# 32768 RequestedSize# 32768}} === Putting VDisk #4 to read-only === Setting VDisk read-only to 1 for position 4 Invoking SetVDiskReadOnly for vdisk [82000000:1:0:4:0] === Read all 6 blob(s) === SEND TEvGet with key [1:1:0:0:0:131072:0] TEvGetResult: TEvGetResult {Status# OK ResponseSz# 1 {[1:1:0:0:0:131072:0] OK Size# 131072 RequestedSize# 131072}} SEND TEvGet with key [1:1:1:0:0:32768:0] TEvGetResult: TEvGetResult {Status# OK ResponseSz# 1 {[1:1:1:0:0:32768:0] OK Size# 32768 RequestedSize# 32768}} SEND TEvGet with key [1:1:2:0:0:131072:0] TEvGetResult: TEvGetResult {Status# OK ResponseSz# 1 {[1:1:2:0:0:131072:0] OK Size# 131072 RequestedSize# 131072}} SEND TEvGet with key [1:1:3:0:0:32768:0] TEvGetResult: TEvGetResult {Status# OK ResponseSz# 1 {[1:1:3:0:0:32768:0] OK Size# 32768 RequestedSize# 32768}} SEND TEvGet with key [1:1:4:0:0:131072:0] TEvGetResult: TEvGetResult {Status# OK ResponseSz# 1 {[1:1:4:0:0:131072:0] OK Size# 131072 RequestedSize# 131072}} SEND TEvGet with key [1:1:5:0:0:32768:0] TEvGetResult: TEvGetResult {Status# OK ResponseSz# 1 {[1:1:5:0:0:32768:0] OK Size# 32768 RequestedSize# 32768}} === Putting VDisk #5 to read-only === Setting VDisk read-only to 1 for position 5 Invoking SetVDiskReadOnly for vdisk [82000000:1:0:5:0] === Read all 6 blob(s) === SEND TEvGet with key [1:1:0:0:0:131072:0] TEvGetResult: TEvGetResult {Status# OK ResponseSz# 1 {[1:1:0:0:0:131072:0] OK Size# 131072 RequestedSize# 131072}} SEND TEvGet with key [1:1:1:0:0:32768:0] TEvGetResult: TEvGetResult {Status# OK ResponseSz# 1 {[1:1:1:0:0:32768:0] OK Size# 32768 RequestedSize# 32768}} SEND TEvGet with key [1:1:2:0:0:131072:0] TEvGetResult: TEvGetResult {Status# OK ResponseSz# 1 {[1:1:2:0:0:131072:0] OK Size# 131072 RequestedSize# 131072}} SEND TEvGet with key [1:1:3:0:0:32768:0] TEvGetResult: TEvGetResult {Status# OK ResponseSz# 1 {[1:1:3:0:0:32768:0] OK Size# 32768 RequestedSize# 32768}} SEND TEvGet with key [1:1:4:0:0:131072:0] TEvGetResult: TEvGetResult {Status# OK 
ResponseSz# 1 {[1:1:4:0:0:131072:0] OK Size# 131072 RequestedSize# 131072}} SEND TEvGet with key [1:1:5:0:0:32768:0] TEvGetResult: TEvGetResult {Status# OK ResponseSz# 1 {[1:1:5:0:0:32768:0] OK Size# 32768 RequestedSize# 32768}} === Putting VDisk #6 to read-only === Setting VDisk read-only to 1 for position 6 Invoking SetVDiskReadOnly for vdisk [82000000:1:0:6:0] === Read all 6 blob(s) === SEND TEvGet with key [1:1:0:0:0:131072:0] TEvGetResult: TEvGetResult {Status# OK ResponseSz# 1 {[1:1:0:0:0:131072:0] OK Size# 131072 RequestedSize# 131072}} SEND TEvGet with key [1:1:1:0:0:32768:0] TEvGetResult: TEvGetResult {Status# OK ResponseSz# 1 {[1:1:1:0:0:32768:0] OK Size# 32768 RequestedSize# 32768}} SEND TEvGet with key [1:1:2:0:0:131072:0] TEvGetResult: TEvGetResult {Status# OK ResponseSz# 1 {[1:1:2:0:0:131072:0] OK Size# 131072 RequestedSize# 131072}} SEND TEvGet with key [1:1:3:0:0:32768:0] TEvGetResult: TEvGetResult {Status# OK ResponseSz# 1 {[1:1:3:0:0:32768:0] OK Size# 32768 RequestedSize# 32768}} SEND TEvGet with key [1:1:4:0:0:131072:0] TEvGetResult: TEvGetResult {Status# OK ResponseSz# 1 {[1:1:4:0:0:131072:0] OK Size# 131072 RequestedSize# 131072}} SEND TEvGet with key [1:1:5:0:0:32768:0] TEvGetResult: TEvGetResult {Status# OK ResponseSz# 1 {[1:1:5:0:0:32768:0] OK Size# 32768 RequestedSize# 32768}} === Putting VDisk #0 to normal === Setting VDisk read-only to 0 for position 0 Invoking SetVDiskReadOnly for vdisk [82000000:1:0:0:0] === Putting VDisk #1 to normal === Setting VDisk read-only to 0 for position 1 Invoking SetVDiskReadOnly for vdisk [82000000:1:0:1:0] === Putting VDisk #2 to normal === Setting VDisk read-only to 0 for position 2 Invoking SetVDiskReadOnly for vdisk [82000000:1:0:2:0] === Putting VDisk #3 to normal === Setting VDisk read-only to 0 for position 3 Invoking SetVDiskReadOnly for vdisk [82000000:1:0:3:0] === Putting VDisk #4 to normal === Setting VDisk read-only to 0 for position 4 Invoking SetVDiskReadOnly for vdisk [82000000:1:0:4:0] === Putting VDisk #5 to normal === Setting VDisk read-only to 0 for position 5 Invoking SetVDiskReadOnly for vdisk [82000000:1:0:5:0] === Putting VDisk #6 to normal === Setting VDisk read-only to 0 for position 6 Invoking SetVDiskReadOnly for vdisk [82000000:1:0:6:0] SEND TEvPut with key [1:1:6:0:0:131072:0] TEvPutResult: TEvPutResult {Id# [1:1:6:0:0:131072:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999988} ------- [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/ut_blobstorage/ut_move_pdisk/unittest >> BSCMovePDisk::PDiskMove_Mirror3dc3Nodes [GOOD] Test command err: RandomSeed# 16194411225695366378 |83.5%| [TS] {asan, default-linux-x86_64, release} ydb/core/scheme/ut_pg/unittest |83.4%| [LD] {RESULT} $(B)/ydb/core/wrappers/ut/ydb-core-wrappers-ut |83.4%| [TA] {BAZEL_UPLOAD} $(B)/ydb/core/blobstorage/vdisk/hulldb/compstrat/ut/test-results/unittest/{meta.json ... 
results_accumulator.log} ------- [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/ut_blobstorage/ut_read_only_vdisk/unittest >> ReadOnlyVDisk::TestGetWithMustRestoreFirst [GOOD] Test command err: RandomSeed# 7477605548585146098 === Trying to put and get a blob === SEND TEvPut with key [1:1:0:0:0:131072:0] TEvPutResult: TEvPutResult {Id# [1:1:0:0:0:131072:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999988} === Read all 1 blob(s) === SEND TEvGet with key [1:1:0:0:0:131072:0] TEvGetResult: TEvGetResult {Status# OK ResponseSz# 1 {[1:1:0:0:0:131072:0] OK Size# 131072 RequestedSize# 131072}} === Putting VDisk #0 to read-only === Setting VDisk read-only to 1 for position 0 Invoking SetVDiskReadOnly for vdisk [82000000:1:0:0:0] === Write 10 blobs, expect some VDisks refuse parts but writes go through === SEND TEvPut with key [1:1:1:0:0:32768:0] 2026-01-08T15:24:11.588410Z 1 00h01m30.060512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) Unavailable in read-only Sender# [1:5324:704] TEvPutResult: TEvPutResult {Id# [1:1:1:0:0:32768:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999988} SEND TEvPut with key [1:1:2:0:0:131072:0] 2026-01-08T15:24:11.593581Z 1 00h01m30.060512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) Unavailable in read-only Sender# [1:5324:704] TEvPutResult: TEvPutResult {Id# [1:1:2:0:0:131072:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999988} SEND TEvPut with key [1:1:3:0:0:32768:0] 2026-01-08T15:24:11.603018Z 1 00h01m30.060512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) Unavailable in read-only Sender# [1:5324:704] TEvPutResult: TEvPutResult {Id# [1:1:3:0:0:32768:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999988} SEND TEvPut with key [1:1:4:0:0:131072:0] 2026-01-08T15:24:11.605882Z 1 00h01m30.060512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) Unavailable in read-only Sender# [1:5324:704] TEvPutResult: TEvPutResult {Id# [1:1:4:0:0:131072:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999988} SEND TEvPut with key [1:1:5:0:0:32768:0] TEvPutResult: TEvPutResult {Id# [1:1:5:0:0:32768:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999988} SEND TEvPut with key [1:1:6:0:0:131072:0] TEvPutResult: TEvPutResult {Id# [1:1:6:0:0:131072:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999988} SEND TEvPut with key [1:1:7:0:0:32768:0] 2026-01-08T15:24:11.639910Z 1 00h01m30.060512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) Unavailable in read-only Sender# [1:5324:704] TEvPutResult: TEvPutResult {Id# [1:1:7:0:0:32768:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999988} SEND TEvPut with key [1:1:8:0:0:131072:0] 2026-01-08T15:24:11.651207Z 1 00h01m30.060512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) Unavailable in read-only Sender# [1:5324:704] TEvPutResult: TEvPutResult {Id# [1:1:8:0:0:131072:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999988} SEND TEvPut with key [1:1:9:0:0:32768:0] 2026-01-08T15:24:11.664634Z 1 00h01m30.060512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) Unavailable in read-only Sender# [1:5324:704] TEvPutResult: TEvPutResult {Id# [1:1:9:0:0:32768:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999988} SEND TEvPut with key [1:1:10:0:0:131072:0] 2026-01-08T15:24:11.688324Z 1 00h01m30.060512s :BS_SKELETON ERROR: PDiskId# 1000 
VDISK[82000000:_:0:0:0]: (2181038080) Unavailable in read-only Sender# [1:5324:704] TEvPutResult: TEvPutResult {Id# [1:1:10:0:0:131072:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999988} === Read all 11 blob(s) === SEND TEvGet with key [1:1:0:0:0:131072:0] TEvGetResult: TEvGetResult {Status# OK ResponseSz# 1 {[1:1:0:0:0:131072:0] OK Size# 131072 RequestedSize# 131072}} SEND TEvGet with key [1:1:1:0:0:32768:0] TEvGetResult: TEvGetResult {Status# OK ResponseSz# 1 {[1:1:1:0:0:32768:0] OK Size# 32768 RequestedSize# 32768}} SEND TEvGet with key [1:1:2:0:0:131072:0] TEvGetResult: TEvGetResult {Status# OK ResponseSz# 1 {[1:1:2:0:0:131072:0] OK Size# 131072 RequestedSize# 131072}} SEND TEvGet with key [1:1:3:0:0:32768:0] TEvGetResult: TEvGetResult {Status# OK ResponseSz# 1 {[1:1:3:0:0:32768:0] OK Size# 32768 RequestedSize# 32768}} SEND TEvGet with key [1:1:4:0:0:131072:0] TEvGetResult: TEvGetResult {Status# OK ResponseSz# 1 {[1:1:4:0:0:131072:0] OK Size# 131072 RequestedSize# 131072}} SEND TEvGet with key [1:1:5:0:0:32768:0] TEvGetResult: TEvGetResult {Status# OK ResponseSz# 1 {[1:1:5:0:0:32768:0] OK Size# 32768 RequestedSize# 32768}} SEND TEvGet with key [1:1:6:0:0:131072:0] TEvGetResult: TEvGetResult {Status# OK ResponseSz# 1 {[1:1:6:0:0:131072:0] OK Size# 131072 RequestedSize# 131072}} SEND TEvGet with key [1:1:7:0:0:32768:0] TEvGetResult: TEvGetResult {Status# OK ResponseSz# 1 {[1:1:7:0:0:32768:0] OK Size# 32768 RequestedSize# 32768}} SEND TEvGet with key [1:1:8:0:0:131072:0] TEvGetResult: TEvGetResult {Status# OK ResponseSz# 1 {[1:1:8:0:0:131072:0] OK Size# 131072 RequestedSize# 131072}} SEND TEvGet with key [1:1:9:0:0:32768:0] TEvGetResult: TEvGetResult {Status# OK ResponseSz# 1 {[1:1:9:0:0:32768:0] OK Size# 32768 RequestedSize# 32768}} SEND TEvGet with key [1:1:10:0:0:131072:0] TEvGetResult: TEvGetResult {Status# OK ResponseSz# 1 {[1:1:10:0:0:131072:0] OK Size# 131072 RequestedSize# 131072}} === Put 2 more VDisks to read-only === Setting VDisk read-only to 1 for position 1 Invoking SetVDiskReadOnly for vdisk [82000000:1:0:1:0] Setting VDisk read-only to 1 for position 2 Invoking SetVDiskReadOnly for vdisk [82000000:1:0:2:0] === Write 10 more blobs, expect errors === SEND TEvPut with key [1:1:11:0:0:32768:0] 2026-01-08T15:24:16.712950Z 1 00h05m30.160512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) Unavailable in read-only Sender# [1:5324:704] 2026-01-08T15:24:16.713088Z 3 00h05m30.160512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:2:0]: (2181038080) Unavailable in read-only Sender# [1:5338:718] 2026-01-08T15:24:16.713228Z 2 00h05m30.160512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:1:0]: (2181038080) Unavailable in read-only Sender# [1:5331:711] 2026-01-08T15:24:16.714042Z 1 00h05m30.160512s :BS_PROXY_PUT ERROR: [548953550f2310e9] Result# TEvPutResult {Id# [1:1:11:0:0:32768:0] Status# ERROR StatusFlags# { } ErrorReason# "TRestoreStrategy saw optimisticState# EBS_DISINTEGRATED GroupId# 2181038080 BlobId# [1:1:11:0:0:32768:0] Reported ErrorReasons# [ { OrderNumber# 0 VDiskId# [82000000:1:0:0:0] NodeId# 1 ErrorReasons# [ "VDisk is in read-only mode", ] } { OrderNumber# 1 VDiskId# [82000000:1:0:1:0] NodeId# 2 ErrorReasons# [ "VDisk is in read-only mode", ] } { OrderNumber# 2 VDiskId# [82000000:1:0:2:0] NodeId# 3 ErrorReasons# [ "VDisk is in read-only mode", ] } ] Part situations# [ { OrderNumber# 5 Situations# SUUUUU } { OrderNumber# 6 Situations# USUUUU } { OrderNumber# 7 Situations# UUSUUU } { OrderNumber# 0 Situations# UUUEUU } 
{ OrderNumber# 1 Situations# UUUUEU } { OrderNumber# 2 Situations# UUUUUE } { OrderNumber# 3 Situations# UUUSUU } { OrderNumber# 4 Situations# UUUUUS } ] " ApproximateFreeSpaceShare# 0.999988} GroupId# 2181038080 Marker# BPP12 2026-01-08T15:24:16.715899Z 1 00h05m30.160512s :BS_PROXY_PUT ERROR: {BPP72@dsproxy_put.cpp:483} Query history GroupId# 2181038080 HandleClass# TabletLog Tactic# Default History# THistory { Entries# [ TEvVPut{ TimestampMs# 0.455 sample PartId# [1:1:11:0:0:32768:6] QueryCount# 1 VDiskId# [82000000:1:0:2:0] NodeId# 3 } TEvVPut{ TimestampMs# 0.456 sample PartId# [1:1:11:0:0:32768:3] QueryCount# 1 VDiskId# [82000000:1:0:7:0] NodeId# 8 } TEvVPut{ TimestampMs# 0.456 sample PartId# [1:1:11:0:0:32768:2] QueryCount# 1 VDiskId# [82000000:1:0:6:0] NodeId# 7 } TEvVPut{ TimestampMs# 0.456 sample PartId# [1:1:11:0:0:32768:5] QueryCount# 1 VDiskId# [82000000:1:0:1:0] NodeId# 2 } TEvVPut{ TimestampMs# 0.457 sample PartId# [1:1:11:0:0:32768:1] QueryCount# 1 VDiskId# [82000000:1:0:5:0] NodeId# 6 } TEvVPut{ TimestampMs# 0.457 sample PartId# [1:1:11:0:0:32768:4] QueryCount# 1 VDiskId# [82000000:1:0:0:0] NodeId# 1 } TEvVPutResult{ TimestampMs# 1.457 VDiskId# [82000000:1:0:0:0] NodeId# 1 Status# ERROR ErrorReason# "VDisk is in read-only mode" } TEvVPut{ TimestampMs# 1.508 sample PartId# [1:1:11:0:0:32768:4] QueryCount# 1 VDiskId# [82000000:1:0:3:0] NodeId# 4 } TEvVPutResult{ TimestampMs# 1.667 VDiskId# [82000000:1:0:2:0] NodeId# 3 Status# ERROR ErrorReason# "VDisk is in read-only mode" } TEvVPut{ TimestampMs# 1.694 sample PartId# [1:1:11:0:0:32768:6] QueryCount# 1 VDiskId# [82000000:1:0:4:0] NodeId# 5 } TEvVPutResult{ TimestampMs# 1.725 VDiskId# [82000000:1:0:1:0] NodeId# 2 Status# ERROR ErrorReason# "VDisk is in read-only mode" } ] } TEvPutResult: TEvPutResult {Id# [1:1:11:0:0:32768:0] Status# ERROR StatusFlags# { } ErrorReason# "TRestoreStrategy saw optimisticState# EBS_DISINTEGRATED GroupId# 2181038080 BlobId# [1:1:11:0:0:32768:0] Reported ErrorReasons# [ { OrderNumber# 0 VDiskId# [82000000:1:0:0:0] NodeId# 1 ErrorReasons# [ "VDisk is in read-only mode", ] } { OrderNumber# 1 VDiskId# [82000000:1:0:1:0] NodeId# 2 ErrorReasons# [ "VDisk is in read-only mode", ] } { OrderNumber# 2 VDiskId# [82000000:1:0:2:0] NodeId# 3 ErrorReasons# [ "VDisk is in read-only mode", ] } ] Part situations# [ { OrderNumber# 5 Situations# SUUUUU } { OrderNumber# 6 Situations# USUUUU } { OrderNumber# 7 Situations# UUSUUU } { OrderNumber# 0 Situations# UUUEUU } { OrderNumber# 1 Situations# UUUUEU } { OrderNumber# 2 Situations# UUUUUE } { OrderNumber# 3 Situations# UUUSUU } { OrderNumber# 4 Situations# UUUUUS } ] " ApproximateFreeSpaceShare# 0.999988} SEND TEvPut with key [1:1:12:0:0:131072:0] 2026-01-08T15:24:16.717808Z 1 00h05m30.160512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) Unavailable in read-only Sender# [1:5324:704] 2026-01-08T15:24:16.742919Z 2 00h05m30.160512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:1:0]: (2181038080) Unavailable in read-only Sender# [1:5331:711] 2026-01-08T15:24:16.744242Z 3 00h05m30.160512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:2:0]: (2181038080) Unavailable in read-only Sender# [1:5338:718] TEvPutResult: TEvPutResult {Id# [1:1:12:0:0:131072:0] Status# ERROR StatusFlags# { } ErrorReason# "TRestoreStrategy saw optimisticState# EBS_DISINTEGRATED GroupId# 2181038080 BlobId# [1:1:12:0:0:131072:0] Reported ErrorReasons# [ { OrderNumber# 0 VDiskId# [82000000:1:0:0:0] NodeId# 1 ErrorReasons# [ "VDisk is in read-only mode", ] } { 
OrderNumber# 1 VDiskId# [82000000:1:0:1:0] NodeId# 2 ErrorReasons# [ "VDisk is in read-only mode", ] } { OrderNumber# 2 VDiskId# [82000000:1:0:2:0] NodeId# 3 ErrorReasons# [ "VDisk is in read-only mode", ] } ] Part situations# [ { OrderNumber# 4 Situations# SUUUUU } { OrderNumber# 5 Situations# USUUUU } { OrderNumber# 6 Situations# UUSUUU } { OrderNumber# 7 Situations# UUUSUU } { OrderNumber# 0 Situations# UUUUEU } { OrderNumber# 1 Situations# UUUUUE } { OrderNumber# 2 Situations# UUUUEU } { OrderNumber# 3 Situations# UUUUUS } ] " ApproximateFreeSpaceShare# 0.999988} SEND TEvPut with key [1:1:13:0:0:32768:0] 2026-01-08T15:24:16.746198Z 1 00h05m30.160512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) Unavailable in read-only Sender# [1:5324:704] 2026-01-08T15:24:16.755346Z 2 00h05m30.160512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:1:0]: (2181038080) Unavailable in read-only Sender# [1:5331:711] 2026-01-08T15:24:16.756293Z 3 00h05m30.160512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:2:0]: (2181038080) Unavailable in read-only Sender# [1:5338:718] TEvPutResult: TEvPutResult {Id# [1:1:13:0:0:32768:0] Status# ERROR StatusFlags# { } ErrorReason# "TRestoreStrategy saw optimisticState# EBS_DISINTEGRATED GroupId# 2181038080 BlobId# [1:1:13:0:0:32768:0] Reported ErrorReasons# [ { OrderNumber# 0 VDiskId# [82000000:1:0:0:0] NodeId# 1 ErrorReasons# [ "VDisk is in read-only mode", ] } { OrderNumber# 1 VDiskId# [82000000:1:0:1:0] NodeId# 2 ErrorReasons# [ "VDisk is in read-only mode", ] } { OrderN ... Id# [82000000:1:0:2:0] NodeId# 3 ErrorReasons# [ "VDisk is in read-only mode", ] } { OrderNumber# 0 VDiskId# [82000000:1:0:0:0] NodeId# 1 ErrorReasons# [ "VDisk is in read-only mode", ] } ] Part situations# [ { OrderNumber# 1 Situations# EUUUUU } { OrderNumber# 2 Situations# UEUUUU } { OrderNumber# 3 Situations# UUSUUU } { OrderNumber# 4 Situations# UUUSUU } { OrderNumber# 5 Situations# UUUUSU } { OrderNumber# 6 Situations# UUUUUS } { OrderNumber# 7 Situations# USUUUU } { OrderNumber# 0 Situations# EUUUUU } ] " ApproximateFreeSpaceShare# 0.999988} SEND TEvPut with key [1:1:17:0:0:32768:0] 2026-01-08T15:24:16.807561Z 1 00h05m30.160512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) Unavailable in read-only Sender# [1:5324:704] 2026-01-08T15:24:16.807832Z 3 00h05m30.160512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:2:0]: (2181038080) Unavailable in read-only Sender# [1:5338:718] 2026-01-08T15:24:16.807896Z 2 00h05m30.160512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:1:0]: (2181038080) Unavailable in read-only Sender# [1:5331:711] TEvPutResult: TEvPutResult {Id# [1:1:17:0:0:32768:0] Status# ERROR StatusFlags# { } ErrorReason# "TRestoreStrategy saw optimisticState# EBS_DISINTEGRATED GroupId# 2181038080 BlobId# [1:1:17:0:0:32768:0] Reported ErrorReasons# [ { OrderNumber# 0 VDiskId# [82000000:1:0:0:0] NodeId# 1 ErrorReasons# [ "VDisk is in read-only mode", ] } { OrderNumber# 1 VDiskId# [82000000:1:0:1:0] NodeId# 2 ErrorReasons# [ "VDisk is in read-only mode", ] } { OrderNumber# 2 VDiskId# [82000000:1:0:2:0] NodeId# 3 ErrorReasons# [ "VDisk is in read-only mode", ] } ] Part situations# [ { OrderNumber# 0 Situations# EUUUUU } { OrderNumber# 1 Situations# UEUUUU } { OrderNumber# 2 Situations# UUEUUU } { OrderNumber# 3 Situations# UUUSUU } { OrderNumber# 4 Situations# UUUUSU } { OrderNumber# 5 Situations# UUUUUS } { OrderNumber# 6 Situations# SUUUUU } { OrderNumber# 7 Situations# UUSUUU } ] " ApproximateFreeSpaceShare# 
0.999988} SEND TEvPut with key [1:1:18:0:0:131072:0] 2026-01-08T15:24:16.810306Z 1 00h05m30.160512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) Unavailable in read-only Sender# [1:5324:704] 2026-01-08T15:24:16.817691Z 2 00h05m30.160512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:1:0]: (2181038080) Unavailable in read-only Sender# [1:5331:711] 2026-01-08T15:24:16.817824Z 3 00h05m30.160512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:2:0]: (2181038080) Unavailable in read-only Sender# [1:5338:718] TEvPutResult: TEvPutResult {Id# [1:1:18:0:0:131072:0] Status# ERROR StatusFlags# { } ErrorReason# "TRestoreStrategy saw optimisticState# EBS_DISINTEGRATED GroupId# 2181038080 BlobId# [1:1:18:0:0:131072:0] Reported ErrorReasons# [ { OrderNumber# 0 VDiskId# [82000000:1:0:0:0] NodeId# 1 ErrorReasons# [ "VDisk is in read-only mode", ] } { OrderNumber# 1 VDiskId# [82000000:1:0:1:0] NodeId# 2 ErrorReasons# [ "VDisk is in read-only mode", ] } { OrderNumber# 2 VDiskId# [82000000:1:0:2:0] NodeId# 3 ErrorReasons# [ "VDisk is in read-only mode", ] } ] Part situations# [ { OrderNumber# 7 Situations# SUUUUU } { OrderNumber# 0 Situations# UEUUUU } { OrderNumber# 1 Situations# UUEUUU } { OrderNumber# 2 Situations# UUUEUU } { OrderNumber# 3 Situations# UUUUSU } { OrderNumber# 4 Situations# UUUUUS } { OrderNumber# 5 Situations# USUUUU } { OrderNumber# 6 Situations# UUSUUU } ] " ApproximateFreeSpaceShare# 0.999988} SEND TEvPut with key [1:1:19:0:0:32768:0] 2026-01-08T15:24:16.828812Z 1 00h05m30.160512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) Unavailable in read-only Sender# [1:5324:704] 2026-01-08T15:24:16.829056Z 3 00h05m30.160512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:2:0]: (2181038080) Unavailable in read-only Sender# [1:5338:718] 2026-01-08T15:24:16.829142Z 2 00h05m30.160512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:1:0]: (2181038080) Unavailable in read-only Sender# [1:5331:711] TEvPutResult: TEvPutResult {Id# [1:1:19:0:0:32768:0] Status# ERROR StatusFlags# { } ErrorReason# "TRestoreStrategy saw optimisticState# EBS_DISINTEGRATED GroupId# 2181038080 BlobId# [1:1:19:0:0:32768:0] Reported ErrorReasons# [ { OrderNumber# 0 VDiskId# [82000000:1:0:0:0] NodeId# 1 ErrorReasons# [ "VDisk is in read-only mode", ] } { OrderNumber# 1 VDiskId# [82000000:1:0:1:0] NodeId# 2 ErrorReasons# [ "VDisk is in read-only mode", ] } { OrderNumber# 2 VDiskId# [82000000:1:0:2:0] NodeId# 3 ErrorReasons# [ "VDisk is in read-only mode", ] } ] Part situations# [ { OrderNumber# 6 Situations# SUUUUU } { OrderNumber# 7 Situations# USUUUU } { OrderNumber# 0 Situations# UUEUUU } { OrderNumber# 1 Situations# UUUEUU } { OrderNumber# 2 Situations# UUUUEU } { OrderNumber# 3 Situations# UUUUUS } { OrderNumber# 4 Situations# UUSUUU } { OrderNumber# 5 Situations# UUUUSU } ] " ApproximateFreeSpaceShare# 0.999988} SEND TEvPut with key [1:1:20:0:0:131072:0] 2026-01-08T15:24:16.840294Z 1 00h05m30.160512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) Unavailable in read-only Sender# [1:5324:704] 2026-01-08T15:24:16.840437Z 3 00h05m30.160512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:2:0]: (2181038080) Unavailable in read-only Sender# [1:5338:718] 2026-01-08T15:24:16.840562Z 2 00h05m30.160512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:1:0]: (2181038080) Unavailable in read-only Sender# [1:5331:711] TEvPutResult: TEvPutResult {Id# [1:1:20:0:0:131072:0] Status# ERROR StatusFlags# { } ErrorReason# 
"TRestoreStrategy saw optimisticState# EBS_DISINTEGRATED GroupId# 2181038080 BlobId# [1:1:20:0:0:131072:0] Reported ErrorReasons# [ { OrderNumber# 0 VDiskId# [82000000:1:0:0:0] NodeId# 1 ErrorReasons# [ "VDisk is in read-only mode", ] } { OrderNumber# 1 VDiskId# [82000000:1:0:1:0] NodeId# 2 ErrorReasons# [ "VDisk is in read-only mode", ] } { OrderNumber# 2 VDiskId# [82000000:1:0:2:0] NodeId# 3 ErrorReasons# [ "VDisk is in read-only mode", ] } ] Part situations# [ { OrderNumber# 5 Situations# SUUUUU } { OrderNumber# 6 Situations# USUUUU } { OrderNumber# 7 Situations# UUSUUU } { OrderNumber# 0 Situations# UUUEUU } { OrderNumber# 1 Situations# UUUUEU } { OrderNumber# 2 Situations# UUUUUE } { OrderNumber# 3 Situations# UUUSUU } { OrderNumber# 4 Situations# UUUUUS } ] " ApproximateFreeSpaceShare# 0.999988} SEND TEvGet with key [1:1:11:0:0:32768:0] 2026-01-08T15:24:16.872326Z 1 00h05m30.160512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) Unavailable in read-only Sender# [1:5325:705] 2026-01-08T15:24:16.872539Z 2 00h05m30.160512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:1:0]: (2181038080) Unavailable in read-only Sender# [1:5332:712] 2026-01-08T15:24:16.872595Z 3 00h05m30.160512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:2:0]: (2181038080) Unavailable in read-only Sender# [1:5339:719] 2026-01-08T15:24:16.873154Z 1 00h05m30.160512s :BS_PROXY_GET ERROR: [bfc8091b7ebf05b0] Response# TEvGetResult {Status# ERROR ResponseSz# 1 {[1:1:11:0:0:32768:0] ERROR Size# 0 RequestedSize# 32768} ErrorReason# "TStrategyBase saw optimisticState# EBS_DISINTEGRATED GroupId# 2181038080 BlobId# [1:1:11:0:0:32768:0] Reported ErrorReasons# [ { OrderNumber# 0 VDiskId# [82000000:1:0:0:0] NodeId# 1 ErrorReasons# [ "VDisk is in read-only mode", ] } { OrderNumber# 1 VDiskId# [82000000:1:0:1:0] NodeId# 2 ErrorReasons# [ "VDisk is in read-only mode", ] } { OrderNumber# 2 VDiskId# [82000000:1:0:2:0] NodeId# 3 ErrorReasons# [ "VDisk is in read-only mode", ] } ] Part situations# [ { OrderNumber# 5 Situations# PUUUUU } { OrderNumber# 6 Situations# UPUUUU } { OrderNumber# 7 Situations# UUPUUU } { OrderNumber# 0 Situations# UUUEUU } { OrderNumber# 1 Situations# UUUUEU } { OrderNumber# 2 Situations# UUUUUE } { OrderNumber# 3 Situations# AAAPAA } { OrderNumber# 4 Situations# AAAAAA } ] "} Marker# BPG29 2026-01-08T15:24:16.873511Z 1 00h05m30.160512s :BS_PROXY_GET ERROR: {BPG72@dsproxy_get.cpp:427} Query history GroupId# 2181038080 HandleClass# FastRead History# THistory { Entries# [ TEvVGet{ TimestampMs# 0.503 sample PartId# [1:1:11:0:0:32768:4] QueryCount# 1 VDiskId# [82000000:1:0:0:0] NodeId# 1 } TEvVGet{ TimestampMs# 0.504 sample PartId# [1:1:11:0:0:32768:5] QueryCount# 1 VDiskId# [82000000:1:0:1:0] NodeId# 2 } TEvVGet{ TimestampMs# 0.504 sample PartId# [1:1:11:0:0:32768:6] QueryCount# 1 VDiskId# [82000000:1:0:2:0] NodeId# 3 } TEvVGet{ TimestampMs# 0.504 sample PartId# [1:1:11:0:0:32768:1] QueryCount# 6 VDiskId# [82000000:1:0:3:0] NodeId# 4 } TEvVGet{ TimestampMs# 0.504 sample PartId# [1:1:11:0:0:32768:1] QueryCount# 6 VDiskId# [82000000:1:0:4:0] NodeId# 5 } TEvVGet{ TimestampMs# 0.505 sample PartId# [1:1:11:0:0:32768:1] QueryCount# 1 VDiskId# [82000000:1:0:5:0] NodeId# 6 } TEvVGet{ TimestampMs# 0.505 sample PartId# [1:1:11:0:0:32768:2] QueryCount# 1 VDiskId# [82000000:1:0:6:0] NodeId# 7 } TEvVGet{ TimestampMs# 0.505 sample PartId# [1:1:11:0:0:32768:3] QueryCount# 1 VDiskId# [82000000:1:0:7:0] NodeId# 8 } TEvVGetResult{ TimestampMs# 15.147 VDiskId# [82000000:1:0:0:0] NodeId# 1 Status# 
OK } TEvVGetResult{ TimestampMs# 15.855 VDiskId# [82000000:1:0:3:0] NodeId# 4 Status# OK } TEvVGetResult{ TimestampMs# 15.894 VDiskId# [82000000:1:0:4:0] NodeId# 5 Status# OK } TEvVGetResult{ TimestampMs# 15.922 VDiskId# [82000000:1:0:5:0] NodeId# 6 Status# OK } TEvVGetResult{ TimestampMs# 15.949 VDiskId# [82000000:1:0:6:0] NodeId# 7 Status# OK } TEvVGetResult{ TimestampMs# 16.203 VDiskId# [82000000:1:0:7:0] NodeId# 8 Status# OK } TEvVPut{ TimestampMs# 16.228 sample PartId# [1:1:11:0:0:32768:4] QueryCount# 1 VDiskId# [82000000:1:0:0:0] NodeId# 1 } TEvVPut{ TimestampMs# 16.229 sample PartId# [1:1:11:0:0:32768:5] QueryCount# 1 VDiskId# [82000000:1:0:1:0] NodeId# 2 } TEvVPut{ TimestampMs# 16.229 sample PartId# [1:1:11:0:0:32768:6] QueryCount# 1 VDiskId# [82000000:1:0:2:0] NodeId# 3 } TEvVGetResult{ TimestampMs# 16.889 VDiskId# [82000000:1:0:1:0] NodeId# 2 Status# OK } TEvVPut{ TimestampMs# 16.896 sample PartId# [1:1:11:0:0:32768:5] QueryCount# 1 VDiskId# [82000000:1:0:1:0] NodeId# 2 } TEvVGetResult{ TimestampMs# 16.933 VDiskId# [82000000:1:0:2:0] NodeId# 3 Status# OK } TEvVPut{ TimestampMs# 16.937 sample PartId# [1:1:11:0:0:32768:6] QueryCount# 1 VDiskId# [82000000:1:0:2:0] NodeId# 3 } TEvVPutResult{ TimestampMs# 16.968 VDiskId# [82000000:1:0:0:0] NodeId# 1 Status# ERROR ErrorReason# "VDisk is in read-only mode" } PutAcceleration{ TimestampMs# 16.972 } PutAcceleration{ TimestampMs# 16.992 } TEvVPutResult{ TimestampMs# 17.186 VDiskId# [82000000:1:0:1:0] NodeId# 2 Status# ERROR ErrorReason# "VDisk is in read-only mode" } TEvVPutResult{ TimestampMs# 17.402 VDiskId# [82000000:1:0:2:0] NodeId# 3 Status# ERROR ErrorReason# "VDisk is in read-only mode" } ] } 2026-01-08T15:24:16.873632Z 2 00h05m30.160512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:1:0]: (2181038080) Unavailable in read-only Sender# [1:5332:712] 2026-01-08T15:24:16.873697Z 3 00h05m30.160512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:2:0]: (2181038080) Unavailable in read-only Sender# [1:5339:719] TEvGetResult: TEvGetResult {Status# ERROR ResponseSz# 1 {[1:1:11:0:0:32768:0] ERROR Size# 0 RequestedSize# 32768} ErrorReason# "TStrategyBase saw optimisticState# EBS_DISINTEGRATED GroupId# 2181038080 BlobId# [1:1:11:0:0:32768:0] Reported ErrorReasons# [ { OrderNumber# 0 VDiskId# [82000000:1:0:0:0] NodeId# 1 ErrorReasons# [ "VDisk is in read-only mode", ] } { OrderNumber# 1 VDiskId# [82000000:1:0:1:0] NodeId# 2 ErrorReasons# [ "VDisk is in read-only mode", ] } { OrderNumber# 2 VDiskId# [82000000:1:0:2:0] NodeId# 3 ErrorReasons# [ "VDisk is in read-only mode", ] } ] Part situations# [ { OrderNumber# 5 Situations# PUUUUU } { OrderNumber# 6 Situations# UPUUUU } { OrderNumber# 7 Situations# UUPUUU } { OrderNumber# 0 Situations# UUUEUU } { OrderNumber# 1 Situations# UUUUEU } { OrderNumber# 2 Situations# UUUUUE } { OrderNumber# 3 Situations# AAAPAA } { OrderNumber# 4 Situations# AAAAAA } ] "} |83.4%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/vdisk/hulldb/barriers/ut/unittest |83.5%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/dsproxy/ut_strategy/unittest |83.5%| [TM] {BAZEL_UPLOAD} ydb/core/blobstorage/dsproxy/ut_strategy/unittest |83.5%| [TA] {RESULT} $(B)/ydb/core/blobstorage/vdisk/hulldb/compstrat/ut/test-results/unittest/{meta.json ... 
results_accumulator.log}
|83.5%| [TM] {BAZEL_UPLOAD} ydb/core/blobstorage/incrhuge/ut/unittest
|83.5%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/vdisk/hulldb/barriers/ut/unittest
|83.5%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/dsproxy/ut_strategy/unittest
>> TQueueBackpressureTest::IncorrectMessageId [GOOD]
>> BSCRestartPDisk::RestartNotAllowed [GOOD]
|83.5%| [TM] {BAZEL_UPLOAD} ydb/core/blobstorage/ut_blobstorage/ut_read_only_vdisk/unittest
|83.5%| [TA] {BAZEL_UPLOAD} $(B)/ydb/library/persqueue/topic_parser/ut/test-results/unittest/{meta.json ... results_accumulator.log}
|83.5%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/ut_blobstorage/ut_vdisk_restart/unittest
|83.5%| [TS] {BAZEL_UPLOAD} ydb/core/scheme/ut_pg/unittest
|83.5%| [TM] {BAZEL_UPLOAD} ydb/core/blobstorage/ut_blobstorage/ut_move_pdisk/unittest
|83.5%| [TA] {RESULT} $(B)/ydb/library/persqueue/topic_parser/ut/test-results/unittest/{meta.json ... results_accumulator.log}
|83.5%| [TM] {BAZEL_UPLOAD} ydb/core/blobstorage/vdisk/hulldb/barriers/ut/unittest
|83.5%| [TM] {BAZEL_UPLOAD} ydb/core/blobstorage/dsproxy/ut_strategy/unittest
|83.5%| [TS] {asan, default-linux-x86_64, release} ydb/library/actors/testlib/ut/unittest
|83.5%| [TM] {BAZEL_UPLOAD} ydb/core/blobstorage/ut_blobstorage/ut_read_only_vdisk/unittest
|83.5%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/vdisk/anubis_osiris/ut/unittest
|83.5%| [TM] {BAZEL_UPLOAD} ydb/core/blobstorage/dsproxy/ut_strategy/unittest
|83.5%| [TM] {BAZEL_UPLOAD} ydb/core/blobstorage/vdisk/hulldb/barriers/ut/unittest
|83.5%| [TS] {BAZEL_UPLOAD} ydb/library/actors/testlib/ut/unittest
|83.5%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/tests/tools/nemesis/ut/ydb-tests-tools-nemesis-ut
|83.6%| [TM] {BAZEL_UPLOAD} ydb/core/blobstorage/vdisk/anubis_osiris/ut/unittest
|83.6%| [TM] {BAZEL_UPLOAD} ydb/core/blobstorage/ut_blobstorage/ut_vdisk_restart/unittest
|83.6%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/tests/tools/nemesis/ut/ydb-tests-tools-nemesis-ut
|83.6%| [LD] {RESULT} $(B)/ydb/tests/tools/nemesis/ut/ydb-tests-tools-nemesis-ut
|83.6%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/vdisk/hulldb/barriers/ut/unittest
|83.6%| [TM] {BAZEL_UPLOAD} ydb/core/blobstorage/vdisk/hulldb/barriers/ut/unittest
------- [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/ut_blobstorage/ut_restart_pdisk/unittest >> BSCRestartPDisk::RestartNotAllowed [GOOD]
Test command err: RandomSeed# 195782921086861285
>> TBsVDiskRepl1::ReplProxyKeepBits
>> TBsVDiskRangeHuge::Simple3PutRangeGetAllForwardFresh [GOOD]
>> TBsVDiskRangeHuge::Simple3PutRangeGetAllForwardCompaction
|83.6%| [TM] {BAZEL_UPLOAD} ydb/core/blobstorage/ut_blobstorage/ut_restart_pdisk/unittest
|83.6%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/backpressure/ut/unittest
>> TQueueBackpressureTest::IncorrectMessageId [GOOD]
|83.6%| [TM] {BAZEL_UPLOAD} ydb/core/blobstorage/backpressure/ut/unittest
>> TBsVDiskRange::Simple3PutRangeGetAllForwardFresh [GOOD]
>> TBsVDiskRange::Simple3PutRangeGetAllForwardCompaction
|83.6%| [TA] $(B)/ydb/core/blobstorage/ut_blobstorage/ut_move_pdisk/test-results/unittest/{meta.json ... results_accumulator.log}
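=== Illustrative sketch (not from this test run) ===
The TRestoreStrategy errors earlier in this log carry a "Part situations#" matrix: one row per disk (OrderNumber) and one letter per part of the 4+2 block erasure encoding, which splits a blob into 6 parts and can reconstruct it from any 4. The letter semantics below (E ~ error, P/S/A ~ part placed, sent, or assimilated, U ~ untried) are an assumption inferred from this log, not the YDB definition, and the sketch only checks toy readability, not the real put-strategy decision.

// situations.cpp -- toy decoder for a "Part situations#" matrix.
// Letter semantics are ASSUMED from the log above; build: g++ situations.cpp
#include <iostream>
#include <string>
#include <vector>

struct Situation {
    int orderNumber;     // disk position within the group
    std::string row;     // one letter per erasure part
};

// 4+2 block erasure: 6 parts total, any 4 recover the blob.
bool GetCanSucceed(const std::vector<Situation>& matrix,
                   int dataParts = 4, int totalParts = 6) {
    int available = 0;
    for (int part = 0; part < totalParts; ++part) {
        bool placed = false;
        for (const auto& s : matrix) {
            char c = part < (int)s.row.size() ? s.row[part] : 'U';
            if (c == 'P' || c == 'S' || c == 'A') placed = true;  // assumed "part exists"
        }
        if (placed) ++available;
    }
    return available >= dataParts;
}

int main() {
    // Rows copied from the failed TEvPut for blob [1:1:5:0:0:32768:0] above:
    // three read-only disks answered E for part 1, parts 2..6 were placed.
    std::vector<Situation> m = {
        {2, "EUUUUU"}, {3, "UPUUUU"}, {4, "UUPUUU"}, {5, "UUUPUU"},
        {6, "UUUUPU"}, {7, "UUUUUP"}, {0, "EUUUUU"}, {1, "EUUUUU"},
    };
    std::cout << (GetCanSucceed(m) ? "readable" : "lost") << "\n";  // prints "readable"
}

Five of the six parts landed, so under these assumptions the blob stays readable even though the put itself was failed; that is consistent with the TEvGet for [1:1:5:0:0:32768:0] returning OK right after the error in the log.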
|83.6%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/blobstorage/ut_blobstorage/ut_blob_depot_fat/blobstorage-ut_blobstorage-ut_blob_depot_fat
|83.6%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/vdisk/hulldb/barriers/ut/unittest
|83.6%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/blobstorage/ut_blobstorage/ut_blob_depot_fat/blobstorage-ut_blobstorage-ut_blob_depot_fat
|83.6%| [LD] {RESULT} $(B)/ydb/core/blobstorage/ut_blobstorage/ut_blob_depot_fat/blobstorage-ut_blobstorage-ut_blob_depot_fat
|83.6%| [TA] {BAZEL_UPLOAD} $(B)/ydb/core/blobstorage/ut_blobstorage/ut_move_pdisk/test-results/unittest/{meta.json ... results_accumulator.log}
|83.6%| [TM] {BAZEL_UPLOAD} ydb/core/blobstorage/vdisk/hulldb/barriers/ut/unittest
|83.6%| [TA] $(B)/ydb/core/scheme/ut_pg/test-results/unittest/{meta.json ... results_accumulator.log}
|83.6%| [TA] {RESULT} $(B)/ydb/core/blobstorage/ut_blobstorage/ut_move_pdisk/test-results/unittest/{meta.json ... results_accumulator.log}
|83.6%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/ut_blobstorage/ut_bridge/unittest
|83.6%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/blobstorage/ut_blobstorage/ut_statestorage/core-blobstorage-ut_blobstorage-ut_statestorage
|83.6%| [TA] {RESULT} $(B)/ydb/core/scheme/ut_pg/test-results/unittest/{meta.json ... results_accumulator.log}
|83.6%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/backpressure/ut/unittest
|83.6%| [TA] {BAZEL_UPLOAD} $(B)/ydb/core/scheme/ut_pg/test-results/unittest/{meta.json ... results_accumulator.log}
|83.6%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/blobstorage/ut_blobstorage/ut_statestorage/core-blobstorage-ut_blobstorage-ut_statestorage
|83.6%| [TM] {BAZEL_UPLOAD} ydb/core/blobstorage/ut_blobstorage/ut_bridge/unittest
>> BridgeGet::PartRestorationAcrossBridge
|83.6%| [LD] {RESULT} $(B)/ydb/core/blobstorage/ut_blobstorage/ut_statestorage/core-blobstorage-ut_blobstorage-ut_statestorage
|83.6%| [TA] $(B)/ydb/core/blobstorage/ut_blobstorage/ut_vdisk_restart/test-results/unittest/{meta.json ... results_accumulator.log}
|83.6%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/ut_blobstorage/ut_bridge/unittest
|83.6%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/ut_blobstorage/ut_bridge/unittest
|83.6%| [TM] {BAZEL_UPLOAD} ydb/core/blobstorage/backpressure/ut/unittest
|83.6%| [TM] {BAZEL_UPLOAD} ydb/core/blobstorage/ut_blobstorage/ut_bridge/unittest
|83.6%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/ut_blobstorage/ut_bridge/unittest
|83.6%| [TM] {BAZEL_UPLOAD} ydb/core/blobstorage/ut_blobstorage/ut_bridge/unittest
|83.6%| [TA] {BAZEL_UPLOAD} $(B)/ydb/core/blobstorage/ut_blobstorage/ut_vdisk_restart/test-results/unittest/{meta.json ... results_accumulator.log}
|83.6%| [TA] {RESULT} $(B)/ydb/core/blobstorage/ut_blobstorage/ut_vdisk_restart/test-results/unittest/{meta.json ... results_accumulator.log}
|83.6%| [TA] $(B)/ydb/library/actors/testlib/ut/test-results/unittest/{meta.json ... results_accumulator.log}
>> BridgeGet::PartRestorationAcrossBridgeOnDiscover
|83.6%| [TA] $(B)/ydb/core/blobstorage/vdisk/anubis_osiris/ut/test-results/unittest/{meta.json ... results_accumulator.log}
|83.6%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/ut_blobstorage/ut_bridge/unittest
|83.6%| [TM] {BAZEL_UPLOAD} ydb/core/blobstorage/ut_blobstorage/ut_bridge/unittest
|83.6%| [TA] {RESULT} $(B)/ydb/library/actors/testlib/ut/test-results/unittest/{meta.json ... results_accumulator.log}
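=== Illustrative sketch (not from this test run) ===
The progress entries above all follow one shape: |NN.N%| [TAG] {flags} path, where TAG is TM/TS/TA/LD/AR and the flags set may carry markers such as FAILED, RESULT, SKIPPED or BAZEL_UPLOAD. A minimal tally over a saved log, assuming only that shape (the field names here are ours, not ya's):

// tally.cpp -- count ya-make progress entries by tag and FAILED flag.
// Build: g++ -std=c++17 tally.cpp; run: ./a.out < ya_log.txt
#include <iostream>
#include <map>
#include <regex>
#include <string>

int main() {
    const std::regex entry(R"(\|\s*(\d+\.\d+)%\|\s*\[(\w+)\]\s*\{([^}]*)\})");
    std::map<std::string, int> byTag;
    int failed = 0;
    std::string line;
    while (std::getline(std::cin, line)) {
        // Scan every entry on the line; pasted logs may hold several per line.
        for (auto it = std::sregex_iterator(line.begin(), line.end(), entry);
             it != std::sregex_iterator(); ++it) {
            ++byTag[(*it)[2].str()];
            if ((*it)[3].str().find("FAILED") != std::string::npos) ++failed;
        }
    }
    for (const auto& [tag, n] : byTag) std::cout << tag << ": " << n << "\n";
    std::cout << "entries flagged FAILED: " << failed << "\n";
}

Fed the head of this document, it would report the BAZEL_DOWNLOAD FAILED entries separately from the ordinary archive/link progress, which is a quick way to pull the failure count out of a multi-megabyte run.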
|83.6%| [TM] {BAZEL_UPLOAD} ydb/core/blobstorage/ut_blobstorage/ut_bridge/unittest
|83.6%| [TA] {BAZEL_UPLOAD} $(B)/ydb/library/actors/testlib/ut/test-results/unittest/{meta.json ... results_accumulator.log}
|83.6%| [TA] {BAZEL_UPLOAD} $(B)/ydb/core/blobstorage/vdisk/anubis_osiris/ut/test-results/unittest/{meta.json ... results_accumulator.log}
|83.6%| [TS] {asan, default-linux-x86_64, release} ydb/core/wrappers/ut/unittest
|83.6%| [TA] {RESULT} $(B)/ydb/core/blobstorage/vdisk/anubis_osiris/ut/test-results/unittest/{meta.json ... results_accumulator.log}
>> TBsLocalRecovery::StartStopNotEmptyDB [GOOD]
>> TBsLocalRecovery::WriteRestartRead
|83.6%| [TS] {BAZEL_UPLOAD} ydb/core/wrappers/ut/unittest
|83.7%| [TS] {asan, default-linux-x86_64, release} ydb/core/wrappers/ut/unittest
>> BsControllerTest::SelfHealBlock4Plus2 [GOOD]
>> TS3WrapperTests::HeadUnknownObject
|83.7%| [TS] {BAZEL_UPLOAD} ydb/core/wrappers/ut/unittest
>> TBsVDiskRangeHuge::Simple3PutRangeGetAllForwardCompaction [GOOD]
>> TBsVDiskRangeHuge::Simple3PutRangeGetAllBackwardFresh
|83.7%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/ut_blobstorage/ut_bridge/unittest
|83.7%| [TM] {BAZEL_UPLOAD} ydb/core/blobstorage/ut_blobstorage/ut_bridge/unittest
>> TS3WrapperTests::HeadUnknownObject [GOOD]
>> TBsVDiskRange::Simple3PutRangeGetAllForwardCompaction [GOOD]
>> TBsVDiskRange::Simple3PutRangeGetMiddleForwardCompaction
>> TCowBTreeTest::MultipleSnapshots [GOOD]
>> TCowBTreeTest::MultipleSnapshotsWithGc
>> TBsVDiskRepl1::ReplProxyKeepBits [GOOD]
>> TBsVDiskRepl2::ReplEraseDiskRestoreWOOneDisk
|83.7%| [TS] {asan, default-linux-x86_64, release} ydb/core/wrappers/ut/unittest
>> TS3WrapperTests::GetUnknownObject
|83.7%| [TS] {asan, default-linux-x86_64, release} ydb/core/wrappers/ut/unittest
>> TS3WrapperTests::CopyPartUpload
>> ReadOnlyVDisk::TestWrites [GOOD]
|83.7%| [TS] {BAZEL_UPLOAD} ydb/core/wrappers/ut/unittest
|83.7%| [TS] {BAZEL_UPLOAD} ydb/core/wrappers/ut/unittest
|83.7%| [TS] {asan, default-linux-x86_64, release} ydb/core/wrappers/ut/unittest
>> TS3WrapperTests::GetUnknownObject [GOOD]
>> TS3WrapperTests::CopyPartUpload [GOOD]
|83.7%| [TS] {BAZEL_UPLOAD} ydb/core/wrappers/ut/unittest
------- [TS] {asan, default-linux-x86_64, release} ydb/core/wrappers/ut/unittest >> TS3WrapperTests::HeadUnknownObject [GOOD]
Test command err: 2026-01-08T15:24:25.369227Z node 1 :S3_WRAPPER NOTICE: s3_storage.h:75: Request: uuid# 1D201BE5-6D29-445E-B400-2E7E4360EF71, request# HeadObject { Bucket: TEST Key: key }
REQUEST: HEAD /TEST/key HTTP/1.1
HEADERS:
Host: localhost:15371
Accept: */*
Connection: Upgrade, HTTP2-Settings
Upgrade: h2c
HTTP2-Settings: AAMAAABkAAQAoAAAAAIAAAAA
amz-sdk-invocation-id: 08B2E69F-D2B3-4412-8B37-62458C0CF539
amz-sdk-request: attempt=1
content-type: application/xml
user-agent: aws-sdk-cpp/1.11.37 Linux/5.15.0-157-generic x86_64 Clang/20.1.8
x-amz-api-version: 2006-03-01
2026-01-08T15:24:25.406853Z node 1 :S3_WRAPPER NOTICE: s3_storage.h:63: Response: uuid# 1D201BE5-6D29-445E-B400-2E7E4360EF71, response# No response body.
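=== Illustrative sketch (not from this test run) ===
TS3WrapperTests::HeadUnknownObject above issues HeadObject for a key that was never put, and the server answers with no body. The test itself drives aws-sdk-cpp through the ydb/core/wrappers S3 client; the sketch below only reproduces the wire-level idea with libcurl. The bucket, key and port are copied from this run (the port is ephemeral), and the expected 404 is an assumption, since the log records only "No response body."

// head_check.cpp -- HEAD an absent key and report the status code.
// Build: g++ head_check.cpp -lcurl
#include <curl/curl.h>
#include <iostream>

int main() {
    curl_global_init(CURL_GLOBAL_DEFAULT);
    CURL* curl = curl_easy_init();
    if (!curl) return 1;
    // Bucket/key mirror the request in the log; the port changes per run.
    curl_easy_setopt(curl, CURLOPT_URL, "http://localhost:15371/TEST/key");
    curl_easy_setopt(curl, CURLOPT_NOBODY, 1L);  // send HEAD, expect no body
    long code = 0;
    if (curl_easy_perform(curl) == CURLE_OK)
        curl_easy_getinfo(curl, CURLINFO_RESPONSE_CODE, &code);
    std::cout << "HTTP " << code
              << (code == 404 ? " (object absent, as assumed)" : "") << "\n";
    curl_easy_cleanup(curl);
    curl_global_cleanup();
}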
|83.7%| [TS] {BAZEL_UPLOAD} ydb/core/wrappers/ut/unittest >> TS3WrapperTests::PutObject ------- [TM] {asan, default-linux-x86_64, release} ydb/core/mind/bscontroller/ut_selfheal/unittest >> BsControllerTest::SelfHealBlock4Plus2 [GOOD] Test command err: 2026-01-08T15:24:08.578068Z 1 00h00m00.000000s :BS_NODE DEBUG: [1] Bootstrap 2026-01-08T15:24:08.578115Z 1 00h00m00.000000s :BS_NODE DEBUG: [1] Connect 2026-01-08T15:24:08.578190Z 2 00h00m00.000000s :BS_NODE DEBUG: [2] Bootstrap 2026-01-08T15:24:08.578233Z 2 00h00m00.000000s :BS_NODE DEBUG: [2] Connect 2026-01-08T15:24:08.578273Z 3 00h00m00.000000s :BS_NODE DEBUG: [3] Bootstrap 2026-01-08T15:24:08.578295Z 3 00h00m00.000000s :BS_NODE DEBUG: [3] Connect 2026-01-08T15:24:08.578347Z 4 00h00m00.000000s :BS_NODE DEBUG: [4] Bootstrap 2026-01-08T15:24:08.578383Z 4 00h00m00.000000s :BS_NODE DEBUG: [4] Connect 2026-01-08T15:24:08.578428Z 5 00h00m00.000000s :BS_NODE DEBUG: [5] Bootstrap 2026-01-08T15:24:08.578451Z 5 00h00m00.000000s :BS_NODE DEBUG: [5] Connect 2026-01-08T15:24:08.578488Z 6 00h00m00.000000s :BS_NODE DEBUG: [6] Bootstrap 2026-01-08T15:24:08.578511Z 6 00h00m00.000000s :BS_NODE DEBUG: [6] Connect 2026-01-08T15:24:08.578561Z 7 00h00m00.000000s :BS_NODE DEBUG: [7] Bootstrap 2026-01-08T15:24:08.578586Z 7 00h00m00.000000s :BS_NODE DEBUG: [7] Connect 2026-01-08T15:24:08.578630Z 8 00h00m00.000000s :BS_NODE DEBUG: [8] Bootstrap 2026-01-08T15:24:08.578653Z 8 00h00m00.000000s :BS_NODE DEBUG: [8] Connect 2026-01-08T15:24:08.578687Z 9 00h00m00.000000s :BS_NODE DEBUG: [9] Bootstrap 2026-01-08T15:24:08.578708Z 9 00h00m00.000000s :BS_NODE DEBUG: [9] Connect 2026-01-08T15:24:08.578752Z 10 00h00m00.000000s :BS_NODE DEBUG: [10] Bootstrap 2026-01-08T15:24:08.578797Z 10 00h00m00.000000s :BS_NODE DEBUG: [10] Connect 2026-01-08T15:24:08.578838Z 11 00h00m00.000000s :BS_NODE DEBUG: [11] Bootstrap 2026-01-08T15:24:08.578860Z 11 00h00m00.000000s :BS_NODE DEBUG: [11] Connect 2026-01-08T15:24:08.578905Z 12 00h00m00.000000s :BS_NODE DEBUG: [12] Bootstrap 2026-01-08T15:24:08.578929Z 12 00h00m00.000000s :BS_NODE DEBUG: [12] Connect 2026-01-08T15:24:08.578976Z 13 00h00m00.000000s :BS_NODE DEBUG: [13] Bootstrap 2026-01-08T15:24:08.578999Z 13 00h00m00.000000s :BS_NODE DEBUG: [13] Connect 2026-01-08T15:24:08.579037Z 14 00h00m00.000000s :BS_NODE DEBUG: [14] Bootstrap 2026-01-08T15:24:08.579059Z 14 00h00m00.000000s :BS_NODE DEBUG: [14] Connect 2026-01-08T15:24:08.579123Z 15 00h00m00.000000s :BS_NODE DEBUG: [15] Bootstrap 2026-01-08T15:24:08.579148Z 15 00h00m00.000000s :BS_NODE DEBUG: [15] Connect 2026-01-08T15:24:08.579184Z 16 00h00m00.000000s :BS_NODE DEBUG: [16] Bootstrap 2026-01-08T15:24:08.579205Z 16 00h00m00.000000s :BS_NODE DEBUG: [16] Connect 2026-01-08T15:24:08.579238Z 17 00h00m00.000000s :BS_NODE DEBUG: [17] Bootstrap 2026-01-08T15:24:08.579260Z 17 00h00m00.000000s :BS_NODE DEBUG: [17] Connect 2026-01-08T15:24:08.579300Z 18 00h00m00.000000s :BS_NODE DEBUG: [18] Bootstrap 2026-01-08T15:24:08.579333Z 18 00h00m00.000000s :BS_NODE DEBUG: [18] Connect 2026-01-08T15:24:08.579391Z 19 00h00m00.000000s :BS_NODE DEBUG: [19] Bootstrap 2026-01-08T15:24:08.579416Z 19 00h00m00.000000s :BS_NODE DEBUG: [19] Connect 2026-01-08T15:24:08.579452Z 20 00h00m00.000000s :BS_NODE DEBUG: [20] Bootstrap 2026-01-08T15:24:08.579473Z 20 00h00m00.000000s :BS_NODE DEBUG: [20] Connect 2026-01-08T15:24:08.579509Z 21 00h00m00.000000s :BS_NODE DEBUG: [21] Bootstrap 2026-01-08T15:24:08.579530Z 21 00h00m00.000000s :BS_NODE DEBUG: [21] Connect 2026-01-08T15:24:08.579564Z 22 00h00m00.000000s :BS_NODE 
DEBUG: [22] Bootstrap 2026-01-08T15:24:08.579585Z 22 00h00m00.000000s :BS_NODE DEBUG: [22] Connect 2026-01-08T15:24:08.579617Z 23 00h00m00.000000s :BS_NODE DEBUG: [23] Bootstrap 2026-01-08T15:24:08.579638Z 23 00h00m00.000000s :BS_NODE DEBUG: [23] Connect 2026-01-08T15:24:08.579674Z 24 00h00m00.000000s :BS_NODE DEBUG: [24] Bootstrap 2026-01-08T15:24:08.579695Z 24 00h00m00.000000s :BS_NODE DEBUG: [24] Connect 2026-01-08T15:24:08.579734Z 25 00h00m00.000000s :BS_NODE DEBUG: [25] Bootstrap 2026-01-08T15:24:08.579765Z 25 00h00m00.000000s :BS_NODE DEBUG: [25] Connect 2026-01-08T15:24:08.579805Z 26 00h00m00.000000s :BS_NODE DEBUG: [26] Bootstrap 2026-01-08T15:24:08.579839Z 26 00h00m00.000000s :BS_NODE DEBUG: [26] Connect 2026-01-08T15:24:08.579877Z 27 00h00m00.000000s :BS_NODE DEBUG: [27] Bootstrap 2026-01-08T15:24:08.579901Z 27 00h00m00.000000s :BS_NODE DEBUG: [27] Connect 2026-01-08T15:24:08.579957Z 28 00h00m00.000000s :BS_NODE DEBUG: [28] Bootstrap 2026-01-08T15:24:08.579980Z 28 00h00m00.000000s :BS_NODE DEBUG: [28] Connect 2026-01-08T15:24:08.580019Z 29 00h00m00.000000s :BS_NODE DEBUG: [29] Bootstrap 2026-01-08T15:24:08.580040Z 29 00h00m00.000000s :BS_NODE DEBUG: [29] Connect 2026-01-08T15:24:08.580073Z 30 00h00m00.000000s :BS_NODE DEBUG: [30] Bootstrap 2026-01-08T15:24:08.580094Z 30 00h00m00.000000s :BS_NODE DEBUG: [30] Connect 2026-01-08T15:24:08.580133Z 31 00h00m00.000000s :BS_NODE DEBUG: [31] Bootstrap 2026-01-08T15:24:08.580153Z 31 00h00m00.000000s :BS_NODE DEBUG: [31] Connect 2026-01-08T15:24:08.580194Z 32 00h00m00.000000s :BS_NODE DEBUG: [32] Bootstrap 2026-01-08T15:24:08.580215Z 32 00h00m00.000000s :BS_NODE DEBUG: [32] Connect 2026-01-08T15:24:08.598263Z 1 00h00m00.000000s :BS_NODE DEBUG: [1] ClientConnected Sender# [1:2163:55] Status# ERROR ClientId# [1:2163:55] ServerId# [0:0:0] PipeClient# [1:2163:55] 2026-01-08T15:24:08.599498Z 2 00h00m00.000000s :BS_NODE DEBUG: [2] ClientConnected Sender# [2:2164:37] Status# ERROR ClientId# [2:2164:37] ServerId# [0:0:0] PipeClient# [2:2164:37] 2026-01-08T15:24:08.599552Z 3 00h00m00.000000s :BS_NODE DEBUG: [3] ClientConnected Sender# [3:2165:37] Status# ERROR ClientId# [3:2165:37] ServerId# [0:0:0] PipeClient# [3:2165:37] 2026-01-08T15:24:08.599600Z 4 00h00m00.000000s :BS_NODE DEBUG: [4] ClientConnected Sender# [4:2166:37] Status# ERROR ClientId# [4:2166:37] ServerId# [0:0:0] PipeClient# [4:2166:37] 2026-01-08T15:24:08.599644Z 5 00h00m00.000000s :BS_NODE DEBUG: [5] ClientConnected Sender# [5:2167:37] Status# ERROR ClientId# [5:2167:37] ServerId# [0:0:0] PipeClient# [5:2167:37] 2026-01-08T15:24:08.599681Z 6 00h00m00.000000s :BS_NODE DEBUG: [6] ClientConnected Sender# [6:2168:37] Status# ERROR ClientId# [6:2168:37] ServerId# [0:0:0] PipeClient# [6:2168:37] 2026-01-08T15:24:08.599716Z 7 00h00m00.000000s :BS_NODE DEBUG: [7] ClientConnected Sender# [7:2169:37] Status# ERROR ClientId# [7:2169:37] ServerId# [0:0:0] PipeClient# [7:2169:37] 2026-01-08T15:24:08.599751Z 8 00h00m00.000000s :BS_NODE DEBUG: [8] ClientConnected Sender# [8:2170:37] Status# ERROR ClientId# [8:2170:37] ServerId# [0:0:0] PipeClient# [8:2170:37] 2026-01-08T15:24:08.599798Z 9 00h00m00.000000s :BS_NODE DEBUG: [9] ClientConnected Sender# [9:2171:37] Status# ERROR ClientId# [9:2171:37] ServerId# [0:0:0] PipeClient# [9:2171:37] 2026-01-08T15:24:08.599840Z 10 00h00m00.000000s :BS_NODE DEBUG: [10] ClientConnected Sender# [10:2172:37] Status# ERROR ClientId# [10:2172:37] ServerId# [0:0:0] PipeClient# [10:2172:37] 2026-01-08T15:24:08.599885Z 11 00h00m00.000000s :BS_NODE DEBUG: [11] 
ClientConnected Sender# [11:2173:37] Status# ERROR ClientId# [11:2173:37] ServerId# [0:0:0] PipeClient# [11:2173:37] 2026-01-08T15:24:08.599931Z 12 00h00m00.000000s :BS_NODE DEBUG: [12] ClientConnected Sender# [12:2174:37] Status# ERROR ClientId# [12:2174:37] ServerId# [0:0:0] PipeClient# [12:2174:37] 2026-01-08T15:24:08.599976Z 13 00h00m00.000000s :BS_NODE DEBUG: [13] ClientConnected Sender# [13:2175:37] Status# ERROR ClientId# [13:2175:37] ServerId# [0:0:0] PipeClient# [13:2175:37] 2026-01-08T15:24:08.600025Z 14 00h00m00.000000s :BS_NODE DEBUG: [14] ClientConnected Sender# [14:2176:37] Status# ERROR ClientId# [14:2176:37] ServerId# [0:0:0] PipeClient# [14:2176:37] 2026-01-08T15:24:08.600060Z 15 00h00m00.000000s :BS_NODE DEBUG: [15] ClientConnected Sender# [15:2177:37] Status# ERROR ClientId# [15:2177:37] ServerId# [0:0:0] PipeClient# [15:2177:37] 2026-01-08T15:24:08.600095Z 16 00h00m00.000000s :BS_NODE DEBUG: [16] ClientConnected Sender# [16:2178:37] Status# ERROR ClientId# [16:2178:37] ServerId# [0:0:0] PipeClient# [16:2178:37] 2026-01-08T15:24:08.600133Z 17 00h00m00.000000s :BS_NODE DEBUG: [17] ClientConnected Sender# [17:2179:37] Status# ERROR ClientId# [17:2179:37] ServerId# [0:0:0] PipeClient# [17:2179:37] 2026-01-08T15:24:08.600184Z 18 00h00m00.000000s :BS_NODE DEBUG: [18] ClientConnected Sender# [18:2180:37] Status# ERROR ClientId# [18:2180:37] ServerId# [0:0:0] PipeClient# [18:2180:37] 2026-01-08T15:24:08.600220Z 19 00h00m00.000000s :BS_NODE DEBUG: [19] ClientConnected Sender# [19:2181:37] Status# ERROR ClientId# [19:2181:37] ServerId# [0:0:0] PipeClient# [19:2181:37] 2026-01-08T15:24:08.600254Z 20 00h00m00.000000s :BS_NODE DEBUG: [20] ClientConnected Sender# [20:2182:37] Status# ERROR ClientId# [20:2182:37] ServerId# [0:0:0] PipeClient# [20:2182:37] 2026-01-08T15:24:08.600293Z 21 00h00m00.000000s :BS_NODE DEBUG: [21] ClientConnected Sender# [21:2183:37] Status# ERROR ClientId# [21:2183:37] ServerId# [0:0:0] PipeClient# [21:2183:37] 2026-01-08T15:24:08.600332Z 22 00h00m00.000000s :BS_NODE DEBUG: [22] ClientConnected Sender# [22:2184:37] Status# ERROR ClientId# [22:2184:37] ServerId# [0:0:0] PipeClient# [22:2184:37] 2026-01-08T15:24:08.600370Z 23 00h00m00.000000s :BS_NODE DEBUG: [23] ClientConnected Sender# [23:2185:37] Status# ERROR ClientId# [23:2185:37] ServerId# [0:0:0] PipeClient# [23:2185:37] 2026-01-08T15:24:08.600409Z 24 00h00m00.000000s :BS_NODE DEBUG: [24] ClientConnected Sender# [24:2186:37] Status# ERROR ClientId# [24:2186:37] ServerId# [0:0:0] PipeClient# [24:2186:37] 2026-01-08T15:24:08.600461Z 25 00h00m00.000000s :BS_NODE DEBUG: [25] ClientConnected Sender# [25:2187:37] Status# ERROR ClientId# [25:2187:37] ServerId# [0:0:0] PipeClient# [25:2187:37] 2026-01-08T15:24:08.600509Z 26 00h00m00.000000s :BS_NODE DEBUG: [26] ClientConnected Sender# [26:2188:37] Status# ERROR ClientId# [26:2188:37] ServerId# [0:0:0] PipeClient# [26:2188:37] 2026-01-08T15:24:08.600557Z 27 00h00m00.000000s :BS_NODE DEBUG: [27] ClientConnected Sender# [27:2189:37] Status# ERROR ClientId# [27:2189:37] ServerId# [0:0:0] PipeClient# [27:2189:37] 2026-01-08T15:24:08.600599Z 28 00h00m00.000000s :BS_NODE DEBUG: [28] ClientConnected Sender# [28:2190:37] Status# ERROR ClientId# [28:2190:37] ServerId# [0:0:0] PipeClient# [28:2190:37] 2026-01-08T15:24:08.600634Z 29 00h00m00.000000s :BS_NODE DEBUG: [29] ClientConnected Sender# [29:2191:37] Status# ERROR ClientId# [29:2191:37] ServerId# [0:0:0] PipeClient# [29:2191:37] 2026-01-08T15:24:08.600687Z 30 00h00m00.000000s :BS_NODE DEBUG: [30] ClientConnected 
Sender# [30:2192:37] Status# ERROR ClientId# [30:2192:37] ServerId# [0:0:0] PipeClient# [30:2192:37] 2026-01-08T15:24:08.600723Z 31 00h00m00.000000s :BS_NODE DEBUG: [31] ClientConnected Sender# [31:2193:37] Status# ERROR ClientId# [31:2193:37] ServerId# [0:0:0] PipeClient# [31:2193:37] 2026-01-08T15:24:08.600758Z 32 00h00m00.000000s :BS_NODE DEBUG: [32] ClientConnected Sender# [32:2194:37] Status# ERROR ClientId# [32:2194:37] ServerId# [0:0:0] PipeClient# [32:2194:37] 2026-01-08T15:24:08.745470Z 1 00h00m00.002048s :BS_CONTROLLER ERROR: {BSC07@impl.h:2204} ProcessControllerEvent event processing took too much time Type# 268637706 Duration# 0.121446s 2026-01-08T15:24:08.745609Z 1 00h00m00.002048s :BS_CONTROLLER ERROR: {BSC00@bsc.cpp:1009} StateWork event processing took too much time Type# 2146435078 Duration# 0.121605s 2026-01-08T15:24:08.755593Z 1 00h00m00.002560s :BS_NODE DEBUG: [1] CheckState from [1:2264:79] expected 1 current 0 2026-01-08T15:24:08.755652Z 2 00h00m00.002560s :BS_NODE DEBUG: [2] CheckState from [2:2265:38] expected 1 current 0 2026-01-08T15:24:08.755684Z 3 00h00m00.002560s :BS_NODE DEBUG: [3] CheckState from [3:2266:38] expected 1 current 0 2026-01-08T15:24:08.755712Z 4 00h00m00.002560s :BS_NODE DEBUG: [4] CheckState from [4:2267:38] expected 1 current 0 2026-01-08T15:24:08.755740Z 5 00h00m00.002560s :BS_NODE DEBUG: [5] CheckState from [5:2268:38] expected 1 current 0 2026-01-08T15:24:08.755767Z 6 00h00m00.002560s :BS_NODE DEBUG: [6] CheckState from [6:2269:38] expected 1 current 0 2026-01-08T15:24:08.755795Z 7 00h00m00.002560s :BS_NODE DEBUG: [7] CheckState from [ ... 47483668 VDiskId# [80000014:4:0:5:0] DiskIsOk# true 2026-01-08T15:24:24.345991Z 1 05h15m00.120992s :BS_SELFHEAL DEBUG: {BSSH03@self_heal.cpp:116} Reassigner TEvVStatusResult GroupId# 2147483668 Status# OK JoinedGroup# true Replicated# true 2026-01-08T15:24:24.346026Z 1 05h15m00.120992s :BS_SELFHEAL DEBUG: {BSSH02@self_heal.cpp:101} Reassigner ProcessVDiskReply GroupId# 2147483668 VDiskId# [80000014:4:0:6:0] DiskIsOk# true 2026-01-08T15:24:24.346057Z 1 05h15m00.120992s :BS_SELFHEAL DEBUG: {BSSH03@self_heal.cpp:116} Reassigner TEvVStatusResult GroupId# 2147483668 Status# OK JoinedGroup# true Replicated# true 2026-01-08T15:24:24.346085Z 1 05h15m00.120992s :BS_SELFHEAL DEBUG: {BSSH02@self_heal.cpp:101} Reassigner ProcessVDiskReply GroupId# 2147483668 VDiskId# [80000014:4:0:7:0] DiskIsOk# true 2026-01-08T15:24:24.350038Z 1 05h15m00.121504s :BS_SELFHEAL INFO: {BSSH09@self_heal.cpp:214} Reassigner succeeded GroupId# 2147483668 Items# [80000014:4:0:4:0]: 5:1001:1002 -> 15:1000:1016 ConfigTxSeqNo# 505 2026-01-08T15:24:24.350077Z 1 05h15m00.121504s :BS_SELFHEAL DEBUG: {BSSH08@self_heal.cpp:225} Reassigner finished GroupId# 2147483668 Success# true 2026-01-08T15:24:24.350209Z 19 05h15m00.121504s :BS_NODE DEBUG: [19] NodeServiceSetUpdate 2026-01-08T15:24:24.350268Z 19 05h15m00.121504s :BS_NODE DEBUG: [19] VDiskId# [80000014:4:0:6:0] -> [80000014:5:0:6:0] 2026-01-08T15:24:24.365006Z 2 05h15m00.121504s :BS_NODE DEBUG: [2] NodeServiceSetUpdate 2026-01-08T15:24:24.365101Z 2 05h15m00.121504s :BS_NODE DEBUG: [2] VDiskId# [80000014:4:0:1:0] -> [80000014:5:0:1:0] 2026-01-08T15:24:24.365205Z 4 05h15m00.121504s :BS_NODE DEBUG: [4] NodeServiceSetUpdate 2026-01-08T15:24:24.365251Z 4 05h15m00.121504s :BS_NODE DEBUG: [4] VDiskId# [80000014:4:0:3:0] -> [80000014:5:0:3:0] 2026-01-08T15:24:24.365323Z 5 05h15m00.121504s :BS_NODE DEBUG: [5] NodeServiceSetUpdate 2026-01-08T15:24:24.365403Z 6 05h15m00.121504s :BS_NODE DEBUG: [6] 
NodeServiceSetUpdate 2026-01-08T15:24:24.365450Z 6 05h15m00.121504s :BS_NODE DEBUG: [6] VDiskId# [80000014:4:0:5:0] -> [80000014:5:0:5:0] 2026-01-08T15:24:24.365532Z 25 05h15m00.121504s :BS_NODE DEBUG: [25] NodeServiceSetUpdate 2026-01-08T15:24:24.365579Z 25 05h15m00.121504s :BS_NODE DEBUG: [25] VDiskId# [80000014:4:0:0:0] -> [80000014:5:0:0:0] 2026-01-08T15:24:24.365659Z 8 05h15m00.121504s :BS_NODE DEBUG: [8] NodeServiceSetUpdate 2026-01-08T15:24:24.365708Z 8 05h15m00.121504s :BS_NODE DEBUG: [8] VDiskId# [80000014:4:0:7:0] -> [80000014:5:0:7:0] 2026-01-08T15:24:24.365785Z 26 05h15m00.121504s :BS_NODE DEBUG: [26] NodeServiceSetUpdate 2026-01-08T15:24:24.365832Z 26 05h15m00.121504s :BS_NODE DEBUG: [26] VDiskId# [80000014:4:0:2:0] -> [80000014:5:0:2:0] 2026-01-08T15:24:24.365913Z 15 05h15m00.121504s :BS_NODE DEBUG: [15] NodeServiceSetUpdate 2026-01-08T15:24:24.365956Z 15 05h15m00.121504s :BS_NODE DEBUG: [15] VDiskId# [80000014:5:0:4:0] PDiskId# 1000 VSlotId# 1016 created 2026-01-08T15:24:24.366031Z 15 05h15m00.121504s :BS_NODE DEBUG: [15] VDiskId# [80000014:5:0:4:0] status changed to INIT_PENDING 2026-01-08T15:24:24.366336Z 1 05h15m00.121504s :BS_SELFHEAL DEBUG: {BSSH01@self_heal.cpp:76} Reassigner starting GroupId# 2147483652 2026-01-08T15:24:24.367000Z 1 05h15m00.121504s :BS_SELFHEAL DEBUG: {BSSH03@self_heal.cpp:116} Reassigner TEvVStatusResult GroupId# 2147483652 Status# OK JoinedGroup# true Replicated# true 2026-01-08T15:24:24.367044Z 1 05h15m00.121504s :BS_SELFHEAL DEBUG: {BSSH02@self_heal.cpp:101} Reassigner ProcessVDiskReply GroupId# 2147483652 VDiskId# [80000004:4:0:0:0] DiskIsOk# true 2026-01-08T15:24:24.367081Z 1 05h15m00.121504s :BS_SELFHEAL DEBUG: {BSSH03@self_heal.cpp:116} Reassigner TEvVStatusResult GroupId# 2147483652 Status# OK JoinedGroup# true Replicated# true 2026-01-08T15:24:24.367111Z 1 05h15m00.121504s :BS_SELFHEAL DEBUG: {BSSH02@self_heal.cpp:101} Reassigner ProcessVDiskReply GroupId# 2147483652 VDiskId# [80000004:4:0:1:0] DiskIsOk# true 2026-01-08T15:24:24.367142Z 1 05h15m00.121504s :BS_SELFHEAL DEBUG: {BSSH03@self_heal.cpp:116} Reassigner TEvVStatusResult GroupId# 2147483652 Status# OK JoinedGroup# true Replicated# true 2026-01-08T15:24:24.367170Z 1 05h15m00.121504s :BS_SELFHEAL DEBUG: {BSSH02@self_heal.cpp:101} Reassigner ProcessVDiskReply GroupId# 2147483652 VDiskId# [80000004:4:0:2:0] DiskIsOk# true 2026-01-08T15:24:24.367199Z 1 05h15m00.121504s :BS_SELFHEAL DEBUG: {BSSH03@self_heal.cpp:116} Reassigner TEvVStatusResult GroupId# 2147483652 Status# OK JoinedGroup# true Replicated# true 2026-01-08T15:24:24.367228Z 1 05h15m00.121504s :BS_SELFHEAL DEBUG: {BSSH02@self_heal.cpp:101} Reassigner ProcessVDiskReply GroupId# 2147483652 VDiskId# [80000004:4:0:3:0] DiskIsOk# true 2026-01-08T15:24:24.367255Z 1 05h15m00.121504s :BS_SELFHEAL DEBUG: {BSSH03@self_heal.cpp:116} Reassigner TEvVStatusResult GroupId# 2147483652 Status# OK JoinedGroup# true Replicated# true 2026-01-08T15:24:24.367282Z 1 05h15m00.121504s :BS_SELFHEAL DEBUG: {BSSH02@self_heal.cpp:101} Reassigner ProcessVDiskReply GroupId# 2147483652 VDiskId# [80000004:4:0:5:0] DiskIsOk# true 2026-01-08T15:24:24.367310Z 1 05h15m00.121504s :BS_SELFHEAL DEBUG: {BSSH03@self_heal.cpp:116} Reassigner TEvVStatusResult GroupId# 2147483652 Status# OK JoinedGroup# true Replicated# true 2026-01-08T15:24:24.367337Z 1 05h15m00.121504s :BS_SELFHEAL DEBUG: {BSSH02@self_heal.cpp:101} Reassigner ProcessVDiskReply GroupId# 2147483652 VDiskId# [80000004:4:0:6:0] DiskIsOk# true 2026-01-08T15:24:24.367366Z 1 05h15m00.121504s :BS_SELFHEAL 
DEBUG: {BSSH03@self_heal.cpp:116} Reassigner TEvVStatusResult GroupId# 2147483652 Status# OK JoinedGroup# true Replicated# true 2026-01-08T15:24:24.367393Z 1 05h15m00.121504s :BS_SELFHEAL DEBUG: {BSSH02@self_heal.cpp:101} Reassigner ProcessVDiskReply GroupId# 2147483652 VDiskId# [80000004:4:0:7:0] DiskIsOk# true 2026-01-08T15:24:24.376164Z 1 05h15m00.122016s :BS_SELFHEAL INFO: {BSSH09@self_heal.cpp:214} Reassigner succeeded GroupId# 2147483652 Items# [80000004:4:0:4:0]: 5:1001:1000 -> 15:1000:1017 ConfigTxSeqNo# 506 2026-01-08T15:24:24.376209Z 1 05h15m00.122016s :BS_SELFHEAL DEBUG: {BSSH08@self_heal.cpp:225} Reassigner finished GroupId# 2147483652 Success# true 2026-01-08T15:24:24.376353Z 19 05h15m00.122016s :BS_NODE DEBUG: [19] NodeServiceSetUpdate 2026-01-08T15:24:24.376414Z 19 05h15m00.122016s :BS_NODE DEBUG: [19] VDiskId# [80000004:4:0:6:0] -> [80000004:5:0:6:0] 2026-01-08T15:24:24.376507Z 2 05h15m00.122016s :BS_NODE DEBUG: [2] NodeServiceSetUpdate 2026-01-08T15:24:24.376556Z 2 05h15m00.122016s :BS_NODE DEBUG: [2] VDiskId# [80000004:4:0:1:0] -> [80000004:5:0:1:0] 2026-01-08T15:24:24.376635Z 4 05h15m00.122016s :BS_NODE DEBUG: [4] NodeServiceSetUpdate 2026-01-08T15:24:24.376679Z 4 05h15m00.122016s :BS_NODE DEBUG: [4] VDiskId# [80000004:4:0:3:0] -> [80000004:5:0:3:0] 2026-01-08T15:24:24.376737Z 5 05h15m00.122016s :BS_NODE DEBUG: [5] NodeServiceSetUpdate 2026-01-08T15:24:24.376810Z 6 05h15m00.122016s :BS_NODE DEBUG: [6] NodeServiceSetUpdate 2026-01-08T15:24:24.376853Z 6 05h15m00.122016s :BS_NODE DEBUG: [6] VDiskId# [80000004:4:0:5:0] -> [80000004:5:0:5:0] 2026-01-08T15:24:24.376929Z 25 05h15m00.122016s :BS_NODE DEBUG: [25] NodeServiceSetUpdate 2026-01-08T15:24:24.376973Z 25 05h15m00.122016s :BS_NODE DEBUG: [25] VDiskId# [80000004:4:0:2:0] -> [80000004:5:0:2:0] 2026-01-08T15:24:24.377050Z 8 05h15m00.122016s :BS_NODE DEBUG: [8] NodeServiceSetUpdate 2026-01-08T15:24:24.377092Z 8 05h15m00.122016s :BS_NODE DEBUG: [8] VDiskId# [80000004:4:0:7:0] -> [80000004:5:0:7:0] 2026-01-08T15:24:24.377166Z 26 05h15m00.122016s :BS_NODE DEBUG: [26] NodeServiceSetUpdate 2026-01-08T15:24:24.377213Z 26 05h15m00.122016s :BS_NODE DEBUG: [26] VDiskId# [80000004:4:0:0:0] -> [80000004:5:0:0:0] 2026-01-08T15:24:24.377291Z 15 05h15m00.122016s :BS_NODE DEBUG: [15] NodeServiceSetUpdate 2026-01-08T15:24:24.377341Z 15 05h15m00.122016s :BS_NODE DEBUG: [15] VDiskId# [80000004:5:0:4:0] PDiskId# 1000 VSlotId# 1017 created 2026-01-08T15:24:24.377409Z 15 05h15m00.122016s :BS_NODE DEBUG: [15] VDiskId# [80000004:5:0:4:0] status changed to INIT_PENDING 2026-01-08T15:24:24.392485Z 15 05h15m01.246456s :BS_NODE DEBUG: [15] VDiskId# [8000000c:6:0:4:0] status changed to REPLICATING 2026-01-08T15:24:24.393380Z 15 05h15m02.141920s :BS_NODE DEBUG: [15] VDiskId# [8000003c:5:0:4:0] status changed to REPLICATING 2026-01-08T15:24:24.394060Z 15 05h15m02.604432s :BS_NODE DEBUG: [15] VDiskId# [8000002c:5:0:4:0] status changed to REPLICATING 2026-01-08T15:24:24.395982Z 5 05h15m03.488968s :BS_NODE DEBUG: [5] VDiskId# [8000000d:4:0:1:0] status changed to REPLICATING 2026-01-08T15:24:24.396532Z 15 05h15m03.544944s :BS_NODE DEBUG: [15] VDiskId# [8000001c:5:0:4:0] status changed to REPLICATING 2026-01-08T15:24:24.397220Z 15 05h15m03.655504s :BS_NODE DEBUG: [15] VDiskId# [80000014:5:0:4:0] status changed to REPLICATING 2026-01-08T15:24:24.397962Z 15 05h15m04.870016s :BS_NODE DEBUG: [15] VDiskId# [80000004:5:0:4:0] status changed to REPLICATING 2026-01-08T15:24:24.399498Z 15 05h15m05.979480s :BS_NODE DEBUG: [15] VDiskId# [80000034:6:0:4:0] status changed 
to REPLICATING 2026-01-08T15:24:24.400197Z 15 05h15m06.087992s :BS_NODE DEBUG: [15] VDiskId# [80000024:6:0:4:0] status changed to REPLICATING 2026-01-08T15:24:24.400861Z 15 05h15m07.623920s :BS_NODE DEBUG: [15] VDiskId# [8000003c:5:0:4:0] status changed to READY 2026-01-08T15:24:24.402067Z 5 05h15m07.624432s :BS_NODE DEBUG: [5] NodeServiceSetUpdate 2026-01-08T15:24:24.402123Z 5 05h15m07.624432s :BS_NODE DEBUG: [5] VDiskId# [8000003c:4:0:4:0] destroyed 2026-01-08T15:24:24.410698Z 15 05h15m10.007944s :BS_NODE DEBUG: [15] VDiskId# [8000001c:5:0:4:0] status changed to READY 2026-01-08T15:24:24.412190Z 5 05h15m10.008456s :BS_NODE DEBUG: [5] NodeServiceSetUpdate 2026-01-08T15:24:24.412246Z 5 05h15m10.008456s :BS_NODE DEBUG: [5] VDiskId# [8000001c:4:0:4:0] destroyed 2026-01-08T15:24:24.412422Z 5 05h15m13.672968s :BS_NODE DEBUG: [5] VDiskId# [8000000d:4:0:1:0] status changed to READY 2026-01-08T15:24:24.413372Z 5 05h15m13.673480s :BS_NODE DEBUG: [5] NodeServiceSetUpdate 2026-01-08T15:24:24.413420Z 5 05h15m13.673480s :BS_NODE DEBUG: [5] VDiskId# [8000000d:3:0:1:0] destroyed 2026-01-08T15:24:24.418618Z 15 05h15m17.937016s :BS_NODE DEBUG: [15] VDiskId# [80000004:5:0:4:0] status changed to READY 2026-01-08T15:24:24.420036Z 5 05h15m17.937528s :BS_NODE DEBUG: [5] NodeServiceSetUpdate 2026-01-08T15:24:24.420088Z 5 05h15m17.937528s :BS_NODE DEBUG: [5] VDiskId# [80000004:4:0:4:0] destroyed 2026-01-08T15:24:24.420440Z 15 05h15m20.721504s :BS_NODE DEBUG: [15] VDiskId# [80000014:5:0:4:0] status changed to READY 2026-01-08T15:24:24.421510Z 5 05h15m20.722016s :BS_NODE DEBUG: [5] NodeServiceSetUpdate 2026-01-08T15:24:24.421557Z 5 05h15m20.722016s :BS_NODE DEBUG: [5] VDiskId# [80000014:4:0:4:0] destroyed 2026-01-08T15:24:24.421690Z 15 05h15m22.223456s :BS_NODE DEBUG: [15] VDiskId# [8000000c:6:0:4:0] status changed to READY 2026-01-08T15:24:24.426993Z 5 05h15m22.223968s :BS_NODE DEBUG: [5] NodeServiceSetUpdate 2026-01-08T15:24:24.427048Z 5 05h15m22.223968s :BS_NODE DEBUG: [5] VDiskId# [8000000c:5:0:4:0] destroyed 2026-01-08T15:24:24.427921Z 15 05h15m28.017432s :BS_NODE DEBUG: [15] VDiskId# [8000002c:5:0:4:0] status changed to READY 2026-01-08T15:24:24.429035Z 5 05h15m28.017944s :BS_NODE DEBUG: [5] NodeServiceSetUpdate 2026-01-08T15:24:24.429082Z 5 05h15m28.017944s :BS_NODE DEBUG: [5] VDiskId# [8000002c:4:0:4:0] destroyed 2026-01-08T15:24:24.429422Z 15 05h15m29.771992s :BS_NODE DEBUG: [15] VDiskId# [80000024:6:0:4:0] status changed to READY 2026-01-08T15:24:24.432039Z 5 05h15m29.772504s :BS_NODE DEBUG: [5] NodeServiceSetUpdate 2026-01-08T15:24:24.432092Z 5 05h15m29.772504s :BS_NODE DEBUG: [5] VDiskId# [80000024:5:0:4:0] destroyed 2026-01-08T15:24:24.433166Z 15 05h15m35.626480s :BS_NODE DEBUG: [15] VDiskId# [80000034:6:0:4:0] status changed to READY 2026-01-08T15:24:24.434248Z 5 05h15m35.626992s :BS_NODE DEBUG: [5] NodeServiceSetUpdate 2026-01-08T15:24:24.434299Z 5 05h15m35.626992s :BS_NODE DEBUG: [5] VDiskId# [80000034:5:0:4:0] destroyed |83.7%| [TM] {BAZEL_UPLOAD} ydb/core/mind/bscontroller/ut_selfheal/unittest ------- [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/ut_blobstorage/ut_read_only_vdisk/unittest >> ReadOnlyVDisk::TestWrites [GOOD] Test command err: RandomSeed# 4688811170678083315 === Trying to put and get a blob === SEND TEvPut with key [1:1:0:0:0:131072:0] TEvPutResult: TEvPutResult {Id# [1:1:0:0:0:131072:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999988} === Read all 1 blob(s) === SEND TEvGet with key [1:1:0:0:0:131072:0] TEvGetResult: TEvGetResult {Status# OK 
ResponseSz# 1 {[1:1:0:0:0:131072:0] OK Size# 131072 RequestedSize# 131072}} === Putting VDisk #0 to read-only === Setting VDisk read-only to 1 for position 0 Invoking SetVDiskReadOnly for vdisk [82000000:1:0:0:0] === Write 10 blobs, expect some VDisks refuse parts but writes go through === SEND TEvPut with key [1:1:1:0:0:32768:0] 2026-01-08T15:24:21.167762Z 1 00h01m30.060512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) Unavailable in read-only Sender# [1:5323:704] TEvPutResult: TEvPutResult {Id# [1:1:1:0:0:32768:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999988} SEND TEvPut with key [1:1:2:0:0:131072:0] 2026-01-08T15:24:21.190119Z 1 00h01m30.060512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) Unavailable in read-only Sender# [1:5323:704] TEvPutResult: TEvPutResult {Id# [1:1:2:0:0:131072:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999988} SEND TEvPut with key [1:1:3:0:0:32768:0] 2026-01-08T15:24:21.204207Z 1 00h01m30.060512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) Unavailable in read-only Sender# [1:5323:704] TEvPutResult: TEvPutResult {Id# [1:1:3:0:0:32768:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999988} SEND TEvPut with key [1:1:4:0:0:131072:0] 2026-01-08T15:24:21.215876Z 1 00h01m30.060512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) Unavailable in read-only Sender# [1:5323:704] TEvPutResult: TEvPutResult {Id# [1:1:4:0:0:131072:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999988} SEND TEvPut with key [1:1:5:0:0:32768:0] TEvPutResult: TEvPutResult {Id# [1:1:5:0:0:32768:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999988} SEND TEvPut with key [1:1:6:0:0:131072:0] TEvPutResult: TEvPutResult {Id# [1:1:6:0:0:131072:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999988} SEND TEvPut with key [1:1:7:0:0:32768:0] 2026-01-08T15:24:21.245664Z 1 00h01m30.060512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) Unavailable in read-only Sender# [1:5323:704] TEvPutResult: TEvPutResult {Id# [1:1:7:0:0:32768:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999988} SEND TEvPut with key [1:1:8:0:0:131072:0] 2026-01-08T15:24:21.254237Z 1 00h01m30.060512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) Unavailable in read-only Sender# [1:5323:704] TEvPutResult: TEvPutResult {Id# [1:1:8:0:0:131072:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999988} SEND TEvPut with key [1:1:9:0:0:32768:0] 2026-01-08T15:24:21.266200Z 1 00h01m30.060512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) Unavailable in read-only Sender# [1:5323:704] TEvPutResult: TEvPutResult {Id# [1:1:9:0:0:32768:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999988} SEND TEvPut with key [1:1:10:0:0:131072:0] 2026-01-08T15:24:21.282289Z 1 00h01m30.060512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) Unavailable in read-only Sender# [1:5323:704] TEvPutResult: TEvPutResult {Id# [1:1:10:0:0:131072:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999988} === Read all 11 blob(s) === SEND TEvGet with key [1:1:0:0:0:131072:0] TEvGetResult: TEvGetResult {Status# OK ResponseSz# 1 {[1:1:0:0:0:131072:0] OK Size# 131072 RequestedSize# 131072}} SEND TEvGet with key [1:1:1:0:0:32768:0] TEvGetResult: TEvGetResult {Status# OK ResponseSz# 1 {[1:1:1:0:0:32768:0] OK Size# 32768 RequestedSize# 32768}} SEND TEvGet 
with key [1:1:2:0:0:131072:0] TEvGetResult: TEvGetResult {Status# OK ResponseSz# 1 {[1:1:2:0:0:131072:0] OK Size# 131072 RequestedSize# 131072}} SEND TEvGet with key [1:1:3:0:0:32768:0] TEvGetResult: TEvGetResult {Status# OK ResponseSz# 1 {[1:1:3:0:0:32768:0] OK Size# 32768 RequestedSize# 32768}} SEND TEvGet with key [1:1:4:0:0:131072:0] TEvGetResult: TEvGetResult {Status# OK ResponseSz# 1 {[1:1:4:0:0:131072:0] OK Size# 131072 RequestedSize# 131072}} SEND TEvGet with key [1:1:5:0:0:32768:0] TEvGetResult: TEvGetResult {Status# OK ResponseSz# 1 {[1:1:5:0:0:32768:0] OK Size# 32768 RequestedSize# 32768}} SEND TEvGet with key [1:1:6:0:0:131072:0] TEvGetResult: TEvGetResult {Status# OK ResponseSz# 1 {[1:1:6:0:0:131072:0] OK Size# 131072 RequestedSize# 131072}} SEND TEvGet with key [1:1:7:0:0:32768:0] TEvGetResult: TEvGetResult {Status# OK ResponseSz# 1 {[1:1:7:0:0:32768:0] OK Size# 32768 RequestedSize# 32768}} SEND TEvGet with key [1:1:8:0:0:131072:0] TEvGetResult: TEvGetResult {Status# OK ResponseSz# 1 {[1:1:8:0:0:131072:0] OK Size# 131072 RequestedSize# 131072}} SEND TEvGet with key [1:1:9:0:0:32768:0] TEvGetResult: TEvGetResult {Status# OK ResponseSz# 1 {[1:1:9:0:0:32768:0] OK Size# 32768 RequestedSize# 32768}} SEND TEvGet with key [1:1:10:0:0:131072:0] TEvGetResult: TEvGetResult {Status# OK ResponseSz# 1 {[1:1:10:0:0:131072:0] OK Size# 131072 RequestedSize# 131072}} === Put 2 more VDisks to read-only === Setting VDisk read-only to 1 for position 1 Invoking SetVDiskReadOnly for vdisk [82000000:1:0:1:0] Setting VDisk read-only to 1 for position 2 Invoking SetVDiskReadOnly for vdisk [82000000:1:0:2:0] === Write 10 more blobs, expect errors === SEND TEvPut with key [1:1:11:0:0:32768:0] 2026-01-08T15:24:23.127066Z 1 00h03m30.110512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) Unavailable in read-only Sender# [1:5323:704] 2026-01-08T15:24:23.127199Z 3 00h03m30.110512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:2:0]: (2181038080) Unavailable in read-only Sender# [1:5337:718] 2026-01-08T15:24:23.127324Z 2 00h03m30.110512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:1:0]: (2181038080) Unavailable in read-only Sender# [1:5330:711] 2026-01-08T15:24:23.128092Z 1 00h03m30.110512s :BS_PROXY_PUT ERROR: [d17bf90cd42c52c0] Result# TEvPutResult {Id# [1:1:11:0:0:32768:0] Status# ERROR StatusFlags# { } ErrorReason# "TRestoreStrategy saw optimisticState# EBS_DISINTEGRATED GroupId# 2181038080 BlobId# [1:1:11:0:0:32768:0] Reported ErrorReasons# [ { OrderNumber# 0 VDiskId# [82000000:1:0:0:0] NodeId# 1 ErrorReasons# [ "VDisk is in read-only mode", ] } { OrderNumber# 1 VDiskId# [82000000:1:0:1:0] NodeId# 2 ErrorReasons# [ "VDisk is in read-only mode", ] } { OrderNumber# 2 VDiskId# [82000000:1:0:2:0] NodeId# 3 ErrorReasons# [ "VDisk is in read-only mode", ] } ] Part situations# [ { OrderNumber# 5 Situations# SUUUUU } { OrderNumber# 6 Situations# USUUUU } { OrderNumber# 7 Situations# UUSUUU } { OrderNumber# 0 Situations# UUUEUU } { OrderNumber# 1 Situations# UUUUEU } { OrderNumber# 2 Situations# UUUUUE } { OrderNumber# 3 Situations# UUUSUU } { OrderNumber# 4 Situations# UUUUUS } ] " ApproximateFreeSpaceShare# 0.999988} GroupId# 2181038080 Marker# BPP12 2026-01-08T15:24:23.129735Z 1 00h03m30.110512s :BS_PROXY_PUT ERROR: {BPP72@dsproxy_put.cpp:483} Query history GroupId# 2181038080 HandleClass# TabletLog Tactic# Default History# THistory { Entries# [ TEvVPut{ TimestampMs# 0.368 sample PartId# [1:1:11:0:0:32768:6] QueryCount# 1 VDiskId# [82000000:1:0:2:0] NodeId# 3 } TEvVPut{ 
TimestampMs# 0.369 sample PartId# [1:1:11:0:0:32768:3] QueryCount# 1 VDiskId# [82000000:1:0:7:0] NodeId# 8 } TEvVPut{ TimestampMs# 0.369 sample PartId# [1:1:11:0:0:32768:2] QueryCount# 1 VDiskId# [82000000:1:0:6:0] NodeId# 7 } TEvVPut{ TimestampMs# 0.369 sample PartId# [1:1:11:0:0:32768:5] QueryCount# 1 VDiskId# [82000000:1:0:1:0] NodeId# 2 } TEvVPut{ TimestampMs# 0.369 sample PartId# [1:1:11:0:0:32768:1] QueryCount# 1 VDiskId# [82000000:1:0:5:0] NodeId# 6 } TEvVPut{ TimestampMs# 0.37 sample PartId# [1:1:11:0:0:32768:4] QueryCount# 1 VDiskId# [82000000:1:0:0:0] NodeId# 1 } TEvVPutResult{ TimestampMs# 1.266 VDiskId# [82000000:1:0:0:0] NodeId# 1 Status# ERROR ErrorReason# "VDisk is in read-only mode" } TEvVPut{ TimestampMs# 1.316 sample PartId# [1:1:11:0:0:32768:4] QueryCount# 1 VDiskId# [82000000:1:0:3:0] NodeId# 4 } TEvVPutResult{ TimestampMs# 1.464 VDiskId# [82000000:1:0:2:0] NodeId# 3 Status# ERROR ErrorReason# "VDisk is in read-only mode" } TEvVPut{ TimestampMs# 1.493 sample PartId# [1:1:11:0:0:32768:6] QueryCount# 1 VDiskId# [82000000:1:0:4:0] NodeId# 5 } TEvVPutResult{ TimestampMs# 1.521 VDiskId# [82000000:1:0:1:0] NodeId# 2 Status# ERROR ErrorReason# "VDisk is in read-only mode" } ] } TEvPutResult: TEvPutResult {Id# [1:1:11:0:0:32768:0] Status# ERROR StatusFlags# { } ErrorReason# "TRestoreStrategy saw optimisticState# EBS_DISINTEGRATED GroupId# 2181038080 BlobId# [1:1:11:0:0:32768:0] Reported ErrorReasons# [ { OrderNumber# 0 VDiskId# [82000000:1:0:0:0] NodeId# 1 ErrorReasons# [ "VDisk is in read-only mode", ] } { OrderNumber# 1 VDiskId# [82000000:1:0:1:0] NodeId# 2 ErrorReasons# [ "VDisk is in read-only mode", ] } { OrderNumber# 2 VDiskId# [82000000:1:0:2:0] NodeId# 3 ErrorReasons# [ "VDisk is in read-only mode", ] } ] Part situations# [ { OrderNumber# 5 Situations# SUUUUU } { OrderNumber# 6 Situations# USUUUU } { OrderNumber# 7 Situations# UUSUUU } { OrderNumber# 0 Situations# UUUEUU } { OrderNumber# 1 Situations# UUUUEU } { OrderNumber# 2 Situations# UUUUUE } { OrderNumber# 3 Situations# UUUSUU } { OrderNumber# 4 Situations# UUUUUS } ] " ApproximateFreeSpaceShare# 0.999988} SEND TEvPut with key [1:1:12:0:0:131072:0] 2026-01-08T15:24:23.131489Z 1 00h03m30.110512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) Unavailable in read-only Sender# [1:5323:704] 2026-01-08T15:24:23.131649Z 2 00h03m30.110512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:1:0]: (2181038080) Unavailable in read-only Sender# [1:5330:711] 2026-01-08T15:24:23.132731Z 3 00h03m30.110512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:2:0]: (2181038080) Unavailable in read-only Sender# [1:5337:718] TEvPutResult: TEvPutResult {Id# [1:1:12:0:0:131072:0] Status# ERROR StatusFlags# { } ErrorReason# "TRestoreStrategy saw optimisticState# EBS_DISINTEGRATED GroupId# 2181038080 BlobId# [1:1:12:0:0:131072:0] Reported ErrorReasons# [ { OrderNumber# 0 VDiskId# [82000000:1:0:0:0] NodeId# 1 ErrorReasons# [ "VDisk is in read-only mode", ] } { OrderNumber# 1 VDiskId# [82000000:1:0:1:0] NodeId# 2 ErrorReasons# [ "VDisk is in read-only mode", ] } { OrderNumber# 2 VDiskId# [82000000:1:0:2:0] NodeId# 3 ErrorReasons# [ "VDisk is in read-only mode", ] } ] Part situations# [ { OrderNumber# 4 Situations# SUUUUU } { OrderNumber# 5 Situations# USUUUU } { OrderNumber# 6 Situations# UUSUUU } { OrderNumber# 7 Situations# UUUSUU } { OrderNumber# 0 Situations# UUUUEU } { OrderNumber# 1 Situations# UUUUUE } { OrderNumber# 2 Situations# UUUUEU } { OrderNumber# 3 Situations# UUUUUS } ] " 
ApproximateFreeSpaceShare# 0.999988} SEND TEvPut with key [1:1:13:0:0:32768:0] 2026-01-08T15:24:23.134544Z 1 00h03m30.110512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) Unavailable in read-only Sender# [1:5323:704] 2026-01-08T15:24:23.135387Z 2 00h03m30.110512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:1:0]: (2181038080) Unavailable in read-only Sender# [1:5330:711] 2026-01-08T15:24:23.136219Z 3 00h03m30.110512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:2:0]: (2181038080) Unavailable in read-only Sender# [1:5337:718] TEvPutResult: TEvPutResult {Id# [1:1:13:0:0:32768:0] Status# ERROR StatusFlags# { } ErrorReason# "TRestoreStrategy saw optimisticState# EBS_DISINTEGRATED GroupId# 2181038080 BlobId# [1:1:13:0:0:32768:0] Reported ErrorReasons# [ { OrderNumber# 0 VDiskId# [82000000:1:0:0:0] NodeId# 1 ErrorReasons# [ "VDisk is in read-only mode", ] } { OrderNumber# 1 VDiskId# [82000000:1:0:1:0] NodeId# 2 ErrorReasons# [ "VDisk is in read-only mode", ] } { OrderNu ... ey [1:1:5:0:0:32768:0] TEvGetResult: TEvGetResult {Status# OK ResponseSz# 1 {[1:1:5:0:0:32768:0] OK Size# 32768 RequestedSize# 32768}} SEND TEvGet with key [1:1:6:0:0:131072:0] TEvGetResult: TEvGetResult {Status# OK ResponseSz# 1 {[1:1:6:0:0:131072:0] OK Size# 131072 RequestedSize# 131072}} SEND TEvGet with key [1:1:7:0:0:32768:0] TEvGetResult: TEvGetResult {Status# OK ResponseSz# 1 {[1:1:7:0:0:32768:0] OK Size# 32768 RequestedSize# 32768}} SEND TEvGet with key [1:1:8:0:0:131072:0] TEvGetResult: TEvGetResult {Status# OK ResponseSz# 1 {[1:1:8:0:0:131072:0] OK Size# 131072 RequestedSize# 131072}} SEND TEvGet with key [1:1:9:0:0:32768:0] TEvGetResult: TEvGetResult {Status# OK ResponseSz# 1 {[1:1:9:0:0:32768:0] OK Size# 32768 RequestedSize# 32768}} SEND TEvGet with key [1:1:10:0:0:131072:0] TEvGetResult: TEvGetResult {Status# OK ResponseSz# 1 {[1:1:10:0:0:131072:0] OK Size# 131072 RequestedSize# 131072}} SEND TEvGet with key [1:1:11:0:0:32768:0] TEvGetResult: TEvGetResult {Status# OK ResponseSz# 1 {[1:1:11:0:0:32768:0] OK Size# 32768 RequestedSize# 32768}} SEND TEvGet with key [1:1:12:0:0:131072:0] TEvGetResult: TEvGetResult {Status# OK ResponseSz# 1 {[1:1:12:0:0:131072:0] OK Size# 131072 RequestedSize# 131072}} SEND TEvGet with key [1:1:13:0:0:32768:0] TEvGetResult: TEvGetResult {Status# OK ResponseSz# 1 {[1:1:13:0:0:32768:0] OK Size# 32768 RequestedSize# 32768}} SEND TEvGet with key [1:1:14:0:0:131072:0] TEvGetResult: TEvGetResult {Status# OK ResponseSz# 1 {[1:1:14:0:0:131072:0] OK Size# 131072 RequestedSize# 131072}} SEND TEvGet with key [1:1:15:0:0:32768:0] TEvGetResult: TEvGetResult {Status# OK ResponseSz# 1 {[1:1:15:0:0:32768:0] OK Size# 32768 RequestedSize# 32768}} SEND TEvGet with key [1:1:16:0:0:131072:0] TEvGetResult: TEvGetResult {Status# OK ResponseSz# 1 {[1:1:16:0:0:131072:0] OK Size# 131072 RequestedSize# 131072}} SEND TEvGet with key [1:1:17:0:0:32768:0] TEvGetResult: TEvGetResult {Status# OK ResponseSz# 1 {[1:1:17:0:0:32768:0] OK Size# 32768 RequestedSize# 32768}} SEND TEvGet with key [1:1:18:0:0:131072:0] TEvGetResult: TEvGetResult {Status# OK ResponseSz# 1 {[1:1:18:0:0:131072:0] OK Size# 131072 RequestedSize# 131072}} SEND TEvGet with key [1:1:19:0:0:32768:0] TEvGetResult: TEvGetResult {Status# OK ResponseSz# 1 {[1:1:19:0:0:32768:0] OK Size# 32768 RequestedSize# 32768}} SEND TEvGet with key [1:1:20:0:0:131072:0] TEvGetResult: TEvGetResult {Status# OK ResponseSz# 1 {[1:1:20:0:0:131072:0] OK Size# 131072 RequestedSize# 131072}} === Restoring to normal VDisk #0 === Setting 
VDisk read-only to 0 for position 0 Invoking SetVDiskReadOnly for vdisk [82000000:1:0:0:0] === Write 10 blobs, expect some VDisks refuse parts but the writes still go through === SEND TEvPut with key [1:1:21:0:0:32768:0] 2026-01-08T15:24:25.676924Z 3 00h08m00.161536s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:2:0]: (2181038080) Unavailable in read-only Sender# [1:5337:718] 2026-01-08T15:24:25.677099Z 2 00h08m00.161536s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:1:0]: (2181038080) Unavailable in read-only Sender# [1:5330:711] TEvPutResult: TEvPutResult {Id# [1:1:21:0:0:32768:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999963} SEND TEvPut with key [1:1:22:0:0:131072:0] 2026-01-08T15:24:25.680589Z 2 00h08m00.161536s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:1:0]: (2181038080) Unavailable in read-only Sender# [1:5330:711] 2026-01-08T15:24:25.681935Z 3 00h08m00.161536s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:2:0]: (2181038080) Unavailable in read-only Sender# [1:5337:718] TEvPutResult: TEvPutResult {Id# [1:1:22:0:0:131072:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999963} SEND TEvPut with key [1:1:23:0:0:32768:0] TEvPutResult: TEvPutResult {Id# [1:1:23:0:0:32768:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999963} SEND TEvPut with key [1:1:24:0:0:131072:0] 2026-01-08T15:24:25.691855Z 3 00h08m00.161536s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:2:0]: (2181038080) Unavailable in read-only Sender# [1:5337:718] TEvPutResult: TEvPutResult {Id# [1:1:24:0:0:131072:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999963} SEND TEvPut with key [1:1:25:0:0:32768:0] 2026-01-08T15:24:25.694790Z 3 00h08m00.161536s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:2:0]: (2181038080) Unavailable in read-only Sender# [1:5337:718] 2026-01-08T15:24:25.694895Z 2 00h08m00.161536s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:1:0]: (2181038080) Unavailable in read-only Sender# [1:5330:711] TEvPutResult: TEvPutResult {Id# [1:1:25:0:0:32768:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999963} SEND TEvPut with key [1:1:26:0:0:131072:0] 2026-01-08T15:24:25.697519Z 3 00h08m00.161536s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:2:0]: (2181038080) Unavailable in read-only Sender# [1:5337:718] 2026-01-08T15:24:25.697606Z 2 00h08m00.161536s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:1:0]: (2181038080) Unavailable in read-only Sender# [1:5330:711] TEvPutResult: TEvPutResult {Id# [1:1:26:0:0:131072:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999963} SEND TEvPut with key [1:1:27:0:0:32768:0] 2026-01-08T15:24:25.700210Z 3 00h08m00.161536s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:2:0]: (2181038080) Unavailable in read-only Sender# [1:5337:718] 2026-01-08T15:24:25.700296Z 2 00h08m00.161536s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:1:0]: (2181038080) Unavailable in read-only Sender# [1:5330:711] TEvPutResult: TEvPutResult {Id# [1:1:27:0:0:32768:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999963} SEND TEvPut with key [1:1:28:0:0:131072:0] 2026-01-08T15:24:25.702747Z 2 00h08m00.161536s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:1:0]: (2181038080) Unavailable in read-only Sender# [1:5330:711] 2026-01-08T15:24:25.702981Z 3 00h08m00.161536s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:2:0]: (2181038080) Unavailable in read-only Sender# [1:5337:718] TEvPutResult: TEvPutResult {Id# [1:1:28:0:0:131072:0] Status# 
OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999963} SEND TEvPut with key [1:1:29:0:0:32768:0] 2026-01-08T15:24:25.705623Z 3 00h08m00.161536s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:2:0]: (2181038080) Unavailable in read-only Sender# [1:5337:718] 2026-01-08T15:24:25.705732Z 2 00h08m00.161536s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:1:0]: (2181038080) Unavailable in read-only Sender# [1:5330:711] TEvPutResult: TEvPutResult {Id# [1:1:29:0:0:32768:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999963} SEND TEvPut with key [1:1:30:0:0:131072:0] 2026-01-08T15:24:25.708026Z 3 00h08m00.161536s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:2:0]: (2181038080) Unavailable in read-only Sender# [1:5337:718] 2026-01-08T15:24:25.708150Z 2 00h08m00.161536s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:1:0]: (2181038080) Unavailable in read-only Sender# [1:5330:711] TEvPutResult: TEvPutResult {Id# [1:1:30:0:0:131072:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999963} === Read all 31 blob(s) === SEND TEvGet with key [1:1:0:0:0:131072:0] TEvGetResult: TEvGetResult {Status# OK ResponseSz# 1 {[1:1:0:0:0:131072:0] OK Size# 131072 RequestedSize# 131072}} SEND TEvGet with key [1:1:1:0:0:32768:0] TEvGetResult: TEvGetResult {Status# OK ResponseSz# 1 {[1:1:1:0:0:32768:0] OK Size# 32768 RequestedSize# 32768}} SEND TEvGet with key [1:1:2:0:0:131072:0] TEvGetResult: TEvGetResult {Status# OK ResponseSz# 1 {[1:1:2:0:0:131072:0] OK Size# 131072 RequestedSize# 131072}} SEND TEvGet with key [1:1:3:0:0:32768:0] TEvGetResult: TEvGetResult {Status# OK ResponseSz# 1 {[1:1:3:0:0:32768:0] OK Size# 32768 RequestedSize# 32768}} SEND TEvGet with key [1:1:4:0:0:131072:0] TEvGetResult: TEvGetResult {Status# OK ResponseSz# 1 {[1:1:4:0:0:131072:0] OK Size# 131072 RequestedSize# 131072}} SEND TEvGet with key [1:1:5:0:0:32768:0] TEvGetResult: TEvGetResult {Status# OK ResponseSz# 1 {[1:1:5:0:0:32768:0] OK Size# 32768 RequestedSize# 32768}} SEND TEvGet with key [1:1:6:0:0:131072:0] TEvGetResult: TEvGetResult {Status# OK ResponseSz# 1 {[1:1:6:0:0:131072:0] OK Size# 131072 RequestedSize# 131072}} SEND TEvGet with key [1:1:7:0:0:32768:0] TEvGetResult: TEvGetResult {Status# OK ResponseSz# 1 {[1:1:7:0:0:32768:0] OK Size# 32768 RequestedSize# 32768}} SEND TEvGet with key [1:1:8:0:0:131072:0] TEvGetResult: TEvGetResult {Status# OK ResponseSz# 1 {[1:1:8:0:0:131072:0] OK Size# 131072 RequestedSize# 131072}} SEND TEvGet with key [1:1:9:0:0:32768:0] TEvGetResult: TEvGetResult {Status# OK ResponseSz# 1 {[1:1:9:0:0:32768:0] OK Size# 32768 RequestedSize# 32768}} SEND TEvGet with key [1:1:10:0:0:131072:0] TEvGetResult: TEvGetResult {Status# OK ResponseSz# 1 {[1:1:10:0:0:131072:0] OK Size# 131072 RequestedSize# 131072}} SEND TEvGet with key [1:1:11:0:0:32768:0] TEvGetResult: TEvGetResult {Status# OK ResponseSz# 1 {[1:1:11:0:0:32768:0] OK Size# 32768 RequestedSize# 32768}} SEND TEvGet with key [1:1:12:0:0:131072:0] TEvGetResult: TEvGetResult {Status# OK ResponseSz# 1 {[1:1:12:0:0:131072:0] OK Size# 131072 RequestedSize# 131072}} SEND TEvGet with key [1:1:13:0:0:32768:0] TEvGetResult: TEvGetResult {Status# OK ResponseSz# 1 {[1:1:13:0:0:32768:0] OK Size# 32768 RequestedSize# 32768}} SEND TEvGet with key [1:1:14:0:0:131072:0] TEvGetResult: TEvGetResult {Status# OK ResponseSz# 1 {[1:1:14:0:0:131072:0] OK Size# 131072 RequestedSize# 131072}} SEND TEvGet with key [1:1:15:0:0:32768:0] TEvGetResult: TEvGetResult {Status# OK ResponseSz# 1 {[1:1:15:0:0:32768:0] OK Size# 32768 RequestedSize# 
32768}} SEND TEvGet with key [1:1:16:0:0:131072:0] TEvGetResult: TEvGetResult {Status# OK ResponseSz# 1 {[1:1:16:0:0:131072:0] OK Size# 131072 RequestedSize# 131072}} SEND TEvGet with key [1:1:17:0:0:32768:0] TEvGetResult: TEvGetResult {Status# OK ResponseSz# 1 {[1:1:17:0:0:32768:0] OK Size# 32768 RequestedSize# 32768}} SEND TEvGet with key [1:1:18:0:0:131072:0] TEvGetResult: TEvGetResult {Status# OK ResponseSz# 1 {[1:1:18:0:0:131072:0] OK Size# 131072 RequestedSize# 131072}} SEND TEvGet with key [1:1:19:0:0:32768:0] TEvGetResult: TEvGetResult {Status# OK ResponseSz# 1 {[1:1:19:0:0:32768:0] OK Size# 32768 RequestedSize# 32768}} SEND TEvGet with key [1:1:20:0:0:131072:0] TEvGetResult: TEvGetResult {Status# OK ResponseSz# 1 {[1:1:20:0:0:131072:0] OK Size# 131072 RequestedSize# 131072}} SEND TEvGet with key [1:1:21:0:0:32768:0] TEvGetResult: TEvGetResult {Status# OK ResponseSz# 1 {[1:1:21:0:0:32768:0] OK Size# 32768 RequestedSize# 32768}} SEND TEvGet with key [1:1:22:0:0:131072:0] TEvGetResult: TEvGetResult {Status# OK ResponseSz# 1 {[1:1:22:0:0:131072:0] OK Size# 131072 RequestedSize# 131072}} SEND TEvGet with key [1:1:23:0:0:32768:0] TEvGetResult: TEvGetResult {Status# OK ResponseSz# 1 {[1:1:23:0:0:32768:0] OK Size# 32768 RequestedSize# 32768}} SEND TEvGet with key [1:1:24:0:0:131072:0] TEvGetResult: TEvGetResult {Status# OK ResponseSz# 1 {[1:1:24:0:0:131072:0] OK Size# 131072 RequestedSize# 131072}} SEND TEvGet with key [1:1:25:0:0:32768:0] TEvGetResult: TEvGetResult {Status# OK ResponseSz# 1 {[1:1:25:0:0:32768:0] OK Size# 32768 RequestedSize# 32768}} SEND TEvGet with key [1:1:26:0:0:131072:0] TEvGetResult: TEvGetResult {Status# OK ResponseSz# 1 {[1:1:26:0:0:131072:0] OK Size# 131072 RequestedSize# 131072}} SEND TEvGet with key [1:1:27:0:0:32768:0] TEvGetResult: TEvGetResult {Status# OK ResponseSz# 1 {[1:1:27:0:0:32768:0] OK Size# 32768 RequestedSize# 32768}} SEND TEvGet with key [1:1:28:0:0:131072:0] TEvGetResult: TEvGetResult {Status# OK ResponseSz# 1 {[1:1:28:0:0:131072:0] OK Size# 131072 RequestedSize# 131072}} SEND TEvGet with key [1:1:29:0:0:32768:0] TEvGetResult: TEvGetResult {Status# OK ResponseSz# 1 {[1:1:29:0:0:32768:0] OK Size# 32768 RequestedSize# 32768}} SEND TEvGet with key [1:1:30:0:0:131072:0] TEvGetResult: TEvGetResult {Status# OK ResponseSz# 1 {[1:1:30:0:0:131072:0] OK Size# 131072 RequestedSize# 131072}} ------- [TS] {asan, default-linux-x86_64, release} ydb/core/wrappers/ut/unittest >> TS3WrapperTests::CopyPartUpload [GOOD] Test command err: 2026-01-08T15:24:26.337422Z node 1 :S3_WRAPPER NOTICE: s3_storage.h:75: Request: uuid# 4A7C5BDF-995B-486A-A89E-D1104F44852D, request# PutObject { Bucket: TEST Key: key } REQUEST: PUT /TEST/key HTTP/1.1 HEADERS: Host: localhost:12059 Accept: */* Connection: Upgrade, HTTP2-Settings Upgrade: h2c HTTP2-Settings: AAMAAABkAAQAoAAAAAIAAAAA amz-sdk-invocation-id: 23F65AD0-CAA8-47C0-A742-D2D06CA62828 amz-sdk-request: attempt=1 content-length: 4 content-md5: hBotaJrYa9FhFEdFPCLG/A== content-type: binary/octet-stream user-agent: aws-sdk-cpp/1.11.37 Linux/5.15.0-157-generic x86_64 Clang/20.1.8 x-amz-storage-class: STANDARD S3_MOCK::HttpServeWrite: /TEST/key / / 4 2026-01-08T15:24:26.358429Z node 1 :S3_WRAPPER NOTICE: s3_storage.h:63: Response: uuid# 4A7C5BDF-995B-486A-A89E-D1104F44852D, response# PutObjectResult { ETag: 841a2d689ad86bd1611447453c22c6fc } 2026-01-08T15:24:26.359004Z node 1 :S3_WRAPPER NOTICE: s3_storage.h:75: Request: uuid# 79B7B727-0184-45AC-A9F0-38DAF824DBAE, request# CreateMultipartUpload { Bucket: TEST Key: key1 } REQUEST: 
POST /TEST/key1?uploads HTTP/1.1 HEADERS: Host: localhost:12059 Accept: */* Connection: Upgrade, HTTP2-Settings Upgrade: h2c HTTP2-Settings: AAMAAABkAAQAoAAAAAIAAAAA amz-sdk-invocation-id: 2BC96D55-3381-4B31-9C09-0E10C0908756 amz-sdk-request: attempt=1 content-length: 0 content-type: application/xml user-agent: aws-sdk-cpp/1.11.37 Linux/5.15.0-157-generic x86_64 Clang/20.1.8 x-amz-api-version: 2006-03-01 x-amz-storage-class: STANDARD S3_MOCK::HttpServeAction: 4 / /TEST/key1 / uploads= 2026-01-08T15:24:26.366978Z node 1 :S3_WRAPPER NOTICE: s3_storage.h:63: Response: uuid# 79B7B727-0184-45AC-A9F0-38DAF824DBAE, response# CreateMultipartUploadResult { Bucket: Key: TEST/key1 UploadId: 1 } 2026-01-08T15:24:26.367495Z node 1 :S3_WRAPPER NOTICE: s3_storage.h:75: Request: uuid# B1BF1737-2CCF-4082-8181-78F8F2B392EA, request# UploadPartCopy { Bucket: TEST Key: key1 UploadId: 1 PartNumber: 1 } REQUEST: PUT /TEST/key1?partNumber=1&uploadId=1 HTTP/1.1 HEADERS: Host: localhost:12059 Accept: */* Connection: Upgrade, HTTP2-Settings Upgrade: h2c HTTP2-Settings: AAMAAABkAAQAoAAAAAIAAAAA amz-sdk-invocation-id: F0024E33-FAE2-4459-9FC9-88DD3D7E173A amz-sdk-request: attempt=1 content-length: 0 content-type: application/xml user-agent: aws-sdk-cpp/1.11.37 Linux/5.15.0-157-generic x86_64 Clang/20.1.8 x-amz-api-version: 2006-03-01 x-amz-copy-source: /TEST/key x-amz-copy-source-range: bytes=1-2 S3_MOCK::HttpServeWrite: /TEST/key1 / partNumber=1&uploadId=1 / 0 2026-01-08T15:24:26.386035Z node 1 :S3_WRAPPER NOTICE: s3_storage.h:63: Response: uuid# B1BF1737-2CCF-4082-8181-78F8F2B392EA, response# UploadPartCopyResult { } 2026-01-08T15:24:26.386629Z node 1 :S3_WRAPPER NOTICE: s3_storage.h:75: Request: uuid# 7A213983-61EC-4749-97C4-3421BC945482, request# CompleteMultipartUpload { Bucket: TEST Key: key1 UploadId: 1 MultipartUpload: { Parts: [afc7e8a98f75755e513d9d5ead888e1d] } } REQUEST: POST /TEST/key1?uploadId=1 HTTP/1.1 HEADERS: Host: localhost:12059 Accept: */* Connection: Upgrade, HTTP2-Settings Upgrade: h2c HTTP2-Settings: AAMAAABkAAQAoAAAAAIAAAAA amz-sdk-invocation-id: BD878DD4-35AD-474E-8ABB-2CFFF1481C31 amz-sdk-request: attempt=1 content-length: 235 content-type: application/xml user-agent: aws-sdk-cpp/1.11.37 Linux/5.15.0-157-generic x86_64 Clang/20.1.8 x-amz-api-version: 2006-03-01 S3_MOCK::HttpServeAction: 4 / /TEST/key1 / uploadId=1 2026-01-08T15:24:26.402242Z node 1 :S3_WRAPPER NOTICE: s3_storage.h:63: Response: uuid# 7A213983-61EC-4749-97C4-3421BC945482, response# CompleteMultipartUploadResult { Bucket: Key: TEST/key1 ETag: afc7e8a98f75755e513d9d5ead888e1d } 2026-01-08T15:24:26.402765Z node 1 :S3_WRAPPER NOTICE: s3_storage.h:75: Request: uuid# 18E4169E-EAD2-4558-B17A-2CB6C01C9A79, request# GetObject { Bucket: TEST Key: key1 Range: bytes=0-1 } REQUEST: GET /TEST/key1 HTTP/1.1 HEADERS: Host: localhost:12059 Accept: */* Connection: Upgrade, HTTP2-Settings Upgrade: h2c HTTP2-Settings: AAMAAABkAAQAoAAAAAIAAAAA amz-sdk-invocation-id: 79C6BD9E-EE38-44F8-9E26-854C72435A88 amz-sdk-request: attempt=1 content-type: application/xml range: bytes=0-1 user-agent: aws-sdk-cpp/1.11.37 Linux/5.15.0-157-generic x86_64 Clang/20.1.8 x-amz-api-version: 2006-03-01 S3_MOCK::HttpServeRead: /TEST/key1 / 2 2026-01-08T15:24:26.408356Z node 1 :S3_WRAPPER NOTICE: s3_storage.h:63: Response: uuid# 18E4169E-EAD2-4558-B17A-2CB6C01C9A79, response# GetObjectResult { } |83.7%| [TS] {asan, default-linux-x86_64, release} ydb/core/wrappers/ut/unittest >> TS3WrapperTests::AbortMultipartUpload |83.7%| [TS] {asan, default-linux-x86_64, release} 
ydb/core/wrappers/ut/unittest >> TS3WrapperTests::PutObject [GOOD] >> TBsVDiskRangeHuge::Simple3PutRangeGetAllBackwardFresh [GOOD] >> TBsVDiskRangeHuge::Simple3PutRangeGetAllBackwardCompaction >> TBlobStorageGroupInfoTest::SubgroupPartLayout [GOOD] |83.7%| [TM] {BAZEL_UPLOAD} ydb/core/blobstorage/ut_blobstorage/ut_read_only_vdisk/unittest |83.7%| [TS] {BAZEL_UPLOAD} ydb/core/wrappers/ut/unittest |83.7%| [TS] {BAZEL_UPLOAD} ydb/core/wrappers/ut/unittest |83.7%| [TS] {BAZEL_UPLOAD} ydb/core/wrappers/ut/unittest >> TStateStorageRingGroupState::TestProxyConfigMismatch >> TS3WrapperTests::GetObject |83.7%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/blobstorage/pdisk/ut/ydb-core-blobstorage-pdisk-ut |83.7%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/blobstorage/pdisk/ut/ydb-core-blobstorage-pdisk-ut |83.7%| [TS] {asan, default-linux-x86_64, release} ydb/core/wrappers/ut/unittest |83.7%| [LD] {RESULT} $(B)/ydb/core/blobstorage/pdisk/ut/ydb-core-blobstorage-pdisk-ut >> ReadOnlyVDisk::TestGarbageCollect [GOOD] >> TS3WrapperTests::AbortMultipartUpload [GOOD] |83.7%| [TS] {asan, default-linux-x86_64, release} ydb/core/wrappers/ut/unittest >> TBsVDiskRange::Simple3PutRangeGetMiddleForwardCompaction [GOOD] >> TBsVDiskRange::Simple3PutRangeGetMiddleBackwardFresh ------- [TS] {asan, default-linux-x86_64, release} ydb/core/wrappers/ut/unittest >> TS3WrapperTests::GetUnknownObject [GOOD] Test command err: 2026-01-08T15:24:26.334078Z node 1 :S3_WRAPPER NOTICE: s3_storage.h:75: Request: uuid# 62683170-0F35-49D7-88C6-6C21799F1EEC, request# GetObject { Bucket: TEST Key: key Range: bytes=0-3 } REQUEST: GET /TEST/key HTTP/1.1 HEADERS: Host: localhost:5237 Accept: */* Connection: Upgrade, HTTP2-Settings Upgrade: h2c HTTP2-Settings: AAMAAABkAAQAoAAAAAIAAAAA amz-sdk-invocation-id: 01717FD7-2127-4526-A0FA-F925EF3C2C41 amz-sdk-request: attempt=1 content-type: application/xml range: bytes=0-3 user-agent: aws-sdk-cpp/1.11.37 Linux/5.15.0-157-generic x86_64 Clang/20.1.8 x-amz-api-version: 2006-03-01 2026-01-08T15:24:26.345003Z node 1 :S3_WRAPPER NOTICE: s3_storage.h:63: Response: uuid# 62683170-0F35-49D7-88C6-6C21799F1EEC, response# No response body. 
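
[Editor's note] The TS3WrapperTests output above records a plain S3 HTTP exchange against a local mock (PutObject, ranged GetObject, multipart upload), with aws-sdk-cpp/1.11.37 visible as the client in the logged user-agent headers. The sketch below reproduces the client side of the PutObject/GetObject exchange using public aws-sdk-cpp calls only; it is a minimal illustration, not YDB's internal TS3Wrapper interface, and the endpoint/bucket/key values are taken from the log as placeholders.

```cpp
// Minimal sketch of the S3 calls seen in the log, using plain aws-sdk-cpp.
// Not YDB's TS3Wrapper API; endpoint/bucket/key mirror the logged mock run.
#include <aws/core/Aws.h>
#include <aws/core/utils/memory/stl/AWSStringStream.h>
#include <aws/s3/S3Client.h>
#include <aws/s3/model/PutObjectRequest.h>
#include <aws/s3/model/GetObjectRequest.h>
#include <iostream>

int main() {
    Aws::SDKOptions options;
    Aws::InitAPI(options);
    {
        Aws::Client::ClientConfiguration cfg;
        cfg.endpointOverride = "localhost:12059";  // the S3_MOCK server in the log
        cfg.scheme = Aws::Http::Scheme::HTTP;
        // Note: the logged requests are path-style ("PUT /TEST/key"); a real run
        // against a localhost mock must also disable virtual-host addressing,
        // which is configured differently across SDK versions.
        Aws::S3::S3Client client(cfg);

        // Request: PutObject { Bucket: TEST Key: key }
        Aws::S3::Model::PutObjectRequest put;
        put.SetBucket("TEST");
        put.SetKey("key");
        auto body = Aws::MakeShared<Aws::StringStream>("put-body");
        *body << "test";  // 4 bytes, matching "content-length: 4" in the log
        put.SetBody(body);
        auto putOutcome = client.PutObject(put);
        if (putOutcome.IsSuccess()) {
            std::cout << "ETag: " << putOutcome.GetResult().GetETag() << "\n";
        }

        // Request: GetObject { Bucket: TEST Key: key Range: bytes=0-3 }
        Aws::S3::Model::GetObjectRequest get;
        get.SetBucket("TEST");
        get.SetKey("key");
        get.SetRange("bytes=0-3");  // the ranged read shown in the log
        auto getOutcome = client.GetObject(get);
        if (!getOutcome.IsSuccess()) {
            // e.g. the GetUnknownObject case above: "No response body."
            std::cerr << getOutcome.GetError().GetMessage() << "\n";
        }
    }
    Aws::ShutdownAPI(options);
    return 0;
}
```

The multipart flow logged in TS3WrapperTests::CopyPartUpload (CreateMultipartUpload, UploadPartCopy with x-amz-copy-source-range, CompleteMultipartUpload) follows the same request/response pattern with the corresponding request types.
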
|83.7%| [TS] {BAZEL_UPLOAD} ydb/core/wrappers/ut/unittest >> TStateStorage2RingGroups::TestStateStorageReplyOnceWriteOnly |83.7%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/groupinfo/ut/unittest >> TBlobStorageGroupInfoTest::SubgroupPartLayout [GOOD] >> TStateStorageRingGroupState::TestProxyNotifyReplicaConfigChanged1 >> TStateStorageRingGroupState::TestProxyConfigMismatch [GOOD] |83.8%| [TS] {BAZEL_UPLOAD} ydb/core/wrappers/ut/unittest >> TS3WrapperTests::GetObject [GOOD] >> BridgeGet::PartRestorationAcrossBridge [GOOD] >> TS3WrapperTests::AbortUnknownUpload |83.8%| [TS] {BAZEL_UPLOAD} ydb/core/wrappers/ut/unittest |83.8%| [TM] {BAZEL_UPLOAD} ydb/core/blobstorage/groupinfo/ut/unittest >> TStateStorage2RingGroups::TestStateStorageReplyOnceWriteOnlyReverseEventsOrder >> TStateStorage2RingGroups::TestStateStorageReplyOnceWriteOnly [GOOD] >> TS3WrapperTests::AbortUnknownUpload [GOOD] >> TStateStorageRingGroupState::TestProxyNotifyReplicaConfigChanged1 [GOOD] >> TStateStorage2RingGroups::TestStateStorageReplyOnceWriteOnlyReverseEventsOrder [GOOD] ------- [TS] {asan, default-linux-x86_64, release} ydb/core/wrappers/ut/unittest >> TS3WrapperTests::PutObject [GOOD] Test command err: 2026-01-08T15:24:27.604228Z node 1 :S3_WRAPPER NOTICE: s3_storage.h:75: Request: uuid# E1207FDF-AB69-4F84-898E-78453D776C9C, request# PutObject { Bucket: TEST Key: key } REQUEST: PUT /TEST/key HTTP/1.1 HEADERS: Host: localhost:64841 Accept: */* Connection: Upgrade, HTTP2-Settings Upgrade: h2c HTTP2-Settings: AAMAAABkAAQAoAAAAAIAAAAA amz-sdk-invocation-id: D5E1AC70-F239-4D2B-8821-80CCCBA0E0DC amz-sdk-request: attempt=1 content-length: 4 content-md5: hBotaJrYa9FhFEdFPCLG/A== content-type: binary/octet-stream user-agent: aws-sdk-cpp/1.11.37 Linux/5.15.0-157-generic x86_64 Clang/20.1.8 x-amz-storage-class: STANDARD S3_MOCK::HttpServeWrite: /TEST/key / / 4 2026-01-08T15:24:27.638965Z node 1 :S3_WRAPPER NOTICE: s3_storage.h:63: Response: uuid# E1207FDF-AB69-4F84-898E-78453D776C9C, response# PutObjectResult { ETag: 841a2d689ad86bd1611447453c22c6fc } ------- [TS] {asan, default-linux-x86_64, release} ydb/core/wrappers/ut/unittest >> TS3WrapperTests::AbortMultipartUpload [GOOD] Test command err: 2026-01-08T15:24:28.176017Z node 1 :S3_WRAPPER NOTICE: s3_storage.h:75: Request: uuid# BD5CE438-57AC-4390-BCF3-AFBA48DF6777, request# CreateMultipartUpload { Bucket: TEST Key: key } REQUEST: POST /TEST/key?uploads HTTP/1.1 HEADERS: Host: localhost:3752 Accept: */* Connection: Upgrade, HTTP2-Settings Upgrade: h2c HTTP2-Settings: AAMAAABkAAQAoAAAAAIAAAAA amz-sdk-invocation-id: F11CFDB4-E3E9-4887-989A-26303BC39E5B amz-sdk-request: attempt=1 content-length: 0 content-type: application/xml user-agent: aws-sdk-cpp/1.11.37 Linux/5.15.0-157-generic x86_64 Clang/20.1.8 x-amz-api-version: 2006-03-01 x-amz-storage-class: STANDARD S3_MOCK::HttpServeAction: 4 / /TEST/key / uploads= 2026-01-08T15:24:28.193691Z node 1 :S3_WRAPPER NOTICE: s3_storage.h:63: Response: uuid# BD5CE438-57AC-4390-BCF3-AFBA48DF6777, response# CreateMultipartUploadResult { Bucket: Key: TEST/key UploadId: 1 } 2026-01-08T15:24:28.198643Z node 1 :S3_WRAPPER NOTICE: s3_storage.h:75: Request: uuid# F9A20555-2C27-4824-85E8-083F9AF19377, request# AbortMultipartUpload { Bucket: TEST Key: key UploadId: 1 } REQUEST: DELETE /TEST/key?uploadId=1 HTTP/1.1 HEADERS: Host: localhost:3752 Accept: */* Connection: Upgrade, HTTP2-Settings Upgrade: h2c HTTP2-Settings: AAMAAABkAAQAoAAAAAIAAAAA amz-sdk-invocation-id: 8E8B5994-412A-4980-A372-55ED1320558A amz-sdk-request: 
attempt=1 content-type: application/xml user-agent: aws-sdk-cpp/1.11.37 Linux/5.15.0-157-generic x86_64 Clang/20.1.8 x-amz-api-version: 2006-03-01 S3_MOCK::HttpServeAction: 6 / /TEST/key / uploadId=1 2026-01-08T15:24:28.221166Z node 1 :S3_WRAPPER NOTICE: s3_storage.h:63: Response: uuid# F9A20555-2C27-4824-85E8-083F9AF19377, response# AbortMultipartUploadResult { } 2026-01-08T15:24:28.222589Z node 1 :S3_WRAPPER NOTICE: s3_storage.h:75: Request: uuid# F120D37F-FB56-42B9-9444-168434AA1D40, request# HeadObject { Bucket: TEST Key: key } REQUEST: HEAD /TEST/key HTTP/1.1 HEADERS: Host: localhost:3752 Accept: */* Connection: Upgrade, HTTP2-Settings Upgrade: h2c HTTP2-Settings: AAMAAABkAAQAoAAAAAIAAAAA amz-sdk-invocation-id: 3135BEC8-3BA1-4A79-A31C-688082F3BAAB amz-sdk-request: attempt=1 content-type: application/xml user-agent: aws-sdk-cpp/1.11.37 Linux/5.15.0-157-generic x86_64 Clang/20.1.8 x-amz-api-version: 2006-03-01 2026-01-08T15:24:28.242958Z node 1 :S3_WRAPPER NOTICE: s3_storage.h:63: Response: uuid# F120D37F-FB56-42B9-9444-168434AA1D40, response# No response body. |83.8%| [TS] {BAZEL_UPLOAD} ydb/core/wrappers/ut/unittest |83.8%| [TS] {BAZEL_UPLOAD} ydb/core/wrappers/ut/unittest ------- [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/ut_blobstorage/ut_read_only_vdisk/unittest >> ReadOnlyVDisk::TestGarbageCollect [GOOD] Test command err: RandomSeed# 7578716761398184886 SEND TEvPut with key [1:1:0:0:0:131072:0] TEvPutResult: TEvPutResult {Id# [1:1:0:0:0:131072:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999988} SEND TEvPut with key [1:1:1:0:0:32768:0] TEvPutResult: TEvPutResult {Id# [1:1:1:0:0:32768:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999988} === Read all 2 blob(s) === SEND TEvGet with key [1:1:0:0:0:131072:0] TEvGetResult: TEvGetResult {Status# OK ResponseSz# 1 {[1:1:0:0:0:131072:0] OK Size# 131072 RequestedSize# 131072}} SEND TEvGet with key [1:1:1:0:0:32768:0] TEvGetResult: TEvGetResult {Status# OK ResponseSz# 1 {[1:1:1:0:0:32768:0] OK Size# 32768 RequestedSize# 32768}} SEND TEvGet with key [1:1:1:0:0:1:0] TEvGetResult: TEvGetResult {Status# OK ResponseSz# 1 {[1:1:1:0:0:1:0] NODATA Size# 0}} Setting VDisk read-only to 1 for position 0 Invoking SetVDiskReadOnly for vdisk [82000000:1:0:0:0] SEND TEvPut with key [1:1:2:0:0:131072:0] 2026-01-08T15:24:10.597096Z 1 00h01m40.100000s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) Unavailable in read-only Sender# [1:5324:704] TEvPutResult: TEvPutResult {Id# [1:1:2:0:0:131072:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999988} 2026-01-08T15:24:10.618200Z 1 00h01m40.100000s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) Unavailable in read-only Sender# [1:5324:704] SEND TEvGet with key [1:1:2:0:0:1:0] TEvGetResult: TEvGetResult {Status# OK ResponseSz# 1 {[1:1:2:0:0:1:0] NODATA Size# 0}} Setting VDisk read-only to 1 for position 1 Invoking SetVDiskReadOnly for vdisk [82000000:1:0:1:0] SEND TEvPut with key [1:1:3:0:0:32768:0] 2026-01-08T15:24:12.430352Z 1 00h03m20.160512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) Unavailable in read-only Sender# [1:5324:704] 2026-01-08T15:24:12.431236Z 2 00h03m20.160512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:1:0]: (2181038080) Unavailable in read-only Sender# [1:5331:711] TEvPutResult: TEvPutResult {Id# [1:1:3:0:0:32768:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999988} 2026-01-08T15:24:13.227779Z 1 00h04m20.161024s :BS_SKELETON ERROR: 
PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) Unavailable in read-only Sender# [1:5324:704] 2026-01-08T15:24:13.227957Z 2 00h04m20.161024s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:1:0]: (2181038080) Unavailable in read-only Sender# [1:5331:711] SEND TEvGet with key [1:1:3:0:0:1:0] TEvGetResult: TEvGetResult {Status# OK ResponseSz# 1 {[1:1:3:0:0:1:0] NODATA Size# 0}} Setting VDisk read-only to 1 for position 2 Invoking SetVDiskReadOnly for vdisk [82000000:1:0:2:0] SEND TEvPut with key [1:1:4:0:0:131072:0] 2026-01-08T15:24:13.693019Z 1 00h05m00.200000s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) Unavailable in read-only Sender# [1:5324:704] 2026-01-08T15:24:13.694071Z 2 00h05m00.200000s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:1:0]: (2181038080) Unavailable in read-only Sender# [1:5331:711] 2026-01-08T15:24:13.695073Z 3 00h05m00.200000s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:2:0]: (2181038080) Unavailable in read-only Sender# [1:5338:718] 2026-01-08T15:24:13.695347Z 1 00h05m00.200000s :BS_PROXY_PUT ERROR: [ef5bf34f429f222e] Result# TEvPutResult {Id# [1:1:4:0:0:131072:0] Status# ERROR StatusFlags# { } ErrorReason# "TRestoreStrategy saw optimisticState# EBS_DISINTEGRATED GroupId# 2181038080 BlobId# [1:1:4:0:0:131072:0] Reported ErrorReasons# [ { OrderNumber# 0 VDiskId# [82000000:1:0:0:0] NodeId# 1 ErrorReasons# [ "VDisk is in read-only mode", ] } { OrderNumber# 1 VDiskId# [82000000:1:0:1:0] NodeId# 2 ErrorReasons# [ "VDisk is in read-only mode", ] } { OrderNumber# 2 VDiskId# [82000000:1:0:2:0] NodeId# 3 ErrorReasons# [ "VDisk is in read-only mode", ] } ] Part situations# [ { OrderNumber# 3 Situations# PUUUUU } { OrderNumber# 4 Situations# UPUUUU } { OrderNumber# 5 Situations# UUPUUU } { OrderNumber# 6 Situations# UUUPUU } { OrderNumber# 7 Situations# UUUUPU } { OrderNumber# 0 Situations# UUUUUE } { OrderNumber# 1 Situations# UUUUUE } { OrderNumber# 2 Situations# UUUUUE } ] " ApproximateFreeSpaceShare# 0.999988} GroupId# 2181038080 Marker# BPP12 2026-01-08T15:24:13.696938Z 1 00h05m00.200000s :BS_PROXY_PUT ERROR: {BPP72@dsproxy_put.cpp:483} Query history GroupId# 2181038080 HandleClass# TabletLog Tactic# Default History# THistory { Entries# [ TEvVPut{ TimestampMs# 0.529 sample PartId# [1:1:4:0:0:131072:6] QueryCount# 1 VDiskId# [82000000:1:0:0:0] NodeId# 1 } TEvVPut{ TimestampMs# 0.53 sample PartId# [1:1:4:0:0:131072:5] QueryCount# 1 VDiskId# [82000000:1:0:7:0] NodeId# 8 } TEvVPut{ TimestampMs# 0.53 sample PartId# [1:1:4:0:0:131072:4] QueryCount# 1 VDiskId# [82000000:1:0:6:0] NodeId# 7 } TEvVPut{ TimestampMs# 0.53 sample PartId# [1:1:4:0:0:131072:3] QueryCount# 1 VDiskId# [82000000:1:0:5:0] NodeId# 6 } TEvVPut{ TimestampMs# 0.531 sample PartId# [1:1:4:0:0:131072:2] QueryCount# 1 VDiskId# [82000000:1:0:4:0] NodeId# 5 } TEvVPut{ TimestampMs# 0.531 sample PartId# [1:1:4:0:0:131072:1] QueryCount# 1 VDiskId# [82000000:1:0:3:0] NodeId# 4 } TEvVPutResult{ TimestampMs# 1.121 VDiskId# [82000000:1:0:0:0] NodeId# 1 Status# ERROR ErrorReason# "VDisk is in read-only mode" } TEvVPut{ TimestampMs# 1.163 sample PartId# [1:1:4:0:0:131072:6] QueryCount# 1 VDiskId# [82000000:1:0:1:0] NodeId# 2 } TEvVPutResult{ TimestampMs# 2.407 VDiskId# [82000000:1:0:1:0] NodeId# 2 Status# ERROR ErrorReason# "VDisk is in read-only mode" } TEvVPut{ TimestampMs# 2.45 sample PartId# [1:1:4:0:0:131072:6] QueryCount# 1 VDiskId# [82000000:1:0:2:0] NodeId# 3 } TEvVPutResult{ TimestampMs# 2.748 VDiskId# [82000000:1:0:7:0] NodeId# 8 Status# OK } TEvVPutResult{ 
TimestampMs# 2.763 VDiskId# [82000000:1:0:6:0] NodeId# 7 Status# OK } TEvVPutResult{ TimestampMs# 2.774 VDiskId# [82000000:1:0:5:0] NodeId# 6 Status# OK } TEvVPutResult{ TimestampMs# 2.789 VDiskId# [82000000:1:0:4:0] NodeId# 5 Status# OK } TEvVPutResult{ TimestampMs# 2.818 VDiskId# [82000000:1:0:3:0] NodeId# 4 Status# OK } TEvVPutResult{ TimestampMs# 2.951 VDiskId# [82000000:1:0:2:0] NodeId# 3 Status# ERROR ErrorReason# "VDisk is in read-only mode" } ] } TEvPutResult: TEvPutResult {Id# [1:1:4:0:0:131072:0] Status# ERROR StatusFlags# { } ErrorReason# "TRestoreStrategy saw optimisticState# EBS_DISINTEGRATED GroupId# 2181038080 BlobId# [1:1:4:0:0:131072:0] Reported ErrorReasons# [ { OrderNumber# 0 VDiskId# [82000000:1:0:0:0] NodeId# 1 ErrorReasons# [ "VDisk is in read-only mode", ] } { OrderNumber# 1 VDiskId# [82000000:1:0:1:0] NodeId# 2 ErrorReasons# [ "VDisk is in read-only mode", ] } { OrderNumber# 2 VDiskId# [82000000:1:0:2:0] NodeId# 3 ErrorReasons# [ "VDisk is in read-only mode", ] } ] Part situations# [ { OrderNumber# 3 Situations# PUUUUU } { OrderNumber# 4 Situations# UPUUUU } { OrderNumber# 5 Situations# UUPUUU } { OrderNumber# 6 Situations# UUUPUU } { OrderNumber# 7 Situations# UUUUPU } { OrderNumber# 0 Situations# UUUUUE } { OrderNumber# 1 Situations# UUUUUE } { OrderNumber# 2 Situations# UUUUUE } ] " ApproximateFreeSpaceShare# 0.999988} 2026-01-08T15:24:14.464881Z 1 00h06m00.210512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) Unavailable in read-only Sender# [1:5324:704] 2026-01-08T15:24:14.465071Z 2 00h06m00.210512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:1:0]: (2181038080) Unavailable in read-only Sender# [1:5331:711] 2026-01-08T15:24:14.465120Z 3 00h06m00.210512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:2:0]: (2181038080) Unavailable in read-only Sender# [1:5338:718] === Putting VDisk #3 to read-only === Setting VDisk read-only to 1 for position 3 Invoking SetVDiskReadOnly for vdisk [82000000:1:0:3:0] 2026-01-08T15:24:15.955036Z 1 00h07m40.260512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) Unavailable in read-only Sender# [1:5324:704] 2026-01-08T15:24:15.955217Z 2 00h07m40.260512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:1:0]: (2181038080) Unavailable in read-only Sender# [1:5331:711] 2026-01-08T15:24:15.955266Z 3 00h07m40.260512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:2:0]: (2181038080) Unavailable in read-only Sender# [1:5338:718] 2026-01-08T15:24:15.955311Z 4 00h07m40.260512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:3:0]: (2181038080) Unavailable in read-only Sender# [1:5345:725] === Putting VDisk #4 to read-only === Setting VDisk read-only to 1 for position 4 Invoking SetVDiskReadOnly for vdisk [82000000:1:0:4:0] 2026-01-08T15:24:16.445185Z 1 00h08m20.262048s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) Unavailable in read-only Sender# [1:5324:704] 2026-01-08T15:24:16.445387Z 2 00h08m20.262048s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:1:0]: (2181038080) Unavailable in read-only Sender# [1:5331:711] 2026-01-08T15:24:16.445439Z 3 00h08m20.262048s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:2:0]: (2181038080) Unavailable in read-only Sender# [1:5338:718] 2026-01-08T15:24:16.445487Z 4 00h08m20.262048s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:3:0]: (2181038080) Unavailable in read-only Sender# [1:5345:725] 2026-01-08T15:24:16.445533Z 5 00h08m20.262048s :BS_SKELETON ERROR: PDiskId# 1000 
VDISK[82000000:_:0:4:0]: (2181038080) Unavailable in read-only Sender# [1:5352:732] === Putting VDisk #5 to read-only === Setting VDisk read-only to 1 for position 5 Invoking SetVDiskReadOnly for vdisk [82000000:1:0:5:0] 2026-01-08T15:24:16.855553Z 1 00h09m00.310512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) Unavailable in read-only Sender# [1:5324:704] 2026-01-08T15:24:16.855739Z 2 00h09m00.310512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:1:0]: (2181038080) Unavailable in read-only Sender# [1:5331:711] 2026-01-08T15:24:16.855791Z 3 00h09m00.310512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:2:0]: (2181038080) Unavailable in read-only Sender# [1:5338:718] 2026-01-08T15:24:16.855841Z 4 00h09m00.310512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:3:0]: (2181038080) Unavailable in read-only Sender# [1:5345:725] 2026-01-08T15:24:16.855887Z 5 00h09m00.310512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:4:0]: (2181038080) Unavailable in read-only Sender# [1:5352:732] 2026-01-08T15:24:16.855935Z 6 00h09m00.310512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:5:0]: (2181038080) Unavailable in read-only Sender# [1:5359:739] === Putting VDisk #6 to read-only === Setting VDisk read-only to 1 for position 6 Invoking SetVDiskReadOnly for vdisk [82000000:1:0:6:0] 2026-01-08T15:24:17.293591Z 1 00h09m40.312048s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) Unavailable in read-only Sender# [1:5324:704] 2026-01-08T15:24:17.293772Z 2 00h09m40.312048s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:1:0]: (2181038080) Unavailable in read-only Sender# [1:5331:711] 2026-01-08T15:24:17.293822Z 3 00h09m40.312048s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:2:0]: (2181038080) Unavailable in read-only Sender# [1:5338:718] 2026-01-08T15:24:17.293869Z 4 00h09m40.312048s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:3:0]: (2181038080) Unavailable in read-only Sender# [1:5345:725] 2026-01-08T15:24:17.293916Z 5 00h09m40.312048s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:4:0]: (2181038080) Unavailable in read-only Sender# [1:5352:732] 2026-01-08T15:24:17.293962Z 6 00h09m40.312048s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:5:0]: (2181038080) Unavailable in read-only Sender# [1:5359:739] 2026-01-08T15:24:17.294007Z 7 00h09m40.312048s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:6:0]: (2181038080) Unavailable in read-only Sender# [1:5366:746] === Putting VDisk #0 to normal === Setting VDisk read-only to 0 for position 0 Invoking SetVDiskReadOnly for vdisk [82000000:1:0:0:0] 2026-01-08T15:24:17.609329Z 2 00h10m20.360512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:1:0]: (2181038080) Unavailable in read-only Sender# [1:5331:711] 2026-01-08T15:24:17.609435Z 3 00h10m20.360512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:2:0]: (2181038080) Unavailable in read-only Sender# [1:5338:718] 2026-01-08T15:24:17.609482Z 4 00h10m20.360512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:3:0]: (2181038080) Unavailable in read-only Sender# [1:5345:725] 2026-01-08T15:24:17.609530Z 5 00h10m20.360512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:4:0]: (2181038080) Unavailable in read-only Sender# [1:5352:732] 2026-01-08T15:24:17.609573Z 6 00h10m20.360512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:5:0]: (2181038080) Unavailable in read-only Sender# [1:5359:739] 2026-01-08T15:24:17.609616Z 7 00h10m20.360512s :BS_SKELETON ERROR: PDiskId# 1000 
VDISK[82000000:_:0:6:0]: (2181038080) Unavailable in read-only Sender# [1:5366:746] === Putting VDisk #1 to normal === Setting VDisk read-only to 0 for position 1 Invoking SetVDiskReadOnly for vdisk [82000000:1:0:1:0] 2026-01-08T15:24:18.494306Z 3 00h11m00.400000s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:2:0]: (2181038080) Unavailable in read-only Sender# [1:5338:718] 2026-01-08T15:24:18.494413Z 4 00h11m00.400000s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:3:0]: (2181038080) Unavailable in read-only Sender# [1:5345:725] 2026-01-08T15:24:18.494468Z 5 00h11m00.400000s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:4:0]: (2181038080) Unavailable in read-only Sender# [1:5352:732] 2026-01-08T15:24:18.494518Z 6 00h11m00.400000s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:5:0]: (2181038080) Unavailable in read-only Sender# [1:5359:739] 2026-01-08T15:24:18.494567Z 7 00h11m00.400000s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:6:0]: (2181038080) Unavailable in read-only Sender# [1:5366:746] === Putting VDisk #2 to normal === Setting VDisk read-only to 0 for position 2 Invoking SetVDiskReadOnly for vdisk [82000000:1:0:2:0] 2026-01-08T15:24:19.077946Z 4 00h11m40.410512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:3:0]: (2181038080) Unavailable in read-only Sender# [1:5345:725] 2026-01-08T15:24:19.078043Z 5 00h11m40.410512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:4:0]: (2181038080) Unavailable in read-only Sender# [1:5352:732] 2026-01-08T15:24:19.078097Z 6 00h11m40.410512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:5:0]: (2181038080) Unavailable in read-only Sender# [1:5359:739] 2026-01-08T15:24:19.078148Z 7 00h11m40.410512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:6:0]: (2181038080) Unavailable in read-only Sender# [1:5366:746] === Putting VDisk #3 to normal === Setting VDisk read-only to 0 for position 3 Invoking SetVDiskReadOnly for vdisk [82000000:1:0:3:0] 2026-01-08T15:24:19.597234Z 5 00h12m20.450512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:4:0]: (2181038080) Unavailable in read-only Sender# [1:5352:732] 2026-01-08T15:24:19.597336Z 6 00h12m20.450512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:5:0]: (2181038080) Unavailable in read-only Sender# [1:5359:739] 2026-01-08T15:24:19.597409Z 7 00h12m20.450512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:6:0]: (2181038080) Unavailable in read-only Sender# [1:5366:746] Setting VDisk read-only to 0 for position 4 Invoking SetVDiskReadOnly for vdisk [82000000:1:0:4:0] 2026-01-08T15:24:22.138229Z 6 00h14m00.461536s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:5:0]: (2181038080) Unavailable in read-only Sender# [1:5359:739] 2026-01-08T15:24:22.155715Z 7 00h14m00.461536s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:6:0]: (2181038080) Unavailable in read-only Sender# [1:5366:746] SEND TEvGet with key [1:1:3:0:0:1:0] TEvGetResult: TEvGetResult {Status# OK ResponseSz# 1 {[1:1:3:0:0:1:0] NODATA Size# 0}} Setting VDisk read-only to 0 for position 5 Invoking SetVDiskReadOnly for vdisk [82000000:1:0:5:0] 2026-01-08T15:24:23.744995Z 7 00h14m40.500000s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:6:0]: (2181038080) Unavailable in read-only Sender# [1:5366:746] SEND TEvGet with key [1:1:3:0:0:1:0] TEvGetResult: TEvGetResult {Status# OK ResponseSz# 1 {[1:1:3:0:0:1:0] NODATA Size# 0}} Setting VDisk read-only to 0 for position 6 Invoking SetVDiskReadOnly for vdisk [82000000:1:0:6:0] SEND TEvGet with key [1:1:3:0:0:1:0] 
TEvGetResult: TEvGetResult {Status# OK ResponseSz# 1 {[1:1:3:0:0:1:0] NODATA Size# 0}} SEND TEvPut with key [1:1:4:0:0:131072:0] TEvPutResult: TEvPutResult {Id# [1:1:4:0:0:131072:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999976} SEND TEvGet with key [1:1:4:0:0:1:0] TEvGetResult: TEvGetResult {Status# OK ResponseSz# 1 {[1:1:4:0:0:1:0] NODATA Size# 0}} >> TStateStorageRingGroupState::TestStateStorageUpdateSigConfigVersionChanged >> TStateStorageRingGroupState::TestProxyConfigMismatchNotSent ------- [TS] {asan, default-linux-x86_64, release} ydb/core/wrappers/ut/unittest >> TS3WrapperTests::GetObject [GOOD] Test command err: 2026-01-08T15:24:28.616101Z node 1 :S3_WRAPPER NOTICE: s3_storage.h:75: Request: uuid# 41468AA1-905D-4B03-8C54-CA0A316835B4, request# PutObject { Bucket: TEST Key: key } REQUEST: PUT /TEST/key HTTP/1.1 HEADERS: Host: localhost:24943 Accept: */* Connection: Upgrade, HTTP2-Settings Upgrade: h2c HTTP2-Settings: AAMAAABkAAQAoAAAAAIAAAAA amz-sdk-invocation-id: B7EFB533-906F-4E76-87BC-816CAFD2F2DA amz-sdk-request: attempt=1 content-length: 4 content-md5: hBotaJrYa9FhFEdFPCLG/A== content-type: binary/octet-stream user-agent: aws-sdk-cpp/1.11.37 Linux/5.15.0-157-generic x86_64 Clang/20.1.8 x-amz-storage-class: STANDARD S3_MOCK::HttpServeWrite: /TEST/key / / 4 2026-01-08T15:24:28.643025Z node 1 :S3_WRAPPER NOTICE: s3_storage.h:63: Response: uuid# 41468AA1-905D-4B03-8C54-CA0A316835B4, response# PutObjectResult { ETag: 841a2d689ad86bd1611447453c22c6fc } 2026-01-08T15:24:28.643597Z node 1 :S3_WRAPPER NOTICE: s3_storage.h:75: Request: uuid# 4C338ABA-14A2-4C71-B1C5-C7ED6F01BD54, request# GetObject { Bucket: TEST Key: key Range: bytes=0-3 } REQUEST: GET /TEST/key HTTP/1.1 HEADERS: Host: localhost:24943 Accept: */* Connection: Upgrade, HTTP2-Settings Upgrade: h2c HTTP2-Settings: AAMAAABkAAQAoAAAAAIAAAAA amz-sdk-invocation-id: 92795074-2241-4181-A5F3-5FD71558F12C amz-sdk-request: attempt=1 content-type: application/xml range: bytes=0-3 user-agent: aws-sdk-cpp/1.11.37 Linux/5.15.0-157-generic x86_64 Clang/20.1.8 x-amz-api-version: 2006-03-01 S3_MOCK::HttpServeRead: /TEST/key / 4 2026-01-08T15:24:28.662931Z node 1 :S3_WRAPPER NOTICE: s3_storage.h:63: Response: uuid# 4C338ABA-14A2-4C71-B1C5-C7ED6F01BD54, response# GetObjectResult { } ------- [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/ut_blobstorage/ut_statestorage/unittest >> TStateStorageRingGroupState::TestProxyConfigMismatch [GOOD] Test command err: RandomSeed# 10160389073825235039 2026-01-08T15:24:28.565603Z 2 00h00m00.000000s :BS_NODE DEBUG: {NWDC15@distconf.cpp:427} StateFunc Type# 268639239 Sender# [1:220:36] SessionId# [2:131:1] Cookie# 12154453481573180787 2026-01-08T15:24:28.565679Z 2 00h00m00.000000s :BS_NODE DEBUG: {NWDC54@distconf_binding.cpp:373} SubscribeToPeerNode NodeId# 1 SessionId# [2:131:1] Inserted# false Subscription# {SessionId# [2:131:1] SubscriptionCookie# 0} NextSubscribeCookie# 8 2026-01-08T15:24:28.589600Z 2 00h00m00.000000s :BS_NODE DEBUG: {NWDC17@distconf_binding.cpp:437} TEvNodeConfigReversePush NodeId# 1 Cookie# 12154453481573180787 SessionId# [2:131:1] Binding# {1.0/12154453481573180787@[2:131:1]} Record# {Rejected: true } 2026-01-08T15:24:28.589718Z 2 00h00m00.000000s :BS_NODE DEBUG: {NWDC03@distconf_binding.cpp:403} AbortBinding Binding# {1.0/12154453481573180787@[2:131:1]} Reason# binding rejected by peer 2026-01-08T15:24:28.589775Z 2 00h00m00.000000s :BS_NODE DEBUG: {NWDC24@distconf_scatter_gather.cpp:125} AbortAllScatterTasks Binding# 
{1.0/12154453481573180787@[2:131:1]} 2026-01-08T15:24:28.589845Z 2 00h00m00.000000s :BS_NODE DEBUG: {NWDC55@distconf_binding.cpp:345} UnsubscribeInterconnect NodeId# 1 Subscription# {SessionId# [2:131:1] SubscriptionCookie# 0} 2026-01-08T15:24:28.589900Z 2 00h00m00.000000s :BS_NODE WARN: {NWDC01@distconf.cpp:423} StateFunc too long Type# 268639239 Duration# 0.024346s 2026-01-08T15:24:28.590055Z 8 00h00m00.000000s :BS_NODE DEBUG: {NWDC15@distconf.cpp:427} StateFunc Type# 268639239 Sender# [5:248:20] SessionId# [8:61:5] Cookie# 5275644117557875179 2026-01-08T15:24:28.590103Z 8 00h00m00.000000s :BS_NODE DEBUG: {NWDC54@distconf_binding.cpp:373} SubscribeToPeerNode NodeId# 5 SessionId# [8:61:5] Inserted# false Subscription# {SessionId# [8:61:5] SubscriptionCookie# 0} NextSubscribeCookie# 2 2026-01-08T15:24:28.590178Z 8 00h00m00.000000s :BS_NODE DEBUG: {NWDC17@distconf_binding.cpp:437} TEvNodeConfigReversePush NodeId# 5 Cookie# 5275644117557875179 SessionId# [8:61:5] Binding# {5.1/5275644117557875179@[8:61:5]} Record# {RootNodeId: 2 } 2026-01-08T15:24:28.621749Z 1 00h00m00.000000s :STATESTORAGE DEBUG: Replica::Handle ev: {EvReplicaLookup TabletID: 72057594037932033 Cookie: 1} 2026-01-08T15:24:28.621836Z 1 00h00m00.000000s :STATESTORAGE DEBUG: Replica::Handle ev: {EvReplicaLookup TabletID: 72057594037932033 Cookie: 1} 2026-01-08T15:24:28.621869Z 1 00h00m00.000000s :STATESTORAGE DEBUG: Replica::Handle ev: {EvReplicaLookup TabletID: 72057594037932033 Cookie: 1} 2026-01-08T15:24:28.621900Z 1 00h00m00.000000s :STATESTORAGE DEBUG: Replica::Handle ev: {EvReplicaLookup TabletID: 72057594037932033 Cookie: 1} 2026-01-08T15:24:28.621935Z 1 00h00m00.000000s :STATESTORAGE DEBUG: Replica::Handle ev: {EvReplicaLookup TabletID: 72057594037932033 Cookie: 1} 2026-01-08T15:24:28.625351Z 1 00h00m00.002048s :BS_NODE DEBUG: {NWDC15@distconf.cpp:427} StateFunc Type# 268639257 Sender# [1:301:41] SessionId# [0:0:0] Cookie# 0 2026-01-08T15:24:28.625585Z 1 00h00m00.002048s :STATESTORAGE DEBUG: ProxyRequest::HandleInit ringGroup:0 ev: {EvLookup TabletID: 72057594037936131 Cookie: 0 ProxyOptions: SigNone} 2026-01-08T15:24:28.625694Z 3 00h00m00.002048s :BS_NODE DEBUG: {NWDC15@distconf.cpp:427} StateFunc Type# 268639238 Sender# [1:220:36] SessionId# [3:134:1] Cookie# 17094023622076131825 2026-01-08T15:24:28.625752Z 3 00h00m00.002048s :BS_NODE DEBUG: {NWDC54@distconf_binding.cpp:373} SubscribeToPeerNode NodeId# 1 SessionId# [3:134:1] Inserted# false Subscription# {SessionId# [3:134:1] SubscriptionCookie# 0} NextSubscribeCookie# 4 2026-01-08T15:24:28.625854Z 3 00h00m00.002048s :BS_NODE DEBUG: {NWDC02@distconf_binding.cpp:611} TEvNodeConfigPush NodeId# 1 Cookie# 17094023622076131825 SessionId# [3:134:1] Binding# {2.2/13599539317504433367@[3:109:2]} Record# {CacheUpdate { } } RootNodeId# 2 StorageConfigGeneration# 0 KnownNode# true 2026-01-08T15:24:28.625908Z 3 00h00m00.002048s :BS_NODE DEBUG: {NWDC54@distconf_binding.cpp:373} SubscribeToPeerNode NodeId# 1 SessionId# [3:134:1] Inserted# false Subscription# {SessionId# [3:134:1] SubscriptionCookie# 0} NextSubscribeCookie# 4 2026-01-08T15:24:28.625975Z 5 00h00m00.002048s :BS_NODE DEBUG: {NWDC15@distconf.cpp:427} StateFunc Type# 268639239 Sender# [1:220:36] SessionId# [5:140:1] Cookie# 11483356691507329905 2026-01-08T15:24:28.626020Z 5 00h00m00.002048s :BS_NODE DEBUG: {NWDC54@distconf_binding.cpp:373} SubscribeToPeerNode NodeId# 1 SessionId# [5:140:1] Inserted# false Subscription# {SessionId# [5:140:1] SubscriptionCookie# 0} NextSubscribeCookie# 3 2026-01-08T15:24:28.626124Z 5 
00h00m00.002048s :BS_NODE DEBUG: {NWDC17@distconf_binding.cpp:437} TEvNodeConfigReversePush NodeId# 1 Cookie# 11483356691507329905 SessionId# [5:140:1] Binding# {1.2/11483356691507329905@[5:140:1]} Record# {RootNodeId: 2 CacheUpdate { } } 2026-01-08T15:24:28.626179Z 1 00h00m00.002048s :STATESTORAGE DEBUG: Replica::Handle ev: {EvReplicaLookup TabletID: 72057594037936131 Cookie: 0} 2026-01-08T15:24:28.626238Z 1 00h00m00.002048s :STATESTORAGE DEBUG: Replica::Handle ev: {EvReplicaLookup TabletID: 72057594037936131 Cookie: 1} 2026-01-08T15:24:28.626273Z 1 00h00m00.002048s :STATESTORAGE DEBUG: Replica::Handle ev: {EvReplicaLookup TabletID: 72057594037936131 Cookie: 2} 2026-01-08T15:24:28.638518Z 1 00h00m00.002048s :STATESTORAGE DEBUG: Replica::Handle ev: {EvReplicaLookup TabletID: 72057594037936131 Cookie: 3} 2026-01-08T15:24:28.638664Z 1 00h00m00.002048s :STATESTORAGE DEBUG: Replica::Handle ev: {EvReplicaLookup TabletID: 72057594037936131 Cookie: 4} 2026-01-08T15:24:28.638850Z 1 00h00m00.002048s :STATESTORAGE DEBUG: ProxyRequest::HandleLookup ringGroup:0 ev: {EvReplicaInfo Status: 1 TabletID: 72057594037936131 ClusterStateGeneration: 0 ClusterStateGuid: 0} 2026-01-08T15:24:28.638908Z 1 00h00m00.002048s :STATESTORAGE DEBUG: ProxyRequest::HandleLookup ringGroup:0 ev: {EvReplicaInfo Status: 1 TabletID: 72057594037936131 ClusterStateGeneration: 0 ClusterStateGuid: 0} 2026-01-08T15:24:28.638943Z 1 00h00m00.002048s :STATESTORAGE DEBUG: ProxyRequest::HandleLookup ringGroup:0 ev: {EvReplicaInfo Status: 1 TabletID: 72057594037936131 ClusterStateGeneration: 0 ClusterStateGuid: 0} 2026-01-08T15:24:28.638983Z 1 00h00m00.002048s :STATESTORAGE DEBUG: ProxyRequest::HandleLookup ringGroup:0 ev: {EvReplicaInfo Status: 1 TabletID: 72057594037936131 ClusterStateGeneration: 0 ClusterStateGuid: 0} 2026-01-08T15:24:28.639022Z 1 00h00m00.002048s :STATESTORAGE DEBUG: ProxyRequest::HandleLookup ringGroup:0 ev: {EvReplicaInfo Status: 1 TabletID: 72057594037936131 ClusterStateGeneration: 0 ClusterStateGuid: 0} 2026-01-08T15:24:28.639167Z 2 00h00m00.002048s :BS_NODE DEBUG: {NWDC15@distconf.cpp:427} StateFunc Type# 268639238 Sender# [3:234:20] SessionId# [2:108:2] Cookie# 13599539317504433367 2026-01-08T15:24:28.639233Z 2 00h00m00.002048s :BS_NODE DEBUG: {NWDC54@distconf_binding.cpp:373} SubscribeToPeerNode NodeId# 3 SessionId# [2:108:2] Inserted# false Subscription# {SessionId# [2:108:2] SubscriptionCookie# 0} NextSubscribeCookie# 8 2026-01-08T15:24:28.639328Z 2 00h00m00.002048s :BS_NODE DEBUG: {NWDC02@distconf_binding.cpp:611} TEvNodeConfigPush NodeId# 3 Cookie# 13599539317504433367 SessionId# [2:108:2] Binding# Record# {CacheUpdate { } } RootNodeId# 2 StorageConfigGeneration# 0 KnownNode# true 2026-01-08T15:24:28.639371Z 2 00h00m00.002048s :BS_NODE DEBUG: {NWDC54@distconf_binding.cpp:373} SubscribeToPeerNode NodeId# 3 SessionId# [2:108:2] Inserted# false Subscription# {SessionId# [2:108:2] SubscriptionCookie# 0} NextSubscribeCookie# 8 2026-01-08T15:24:28.639451Z 1 00h00m00.002048s :BS_NODE DEBUG: {NWDC15@distconf.cpp:427} StateFunc Type# 268639239 Sender# [3:234:20] SessionId# [1:133:2] Cookie# 17094023622076131825 2026-01-08T15:24:28.639498Z 1 00h00m00.002048s :BS_NODE DEBUG: {NWDC54@distconf_binding.cpp:373} SubscribeToPeerNode NodeId# 3 SessionId# [1:133:2] Inserted# false Subscription# {SessionId# [1:133:2] SubscriptionCookie# 0} NextSubscribeCookie# 5 2026-01-08T15:24:28.639575Z 1 00h00m00.002048s :BS_NODE DEBUG: {NWDC17@distconf_binding.cpp:437} TEvNodeConfigReversePush NodeId# 3 Cookie# 17094023622076131825 
SessionId# [1:133:2] Binding# {3.2/17094023622076131825@[1:133:2]} Record# {RootNodeId: 2 CacheUpdate { } } 2026-01-08T15:24:28.639638Z 9 00h00m00.002048s :BS_NODE DEBUG: {NWDC15@distconf.cpp:427} StateFunc Type# 268639239 Sender# [3:234:20] SessionId# [9:104:3] Cookie# 11981770982771384938 2026-01-08T15:24:28.639676Z 9 00h00m00.002048s :BS_NODE DEBUG: {NWDC54@distconf_binding.cpp:373} SubscribeToPeerNode NodeId# 3 SessionId# [9:104:3] Inserted# false Subscription# {SessionId# [9:104:3] SubscriptionCookie# 0} NextSubscribeCookie# 4 2026-01-08T15:24:28.639725Z 9 00h00m00.002048s :BS_NODE DEBUG: {NWDC17@distconf_binding.cpp:437} TEvNodeConfigReversePush NodeId# 3 Cookie# 11981770982771384938 SessionId# [9:104:3] Binding# {3.2/11981770982771384938@[9:104:3]} Record# {RootNodeId: 2 CacheUpdate { } } 2026-01-08T15:24:28.639764Z 8 00h00m00.002048s :BS_NODE DEBUG: {NWDC15@distconf.cpp:427} StateFunc Type# 268639239 Sender# [5:248:20] SessionId# [8:61:5] Cookie# 5275644117557875179 2026-01-08T15:24:28.639796Z 8 00h00m00.002048s :BS_NODE DEBUG: {NWDC54@distconf_binding.cpp:373} SubscribeToPeerNode NodeId# 5 SessionId# [8:61:5] Inserted# false Subscription# {SessionId# [8:61:5] SubscriptionCookie# 0} NextSubscribeCookie# 2 2026-01-08T15:24:28.639843Z 8 00h00m00.002048s :BS_NODE DEBUG: {NWDC17@distconf_binding.cpp:437} TEvNodeConfigReversePush NodeId# 5 Cookie# 5275644117557875179 SessionId# [8:61:5] Binding# {5.2/5275644117557875179@[8:61:5]} Record# {RootNodeId: 2 CacheUpdate { } } 2026-01-08T15:24:28.640072Z 3 00h00m00.002048s :BS_NODE DEBUG: {NWDC15@distconf.cpp:427} StateFunc Type# 268639239 Sender# [2:227:20] SessionId# [3:109:2] Cookie# 13599539317504433367 2026-01-08T15:24:28.640110Z 3 00h00m00.002048s :BS_NODE DEBUG: {NWDC54@distconf_binding.cpp:373} SubscribeToPeerNode NodeId# 2 SessionId# [3:109:2] Inserted# false Subscription# {SessionId# [3:109:2] SubscriptionCookie# 0} NextSubscribeCookie# 4 2026-01-08T15:24:28.640165Z 3 00h00m00.002048s :BS_NODE DEBUG: {NWDC17@distconf_binding.cpp:437} TEvNodeConfigReversePush NodeId# 2 Cookie# 13599539317504433367 SessionId# [3:109:2] Binding# {2.2/13599539317504433367@[3:109:2]} Record# {RootNodeId: 2 CacheUpdate { } } 2026-01-08T15:24:28.640223Z 4 00h00m00.002048s :BS_NODE DEBUG: {NWDC15@distconf.cpp:427} StateFunc Type# 268639239 Sender# [2:227:20] SessionId# [4:112:2] Cookie# 6184275946249181489 2026-01-08T15:24:28.640282Z 4 00h00m00.002048s :BS_NODE DEBUG: {NWDC54@distconf_binding.cpp:373} SubscribeToPeerNode NodeId# 2 SessionId# [4:112:2] Inserted# false Subscription# {SessionId# [4:112:2] SubscriptionCookie# 0} NextSubscribeCookie# 2 2026-01-08T15:24:28.640339Z 4 00h00m00.002048s :BS_NODE DEBUG: {NWDC17@distconf_binding.cpp:437} TEvNodeConfigReversePush NodeId# 2 Cookie# 6184275946249181489 SessionId# [4:112:2] Binding# {2.2/6184275946249181489@[4:112:2]} Record# {RootNodeId: 2 CacheUpdate { } } 2026-01-08T15:24:28.640380Z 6 00h00m00.002048s :BS_NODE DEBUG: {NWDC15@distconf.cpp:427} StateFunc Type# 268639239 Sender# [2:227:20] SessionId# [6:118:2] Cookie# 18176373840711225173 2026-01-08T15:24:28.640413Z 6 00h00m00.002048s :BS_NODE DEBUG: {NWDC54@distconf_binding.cpp:373} SubscribeToPeerNode NodeId# 2 SessionId# [6:118:2] Inserted# false Subscription# {SessionId# [6:118:2] SubscriptionCookie# 0} NextSubscribeCookie# 5 2026-01-08T15:24:28.640460Z 6 00h00m00.002048s :BS_NODE DEBUG: {NWDC17@distconf_binding.cpp:437} TEvNodeConfigReversePush NodeId# 2 Cookie# 18176373840711225173 SessionId# [6:118:2] Binding# {2.2/18176373840711225173@[6:118:2]} 
Record# {RootNodeId: 2 CacheUpdate { } } 2026-01-08T15:24:28.640520Z 5 00h00m00.002048s :BS_NODE DEBUG: {NWDC15@distconf.cpp:427} StateFunc Type# 268639239 Sender# [1:220:36] SessionId# [5:140:1] Cookie# 11483356691507329905 2026-01-08T15:24:28.640553Z 5 00h00m00.002048s :BS_NODE DEBUG: {NWDC54@distconf ... -08T15:24:28.732227Z 1 00h00m03.279745s :STATESTORAGE DEBUG: Replica::Handle ev: {EvReplicaLookup TabletID: 72057594037936131 Cookie: 4} 2026-01-08T15:24:28.732265Z 1 00h00m03.279745s :STATESTORAGE DEBUG: ProxyRequest::HandleLookup ringGroup:0 ev: {EvReplicaInfo Status: 1 TabletID: 72057594037936131 ClusterStateGeneration: 0 ClusterStateGuid: 0} 2026-01-08T15:24:28.732300Z 1 00h00m03.279745s :STATESTORAGE DEBUG: ProxyRequest::HandleLookup ringGroup:0 ev: {EvReplicaInfo Status: 1 TabletID: 72057594037936131 ClusterStateGeneration: 0 ClusterStateGuid: 0} 2026-01-08T15:24:28.732327Z 1 00h00m03.279745s :STATESTORAGE DEBUG: ProxyRequest::HandleLookup ringGroup:0 ev: {EvReplicaInfo Status: 1 TabletID: 72057594037936131 ClusterStateGeneration: 0 ClusterStateGuid: 0} 2026-01-08T15:24:28.732366Z 1 00h00m03.279745s :STATESTORAGE DEBUG: ProxyRequest::HandleLookup ringGroup:0 ev: {EvReplicaInfo Status: 1 TabletID: 72057594037936131 ClusterStateGeneration: 0 ClusterStateGuid: 0} 2026-01-08T15:24:28.732403Z 1 00h00m03.279745s :STATESTORAGE DEBUG: ProxyRequest::HandleLookup ringGroup:0 ev: {EvReplicaInfo Status: 1 TabletID: 72057594037936131 ClusterStateGeneration: 0 ClusterStateGuid: 0} 2026-01-08T15:24:28.749737Z 1 00h00m06.906146s :STATESTORAGE DEBUG: ProxyRequest::HandleInit ringGroup:0 ev: {EvLookup TabletID: 72057594037936131 Cookie: 0 ProxyOptions: SigNone} 2026-01-08T15:24:28.749892Z 1 00h00m06.906146s :STATESTORAGE DEBUG: Replica::Handle ev: {EvReplicaLookup TabletID: 72057594037936131 Cookie: 0} 2026-01-08T15:24:28.749942Z 1 00h00m06.906146s :STATESTORAGE DEBUG: Replica::Handle ev: {EvReplicaLookup TabletID: 72057594037936131 Cookie: 1} 2026-01-08T15:24:28.749976Z 1 00h00m06.906146s :STATESTORAGE DEBUG: Replica::Handle ev: {EvReplicaLookup TabletID: 72057594037936131 Cookie: 2} 2026-01-08T15:24:28.750004Z 1 00h00m06.906146s :STATESTORAGE DEBUG: Replica::Handle ev: {EvReplicaLookup TabletID: 72057594037936131 Cookie: 3} 2026-01-08T15:24:28.750045Z 1 00h00m06.906146s :STATESTORAGE DEBUG: Replica::Handle ev: {EvReplicaLookup TabletID: 72057594037936131 Cookie: 4} 2026-01-08T15:24:28.750089Z 1 00h00m06.906146s :STATESTORAGE DEBUG: ProxyRequest::HandleLookup ringGroup:0 ev: {EvReplicaInfo Status: 1 TabletID: 72057594037936131 ClusterStateGeneration: 0 ClusterStateGuid: 0} 2026-01-08T15:24:28.750131Z 1 00h00m06.906146s :STATESTORAGE DEBUG: ProxyRequest::HandleLookup ringGroup:0 ev: {EvReplicaInfo Status: 1 TabletID: 72057594037936131 ClusterStateGeneration: 0 ClusterStateGuid: 0} 2026-01-08T15:24:28.750175Z 1 00h00m06.906146s :STATESTORAGE DEBUG: ProxyRequest::HandleLookup ringGroup:0 ev: {EvReplicaInfo Status: 1 TabletID: 72057594037936131 ClusterStateGeneration: 0 ClusterStateGuid: 0} 2026-01-08T15:24:28.750205Z 1 00h00m06.906146s :STATESTORAGE DEBUG: ProxyRequest::HandleLookup ringGroup:0 ev: {EvReplicaInfo Status: 1 TabletID: 72057594037936131 ClusterStateGeneration: 0 ClusterStateGuid: 0} 2026-01-08T15:24:28.750237Z 1 00h00m06.906146s :STATESTORAGE DEBUG: ProxyRequest::HandleLookup ringGroup:0 ev: {EvReplicaInfo Status: 1 TabletID: 72057594037936131 ClusterStateGeneration: 0 ClusterStateGuid: 0} 2026-01-08T15:24:28.754137Z 1 00h00m10.002048s :STATESTORAGE DEBUG: ProxyRequest::HandleInit 
ringGroup:0 ev: {EvLookup TabletID: 72057594037932033 Cookie: 0 ProxyOptions: SigNone} 2026-01-08T15:24:28.754249Z 1 00h00m10.002048s :STATESTORAGE DEBUG: Replica::Handle ev: {EvReplicaLookup TabletID: 72057594037932033 Cookie: 0} 2026-01-08T15:24:28.754297Z 1 00h00m10.002048s :STATESTORAGE DEBUG: Replica::Handle ev: {EvReplicaLookup TabletID: 72057594037932033 Cookie: 1} 2026-01-08T15:24:28.762456Z 1 00h00m10.002048s :STATESTORAGE DEBUG: Replica::Handle ev: {EvReplicaLookup TabletID: 72057594037932033 Cookie: 2} 2026-01-08T15:24:28.762510Z 1 00h00m10.002048s :STATESTORAGE DEBUG: Replica::Handle ev: {EvReplicaLookup TabletID: 72057594037932033 Cookie: 3} 2026-01-08T15:24:28.762541Z 1 00h00m10.002048s :STATESTORAGE DEBUG: Replica::Handle ev: {EvReplicaLookup TabletID: 72057594037932033 Cookie: 4} 2026-01-08T15:24:28.762607Z 1 00h00m10.002048s :STATESTORAGE DEBUG: ProxyRequest::HandleLookup ringGroup:0 ev: {EvReplicaInfo Status: 0 TabletID: 72057594037932033 ClusterStateGeneration: 0 ClusterStateGuid: 0 CurrentLeader: [1:297:39] CurrentLeaderTablet: [1:301:41] CurrentGeneration: 2 CurrentStep: 0} 2026-01-08T15:24:28.762681Z 1 00h00m10.002048s :STATESTORAGE DEBUG: ProxyRequest::HandleLookup ringGroup:0 ev: {EvReplicaInfo Status: 0 TabletID: 72057594037932033 ClusterStateGeneration: 0 ClusterStateGuid: 0 CurrentLeader: [1:297:39] CurrentLeaderTablet: [1:301:41] CurrentGeneration: 2 CurrentStep: 0} 2026-01-08T15:24:28.762743Z 1 00h00m10.002048s :STATESTORAGE DEBUG: ProxyRequest::HandleLookup ringGroup:0 ev: {EvReplicaInfo Status: 0 TabletID: 72057594037932033 ClusterStateGeneration: 0 ClusterStateGuid: 0 CurrentLeader: [1:297:39] CurrentLeaderTablet: [1:301:41] CurrentGeneration: 2 CurrentStep: 0} 2026-01-08T15:24:28.766636Z 1 00h00m14.304004s :STATESTORAGE DEBUG: ProxyRequest::HandleInit ringGroup:0 ev: {EvLookup TabletID: 72057594037936131 Cookie: 0 ProxyOptions: SigNone} 2026-01-08T15:24:28.766769Z 1 00h00m14.304004s :STATESTORAGE DEBUG: Replica::Handle ev: {EvReplicaLookup TabletID: 72057594037936131 Cookie: 0} 2026-01-08T15:24:28.766819Z 1 00h00m14.304004s :STATESTORAGE DEBUG: Replica::Handle ev: {EvReplicaLookup TabletID: 72057594037936131 Cookie: 1} 2026-01-08T15:24:28.766849Z 1 00h00m14.304004s :STATESTORAGE DEBUG: Replica::Handle ev: {EvReplicaLookup TabletID: 72057594037936131 Cookie: 2} 2026-01-08T15:24:28.766876Z 1 00h00m14.304004s :STATESTORAGE DEBUG: Replica::Handle ev: {EvReplicaLookup TabletID: 72057594037936131 Cookie: 3} 2026-01-08T15:24:28.766905Z 1 00h00m14.304004s :STATESTORAGE DEBUG: Replica::Handle ev: {EvReplicaLookup TabletID: 72057594037936131 Cookie: 4} 2026-01-08T15:24:28.766962Z 1 00h00m14.304004s :STATESTORAGE DEBUG: ProxyRequest::HandleLookup ringGroup:0 ev: {EvReplicaInfo Status: 1 TabletID: 72057594037936131 ClusterStateGeneration: 0 ClusterStateGuid: 0} 2026-01-08T15:24:28.767009Z 1 00h00m14.304004s :STATESTORAGE DEBUG: ProxyRequest::HandleLookup ringGroup:0 ev: {EvReplicaInfo Status: 1 TabletID: 72057594037936131 ClusterStateGeneration: 1 ClusterStateGuid: 2} 2026-01-08T15:24:28.767042Z 1 00h00m14.304004s :STATESTORAGE DEBUG: StateStorageProxy TEvNodeWardenNotifyConfigMismatch: Info->ClusterStateGeneration=0 clusterStateGeneration=1 Info->ClusterStateGuid=0 clusterStateGuid=2 2026-01-08T15:24:28.767133Z 1 00h00m14.304004s :BS_NODE INFO: {NW51@node_warden_resource.cpp:334} TEvNodeWardenNotifyConfigMismatch: NodeId: 1 ClusterStateGeneration: 1 ClusterStateGuid: 2 2026-01-08T15:24:28.778821Z 1 00h00m20.100000s :STATESTORAGE DEBUG: ProxyRequest::HandleInit 
ringGroup:0 ev: {EvLookup TabletID: 72057594037932033 Cookie: 0 ProxyOptions: SigNone} 2026-01-08T15:24:28.778938Z 1 00h00m20.100000s :STATESTORAGE DEBUG: Replica::Handle ev: {EvReplicaLookup TabletID: 72057594037932033 Cookie: 0} 2026-01-08T15:24:28.778992Z 1 00h00m20.100000s :STATESTORAGE DEBUG: Replica::Handle ev: {EvReplicaLookup TabletID: 72057594037932033 Cookie: 1} 2026-01-08T15:24:28.779024Z 1 00h00m20.100000s :STATESTORAGE DEBUG: Replica::Handle ev: {EvReplicaLookup TabletID: 72057594037932033 Cookie: 2} 2026-01-08T15:24:28.779061Z 1 00h00m20.100000s :STATESTORAGE DEBUG: Replica::Handle ev: {EvReplicaLookup TabletID: 72057594037932033 Cookie: 3} 2026-01-08T15:24:28.779090Z 1 00h00m20.100000s :STATESTORAGE DEBUG: Replica::Handle ev: {EvReplicaLookup TabletID: 72057594037932033 Cookie: 4} 2026-01-08T15:24:28.779171Z 1 00h00m20.100000s :STATESTORAGE DEBUG: ProxyRequest::HandleLookup ringGroup:0 ev: {EvReplicaInfo Status: 0 TabletID: 72057594037932033 ClusterStateGeneration: 0 ClusterStateGuid: 0 CurrentLeader: [1:297:39] CurrentLeaderTablet: [1:301:41] CurrentGeneration: 2 CurrentStep: 0} 2026-01-08T15:24:28.779240Z 1 00h00m20.100000s :STATESTORAGE DEBUG: ProxyRequest::HandleLookup ringGroup:0 ev: {EvReplicaInfo Status: 0 TabletID: 72057594037932033 ClusterStateGeneration: 1 ClusterStateGuid: 2 CurrentLeader: [1:297:39] CurrentLeaderTablet: [1:301:41] CurrentGeneration: 2 CurrentStep: 0} 2026-01-08T15:24:28.779278Z 1 00h00m20.100000s :STATESTORAGE DEBUG: StateStorageProxy TEvNodeWardenNotifyConfigMismatch: Info->ClusterStateGeneration=0 clusterStateGeneration=1 Info->ClusterStateGuid=0 clusterStateGuid=2 2026-01-08T15:24:28.779373Z 1 00h00m20.100000s :BS_NODE INFO: {NW51@node_warden_resource.cpp:334} TEvNodeWardenNotifyConfigMismatch: NodeId: 1 ClusterStateGeneration: 1 ClusterStateGuid: 2 2026-01-08T15:24:28.779487Z 1 00h00m20.100000s :STATESTORAGE DEBUG: Replica::Handle ev: {EvReplicaLookup TabletID: 72057594037932033 Cookie: 0} 2026-01-08T15:24:28.779534Z 1 00h00m20.100000s :STATESTORAGE DEBUG: Replica TEvNodeWardenNotifyConfigMismatch: Info->ClusterStateGeneration=1 msgGeneration=3 Info->ClusterStateGuid=2 msgGuid=4 2026-01-08T15:24:28.779606Z 1 00h00m20.100000s :BS_NODE INFO: {NW51@node_warden_resource.cpp:334} TEvNodeWardenNotifyConfigMismatch: NodeId: 1 ClusterStateGeneration: 3 ClusterStateGuid: 4 2026-01-08T15:24:28.779705Z 1 00h00m20.100000s :STATESTORAGE DEBUG: Replica::Handle ev: {EvReplicaUpdate TabletID: 72057594037932033} 2026-01-08T15:24:28.779746Z 1 00h00m20.100000s :STATESTORAGE DEBUG: Replica TEvNodeWardenNotifyConfigMismatch: Info->ClusterStateGeneration=1 msgGeneration=3 Info->ClusterStateGuid=2 msgGuid=4 2026-01-08T15:24:28.779797Z 1 00h00m20.100000s :BS_NODE INFO: {NW51@node_warden_resource.cpp:334} TEvNodeWardenNotifyConfigMismatch: NodeId: 1 ClusterStateGeneration: 3 ClusterStateGuid: 4 2026-01-08T15:24:28.782916Z 1 00h00m20.100000s :STATESTORAGE DEBUG: Replica::Handle ev: NKikimrStateStorage.TEvCleanup TabletID: 72057594037932033 ProposedLeader { RawX1: 0 RawX2: 0 } ClusterStateGeneration: 3 ClusterStateGuid: 4 2026-01-08T15:24:28.782999Z 1 00h00m20.100000s :STATESTORAGE DEBUG: Replica TEvNodeWardenNotifyConfigMismatch: Info->ClusterStateGeneration=1 msgGeneration=3 Info->ClusterStateGuid=2 msgGuid=4 2026-01-08T15:24:28.783073Z 1 00h00m20.100000s :BS_NODE INFO: {NW51@node_warden_resource.cpp:334} TEvNodeWardenNotifyConfigMismatch: NodeId: 1 ClusterStateGeneration: 3 ClusterStateGuid: 4 2026-01-08T15:24:28.790939Z 1 00h00m29.099720s :STATESTORAGE DEBUG: 
ProxyRequest::HandleInit ringGroup:0 ev: {EvLookup TabletID: 72057594037936131 Cookie: 0 ProxyOptions: SigNone} 2026-01-08T15:24:28.791108Z 1 00h00m29.099720s :STATESTORAGE DEBUG: Replica::Handle ev: {EvReplicaLookup TabletID: 72057594037936131 Cookie: 0} 2026-01-08T15:24:28.791161Z 1 00h00m29.099720s :STATESTORAGE DEBUG: Replica::Handle ev: {EvReplicaLookup TabletID: 72057594037936131 Cookie: 1} 2026-01-08T15:24:28.791193Z 1 00h00m29.099720s :STATESTORAGE DEBUG: Replica::Handle ev: {EvReplicaLookup TabletID: 72057594037936131 Cookie: 2} 2026-01-08T15:24:28.791227Z 1 00h00m29.099720s :STATESTORAGE DEBUG: Replica::Handle ev: {EvReplicaLookup TabletID: 72057594037936131 Cookie: 3} 2026-01-08T15:24:28.791255Z 1 00h00m29.099720s :STATESTORAGE DEBUG: Replica::Handle ev: {EvReplicaLookup TabletID: 72057594037936131 Cookie: 4} 2026-01-08T15:24:28.791293Z 1 00h00m29.099720s :STATESTORAGE DEBUG: ProxyRequest::HandleLookup ringGroup:0 ev: {EvReplicaInfo Status: 1 TabletID: 72057594037936131 ClusterStateGeneration: 0 ClusterStateGuid: 0} 2026-01-08T15:24:28.791364Z 1 00h00m29.099720s :STATESTORAGE DEBUG: ProxyRequest::HandleLookup ringGroup:0 ev: {EvReplicaInfo Status: 1 TabletID: 72057594037936131 ClusterStateGeneration: 1 ClusterStateGuid: 2} 2026-01-08T15:24:28.791392Z 1 00h00m29.099720s :STATESTORAGE DEBUG: StateStorageProxy TEvNodeWardenNotifyConfigMismatch: Info->ClusterStateGeneration=0 clusterStateGeneration=1 Info->ClusterStateGuid=0 clusterStateGuid=2 2026-01-08T15:24:28.791520Z 1 00h00m29.099720s :BS_NODE INFO: {NW51@node_warden_resource.cpp:334} TEvNodeWardenNotifyConfigMismatch: NodeId: 1 ClusterStateGeneration: 1 ClusterStateGuid: 2 |83.8%| [TM] {BAZEL_UPLOAD} ydb/core/blobstorage/ut_blobstorage/ut_read_only_vdisk/unittest |83.8%| [TS] {BAZEL_UPLOAD} ydb/core/wrappers/ut/unittest |83.8%| [TM] {BAZEL_UPLOAD} ydb/core/blobstorage/ut_blobstorage/ut_statestorage/unittest ------- [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/ut_blobstorage/ut_statestorage/unittest >> TStateStorageRingGroupState::TestProxyNotifyReplicaConfigChanged1 [GOOD] Test command err: RandomSeed# 12574605220768507430 2026-01-08T15:24:29.159474Z 1 00h00m00.000000s :BS_NODE DEBUG: {NWDC15@distconf.cpp:427} StateFunc Type# 268639239 Sender# [6:255:20] SessionId# [1:142:5] Cookie# 15406320909233120 2026-01-08T15:24:29.159571Z 1 00h00m00.000000s :BS_NODE DEBUG: {NWDC54@distconf_binding.cpp:373} SubscribeToPeerNode NodeId# 6 SessionId# [1:142:5] Inserted# false Subscription# {SessionId# [1:142:5] SubscriptionCookie# 0} NextSubscribeCookie# 9 2026-01-08T15:24:29.170265Z 1 00h00m00.000000s :BS_NODE DEBUG: {NWDC17@distconf_binding.cpp:437} TEvNodeConfigReversePush NodeId# 6 Cookie# 15406320909233120 SessionId# [1:142:5] Binding# {6.6/15406320909233120@[1:142:5]} Record# {RootNodeId: 1 } 2026-01-08T15:24:29.170406Z 1 00h00m00.000000s :BS_NODE DEBUG: {NWDC03@distconf_binding.cpp:403} AbortBinding Binding# {6.6/15406320909233120@[1:142:5]} Reason# binding cycle 2026-01-08T15:24:29.170506Z 1 00h00m00.000000s :BS_NODE DEBUG: {NWDC24@distconf_scatter_gather.cpp:125} AbortAllScatterTasks Binding# {6.6/15406320909233120@[1:142:5]} 2026-01-08T15:24:29.170619Z 1 00h00m00.000000s :BS_NODE DEBUG: {NWDC55@distconf_binding.cpp:345} UnsubscribeInterconnect NodeId# 6 Subscription# {SessionId# [1:142:5] SubscriptionCookie# 0} 2026-01-08T15:24:29.170683Z 1 00h00m00.000000s :BS_NODE WARN: {NWDC01@distconf.cpp:423} StateFunc too long Type# 268639239 Duration# 0.011254s 2026-01-08T15:24:29.170763Z 6 00h00m00.000000s :BS_NODE DEBUG: 
{NWDC15@distconf.cpp:427} StateFunc Type# 268639238 Sender# [1:220:36] SessionId# [6:143:1] Cookie# 15406320909233120 2026-01-08T15:24:29.170816Z 6 00h00m00.000000s :BS_NODE DEBUG: {NWDC54@distconf_binding.cpp:373} SubscribeToPeerNode NodeId# 1 SessionId# [6:143:1] Inserted# false Subscription# {SessionId# [6:143:1] SubscriptionCookie# 0} NextSubscribeCookie# 5 2026-01-08T15:24:29.170977Z 6 00h00m00.000000s :BS_NODE DEBUG: {NWDC02@distconf_binding.cpp:611} TEvNodeConfigPush NodeId# 1 Cookie# 15406320909233120 SessionId# [6:143:1] Binding# Record# {BoundNodes { NodeId { Host: "127.0.0.6" Port: 19001 NodeId: 6 } Meta { Fingerprint: "\3403\207\365\032> Record# {RootNodeId: 6 } 2026-01-08T15:24:29.172931Z 6 00h00m00.000000s :BS_NODE DEBUG: {NWDC15@distconf.cpp:427} StateFunc Type# 268639240 Sender# [1:220:36] SessionId# [6:143:1] Cookie# 15406320909233120 2026-01-08T15:24:29.172978Z 6 00h00m00.000000s :BS_NODE DEBUG: {NWDC54@distconf_binding.cpp:373} SubscribeToPeerNode NodeId# 1 SessionId# [6:143:1] Inserted# false Subscription# {SessionId# [6:143:1] SubscriptionCookie# 0} NextSubscribeCookie# 5 2026-01-08T15:24:29.173012Z 6 00h00m00.000000s :BS_NODE DEBUG: {NWDC16@distconf_binding.cpp:750} TEvNodeConfigUnbind NodeId# 1 Cookie# 15406320909233120 SessionId# [6:143:1] Binding# 2026-01-08T15:24:29.173043Z 6 00h00m00.000000s :BS_NODE DEBUG: {NWDC06@distconf_binding.cpp:759} UnbindNode NodeId# 1 Reason# explicit unbind request 2026-01-08T15:24:29.173073Z 6 00h00m00.000000s :BS_NODE DEBUG: {NWDC34@distconf_binding.cpp:551} DeleteBound RefererNodeId# 1 NodeId# 127.0.0.6:19001/6 2026-01-08T15:24:29.173106Z 6 00h00m00.000000s :BS_NODE DEBUG: {NWDC34@distconf_binding.cpp:551} DeleteBound RefererNodeId# 1 NodeId# 127.0.0.7:19001/7 2026-01-08T15:24:29.173137Z 6 00h00m00.000000s :BS_NODE DEBUG: {NWDC34@distconf_binding.cpp:551} DeleteBound RefererNodeId# 1 NodeId# 127.0.0.9:19001/9 2026-01-08T15:24:29.173166Z 6 00h00m00.000000s :BS_NODE DEBUG: {NWDC34@distconf_binding.cpp:551} DeleteBound RefererNodeId# 1 NodeId# 127.0.0.4:19001/4 2026-01-08T15:24:29.173200Z 6 00h00m00.000000s :BS_NODE DEBUG: {NWDC34@distconf_binding.cpp:551} DeleteBound RefererNodeId# 1 NodeId# 127.0.0.1:19001/1 2026-01-08T15:24:29.173229Z 6 00h00m00.000000s :BS_NODE DEBUG: {NWDC34@distconf_binding.cpp:551} DeleteBound RefererNodeId# 1 NodeId# 127.0.0.8:19001/8 2026-01-08T15:24:29.173274Z 6 00h00m00.000000s :BS_NODE DEBUG: {NWDC34@distconf_binding.cpp:551} DeleteBound RefererNodeId# 1 NodeId# 127.0.0.2:19001/2 2026-01-08T15:24:29.173327Z 6 00h00m00.000000s :BS_NODE DEBUG: {NWDC34@distconf_binding.cpp:551} DeleteBound RefererNodeId# 1 NodeId# 127.0.0.5:19001/5 2026-01-08T15:24:29.173355Z 6 00h00m00.000000s :BS_NODE DEBUG: {NWDC34@distconf_binding.cpp:551} DeleteBound RefererNodeId# 1 NodeId# 127.0.0.3:19001/3 2026-01-08T15:24:29.173408Z 6 00h00m00.000000s :BS_NODE DEBUG: {NWDC55@distconf_binding.cpp:345} UnsubscribeInterconnect NodeId# 1 Subscription# {SessionId# [6:143:1] SubscriptionCookie# 0} 2026-01-08T15:24:29.173478Z 6 00h00m00.000000s :BS_NODE DEBUG: {NWDC54@distconf_binding.cpp:373} SubscribeToPeerNode NodeId# 5 SessionId# [0:0:0] Inserted# true Subscription# {SessionId# [0:0:0] SubscriptionCookie# 0} NextSubscribeCookie# 5 2026-01-08T15:24:29.173535Z 6 00h00m00.000000s :BS_NODE DEBUG: {NWDC29@distconf_binding.cpp:188} Initiated bind NodeId# 5 Binding# {5.0/8448147278750808093@[0:0:0]} SessionId# [0:0:0] 2026-01-08T15:24:29.173587Z 8 00h00m00.000000s :BS_NODE DEBUG: {NWDC15@distconf.cpp:427} StateFunc Type# 268639239 Sender# 
[1:220:36] SessionId# [8:149:1] Cookie# 4649704648918659310 2026-01-08T15:24:29.173623Z 8 00h00m00.000000s :BS_NODE DEBUG: {NWDC54@distconf_binding.cpp:373} SubscribeToPeerNode NodeId# 1 SessionId# [8:149:1] Inserted# false Subscription# {SessionId# [8:149:1] SubscriptionCookie# 0} NextSubscribeCookie# 2 2026-01-08T15:24:29.173696Z 8 00h00m00.000000s :BS_NODE DEBUG: {NWDC17@distconf_binding.cpp:437} TEvNodeConfigReversePush NodeId# 1 Cookie# 4649704648918659310 SessionId# [8:149:1] Binding# {1.6/4649704648918659310@[8:149:1]} Record# {RootNodeId: 1 } 2026-01-08T15:24:29.173759Z 2 00h00m00.000000s :BS_NODE DEBUG: {NWDC15@distconf.cpp:427} StateFunc Type# 268639239 Sender# [1:220:36] SessionId# [2:131:1] Cookie# 3571190183323266623 2026-01-08T15:24:29.173845Z 2 00h00m00.000000s :BS_NODE DEBUG: {NWDC54@distconf_binding.cpp:373} SubscribeToPeerNode NodeId# 1 SessionId# [2:131:1] Inserted# false Subscription# {SessionId# [2:131:1] SubscriptionCookie# 0} NextSubscribeCookie# 5 2026-01-08T15:24:29.173906Z 2 00h00m00.000000s :BS_NODE DEBUG: {NWDC17@distconf_binding.cpp:437} TEvNodeConfigReversePush NodeId# 1 Cookie# 3571190183323266623 SessionId# [2:131:1] Binding# {1.6/3571190183323266623@[2:131:1]} Record# {RootNodeId: 1 } 2026-01-08T15:24:29.173946Z 9 00h00m00.000000s :BS_NODE DEBUG: {NWDC15@distconf.cpp:427} StateFunc Type# 268639239 Sender# [1:220:36] SessionId# [9:152:1] Cookie# 10321248218556464531 2026-01-08T15:24:29.173979Z 9 00h00m00.000000s :BS_NODE DEBUG: {NWDC54@distconf_binding.cpp:373} SubscribeToPeerNode NodeId# 1 SessionId# [9:152:1] Inserted# false Subscription# {SessionId# [9:152:1] SubscriptionCookie# 0} NextSubscribeCookie# 2 2026-01-08T15:24:29.174039Z 9 00h00m00.000000s :BS_NODE DEBUG: {NWDC17@distconf_binding.cpp:437} TEvNodeConfigReversePush NodeId# 1 Cookie# 10321248218556464531 SessionId# [9:152:1] Binding# {1.6/10321248218556464531@[9:152:1]} Record# {RootNodeId: 1 } 2026-01-08T15:24:29.174104Z 3 00h00m00.000000s :BS_NODE DEBUG: {NWDC15@distconf.cpp:427} StateFunc Type# 268639239 Sender# [1:220:36] SessionId# [3:134:1] Cookie# 5615237098700432720 2026-01-08T15:24:29.174138Z 3 00h00m00.000000s :BS_NODE DEBUG: {NWDC54@distconf_binding.cpp:373} SubscribeToPeerNode NodeId# 1 SessionId# [3:134:1] Inserted# false Subscription# {SessionId# [3:134:1] SubscriptionCookie# 0} NextSub ... 
1:297:39] CurrentLeaderTablet: [1:301:41] CurrentGeneration: 2 CurrentStep: 0} 2026-01-08T15:24:29.293602Z 1 00h00m13.597002s :STATESTORAGE DEBUG: ProxyRequest::HandleInit ringGroup:0 ev: {EvLookup TabletID: 72057594037936131 Cookie: 0 ProxyOptions: SigNone} 2026-01-08T15:24:29.293720Z 1 00h00m13.597002s :STATESTORAGE DEBUG: Replica::Handle ev: {EvReplicaLookup TabletID: 72057594037936131 Cookie: 0} 2026-01-08T15:24:29.293780Z 1 00h00m13.597002s :STATESTORAGE DEBUG: Replica::Handle ev: {EvReplicaLookup TabletID: 72057594037936131 Cookie: 1} 2026-01-08T15:24:29.293815Z 1 00h00m13.597002s :STATESTORAGE DEBUG: Replica::Handle ev: {EvReplicaLookup TabletID: 72057594037936131 Cookie: 2} 2026-01-08T15:24:29.293844Z 1 00h00m13.597002s :STATESTORAGE DEBUG: Replica::Handle ev: {EvReplicaLookup TabletID: 72057594037936131 Cookie: 3} 2026-01-08T15:24:29.293871Z 1 00h00m13.597002s :STATESTORAGE DEBUG: Replica::Handle ev: {EvReplicaLookup TabletID: 72057594037936131 Cookie: 4} 2026-01-08T15:24:29.293923Z 1 00h00m13.597002s :STATESTORAGE DEBUG: ProxyRequest::HandleLookup ringGroup:0 ev: {EvReplicaInfo Status: 1 TabletID: 72057594037936131 ClusterStateGeneration: 0 ClusterStateGuid: 0} 2026-01-08T15:24:29.293970Z 1 00h00m13.597002s :STATESTORAGE DEBUG: ProxyRequest::HandleLookup ringGroup:0 ev: {EvReplicaInfo Status: 1 TabletID: 72057594037936131 ClusterStateGeneration: 0 ClusterStateGuid: 0} 2026-01-08T15:24:29.294015Z 1 00h00m13.597002s :STATESTORAGE DEBUG: ProxyRequest::HandleLookup ringGroup:0 ev: {EvReplicaInfo Status: 1 TabletID: 72057594037936131 ClusterStateGeneration: 0 ClusterStateGuid: 0} 2026-01-08T15:24:29.294049Z 1 00h00m13.597002s :STATESTORAGE DEBUG: ProxyRequest::HandleLookup ringGroup:0 ev: {EvReplicaInfo Status: 1 TabletID: 72057594037936131 ClusterStateGeneration: 0 ClusterStateGuid: 0} 2026-01-08T15:24:29.294084Z 1 00h00m13.597002s :STATESTORAGE DEBUG: ProxyRequest::HandleLookup ringGroup:0 ev: {EvReplicaInfo Status: 1 TabletID: 72057594037936131 ClusterStateGeneration: 0 ClusterStateGuid: 0} 2026-01-08T15:24:29.300712Z 1 00h00m20.100000s :STATESTORAGE DEBUG: ProxyRequest::HandleInit ringGroup:0 ev: {EvLookup TabletID: 72057594037932033 Cookie: 0 ProxyOptions: SigNone} 2026-01-08T15:24:29.300868Z 1 00h00m20.100000s :STATESTORAGE DEBUG: Replica::Handle ev: {EvReplicaLookup TabletID: 72057594037932033 Cookie: 0} 2026-01-08T15:24:29.300931Z 1 00h00m20.100000s :STATESTORAGE DEBUG: Replica::Handle ev: {EvReplicaLookup TabletID: 72057594037932033 Cookie: 1} 2026-01-08T15:24:29.300966Z 1 00h00m20.100000s :STATESTORAGE DEBUG: Replica::Handle ev: {EvReplicaLookup TabletID: 72057594037932033 Cookie: 2} 2026-01-08T15:24:29.301015Z 1 00h00m20.100000s :STATESTORAGE DEBUG: Replica::Handle ev: {EvReplicaLookup TabletID: 72057594037932033 Cookie: 3} 2026-01-08T15:24:29.301046Z 1 00h00m20.100000s :STATESTORAGE DEBUG: Replica::Handle ev: {EvReplicaLookup TabletID: 72057594037932033 Cookie: 4} 2026-01-08T15:24:29.301100Z 1 00h00m20.100000s :STATESTORAGE DEBUG: ProxyRequest::HandleLookup ringGroup:0 ev: {EvReplicaInfo Status: 0 TabletID: 72057594037932033 ClusterStateGeneration: 0 ClusterStateGuid: 0 CurrentLeader: [1:297:39] CurrentLeaderTablet: [1:301:41] CurrentGeneration: 2 CurrentStep: 0} 2026-01-08T15:24:29.301162Z 1 00h00m20.100000s :STATESTORAGE DEBUG: ProxyRequest::HandleLookup ringGroup:0 ev: {EvReplicaInfo Status: 0 TabletID: 72057594037932033 ClusterStateGeneration: 0 ClusterStateGuid: 0 CurrentLeader: [1:297:39] CurrentLeaderTablet: [1:301:41] CurrentGeneration: 2 CurrentStep: 0} 
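
The STATESTORAGE entries in these runs trace the proxy's lookup round: a ProxyRequest fans EvReplicaLookup out to the ring group's five replicas (Cookie 0..4), gathers the EvReplicaInfo replies, and raises TEvNodeWardenNotifyConfigMismatch whenever a reply carries a ClusterStateGeneration/ClusterStateGuid pair that differs from what the proxy currently holds. Below is a minimal stand-alone C++ sketch of that compare-and-notify step; ReplicaInfo, ProxyState, and the printf notification are simplified illustrative stand-ins, not YDB's actual interfaces.

#include <cstdint>
#include <cstdio>
#include <vector>

struct ReplicaInfo {             // stand-in for one TEvReplicaInfo reply
    int      Status;
    uint64_t ClusterStateGeneration;
    uint64_t ClusterStateGuid;
};

struct ProxyState {              // what the proxy currently believes
    uint64_t ClusterStateGeneration = 0;
    uint64_t ClusterStateGuid = 0;
};

// Mirrors the "StateStorageProxy TEvNodeWardenNotifyConfigMismatch" entries:
// any reply whose generation/guid disagrees with the proxy's view makes the
// proxy notify the node warden with the values the replica reported.
void HandleLookupReplies(const ProxyState& self, const std::vector<ReplicaInfo>& replies) {
    for (const auto& r : replies) {
        if (r.ClusterStateGeneration != self.ClusterStateGeneration ||
            r.ClusterStateGuid != self.ClusterStateGuid) {
            std::printf("NotifyConfigMismatch: ClusterStateGeneration %llu ClusterStateGuid %llu\n",
                        (unsigned long long)r.ClusterStateGeneration,
                        (unsigned long long)r.ClusterStateGuid);
        }
    }
}

int main() {
    ProxyState self;                                   // generation 0, guid 0, as in the runs above
    HandleLookupReplies(self, {{1, 0, 0}, {1, 1, 2}}); // second reply mismatches -> one notification
}

In the runs above the proxy holds generation 0 / guid 0, so a reply reporting ClusterStateGeneration 1 / ClusterStateGuid 2 is exactly what triggers the NW51 node-warden notification.
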
2026-01-08T15:24:29.301198Z 1 00h00m20.100000s :STATESTORAGE DEBUG: ProxyRequest::HandleLookup ringGroup:0 ev: {EvReplicaInfo Status: 0 TabletID: 72057594037932033 ClusterStateGeneration: 0 ClusterStateGuid: 0 CurrentLeader: [1:297:39] CurrentLeaderTablet: [1:301:41] CurrentGeneration: 2 CurrentStep: 0} 2026-01-08T15:24:29.308271Z 1 00h00m29.113643s :STATESTORAGE DEBUG: ProxyRequest::HandleInit ringGroup:0 ev: {EvLookup TabletID: 72057594037936131 Cookie: 0 ProxyOptions: SigNone} 2026-01-08T15:24:29.308369Z 1 00h00m29.113643s :STATESTORAGE DEBUG: Replica::Handle ev: {EvReplicaLookup TabletID: 72057594037936131 Cookie: 0} 2026-01-08T15:24:29.308434Z 1 00h00m29.113643s :STATESTORAGE DEBUG: Replica::Handle ev: {EvReplicaLookup TabletID: 72057594037936131 Cookie: 1} 2026-01-08T15:24:29.308463Z 1 00h00m29.113643s :STATESTORAGE DEBUG: Replica TEvNodeWardenNotifyConfigMismatch: Info->ClusterStateGeneration=0 msgGeneration=0 Info->ClusterStateGuid=2 msgGuid=0 2026-01-08T15:24:29.308528Z 1 00h00m29.113643s :STATESTORAGE DEBUG: Replica::Handle ev: {EvReplicaLookup TabletID: 72057594037936131 Cookie: 2} 2026-01-08T15:24:29.308557Z 1 00h00m29.113643s :STATESTORAGE DEBUG: Replica::Handle ev: {EvReplicaLookup TabletID: 72057594037936131 Cookie: 3} 2026-01-08T15:24:29.308600Z 1 00h00m29.113643s :STATESTORAGE DEBUG: Replica::Handle ev: {EvReplicaLookup TabletID: 72057594037936131 Cookie: 4} 2026-01-08T15:24:29.308644Z 1 00h00m29.113643s :STATESTORAGE DEBUG: ProxyRequest::HandleLookup ringGroup:0 ev: {EvReplicaInfo Status: 1 TabletID: 72057594037936131 ClusterStateGeneration: 0 ClusterStateGuid: 0} 2026-01-08T15:24:29.308699Z 1 00h00m29.113643s :BS_NODE INFO: {NW51@node_warden_resource.cpp:334} TEvNodeWardenNotifyConfigMismatch: NodeId: 1 ClusterStateGeneration: 0 ClusterStateGuid: 0 2026-01-08T15:24:29.308743Z 1 00h00m29.113643s :STATESTORAGE DEBUG: ProxyRequest::HandleLookup ringGroup:0 ev: {EvReplicaInfo Status: 1 TabletID: 72057594037936131 ClusterStateGeneration: 0 ClusterStateGuid: 2} 2026-01-08T15:24:29.308773Z 1 00h00m29.113643s :STATESTORAGE DEBUG: StateStorageProxy TEvNodeWardenNotifyConfigMismatch: Info->ClusterStateGeneration=0 clusterStateGeneration=0 Info->ClusterStateGuid=0 clusterStateGuid=2 2026-01-08T15:24:29.308844Z 1 00h00m29.113643s :BS_NODE INFO: {NW51@node_warden_resource.cpp:334} TEvNodeWardenNotifyConfigMismatch: NodeId: 1 ClusterStateGeneration: 0 ClusterStateGuid: 2 2026-01-08T15:24:29.310997Z 1 00h00m30.163465s :STATESTORAGE DEBUG: ProxyRequest::HandleInit ringGroup:0 ev: {EvLookup TabletID: 72057594037932033 Cookie: 0 ProxyOptions: SigNone} 2026-01-08T15:24:29.311082Z 1 00h00m30.163465s :STATESTORAGE DEBUG: Replica::Handle ev: {EvReplicaLookup TabletID: 72057594037932033 Cookie: 0} 2026-01-08T15:24:29.311135Z 1 00h00m30.163465s :STATESTORAGE DEBUG: Replica::Handle ev: {EvReplicaLookup TabletID: 72057594037932033 Cookie: 1} 2026-01-08T15:24:29.311161Z 1 00h00m30.163465s :STATESTORAGE DEBUG: Replica TEvNodeWardenNotifyConfigMismatch: Info->ClusterStateGeneration=0 msgGeneration=0 Info->ClusterStateGuid=2 msgGuid=0 2026-01-08T15:24:29.311209Z 1 00h00m30.163465s :STATESTORAGE DEBUG: Replica::Handle ev: {EvReplicaLookup TabletID: 72057594037932033 Cookie: 2} 2026-01-08T15:24:29.311237Z 1 00h00m30.163465s :STATESTORAGE DEBUG: Replica::Handle ev: {EvReplicaLookup TabletID: 72057594037932033 Cookie: 3} 2026-01-08T15:24:29.311265Z 1 00h00m30.163465s :STATESTORAGE DEBUG: Replica::Handle ev: {EvReplicaLookup TabletID: 72057594037932033 Cookie: 4} 2026-01-08T15:24:29.311310Z 1 
00h00m30.163465s :STATESTORAGE DEBUG: ProxyRequest::HandleLookup ringGroup:0 ev: {EvReplicaInfo Status: 0 TabletID: 72057594037932033 ClusterStateGeneration: 0 ClusterStateGuid: 0 CurrentLeader: [1:297:39] CurrentLeaderTablet: [1:301:41] CurrentGeneration: 2 CurrentStep: 0} 2026-01-08T15:24:29.311374Z 1 00h00m30.163465s :BS_NODE INFO: {NW51@node_warden_resource.cpp:334} TEvNodeWardenNotifyConfigMismatch: NodeId: 1 ClusterStateGeneration: 0 ClusterStateGuid: 0 2026-01-08T15:24:29.311424Z 1 00h00m30.163465s :STATESTORAGE DEBUG: ProxyRequest::HandleLookup ringGroup:0 ev: {EvReplicaInfo Status: 0 TabletID: 72057594037932033 ClusterStateGeneration: 0 ClusterStateGuid: 2 CurrentLeader: [1:297:39] CurrentLeaderTablet: [1:301:41] CurrentGeneration: 2 CurrentStep: 0} 2026-01-08T15:24:29.311458Z 1 00h00m30.163465s :STATESTORAGE DEBUG: StateStorageProxy TEvNodeWardenNotifyConfigMismatch: Info->ClusterStateGeneration=0 clusterStateGeneration=0 Info->ClusterStateGuid=0 clusterStateGuid=2 2026-01-08T15:24:29.311537Z 1 00h00m30.163465s :BS_NODE INFO: {NW51@node_warden_resource.cpp:334} TEvNodeWardenNotifyConfigMismatch: NodeId: 1 ClusterStateGeneration: 0 ClusterStateGuid: 2 2026-01-08T15:24:29.320630Z 1 00h00m40.200000s :STATESTORAGE DEBUG: ProxyRequest::HandleInit ringGroup:0 ev: {EvLookup TabletID: 72057594037932033 Cookie: 0 ProxyOptions: SigNone} 2026-01-08T15:24:29.320733Z 1 00h00m40.200000s :STATESTORAGE DEBUG: Replica::Handle ev: {EvReplicaLookup TabletID: 72057594037932033 Cookie: 0} 2026-01-08T15:24:29.320780Z 1 00h00m40.200000s :STATESTORAGE DEBUG: Replica::Handle ev: {EvReplicaLookup TabletID: 72057594037932033 Cookie: 1} 2026-01-08T15:24:29.320811Z 1 00h00m40.200000s :STATESTORAGE DEBUG: Replica::Handle ev: {EvReplicaLookup TabletID: 72057594037932033 Cookie: 2} 2026-01-08T15:24:29.320844Z 1 00h00m40.200000s :STATESTORAGE DEBUG: Replica::Handle ev: {EvReplicaLookup TabletID: 72057594037932033 Cookie: 3} 2026-01-08T15:24:29.320875Z 1 00h00m40.200000s :STATESTORAGE DEBUG: Replica::Handle ev: {EvReplicaLookup TabletID: 72057594037932033 Cookie: 4} 2026-01-08T15:24:29.320944Z 1 00h00m40.200000s :STATESTORAGE DEBUG: ProxyRequest::HandleLookup ringGroup:0 ev: {EvReplicaInfo Status: 0 TabletID: 72057594037932033 ClusterStateGeneration: 0 ClusterStateGuid: 0 CurrentLeader: [1:297:39] CurrentLeaderTablet: [1:301:41] CurrentGeneration: 2 CurrentStep: 0} 2026-01-08T15:24:29.320996Z 1 00h00m40.200000s :STATESTORAGE DEBUG: ProxyRequest::HandleLookup ringGroup:0 ev: {EvReplicaInfo Status: 0 TabletID: 72057594037932033 ClusterStateGeneration: 0 ClusterStateGuid: 0 CurrentLeader: [1:297:39] CurrentLeaderTablet: [1:301:41] CurrentGeneration: 2 CurrentStep: 0} 2026-01-08T15:24:29.321045Z 1 00h00m40.200000s :STATESTORAGE DEBUG: ProxyRequest::HandleLookup ringGroup:0 ev: {EvReplicaInfo Status: 0 TabletID: 72057594037932033 ClusterStateGeneration: 0 ClusterStateGuid: 0 CurrentLeader: [1:297:39] CurrentLeaderTablet: [1:301:41] CurrentGeneration: 2 CurrentStep: 0} 2026-01-08T15:24:29.329883Z 1 00h00m50.300000s :STATESTORAGE DEBUG: ProxyRequest::HandleInit ringGroup:0 ev: {EvLookup TabletID: 72057594037932033 Cookie: 0 ProxyOptions: SigNone} 2026-01-08T15:24:29.329976Z 1 00h00m50.300000s :STATESTORAGE DEBUG: Replica::Handle ev: {EvReplicaLookup TabletID: 72057594037932033 Cookie: 0} 2026-01-08T15:24:29.330019Z 1 00h00m50.300000s :STATESTORAGE DEBUG: Replica::Handle ev: {EvReplicaLookup TabletID: 72057594037932033 Cookie: 1} 2026-01-08T15:24:29.330049Z 1 00h00m50.300000s :STATESTORAGE DEBUG: Replica::Handle ev: 
{EvReplicaLookup TabletID: 72057594037932033 Cookie: 2} 2026-01-08T15:24:29.330092Z 1 00h00m50.300000s :STATESTORAGE DEBUG: Replica::Handle ev: {EvReplicaLookup TabletID: 72057594037932033 Cookie: 3} 2026-01-08T15:24:29.330127Z 1 00h00m50.300000s :STATESTORAGE DEBUG: Replica::Handle ev: {EvReplicaLookup TabletID: 72057594037932033 Cookie: 4} 2026-01-08T15:24:29.330180Z 1 00h00m50.300000s :STATESTORAGE DEBUG: ProxyRequest::HandleLookup ringGroup:0 ev: {EvReplicaInfo Status: 0 TabletID: 72057594037932033 ClusterStateGeneration: 0 ClusterStateGuid: 0 CurrentLeader: [1:297:39] CurrentLeaderTablet: [1:301:41] CurrentGeneration: 2 CurrentStep: 0} 2026-01-08T15:24:29.330234Z 1 00h00m50.300000s :STATESTORAGE DEBUG: ProxyRequest::HandleLookup ringGroup:0 ev: {EvReplicaInfo Status: 0 TabletID: 72057594037932033 ClusterStateGeneration: 1 ClusterStateGuid: 0 CurrentLeader: [1:297:39] CurrentLeaderTablet: [1:301:41] CurrentGeneration: 2 CurrentStep: 0} 2026-01-08T15:24:29.330271Z 1 00h00m50.300000s :STATESTORAGE DEBUG: StateStorageProxy TEvNodeWardenNotifyConfigMismatch: Info->ClusterStateGeneration=0 clusterStateGeneration=1 Info->ClusterStateGuid=0 clusterStateGuid=0 2026-01-08T15:24:29.330596Z 1 00h00m50.300000s :BS_NODE INFO: {NW51@node_warden_resource.cpp:334} TEvNodeWardenNotifyConfigMismatch: NodeId: 1 ClusterStateGeneration: 1 ClusterStateGuid: 0 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/ut_blobstorage/ut_bridge/unittest >> BridgeGet::PartRestorationAcrossBridge [GOOD] Test command err: RandomSeed# 2802550321258733288 originalGroupIndex# 0 indexOnly# 1 mustRestoreFirst# 1 *** reading from i# 0 *** reading from i# 1 *** reading from i# 2 *** performing bridge get *** reading from i# 0 *** reading from i# 1 *** reading from i# 2 originalGroupIndex# 0 indexOnly# 0 mustRestoreFirst# 1 *** reading from i# 0 *** reading from i# 1 *** reading from i# 2 *** performing bridge get *** reading from i# 0 *** reading from i# 1 *** reading from i# 2 originalGroupIndex# 1 indexOnly# 1 mustRestoreFirst# 1 *** reading from i# 0 *** reading from i# 1 *** reading from i# 2 *** performing bridge get *** reading from i# 0 *** reading from i# 1 *** reading from i# 2 originalGroupIndex# 1 indexOnly# 0 mustRestoreFirst# 1 *** reading from i# 0 *** reading from i# 1 *** reading from i# 2 *** performing bridge get *** reading from i# 0 *** reading from i# 1 *** reading from i# 2 originalGroupIndex# 2 indexOnly# 1 mustRestoreFirst# 1 *** reading from i# 0 *** reading from i# 1 *** reading from i# 2 *** performing bridge get *** reading from i# 0 *** reading from i# 1 *** reading from i# 2 originalGroupIndex# 2 indexOnly# 0 mustRestoreFirst# 1 *** reading from i# 0 *** reading from i# 1 *** reading from i# 2 *** performing bridge get *** reading from i# 0 *** reading from i# 1 *** reading from i# 2 >> TStateStorageRingGroupState::TestStateStorageUpdateSigConfigVersionChanged [GOOD] >> ReadOnlyVDisk::TestReads [GOOD] >> TStateStorageRingGroupState::TestProxyConfigMismatchNotSent [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/ut_blobstorage/ut_statestorage/unittest >> TStateStorage2RingGroups::TestStateStorageReplyOnceWriteOnly [GOOD] Test command err: RandomSeed# 2717413210680283657 2026-01-08T15:24:29.080969Z 1 00h00m00.000000s :BS_NODE DEBUG: {NWDC15@distconf.cpp:427} StateFunc Type# 131077 Sender# [1:130:1] SessionId# [0:0:0] Cookie# 9 2026-01-08T15:24:29.081047Z 1 00h00m00.000000s :BS_NODE DEBUG: {NWDC14@distconf_binding.cpp:223} TEvNodeConnected NodeId# 2 
SessionId# [1:130:1] Cookie# 9 CookieInFlight# true SubscriptionExists# true 2026-01-08T15:24:29.081155Z 5 00h00m00.000000s :BS_NODE DEBUG: {NWDC15@distconf.cpp:427} StateFunc Type# 268639238 Sender# [1:235:51] SessionId# [5:140:1] Cookie# 744450034349519535 2026-01-08T15:24:29.081213Z 5 00h00m00.000000s :BS_NODE DEBUG: {NWDC54@distconf_binding.cpp:373} SubscribeToPeerNode NodeId# 1 SessionId# [5:140:1] Inserted# false Subscription# {SessionId# [0:0:0] SubscriptionCookie# 5} NextSubscribeCookie# 6 2026-01-08T15:24:29.099576Z 5 00h00m00.000000s :BS_NODE DEBUG: {NWDC02@distconf_binding.cpp:611} TEvNodeConfigPush NodeId# 1 Cookie# 744450034349519535 SessionId# [5:140:1] Binding# {1.0/8353576489550390108@[0:0:0]} Record# {Initial: true BoundNodes { NodeId { Host: "127.0.0.7" Port: 19001 NodeId: 7 } Meta { Fingerprint: "\3403\207\365\032>> TStateStorageRingGroupState::TestStateStorageDoubleReply |83.8%| [TM] {BAZEL_UPLOAD} ydb/core/blobstorage/ut_blobstorage/ut_statestorage/unittest ------- [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/ut_blobstorage/ut_statestorage/unittest >> TStateStorage2RingGroups::TestStateStorageReplyOnceWriteOnlyReverseEventsOrder [GOOD] Test command err: RandomSeed# 11091356631706654496 2026-01-08T15:24:29.296042Z 7 00h00m00.000000s :BS_NODE DEBUG: {NWDC15@distconf.cpp:427} StateFunc Type# 268639239 Sender# [1:235:51] SessionId# [7:146:1] Cookie# 16836658760662493661 2026-01-08T15:24:29.296103Z 7 00h00m00.000000s :BS_NODE DEBUG: {NWDC54@distconf_binding.cpp:373} SubscribeToPeerNode NodeId# 1 SessionId# [7:146:1] Inserted# false Subscription# {SessionId# [7:146:1] SubscriptionCookie# 0} NextSubscribeCookie# 7 2026-01-08T15:24:29.302722Z 7 00h00m00.000000s :BS_NODE DEBUG: {NWDC17@distconf_binding.cpp:437} TEvNodeConfigReversePush NodeId# 1 Cookie# 16836658760662493661 SessionId# [7:146:1] Binding# Record# {RootNodeId: 1 } 2026-01-08T15:24:29.302796Z 7 00h00m00.000000s :BS_NODE WARN: {NWDC01@distconf.cpp:423} StateFunc too long Type# 268639239 Duration# 0.006784s 2026-01-08T15:24:29.302851Z 2 00h00m00.000000s :BS_NODE DEBUG: {NWDC15@distconf.cpp:427} StateFunc Type# 268639239 Sender# [1:235:51] SessionId# [2:131:1] Cookie# 12413157415729108278 2026-01-08T15:24:29.302902Z 2 00h00m00.000000s :BS_NODE DEBUG: {NWDC54@distconf_binding.cpp:373} SubscribeToPeerNode NodeId# 1 SessionId# [2:131:1] Inserted# false Subscription# {SessionId# [2:131:1] SubscriptionCookie# 0} NextSubscribeCookie# 4 2026-01-08T15:24:29.302964Z 2 00h00m00.000000s :BS_NODE DEBUG: {NWDC17@distconf_binding.cpp:437} TEvNodeConfigReversePush NodeId# 1 Cookie# 12413157415729108278 SessionId# [2:131:1] Binding# {1.1/12413157415729108278@[2:131:1]} Record# {RootNodeId: 7 } 2026-01-08T15:24:29.303225Z 7 00h00m00.000000s :BS_NODE DEBUG: {NWDC15@distconf.cpp:427} StateFunc Type# 268639238 Sender# [1:235:51] SessionId# [7:146:1] Cookie# 11412617003282388875 2026-01-08T15:24:29.303271Z 7 00h00m00.000000s :BS_NODE DEBUG: {NWDC54@distconf_binding.cpp:373} SubscribeToPeerNode NodeId# 1 SessionId# [7:146:1] Inserted# false Subscription# {SessionId# [7:146:1] SubscriptionCookie# 0} NextSubscribeCookie# 7 2026-01-08T15:24:29.303370Z 7 00h00m00.000000s :BS_NODE DEBUG: {NWDC02@distconf_binding.cpp:611} TEvNodeConfigPush NodeId# 1 Cookie# 11412617003282388875 SessionId# [7:146:1] Binding# Record# {DeletedBoundNodeIds { Host: "127.0.0.7" Port: 19001 NodeId: 7 } } RootNodeId# 7 StorageConfigGeneration# 0 KnownNode# true 2026-01-08T15:24:29.303432Z 7 00h00m00.000000s :BS_NODE DEBUG: 
{NWDC54@distconf_binding.cpp:373} SubscribeToPeerNode NodeId# 1 SessionId# [7:146:1] Inserted# false Subscription# {SessionId# [7:146:1] SubscriptionCookie# 0} NextSubscribeCookie# 7 2026-01-08T15:24:29.303472Z 7 00h00m00.000000s :BS_NODE DEBUG: {NWDC34@distconf_binding.cpp:551} DeleteBound RefererNodeId# 1 NodeId# 127.0.0.7:19001/7 2026-01-08T15:24:29.303603Z 3 00h00m00.000000s :BS_NODE DEBUG: {NWDC15@distconf.cpp:427} StateFunc Type# 268639239 Sender# [2:242:20] SessionId# [3:109:2] Cookie# 4660315997723564173 2026-01-08T15:24:29.303632Z 3 00h00m00.000000s :BS_NODE DEBUG: {NWDC54@distconf_binding.cpp:373} SubscribeToPeerNode NodeId# 2 SessionId# [3:109:2] Inserted# false Subscription# {SessionId# [3:109:2] SubscriptionCookie# 0} NextSubscribeCookie# 5 2026-01-08T15:24:29.303688Z 3 00h00m00.000000s :BS_NODE DEBUG: {NWDC17@distconf_binding.cpp:437} TEvNodeConfigReversePush NodeId# 2 Cookie# 4660315997723564173 SessionId# [3:109:2] Binding# {2.1/4660315997723564173@[3:109:2]} Record# {RootNodeId: 7 } 2026-01-08T15:24:29.303733Z 5 00h00m00.000000s :BS_NODE DEBUG: {NWDC15@distconf.cpp:427} StateFunc Type# 268639239 Sender# [2:242:20] SessionId# [5:115:2] Cookie# 17789694012581895722 2026-01-08T15:24:29.303761Z 5 00h00m00.000000s :BS_NODE DEBUG: {NWDC54@distconf_binding.cpp:373} SubscribeToPeerNode NodeId# 2 SessionId# [5:115:2] Inserted# false Subscription# {SessionId# [5:115:2] SubscriptionCookie# 0} NextSubscribeCookie# 7 2026-01-08T15:24:29.303798Z 5 00h00m00.000000s :BS_NODE DEBUG: {NWDC17@distconf_binding.cpp:437} TEvNodeConfigReversePush NodeId# 2 Cookie# 17789694012581895722 SessionId# [5:115:2] Binding# {2.1/17789694012581895722@[5:115:2]} Record# {RootNodeId: 7 } 2026-01-08T15:24:29.306294Z 9 00h00m00.000000s :BS_NODE DEBUG: {NWDC15@distconf.cpp:427} StateFunc Type# 268639239 Sender# [3:249:20] SessionId# [9:104:3] Cookie# 1422572452306587799 2026-01-08T15:24:29.306363Z 9 00h00m00.000000s :BS_NODE DEBUG: {NWDC54@distconf_binding.cpp:373} SubscribeToPeerNode NodeId# 3 SessionId# [9:104:3] Inserted# false Subscription# {SessionId# [9:104:3] SubscriptionCookie# 0} NextSubscribeCookie# 2 2026-01-08T15:24:29.306440Z 9 00h00m00.000000s :BS_NODE DEBUG: {NWDC17@distconf_binding.cpp:437} TEvNodeConfigReversePush NodeId# 3 Cookie# 1422572452306587799 SessionId# [9:104:3] Binding# {3.1/1422572452306587799@[9:104:3]} Record# {RootNodeId: 7 } 2026-01-08T15:24:29.306482Z 4 00h00m00.000000s :BS_NODE DEBUG: {NWDC15@distconf.cpp:427} StateFunc Type# 268639239 Sender# [3:249:20] SessionId# [4:89:3] Cookie# 13297601091343938295 2026-01-08T15:24:29.306521Z 4 00h00m00.000000s :BS_NODE DEBUG: {NWDC54@distconf_binding.cpp:373} SubscribeToPeerNode NodeId# 3 SessionId# [4:89:3] Inserted# false Subscription# {SessionId# [4:89:3] SubscriptionCookie# 0} NextSubscribeCookie# 3 2026-01-08T15:24:29.306574Z 4 00h00m00.000000s :BS_NODE DEBUG: {NWDC17@distconf_binding.cpp:437} TEvNodeConfigReversePush NodeId# 3 Cookie# 13297601091343938295 SessionId# [4:89:3] Binding# {3.1/13297601091343938295@[4:89:3]} Record# {RootNodeId: 7 } 2026-01-08T15:24:29.306611Z 8 00h00m00.000000s :BS_NODE DEBUG: {NWDC15@distconf.cpp:427} StateFunc Type# 268639239 Sender# [5:263:20] SessionId# [8:61:5] Cookie# 11613173692299051639 2026-01-08T15:24:29.306646Z 8 00h00m00.000000s :BS_NODE DEBUG: {NWDC54@distconf_binding.cpp:373} SubscribeToPeerNode NodeId# 5 SessionId# [8:61:5] Inserted# false Subscription# {SessionId# [8:61:5] SubscriptionCookie# 0} NextSubscribeCookie# 2 2026-01-08T15:24:29.306688Z 8 00h00m00.000000s :BS_NODE DEBUG: 
{NWDC17@distconf_binding.cpp:437} TEvNodeConfigReversePush NodeId# 5 Cookie# 11613173692299051639 SessionId# [8:61:5] Binding# {5.1/11613173692299051639@[8:61:5]} Record# {RootNodeId: 7 } 2026-01-08T15:24:29.306720Z 6 00h00m00.000000s :BS_NODE DEBUG: {NWDC15@distconf.cpp:427} StateFunc Type# 268639239 Sender# [5:263:20] SessionId# [6:55:5] Cookie# 2551927778976786825 2026-01-08T15:24:29.306757Z 6 00h00m00.000000s :BS_NODE DEBUG: {NWDC54@distconf_binding.cpp:373} SubscribeToPeerNode NodeId# 5 SessionId# [6:55:5] Inserted# false Subscription# {SessionId# [6:55:5] SubscriptionCookie# 0} NextSubscribeCookie# 2 2026-01-08T15:24:29.306795Z 6 00h00m00.000000s :BS_NODE DEBUG: {NWDC17@distconf_binding.cpp:437} TEvNodeConfigReversePush NodeId# 5 Cookie# 2551927778976786825 SessionId# [6:55:5] Binding# {5.1/2551927778976786825@[6:55:5]} Record# {RootNodeId: 7 } 2026-01-08T15:24:29.314900Z 1 00h00m00.000000s :STATESTORAGE DEBUG: Replica::Handle ev: {EvReplicaLookup TabletID: 72057594037932033 Cookie: 1} 2026-01-08T15:24:29.314958Z 1 00h00m00.000000s :STATESTORAGE DEBUG: Replica::Handle ev: {EvReplicaLookup TabletID: 72057594037932033 Cookie: 1} 2026-01-08T15:24:29.314986Z 1 00h00m00.000000s :STATESTORAGE DEBUG: Replica::Handle ev: {EvReplicaLookup TabletID: 72057594037932033 Cookie: 1} 2026-01-08T15:24:29.315027Z 1 00h00m00.000000s :STATESTORAGE DEBUG: Replica::Handle ev: {EvReplicaLookup TabletID: 72057594037932033 Cookie: 1} 2026-01-08T15:24:29.315054Z 1 00h00m00.000000s :STATESTORAGE DEBUG: Replica::Handle ev: {EvReplicaLookup TabletID: 72057594037932033 Cookie: 1} 2026-01-08T15:24:29.315080Z 1 00h00m00.000000s :STATESTORAGE DEBUG: Replica::Handle ev: {EvReplicaLookup TabletID: 72057594037932033 Cookie: 1} 2026-01-08T15:24:29.315110Z 1 00h00m00.000000s :STATESTORAGE DEBUG: Replica::Handle ev: {EvReplicaLookup TabletID: 72057594037932033 Cookie: 1} 2026-01-08T15:24:29.315139Z 1 00h00m00.000000s :STATESTORAGE DEBUG: Replica::Handle ev: {EvReplicaLookup TabletID: 72057594037932033 Cookie: 1} 2026-01-08T15:24:29.315162Z 1 00h00m00.000000s :STATESTORAGE DEBUG: Replica::Handle ev: {EvReplicaLookup TabletID: 72057594037932033 Cookie: 1} 2026-01-08T15:24:29.315212Z 1 00h00m00.000000s :STATESTORAGE DEBUG: Replica::Handle ev: {EvReplicaLookup TabletID: 72057594037932033 Cookie: 1} 2026-01-08T15:24:29.318095Z 1 00h00m00.002048s :BS_NODE DEBUG: {NWDC15@distconf.cpp:427} StateFunc Type# 268639257 Sender# [1:340:56] SessionId# [0:0:0] Cookie# 0 2026-01-08T15:24:29.318338Z 1 00h00m00.002048s :STATESTORAGE DEBUG: RingGroupProxyRequest::HandleInit ev: {EvLookup TabletID: 72057594037936131 Cookie: 0 ProxyOptions: SigNone} 2026-01-08T15:24:29.318419Z 7 00h00m00.002048s :BS_NODE DEBUG: {NWDC15@distconf.cpp:427} StateFunc Type# 268639238 Sender# [1:235:51] SessionId# [7:146:1] Cookie# 11412617003282388875 2026-01-08T15:24:29.318466Z 7 00h00m00.002048s :BS_NODE DEBUG: {NWDC54@distconf_binding.cpp:373} SubscribeToPeerNode NodeId# 1 SessionId# [7:146:1] Inserted# false Subscription# {SessionId# [7:146:1] SubscriptionCookie# 0} NextSubscribeCookie# 7 2026-01-08T15:24:29.318558Z 7 00h00m00.002048s :BS_NODE DEBUG: {NWDC02@distconf_binding.cpp:611} TEvNodeConfigPush NodeId# 1 Cookie# 11412617003282388875 SessionId# [7:146:1] Binding# Record# {CacheUpdate { } } RootNodeId# 7 StorageConfigGeneration# 0 KnownNode# true 2026-01-08T15:24:29.318606Z 7 00h00m00.002048s :BS_NODE DEBUG: {NWDC54@distconf_binding.cpp:373} SubscribeToPeerNode NodeId# 1 SessionId# [7:146:1] Inserted# false Subscription# {SessionId# [7:146:1] 
SubscriptionCookie# 0} NextSubscribeCookie# 7 2026-01-08T15:24:29.318664Z 2 00h00m00.002048s :BS_NODE DEBUG: {NWDC15@distconf.cpp:427} StateFunc Type# 268639239 Sender# [1:235:51] SessionId# [2:131:1] Cookie# 12413157415729108278 2026-01-08T15:24:29.318694Z 2 00h00m00.002048s :BS_NODE DEBUG: {NWDC54@distconf_binding.cpp:373} SubscribeToPeerNode NodeId# 1 SessionId# [2:131:1] Inserted# false Subscription# {SessionId# [2:131:1] SubscriptionCookie# 0} NextSubscribeCookie# 4 2026-01-08T15:24:29.318757Z 2 00h00m00.002048s :BS_NODE DEBUG: {NWDC17@distconf_binding.cpp:437} TEvNodeConfigReversePush NodeId# 1 Cookie# 12413157415729108278 SessionId# [2:131:1] Binding# {1.7/12413157415729108278@[2:131:1]} Record# {RootNodeId: 7 CacheUpdate { } } 2026-01-08T15:24:29.318804Z 1 00h00m00.002048s :STATESTORAGE DEBUG: ProxyRequest::HandleInit ringGroup:0 ev: {EvLookup TabletID: 72057594037936131 Cookie: 0 ProxyOptions: SigNone} 2026-01-08T15:24:29.318863Z 1 00h00m00.002048s :STATESTORAGE DEBUG: ProxyRequest::HandleInit ringGroup:1 ev: {EvLookup TabletID: 72057594037936131 Cookie: 0 ProxyOptions: SigNone} 2026-01-08T15:24:29.318980Z 1 00h00m00.002048s :STATESTORAGE DEBUG: Replica::Handle ev: {EvReplicaLookup TabletID: 72057594037936131 Cookie: 0} 2026-01-08T15:24:29.319025Z 1 00h00m00.002048s :STATESTORAGE DEBUG: Replica::Handle ev: {EvReplicaLookup TabletID: 72057594037936131 Cookie: 1} 2026-01-08T15:24:29.319067Z 1 00h00m00.002048s :STATESTORAGE DEBUG: Replica::Handle ev: {EvReplicaLookup TabletID: 72057594037936131 Cookie: 2} 2026-01-08T15:24:29.319096Z 1 00h00m00.002048s :STATESTORAGE DEBUG: Replica::Handle ev: {EvReplicaLookup TabletID: 72057594037936131 Cookie: 3} 2026-01-08T15:24:29.319121Z 1 00h00m00.002048s :STATESTORAGE DEBUG: Replica::Handle ev: {EvReplicaLookup TabletID: 72057594037936131 Cookie: 4} 2026-01-08T15:24:29.319143Z 1 00h00m00.002048s :STATESTORAGE DEBUG: Replica::Handle ev: {EvReplicaLookup TabletID: 72057594037936131 Cookie: 0} 2026-01-08T15:24:29.319192Z 1 00h00m00.002048s :STATESTORAGE DEBUG: Replica::Handle ev: {EvReplicaLookup TabletID: 72057594037936131 Cookie: 1} 2026-01-08T15:24:29.319221Z 1 00h00m00.002048s :STATESTORAGE DEBUG: Replica::Handle ev: {EvReplicaLookup TabletID: 72057594037936131 Cookie: 2} 2026-01-08T15:24:29.319253Z 1 00h00m00.002048s :STATESTORAGE DEBUG: Replica::Handle ev: {EvReplicaLookup TabletID: 72057594037936131 Cookie: 3} 2026-01-08T15:24:29.319282Z 1 00h00m00.002048s :STATESTORAGE DEBUG: Replica::Handle ev: {EvReplicaLookup TabletID: 72057594037936131 Cookie: 4} 2026-01-08T15:24:29.319331Z 1 00h ... 
94037936131 Cookie: 0 ProxyOptions: SigNone} 2026-01-08T15:24:29.414034Z 1 00h00m08.744301s :STATESTORAGE DEBUG: Replica::Handle ev: {EvReplicaLookup TabletID: 72057594037936131 Cookie: 0} 2026-01-08T15:24:29.414069Z 1 00h00m08.744301s :STATESTORAGE DEBUG: Replica::Handle ev: {EvReplicaLookup TabletID: 72057594037936131 Cookie: 1} 2026-01-08T15:24:29.414112Z 1 00h00m08.744301s :STATESTORAGE DEBUG: Replica::Handle ev: {EvReplicaLookup TabletID: 72057594037936131 Cookie: 2} 2026-01-08T15:24:29.414140Z 1 00h00m08.744301s :STATESTORAGE DEBUG: Replica::Handle ev: {EvReplicaLookup TabletID: 72057594037936131 Cookie: 3} 2026-01-08T15:24:29.414166Z 1 00h00m08.744301s :STATESTORAGE DEBUG: Replica::Handle ev: {EvReplicaLookup TabletID: 72057594037936131 Cookie: 4} 2026-01-08T15:24:29.414190Z 1 00h00m08.744301s :STATESTORAGE DEBUG: Replica::Handle ev: {EvReplicaLookup TabletID: 72057594037936131 Cookie: 0} 2026-01-08T15:24:29.414216Z 1 00h00m08.744301s :STATESTORAGE DEBUG: Replica::Handle ev: {EvReplicaLookup TabletID: 72057594037936131 Cookie: 1} 2026-01-08T15:24:29.414248Z 1 00h00m08.744301s :STATESTORAGE DEBUG: Replica::Handle ev: {EvReplicaLookup TabletID: 72057594037936131 Cookie: 2} 2026-01-08T15:24:29.414276Z 1 00h00m08.744301s :STATESTORAGE DEBUG: Replica::Handle ev: {EvReplicaLookup TabletID: 72057594037936131 Cookie: 3} 2026-01-08T15:24:29.414300Z 1 00h00m08.744301s :STATESTORAGE DEBUG: Replica::Handle ev: {EvReplicaLookup TabletID: 72057594037936131 Cookie: 4} 2026-01-08T15:24:29.414890Z 1 00h00m08.744301s :STATESTORAGE DEBUG: ProxyRequest::HandleLookup ringGroup:0 ev: {EvReplicaInfo Status: 1 TabletID: 72057594037936131 ClusterStateGeneration: 0 ClusterStateGuid: 0} 2026-01-08T15:24:29.414928Z 1 00h00m08.744301s :STATESTORAGE DEBUG: ProxyRequest::HandleLookup ringGroup:0 ev: {EvReplicaInfo Status: 1 TabletID: 72057594037936131 ClusterStateGeneration: 0 ClusterStateGuid: 0} 2026-01-08T15:24:29.414955Z 1 00h00m08.744301s :STATESTORAGE DEBUG: ProxyRequest::HandleLookup ringGroup:0 ev: {EvReplicaInfo Status: 1 TabletID: 72057594037936131 ClusterStateGeneration: 0 ClusterStateGuid: 0} 2026-01-08T15:24:29.414999Z 1 00h00m08.744301s :STATESTORAGE DEBUG: ProxyRequest::HandleLookup ringGroup:0 ev: {EvReplicaInfo Status: 1 TabletID: 72057594037936131 ClusterStateGeneration: 0 ClusterStateGuid: 0} 2026-01-08T15:24:29.415042Z 1 00h00m08.744301s :STATESTORAGE DEBUG: ProxyRequest::HandleLookup ringGroup:0 ev: {EvReplicaInfo Status: 1 TabletID: 72057594037936131 ClusterStateGeneration: 0 ClusterStateGuid: 0} 2026-01-08T15:24:29.415089Z 1 00h00m08.744301s :STATESTORAGE DEBUG: ProxyRequest::HandleLookup ringGroup:1 ev: {EvReplicaInfo Status: 1 TabletID: 72057594037936131 ClusterStateGeneration: 0 ClusterStateGuid: 0} 2026-01-08T15:24:29.415115Z 1 00h00m08.744301s :STATESTORAGE DEBUG: ProxyRequest::HandleLookup ringGroup:1 ev: {EvReplicaInfo Status: 1 TabletID: 72057594037936131 ClusterStateGeneration: 0 ClusterStateGuid: 0} 2026-01-08T15:24:29.415155Z 1 00h00m08.744301s :STATESTORAGE DEBUG: ProxyRequest::HandleLookup ringGroup:1 ev: {EvReplicaInfo Status: 1 TabletID: 72057594037936131 ClusterStateGeneration: 0 ClusterStateGuid: 0} 2026-01-08T15:24:29.415195Z 1 00h00m08.744301s :STATESTORAGE DEBUG: ProxyRequest::HandleLookup ringGroup:1 ev: {EvReplicaInfo Status: 1 TabletID: 72057594037936131 ClusterStateGeneration: 0 ClusterStateGuid: 0} 2026-01-08T15:24:29.415225Z 1 00h00m08.744301s :STATESTORAGE DEBUG: ProxyRequest::HandleLookup ringGroup:1 ev: {EvReplicaInfo Status: 1 TabletID: 72057594037936131 
ClusterStateGeneration: 0 ClusterStateGuid: 0} 2026-01-08T15:24:29.415298Z 1 00h00m08.744301s :STATESTORAGE DEBUG: RingGroupProxyRequest::HandleTEvInfo ev: {EvInfo Status: 5 TabletID: 72057594037936131 Cookie: 0 CurrentLeader: [0:0:0] CurrentLeaderTablet: [0:0:0] CurrentGeneration: 0 CurrentStep: 0 Locked: false LockedFor: 0 Signature: { Size: 5 Signature: {{[1:24343667:0] : 154}, {[1:4398070854771:0] : 158}, {[1:2199047599219:0] : 156}, {[1:3298559226995:0] : 157}, {[1:1099535971443:0] : 155}}}} 2026-01-08T15:24:29.415353Z 1 00h00m08.744301s :STATESTORAGE DEBUG: RingGroupProxyRequest::HandleTEvInfo ev: {EvInfo Status: 5 TabletID: 72057594037936131 Cookie: 0 CurrentLeader: [0:0:0] CurrentLeaderTablet: [0:0:0] CurrentGeneration: 0 CurrentStep: 0 Locked: false LockedFor: 0 Signature: { Size: 5 Signature: {{[1:9895628993651:0] : 163}, {[1:7696605738099:0] : 161}, {[1:8796117365875:0] : 162}, {[1:6597094110323:0] : 160}, {[1:5497582482547:0] : 159}}}} 2026-01-08T15:24:29.415421Z 1 00h00m08.744301s :STATESTORAGE DEBUG: RingGroupProxyRequest::Reply TEvInfo ev: {EvInfo Status: 5 TabletID: 72057594037936131 Cookie: 0 CurrentLeader: [0:0:0] CurrentLeaderTablet: [0:0:0] CurrentGeneration: 0 CurrentStep: 0 Locked: false LockedFor: 0 Signature: { Size: 10 Signature: {{[1:8796117365875:0] : 162}, {[1:5497582482547:0] : 159}, {[1:2199047599219:0] : 156}, {[1:9895628993651:0] : 163}, {[1:6597094110323:0] : 160}, {[1:3298559226995:0] : 157}, {[1:24343667:0] : 154}, {[1:7696605738099:0] : 161}, {[1:4398070854771:0] : 158}, {[1:1099535971443:0] : 155}}}} 2026-01-08T15:24:29.417440Z 1 00h00m10.002048s :STATESTORAGE DEBUG: RingGroupProxyRequest::HandleInit ev: {EvLookup TabletID: 72057594037932033 Cookie: 0 ProxyOptions: SigAsync} 2026-01-08T15:24:29.417518Z 1 00h00m10.002048s :STATESTORAGE DEBUG: ProxyRequest::HandleInit ringGroup:0 ev: {EvLookup TabletID: 72057594037932033 Cookie: 0 ProxyOptions: SigAsync} 2026-01-08T15:24:29.417582Z 1 00h00m10.002048s :STATESTORAGE DEBUG: ProxyRequest::HandleInit ringGroup:1 ev: {EvLookup TabletID: 72057594037932033 Cookie: 0 ProxyOptions: SigAsync} 2026-01-08T15:24:29.417624Z 1 00h00m10.002048s :STATESTORAGE DEBUG: Replica::Handle ev: {EvReplicaLookup TabletID: 72057594037932033 Cookie: 0} 2026-01-08T15:24:29.417664Z 1 00h00m10.002048s :STATESTORAGE DEBUG: Replica::Handle ev: {EvReplicaLookup TabletID: 72057594037932033 Cookie: 1} 2026-01-08T15:24:29.417693Z 1 00h00m10.002048s :STATESTORAGE DEBUG: Replica::Handle ev: {EvReplicaLookup TabletID: 72057594037932033 Cookie: 2} 2026-01-08T15:24:29.417726Z 1 00h00m10.002048s :STATESTORAGE DEBUG: Replica::Handle ev: {EvReplicaLookup TabletID: 72057594037932033 Cookie: 3} 2026-01-08T15:24:29.417767Z 1 00h00m10.002048s :STATESTORAGE DEBUG: Replica::Handle ev: {EvReplicaLookup TabletID: 72057594037932033 Cookie: 4} 2026-01-08T15:24:29.417810Z 1 00h00m10.002048s :STATESTORAGE DEBUG: Replica::Handle ev: {EvReplicaLookup TabletID: 72057594037932033 Cookie: 0} 2026-01-08T15:24:29.417837Z 1 00h00m10.002048s :STATESTORAGE DEBUG: Replica::Handle ev: {EvReplicaLookup TabletID: 72057594037932033 Cookie: 1} 2026-01-08T15:24:29.417861Z 1 00h00m10.002048s :STATESTORAGE DEBUG: Replica::Handle ev: {EvReplicaLookup TabletID: 72057594037932033 Cookie: 2} 2026-01-08T15:24:29.417902Z 1 00h00m10.002048s :STATESTORAGE DEBUG: Replica::Handle ev: {EvReplicaLookup TabletID: 72057594037932033 Cookie: 3} 2026-01-08T15:24:29.417929Z 1 00h00m10.002048s :STATESTORAGE DEBUG: Replica::Handle ev: {EvReplicaLookup TabletID: 72057594037932033 Cookie: 4} 
2026-01-08T15:24:29.417974Z 1 00h00m10.002048s :STATESTORAGE DEBUG: ProxyRequest::HandleLookup ringGroup:0 ev: {EvReplicaInfo Status: 0 TabletID: 72057594037932033 ClusterStateGeneration: 0 ClusterStateGuid: 0 CurrentLeader: [1:332:54] CurrentLeaderTablet: [1:340:56] CurrentGeneration: 2 CurrentStep: 0} 2026-01-08T15:24:29.418019Z 1 00h00m10.002048s :STATESTORAGE DEBUG: ProxyRequest::HandleLookup ringGroup:0 ev: {EvReplicaInfo Status: 0 TabletID: 72057594037932033 ClusterStateGeneration: 0 ClusterStateGuid: 0 CurrentLeader: [1:332:54] CurrentLeaderTablet: [1:340:56] CurrentGeneration: 2 CurrentStep: 0} 2026-01-08T15:24:29.418051Z 1 00h00m10.002048s :STATESTORAGE DEBUG: ProxyRequest::HandleLookup ringGroup:0 ev: {EvReplicaInfo Status: 0 TabletID: 72057594037932033 ClusterStateGeneration: 0 ClusterStateGuid: 0 CurrentLeader: [1:332:54] CurrentLeaderTablet: [1:340:56] CurrentGeneration: 2 CurrentStep: 0} 2026-01-08T15:24:29.418097Z 1 00h00m10.002048s :STATESTORAGE DEBUG: ProxyRequest::HandleUpdateSig ringGroup:0 ev: {EvReplicaInfo Status: 0 TabletID: 72057594037932033 ClusterStateGeneration: 0 ClusterStateGuid: 0 CurrentLeader: [1:332:54] CurrentLeaderTablet: [1:340:56] CurrentGeneration: 2 CurrentStep: 0} 2026-01-08T15:24:29.418147Z 1 00h00m10.002048s :STATESTORAGE DEBUG: ProxyRequest::HandleUpdateSig ringGroup:0 ev: {EvReplicaInfo Status: 0 TabletID: 72057594037932033 ClusterStateGeneration: 0 ClusterStateGuid: 0 CurrentLeader: [1:332:54] CurrentLeaderTablet: [1:340:56] CurrentGeneration: 2 CurrentStep: 0} 2026-01-08T15:24:29.418209Z 1 00h00m10.002048s :STATESTORAGE DEBUG: ProxyRequest::HandleLookup ringGroup:1 ev: {EvReplicaInfo Status: 0 TabletID: 72057594037932033 ClusterStateGeneration: 0 ClusterStateGuid: 0 CurrentLeader: [1:332:54] CurrentLeaderTablet: [1:340:56] CurrentGeneration: 2 CurrentStep: 0} 2026-01-08T15:24:29.418245Z 1 00h00m10.002048s :STATESTORAGE DEBUG: ProxyRequest::HandleLookup ringGroup:1 ev: {EvReplicaInfo Status: 0 TabletID: 72057594037932033 ClusterStateGeneration: 0 ClusterStateGuid: 0 CurrentLeader: [1:332:54] CurrentLeaderTablet: [1:340:56] CurrentGeneration: 2 CurrentStep: 0} 2026-01-08T15:24:29.418277Z 1 00h00m10.002048s :STATESTORAGE DEBUG: ProxyRequest::HandleLookup ringGroup:1 ev: {EvReplicaInfo Status: 0 TabletID: 72057594037932033 ClusterStateGeneration: 0 ClusterStateGuid: 0 CurrentLeader: [1:332:54] CurrentLeaderTablet: [1:340:56] CurrentGeneration: 2 CurrentStep: 0} 2026-01-08T15:24:29.418311Z 1 00h00m10.002048s :STATESTORAGE DEBUG: ProxyRequest::HandleUpdateSig ringGroup:1 ev: {EvReplicaInfo Status: 0 TabletID: 72057594037932033 ClusterStateGeneration: 0 ClusterStateGuid: 0 CurrentLeader: [1:332:54] CurrentLeaderTablet: [1:340:56] CurrentGeneration: 2 CurrentStep: 0} 2026-01-08T15:24:29.418356Z 1 00h00m10.002048s :STATESTORAGE DEBUG: ProxyRequest::HandleUpdateSig ringGroup:1 ev: {EvReplicaInfo Status: 0 TabletID: 72057594037932033 ClusterStateGeneration: 0 ClusterStateGuid: 0 CurrentLeader: [1:332:54] CurrentLeaderTablet: [1:340:56] CurrentGeneration: 2 CurrentStep: 0} 2026-01-08T15:24:29.418441Z 1 00h00m10.002048s :STATESTORAGE DEBUG: RingGroupProxyRequest::HandleTEvInfo ev: {EvInfo Status: 0 TabletID: 72057594037932033 Cookie: 0 CurrentLeader: [1:332:54] CurrentLeaderTablet: [1:340:56] CurrentGeneration: 2 CurrentStep: 0 Locked: false LockedFor: 0 Signature: { Size: 3 Signature: {{[1:24343667:0] : 154}, {[1:2199047599219:0] : 156}, {[1:1099535971443:0] : 155}}}} 2026-01-08T15:24:29.418514Z 1 00h00m10.002048s :STATESTORAGE DEBUG: 
RingGroupProxyRequest::HandleTEvInfo ev: {EvInfo Status: 0 TabletID: 72057594037932033 Cookie: 0 CurrentLeader: [1:332:54] CurrentLeaderTablet: [1:340:56] CurrentGeneration: 2 CurrentStep: 0 Locked: false LockedFor: 0 Signature: { Size: 3 Signature: {{[1:7696605738099:0] : 161}, {[1:6597094110323:0] : 160}, {[1:5497582482547:0] : 159}}}} 2026-01-08T15:24:29.418603Z 1 00h00m10.002048s :STATESTORAGE DEBUG: RingGroupProxyRequest::Reply TEvInfo ev: {EvInfo Status: 0 TabletID: 72057594037932033 Cookie: 0 CurrentLeader: [1:332:54] CurrentLeaderTablet: [1:340:56] CurrentGeneration: 2 CurrentStep: 0 Locked: false LockedFor: 0 Signature: { Size: 8 Signature: {{[1:5497582482547:0] : 159}, {[1:2199047599219:0] : 156}, {[1:6597094110323:0] : 160}, {[1:3298559226995:0] : 157}, {[1:24343667:0] : 154}, {[1:7696605738099:0] : 161}, {[1:4398070854771:0] : 158}, {[1:1099535971443:0] : 155}}}} 2026-01-08T15:24:29.418688Z 1 00h00m10.002048s :STATESTORAGE DEBUG: RingGroupProxyRequest::Reply TEvUpdateSignature ev: {EvUpdateSignature TabletID: 72057594037932033 Signature: { Size: 10 Signature: {{[1:8796117365875:0] : 162}, {[1:5497582482547:0] : 159}, {[1:2199047599219:0] : 156}, {[1:9895628993651:0] : 163}, {[1:6597094110323:0] : 160}, {[1:3298559226995:0] : 157}, {[1:24343667:0] : 154}, {[1:7696605738099:0] : 161}, {[1:4398070854771:0] : 158}, {[1:1099535971443:0] : 155}}}} |83.8%| [TM] {BAZEL_UPLOAD} ydb/core/blobstorage/ut_blobstorage/ut_bridge/unittest >> SysViewQueryHistory::ServiceQueryHistoryAdd [GOOD] >> SysViewQueryHistory::StableMerge2 [GOOD] |83.8%| [TM] {BAZEL_UPLOAD} ydb/core/blobstorage/ut_blobstorage/ut_statestorage/unittest ------- [TS] {asan, default-linux-x86_64, release} ydb/core/wrappers/ut/unittest >> TS3WrapperTests::AbortUnknownUpload [GOOD] Test command err: 2026-01-08T15:24:29.187372Z node 1 :S3_WRAPPER NOTICE: s3_storage.h:75: Request: uuid# BA8F1C7F-2BD5-423D-A801-4F4BEDB89EC4, request# AbortMultipartUpload { Bucket: TEST Key: key UploadId: uploadId } REQUEST: DELETE /TEST/key?uploadId=uploadId HTTP/1.1 HEADERS: Host: localhost:18317 Accept: */* Connection: Upgrade, HTTP2-Settings Upgrade: h2c HTTP2-Settings: AAMAAABkAAQAoAAAAAIAAAAA amz-sdk-invocation-id: 669F5198-C15F-4DF3-84FB-87DE02EC76C8 amz-sdk-request: attempt=1 content-type: application/xml user-agent: aws-sdk-cpp/1.11.37 Linux/5.15.0-157-generic x86_64 Clang/20.1.8 x-amz-api-version: 2006-03-01 S3_MOCK::HttpServeAction: 6 / /TEST/key / uploadId=uploadId 2026-01-08T15:24:29.215301Z node 1 :S3_WRAPPER NOTICE: s3_storage.h:63: Response: uuid# BA8F1C7F-2BD5-423D-A801-4F4BEDB89EC4, response# >> TBsVDiskRangeHuge::Simple3PutRangeGetAllBackwardCompaction [GOOD] >> TBsVDiskRangeHuge::Simple3PutRangeGetMiddleBackwardCompaction >> TBsOther1::ChaoticParallelWrite [GOOD] >> TBsOther2::ChaoticParallelWrite_SkeletonFrontQueuesOverload >> TStateStorage2RingGroups::TestStateStorageReplyOnce >> TBsVDiskRange::Simple3PutRangeGetMiddleBackwardFresh [GOOD] >> TBsVDiskRange::Simple3PutRangeGetMiddleBackwardCompaction >> TStateStorageRingGroupState::TestStateStorageUpdateSig |83.8%| [TS] {BAZEL_UPLOAD} ydb/core/wrappers/ut/unittest |83.8%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/blobstorage/vdisk/hulldb/base/ut/ydb-core-blobstorage-vdisk-hulldb-base-ut >> TStateStorageRingGroupState::TestStateStorageDoubleReply [GOOD] >> SysViewQueryHistory::TopReadBytesAdd |83.8%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/blobstorage/vdisk/hulldb/base/ut/ydb-core-blobstorage-vdisk-hulldb-base-ut |83.8%| [LD] {RESULT} 
$(B)/ydb/core/blobstorage/vdisk/hulldb/base/ut/ydb-core-blobstorage-vdisk-hulldb-base-ut >> SysViewQueryHistory::TopReadBytesAdd [GOOD] >> TStateStorage2RingGroups::TestStateStorageReplyOnce [GOOD] >> TStateStorageRingGroupState::TestStateStorageUpdateSig [GOOD] |83.8%| [TM] {asan, default-linux-x86_64, release} ydb/core/sys_view/service/ut/unittest >> SysViewQueryHistory::ServiceQueryHistoryAdd [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/ut_blobstorage/ut_statestorage/unittest >> TStateStorageRingGroupState::TestStateStorageUpdateSigConfigVersionChanged [GOOD] Test command err: RandomSeed# 14795500574186978529 2026-01-08T15:24:30.445902Z 4 00h00m00.000000s :BS_NODE DEBUG: {NWDC15@distconf.cpp:427} StateFunc Type# 268639238 Sender# [5:248:20] SessionId# [4:70:4] Cookie# 14601657833513671867 2026-01-08T15:24:30.445984Z 4 00h00m00.000000s :BS_NODE DEBUG: {NWDC54@distconf_binding.cpp:373} SubscribeToPeerNode NodeId# 5 SessionId# [4:70:4] Inserted# false Subscription# {SessionId# [4:70:4] SubscriptionCookie# 0} NextSubscribeCookie# 7 2026-01-08T15:24:30.472391Z 4 00h00m00.000000s :BS_NODE DEBUG: {NWDC02@distconf_binding.cpp:611} TEvNodeConfigPush NodeId# 5 Cookie# 14601657833513671867 SessionId# [4:70:4] Binding# {8.0/17897728959332712829@[0:0:0]} Record# {BoundNodes { NodeId { Host: "127.0.0.8" Port: 19001 NodeId: 8 } Meta { Fingerprint: "\3403\207\365\032>ClusterStateGeneration=0 clusterStateGeneration=10 Info->ClusterStateGuid=0 clusterStateGuid=0 2026-01-08T15:24:30.689365Z 1 00h00m10.002048s :BS_NODE INFO: {NW51@node_warden_resource.cpp:334} TEvNodeWardenNotifyConfigMismatch: NodeId: 1 ClusterStateGeneration: 10 ClusterStateGuid: 0 2026-01-08T15:24:30.695488Z 1 00h00m17.647821s :STATESTORAGE DEBUG: ProxyRequest::HandleInit ringGroup:0 ev: {EvLookup TabletID: 72057594037936131 Cookie: 0 ProxyOptions: SigNone} 2026-01-08T15:24:30.695583Z 1 00h00m17.647821s :STATESTORAGE DEBUG: Replica::Handle ev: {EvReplicaLookup TabletID: 72057594037936131 Cookie: 0} 2026-01-08T15:24:30.695632Z 1 00h00m17.647821s :STATESTORAGE DEBUG: Replica::Handle ev: {EvReplicaLookup TabletID: 72057594037936131 Cookie: 1} 2026-01-08T15:24:30.695662Z 1 00h00m17.647821s :STATESTORAGE DEBUG: Replica::Handle ev: {EvReplicaLookup TabletID: 72057594037936131 Cookie: 2} 2026-01-08T15:24:30.695692Z 1 00h00m17.647821s :STATESTORAGE DEBUG: Replica::Handle ev: {EvReplicaLookup TabletID: 72057594037936131 Cookie: 3} 2026-01-08T15:24:30.695729Z 1 00h00m17.647821s :STATESTORAGE DEBUG: Replica::Handle ev: {EvReplicaLookup TabletID: 72057594037936131 Cookie: 4} 2026-01-08T15:24:30.695788Z 1 00h00m17.647821s :STATESTORAGE DEBUG: ProxyRequest::HandleLookup ringGroup:0 ev: {EvReplicaInfo Status: 1 TabletID: 72057594037936131 ClusterStateGeneration: 0 ClusterStateGuid: 0} 2026-01-08T15:24:30.695845Z 1 00h00m17.647821s :STATESTORAGE DEBUG: ProxyRequest::HandleLookup ringGroup:0 ev: {EvReplicaInfo Status: 1 TabletID: 72057594037936131 ClusterStateGeneration: 0 ClusterStateGuid: 0} 2026-01-08T15:24:30.695876Z 1 00h00m17.647821s :STATESTORAGE DEBUG: ProxyRequest::HandleLookup ringGroup:0 ev: {EvReplicaInfo Status: 1 TabletID: 72057594037936131 ClusterStateGeneration: 0 ClusterStateGuid: 0} 2026-01-08T15:24:30.695903Z 1 00h00m17.647821s :STATESTORAGE DEBUG: ProxyRequest::HandleLookup ringGroup:0 ev: {EvReplicaInfo Status: 1 TabletID: 72057594037936131 ClusterStateGeneration: 0 ClusterStateGuid: 0} 2026-01-08T15:24:30.695931Z 1 00h00m17.647821s :STATESTORAGE DEBUG: ProxyRequest::HandleLookup ringGroup:0 ev: 
{EvReplicaInfo Status: 1 TabletID: 72057594037936131 ClusterStateGeneration: 0 ClusterStateGuid: 0} 2026-01-08T15:24:30.713207Z 1 00h00m36.715958s :STATESTORAGE DEBUG: ProxyRequest::HandleInit ringGroup:0 ev: {EvLookup TabletID: 72057594037936131 Cookie: 0 ProxyOptions: SigNone} 2026-01-08T15:24:30.713345Z 1 00h00m36.715958s :STATESTORAGE DEBUG: Replica::Handle ev: {EvReplicaLookup TabletID: 72057594037936131 Cookie: 0} 2026-01-08T15:24:30.713400Z 1 00h00m36.715958s :STATESTORAGE DEBUG: Replica::Handle ev: {EvReplicaLookup TabletID: 72057594037936131 Cookie: 1} 2026-01-08T15:24:30.713428Z 1 00h00m36.715958s :STATESTORAGE DEBUG: Replica::Handle ev: {EvReplicaLookup TabletID: 72057594037936131 Cookie: 2} 2026-01-08T15:24:30.713467Z 1 00h00m36.715958s :STATESTORAGE DEBUG: Replica::Handle ev: {EvReplicaLookup TabletID: 72057594037936131 Cookie: 3} 2026-01-08T15:24:30.713502Z 1 00h00m36.715958s :STATESTORAGE DEBUG: Replica::Handle ev: {EvReplicaLookup TabletID: 72057594037936131 Cookie: 4} 2026-01-08T15:24:30.713550Z 1 00h00m36.715958s :STATESTORAGE DEBUG: ProxyRequest::HandleLookup ringGroup:0 ev: {EvReplicaInfo Status: 1 TabletID: 72057594037936131 ClusterStateGeneration: 0 ClusterStateGuid: 0} 2026-01-08T15:24:30.713598Z 1 00h00m36.715958s :STATESTORAGE DEBUG: ProxyRequest::HandleLookup ringGroup:0 ev: {EvReplicaInfo Status: 1 TabletID: 72057594037936131 ClusterStateGeneration: 0 ClusterStateGuid: 0} 2026-01-08T15:24:30.713643Z 1 00h00m36.715958s :STATESTORAGE DEBUG: ProxyRequest::HandleLookup ringGroup:0 ev: {EvReplicaInfo Status: 1 TabletID: 72057594037936131 ClusterStateGeneration: 0 ClusterStateGuid: 0} 2026-01-08T15:24:30.713673Z 1 00h00m36.715958s :STATESTORAGE DEBUG: ProxyRequest::HandleLookup ringGroup:0 ev: {EvReplicaInfo Status: 1 TabletID: 72057594037936131 ClusterStateGeneration: 0 ClusterStateGuid: 0} 2026-01-08T15:24:30.713705Z 1 00h00m36.715958s :STATESTORAGE DEBUG: ProxyRequest::HandleLookup ringGroup:0 ev: {EvReplicaInfo Status: 1 TabletID: 72057594037936131 ClusterStateGeneration: 0 ClusterStateGuid: 0} ------- [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/ut_blobstorage/ut_read_only_vdisk/unittest >> ReadOnlyVDisk::TestReads [GOOD] Test command err: RandomSeed# 16867721818924939889 === Trying to put and get a blob === SEND TEvPut with key [1:1:0:0:0:131072:0] TEvPutResult: TEvPutResult {Id# [1:1:0:0:0:131072:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999988} === Read all 1 blob(s) === SEND TEvGet with key [1:1:0:0:0:131072:0] TEvGetResult: TEvGetResult {Status# OK ResponseSz# 1 {[1:1:0:0:0:131072:0] OK Size# 131072 RequestedSize# 131072}} === Putting VDisk #0 to read-only === Setting VDisk read-only to 1 for position 0 Invoking SetVDiskReadOnly for vdisk [82000000:1:0:0:0] === Read all 1 blob(s) === SEND TEvGet with key [1:1:0:0:0:131072:0] TEvGetResult: TEvGetResult {Status# OK ResponseSz# 1 {[1:1:0:0:0:131072:0] OK Size# 131072 RequestedSize# 131072}} === Putting VDisk #1 to read-only === Setting VDisk read-only to 1 for position 1 Invoking SetVDiskReadOnly for vdisk [82000000:1:0:1:0] === Read all 1 blob(s) === SEND TEvGet with key [1:1:0:0:0:131072:0] TEvGetResult: TEvGetResult {Status# OK ResponseSz# 1 {[1:1:0:0:0:131072:0] OK Size# 131072 RequestedSize# 131072}} === Putting VDisk #2 to read-only === Setting VDisk read-only to 1 for position 2 Invoking SetVDiskReadOnly for vdisk [82000000:1:0:2:0] === Read all 1 blob(s) === SEND TEvGet with key [1:1:0:0:0:131072:0] TEvGetResult: TEvGetResult {Status# OK ResponseSz# 1 
{[1:1:0:0:0:131072:0] OK Size# 131072 RequestedSize# 131072}} === Putting VDisk #3 to read-only === Setting VDisk read-only to 1 for position 3 Invoking SetVDiskReadOnly for vdisk [82000000:1:0:3:0] === Read all 1 blob(s) === SEND TEvGet with key [1:1:0:0:0:131072:0] TEvGetResult: TEvGetResult {Status# OK ResponseSz# 1 {[1:1:0:0:0:131072:0] OK Size# 131072 RequestedSize# 131072}} === Putting VDisk #4 to read-only === Setting VDisk read-only to 1 for position 4 Invoking SetVDiskReadOnly for vdisk [82000000:1:0:4:0] === Read all 1 blob(s) === SEND TEvGet with key [1:1:0:0:0:131072:0] TEvGetResult: TEvGetResult {Status# OK ResponseSz# 1 {[1:1:0:0:0:131072:0] OK Size# 131072 RequestedSize# 131072}} === Putting VDisk #5 to read-only === Setting VDisk read-only to 1 for position 5 Invoking SetVDiskReadOnly for vdisk [82000000:1:0:5:0] === Read all 1 blob(s) === SEND TEvGet with key [1:1:0:0:0:131072:0] TEvGetResult: TEvGetResult {Status# OK ResponseSz# 1 {[1:1:0:0:0:131072:0] OK Size# 131072 RequestedSize# 131072}} === Putting VDisk #6 to read-only === Setting VDisk read-only to 1 for position 6 Invoking SetVDiskReadOnly for vdisk [82000000:1:0:6:0] === Read all 1 blob(s) === SEND TEvGet with key [1:1:0:0:0:131072:0] TEvGetResult: TEvGetResult {Status# OK ResponseSz# 1 {[1:1:0:0:0:131072:0] OK Size# 131072 RequestedSize# 131072}} === Restoring to normal VDisk #0 === Setting VDisk read-only to 0 for position 0 Invoking SetVDiskReadOnly for vdisk [82000000:1:0:0:0] === Read all 1 blob(s) === SEND TEvGet with key [1:1:0:0:0:131072:0] TEvGetResult: TEvGetResult {Status# OK ResponseSz# 1 {[1:1:0:0:0:131072:0] OK Size# 131072 RequestedSize# 131072}} === Restoring to normal VDisk #1 === Setting VDisk read-only to 0 for position 1 Invoking SetVDiskReadOnly for vdisk [82000000:1:0:1:0] === Read all 1 blob(s) === SEND TEvGet with key [1:1:0:0:0:131072:0] TEvGetResult: TEvGetResult {Status# OK ResponseSz# 1 {[1:1:0:0:0:131072:0] OK Size# 131072 RequestedSize# 131072}} === Restoring to normal VDisk #2 === Setting VDisk read-only to 0 for position 2 Invoking SetVDiskReadOnly for vdisk [82000000:1:0:2:0] === Read all 1 blob(s) === SEND TEvGet with key [1:1:0:0:0:131072:0] TEvGetResult: TEvGetResult {Status# OK ResponseSz# 1 {[1:1:0:0:0:131072:0] OK Size# 131072 RequestedSize# 131072}} === Restoring to normal VDisk #3 === Setting VDisk read-only to 0 for position 3 Invoking SetVDiskReadOnly for vdisk [82000000:1:0:3:0] === Read all 1 blob(s) === SEND TEvGet with key [1:1:0:0:0:131072:0] TEvGetResult: TEvGetResult {Status# OK ResponseSz# 1 {[1:1:0:0:0:131072:0] OK Size# 131072 RequestedSize# 131072}} === Restoring to normal VDisk #4 === Setting VDisk read-only to 0 for position 4 Invoking SetVDiskReadOnly for vdisk [82000000:1:0:4:0] === Read all 1 blob(s) === SEND TEvGet with key [1:1:0:0:0:131072:0] TEvGetResult: TEvGetResult {Status# OK ResponseSz# 1 {[1:1:0:0:0:131072:0] OK Size# 131072 RequestedSize# 131072}} === Restoring to normal VDisk #5 === Setting VDisk read-only to 0 for position 5 Invoking SetVDiskReadOnly for vdisk [82000000:1:0:5:0] === Read all 1 blob(s) === SEND TEvGet with key [1:1:0:0:0:131072:0] TEvGetResult: TEvGetResult {Status# OK ResponseSz# 1 {[1:1:0:0:0:131072:0] OK Size# 131072 RequestedSize# 131072}} === Restoring to normal VDisk #6 === Setting VDisk read-only to 0 for position 6 Invoking SetVDiskReadOnly for vdisk [82000000:1:0:6:0] === Read all 1 blob(s) === SEND TEvGet with key [1:1:0:0:0:131072:0] TEvGetResult: TEvGetResult {Status# OK ResponseSz# 1 {[1:1:0:0:0:131072:0] OK Size# 
131072 RequestedSize# 131072}} |83.8%| [TM] {BAZEL_UPLOAD} ydb/core/sys_view/service/ut/unittest |83.8%| [TM] {BAZEL_UPLOAD} ydb/core/blobstorage/ut_blobstorage/ut_statestorage/unittest >> TS3WrapperTests::MultipartUpload >> TS3WrapperTests::CompleteUnknownUpload |83.8%| [TM] {BAZEL_UPLOAD} ydb/core/blobstorage/ut_blobstorage/ut_read_only_vdisk/unittest ------- [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/ut_blobstorage/ut_statestorage/unittest >> TStateStorageRingGroupState::TestProxyConfigMismatchNotSent [GOOD] Test command err: RandomSeed# 8272094451565075963 2026-01-08T15:24:30.415512Z 1 00h00m00.000000s :BS_NODE DEBUG: {NWDC15@distconf.cpp:427} StateFunc Type# 268639239 Sender# [2:227:20] SessionId# [1:130:1] Cookie# 14951804266483163343 2026-01-08T15:24:30.415607Z 1 00h00m00.000000s :BS_NODE DEBUG: {NWDC54@distconf_binding.cpp:373} SubscribeToPeerNode NodeId# 2 SessionId# [1:130:1] Inserted# false Subscription# {SessionId# [1:130:1] SubscriptionCookie# 0} NextSubscribeCookie# 6 2026-01-08T15:24:30.426273Z 1 00h00m00.000000s :BS_NODE DEBUG: {NWDC17@distconf_binding.cpp:437} TEvNodeConfigReversePush NodeId# 2 Cookie# 14951804266483163343 SessionId# [1:130:1] Binding# {2.2/14951804266483163343@[1:130:1]} Record# {RootNodeId: 6 } 2026-01-08T15:24:30.430515Z 1 00h00m00.000000s :BS_NODE WARN: {NWDC01@distconf.cpp:423} StateFunc too long Type# 268639239 Duration# 0.015013s 2026-01-08T15:24:30.430601Z 3 00h00m00.000000s :BS_NODE DEBUG: {NWDC15@distconf.cpp:427} StateFunc Type# 268639239 Sender# [2:227:20] SessionId# [3:109:2] Cookie# 7292233619171058204 2026-01-08T15:24:30.430663Z 3 00h00m00.000000s :BS_NODE DEBUG: {NWDC54@distconf_binding.cpp:373} SubscribeToPeerNode NodeId# 2 SessionId# [3:109:2] Inserted# false Subscription# {SessionId# [3:109:2] SubscriptionCookie# 0} NextSubscribeCookie# 2 2026-01-08T15:24:30.430754Z 3 00h00m00.000000s :BS_NODE DEBUG: {NWDC17@distconf_binding.cpp:437} TEvNodeConfigReversePush NodeId# 2 Cookie# 7292233619171058204 SessionId# [3:109:2] Binding# {2.2/7292233619171058204@[3:109:2]} Record# {RootNodeId: 6 } 2026-01-08T15:24:30.430795Z 4 00h00m00.000000s :BS_NODE DEBUG: {NWDC15@distconf.cpp:427} StateFunc Type# 268639239 Sender# [2:227:20] SessionId# [4:112:2] Cookie# 4741266827539300121 2026-01-08T15:24:30.430836Z 4 00h00m00.000000s :BS_NODE DEBUG: {NWDC54@distconf_binding.cpp:373} SubscribeToPeerNode NodeId# 2 SessionId# [4:112:2] Inserted# false Subscription# {SessionId# [4:112:2] SubscriptionCookie# 0} NextSubscribeCookie# 3 2026-01-08T15:24:30.430880Z 4 00h00m00.000000s :BS_NODE DEBUG: {NWDC17@distconf_binding.cpp:437} TEvNodeConfigReversePush NodeId# 2 Cookie# 4741266827539300121 SessionId# [4:112:2] Binding# {2.2/4741266827539300121@[4:112:2]} Record# {RootNodeId: 6 } 2026-01-08T15:24:30.430939Z 1 00h00m00.000000s :BS_NODE DEBUG: {NWDC15@distconf.cpp:427} StateFunc Type# 268639238 Sender# [6:255:20] SessionId# [1:142:5] Cookie# 14198862588209634897 2026-01-08T15:24:30.430971Z 1 00h00m00.000000s :BS_NODE DEBUG: {NWDC54@distconf_binding.cpp:373} SubscribeToPeerNode NodeId# 6 SessionId# [1:142:5] Inserted# false Subscription# {SessionId# [1:142:5] SubscriptionCookie# 0} NextSubscribeCookie# 6 2026-01-08T15:24:30.431191Z 1 00h00m00.000000s :BS_NODE DEBUG: {NWDC02@distconf_binding.cpp:611} TEvNodeConfigPush NodeId# 6 Cookie# 14198862588209634897 SessionId# [1:142:5] Binding# {2.6/14951804266483163343@[1:130:1]} Record# {BoundNodes { NodeId { Host: "127.0.0.4" Port: 19001 NodeId: 4 } Meta { Fingerprint: "\3403\207\365\032> Record# 
{BoundNodes { NodeId { Host: "127.0.0.7" Port: 19001 NodeId: 7 } Meta { Fingerprint: "\3403\207\365\032>> SysViewQueryHistory::AggrMerge [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/ut_blobstorage/ut_statestorage/unittest >> TStateStorageRingGroupState::TestStateStorageDoubleReply [GOOD] Test command err: RandomSeed# 16257066746528869448 2026-01-08T15:24:31.159176Z 8 00h00m00.000000s :BS_NODE DEBUG: {NWDC15@distconf.cpp:427} StateFunc Type# 131077 Sender# [8:29:7] SessionId# [0:0:0] Cookie# 5 2026-01-08T15:24:31.159238Z 8 00h00m00.000000s :BS_NODE DEBUG: {NWDC14@distconf_binding.cpp:223} TEvNodeConnected NodeId# 7 SessionId# [8:29:7] Cookie# 5 CookieInFlight# true SubscriptionExists# true 2026-01-08T15:24:31.159302Z 8 00h00m00.000000s :BS_NODE DEBUG: {NWDC09@distconf_binding.cpp:259} Continuing bind Binding# {7.0/12448184711493366336@[0:0:0]} 2026-01-08T15:24:31.159368Z 5 00h00m00.000000s :BS_NODE DEBUG: {NWDC15@distconf.cpp:427} StateFunc Type# 268639238 Sender# [8:269:20] SessionId# [5:60:7] Cookie# 12448184711493366335 2026-01-08T15:24:31.159405Z 5 00h00m00.000000s :BS_NODE DEBUG: {NWDC54@distconf_binding.cpp:373} SubscribeToPeerNode NodeId# 8 SessionId# [5:60:7] Inserted# false Subscription# {SessionId# [5:60:7] SubscriptionCookie# 0} NextSubscribeCookie# 3 2026-01-08T15:24:31.170182Z 5 00h00m00.000000s :BS_NODE DEBUG: {NWDC02@distconf_binding.cpp:611} TEvNodeConfigPush NodeId# 8 Cookie# 12448184711493366335 SessionId# [5:60:7] Binding# {1.1/8835371047657518280@[5:140:1]} Record# {DeletedBoundNodeIds { Host: "127.0.0.6" Port: 19001 NodeId: 6 } DeletedBoundNodeIds { Host: "127.0.0.7" Port: 19001 NodeId: 7 } DeletedBoundNodeIds { Host: "127.0.0.9" Port: 19001 NodeId: 9 } DeletedBoundNodeIds { Host: "127.0.0.4" Port: 19001 NodeId: 4 } DeletedBoundNodeIds { Host: "127.0.0.1" Port: 19001 NodeId: 1 } DeletedBoundNodeIds { Host: "127.0.0.2" Port: 19001 NodeId: 2 } DeletedBoundNodeIds { Host: "127.0.0.5" Port: 19001 NodeId: 5 } DeletedBoundNodeIds { Host: "127.0.0.3" Port: 19001 NodeId: 3 } } RootNodeId# 1 StorageConfigGeneration# 0 KnownNode# true 2026-01-08T15:24:31.170267Z 5 00h00m00.000000s :BS_NODE DEBUG: {NWDC54@distconf_binding.cpp:373} SubscribeToPeerNode NodeId# 8 SessionId# [5:60:7] Inserted# false Subscription# {SessionId# [5:60:7] SubscriptionCookie# 0} NextSubscribeCookie# 3 2026-01-08T15:24:31.172004Z 5 00h00m00.000000s :BS_NODE DEBUG: {NWDC34@distconf_binding.cpp:551} DeleteBound RefererNodeId# 8 NodeId# 127.0.0.6:19001/6 2026-01-08T15:24:31.172102Z 5 00h00m00.000000s :BS_NODE DEBUG: {NWDC34@distconf_binding.cpp:551} DeleteBound RefererNodeId# 8 NodeId# 127.0.0.7:19001/7 2026-01-08T15:24:31.172133Z 5 00h00m00.000000s :BS_NODE DEBUG: {NWDC34@distconf_binding.cpp:551} DeleteBound RefererNodeId# 8 NodeId# 127.0.0.9:19001/9 2026-01-08T15:24:31.172183Z 5 00h00m00.000000s :BS_NODE DEBUG: {NWDC34@distconf_binding.cpp:551} DeleteBound RefererNodeId# 8 NodeId# 127.0.0.4:19001/4 2026-01-08T15:24:31.172216Z 5 00h00m00.000000s :BS_NODE DEBUG: {NWDC34@distconf_binding.cpp:551} DeleteBound RefererNodeId# 8 NodeId# 127.0.0.1:19001/1 2026-01-08T15:24:31.172253Z 5 00h00m00.000000s :BS_NODE DEBUG: {NWDC34@distconf_binding.cpp:551} DeleteBound RefererNodeId# 8 NodeId# 127.0.0.2:19001/2 2026-01-08T15:24:31.172284Z 5 00h00m00.000000s :BS_NODE DEBUG: {NWDC34@distconf_binding.cpp:551} DeleteBound RefererNodeId# 8 NodeId# 127.0.0.5:19001/5 2026-01-08T15:24:31.172309Z 5 00h00m00.000000s :BS_NODE DEBUG: {NWDC34@distconf_binding.cpp:551} DeleteBound RefererNodeId# 8 
NodeId# 127.0.0.3:19001/3 2026-01-08T15:24:31.172425Z 5 00h00m00.000000s :BS_NODE WARN: {NWDC01@distconf.cpp:423} StateFunc too long Type# 268639238 Duration# 0.013053s 2026-01-08T15:24:31.172484Z 9 00h00m00.000000s :BS_NODE DEBUG: {NWDC15@distconf.cpp:427} StateFunc Type# 268639239 Sender# [2:227:20] SessionId# [9:127:2] Cookie# 4747731008240938824 2026-01-08T15:24:31.172526Z 9 00h00m00.000000s :BS_NODE DEBUG: {NWDC54@distconf_binding.cpp:373} SubscribeToPeerNode NodeId# 2 SessionId# [9:127:2] Inserted# false Subscription# {SessionId# [9:127:2] SubscriptionCookie# 0} NextSubscribeCookie# 2 2026-01-08T15:24:31.172620Z 9 00h00m00.000000s :BS_NODE DEBUG: {NWDC17@distconf_binding.cpp:437} TEvNodeConfigReversePush NodeId# 2 Cookie# 4747731008240938824 SessionId# [9:127:2] Binding# {2.8/4747731008240938824@[9:127:2]} Record# {RootNodeId: 1 } 2026-01-08T15:24:31.172668Z 3 00h00m00.000000s :BS_NODE DEBUG: {NWDC15@distconf.cpp:427} StateFunc Type# 268639239 Sender# [2:227:20] SessionId# [3:109:2] Cookie# 6082254955469741203 2026-01-08T15:24:31.172700Z 3 00h00m00.000000s :BS_NODE DEBUG: {NWDC54@distconf_binding.cpp:373} SubscribeToPeerNode NodeId# 2 SessionId# [3:109:2] Inserted# false Subscription# {SessionId# [3:109:2] SubscriptionCookie# 0} NextSubscribeCookie# 2 2026-01-08T15:24:31.172736Z 3 00h00m00.000000s :BS_NODE DEBUG: {NWDC17@distconf_binding.cpp:437} TEvNodeConfigReversePush NodeId# 2 Cookie# 6082254955469741203 SessionId# [3:109:2] Binding# {2.8/6082254955469741203@[3:109:2]} Record# {RootNodeId: 1 } 2026-01-08T15:24:31.172778Z 4 00h00m00.000000s :BS_NODE DEBUG: {NWDC15@distconf.cpp:427} StateFunc Type# 268639239 Sender# [2:227:20] SessionId# [4:112:2] Cookie# 5607693221205710601 2026-01-08T15:24:31.172823Z 4 00h00m00.000000s :BS_NODE DEBUG: {NWDC54@distconf_binding.cpp:373} SubscribeToPeerNode NodeId# 2 SessionId# [4:112:2] Inserted# false Subscription# {SessionId# [4:112:2] SubscriptionCookie# 0} NextSubscribeCookie# 2 2026-01-08T15:24:31.172864Z 4 00h00m00.000000s :BS_NODE DEBUG: {NWDC17@distconf_binding.cpp:437} TEvNodeConfigReversePush NodeId# 2 Cookie# 5607693221205710601 SessionId# [4:112:2] Binding# {2.8/5607693221205710601@[4:112:2]} Record# {RootNodeId: 1 } 2026-01-08T15:24:31.172900Z 8 00h00m00.000000s :BS_NODE DEBUG: {NWDC15@distconf.cpp:427} StateFunc Type# 268639239 Sender# [5:248:20] SessionId# [8:61:5] Cookie# 12448184711493366335 2026-01-08T15:24:31.172936Z 8 00h00m00.000000s :BS_NODE DEBUG: {NWDC17@distconf_binding.cpp:437} TEvNodeConfigReversePush NodeId# 5 Cookie# 12448184711493366335 SessionId# [8:61:5] Binding# {7.0/12448184711493366336@[8:29:7]} Record# {RootNodeId: 1 } 2026-01-08T15:24:31.173029Z 5 00h00m00.000000s :BS_NODE DEBUG: {NWDC15@distconf.cpp:427} StateFunc Type# 268639240 Sender# [8:269:20] SessionId# [5:60:7] Cookie# 12448184711493366335 2026-01-08T15:24:31.173055Z 5 00h00m00.000000s :BS_NODE DEBUG: {NWDC54@distconf_binding.cpp:373} SubscribeToPeerNode NodeId# 8 SessionId# [5:60:7] Inserted# false Subscription# {SessionId# [5:60:7] SubscriptionCookie# 0} NextSubscribeCookie# 3 2026-01-08T15:24:31.173089Z 5 00h00m00.000000s :BS_NODE DEBUG: {NWDC16@distconf_binding.cpp:750} TEvNodeConfigUnbind NodeId# 8 Cookie# 12448184711493366335 SessionId# [5:60:7] Binding# {1.1/8835371047657518280@[5:140:1]} 2026-01-08T15:24:31.173163Z 5 00h00m00.000000s :BS_NODE DEBUG: {NWDC06@distconf_binding.cpp:759} UnbindNode NodeId# 8 Reason# explicit unbind request 2026-01-08T15:24:31.173203Z 5 00h00m00.000000s :BS_NODE DEBUG: {NWDC34@distconf_binding.cpp:551} DeleteBound 
RefererNodeId# 8 NodeId# 127.0.0.8:19001/8 2026-01-08T15:24:31.173310Z 5 00h00m00.000000s :BS_NODE DEBUG: {NWDC55@distconf_binding.cpp:345} UnsubscribeInterconnect NodeId# 8 Subscription# {SessionId# [5:60:7] SubscriptionCookie# 0} 2026-01-08T15:24:31.173648Z 7 00h00m00.000000s :BS_NODE DEBUG: {NWDC15@distconf.cpp:427} StateFunc Type# 268639238 Sender# [8:269:20] SessionId# [7:28:7] Cookie# 12448184711493366336 2026-01-08T15:24:31.173773Z 7 00h00m00.000000s :BS_NODE DEBUG: {NWDC02@distconf_binding.cpp:611} TEvNodeConfigPush NodeId# 8 Cookie# 12448184711493366336 SessionId# [7:28:7] Binding# {1.1/9163014183970835205@[7:146:1]} Record# {Initial: true BoundNodes { NodeId { Host: "127.0.0.8" Port: 19001 NodeId: 8 } Meta { Fingerprint: "\3403\207\365\032> Record# {DeletedBoundNodeIds { Host: "127.0.0.6" Port: 19001 NodeId: 6 } DeletedBoundNodeIds { Host: "127.0.0.7" Port: 19001 NodeId: 7 } DeletedBoundNodeIds { Host: "127.0.0.9" Port: 19001 NodeId: 9 } DeletedBoundNodeIds { Host: "127.0.0.4" Port: 19001 NodeId: 4 } DeletedBoundNodeIds { Host: "127.0.0.1" Port: 19001 NodeId: 1 } DeletedBoundNodeIds { Host: "127.0.0.2" Port: 19001 NodeId: 2 } DeletedBoundNodeIds { Host: "127.0.0.3" Port: 19001 NodeId: 3 } } RootNodeId# 1 StorageConfigGeneration# 0 KnownNode# true 2026-01-08T15:24:31.174166Z 1 00h00m00.000000s :BS_NODE DEBUG: {NWDC54@distconf_binding.cpp:373} SubscribeToPeerNode NodeId# 5 SessionId# [1:139:4] Inserted# false Subscription# {SessionId# [1:139:4] SubscriptionCookie# 0} NextSubscribeCookie# 7 2026-01-08T15:24:31.174198Z 1 00h00m00.000000s :BS_NODE DEBUG: {NWDC34@distconf_binding.cpp:551} DeleteBound RefererNodeId# 5 NodeId# 127.0.0.6:19001/6 2026-01-08T15:24:31.174245Z 1 00h00m00.000000s :BS_NODE DEBUG: {NWDC34@distconf_binding.cpp:551} DeleteBound RefererNodeId# 5 NodeId# 127.0.0.7:19001/7 2026-01-08T15:24:31.174270Z 1 00h00m00.000000s :BS_NODE DEBUG: {NWDC34@distconf_binding.cpp:551} DeleteBound RefererNodeId# 5 NodeId# 127.0.0.9:19001/9 2026-01-08T15:24:31.174303Z 1 00h00m00.000000s :BS_NODE DEBUG: {NWDC34@distconf_binding.cpp:551} DeleteBound RefererNodeId# 5 NodeId# 127.0.0.4:19001/4 2026-01-08T15:24:31.182479Z 1 00h00m00.000000s :BS_NODE DEBUG: {NWDC34@distconf_binding.cpp:551} DeleteBound RefererNodeId# 5 NodeId# 127.0.0.1:19001/1 2026-01-08T15:24:31.182527Z 1 00h00m00.000000s :BS_NODE DEBUG: {NWDC34@distconf_binding.cpp:551} DeleteBound RefererNodeId# 5 NodeId# 127.0.0.2:19001/2 2026-01-08T15:24:31.182555Z 1 00h00m00.000000s :BS_NODE DEBUG: {NWDC34@distconf_binding.cpp:551} DeleteBound RefererNodeId# 5 NodeId# 127.0.0.3:19001/3 2026-01-08T15:24:31.182624Z 1 00h00m00.000000s :BS_NODE WARN: {NWDC01@distconf.cpp:423} StateFunc too long Type# 268639238 Duration# 0.008652s 2026-01-08T15:24:31.183502Z 1 00h00m00.000000s :BS_NODE DEBUG: {NWDC15@distconf.cpp:427} StateFunc Type# 268639238 Sender# [5:248:20] SessionId# [1:139:4] Cookie# 8835371047657518280 2026-01-08T15:24:31.183555Z 1 00h00m00.000000s :BS_NODE DEBUG: {NWDC54@distconf_binding.cpp:373} SubscribeToPeerNode NodeId# 5 SessionId# [1:139:4] Inserted# false Subscription# {SessionId# [1:139:4] SubscriptionCookie# 0} NextSubscribeCookie# 7 2026-01-08T15:24:31.183666Z 1 00h00m00.000000s :BS_NODE DEBUG: {NWDC02@distconf_binding.cpp:611} TEvNodeConfigPush NodeId# 5 Cookie# 8835371047657518280 SessionId# [1:139:4] Binding# Record# {DeletedBoundNodeIds { Host: "127.0.0.8" Port: 19001 NodeId: 8 } } RootNodeId# 1 StorageConfigGeneration# 0 KnownNode# true 2026-01-08T15:24:31.183705Z 1 00h00m00.000000s :BS_NODE DEBUG: 
{NWDC54@distconf_binding.cpp:373} SubscribeToPeerNode NodeId# 5 SessionId# [1:139:4] Inserted# false Subscription# {SessionId# [1:139:4] SubscriptionCookie# 0} NextSubscribeCookie# 7 2026-01-08T15:24:31.183750Z 1 00h00m00.000000s :BS_NODE DEBUG: {NWDC34@distconf_binding.cpp:551} DeleteBound RefererNodeId# 5 NodeId# 127.0.0.8:19001/8 2026-01-08T15:24:31.183837Z 1 00h00m00.000000s :BS_NODE DEBUG: {NWDC54@distconf_binding.cpp:373} SubscribeToPeerNode NodeId# 8 SessionId# [0:0:0] Inserted# true ... ev: {EvReplicaInfo Status: 1 TabletID: 72057594037936131 ClusterStateGeneration: 0 ClusterStateGuid: 0} 2026-01-08T15:24:31.292068Z 1 00h00m00.882078s :STATESTORAGE DEBUG: ProxyRequest::HandleLookup ringGroup:0 ev: {EvReplicaInfo Status: 1 TabletID: 72057594037936131 ClusterStateGeneration: 0 ClusterStateGuid: 0} 2026-01-08T15:24:31.292095Z 1 00h00m00.882078s :STATESTORAGE DEBUG: ProxyRequest::HandleLookup ringGroup:0 ev: {EvReplicaInfo Status: 1 TabletID: 72057594037936131 ClusterStateGeneration: 0 ClusterStateGuid: 0} 2026-01-08T15:24:31.292137Z 1 00h00m00.882078s :STATESTORAGE DEBUG: ProxyRequest::HandleLookup ringGroup:0 ev: {EvReplicaInfo Status: 1 TabletID: 72057594037936131 ClusterStateGeneration: 0 ClusterStateGuid: 0} 2026-01-08T15:24:31.292172Z 1 00h00m00.882078s :STATESTORAGE DEBUG: ProxyRequest::HandleLookup ringGroup:0 ev: {EvReplicaInfo Status: 1 TabletID: 72057594037936131 ClusterStateGeneration: 0 ClusterStateGuid: 0} 2026-01-08T15:24:31.295113Z 1 00h00m01.922933s :STATESTORAGE DEBUG: ProxyRequest::HandleInit ringGroup:0 ev: {EvLookup TabletID: 72057594037936131 Cookie: 0 ProxyOptions: SigNone} 2026-01-08T15:24:31.298443Z 1 00h00m01.922933s :STATESTORAGE DEBUG: Replica::Handle ev: {EvReplicaLookup TabletID: 72057594037936131 Cookie: 0} 2026-01-08T15:24:31.298516Z 1 00h00m01.922933s :STATESTORAGE DEBUG: Replica::Handle ev: {EvReplicaLookup TabletID: 72057594037936131 Cookie: 1} 2026-01-08T15:24:31.298551Z 1 00h00m01.922933s :STATESTORAGE DEBUG: Replica::Handle ev: {EvReplicaLookup TabletID: 72057594037936131 Cookie: 2} 2026-01-08T15:24:31.298587Z 1 00h00m01.922933s :STATESTORAGE DEBUG: Replica::Handle ev: {EvReplicaLookup TabletID: 72057594037936131 Cookie: 3} 2026-01-08T15:24:31.298619Z 1 00h00m01.922933s :STATESTORAGE DEBUG: Replica::Handle ev: {EvReplicaLookup TabletID: 72057594037936131 Cookie: 4} 2026-01-08T15:24:31.298661Z 1 00h00m01.922933s :STATESTORAGE DEBUG: ProxyRequest::HandleLookup ringGroup:0 ev: {EvReplicaInfo Status: 1 TabletID: 72057594037936131 ClusterStateGeneration: 0 ClusterStateGuid: 0} 2026-01-08T15:24:31.298712Z 1 00h00m01.922933s :STATESTORAGE DEBUG: ProxyRequest::HandleLookup ringGroup:0 ev: {EvReplicaInfo Status: 1 TabletID: 72057594037936131 ClusterStateGeneration: 0 ClusterStateGuid: 0} 2026-01-08T15:24:31.298738Z 1 00h00m01.922933s :STATESTORAGE DEBUG: ProxyRequest::HandleLookup ringGroup:0 ev: {EvReplicaInfo Status: 1 TabletID: 72057594037936131 ClusterStateGeneration: 0 ClusterStateGuid: 0} 2026-01-08T15:24:31.298767Z 1 00h00m01.922933s :STATESTORAGE DEBUG: ProxyRequest::HandleLookup ringGroup:0 ev: {EvReplicaInfo Status: 1 TabletID: 72057594037936131 ClusterStateGeneration: 0 ClusterStateGuid: 0} 2026-01-08T15:24:31.298792Z 1 00h00m01.922933s :STATESTORAGE DEBUG: ProxyRequest::HandleLookup ringGroup:0 ev: {EvReplicaInfo Status: 1 TabletID: 72057594037936131 ClusterStateGeneration: 0 ClusterStateGuid: 0} 2026-01-08T15:24:31.301076Z 1 00h00m04.254448s :STATESTORAGE DEBUG: ProxyRequest::HandleInit ringGroup:0 ev: {EvLookup TabletID: 72057594037936131 
Cookie: 0 ProxyOptions: SigNone} 2026-01-08T15:24:31.301139Z 1 00h00m04.254448s :STATESTORAGE DEBUG: Replica::Handle ev: {EvReplicaLookup TabletID: 72057594037936131 Cookie: 0} 2026-01-08T15:24:31.301175Z 1 00h00m04.254448s :STATESTORAGE DEBUG: Replica::Handle ev: {EvReplicaLookup TabletID: 72057594037936131 Cookie: 1} 2026-01-08T15:24:31.301215Z 1 00h00m04.254448s :STATESTORAGE DEBUG: Replica::Handle ev: {EvReplicaLookup TabletID: 72057594037936131 Cookie: 2} 2026-01-08T15:24:31.301242Z 1 00h00m04.254448s :STATESTORAGE DEBUG: Replica::Handle ev: {EvReplicaLookup TabletID: 72057594037936131 Cookie: 3} 2026-01-08T15:24:31.301278Z 1 00h00m04.254448s :STATESTORAGE DEBUG: Replica::Handle ev: {EvReplicaLookup TabletID: 72057594037936131 Cookie: 4} 2026-01-08T15:24:31.301321Z 1 00h00m04.254448s :STATESTORAGE DEBUG: ProxyRequest::HandleLookup ringGroup:0 ev: {EvReplicaInfo Status: 1 TabletID: 72057594037936131 ClusterStateGeneration: 0 ClusterStateGuid: 0} 2026-01-08T15:24:31.301361Z 1 00h00m04.254448s :STATESTORAGE DEBUG: ProxyRequest::HandleLookup ringGroup:0 ev: {EvReplicaInfo Status: 1 TabletID: 72057594037936131 ClusterStateGeneration: 0 ClusterStateGuid: 0} 2026-01-08T15:24:31.301395Z 1 00h00m04.254448s :STATESTORAGE DEBUG: ProxyRequest::HandleLookup ringGroup:0 ev: {EvReplicaInfo Status: 1 TabletID: 72057594037936131 ClusterStateGeneration: 0 ClusterStateGuid: 0} 2026-01-08T15:24:31.301439Z 1 00h00m04.254448s :STATESTORAGE DEBUG: ProxyRequest::HandleLookup ringGroup:0 ev: {EvReplicaInfo Status: 1 TabletID: 72057594037936131 ClusterStateGeneration: 0 ClusterStateGuid: 0} 2026-01-08T15:24:31.301475Z 1 00h00m04.254448s :STATESTORAGE DEBUG: ProxyRequest::HandleLookup ringGroup:0 ev: {EvReplicaInfo Status: 1 TabletID: 72057594037936131 ClusterStateGeneration: 0 ClusterStateGuid: 0} 2026-01-08T15:24:31.309372Z 1 00h00m09.150629s :STATESTORAGE DEBUG: ProxyRequest::HandleInit ringGroup:0 ev: {EvLookup TabletID: 72057594037936131 Cookie: 0 ProxyOptions: SigNone} 2026-01-08T15:24:31.309454Z 1 00h00m09.150629s :STATESTORAGE DEBUG: Replica::Handle ev: {EvReplicaLookup TabletID: 72057594037936131 Cookie: 0} 2026-01-08T15:24:31.309511Z 1 00h00m09.150629s :STATESTORAGE DEBUG: Replica::Handle ev: {EvReplicaLookup TabletID: 72057594037936131 Cookie: 1} 2026-01-08T15:24:31.309535Z 1 00h00m09.150629s :STATESTORAGE DEBUG: Replica::Handle ev: {EvReplicaLookup TabletID: 72057594037936131 Cookie: 2} 2026-01-08T15:24:31.309559Z 1 00h00m09.150629s :STATESTORAGE DEBUG: Replica::Handle ev: {EvReplicaLookup TabletID: 72057594037936131 Cookie: 3} 2026-01-08T15:24:31.309589Z 1 00h00m09.150629s :STATESTORAGE DEBUG: Replica::Handle ev: {EvReplicaLookup TabletID: 72057594037936131 Cookie: 4} 2026-01-08T15:24:31.309623Z 1 00h00m09.150629s :STATESTORAGE DEBUG: ProxyRequest::HandleLookup ringGroup:0 ev: {EvReplicaInfo Status: 1 TabletID: 72057594037936131 ClusterStateGeneration: 0 ClusterStateGuid: 0} 2026-01-08T15:24:31.309663Z 1 00h00m09.150629s :STATESTORAGE DEBUG: ProxyRequest::HandleLookup ringGroup:0 ev: {EvReplicaInfo Status: 1 TabletID: 72057594037936131 ClusterStateGeneration: 0 ClusterStateGuid: 0} 2026-01-08T15:24:31.309688Z 1 00h00m09.150629s :STATESTORAGE DEBUG: ProxyRequest::HandleLookup ringGroup:0 ev: {EvReplicaInfo Status: 1 TabletID: 72057594037936131 ClusterStateGeneration: 0 ClusterStateGuid: 0} 2026-01-08T15:24:31.309711Z 1 00h00m09.150629s :STATESTORAGE DEBUG: ProxyRequest::HandleLookup ringGroup:0 ev: {EvReplicaInfo Status: 1 TabletID: 72057594037936131 ClusterStateGeneration: 0 ClusterStateGuid: 0} 
2026-01-08T15:24:31.309740Z 1 00h00m09.150629s :STATESTORAGE DEBUG: ProxyRequest::HandleLookup ringGroup:0 ev: {EvReplicaInfo Status: 1 TabletID: 72057594037936131 ClusterStateGeneration: 0 ClusterStateGuid: 0} 2026-01-08T15:24:31.313693Z 1 00h00m10.002048s :STATESTORAGE DEBUG: ProxyRequest::HandleInit ringGroup:0 ev: {EvLookup TabletID: 72057594037932033 Cookie: 0 ProxyOptions: SigAsync} 2026-01-08T15:24:31.313809Z 1 00h00m10.002048s :STATESTORAGE DEBUG: Replica::Handle ev: {EvReplicaLookup TabletID: 72057594037932033 Cookie: 0} 2026-01-08T15:24:31.313853Z 1 00h00m10.002048s :STATESTORAGE DEBUG: Replica::Handle ev: {EvReplicaLookup TabletID: 72057594037932033 Cookie: 1} 2026-01-08T15:24:31.313880Z 1 00h00m10.002048s :STATESTORAGE DEBUG: Replica::Handle ev: {EvReplicaLookup TabletID: 72057594037932033 Cookie: 2} 2026-01-08T15:24:31.313916Z 1 00h00m10.002048s :STATESTORAGE DEBUG: Replica::Handle ev: {EvReplicaLookup TabletID: 72057594037932033 Cookie: 3} 2026-01-08T15:24:31.313952Z 1 00h00m10.002048s :STATESTORAGE DEBUG: Replica::Handle ev: {EvReplicaLookup TabletID: 72057594037932033 Cookie: 4} 2026-01-08T15:24:31.314057Z 1 00h00m10.002048s :STATESTORAGE DEBUG: ProxyRequest::HandleLookup ringGroup:0 ev: {EvReplicaInfo Status: 0 TabletID: 72057594037932033 ClusterStateGeneration: 0 ClusterStateGuid: 0 CurrentLeader: [1:297:39] CurrentLeaderTablet: [1:301:41] CurrentGeneration: 2 CurrentStep: 0} 2026-01-08T15:24:31.314124Z 1 00h00m10.002048s :STATESTORAGE DEBUG: ProxyRequest::HandleLookup ringGroup:0 ev: {EvReplicaInfo Status: 0 TabletID: 72057594037932033 ClusterStateGeneration: 0 ClusterStateGuid: 0 CurrentLeader: [1:297:39] CurrentLeaderTablet: [1:301:41] CurrentGeneration: 2 CurrentStep: 0} 2026-01-08T15:24:31.314192Z 1 00h00m10.002048s :STATESTORAGE ERROR: TStateStorageProxyRequest::MergeReply duplicated TEvReplicaInfo cookie:0 replica:[1:24343667:0] signature:154 ev: {EvReplicaInfo Status: 0 TabletID: 72057594037932033 ClusterStateGeneration: 0 ClusterStateGuid: 0 CurrentLeader: [1:297:39] CurrentLeaderTablet: [1:301:41] CurrentGeneration: 2 CurrentStep: 0} 2026-01-08T15:24:31.314244Z 1 00h00m10.002048s :STATESTORAGE DEBUG: ProxyRequest::HandleLookup ringGroup:0 ev: {EvReplicaInfo Status: 0 TabletID: 72057594037932033 ClusterStateGeneration: 0 ClusterStateGuid: 0 CurrentLeader: [1:297:39] CurrentLeaderTablet: [1:301:41] CurrentGeneration: 2 CurrentStep: 0} 2026-01-08T15:24:31.314273Z 1 00h00m10.002048s :STATESTORAGE DEBUG: ProxyRequest::HandleLookup ringGroup:0 ev: {EvReplicaInfo Status: 0 TabletID: 72057594037932033 ClusterStateGeneration: 0 ClusterStateGuid: 0 CurrentLeader: [1:297:39] CurrentLeaderTablet: [1:301:41] CurrentGeneration: 2 CurrentStep: 0} 2026-01-08T15:24:31.314305Z 1 00h00m10.002048s :STATESTORAGE ERROR: TStateStorageProxyRequest::MergeReply duplicated TEvReplicaInfo cookie:1 replica:[1:1099535971443:0] signature:155 ev: {EvReplicaInfo Status: 0 TabletID: 72057594037932033 ClusterStateGeneration: 0 ClusterStateGuid: 0 CurrentLeader: [1:297:39] CurrentLeaderTablet: [1:301:41] CurrentGeneration: 2 CurrentStep: 0} 2026-01-08T15:24:31.315269Z 1 00h00m10.002048s :STATESTORAGE DEBUG: ProxyRequest::HandleLookup ringGroup:0 ev: {EvReplicaInfo Status: 0 TabletID: 72057594037932033 ClusterStateGeneration: 0 ClusterStateGuid: 0 CurrentLeader: [1:297:39] CurrentLeaderTablet: [1:301:41] CurrentGeneration: 2 CurrentStep: 0} 2026-01-08T15:24:31.315340Z 1 00h00m10.002048s :STATESTORAGE DEBUG: ProxyRequest::HandleUpdateSig ringGroup:0 ev: {EvReplicaInfo Status: 0 TabletID: 
72057594037932033 ClusterStateGeneration: 0 ClusterStateGuid: 0 CurrentLeader: [1:297:39] CurrentLeaderTablet: [1:301:41] CurrentGeneration: 2 CurrentStep: 0} 2026-01-08T15:24:31.315394Z 1 00h00m10.002048s :STATESTORAGE ERROR: TStateStorageProxyRequest::HandleUpdateSig duplicated TEvReplicaInfo cookie:2 replica:[1:2199047599219:0] signature:156 ev: {EvReplicaInfo Status: 0 TabletID: 72057594037932033 ClusterStateGeneration: 0 ClusterStateGuid: 0 CurrentLeader: [1:297:39] CurrentLeaderTablet: [1:301:41] CurrentGeneration: 2 CurrentStep: 0} 2026-01-08T15:24:31.315438Z 1 00h00m10.002048s :STATESTORAGE DEBUG: ProxyRequest::HandleUpdateSig ringGroup:0 ev: {EvReplicaInfo Status: 0 TabletID: 72057594037932033 ClusterStateGeneration: 0 ClusterStateGuid: 0 CurrentLeader: [1:297:39] CurrentLeaderTablet: [1:301:41] CurrentGeneration: 2 CurrentStep: 0} 2026-01-08T15:24:31.315501Z 1 00h00m10.002048s :STATESTORAGE DEBUG: ProxyRequest::HandleUpdateSig ringGroup:0 ev: {EvReplicaInfo Status: 0 TabletID: 72057594037932033 ClusterStateGeneration: 0 ClusterStateGuid: 0 CurrentLeader: [1:297:39] CurrentLeaderTablet: [1:301:41] CurrentGeneration: 2 CurrentStep: 0} 2026-01-08T15:24:31.315541Z 1 00h00m10.002048s :STATESTORAGE ERROR: TStateStorageProxyRequest::HandleUpdateSig duplicated TEvReplicaInfo cookie:3 replica:[1:3298559226995:0] signature:157 ev: {EvReplicaInfo Status: 0 TabletID: 72057594037932033 ClusterStateGeneration: 0 ClusterStateGuid: 0 CurrentLeader: [1:297:39] CurrentLeaderTablet: [1:301:41] CurrentGeneration: 2 CurrentStep: 0} 2026-01-08T15:24:31.315575Z 1 00h00m10.002048s :STATESTORAGE DEBUG: ProxyRequest::HandleUpdateSig ringGroup:0 ev: {EvReplicaInfo Status: 0 TabletID: 72057594037932033 ClusterStateGeneration: 0 ClusterStateGuid: 0 CurrentLeader: [1:297:39] CurrentLeaderTablet: [1:301:41] CurrentGeneration: 2 CurrentStep: 0} |83.8%| [TM] {BAZEL_UPLOAD} ydb/core/blobstorage/ut_blobstorage/ut_statestorage/unittest |83.8%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/scheme_board/ut_monitoring/ydb-core-tx-scheme_board-ut_monitoring |83.8%| [TM] {asan, default-linux-x86_64, release} ydb/core/sys_view/service/ut/unittest >> SysViewQueryHistory::StableMerge2 [GOOD] >> TS3WrapperTests::CompleteUnknownUpload [GOOD] >> TS3WrapperTests::MultipartUpload [GOOD] |83.8%| [LD] {RESULT} $(B)/ydb/core/tx/scheme_board/ut_monitoring/ydb-core-tx-scheme_board-ut_monitoring |83.8%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/scheme_board/ut_monitoring/ydb-core-tx-scheme_board-ut_monitoring |83.8%| [TM] {BAZEL_UPLOAD} ydb/core/blobstorage/ut_blobstorage/ut_statestorage/unittest >> TVDiskConfigTest::JustConfig [GOOD] >> TVDiskConfigTest::Basic [GOOD] >> TVDiskConfigTest::NoMoneyNoHoney [GOOD] |83.8%| [TM] {asan, default-linux-x86_64, release} ydb/core/sys_view/service/ut/unittest >> SysViewQueryHistory::TopReadBytesAdd [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/ut_blobstorage/ut_statestorage/unittest >> TStateStorageRingGroupState::TestStateStorageUpdateSig [GOOD] Test command err: RandomSeed# 13662685421181941688 2026-01-08T15:24:31.759186Z 9 00h00m00.000000s :BS_NODE DEBUG: {NWDC15@distconf.cpp:427} StateFunc Type# 268639238 Sender# [8:269:20] SessionId# [9:19:8] Cookie# 7112437736707115223 2026-01-08T15:24:31.759298Z 9 00h00m00.000000s :BS_NODE DEBUG: {NWDC54@distconf_binding.cpp:373} SubscribeToPeerNode NodeId# 8 SessionId# [9:19:8] Inserted# false Subscription# {SessionId# [9:19:8] SubscriptionCookie# 0} NextSubscribeCookie# 6 2026-01-08T15:24:31.766694Z 9 
00h00m00.000000s :BS_NODE DEBUG: {NWDC02@distconf_binding.cpp:611} TEvNodeConfigPush NodeId# 8 Cookie# 7112437736707115223 SessionId# [9:19:8] Binding# {6.1/15901204079387369035@[9:47:6]} Record# {BoundNodes { NodeId { Host: "127.0.0.9" Port: 19001 NodeId: 9 } Meta { Fingerprint: "\3403\207\365\032>> TStateStorage2RingGroups::TestStateStorageReplyOnce [GOOD] Test command err: RandomSeed# 6747459594779002989 2026-01-08T15:24:31.628884Z 8 00h00m00.000000s :BS_NODE DEBUG: {NWDC15@distconf.cpp:427} StateFunc Type# 268639238 Sender# [9:291:20] SessionId# [8:18:8] Cookie# 2251390371350501373 2026-01-08T15:24:31.635392Z 8 00h00m00.000000s :BS_NODE DEBUG: {NWDC02@distconf_binding.cpp:611} TEvNodeConfigPush NodeId# 9 Cookie# 2251390371350501373 SessionId# [8:18:8] Binding# {7.1/13321199371136541525@[8:29:7]} Record# {Initial: true BoundNodes { NodeId { Host: "127.0.0.9" Port: 19001 NodeId: 9 } Meta { Fingerprint: "\3403\207\365\032> Record# {DeletedBoundNodeIds { Host: "127.0.0.6" Port: 19001 NodeId: 6 } DeletedBoundNodeIds { Host: "127.0.0.7" Port: 19001 NodeId: 7 } DeletedBoundNodeIds { Host: "127.0.0.9" Port: 19001 NodeId: 9 } DeletedBoundNodeIds { Host: "127.0.0.4" Port: 19001 NodeId: 4 } DeletedBoundNodeIds { Host: "127.0.0.1" Port: 19001 NodeId: 1 } DeletedBoundNodeIds { Host: "127.0.0.8" Port: 19001 NodeId: 8 } DeletedBoundNodeIds { Host: "127.0.0.5" Port: 19001 NodeId: 5 } DeletedBoundNodeIds { Host: "127.0.0.3" Port: 19001 NodeId: 3 } } RootNodeId# 1 StorageConfigGeneration# 0 KnownNode# true 2026-01-08T15:24:31.635997Z 1 00h00m00.000000s :BS_NODE DEBUG: {NWDC54@distconf_binding.cpp:373} SubscribeToPeerNode NodeId# 2 SessionId# [1:130:1] Inserted# false Subscription# {SessionId# [1:130:1] SubscriptionCookie# 0} NextSubscribeCookie# 8 2026-01-08T15:24:31.636028Z 1 00h00m00.000000s :BS_NODE DEBUG: {NWDC34@distconf_binding.cpp:551} DeleteBound RefererNodeId# 2 NodeId# 127.0.0.6:19001/6 2026-01-08T15:24:31.636062Z 1 00h00m00.000000s :BS_NODE DEBUG: {NWDC34@distconf_binding.cpp:551} DeleteBound RefererNodeId# 2 NodeId# 127.0.0.7:19001/7 2026-01-08T15:24:31.636087Z 1 00h00m00.000000s :BS_NODE DEBUG: {NWDC34@distconf_binding.cpp:551} DeleteBound RefererNodeId# 2 NodeId# 127.0.0.9:19001/9 2026-01-08T15:24:31.636119Z 1 00h00m00.000000s :BS_NODE DEBUG: {NWDC34@distconf_binding.cpp:551} DeleteBound RefererNodeId# 2 NodeId# 127.0.0.4:19001/4 2026-01-08T15:24:31.636156Z 1 00h00m00.000000s :BS_NODE DEBUG: {NWDC34@distconf_binding.cpp:551} DeleteBound RefererNodeId# 2 NodeId# 127.0.0.1:19001/1 2026-01-08T15:24:31.636194Z 1 00h00m00.000000s :BS_NODE DEBUG: {NWDC34@distconf_binding.cpp:551} DeleteBound RefererNodeId# 2 NodeId# 127.0.0.8:19001/8 2026-01-08T15:24:31.636223Z 1 00h00m00.000000s :BS_NODE DEBUG: {NWDC34@distconf_binding.cpp:551} DeleteBound RefererNodeId# 2 NodeId# 127.0.0.5:19001/5 2026-01-08T15:24:31.636247Z 1 00h00m00.000000s :BS_NODE DEBUG: {NWDC34@distconf_binding.cpp:551} DeleteBound RefererNodeId# 2 NodeId# 127.0.0.3:19001/3 2026-01-08T15:24:31.636315Z 1 00h00m00.000000s :BS_NODE DEBUG: {NWDC54@distconf_binding.cpp:373} SubscribeToPeerNode NodeId# 9 SessionId# [0:0:0] Inserted# true Subscription# {SessionId# [0:0:0] SubscriptionCookie# 0} NextSubscribeCookie# 8 2026-01-08T15:24:31.636365Z 1 00h00m00.000000s :BS_NODE DEBUG: {NWDC29@distconf_binding.cpp:188} Initiated bind NodeId# 9 Binding# {9.0/14363580190843707070@[0:0:0]} SessionId# [0:0:0] 2026-01-08T15:24:31.636403Z 8 00h00m00.000000s :BS_NODE DEBUG: {NWDC15@distconf.cpp:427} StateFunc Type# 268639239 Sender# [7:277:20] SessionId# 
[8:29:7] Cookie# 13321199371136541525 2026-01-08T15:24:31.636434Z 8 00h00m00.000000s :BS_NODE DEBUG: {NWDC54@distconf_binding.cpp:373} SubscribeToPeerNode NodeId# 7 SessionId# [8:29:7] Inserted# false Subscription# {SessionId# [8:29:7] SubscriptionCookie# 0} NextSubscribeCookie# 4 2026-01-08T15:24:31.636496Z 8 00h00m00.000000s :BS_NODE DEBUG: {NWDC17@distconf_binding.cpp:437} TEvNodeConfigReversePush NodeId# 7 Cookie# 13321199371136541525 SessionId# [8:29:7] Binding# {7.1/13321199371136541525@[8:29:7]} Record# {RootNodeId: 9 } 2026-01-08T15:24:31.636717Z 7 00h00m00.000000s :BS_NODE DEBUG: {NWDC15@distconf.cpp:427} StateFunc Type# 268639239 Sender# [6:270:20] SessionId# [7:41:6] Cookie# 5867878523198859685 2026-01-08T15:24:31.636751Z 7 00h00m00.000000s :BS_NODE DEBUG: {NWDC54@distconf_binding.cpp:373} SubscribeToPeerNode NodeId# 6 SessionId# [7:41:6] Inserted# false Subscription# {SessionId# [7:41:6] SubscriptionCookie# 0} NextSubscribeCookie# 3 2026-01-08T15:24:31.636806Z 7 00h00m00.000000s :BS_NODE DEBUG: {NWDC17@distconf_binding.cpp:437} TEvNodeConfigReversePush NodeId# 6 Cookie# 5867878523198859685 SessionId# [7:41:6] Binding# {6.9/5867878523198859685@[7:41:6]} Record# {RootNodeId: 1 } 2026-01-08T15:24:31.636936Z 8 00h00m00.000000s :BS_NODE DEBUG: {NWDC15@distconf.cpp:427} StateFunc Type# 131077 Sender# [8:18:8] SessionId# [0:0:0] Cookie# 3 2026-01-08T15:24:31.636968Z 8 00h00m00.000000s :BS_NODE DEBUG: {NWDC14@distconf_binding.cpp:223} TEvNodeConnected NodeId# 9 SessionId# [8:18:8] Cookie# 3 CookieInFlight# true SubscriptionExists# true 2026-01-08T15:24:31.637029Z 1 00h00m00.000000s :BS_NODE DEBUG: {NWDC15@distconf.cpp:427} StateFunc Type# 131077 Sender# [1:151:8] SessionId# [0:0:0] Cookie# 8 2026-01-08T15:24:31.637062Z 1 00h00m00.000000s :BS_NODE DEBUG: {NWDC14@distconf_binding.cpp:223} TEvNodeConnected NodeId# 9 SessionId# [1:151:8] Cookie# 8 CookieInFlight# true SubscriptionExists# true 2026-01-08T15:24:31.637103Z 1 00h00m00.000000s :BS_NODE DEBUG: {NWDC09@distconf_binding.cpp:259} Continuing bind Binding# {9.0/14363580190843707070@[0:0:0]} 2026-01-08T15:24:31.638041Z 9 00h00m00.000000s :BS_NODE DEBUG: {NWDC15@distconf.cpp:427} StateFunc Type# 268639239 Sender# [8:284:20] SessionId# [9:19:8] Cookie# 2251390371350501373 2026-01-08T15:24:31.638081Z 9 00h00m00.000000s :BS_NODE DEBUG: {NWDC54@distconf_binding.cpp:373} SubscribeToPeerNode NodeId# 8 SessionId# [9:19:8] Inserted# false Subscription# {SessionId# [9:19:8] SubscriptionCookie# 0} NextSubscribeCookie# 7 2026-01-08T15:24:31.638142Z 9 00h00m00.000000s :BS_NODE DEBUG: {NWDC17@distconf_binding.cpp:437} TEvNodeConfigReversePush NodeId# 8 Cookie# 2251390371350501373 SessionId# [9:19:8] Binding# {8.0/2251390371350501373@[9:19:8]} Record# {RootNodeId: 1 } 2026-01-08T15:24:31.638180Z 7 00h00m00.000000s :BS_NODE DEBUG: {NWDC15@distconf.cpp:427} StateFunc Type# 268639238 Sender# [8:284:20] SessionId# [7:28:7] Cookie# 13321199371136541525 2026-01-08T15:24:31.638209Z 7 00h00m00.000000s :BS_NODE DEBUG: {NWDC54@distconf_binding.cpp:373} SubscribeToPeerNode NodeId# 8 SessionId# [7:28:7] Inserted# false Subscription# {SessionId# [7:28:7] SubscriptionCookie# 0} NextSubscribeCookie# 3 2026-01-08T15:24:31.638292Z 7 00h00m00.000000s :BS_NODE DEBUG: {NWDC02@distconf_binding.cpp:611} TEvNodeConfigPush NodeId# 8 Cookie# 13321199371136541525 SessionId# [7:28:7] Binding# {6.1/5867878523198859685@[7:41:6]} Record# {BoundNodes { NodeId { Host: "127.0.0.9" Port: 19001 NodeId: 9 } Meta { Fingerprint: "\3403\207\365\032>> TS3WrapperTests::HeadObject |83.9%| 
[TM] {BAZEL_UPLOAD} ydb/core/sys_view/service/ut/unittest |83.9%| [TM] {BAZEL_UPLOAD} ydb/core/blobstorage/ut_blobstorage/ut_statestorage/unittest |83.9%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/blobstorage/ut_blobstorage/ut_read_only_pdisk/ut_blobstorage-ut_read_only_pdisk |83.9%| [TM] {BAZEL_UPLOAD} ydb/core/blobstorage/ut_blobstorage/ut_statestorage/unittest >> TBlobStorageSyncNeighborsTest::CheckRevLookup [GOOD] >> TBlobStorageSyncNeighborsTest::CheckIsMyDomain [GOOD] >> TBlobStorageSyncNeighborsTest::CheckFailDomainsIterators [GOOD] >> TBlobStorageSyncNeighborsTest::CheckVDiskDistance [GOOD] |83.9%| [LD] {RESULT} $(B)/ydb/core/blobstorage/ut_blobstorage/ut_read_only_pdisk/ut_blobstorage-ut_read_only_pdisk |83.9%| [TM] {asan, default-linux-x86_64, release} ydb/core/sys_view/service/ut/unittest >> SysViewQueryHistory::AggrMerge [GOOD] |83.9%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/blobstorage/ut_blobstorage/ut_read_only_pdisk/ut_blobstorage-ut_read_only_pdisk >> TQueueBackpressureTest::CreateDelete [GOOD] |83.9%| [TM] {BAZEL_UPLOAD} ydb/core/sys_view/service/ut/unittest |83.9%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/ut_blobstorage/ut_bridge/unittest >> TS3WrapperTests::HeadObject [GOOD] >> TBsVDiskRange::Simple3PutRangeGetMiddleBackwardCompaction [GOOD] |83.9%| [TM] {BAZEL_UPLOAD} ydb/core/blobstorage/ut_blobstorage/ut_bridge/unittest >> TResizableCircleBufTest::Test1 [GOOD] >> TResizableCircleBufTest::Test2 [GOOD] >> TTrackable::TBuffer [GOOD] >> TPDiskErrorStateTests::Basic [GOOD] >> TPDiskErrorStateTests::Basic2 [GOOD] >> TPDiskErrorStateTests::BasicErrorReason [GOOD] >> TBsVDiskRangeHuge::Simple3PutRangeGetMiddleBackwardCompaction [GOOD] >> TBsLocalRecovery::WriteRestartRead [GOOD] ------- [TS] {asan, default-linux-x86_64, release} ydb/core/wrappers/ut/unittest >> TS3WrapperTests::MultipartUpload [GOOD] Test command err: 2026-01-08T15:24:32.496203Z node 1 :S3_WRAPPER NOTICE: s3_storage.h:75: Request: uuid# 67F3A772-675A-4508-9478-38A67413C70D, request# CreateMultipartUpload { Bucket: TEST Key: key } REQUEST: POST /TEST/key?uploads HTTP/1.1 HEADERS: Host: localhost:5088 Accept: */* Connection: Upgrade, HTTP2-Settings Upgrade: h2c HTTP2-Settings: AAMAAABkAAQAoAAAAAIAAAAA amz-sdk-invocation-id: 4EC143EF-D6E3-40C7-AA42-EF4732875747 amz-sdk-request: attempt=1 content-length: 0 content-type: application/xml user-agent: aws-sdk-cpp/1.11.37 Linux/5.15.0-157-generic x86_64 Clang/20.1.8 x-amz-api-version: 2006-03-01 x-amz-storage-class: STANDARD S3_MOCK::HttpServeAction: 4 / /TEST/key / uploads= 2026-01-08T15:24:32.512258Z node 1 :S3_WRAPPER NOTICE: s3_storage.h:63: Response: uuid# 67F3A772-675A-4508-9478-38A67413C70D, response# CreateMultipartUploadResult { Bucket: Key: TEST/key UploadId: 1 } 2026-01-08T15:24:32.512767Z node 1 :S3_WRAPPER NOTICE: s3_storage.h:75: Request: uuid# CC326106-397C-478B-AD6F-561AD2768E3F, request# UploadPart { Bucket: TEST Key: key UploadId: 1 PartNumber: 1 } REQUEST: PUT /TEST/key?partNumber=1&uploadId=1 HTTP/1.1 HEADERS: Host: localhost:5088 Accept: */* Connection: Upgrade, HTTP2-Settings Upgrade: h2c HTTP2-Settings: AAMAAABkAAQAoAAAAAIAAAAA amz-sdk-invocation-id: 13A45D59-4169-4E46-AD81-B9669F7A1D04 amz-sdk-request: attempt=1 content-length: 4 content-md5: hBotaJrYa9FhFEdFPCLG/A== content-type: binary/octet-stream user-agent: aws-sdk-cpp/1.11.37 Linux/5.15.0-157-generic x86_64 Clang/20.1.8 S3_MOCK::HttpServeWrite: /TEST/key / partNumber=1&uploadId=1 / 4 2026-01-08T15:24:32.523059Z node 1 :S3_WRAPPER NOTICE: 
s3_storage.h:63: Response: uuid# CC326106-397C-478B-AD6F-561AD2768E3F, response# UploadPartResult { ETag: 841a2d689ad86bd1611447453c22c6fc } 2026-01-08T15:24:32.526565Z node 1 :S3_WRAPPER NOTICE: s3_storage.h:75: Request: uuid# 0E8A8388-9C59-4B9C-A0B5-AF727477F5B6, request# CompleteMultipartUpload { Bucket: TEST Key: key UploadId: 1 MultipartUpload: { Parts: [841a2d689ad86bd1611447453c22c6fc] } } REQUEST: POST /TEST/key?uploadId=1 HTTP/1.1 HEADERS: Host: localhost:5088 Accept: */* Connection: Upgrade, HTTP2-Settings Upgrade: h2c HTTP2-Settings: AAMAAABkAAQAoAAAAAIAAAAA amz-sdk-invocation-id: DE78759F-75D8-4F06-BD90-810AC30A3C68 amz-sdk-request: attempt=1 content-length: 235 content-type: application/xml user-agent: aws-sdk-cpp/1.11.37 Linux/5.15.0-157-generic x86_64 Clang/20.1.8 x-amz-api-version: 2006-03-01 S3_MOCK::HttpServeAction: 4 / /TEST/key / uploadId=1 2026-01-08T15:24:32.530228Z node 1 :S3_WRAPPER NOTICE: s3_storage.h:63: Response: uuid# 0E8A8388-9C59-4B9C-A0B5-AF727477F5B6, response# CompleteMultipartUploadResult { Bucket: Key: TEST/key ETag: 841a2d689ad86bd1611447453c22c6fc } 2026-01-08T15:24:32.530657Z node 1 :S3_WRAPPER NOTICE: s3_storage.h:75: Request: uuid# 5D50F9E9-9355-4522-84A1-F3EE6FAE8257, request# GetObject { Bucket: TEST Key: key Range: bytes=0-3 } REQUEST: GET /TEST/key HTTP/1.1 HEADERS: Host: localhost:5088 Accept: */* Connection: Upgrade, HTTP2-Settings Upgrade: h2c HTTP2-Settings: AAMAAABkAAQAoAAAAAIAAAAA amz-sdk-invocation-id: 2BF6D81B-04BE-4C09-813B-F0F383859DD6 amz-sdk-request: attempt=1 content-type: application/xml range: bytes=0-3 user-agent: aws-sdk-cpp/1.11.37 Linux/5.15.0-157-generic x86_64 Clang/20.1.8 x-amz-api-version: 2006-03-01 S3_MOCK::HttpServeRead: /TEST/key / 4 2026-01-08T15:24:32.554808Z node 1 :S3_WRAPPER NOTICE: s3_storage.h:63: Response: uuid# 5D50F9E9-9355-4522-84A1-F3EE6FAE8257, response# GetObjectResult { } |83.9%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/backpressure/ut/unittest >> TBsLocalRecovery::MultiPutWriteRestartRead |83.9%| [TS] {BAZEL_UPLOAD} ydb/core/wrappers/ut/unittest ------- [TS] {asan, default-linux-x86_64, release} ydb/core/wrappers/ut/unittest >> TS3WrapperTests::CompleteUnknownUpload [GOOD] Test command err: 2026-01-08T15:24:32.522205Z node 1 :S3_WRAPPER NOTICE: s3_storage.h:75: Request: uuid# C10074AE-35C2-4D01-9B92-08639ED156D2, request# CompleteMultipartUpload { Bucket: TEST Key: key UploadId: uploadId MultipartUpload: { Parts: [ETag] } } REQUEST: POST /TEST/key?uploadId=uploadId HTTP/1.1 HEADERS: Host: localhost:3595 Accept: */* Connection: Upgrade, HTTP2-Settings Upgrade: h2c HTTP2-Settings: AAMAAABkAAQAoAAAAAIAAAAA amz-sdk-invocation-id: 275C8896-39F3-40DF-923D-F61869479157 amz-sdk-request: attempt=1 content-length: 207 content-type: application/xml user-agent: aws-sdk-cpp/1.11.37 Linux/5.15.0-157-generic x86_64 Clang/20.1.8 x-amz-api-version: 2006-03-01 S3_MOCK::HttpServeAction: 4 / /TEST/key / uploadId=uploadId 2026-01-08T15:24:32.555266Z node 1 :S3_WRAPPER NOTICE: s3_storage.h:63: Response: uuid# C10074AE-35C2-4D01-9B92-08639ED156D2, response# |83.9%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/vdisk/common/ut/unittest >> TVDiskConfigTest::NoMoneyNoHoney [GOOD] |83.9%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/vdisk/common/ut/unittest >> TBlobStorageSyncNeighborsTest::CheckVDiskDistance [GOOD] |83.9%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/backpressure/ut/unittest >> TQueueBackpressureTest::CreateDelete [GOOD] |83.9%| [TM] 
{BAZEL_UPLOAD} ydb/core/blobstorage/backpressure/ut/unittest >> TCircleBufStringStreamTest::TestNotAligned [GOOD] >> TCircleBufStringStreamTest::TestOverflow [GOOD] >> TCircleBufTest::EmptyTest [GOOD] >> TCircleBufTest::OverflowTest [GOOD] |83.9%| [TS] {BAZEL_UPLOAD} ydb/core/wrappers/ut/unittest |83.9%| [TM] {BAZEL_UPLOAD} ydb/core/blobstorage/vdisk/common/ut/unittest |83.9%| [TM] {BAZEL_UPLOAD} ydb/core/blobstorage/vdisk/common/ut/unittest |83.9%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/vdisk/common/ut/unittest >> TTrackable::TBuffer [GOOD] |83.9%| [TM] {BAZEL_UPLOAD} ydb/core/blobstorage/backpressure/ut/unittest |83.9%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/vdisk/common/ut/unittest >> TPDiskErrorStateTests::BasicErrorReason [GOOD] |83.9%| [TM] {BAZEL_UPLOAD} ydb/core/blobstorage/vdisk/common/ut/unittest ------- [TS] {asan, default-linux-x86_64, release} ydb/core/wrappers/ut/unittest >> TS3WrapperTests::HeadObject [GOOD] Test command err: 2026-01-08T15:24:33.595304Z node 1 :S3_WRAPPER NOTICE: s3_storage.h:75: Request: uuid# 9E913D27-7703-4469-83F6-602D46106BAC, request# PutObject { Bucket: TEST Key: key } REQUEST: PUT /TEST/key HTTP/1.1 HEADERS: Host: localhost:5847 Accept: */* Connection: Upgrade, HTTP2-Settings Upgrade: h2c HTTP2-Settings: AAMAAABkAAQAoAAAAAIAAAAA amz-sdk-invocation-id: 3272C3D1-B8AB-487B-B15C-910D7857731A amz-sdk-request: attempt=1 content-length: 4 content-md5: hBotaJrYa9FhFEdFPCLG/A== content-type: binary/octet-stream user-agent: aws-sdk-cpp/1.11.37 Linux/5.15.0-157-generic x86_64 Clang/20.1.8 x-amz-storage-class: STANDARD S3_MOCK::HttpServeWrite: /TEST/key / / 4 2026-01-08T15:24:33.611821Z node 1 :S3_WRAPPER NOTICE: s3_storage.h:63: Response: uuid# 9E913D27-7703-4469-83F6-602D46106BAC, response# PutObjectResult { ETag: 841a2d689ad86bd1611447453c22c6fc } 2026-01-08T15:24:33.612407Z node 1 :S3_WRAPPER NOTICE: s3_storage.h:75: Request: uuid# 9F252515-81D9-4A04-A8AD-AE730AF3121D, request# HeadObject { Bucket: TEST Key: key } REQUEST: HEAD /TEST/key HTTP/1.1 HEADERS: Host: localhost:5847 Accept: */* Connection: Upgrade, HTTP2-Settings Upgrade: h2c HTTP2-Settings: AAMAAABkAAQAoAAAAAIAAAAA amz-sdk-invocation-id: 1ADA5306-53CD-4BE1-B622-8EF96F9E6DDE amz-sdk-request: attempt=1 content-type: application/xml user-agent: aws-sdk-cpp/1.11.37 Linux/5.15.0-157-generic x86_64 Clang/20.1.8 x-amz-api-version: 2006-03-01 S3_MOCK::HttpServeRead: /TEST/key / 4 2026-01-08T15:24:33.619264Z node 1 :S3_WRAPPER NOTICE: s3_storage.h:63: Response: uuid# 9F252515-81D9-4A04-A8AD-AE730AF3121D, response# HeadObjectResult { ETag: 841a2d689ad86bd1611447453c22c6fc ContentLength: 4 } |83.9%| [TM] {BAZEL_UPLOAD} ydb/core/blobstorage/vdisk/common/ut/unittest |83.9%| [TS] {BAZEL_UPLOAD} ydb/core/wrappers/ut/unittest >> TTrackable::TVector [GOOD] >> TTrackable::TList [GOOD] >> TTrackable::TString [GOOD] |83.9%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/ut_vdisk/unittest >> TBsVDiskRangeHuge::Simple3PutRangeGetMiddleBackwardCompaction [GOOD] |83.9%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/ut_vdisk/unittest >> TBsVDiskRange::Simple3PutRangeGetMiddleBackwardCompaction [GOOD] >> TBlobStorageHullSstIt::TestSeekToLast [GOOD] >> TBlobStorageHullSstIt::TestSstIndexSaveLoad [GOOD] >> TBlobStorageHullWriteSst::LogoBlobOneSstMultiIndex [GOOD] >> TBlobStorageHullWriteSst::LogoBlobMultiSstOneIndexPartOutbound |83.9%| [TM] {BAZEL_UPLOAD} ydb/core/blobstorage/ut_vdisk/unittest |83.9%| [TM] {BAZEL_UPLOAD} 
ydb/core/blobstorage/ut_vdisk/unittest >> TCowBTreeTest::MultipleSnapshotsWithGc [GOOD] >> TCowBTreeTest::MultipleSnapshotsWithClear >> TBlobStorageHullWriteSst::LogoBlobMultiSstOneIndexPartOutbound [GOOD] >> DqSpillingFileTests::Write_TotalSizeLimitExceeded >> DqSpillingFileTests::Write_TotalSizeLimitExceeded [GOOD] >> THullDsGenericNWayIt::ForwardIteration [GOOD] >> THullDsGenericNWayIt::BackwardIteration [GOOD] >> TBlobStorageHullDecimal::TestMkRatio [GOOD] >> TBlobStorageHullDecimal::TestMult [GOOD] >> TStateStorageConfig::TestReplicaSelection [GOOD] >> TStateStorageConfig::TestMultiReplicaFailDomains |83.9%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/vdisk/common/ut/unittest >> TCircleBufTest::OverflowTest [GOOD] |83.9%| [TM] {BAZEL_UPLOAD} ydb/core/blobstorage/vdisk/common/ut/unittest >> TBlobStorageHullStorageRatio::Test [GOOD] >> TBlobStorageKeyBarrierTest::ParseTest [GOOD] |83.9%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/vdisk/hulldb/generic/ut/unittest >> TBlobStorageHullSstIt::TestSstIndexSaveLoad [GOOD] |83.9%| [TM] {BAZEL_UPLOAD} ydb/core/blobstorage/vdisk/hulldb/generic/ut/unittest |83.9%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/vdisk/common/ut/unittest >> TTrackable::TString [GOOD] |83.9%| [TM] {BAZEL_UPLOAD} ydb/core/blobstorage/vdisk/common/ut/unittest >> TBlobStorageLinearTrackBar::TestLinearTrackBarDouble [GOOD] >> TBlobStorageLinearTrackBar::TestLinearTrackBarWithDecimal [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/library/yql/dq/actors/spilling/ut/unittest >> DqSpillingFileTests::Write_TotalSizeLimitExceeded [GOOD] Test command err: 2026-01-08T15:24:36.126847Z :KQP_COMPUTE ERROR: spilling_file.cpp:425: [Write] Total size limit exceeded. From: [1:5:2052], blobId: 2, bytes: 50 |83.9%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/vdisk/hulldb/base/ut/unittest >> TBlobStorageHullDecimal::TestMult [GOOD] |83.9%| [TM] {BAZEL_UPLOAD} ydb/library/yql/dq/actors/spilling/ut/unittest |84.0%| [TM] {BAZEL_UPLOAD} ydb/core/blobstorage/vdisk/hulldb/base/ut/unittest |83.9%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/vdisk/hulldb/generic/ut/unittest >> TBlobStorageHullWriteSst::LogoBlobMultiSstOneIndexPartOutbound [GOOD] |84.0%| [TM] {BAZEL_UPLOAD} ydb/core/blobstorage/vdisk/hulldb/generic/ut/unittest |84.0%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/vdisk/hulldb/base/ut/unittest >> THullDsGenericNWayIt::BackwardIteration [GOOD] |84.0%| [TM] {BAZEL_UPLOAD} ydb/core/blobstorage/vdisk/hulldb/base/ut/unittest >> THullDsHeapItTest::HeapLevelSliceForwardIteratorBenchmark >> TIncrHugeBasicTest::Defrag [GOOD] >> THullDsHeapItTest::HeapLevelSliceForwardIteratorBenchmark [GOOD] >> THullDsHeapItTest::HeapLevelSliceBackwardIteratorBenchmark |84.0%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/vdisk/hulldb/base/ut/unittest >> TBlobStorageKeyBarrierTest::ParseTest [GOOD] |84.0%| [TM] {BAZEL_UPLOAD} ydb/core/blobstorage/vdisk/hulldb/base/ut/unittest >> THullDsHeapItTest::HeapLevelSliceBackwardIteratorBenchmark [GOOD] |84.0%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/vdisk/hulldb/base/ut/unittest >> TBlobStorageLinearTrackBar::TestLinearTrackBarWithDecimal [GOOD] |84.0%| [TM] {BAZEL_UPLOAD} ydb/core/blobstorage/vdisk/hulldb/base/ut/unittest |84.0%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/scheme_board/ut_monitoring/unittest |84.0%| [TM] {BAZEL_UPLOAD} ydb/core/tx/scheme_board/ut_monitoring/unittest 
|84.0%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/scheme_board/ut_monitoring/unittest >> BSCReadOnlyPDisk::ReadOnlyOneByOne >> TBsLocalRecovery::MultiPutWriteRestartRead [GOOD] >> TBsLocalRecovery::MultiPutWriteRestartReadHuge |84.0%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/blobstorage/ut_blobstorage/ut_stop_pdisk/ydb-core-blobstorage-ut_blobstorage-ut_stop_pdisk |84.0%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/blobstorage/ut_blobstorage/ut_stop_pdisk/ydb-core-blobstorage-ut_blobstorage-ut_stop_pdisk |84.0%| [LD] {RESULT} $(B)/ydb/core/blobstorage/ut_blobstorage/ut_stop_pdisk/ydb-core-blobstorage-ut_blobstorage-ut_stop_pdisk |84.0%| [TM] {BAZEL_UPLOAD} ydb/core/tx/scheme_board/ut_monitoring/unittest >> BSCReadOnlyPDisk::RestartAndReadOnlyConsecutive |84.0%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/scheme_board/ut_monitoring/unittest |84.0%| [TM] {BAZEL_UPLOAD} ydb/core/tx/scheme_board/ut_monitoring/unittest >> StatsFormat::AggregateStat [GOOD] |84.0%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/scheme_board/ut_monitoring/unittest |84.0%| [TM] {BAZEL_UPLOAD} ydb/core/tx/scheme_board/ut_monitoring/unittest >> BSCReadOnlyPDisk::ReadOnlySlay >> TBlobStorageHullWriteSst::LogoBlobMultiSstOneIndex [GOOD] >> TBlobStorageHullWriteSst::LogoBlobMultiSstMultiIndex [GOOD] |84.0%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/tests/functional/scheme_shard/ydb-tests-functional-scheme_shard |84.0%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/tests/functional/scheme_shard/ydb-tests-functional-scheme_shard |84.0%| [LD] {RESULT} $(B)/ydb/tests/functional/scheme_shard/ydb-tests-functional-scheme_shard |84.0%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/vdisk/hulldb/base/ut/unittest >> THullDsHeapItTest::HeapLevelSliceBackwardIteratorBenchmark [GOOD] |84.0%| [TM] {BAZEL_UPLOAD} ydb/core/blobstorage/vdisk/hulldb/base/ut/unittest |84.0%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/ut_blobstorage/ut_read_only_pdisk/unittest >> TStateStorageRingGroupState::TestBoardConfigMismatch |84.0%| [TM] {BAZEL_UPLOAD} ydb/core/blobstorage/ut_blobstorage/ut_read_only_pdisk/unittest ------- [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/incrhuge/ut/unittest >> TIncrHugeBasicTest::Defrag [GOOD] Test command err: 2026-01-08T15:23:19.442589Z :BS_INCRHUGE DEBUG: incrhuge_keeper.cpp:72: BlockSize# 8128 BlocksInChunk# 2304 BlocksInMinBlob# 65 MaxBlobsPerChunk# 35 BlocksInDataSection# 2303 BlocksInIndexSection# 1 2026-01-08T15:23:19.442726Z :BS_INCRHUGE INFO: incrhuge_keeper_recovery.cpp:152: [PDisk# 000000001 Recovery] [IncrHugeKeeper PDisk# 000000001] starting ReadLog 2026-01-08T15:23:19.443127Z :BS_INCRHUGE INFO: incrhuge_keeper_recovery.cpp:161: [PDisk# 000000001 Recovery] [IncrHugeKeeper PDisk# 000000001] finished ReadLog 2026-01-08T15:23:19.443175Z :BS_INCRHUGE DEBUG: incrhuge_keeper_recovery.cpp:200: [PDisk# 000000001 Recovery] ApplyReadLog Chunks# [] Deletes# [] Owners# {} CurrentSerNum# 0 NextLsn# 1 2026-01-08T15:23:19.443209Z :BS_INCRHUGE INFO: incrhuge_keeper_recovery.cpp:515: [PDisk# 000000001 Recovery] [IncrHugeKeeper PDisk# 000000001] ready 2026-01-08T15:23:19.443242Z :TEST DEBUG: test_actor_concurrent.h:153: finished Init Reference# [] Enumerated# [] InFlightDeletes# [] 2026-01-08T15:23:19.443256Z :TEST DEBUG: test_actor_concurrent.h:209: ActionsTaken# 1 2026-01-08T15:23:19.443271Z :TEST DEBUG: test_actor_concurrent.h:213: GetNumRequestsInFlight# 0 InFlightWritesSize# 0 2026-01-08T15:23:19.445006Z :TEST 
DEBUG: test_actor_concurrent.h:295: sent Write LogoBlobId# [1:1:1:0:811717:0:0] Lsn# 0 NumReq# 0 2026-01-08T15:23:19.446496Z :BS_INCRHUGE DEBUG: incrhuge_keeper_write.cpp:130: [PDisk# 000000001 Writer] QueryId# 0 HandleWrite Lsn# 0 DataSize# 811717 WriteQueueSize# 1 WriteInProgressItemsSize# 0 2026-01-08T15:23:19.446534Z :BS_INCRHUGE DEBUG: incrhuge_keeper_write.cpp:174: [PDisk# 000000001 Writer] WriteQueueSize# 1 WriteInProgressItemsSize# 0 2026-01-08T15:23:19.446561Z :BS_INCRHUGE DEBUG: incrhuge_keeper_write.cpp:192: [PDisk# 000000001 Writer] QueryId# 0 ProcessWriteItem entry 2026-01-08T15:23:19.446582Z :BS_INCRHUGE DEBUG: incrhuge_keeper_write.cpp:230: [PDisk# 000000001 Writer] QueryId# 0 ProcessWriteItem no free chunks 2026-01-08T15:23:19.447768Z :TEST DEBUG: test_actor_concurrent.h:213: GetNumRequestsInFlight# 1 InFlightWritesSize# 1 2026-01-08T15:23:19.457579Z :TEST DEBUG: test_actor_concurrent.h:295: sent Write LogoBlobId# [1:1:1:0:1745495:1:0] Lsn# 1 NumReq# 1 2026-01-08T15:23:19.460719Z :BS_INCRHUGE DEBUG: incrhuge_keeper_write.cpp:130: [PDisk# 000000001 Writer] QueryId# 1 HandleWrite Lsn# 1 DataSize# 1745495 WriteQueueSize# 2 WriteInProgressItemsSize# 0 2026-01-08T15:23:19.460741Z :BS_INCRHUGE DEBUG: incrhuge_keeper_write.cpp:174: [PDisk# 000000001 Writer] WriteQueueSize# 2 WriteInProgressItemsSize# 0 2026-01-08T15:23:19.460757Z :BS_INCRHUGE DEBUG: incrhuge_keeper_write.cpp:192: [PDisk# 000000001 Writer] QueryId# 0 ProcessWriteItem entry 2026-01-08T15:23:19.460773Z :BS_INCRHUGE DEBUG: incrhuge_keeper_write.cpp:230: [PDisk# 000000001 Writer] QueryId# 0 ProcessWriteItem no free chunks 2026-01-08T15:23:19.461665Z :TEST DEBUG: test_actor_concurrent.h:213: GetNumRequestsInFlight# 2 InFlightWritesSize# 2 2026-01-08T15:23:19.463114Z :BS_INCRHUGE DEBUG: incrhuge_keeper_write.cpp:130: [PDisk# 000000001 Writer] QueryId# 2 HandleWrite Lsn# 2 DataSize# 602037 WriteQueueSize# 3 WriteInProgressItemsSize# 0 2026-01-08T15:23:19.463142Z :BS_INCRHUGE DEBUG: incrhuge_keeper_write.cpp:174: [PDisk# 000000001 Writer] WriteQueueSize# 3 WriteInProgressItemsSize# 0 2026-01-08T15:23:19.463157Z :BS_INCRHUGE DEBUG: incrhuge_keeper_write.cpp:192: [PDisk# 000000001 Writer] QueryId# 0 ProcessWriteItem entry 2026-01-08T15:23:19.463181Z :BS_INCRHUGE DEBUG: incrhuge_keeper_write.cpp:230: [PDisk# 000000001 Writer] QueryId# 0 ProcessWriteItem no free chunks 2026-01-08T15:23:19.470400Z :TEST DEBUG: test_actor_concurrent.h:295: sent Write LogoBlobId# [1:1:1:0:602037:2:0] Lsn# 2 NumReq# 2 2026-01-08T15:23:19.471729Z :TEST DEBUG: test_actor_concurrent.h:213: GetNumRequestsInFlight# 3 InFlightWritesSize# 3 2026-01-08T15:23:19.473751Z :TEST DEBUG: test_actor_concurrent.h:295: sent Write LogoBlobId# [1:1:1:0:1287465:3:0] Lsn# 3 NumReq# 3 2026-01-08T15:23:19.476457Z :TEST DEBUG: test_actor_concurrent.h:213: GetNumRequestsInFlight# 4 InFlightWritesSize# 4 2026-01-08T15:23:19.477345Z :BS_INCRHUGE DEBUG: incrhuge_keeper_write.cpp:130: [PDisk# 000000001 Writer] QueryId# 3 HandleWrite Lsn# 3 DataSize# 1287465 WriteQueueSize# 4 WriteInProgressItemsSize# 0 2026-01-08T15:23:19.477387Z :BS_INCRHUGE DEBUG: incrhuge_keeper_write.cpp:174: [PDisk# 000000001 Writer] WriteQueueSize# 4 WriteInProgressItemsSize# 0 2026-01-08T15:23:19.477409Z :BS_INCRHUGE DEBUG: incrhuge_keeper_write.cpp:192: [PDisk# 000000001 Writer] QueryId# 0 ProcessWriteItem entry 2026-01-08T15:23:19.477423Z :BS_INCRHUGE DEBUG: incrhuge_keeper_write.cpp:230: [PDisk# 000000001 Writer] QueryId# 0 ProcessWriteItem no free chunks 2026-01-08T15:23:19.483786Z :TEST DEBUG: 
test_actor_concurrent.h:295: sent Write LogoBlobId# [1:1:1:0:1501676:4:0] Lsn# 4 NumReq# 4 2026-01-08T15:23:19.486510Z :BS_INCRHUGE DEBUG: incrhuge_keeper_write.cpp:130: [PDisk# 000000001 Writer] QueryId# 4 HandleWrite Lsn# 4 DataSize# 1501676 WriteQueueSize# 5 WriteInProgressItemsSize# 0 2026-01-08T15:23:19.486534Z :BS_INCRHUGE DEBUG: incrhuge_keeper_write.cpp:174: [PDisk# 000000001 Writer] WriteQueueSize# 5 WriteInProgressItemsSize# 0 2026-01-08T15:23:19.486550Z :BS_INCRHUGE DEBUG: incrhuge_keeper_write.cpp:192: [PDisk# 000000001 Writer] QueryId# 0 ProcessWriteItem entry 2026-01-08T15:23:19.486565Z :BS_INCRHUGE DEBUG: incrhuge_keeper_write.cpp:230: [PDisk# 000000001 Writer] QueryId# 0 ProcessWriteItem no free chunks 2026-01-08T15:23:19.487362Z :TEST DEBUG: test_actor_concurrent.h:213: GetNumRequestsInFlight# 5 InFlightWritesSize# 5 2026-01-08T15:23:19.488366Z :TEST DEBUG: test_actor_concurrent.h:295: sent Write LogoBlobId# [1:1:1:0:687721:5:0] Lsn# 5 NumReq# 5 2026-01-08T15:23:19.489813Z :TEST DEBUG: test_actor_concurrent.h:213: GetNumRequestsInFlight# 6 InFlightWritesSize# 6 2026-01-08T15:23:19.490562Z :BS_INCRHUGE DEBUG: incrhuge_keeper_write.cpp:130: [PDisk# 000000001 Writer] QueryId# 5 HandleWrite Lsn# 5 DataSize# 687721 WriteQueueSize# 6 WriteInProgressItemsSize# 0 2026-01-08T15:23:19.490587Z :BS_INCRHUGE DEBUG: incrhuge_keeper_write.cpp:174: [PDisk# 000000001 Writer] WriteQueueSize# 6 WriteInProgressItemsSize# 0 2026-01-08T15:23:19.490603Z :BS_INCRHUGE DEBUG: incrhuge_keeper_write.cpp:192: [PDisk# 000000001 Writer] QueryId# 0 ProcessWriteItem entry 2026-01-08T15:23:19.490618Z :BS_INCRHUGE DEBUG: incrhuge_keeper_write.cpp:230: [PDisk# 000000001 Writer] QueryId# 0 ProcessWriteItem no free chunks 2026-01-08T15:23:19.493053Z :TEST DEBUG: test_actor_concurrent.h:295: sent Write LogoBlobId# [1:1:1:0:1957662:6:0] Lsn# 6 NumReq# 6 2026-01-08T15:23:19.496520Z :BS_INCRHUGE DEBUG: incrhuge_keeper_write.cpp:130: [PDisk# 000000001 Writer] QueryId# 6 HandleWrite Lsn# 6 DataSize# 1957662 WriteQueueSize# 7 WriteInProgressItemsSize# 0 2026-01-08T15:23:19.496563Z :BS_INCRHUGE DEBUG: incrhuge_keeper_write.cpp:174: [PDisk# 000000001 Writer] WriteQueueSize# 7 WriteInProgressItemsSize# 0 2026-01-08T15:23:19.496579Z :BS_INCRHUGE DEBUG: incrhuge_keeper_write.cpp:192: [PDisk# 000000001 Writer] QueryId# 0 ProcessWriteItem entry 2026-01-08T15:23:19.496594Z :BS_INCRHUGE DEBUG: incrhuge_keeper_write.cpp:230: [PDisk# 000000001 Writer] QueryId# 0 ProcessWriteItem no free chunks 2026-01-08T15:23:19.497100Z :TEST DEBUG: test_actor_concurrent.h:213: GetNumRequestsInFlight# 7 InFlightWritesSize# 7 2026-01-08T15:23:19.499754Z :TEST DEBUG: test_actor_concurrent.h:295: sent Write LogoBlobId# [1:1:1:0:1824284:7:0] Lsn# 7 NumReq# 7 2026-01-08T15:23:19.502522Z :BS_INCRHUGE DEBUG: incrhuge_keeper_write.cpp:130: [PDisk# 000000001 Writer] QueryId# 7 HandleWrite Lsn# 7 DataSize# 1824284 WriteQueueSize# 8 WriteInProgressItemsSize# 0 2026-01-08T15:23:19.502554Z :BS_INCRHUGE DEBUG: incrhuge_keeper_write.cpp:174: [PDisk# 000000001 Writer] WriteQueueSize# 8 WriteInProgressItemsSize# 0 2026-01-08T15:23:19.502579Z :BS_INCRHUGE DEBUG: incrhuge_keeper_write.cpp:192: [PDisk# 000000001 Writer] QueryId# 0 ProcessWriteItem entry 2026-01-08T15:23:19.502595Z :BS_INCRHUGE DEBUG: incrhuge_keeper_write.cpp:230: [PDisk# 000000001 Writer] QueryId# 0 ProcessWriteItem no free chunks 2026-01-08T15:23:19.523343Z :BS_INCRHUGE DEBUG: incrhuge_keeper_log.cpp:460: [PDisk# 000000001 Logger] ApplyLogChunkItem Lsn# 1 Status# OK 2026-01-08T15:23:19.523411Z 
:BS_INCRHUGE DEBUG: incrhuge_keeper_alloc.cpp:64: [PDisk# 000000001 Allocator] ChunkIdx# 2 ChunkSerNum# 1000 2026-01-08T15:23:19.523433Z :BS_INCRHUGE DEBUG: incrhuge_keeper_alloc.cpp:64: [PDisk# 000000001 Allocator] ChunkIdx# 3 ChunkSerNum# 1001 2026-01-08T15:23:19.523445Z :BS_INCRHUGE DEBUG: incrhuge_keeper_alloc.cpp:64: [PDisk# 000000001 Allocator] ChunkIdx# 4 ChunkSerNum# 1002 2026-01-08T15:23:19.523457Z :BS_INCRHUGE DEBUG: incrhuge_keeper_alloc.cpp:64: [PDisk# 000000001 Allocator] ChunkIdx# 5 ChunkSerNum# 1003 2026-01-08T15:23:19.523469Z :BS_INCRHUGE DEBUG: incrhuge_keeper_alloc.cpp:64: [PDisk# 000000001 Allocator] ChunkIdx# 6 ChunkSerNum# 1004 2026-01-08T15:23:19.523483Z :BS_INCRHUGE DEBUG: incrhuge_keeper_alloc.cpp:64: [PDisk# 000000001 Allocator] ChunkIdx# 7 ChunkSerNum# 1005 2026-01-08T15:23:19.523495Z :BS_INCRHUGE DEBUG: incrhuge_keeper_alloc.cpp:64: [PDisk# 000000001 Allocator] ChunkIdx# 8 ChunkSerNum# 1006 2026-01-08T15:23:19.523528Z :BS_INCRHUGE DEBUG: incrhuge_keeper_alloc.cpp:64: [PDisk# 000000001 Allocator] ChunkIdx# 9 ChunkSerNum# 1007 2026-01-08T15:23:19.523553Z :BS_INCRHUGE DEBUG: incrhuge_keeper_write.cpp:174: [PDisk# 000000001 Writer] WriteQueueSize# 8 WriteInProgressItemsSize# 0 2026-01-08T15:23:19.523571Z :BS_INCRHUGE DEBUG: incrhuge_keeper_write.cpp:192: [PDisk# 000000001 Writer] QueryId# 0 ProcessWriteItem entry 2026-01-08T15:23:19.524354Z :BS_INCRHUGE DEBUG: incrhuge_keeper_write.cpp:319: [PDisk# 000000001 Writer] QueryId# 0 ProcessWriteItem OffsetInBlocks# 0 IndexInsideChunk# 0 SizeInBlocks# 100 SizeInBytes# 812800 Offset# 0 Size# 812800 End# 812800 Id# 0000000000000000 ChunkIdx# 2 ChunkSerNum# 1000 Defrag# false 2026-01-08T15:23:19.524384Z :BS_INCRHUGE DEBUG: incrhuge_keeper_write.cpp:192: [PDisk# 000000001 Writer] QueryId# 1 ProcessWriteItem entry 2026-01-08T15:23:19.524772Z :BS_INCRHUGE DEBUG: incrhuge_keeper_write.cpp:319: [PDisk# 000000001 Writer] QueryId# 1 ProcessWriteItem OffsetInBlocks# 100 IndexInsideChunk# 1 SizeInBlocks# 215 SizeInBytes# 1747520 Offset# 812800 Size# 1747520 End# 2560320 Id# 0000000000000001 ChunkIdx# 2 ChunkSerNum# 1000 Defrag# false 2026-01-08T15:23:19.524807Z :BS_INCRHUGE DEBUG: incrhuge_keeper_write.cpp:192: [PDisk# 000000001 Writer] QueryId# 2 ProcessWriteItem entry 2026-01-08T15:23:19.524958Z :BS_INCRHUGE DEBUG: incrhuge_keeper_write.cpp:319: [PDisk# 000000001 Writer] QueryId# 2 ProcessWriteItem OffsetInBlocks# 315 IndexInsideChunk# 2 SizeInBlocks# 75 SizeInBytes# 609600 Offset# 2560320 Size# 609600 End# 3169920 Id# 0000000000000002 ChunkIdx# 2 ChunkSerNum# 1000 Defrag# false 2026-01-08T15:23:19.524972Z :BS_INCRHUGE DEBUG: incrhuge_keeper_write.cpp:192: [PDisk# 000000001 Writer] QueryId# 3 ProcessWriteItem entry 2026-01-08T15:23:19.525286Z :BS_INCRHUGE DEBUG: incrhuge_keeper_write.cpp:319: [PDisk# 000000001 Writer] QueryId# 3 ProcessWriteItem OffsetInBlocks# 390 IndexInsideChunk# 3 SizeInBlocks# 159 SizeInBytes# 1292352 Offset# 3169920 Size# 1292352 End# 4462272 Id# 0000000000000003 ChunkIdx# 2 ChunkSerNum# 1000 Defrag# false 2026-01-08T15:23:19.525296Z :BS_INCRHUGE DEBUG: incrhuge_keeper_write.cpp:192: [PDisk# 000000001 Writer] QueryId# 4 ProcessWriteItem entry 2026-01-08T15:23:19.525667Z :BS_INCRHUGE DEBUG: incrhuge_keeper_write.cpp:319: [PDisk# 000000001 Writer] QueryId# 4 ProcessWriteItem OffsetInBlocks# 549 IndexInsideChunk# 4 SizeInBlocks# 185 SizeInBytes# 1503680 Offset# 4462272 Size# 1503680 End# 5965952 Id# 0000000000000004 ChunkIdx# 2 ChunkSerNum# 1000 Defrag# false 2026-01-08T15:23:19.754188Z :BS_INCRHUGE DEBUG: 
incrhuge_keeper_write.cpp:344: [PDisk# 000000001 Writer] QueryId# 0 ApplyBlobWrite Status# OK 2026-01-08T15:23:19.754411Z :B ... ite.cpp:130: [PDisk# 000000001 Writer] QueryId# 497 HandleWrite Lsn# 1184 DataSize# 931109 WriteQueueSize# 1 WriteInProgressItemsSize# 4 2026-01-08T15:24:37.283114Z :BS_INCRHUGE DEBUG: incrhuge_keeper_write.cpp:174: [PDisk# 000000001 Writer] WriteQueueSize# 1 WriteInProgressItemsSize# 4 2026-01-08T15:24:37.283126Z :BS_INCRHUGE DEBUG: incrhuge_keeper_write.cpp:192: [PDisk# 000000001 Writer] QueryId# 497 ProcessWriteItem entry 2026-01-08T15:24:37.283327Z :BS_INCRHUGE DEBUG: incrhuge_keeper_write.cpp:319: [PDisk# 000000001 Writer] QueryId# 497 ProcessWriteItem OffsetInBlocks# 167 IndexInsideChunk# 2 SizeInBlocks# 115 SizeInBytes# 934720 Offset# 1357376 Size# 934720 End# 2292096 Id# 000000000000002a ChunkIdx# 31 ChunkSerNum# 1097 Defrag# false 2026-01-08T15:24:37.283359Z :BS_INCRHUGE DEBUG: incrhuge_keeper_delete.cpp:50: [PDisk# 000000001 Deleter] Owner# 1 SeqNo# 1185 HandleDelete Ids# [0000000000000011] 2026-01-08T15:24:37.283389Z :BS_INCRHUGE DEBUG: incrhuge_keeper_log.cpp:544: [PDisk# 000000001 Logger] LogBlobDeletes ChunkIdx# 29 ChunkSerNum# 1095 Id# 0000000000000011 IndexInsideChunk# 6 SizeInBlocks# 146 Lsn# 750 Owner# 1 SeqNo# 1185 2026-01-08T15:24:37.283406Z :BS_INCRHUGE DEBUG: incrhuge_keeper_log.cpp:638: [PDisk# 000000001 Logger] ProcessDeleteQueueItem Lsn# 750 Entrypoint# false Virtual# false 2026-01-08T15:24:37.283451Z :BS_INCRHUGE DEBUG: incrhuge_keeper_delete.cpp:50: [PDisk# 000000001 Deleter] Owner# 1 SeqNo# 1186 HandleDelete Ids# [0000000000000032] 2026-01-08T15:24:37.283478Z :BS_INCRHUGE DEBUG: incrhuge_keeper_log.cpp:544: [PDisk# 000000001 Logger] LogBlobDeletes ChunkIdx# 30 ChunkSerNum# 1096 Id# 0000000000000032 IndexInsideChunk# 10 SizeInBlocks# 162 Lsn# 751 Owner# 1 SeqNo# 1186 2026-01-08T15:24:37.283491Z :BS_INCRHUGE DEBUG: incrhuge_keeper_log.cpp:638: [PDisk# 000000001 Logger] ProcessDeleteQueueItem Lsn# 751 Entrypoint# false Virtual# false 2026-01-08T15:24:37.283527Z :BS_INCRHUGE DEBUG: incrhuge_keeper_delete.cpp:50: [PDisk# 000000001 Deleter] Owner# 1 SeqNo# 1187 HandleDelete Ids# [000000000000001f] 2026-01-08T15:24:37.283550Z :BS_INCRHUGE DEBUG: incrhuge_keeper_log.cpp:544: [PDisk# 000000001 Logger] LogBlobDeletes ChunkIdx# 30 ChunkSerNum# 1096 Id# 000000000000001f IndexInsideChunk# 3 SizeInBlocks# 159 Lsn# 752 Owner# 1 SeqNo# 1187 2026-01-08T15:24:37.283562Z :BS_INCRHUGE DEBUG: incrhuge_keeper_log.cpp:638: [PDisk# 000000001 Logger] ProcessDeleteQueueItem Lsn# 752 Entrypoint# false Virtual# false 2026-01-08T15:24:37.287227Z :TEST DEBUG: test_actor_concurrent.h:295: sent Write LogoBlobId# [1:2:1:0:1241129:1188:0] Lsn# 1188 NumReq# 40 2026-01-08T15:24:37.289701Z :BS_INCRHUGE DEBUG: incrhuge_keeper_write.cpp:344: [PDisk# 000000001 Writer] QueryId# 493 ApplyBlobWrite Status# OK 2026-01-08T15:24:37.289861Z :BS_INCRHUGE DEBUG: incrhuge_keeper_write.cpp:174: [PDisk# 000000001 Writer] WriteQueueSize# 0 WriteInProgressItemsSize# 4 2026-01-08T15:24:37.289901Z :BS_INCRHUGE DEBUG: incrhuge_keeper_write.cpp:130: [PDisk# 000000001 Writer] QueryId# 498 HandleWrite Lsn# 1188 DataSize# 1241129 WriteQueueSize# 1 WriteInProgressItemsSize# 4 2026-01-08T15:24:37.289917Z :BS_INCRHUGE DEBUG: incrhuge_keeper_write.cpp:174: [PDisk# 000000001 Writer] WriteQueueSize# 1 WriteInProgressItemsSize# 4 2026-01-08T15:24:37.289935Z :BS_INCRHUGE DEBUG: incrhuge_keeper_write.cpp:192: [PDisk# 000000001 Writer] QueryId# 498 ProcessWriteItem entry 2026-01-08T15:24:37.290219Z 
:BS_INCRHUGE DEBUG: incrhuge_keeper_write.cpp:319: [PDisk# 000000001 Writer] QueryId# 498 ProcessWriteItem OffsetInBlocks# 282 IndexInsideChunk# 3 SizeInBlocks# 153 SizeInBytes# 1243584 Offset# 2292096 Size# 1243584 End# 3535680 Id# 0000000000000017 ChunkIdx# 31 ChunkSerNum# 1097 Defrag# false 2026-01-08T15:24:37.298433Z :TEST DEBUG: test_actor_concurrent.h:213: GetNumRequestsInFlight# 41 InFlightWritesSize# 18 2026-01-08T15:24:37.298487Z :TEST DEBUG: test_actor_concurrent.h:381: sent Delete Id# 0000000000000021 NumReq# 41 2026-01-08T15:24:37.298505Z :TEST DEBUG: test_actor_concurrent.h:213: GetNumRequestsInFlight# 42 InFlightWritesSize# 18 2026-01-08T15:24:37.299521Z :TEST DEBUG: test_actor_concurrent.h:295: sent Write LogoBlobId# [1:2:1:0:771353:1190:0] Lsn# 1190 NumReq# 42 2026-01-08T15:24:37.301027Z :TEST DEBUG: test_actor_concurrent.h:213: GetNumRequestsInFlight# 43 InFlightWritesSize# 19 2026-01-08T15:24:37.302570Z :BS_INCRHUGE DEBUG: incrhuge_keeper_delete.cpp:50: [PDisk# 000000001 Deleter] Owner# 1 SeqNo# 1189 HandleDelete Ids# [0000000000000021] 2026-01-08T15:24:37.302626Z :BS_INCRHUGE DEBUG: incrhuge_keeper_log.cpp:544: [PDisk# 000000001 Logger] LogBlobDeletes ChunkIdx# 29 ChunkSerNum# 1095 Id# 0000000000000021 IndexInsideChunk# 2 SizeInBlocks# 201 Lsn# 753 Owner# 1 SeqNo# 1189 2026-01-08T15:24:37.302652Z :BS_INCRHUGE DEBUG: incrhuge_keeper_log.cpp:638: [PDisk# 000000001 Logger] ProcessDeleteQueueItem Lsn# 753 Entrypoint# false Virtual# false 2026-01-08T15:24:37.302729Z :BS_INCRHUGE DEBUG: incrhuge_keeper_write.cpp:130: [PDisk# 000000001 Writer] QueryId# 499 HandleWrite Lsn# 1190 DataSize# 771353 WriteQueueSize# 1 WriteInProgressItemsSize# 5 2026-01-08T15:24:37.302746Z :BS_INCRHUGE DEBUG: incrhuge_keeper_write.cpp:174: [PDisk# 000000001 Writer] WriteQueueSize# 1 WriteInProgressItemsSize# 5 2026-01-08T15:24:37.306418Z :TEST DEBUG: test_actor_concurrent.h:295: sent Write LogoBlobId# [1:2:1:0:1415564:1191:0] Lsn# 1191 NumReq# 43 2026-01-08T15:24:37.309172Z :TEST DEBUG: test_actor_concurrent.h:213: GetNumRequestsInFlight# 44 InFlightWritesSize# 20 2026-01-08T15:24:37.310402Z :BS_INCRHUGE DEBUG: incrhuge_keeper_write.cpp:130: [PDisk# 000000001 Writer] QueryId# 500 HandleWrite Lsn# 1191 DataSize# 1415564 WriteQueueSize# 2 WriteInProgressItemsSize# 5 2026-01-08T15:24:37.310424Z :BS_INCRHUGE DEBUG: incrhuge_keeper_write.cpp:174: [PDisk# 000000001 Writer] WriteQueueSize# 2 WriteInProgressItemsSize# 5 2026-01-08T15:24:37.314921Z :TEST DEBUG: test_actor_concurrent.h:295: sent Write LogoBlobId# [1:2:1:0:1165695:1192:0] Lsn# 1192 NumReq# 44 2026-01-08T15:24:37.317199Z :TEST DEBUG: test_actor_concurrent.h:213: GetNumRequestsInFlight# 45 InFlightWritesSize# 21 2026-01-08T15:24:37.318402Z :BS_INCRHUGE DEBUG: incrhuge_keeper_write.cpp:130: [PDisk# 000000001 Writer] QueryId# 501 HandleWrite Lsn# 1192 DataSize# 1165695 WriteQueueSize# 3 WriteInProgressItemsSize# 5 2026-01-08T15:24:37.318423Z :BS_INCRHUGE DEBUG: incrhuge_keeper_write.cpp:174: [PDisk# 000000001 Writer] WriteQueueSize# 3 WriteInProgressItemsSize# 5 2026-01-08T15:24:37.323537Z :TEST DEBUG: test_actor_concurrent.h:295: sent Write LogoBlobId# [1:2:1:0:1564457:1193:0] Lsn# 1193 NumReq# 45 2026-01-08T15:24:37.325788Z :BS_INCRHUGE DEBUG: incrhuge_keeper_write.cpp:130: [PDisk# 000000001 Writer] QueryId# 502 HandleWrite Lsn# 1193 DataSize# 1564457 WriteQueueSize# 4 WriteInProgressItemsSize# 5 2026-01-08T15:24:37.325810Z :BS_INCRHUGE DEBUG: incrhuge_keeper_write.cpp:174: [PDisk# 000000001 Writer] WriteQueueSize# 4 WriteInProgressItemsSize# 5 
2026-01-08T15:24:37.330406Z :BS_INCRHUGE DEBUG: incrhuge_keeper_log.cpp:648: [PDisk# 000000001 Logger] ProcessDeleteQueueItem Lsn# 746 Status# OK 2026-01-08T15:24:37.330445Z :BS_INCRHUGE DEBUG: incrhuge_keeper_log.cpp:686: [PDisk# 000000001 Logger] ApplyLogDeleteItem Entrypoint# false Lsn# 746 Virtual# false 2026-01-08T15:24:37.330471Z :BS_INCRHUGE DEBUG: incrhuge_keeper_delete.cpp:196: [PDisk# 000000001 Deleter] Owner# 1 SeqNo# 1176 finished Status# OK 2026-01-08T15:24:37.330490Z :BS_INCRHUGE DEBUG: incrhuge_keeper_delete.cpp:249: [PDisk# 000000001 Deleter] deleting 0000000000000009 from lookup table 2026-01-08T15:24:37.330523Z :BS_INCRHUGE DEBUG: incrhuge_keeper_log.cpp:648: [PDisk# 000000001 Logger] ProcessDeleteQueueItem Lsn# 747 Status# OK 2026-01-08T15:24:37.330535Z :BS_INCRHUGE DEBUG: incrhuge_keeper_log.cpp:686: [PDisk# 000000001 Logger] ApplyLogDeleteItem Entrypoint# false Lsn# 747 Virtual# false 2026-01-08T15:24:37.330549Z :BS_INCRHUGE DEBUG: incrhuge_keeper_delete.cpp:196: [PDisk# 000000001 Deleter] Owner# 1 SeqNo# 1178 finished Status# OK 2026-01-08T15:24:37.330562Z :BS_INCRHUGE DEBUG: incrhuge_keeper_delete.cpp:249: [PDisk# 000000001 Deleter] deleting 000000000000000f from lookup table 2026-01-08T15:24:37.330585Z :BS_INCRHUGE DEBUG: incrhuge_keeper_delete.cpp:326: [PDisk# 000000001 Deleter] sending chunk delete ChunkIdx# 28 2026-01-08T15:24:37.330640Z :BS_INCRHUGE DEBUG: incrhuge_keeper_log.cpp:648: [PDisk# 000000001 Logger] ProcessDeleteQueueItem Lsn# 748 Status# OK 2026-01-08T15:24:37.330655Z :BS_INCRHUGE DEBUG: incrhuge_keeper_log.cpp:686: [PDisk# 000000001 Logger] ApplyLogDeleteItem Entrypoint# false Lsn# 748 Virtual# false 2026-01-08T15:24:37.330670Z :BS_INCRHUGE DEBUG: incrhuge_keeper_delete.cpp:196: [PDisk# 000000001 Deleter] Owner# 1 SeqNo# 1179 finished Status# OK 2026-01-08T15:24:37.330686Z :BS_INCRHUGE DEBUG: incrhuge_keeper_delete.cpp:249: [PDisk# 000000001 Deleter] deleting 0000000000000025 from lookup table 2026-01-08T15:24:37.330709Z :BS_INCRHUGE DEBUG: incrhuge_keeper_log.cpp:686: [PDisk# 000000001 Logger] ApplyLogDeleteItem Entrypoint# false Lsn# 749 Virtual# true 2026-01-08T15:24:37.330726Z :BS_INCRHUGE DEBUG: incrhuge_keeper_log.cpp:686: [PDisk# 000000001 Logger] ApplyLogDeleteItem Entrypoint# false Lsn# 749 Virtual# true 2026-01-08T15:24:37.330758Z :BS_INCRHUGE DEBUG: incrhuge_keeper_defrag.cpp:105: [PDisk# 000000001 Defragmenter] ApplyScan received 2026-01-08T15:24:37.330785Z :BS_INCRHUGE DEBUG: incrhuge_keeper_defrag.cpp:289: [PDisk# 000000001 Defragmenter] finishing ChunkIdx# 28 ChunkSerNum# 1094 2026-01-08T15:24:37.332170Z :TEST DEBUG: test_actor_concurrent.h:213: GetNumRequestsInFlight# 46 InFlightWritesSize# 22 2026-01-08T15:24:37.338550Z :TEST DEBUG: test_actor_concurrent.h:295: sent Write LogoBlobId# [1:2:1:0:1579253:1194:0] Lsn# 1194 NumReq# 46 2026-01-08T15:24:37.341643Z :TEST DEBUG: test_actor_concurrent.h:213: GetNumRequestsInFlight# 47 InFlightWritesSize# 23 2026-01-08T15:24:37.342404Z :BS_INCRHUGE DEBUG: incrhuge_keeper_write.cpp:130: [PDisk# 000000001 Writer] QueryId# 503 HandleWrite Lsn# 1194 DataSize# 1579253 WriteQueueSize# 5 WriteInProgressItemsSize# 5 2026-01-08T15:24:37.342425Z :BS_INCRHUGE DEBUG: incrhuge_keeper_write.cpp:174: [PDisk# 000000001 Writer] WriteQueueSize# 5 WriteInProgressItemsSize# 5 2026-01-08T15:24:37.344950Z :TEST DEBUG: test_actor_concurrent.h:295: sent Write LogoBlobId# [1:2:1:0:584806:1195:0] Lsn# 1195 NumReq# 47 2026-01-08T15:24:37.345902Z :BS_INCRHUGE DEBUG: incrhuge_keeper_write.cpp:130: [PDisk# 000000001 Writer] 
QueryId# 504 HandleWrite Lsn# 1195 DataSize# 584806 WriteQueueSize# 6 WriteInProgressItemsSize# 5 2026-01-08T15:24:37.345918Z :BS_INCRHUGE DEBUG: incrhuge_keeper_write.cpp:174: [PDisk# 000000001 Writer] WriteQueueSize# 6 WriteInProgressItemsSize# 5 2026-01-08T15:24:37.346346Z :TEST DEBUG: test_actor_concurrent.h:213: GetNumRequestsInFlight# 48 InFlightWritesSize# 24 2026-01-08T15:24:37.347477Z :TEST DEBUG: test_actor_concurrent.h:295: sent Write LogoBlobId# [1:2:1:0:831121:1196:0] Lsn# 1196 NumReq# 48 2026-01-08T15:24:37.349095Z :TEST DEBUG: test_actor_concurrent.h:213: GetNumRequestsInFlight# 49 InFlightWritesSize# 25 2026-01-08T15:24:37.349902Z :TEST DEBUG: test_actor_concurrent.h:295: sent Write LogoBlobId# [1:2:1:0:562722:1197:0] Lsn# 1197 NumReq# 49 2026-01-08T15:24:37.350494Z :BS_INCRHUGE DEBUG: incrhuge_keeper_write.cpp:130: [PDisk# 000000001 Writer] QueryId# 505 HandleWrite Lsn# 1196 DataSize# 831121 WriteQueueSize# 7 WriteInProgressItemsSize# 5 2026-01-08T15:24:37.350514Z :BS_INCRHUGE DEBUG: incrhuge_keeper_write.cpp:174: [PDisk# 000000001 Writer] WriteQueueSize# 7 WriteInProgressItemsSize# 5 2026-01-08T15:24:37.350535Z :BS_INCRHUGE DEBUG: incrhuge_keeper_write.cpp:130: [PDisk# 000000001 Writer] QueryId# 506 HandleWrite Lsn# 1197 DataSize# 562722 WriteQueueSize# 8 WriteInProgressItemsSize# 5 2026-01-08T15:24:37.350547Z :BS_INCRHUGE DEBUG: incrhuge_keeper_write.cpp:174: [PDisk# 000000001 Writer] WriteQueueSize# 8 WriteInProgressItemsSize# 5 >> VarLengthIntCodec::BasicTest64 [GOOD] >> VarLengthIntCodec::Random32 |84.0%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/ut_blobstorage/ut_read_only_pdisk/unittest |84.0%| [TM] {BAZEL_UPLOAD} ydb/core/blobstorage/incrhuge/ut/unittest >> TStateStorageRingGroupState::TestBoardConfigMismatch [GOOD] |84.0%| [TM] {BAZEL_UPLOAD} ydb/core/blobstorage/ut_blobstorage/ut_read_only_pdisk/unittest >> BSCReadOnlyPDisk::ReadOnlyNotAllowed >> VarLengthIntCodec::Random32 [GOOD] >> VarLengthIntCodec::Random64 >> BridgeGet::PartRestorationAcrossBridgeOnRange >> VarLengthIntCodec::Random64 [GOOD] |84.0%| [TM] {asan, default-linux-x86_64, release} ydb/core/fq/libs/compute/common/ut/unittest >> StatsFormat::AggregateStat [GOOD] |84.0%| [TM] {BAZEL_UPLOAD} ydb/core/fq/libs/compute/common/ut/unittest >> TCowBTreeTest::MultipleSnapshotsWithClear [GOOD] >> TCowBTreeTest::MultipleSnapshotsWithClearWithGc >> TBsLocalRecovery::MultiPutWriteRestartReadHuge [GOOD] >> TBsLocalRecovery::ChaoticWriteRestartHugeXXX |84.0%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/vdisk/hulldb/generic/ut/unittest >> TBlobStorageHullWriteSst::LogoBlobMultiSstMultiIndex [GOOD] |84.0%| [TM] {BAZEL_UPLOAD} ydb/core/blobstorage/vdisk/hulldb/generic/ut/unittest ------- [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/ut_blobstorage/ut_statestorage/unittest >> TStateStorageRingGroupState::TestBoardConfigMismatch [GOOD] Test command err: RandomSeed# 10015775520569834660 2026-01-08T15:24:39.398472Z 7 00h00m00.000000s :BS_NODE DEBUG: {NWDC15@distconf.cpp:427} StateFunc Type# 268639240 Sender# [1:220:36] SessionId# [7:146:1] Cookie# 15221935065328484304 2026-01-08T15:24:39.398545Z 7 00h00m00.000000s :BS_NODE DEBUG: {NWDC54@distconf_binding.cpp:373} SubscribeToPeerNode NodeId# 1 SessionId# [7:146:1] Inserted# false Subscription# {SessionId# [7:146:1] SubscriptionCookie# 0} NextSubscribeCookie# 6 2026-01-08T15:24:39.398620Z 7 00h00m00.000000s :BS_NODE DEBUG: {NWDC16@distconf_binding.cpp:750} TEvNodeConfigUnbind NodeId# 1 Cookie# 15221935065328484304 
SessionId# [7:146:1] Binding# {6.1/12929530514207813051@[7:41:6]} 2026-01-08T15:24:39.398676Z 7 00h00m00.000000s :BS_NODE DEBUG: {NWDC06@distconf_binding.cpp:759} UnbindNode NodeId# 1 Reason# explicit unbind request 2026-01-08T15:24:39.398718Z 7 00h00m00.000000s :BS_NODE DEBUG: {NWDC34@distconf_binding.cpp:551} DeleteBound RefererNodeId# 1 NodeId# 127.0.0.6:19001/6 2026-01-08T15:24:39.398762Z 7 00h00m00.000000s :BS_NODE DEBUG: {NWDC34@distconf_binding.cpp:551} DeleteBound RefererNodeId# 1 NodeId# 127.0.0.9:19001/9 2026-01-08T15:24:39.398803Z 7 00h00m00.000000s :BS_NODE DEBUG: {NWDC34@distconf_binding.cpp:551} DeleteBound RefererNodeId# 1 NodeId# 127.0.0.4:19001/4 2026-01-08T15:24:39.398832Z 7 00h00m00.000000s :BS_NODE DEBUG: {NWDC34@distconf_binding.cpp:551} DeleteBound RefererNodeId# 1 NodeId# 127.0.0.1:19001/1 2026-01-08T15:24:39.398862Z 7 00h00m00.000000s :BS_NODE DEBUG: {NWDC34@distconf_binding.cpp:551} DeleteBound RefererNodeId# 1 NodeId# 127.0.0.8:19001/8 2026-01-08T15:24:39.398887Z 7 00h00m00.000000s :BS_NODE DEBUG: {NWDC34@distconf_binding.cpp:551} DeleteBound RefererNodeId# 1 NodeId# 127.0.0.2:19001/2 2026-01-08T15:24:39.398912Z 7 00h00m00.000000s :BS_NODE DEBUG: {NWDC34@distconf_binding.cpp:551} DeleteBound RefererNodeId# 1 NodeId# 127.0.0.5:19001/5 2026-01-08T15:24:39.398950Z 7 00h00m00.000000s :BS_NODE DEBUG: {NWDC34@distconf_binding.cpp:551} DeleteBound RefererNodeId# 1 NodeId# 127.0.0.3:19001/3 2026-01-08T15:24:39.399020Z 7 00h00m00.000000s :BS_NODE DEBUG: {NWDC55@distconf_binding.cpp:345} UnsubscribeInterconnect NodeId# 1 Subscription# {SessionId# [7:146:1] SubscriptionCookie# 0} 2026-01-08T15:24:39.399101Z 2 00h00m00.000000s :BS_NODE DEBUG: {NWDC15@distconf.cpp:427} StateFunc Type# 268639239 Sender# [1:220:36] SessionId# [2:131:1] Cookie# 14714039449352634514 2026-01-08T15:24:39.399144Z 2 00h00m00.000000s :BS_NODE DEBUG: {NWDC54@distconf_binding.cpp:373} SubscribeToPeerNode NodeId# 1 SessionId# [2:131:1] Inserted# false Subscription# {SessionId# [2:131:1] SubscriptionCookie# 0} NextSubscribeCookie# 5 2026-01-08T15:24:39.405736Z 2 00h00m00.000000s :BS_NODE DEBUG: {NWDC17@distconf_binding.cpp:437} TEvNodeConfigReversePush NodeId# 1 Cookie# 14714039449352634514 SessionId# [2:131:1] Binding# {1.7/14714039449352634514@[2:131:1]} Record# {RootNodeId: 1 } 2026-01-08T15:24:39.405852Z 2 00h00m00.000000s :BS_NODE WARN: {NWDC01@distconf.cpp:423} StateFunc too long Type# 268639239 Duration# 0.006746s 2026-01-08T15:24:39.405915Z 3 00h00m00.000000s :BS_NODE DEBUG: {NWDC15@distconf.cpp:427} StateFunc Type# 268639239 Sender# [1:220:36] SessionId# [3:134:1] Cookie# 4963534681711710740 2026-01-08T15:24:39.405960Z 3 00h00m00.000000s :BS_NODE DEBUG: {NWDC54@distconf_binding.cpp:373} SubscribeToPeerNode NodeId# 1 SessionId# [3:134:1] Inserted# false Subscription# {SessionId# [3:134:1] SubscriptionCookie# 0} NextSubscribeCookie# 2 2026-01-08T15:24:39.406027Z 3 00h00m00.000000s :BS_NODE DEBUG: {NWDC17@distconf_binding.cpp:437} TEvNodeConfigReversePush NodeId# 1 Cookie# 4963534681711710740 SessionId# [3:134:1] Binding# {1.7/4963534681711710740@[3:134:1]} Record# {RootNodeId: 1 } 2026-01-08T15:24:39.406063Z 5 00h00m00.000000s :BS_NODE DEBUG: {NWDC15@distconf.cpp:427} StateFunc Type# 268639239 Sender# [1:220:36] SessionId# [5:140:1] Cookie# 4768241381963355952 2026-01-08T15:24:39.406092Z 5 00h00m00.000000s :BS_NODE DEBUG: {NWDC54@distconf_binding.cpp:373} SubscribeToPeerNode NodeId# 1 SessionId# [5:140:1] Inserted# false Subscription# {SessionId# [5:140:1] SubscriptionCookie# 0} NextSubscribeCookie# 
3 2026-01-08T15:24:39.406144Z 5 00h00m00.000000s :BS_NODE DEBUG: {NWDC17@distconf_binding.cpp:437} TEvNodeConfigReversePush NodeId# 1 Cookie# 4768241381963355952 SessionId# [5:140:1] Binding# {1.7/4768241381963355952@[5:140:1]} Record# {RootNodeId: 1 } 2026-01-08T15:24:39.406263Z 7 00h00m00.000000s :BS_NODE DEBUG: {NWDC15@distconf.cpp:427} StateFunc Type# 268639239 Sender# [6:255:20] SessionId# [7:41:6] Cookie# 12929530514207813051 2026-01-08T15:24:39.406308Z 7 00h00m00.000000s :BS_NODE DEBUG: {NWDC54@distconf_binding.cpp:373} SubscribeToPeerNode NodeId# 6 SessionId# [7:41:6] Inserted# false Subscription# {SessionId# [7:41:6] SubscriptionCookie# 0} NextSubscribeCookie# 6 2026-01-08T15:24:39.406377Z 7 00h00m00.000000s :BS_NODE DEBUG: {NWDC17@distconf_binding.cpp:437} TEvNodeConfigReversePush NodeId# 6 Cookie# 12929530514207813051 SessionId# [7:41:6] Binding# {6.1/12929530514207813051@[7:41:6]} Record# {RootNodeId: 7 } 2026-01-08T15:24:39.406439Z 7 00h00m00.000000s :BS_NODE DEBUG: {NWDC03@distconf_binding.cpp:403} AbortBinding Binding# {6.1/12929530514207813051@[7:41:6]} Reason# binding cycle 2026-01-08T15:24:39.406519Z 7 00h00m00.000000s :BS_NODE DEBUG: {NWDC24@distconf_scatter_gather.cpp:125} AbortAllScatterTasks Binding# {6.1/12929530514207813051@[7:41:6]} 2026-01-08T15:24:39.406570Z 7 00h00m00.000000s :BS_NODE DEBUG: {NWDC55@distconf_binding.cpp:345} UnsubscribeInterconnect NodeId# 6 Subscription# {SessionId# [7:41:6] SubscriptionCookie# 0} 2026-01-08T15:24:39.406615Z 7 00h00m00.000000s :BS_NODE DEBUG: {NWDC54@distconf_binding.cpp:373} SubscribeToPeerNode NodeId# 5 SessionId# [0:0:0] Inserted# true Subscription# {SessionId# [0:0:0] SubscriptionCookie# 0} NextSubscribeCookie# 6 2026-01-08T15:24:39.406662Z 7 00h00m00.000000s :BS_NODE DEBUG: {NWDC29@distconf_binding.cpp:188} Initiated bind NodeId# 5 Binding# {5.0/12929530514207813052@[0:0:0]} SessionId# [0:0:0] 2026-01-08T15:24:39.406727Z 8 00h00m00.000000s :BS_NODE DEBUG: {NWDC15@distconf.cpp:427} StateFunc Type# 268639239 Sender# [6:255:20] SessionId# [8:44:6] Cookie# 15203491471098949250 2026-01-08T15:24:39.406770Z 8 00h00m00.000000s :BS_NODE DEBUG: {NWDC54@distconf_binding.cpp:373} SubscribeToPeerNode NodeId# 6 SessionId# [8:44:6] Inserted# false Subscription# {SessionId# [8:44:6] SubscriptionCookie# 0} NextSubscribeCookie# 2 2026-01-08T15:24:39.406818Z 8 00h00m00.000000s :BS_NODE DEBUG: {NWDC17@distconf_binding.cpp:437} TEvNodeConfigReversePush NodeId# 6 Cookie# 15203491471098949250 SessionId# [8:44:6] Binding# {6.1/15203491471098949250@[8:44:6]} Record# {RootNodeId: 7 } 2026-01-08T15:24:39.407254Z 7 00h00m00.000000s :BS_NODE DEBUG: {NWDC15@distconf.cpp:427} StateFunc Type# 131077 Sender# [7:58:5] SessionId# [0:0:0] Cookie# 6 2026-01-08T15:24:39.407297Z 7 00h00m00.000000s :BS_NODE DEBUG: {NWDC14@distconf_binding.cpp:223} TEvNodeConnected NodeId# 5 SessionId# [7:58:5] Cookie# 6 CookieInFlight# true SubscriptionExists# true 2026-01-08T15:24:39.407358Z 7 00h00m00.000000s :BS_NODE DEBUG: {NWDC09@distconf_binding.cpp:259} Continuing bind Binding# {5.0/12929530514207813052@[0:0:0]} 2026-01-08T15:24:39.407420Z 6 00h00m00.000000s :BS_NODE DEBUG: {NWDC15@distconf.cpp:427} StateFunc Type# 268639238 Sender# [7:262:20] SessionId# [6:40:6] Cookie# 12929530514207813051 2026-01-08T15:24:39.407453Z 6 00h00m00.000000s :BS_NODE DEBUG: {NWDC54@distconf_binding.cpp:373} SubscribeToPeerNode NodeId# 7 SessionId# [6:40:6] Inserted# false Subscription# {SessionId# [6:40:6] SubscriptionCookie# 0} NextSubscribeCookie# 4 2026-01-08T15:24:39.407664Z 6 
00h00m00.000000s :BS_NODE DEBUG: {NWDC02@distconf_binding.cpp:611} TEvNodeConfigPush NodeId# 7 Cookie# 12929530514207813051 SessionId# [6:40:6] Binding# {5.7/5022329919623629555@[6:55:5]} Record# {DeletedBoundNodeIds { Host: "127.0.0.6" Port: 19001 NodeId: 6 } DeletedBoundNodeIds { Host: "127.0.0.9" Port: 19001 NodeId: 9 } DeletedBoundNodeIds { Host: "127.0.0.4" Port: 19001 NodeId: 4 } DeletedBoundNodeIds { Host: "127.0.0.1" Port: 19001 NodeId: 1 } DeletedBoundNodeIds { Host: "127.0.0.8" Port: 19001 NodeId: 8 } DeletedBoundNodeIds { Host: "127.0.0.2" Port: 19001 NodeId: 2 } DeletedBoundNodeIds { Host: "127.0.0.5" Port: 19001 NodeId: 5 } DeletedBoundNodeIds { Host: "127.0.0.3" Port: 19001 NodeId: 3 } } RootNodeId# 7 StorageConfigGeneration# 0 KnownNode# true 2026-01-08T15:24:39.407715Z 6 00h00m00.000000s :BS_NODE DEBUG: {NWDC54@distconf_binding.cpp:373} SubscribeToPeerNode NodeId# 7 SessionId# [6:40:6] Inserted# false Subscription# {SessionId# [6:40:6] SubscriptionCookie# 0} NextSubscribeCookie# 4 2026-01-08T15:24:39.407763Z 6 00h00m00.000000s :BS_NODE DEBUG: {NWDC34@distconf_binding.cpp:551} DeleteBound RefererNodeId# 7 NodeId# 127.0.0.6:19001/6 2026-01-08T15:24:39.407804Z 6 00h00m00.000000s :BS_NODE DEBUG: {NWDC34@distconf_binding.cpp:551} DeleteBound RefererNodeId# 7 NodeId# 127.0.0.9:19001/9 2026-01-08T15:24:39.407839Z 6 00h00m00.000000s :BS_NODE DEBUG: {NWDC34@distconf_binding.cpp:551} DeleteBound RefererNodeId# 7 NodeId# 127.0.0.4:19001/4 2026-01-08T15:24:39.407880Z 6 00h00m00.000000s :BS_NODE DEBUG: {NWDC34@distconf_binding.cpp:551} DeleteBound RefererNodeId# 7 NodeId# 127.0.0.1:19001/1 2026-01-08T15:24:39.407922Z 6 00h00m00.000000s :BS_NODE DEBUG: {NWDC34@distconf_binding.cpp:551} DeleteBound RefererNodeId# 7 NodeId# 127.0.0.8:19001/8 2026-01-08T15:24:39.407960Z 6 00h00m00.000000s :BS_NODE DEBUG: {NWDC34@distconf_binding.cpp:551} DeleteBound RefererNodeId# 7 NodeId# 127.0.0.2:19001/2 2026-01-08T15:24:39.408001Z 6 00h00m00.000000s :BS_NODE DEBUG: {NWDC34@distconf_binding.cpp:551} DeleteBound RefererNodeId# 7 NodeId# 127.0.0.5:19001/5 2026-01-08T15:24:39.408031Z 6 00h00m00.000000s :BS_NODE DEBUG: {NWDC34@distconf_binding.cpp:551} DeleteBound RefererNodeId# 7 NodeId# 127.0.0.3:19001/3 2026-01-08T15:24:39.408119Z 9 00h00m00.000000s :BS_NODE DEBUG: {NWDC15@distconf.cpp:427} StateFunc Type# 268639239 Sender# [2:227:20] SessionId# [9:127:2] Cookie# 10081380510380536539 2026-01-08T15:24:39.408153Z 9 00h00m00.000000s :BS_NODE DEBUG: {NWDC54@distconf_binding.cpp:373} SubscribeToPeerNode NodeId# 2 SessionId# [9:127:2] Inserted# false Subscription# {SessionId# [9:127:2] SubscriptionCookie# 0} NextSubscribeCookie# 2 2026-01-08T15:24:39.408208Z 9 00h00m00.000000s :BS_NODE DEBUG: {NWDC17@distconf_binding.cpp:437} TEvNodeConfigReversePush NodeId# 2 Cookie# 10081380510380536539 SessionId# [9:127:2] Binding# {2.7/10081380510380536539@[9:127:2]} Record# {RootNodeId: 1 } 2026-01-08T15:24:39.408249Z 4 00h00m00.000000s :BS_NODE DEBUG: {NWDC15@distconf.cpp:427} StateFunc Type# 268639239 Sender# [2:227:20] SessionId# [4:112:2] Cookie# 6732035953646069320 2026-01-08T15:24:39.408378Z 4 00h00m00.000000s :BS_NODE DEBUG: {NWDC54@distconf_binding.cpp:373} SubscribeToPeerNode NodeId# 2 SessionId# [4:112:2] Inserted# false Subscription# {SessionId# [4:112:2] SubscriptionCookie# 0} NextSubscribeCookie# 2 2026-01-08T15:24:39.408467Z 4 00h00m00.000000s :BS_NODE DEBUG: {NWDC17@distconf_binding.cpp:437} TEvNodeConfigReversePush NodeId# 2 Cookie# 6732035953646069320 SessionId# [4:112:2] Binding# 
{2.7/6732035953646069320@[4:112:2]} Record# {RootNodeId: 1 } 2026-01-08T15:24:39.408507Z 6 00h00m00.000000s :BS_NODE DEBUG: {NWDC15@distconf.cpp:427} StateFunc Type# 268639239 Sender# [5:248:20] SessionId# [6:55:5] Cookie# 5022329919623629555 2026-01-08T15:24:39.408539Z 6 00h00m00.000000s :BS_NODE DEBUG: {NWDC54@distconf_binding.cpp:373} SubscribeToPeerNode NodeId# 5 SessionId# [6:55:5] Inserted# false Subscription# {SessionId# [6:55:5] SubscriptionCookie# 0} NextSubscribeCookie# 4 2026-01-08T15:24:39.408578Z 6 00h00m00.000000s :BS_NODE DEBUG: {NWDC17@distconf_binding.cpp:437} TEvNodeConfigReversePush NodeId# 5 Cookie# 5022329919623629555 SessionId# [6:55:5] Binding# {5.7/5022329919623629555@[6:55:5]} Record# {RootNodeId: 1 } 2026-01-08T15:24: ... 00h00m00.831012s :STATESTORAGE DEBUG: Replica::Handle ev: {EvReplicaLookup TabletID: 72057594037936131 Cookie: 3} 2026-01-08T15:24:39.465852Z 1 00h00m00.831012s :STATESTORAGE DEBUG: Replica::Handle ev: {EvReplicaLookup TabletID: 72057594037936131 Cookie: 4} 2026-01-08T15:24:39.465880Z 1 00h00m00.831012s :STATESTORAGE DEBUG: ProxyRequest::HandleLookup ringGroup:0 ev: {EvReplicaInfo Status: 1 TabletID: 72057594037936131 ClusterStateGeneration: 0 ClusterStateGuid: 0} 2026-01-08T15:24:39.465906Z 1 00h00m00.831012s :STATESTORAGE DEBUG: ProxyRequest::HandleLookup ringGroup:0 ev: {EvReplicaInfo Status: 1 TabletID: 72057594037936131 ClusterStateGeneration: 0 ClusterStateGuid: 0} 2026-01-08T15:24:39.465931Z 1 00h00m00.831012s :STATESTORAGE DEBUG: ProxyRequest::HandleLookup ringGroup:0 ev: {EvReplicaInfo Status: 1 TabletID: 72057594037936131 ClusterStateGeneration: 0 ClusterStateGuid: 0} 2026-01-08T15:24:39.465963Z 1 00h00m00.831012s :STATESTORAGE DEBUG: ProxyRequest::HandleLookup ringGroup:0 ev: {EvReplicaInfo Status: 1 TabletID: 72057594037936131 ClusterStateGeneration: 0 ClusterStateGuid: 0} 2026-01-08T15:24:39.465994Z 1 00h00m00.831012s :STATESTORAGE DEBUG: ProxyRequest::HandleLookup ringGroup:0 ev: {EvReplicaInfo Status: 1 TabletID: 72057594037936131 ClusterStateGeneration: 0 ClusterStateGuid: 0} 2026-01-08T15:24:39.466643Z 1 00h00m01.000000s :BS_NODE DEBUG: {NWDC15@distconf.cpp:427} StateFunc Type# 65538 Sender# [0:0:0] SessionId# [0:0:0] Cookie# 0 2026-01-08T15:24:39.467099Z 1 00h00m01.770922s :STATESTORAGE DEBUG: ProxyRequest::HandleInit ringGroup:0 ev: {EvLookup TabletID: 72057594037936131 Cookie: 0 ProxyOptions: SigNone} 2026-01-08T15:24:39.467179Z 1 00h00m01.770922s :STATESTORAGE DEBUG: Replica::Handle ev: {EvReplicaLookup TabletID: 72057594037936131 Cookie: 0} 2026-01-08T15:24:39.467217Z 1 00h00m01.770922s :STATESTORAGE DEBUG: Replica::Handle ev: {EvReplicaLookup TabletID: 72057594037936131 Cookie: 1} 2026-01-08T15:24:39.467242Z 1 00h00m01.770922s :STATESTORAGE DEBUG: Replica::Handle ev: {EvReplicaLookup TabletID: 72057594037936131 Cookie: 2} 2026-01-08T15:24:39.467287Z 1 00h00m01.770922s :STATESTORAGE DEBUG: Replica::Handle ev: {EvReplicaLookup TabletID: 72057594037936131 Cookie: 3} 2026-01-08T15:24:39.467312Z 1 00h00m01.770922s :STATESTORAGE DEBUG: Replica::Handle ev: {EvReplicaLookup TabletID: 72057594037936131 Cookie: 4} 2026-01-08T15:24:39.467350Z 1 00h00m01.770922s :STATESTORAGE DEBUG: ProxyRequest::HandleLookup ringGroup:0 ev: {EvReplicaInfo Status: 1 TabletID: 72057594037936131 ClusterStateGeneration: 0 ClusterStateGuid: 0} 2026-01-08T15:24:39.467400Z 1 00h00m01.770922s :STATESTORAGE DEBUG: ProxyRequest::HandleLookup ringGroup:0 ev: {EvReplicaInfo Status: 1 TabletID: 72057594037936131 ClusterStateGeneration: 0 ClusterStateGuid: 0} 
2026-01-08T15:24:39.467446Z 1 00h00m01.770922s :STATESTORAGE DEBUG: ProxyRequest::HandleLookup ringGroup:0 ev: {EvReplicaInfo Status: 1 TabletID: 72057594037936131 ClusterStateGeneration: 0 ClusterStateGuid: 0} 2026-01-08T15:24:39.467478Z 1 00h00m01.770922s :STATESTORAGE DEBUG: ProxyRequest::HandleLookup ringGroup:0 ev: {EvReplicaInfo Status: 1 TabletID: 72057594037936131 ClusterStateGeneration: 0 ClusterStateGuid: 0} 2026-01-08T15:24:39.467505Z 1 00h00m01.770922s :STATESTORAGE DEBUG: ProxyRequest::HandleLookup ringGroup:0 ev: {EvReplicaInfo Status: 1 TabletID: 72057594037936131 ClusterStateGeneration: 0 ClusterStateGuid: 0} 2026-01-08T15:24:39.469369Z 1 00h00m03.857522s :STATESTORAGE DEBUG: ProxyRequest::HandleInit ringGroup:0 ev: {EvLookup TabletID: 72057594037936131 Cookie: 0 ProxyOptions: SigNone} 2026-01-08T15:24:39.469442Z 1 00h00m03.857522s :STATESTORAGE DEBUG: Replica::Handle ev: {EvReplicaLookup TabletID: 72057594037936131 Cookie: 0} 2026-01-08T15:24:39.469475Z 1 00h00m03.857522s :STATESTORAGE DEBUG: Replica::Handle ev: {EvReplicaLookup TabletID: 72057594037936131 Cookie: 1} 2026-01-08T15:24:39.469504Z 1 00h00m03.857522s :STATESTORAGE DEBUG: Replica::Handle ev: {EvReplicaLookup TabletID: 72057594037936131 Cookie: 2} 2026-01-08T15:24:39.469530Z 1 00h00m03.857522s :STATESTORAGE DEBUG: Replica::Handle ev: {EvReplicaLookup TabletID: 72057594037936131 Cookie: 3} 2026-01-08T15:24:39.469561Z 1 00h00m03.857522s :STATESTORAGE DEBUG: Replica::Handle ev: {EvReplicaLookup TabletID: 72057594037936131 Cookie: 4} 2026-01-08T15:24:39.469605Z 1 00h00m03.857522s :STATESTORAGE DEBUG: ProxyRequest::HandleLookup ringGroup:0 ev: {EvReplicaInfo Status: 1 TabletID: 72057594037936131 ClusterStateGeneration: 0 ClusterStateGuid: 0} 2026-01-08T15:24:39.469651Z 1 00h00m03.857522s :STATESTORAGE DEBUG: ProxyRequest::HandleLookup ringGroup:0 ev: {EvReplicaInfo Status: 1 TabletID: 72057594037936131 ClusterStateGeneration: 0 ClusterStateGuid: 0} 2026-01-08T15:24:39.469679Z 1 00h00m03.857522s :STATESTORAGE DEBUG: ProxyRequest::HandleLookup ringGroup:0 ev: {EvReplicaInfo Status: 1 TabletID: 72057594037936131 ClusterStateGeneration: 0 ClusterStateGuid: 0} 2026-01-08T15:24:39.469719Z 1 00h00m03.857522s :STATESTORAGE DEBUG: ProxyRequest::HandleLookup ringGroup:0 ev: {EvReplicaInfo Status: 1 TabletID: 72057594037936131 ClusterStateGeneration: 0 ClusterStateGuid: 0} 2026-01-08T15:24:39.469767Z 1 00h00m03.857522s :STATESTORAGE DEBUG: ProxyRequest::HandleLookup ringGroup:0 ev: {EvReplicaInfo Status: 1 TabletID: 72057594037936131 ClusterStateGeneration: 0 ClusterStateGuid: 0} 2026-01-08T15:24:39.473709Z 1 00h00m08.197650s :STATESTORAGE DEBUG: ProxyRequest::HandleInit ringGroup:0 ev: {EvLookup TabletID: 72057594037936131 Cookie: 0 ProxyOptions: SigNone} 2026-01-08T15:24:39.473804Z 1 00h00m08.197650s :STATESTORAGE DEBUG: Replica::Handle ev: {EvReplicaLookup TabletID: 72057594037936131 Cookie: 0} 2026-01-08T15:24:39.473843Z 1 00h00m08.197650s :STATESTORAGE DEBUG: Replica::Handle ev: {EvReplicaLookup TabletID: 72057594037936131 Cookie: 1} 2026-01-08T15:24:39.473890Z 1 00h00m08.197650s :STATESTORAGE DEBUG: Replica::Handle ev: {EvReplicaLookup TabletID: 72057594037936131 Cookie: 2} 2026-01-08T15:24:39.473930Z 1 00h00m08.197650s :STATESTORAGE DEBUG: Replica::Handle ev: {EvReplicaLookup TabletID: 72057594037936131 Cookie: 3} 2026-01-08T15:24:39.473964Z 1 00h00m08.197650s :STATESTORAGE DEBUG: Replica::Handle ev: {EvReplicaLookup TabletID: 72057594037936131 Cookie: 4} 2026-01-08T15:24:39.474000Z 1 00h00m08.197650s :STATESTORAGE 
DEBUG: ProxyRequest::HandleLookup ringGroup:0 ev: {EvReplicaInfo Status: 1 TabletID: 72057594037936131 ClusterStateGeneration: 0 ClusterStateGuid: 0} 2026-01-08T15:24:39.474042Z 1 00h00m08.197650s :STATESTORAGE DEBUG: ProxyRequest::HandleLookup ringGroup:0 ev: {EvReplicaInfo Status: 1 TabletID: 72057594037936131 ClusterStateGeneration: 0 ClusterStateGuid: 0} 2026-01-08T15:24:39.474075Z 1 00h00m08.197650s :STATESTORAGE DEBUG: ProxyRequest::HandleLookup ringGroup:0 ev: {EvReplicaInfo Status: 1 TabletID: 72057594037936131 ClusterStateGeneration: 0 ClusterStateGuid: 0} 2026-01-08T15:24:39.474115Z 1 00h00m08.197650s :STATESTORAGE DEBUG: ProxyRequest::HandleLookup ringGroup:0 ev: {EvReplicaInfo Status: 1 TabletID: 72057594037936131 ClusterStateGeneration: 0 ClusterStateGuid: 0} 2026-01-08T15:24:39.474149Z 1 00h00m08.197650s :STATESTORAGE DEBUG: ProxyRequest::HandleLookup ringGroup:0 ev: {EvReplicaInfo Status: 1 TabletID: 72057594037936131 ClusterStateGeneration: 0 ClusterStateGuid: 0} 2026-01-08T15:24:39.476733Z 1 00h00m10.002048s :BS_NODE INFO: {NW51@node_warden_resource.cpp:334} TEvNodeWardenNotifyConfigMismatch: NodeId: 1 ClusterStateGeneration: 1 ClusterStateGuid: 0 2026-01-08T15:24:39.476844Z 1 00h00m10.002048s :BS_NODE INFO: {NW51@node_warden_resource.cpp:334} TEvNodeWardenNotifyConfigMismatch: NodeId: 1 ClusterStateGeneration: 0 ClusterStateGuid: 1 2026-01-08T15:24:39.482557Z 1 00h00m16.877906s :STATESTORAGE DEBUG: ProxyRequest::HandleInit ringGroup:0 ev: {EvLookup TabletID: 72057594037936131 Cookie: 0 ProxyOptions: SigNone} 2026-01-08T15:24:39.482680Z 1 00h00m16.877906s :STATESTORAGE DEBUG: Replica::Handle ev: {EvReplicaLookup TabletID: 72057594037936131 Cookie: 0} 2026-01-08T15:24:39.482728Z 1 00h00m16.877906s :STATESTORAGE DEBUG: Replica::Handle ev: {EvReplicaLookup TabletID: 72057594037936131 Cookie: 1} 2026-01-08T15:24:39.482756Z 1 00h00m16.877906s :STATESTORAGE DEBUG: Replica::Handle ev: {EvReplicaLookup TabletID: 72057594037936131 Cookie: 2} 2026-01-08T15:24:39.482784Z 1 00h00m16.877906s :STATESTORAGE DEBUG: Replica::Handle ev: {EvReplicaLookup TabletID: 72057594037936131 Cookie: 3} 2026-01-08T15:24:39.482833Z 1 00h00m16.877906s :STATESTORAGE DEBUG: Replica::Handle ev: {EvReplicaLookup TabletID: 72057594037936131 Cookie: 4} 2026-01-08T15:24:39.482882Z 1 00h00m16.877906s :STATESTORAGE DEBUG: ProxyRequest::HandleLookup ringGroup:0 ev: {EvReplicaInfo Status: 1 TabletID: 72057594037936131 ClusterStateGeneration: 0 ClusterStateGuid: 0} 2026-01-08T15:24:39.482936Z 1 00h00m16.877906s :STATESTORAGE DEBUG: ProxyRequest::HandleLookup ringGroup:0 ev: {EvReplicaInfo Status: 1 TabletID: 72057594037936131 ClusterStateGeneration: 0 ClusterStateGuid: 0} 2026-01-08T15:24:39.482974Z 1 00h00m16.877906s :STATESTORAGE DEBUG: ProxyRequest::HandleLookup ringGroup:0 ev: {EvReplicaInfo Status: 1 TabletID: 72057594037936131 ClusterStateGeneration: 0 ClusterStateGuid: 0} 2026-01-08T15:24:39.483006Z 1 00h00m16.877906s :STATESTORAGE DEBUG: ProxyRequest::HandleLookup ringGroup:0 ev: {EvReplicaInfo Status: 1 TabletID: 72057594037936131 ClusterStateGeneration: 0 ClusterStateGuid: 0} 2026-01-08T15:24:39.483035Z 1 00h00m16.877906s :STATESTORAGE DEBUG: ProxyRequest::HandleLookup ringGroup:0 ev: {EvReplicaInfo Status: 1 TabletID: 72057594037936131 ClusterStateGeneration: 0 ClusterStateGuid: 0} 2026-01-08T15:24:39.486587Z 1 00h00m20.100000s :BS_NODE INFO: {NW51@node_warden_resource.cpp:334} TEvNodeWardenNotifyConfigMismatch: NodeId: 1 ClusterStateGeneration: 5 ClusterStateGuid: 6 2026-01-08T15:24:39.495527Z 1 
00h00m30.174507s :BS_NODE INFO: {NW51@node_warden_resource.cpp:334} TEvNodeWardenNotifyConfigMismatch: NodeId: 1 ClusterStateGeneration: 5 ClusterStateGuid: 6 2026-01-08T15:24:39.499634Z 1 00h00m35.106443s :STATESTORAGE DEBUG: ProxyRequest::HandleInit ringGroup:0 ev: {EvLookup TabletID: 72057594037936131 Cookie: 0 ProxyOptions: SigNone} 2026-01-08T15:24:39.499754Z 1 00h00m35.106443s :STATESTORAGE DEBUG: Replica::Handle ev: {EvReplicaLookup TabletID: 72057594037936131 Cookie: 0} 2026-01-08T15:24:39.499806Z 1 00h00m35.106443s :STATESTORAGE DEBUG: Replica::Handle ev: {EvReplicaLookup TabletID: 72057594037936131 Cookie: 1} 2026-01-08T15:24:39.499836Z 1 00h00m35.106443s :STATESTORAGE DEBUG: Replica::Handle ev: {EvReplicaLookup TabletID: 72057594037936131 Cookie: 2} 2026-01-08T15:24:39.499890Z 1 00h00m35.106443s :STATESTORAGE DEBUG: Replica::Handle ev: {EvReplicaLookup TabletID: 72057594037936131 Cookie: 3} 2026-01-08T15:24:39.499917Z 1 00h00m35.106443s :STATESTORAGE DEBUG: Replica::Handle ev: {EvReplicaLookup TabletID: 72057594037936131 Cookie: 4} 2026-01-08T15:24:39.499967Z 1 00h00m35.106443s :STATESTORAGE DEBUG: ProxyRequest::HandleLookup ringGroup:0 ev: {EvReplicaInfo Status: 1 TabletID: 72057594037936131 ClusterStateGeneration: 0 ClusterStateGuid: 0} 2026-01-08T15:24:39.500033Z 1 00h00m35.106443s :STATESTORAGE DEBUG: ProxyRequest::HandleLookup ringGroup:0 ev: {EvReplicaInfo Status: 1 TabletID: 72057594037936131 ClusterStateGeneration: 0 ClusterStateGuid: 0} 2026-01-08T15:24:39.500066Z 1 00h00m35.106443s :STATESTORAGE DEBUG: ProxyRequest::HandleLookup ringGroup:0 ev: {EvReplicaInfo Status: 1 TabletID: 72057594037936131 ClusterStateGeneration: 0 ClusterStateGuid: 0} 2026-01-08T15:24:39.500092Z 1 00h00m35.106443s :STATESTORAGE DEBUG: ProxyRequest::HandleLookup ringGroup:0 ev: {EvReplicaInfo Status: 1 TabletID: 72057594037936131 ClusterStateGeneration: 0 ClusterStateGuid: 0} 2026-01-08T15:24:39.500118Z 1 00h00m35.106443s :STATESTORAGE DEBUG: ProxyRequest::HandleLookup ringGroup:0 ev: {EvReplicaInfo Status: 1 TabletID: 72057594037936131 ClusterStateGeneration: 0 ClusterStateGuid: 0} |84.0%| [TM] {BAZEL_UPLOAD} ydb/core/blobstorage/ut_blobstorage/ut_statestorage/unittest >> BSCReadOnlyPDisk::SetBrokenDiskInBrokenGroupReadOnly |84.0%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/ut_blobstorage/ut_read_only_pdisk/unittest |84.0%| [TM] {BAZEL_UPLOAD} ydb/core/blobstorage/ut_blobstorage/ut_read_only_pdisk/unittest |84.0%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/vdisk/synclog/ut/unittest >> VarLengthIntCodec::Random64 [GOOD] >> BSCReadOnlyPDisk::SetGoodDiskInBrokenGroupReadOnlyNotAllowed |84.0%| [TM] {BAZEL_UPLOAD} ydb/core/blobstorage/vdisk/synclog/ut/unittest >> TBsOther2::ChaoticParallelWrite_SkeletonFrontQueuesOverload [GOOD] |84.0%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/ut_blobstorage/ut_read_only_pdisk/unittest |84.0%| [TM] {BAZEL_UPLOAD} ydb/core/blobstorage/ut_blobstorage/ut_read_only_pdisk/unittest >> TS3WrapperTests::UploadUnknownPart >> BsControllerTest::SelfHealMirror3dc [GOOD] |84.0%| [TA] $(B)/ydb/core/fq/libs/compute/common/ut/test-results/unittest/{meta.json ... results_accumulator.log} |84.0%| [TA] $(B)/ydb/core/blobstorage/incrhuge/ut/test-results/unittest/{meta.json ... 
results_accumulator.log} >> TBsDbStat::ChaoticParallelWrite_DbStat [GOOD] >> TBsHuge::Simple >> BSCReadOnlyPDisk::SetBrokenDiskInBrokenGroupReadOnly [GOOD] |84.0%| [TA] $(B)/ydb/core/blobstorage/vdisk/hulldb/generic/ut/test-results/unittest/{meta.json ... results_accumulator.log} |84.0%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/scheme_board/ut_monitoring/unittest >> TSubscriberCombinationsTest::CombinationsRootDomain [GOOD] >> TSubscriberCombinationsTest::MigratedPathRecreation >> TS3WrapperTests::UploadUnknownPart [GOOD] >> TMonitoringTests::InvalidActorId |84.0%| [TA] {BAZEL_UPLOAD} $(B)/ydb/core/fq/libs/compute/common/ut/test-results/unittest/{meta.json ... results_accumulator.log} |84.0%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/scheme_board/ut_monitoring/unittest |84.0%| [TA] {BAZEL_UPLOAD} $(B)/ydb/core/blobstorage/incrhuge/ut/test-results/unittest/{meta.json ... results_accumulator.log} |84.0%| [TM] {BAZEL_UPLOAD} ydb/core/tx/scheme_board/ut_monitoring/unittest |84.0%| [TM] {BAZEL_UPLOAD} ydb/core/tx/scheme_board/ut_monitoring/unittest |84.1%| [TA] {RESULT} $(B)/ydb/core/blobstorage/incrhuge/ut/test-results/unittest/{meta.json ... results_accumulator.log} |84.1%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/scheme_board/ut_monitoring/unittest |84.1%| [TA] {BAZEL_UPLOAD} $(B)/ydb/core/blobstorage/vdisk/hulldb/generic/ut/test-results/unittest/{meta.json ... results_accumulator.log} |84.1%| [TA] {RESULT} $(B)/ydb/core/fq/libs/compute/common/ut/test-results/unittest/{meta.json ... results_accumulator.log} >> TMonitoringTests::ValidActorId >> TMonitoringTests::InvalidActorId [GOOD] |84.1%| [TM] {BAZEL_UPLOAD} ydb/core/tx/scheme_board/ut_monitoring/unittest |84.1%| [TA] {RESULT} $(B)/ydb/core/blobstorage/vdisk/hulldb/generic/ut/test-results/unittest/{meta.json ... results_accumulator.log} >> TBlobStorageHullDecimal::TestRoundToInt [GOOD] >> TBlobStorageHullDecimal::TestToUi64 [GOOD] >> TBlobStorageDiskBlob::CreateFromDistinctParts >> TBlobStorageDiskBlob::CreateFromDistinctParts [GOOD] >> TBlobStorageDiskBlob::CreateIterate [GOOD] |84.1%| [TA] $(B)/ydb/core/blobstorage/vdisk/synclog/ut/test-results/unittest/{meta.json ... results_accumulator.log} >> TSubscriberCombinationsTest::MigratedPathRecreation [GOOD] >> TSubscriberCombinationsTest::CombinationsMigratedPath >> TMonitoringTests::ValidActorId [GOOD] |84.1%| [TA] {BAZEL_UPLOAD} $(B)/ydb/core/blobstorage/vdisk/synclog/ut/test-results/unittest/{meta.json ... results_accumulator.log} |84.1%| [TA] {RESULT} $(B)/ydb/core/blobstorage/vdisk/synclog/ut/test-results/unittest/{meta.json ... results_accumulator.log} |84.1%| [TA] $(B)/ydb/core/blobstorage/ut_blobstorage/ut_statestorage/test-results/unittest/{meta.json ... results_accumulator.log} >> THullDsHeapItTest::HeapForwardIteratorAllEntities [GOOD] >> DqSpillingFileTests::MultipleFileParts >> THullDsHeapItTest::HeapBackwardIteratorAllEntities >> DqSpillingFileTests::MultipleFileParts [GOOD] >> THullDsHeapItTest::HeapBackwardIteratorAllEntities [GOOD] |84.1%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/scheme_board/ut_monitoring/unittest >> TBlobStorageDiskBlob::Merge [GOOD] >> TBlobStorageHullDecimal::TestMkDecimal [GOOD] |84.1%| [TM] {BAZEL_UPLOAD} ydb/core/tx/scheme_board/ut_monitoring/unittest |84.1%| [TA] {RESULT} $(B)/ydb/core/blobstorage/ut_blobstorage/ut_statestorage/test-results/unittest/{meta.json ... 
results_accumulator.log} |84.1%| [TA] {BAZEL_UPLOAD} $(B)/ydb/core/blobstorage/ut_blobstorage/ut_statestorage/test-results/unittest/{meta.json ... results_accumulator.log} >> BSCReadOnlyPDisk::SetGoodDiskInBrokenGroupReadOnlyNotAllowed [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/ut_vdisk/unittest >> TBsOther2::ChaoticParallelWrite_SkeletonFrontQueuesOverload [GOOD] Test command err: 2026-01-08T15:24:18.553786Z :BS_SYNCLOG ERROR: blobstorage_synclog.cpp:161: PDiskId# 1 VDISK[0:_:0:0:0]: (0) Handle(TEvSyncLogRead): locked; sourceVDisk# [0:1:0:3:0] targetVDisk# [0:1:0:0:0] 2026-01-08T15:24:18.553862Z :BS_SYNCLOG ERROR: blobstorage_synclog.cpp:161: PDiskId# 1 VDISK[0:_:0:0:0]: (0) Handle(TEvSyncLogRead): locked; sourceVDisk# [0:1:0:1:0] targetVDisk# [0:1:0:0:0] 2026-01-08T15:24:18.553892Z :BS_SYNCLOG ERROR: blobstorage_synclog.cpp:161: PDiskId# 1 VDISK[0:_:0:0:0]: (0) Handle(TEvSyncLogRead): locked; sourceVDisk# [0:1:0:0:1] targetVDisk# [0:1:0:0:0] 2026-01-08T15:24:18.553920Z :BS_SYNCLOG ERROR: blobstorage_synclog.cpp:161: PDiskId# 1 VDISK[0:_:0:0:0]: (0) Handle(TEvSyncLogRead): locked; sourceVDisk# [0:1:0:3:1] targetVDisk# [0:1:0:0:0] 2026-01-08T15:24:18.553953Z :BS_SYNCLOG ERROR: blobstorage_synclog.cpp:161: PDiskId# 1 VDISK[0:_:0:0:0]: (0) Handle(TEvSyncLogRead): locked; sourceVDisk# [0:1:0:1:1] targetVDisk# [0:1:0:0:0] 2026-01-08T15:24:18.553980Z :BS_SYNCLOG ERROR: blobstorage_synclog.cpp:161: PDiskId# 1 VDISK[0:_:0:0:0]: (0) Handle(TEvSyncLogRead): locked; sourceVDisk# [0:1:0:2:1] targetVDisk# [0:1:0:0:0] 2026-01-08T15:24:18.554008Z :BS_SYNCLOG ERROR: blobstorage_synclog.cpp:161: PDiskId# 1 VDISK[0:_:0:0:0]: (0) Handle(TEvSyncLogRead): locked; sourceVDisk# [0:1:0:2:0] targetVDisk# [0:1:0:0:0] |84.1%| [TM] {BAZEL_UPLOAD} ydb/core/blobstorage/ut_vdisk/unittest ------- [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/ut_blobstorage/ut_read_only_pdisk/unittest >> BSCReadOnlyPDisk::SetBrokenDiskInBrokenGroupReadOnly [GOOD] Test command err: RandomSeed# 4027081691382806092 2026-01-08T15:24:41.442811Z 1 00h00m30.010512s :BS_VDISK_OTHER ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) CheckPDiskResponse: Recoverable error from PDisk: Status# 'CORRUPTED' StatusFlags# 'None' ErrorReason# 'Some error reason' 2026-01-08T15:24:41.442907Z 2 00h00m30.010512s :BS_VDISK_OTHER ERROR: PDiskId# 1000 VDISK[82000000:_:0:1:0]: (2181038080) CheckPDiskResponse: Recoverable error from PDisk: Status# 'CORRUPTED' StatusFlags# 'None' ErrorReason# 'Some error reason' 2026-01-08T15:24:41.442945Z 3 00h00m30.010512s :BS_VDISK_OTHER ERROR: PDiskId# 1000 VDISK[82000000:_:0:2:0]: (2181038080) CheckPDiskResponse: Recoverable error from PDisk: Status# 'CORRUPTED' StatusFlags# 'None' ErrorReason# 'Some error reason' 2026-01-08T15:24:41.442980Z 4 00h00m30.010512s :BS_VDISK_OTHER ERROR: PDiskId# 1000 VDISK[82000000:_:0:3:0]: (2181038080) CheckPDiskResponse: Recoverable error from PDisk: Status# 'CORRUPTED' StatusFlags# 'None' ErrorReason# 'Some error reason' 2026-01-08T15:24:41.443023Z 5 00h00m30.010512s :BS_VDISK_OTHER ERROR: PDiskId# 1000 VDISK[82000000:_:0:4:0]: (2181038080) CheckPDiskResponse: Recoverable error from PDisk: Status# 'CORRUPTED' StatusFlags# 'None' ErrorReason# 'Some error reason' 2026-01-08T15:24:41.443066Z 6 00h00m30.010512s :BS_VDISK_OTHER ERROR: PDiskId# 1000 VDISK[82000000:_:0:5:0]: (2181038080) CheckPDiskResponse: Recoverable error from PDisk: Status# 'CORRUPTED' StatusFlags# 'None' ErrorReason# 'Some error reason' 2026-01-08T15:24:41.443102Z 7 
00h00m30.010512s :BS_VDISK_OTHER ERROR: PDiskId# 1000 VDISK[82000000:_:0:6:0]: (2181038080) CheckPDiskResponse: Recoverable error from PDisk: Status# 'CORRUPTED' StatusFlags# 'None' ErrorReason# 'Some error reason' 2026-01-08T15:24:41.443137Z 8 00h00m30.010512s :BS_VDISK_OTHER ERROR: PDiskId# 1000 VDISK[82000000:_:0:7:0]: (2181038080) CheckPDiskResponse: Recoverable error from PDisk: Status# 'CORRUPTED' StatusFlags# 'None' ErrorReason# 'Some error reason' 2026-01-08T15:24:41.444334Z 1 00h00m30.010512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) SkeletonFront: got TEvPDiskErrorStateChange;State# NoWrites, PDiskError# Some error reason Marker# BSVSF03 2026-01-08T15:24:41.444415Z 2 00h00m30.010512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:1:0]: (2181038080) SkeletonFront: got TEvPDiskErrorStateChange;State# NoWrites, PDiskError# Some error reason Marker# BSVSF03 2026-01-08T15:24:41.444477Z 3 00h00m30.010512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:2:0]: (2181038080) SkeletonFront: got TEvPDiskErrorStateChange;State# NoWrites, PDiskError# Some error reason Marker# BSVSF03 2026-01-08T15:24:41.444550Z 4 00h00m30.010512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:3:0]: (2181038080) SkeletonFront: got TEvPDiskErrorStateChange;State# NoWrites, PDiskError# Some error reason Marker# BSVSF03 2026-01-08T15:24:41.444602Z 5 00h00m30.010512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:4:0]: (2181038080) SkeletonFront: got TEvPDiskErrorStateChange;State# NoWrites, PDiskError# Some error reason Marker# BSVSF03 2026-01-08T15:24:41.444663Z 6 00h00m30.010512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:5:0]: (2181038080) SkeletonFront: got TEvPDiskErrorStateChange;State# NoWrites, PDiskError# Some error reason Marker# BSVSF03 2026-01-08T15:24:41.444708Z 7 00h00m30.010512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:6:0]: (2181038080) SkeletonFront: got TEvPDiskErrorStateChange;State# NoWrites, PDiskError# Some error reason Marker# BSVSF03 2026-01-08T15:24:41.444756Z 8 00h00m30.010512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:7:0]: (2181038080) SkeletonFront: got TEvPDiskErrorStateChange;State# NoWrites, PDiskError# Some error reason Marker# BSVSF03 2026-01-08T15:24:41.444841Z 1 00h00m30.010512s :BS_VDISK_OTHER ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) CheckPDiskResponse: Recoverable error from PDisk: Status# 'CORRUPTED' StatusFlags# 'None' ErrorReason# 'Some error reason' 2026-01-08T15:24:41.444897Z 6 00h00m30.010512s :BS_VDISK_OTHER ERROR: PDiskId# 1000 VDISK[82000000:_:0:5:0]: (2181038080) CheckPDiskResponse: Recoverable error from PDisk: Status# 'CORRUPTED' StatusFlags# 'None' ErrorReason# 'Some error reason' 2026-01-08T15:24:41.444933Z 7 00h00m30.010512s :BS_VDISK_OTHER ERROR: PDiskId# 1000 VDISK[82000000:_:0:6:0]: (2181038080) CheckPDiskResponse: Recoverable error from PDisk: Status# 'CORRUPTED' StatusFlags# 'None' ErrorReason# 'Some error reason' 2026-01-08T15:24:41.444967Z 8 00h00m30.010512s :BS_VDISK_OTHER ERROR: PDiskId# 1000 VDISK[82000000:_:0:7:0]: (2181038080) CheckPDiskResponse: Recoverable error from PDisk: Status# 'CORRUPTED' StatusFlags# 'None' ErrorReason# 'Some error reason' 2026-01-08T15:24:41.445003Z 2 00h00m30.010512s :BS_VDISK_OTHER ERROR: PDiskId# 1000 VDISK[82000000:_:0:1:0]: (2181038080) CheckPDiskResponse: Recoverable error from PDisk: Status# 'CORRUPTED' StatusFlags# 'None' ErrorReason# 'Some error reason' 2026-01-08T15:24:41.445034Z 3 00h00m30.010512s 
:BS_VDISK_OTHER ERROR: PDiskId# 1000 VDISK[82000000:_:0:2:0]: (2181038080) CheckPDiskResponse: Recoverable error from PDisk: Status# 'CORRUPTED' StatusFlags# 'None' ErrorReason# 'Some error reason' 2026-01-08T15:24:41.445071Z 4 00h00m30.010512s :BS_VDISK_OTHER ERROR: PDiskId# 1000 VDISK[82000000:_:0:3:0]: (2181038080) CheckPDiskResponse: Recoverable error from PDisk: Status# 'CORRUPTED' StatusFlags# 'None' ErrorReason# 'Some error reason' 2026-01-08T15:24:41.445117Z 5 00h00m30.010512s :BS_VDISK_OTHER ERROR: PDiskId# 1000 VDISK[82000000:_:0:4:0]: (2181038080) CheckPDiskResponse: Recoverable error from PDisk: Status# 'CORRUPTED' StatusFlags# 'None' ErrorReason# 'Some error reason' 2026-01-08T15:24:41.447088Z 1 00h00m30.010512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) SkeletonFront: got TEvPDiskErrorStateChange;State# NoWrites, PDiskError# Some error reason Marker# BSVSF03 2026-01-08T15:24:41.447164Z 6 00h00m30.010512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:5:0]: (2181038080) SkeletonFront: got TEvPDiskErrorStateChange;State# NoWrites, PDiskError# Some error reason Marker# BSVSF03 2026-01-08T15:24:41.447207Z 7 00h00m30.010512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:6:0]: (2181038080) SkeletonFront: got TEvPDiskErrorStateChange;State# NoWrites, PDiskError# Some error reason Marker# BSVSF03 2026-01-08T15:24:41.447249Z 8 00h00m30.010512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:7:0]: (2181038080) SkeletonFront: got TEvPDiskErrorStateChange;State# NoWrites, PDiskError# Some error reason Marker# BSVSF03 2026-01-08T15:24:41.447293Z 2 00h00m30.010512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:1:0]: (2181038080) SkeletonFront: got TEvPDiskErrorStateChange;State# NoWrites, PDiskError# Some error reason Marker# BSVSF03 2026-01-08T15:24:41.447344Z 3 00h00m30.010512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:2:0]: (2181038080) SkeletonFront: got TEvPDiskErrorStateChange;State# NoWrites, PDiskError# Some error reason Marker# BSVSF03 2026-01-08T15:24:41.447390Z 4 00h00m30.010512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:3:0]: (2181038080) SkeletonFront: got TEvPDiskErrorStateChange;State# NoWrites, PDiskError# Some error reason Marker# BSVSF03 2026-01-08T15:24:41.447447Z 5 00h00m30.010512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:4:0]: (2181038080) SkeletonFront: got TEvPDiskErrorStateChange;State# NoWrites, PDiskError# Some error reason Marker# BSVSF03 2026-01-08T15:24:41.633471Z 1 00h01m30.011024s :BS_SYNCER ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) TVDiskGuidProxyBase::Handle(TEvVSyncGuidResult): NOT OK: msg# {TEvVSyncGuidResult: Status# VDISK_ERROR_STATE VDiskId# [82000000:1:0:1:0]} 2026-01-08T15:24:41.633545Z 1 00h01m30.011024s :BS_SYNCER ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) TVDiskGuidProxyBase::Handle(TEvVSyncGuidResult): NOT OK: msg# {TEvVSyncGuidResult: Status# VDISK_ERROR_STATE VDiskId# [82000000:1:0:2:0]} 2026-01-08T15:24:41.633591Z 1 00h01m30.011024s :BS_SYNCER ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) TVDiskGuidProxyBase::Handle(TEvVSyncGuidResult): NOT OK: msg# {TEvVSyncGuidResult: Status# VDISK_ERROR_STATE VDiskId# [82000000:1:0:3:0]} 2026-01-08T15:24:41.633634Z 1 00h01m30.011024s :BS_SYNCER ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) TVDiskGuidProxyBase::Handle(TEvVSyncGuidResult): NOT OK: msg# {TEvVSyncGuidResult: Status# VDISK_ERROR_STATE VDiskId# [82000000:1:0:4:0]} 2026-01-08T15:24:41.633676Z 1 
00h01m30.011024s :BS_SYNCER ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) TVDiskGuidProxyBase::Handle(TEvVSyncGuidResult): NOT OK: msg# {TEvVSyncGuidResult: Status# VDISK_ERROR_STATE VDiskId# [82000000:1:0:5:0]} 2026-01-08T15:24:41.633717Z 1 00h01m30.011024s :BS_SYNCER ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) TVDiskGuidProxyBase::Handle(TEvVSyncGuidResult): NOT OK: msg# {TEvVSyncGuidResult: Status# VDISK_ERROR_STATE VDiskId# [82000000:1:0:6:0]} 2026-01-08T15:24:41.633759Z 1 00h01m30.011024s :BS_SYNCER ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) TVDiskGuidProxyBase::Handle(TEvVSyncGuidResult): NOT OK: msg# {TEvVSyncGuidResult: Status# VDISK_ERROR_STATE VDiskId# [82000000:1:0:7:0]} >> TBsVDiskRepl2::ReplEraseDiskRestoreWOOneDisk [GOOD] >> TBsVDiskRepl3::ReplEraseDiskRestoreMultipart >> THullDsHeapItTest::HeapAppendixTreeForwardIteratorBenchmark |84.1%| [TM] {BAZEL_UPLOAD} ydb/core/blobstorage/ut_blobstorage/ut_read_only_pdisk/unittest >> THullDsHeapItTest::HeapAppendixTreeForwardIteratorBenchmark [GOOD] >> THullDsHeapItTest::HeapAppendixTreeBackwardIteratorBenchmark ------- [TS] {asan, default-linux-x86_64, release} ydb/core/wrappers/ut/unittest >> TS3WrapperTests::UploadUnknownPart [GOOD] Test command err: 2026-01-08T15:24:41.501182Z node 1 :S3_WRAPPER NOTICE: s3_storage.h:75: Request: uuid# BC9E16D0-928A-4BD1-850A-A98325CD7250, request# UploadPart { Bucket: TEST Key: key UploadId: uploadId PartNumber: 1 } REQUEST: PUT /TEST/key?partNumber=1&uploadId=uploadId HTTP/1.1 HEADERS: Host: localhost:4979 Accept: */* Connection: Upgrade, HTTP2-Settings Upgrade: h2c HTTP2-Settings: AAMAAABkAAQAoAAAAAIAAAAA amz-sdk-invocation-id: D4A01861-FBB3-4D1F-8387-39EC87574DD8 amz-sdk-request: attempt=1 content-length: 4 content-md5: hBotaJrYa9FhFEdFPCLG/A== content-type: binary/octet-stream user-agent: aws-sdk-cpp/1.11.37 Linux/5.15.0-157-generic x86_64 Clang/20.1.8 S3_MOCK::HttpServeWrite: /TEST/key / partNumber=1&uploadId=uploadId / 4 2026-01-08T15:24:41.529800Z node 1 :S3_WRAPPER NOTICE: s3_storage.h:63: Response: uuid# BC9E16D0-928A-4BD1-850A-A98325CD7250, response# >> TCowBTreeTest::MultipleSnapshotsWithClearWithGc [GOOD] >> TCowBTreeTest::DuplicateKeysInplace |84.1%| [TS] {BAZEL_UPLOAD} ydb/core/wrappers/ut/unittest |84.1%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/vdisk/hulldb/base/ut/unittest >> TBlobStorageHullDecimal::TestToUi64 [GOOD] |84.1%| [TM] {BAZEL_UPLOAD} ydb/core/blobstorage/vdisk/hulldb/base/ut/unittest >> ReadOnlyVDisk::TestSync [GOOD] |84.1%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/vdisk/hulldb/base/ut/unittest >> TBlobStorageDiskBlob::CreateIterate [GOOD] |84.1%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/vdisk/repl/ut/unittest |84.1%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/scheme_board/ut_monitoring/unittest >> TMonitoringTests::InvalidActorId [GOOD] |84.1%| [TM] {BAZEL_UPLOAD} ydb/core/blobstorage/vdisk/hulldb/base/ut/unittest |84.1%| [TM] {BAZEL_UPLOAD} ydb/core/blobstorage/vdisk/repl/ut/unittest ------- [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/ut_blobstorage/ut_read_only_pdisk/unittest >> BSCReadOnlyPDisk::SetGoodDiskInBrokenGroupReadOnlyNotAllowed [GOOD] Test command err: RandomSeed# 9177998403441049629 2026-01-08T15:24:42.220102Z 1 00h00m30.010512s :BS_VDISK_OTHER ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) CheckPDiskResponse: Recoverable error from PDisk: Status# 'CORRUPTED' StatusFlags# 'None' ErrorReason# 'Some 
error reason' 2026-01-08T15:24:42.220232Z 2 00h00m30.010512s :BS_VDISK_OTHER ERROR: PDiskId# 1000 VDISK[82000000:_:0:1:0]: (2181038080) CheckPDiskResponse: Recoverable error from PDisk: Status# 'CORRUPTED' StatusFlags# 'None' ErrorReason# 'Some error reason' 2026-01-08T15:24:42.220282Z 3 00h00m30.010512s :BS_VDISK_OTHER ERROR: PDiskId# 1000 VDISK[82000000:_:0:2:0]: (2181038080) CheckPDiskResponse: Recoverable error from PDisk: Status# 'CORRUPTED' StatusFlags# 'None' ErrorReason# 'Some error reason' 2026-01-08T15:24:42.220324Z 4 00h00m30.010512s :BS_VDISK_OTHER ERROR: PDiskId# 1000 VDISK[82000000:_:0:3:0]: (2181038080) CheckPDiskResponse: Recoverable error from PDisk: Status# 'CORRUPTED' StatusFlags# 'None' ErrorReason# 'Some error reason' 2026-01-08T15:24:42.220371Z 5 00h00m30.010512s :BS_VDISK_OTHER ERROR: PDiskId# 1000 VDISK[82000000:_:0:4:0]: (2181038080) CheckPDiskResponse: Recoverable error from PDisk: Status# 'CORRUPTED' StatusFlags# 'None' ErrorReason# 'Some error reason' 2026-01-08T15:24:42.220410Z 6 00h00m30.010512s :BS_VDISK_OTHER ERROR: PDiskId# 1000 VDISK[82000000:_:0:5:0]: (2181038080) CheckPDiskResponse: Recoverable error from PDisk: Status# 'CORRUPTED' StatusFlags# 'None' ErrorReason# 'Some error reason' 2026-01-08T15:24:42.220466Z 7 00h00m30.010512s :BS_VDISK_OTHER ERROR: PDiskId# 1000 VDISK[82000000:_:0:6:0]: (2181038080) CheckPDiskResponse: Recoverable error from PDisk: Status# 'CORRUPTED' StatusFlags# 'None' ErrorReason# 'Some error reason' 2026-01-08T15:24:42.221892Z 1 00h00m30.010512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) SkeletonFront: got TEvPDiskErrorStateChange;State# NoWrites, PDiskError# Some error reason Marker# BSVSF03 2026-01-08T15:24:42.222003Z 2 00h00m30.010512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:1:0]: (2181038080) SkeletonFront: got TEvPDiskErrorStateChange;State# NoWrites, PDiskError# Some error reason Marker# BSVSF03 2026-01-08T15:24:42.222076Z 3 00h00m30.010512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:2:0]: (2181038080) SkeletonFront: got TEvPDiskErrorStateChange;State# NoWrites, PDiskError# Some error reason Marker# BSVSF03 2026-01-08T15:24:42.222137Z 4 00h00m30.010512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:3:0]: (2181038080) SkeletonFront: got TEvPDiskErrorStateChange;State# NoWrites, PDiskError# Some error reason Marker# BSVSF03 2026-01-08T15:24:42.222187Z 5 00h00m30.010512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:4:0]: (2181038080) SkeletonFront: got TEvPDiskErrorStateChange;State# NoWrites, PDiskError# Some error reason Marker# BSVSF03 2026-01-08T15:24:42.222240Z 6 00h00m30.010512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:5:0]: (2181038080) SkeletonFront: got TEvPDiskErrorStateChange;State# NoWrites, PDiskError# Some error reason Marker# BSVSF03 2026-01-08T15:24:42.222305Z 7 00h00m30.010512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:6:0]: (2181038080) SkeletonFront: got TEvPDiskErrorStateChange;State# NoWrites, PDiskError# Some error reason Marker# BSVSF03 2026-01-08T15:24:42.222438Z 1 00h00m30.010512s :BS_VDISK_OTHER ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) CheckPDiskResponse: Recoverable error from PDisk: Status# 'CORRUPTED' StatusFlags# 'None' ErrorReason# 'Some error reason' 2026-01-08T15:24:42.222499Z 6 00h00m30.010512s :BS_VDISK_OTHER ERROR: PDiskId# 1000 VDISK[82000000:_:0:5:0]: (2181038080) CheckPDiskResponse: Recoverable error from PDisk: Status# 'CORRUPTED' StatusFlags# 'None' ErrorReason# 'Some error 
reason' 2026-01-08T15:24:42.222545Z 7 00h00m30.010512s :BS_VDISK_OTHER ERROR: PDiskId# 1000 VDISK[82000000:_:0:6:0]: (2181038080) CheckPDiskResponse: Recoverable error from PDisk: Status# 'CORRUPTED' StatusFlags# 'None' ErrorReason# 'Some error reason' 2026-01-08T15:24:42.222625Z 2 00h00m30.010512s :BS_VDISK_OTHER ERROR: PDiskId# 1000 VDISK[82000000:_:0:1:0]: (2181038080) CheckPDiskResponse: Recoverable error from PDisk: Status# 'CORRUPTED' StatusFlags# 'None' ErrorReason# 'Some error reason' 2026-01-08T15:24:42.222674Z 3 00h00m30.010512s :BS_VDISK_OTHER ERROR: PDiskId# 1000 VDISK[82000000:_:0:2:0]: (2181038080) CheckPDiskResponse: Recoverable error from PDisk: Status# 'CORRUPTED' StatusFlags# 'None' ErrorReason# 'Some error reason' 2026-01-08T15:24:42.222725Z 4 00h00m30.010512s :BS_VDISK_OTHER ERROR: PDiskId# 1000 VDISK[82000000:_:0:3:0]: (2181038080) CheckPDiskResponse: Recoverable error from PDisk: Status# 'CORRUPTED' StatusFlags# 'None' ErrorReason# 'Some error reason' 2026-01-08T15:24:42.222785Z 5 00h00m30.010512s :BS_VDISK_OTHER ERROR: PDiskId# 1000 VDISK[82000000:_:0:4:0]: (2181038080) CheckPDiskResponse: Recoverable error from PDisk: Status# 'CORRUPTED' StatusFlags# 'None' ErrorReason# 'Some error reason' 2026-01-08T15:24:42.224742Z 1 00h00m30.010512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) SkeletonFront: got TEvPDiskErrorStateChange;State# NoWrites, PDiskError# Some error reason Marker# BSVSF03 2026-01-08T15:24:42.224831Z 6 00h00m30.010512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:5:0]: (2181038080) SkeletonFront: got TEvPDiskErrorStateChange;State# NoWrites, PDiskError# Some error reason Marker# BSVSF03 2026-01-08T15:24:42.224879Z 7 00h00m30.010512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:6:0]: (2181038080) SkeletonFront: got TEvPDiskErrorStateChange;State# NoWrites, PDiskError# Some error reason Marker# BSVSF03 2026-01-08T15:24:42.224955Z 2 00h00m30.010512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:1:0]: (2181038080) SkeletonFront: got TEvPDiskErrorStateChange;State# NoWrites, PDiskError# Some error reason Marker# BSVSF03 2026-01-08T15:24:42.225008Z 3 00h00m30.010512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:2:0]: (2181038080) SkeletonFront: got TEvPDiskErrorStateChange;State# NoWrites, PDiskError# Some error reason Marker# BSVSF03 2026-01-08T15:24:42.225062Z 4 00h00m30.010512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:3:0]: (2181038080) SkeletonFront: got TEvPDiskErrorStateChange;State# NoWrites, PDiskError# Some error reason Marker# BSVSF03 2026-01-08T15:24:42.225113Z 5 00h00m30.010512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:4:0]: (2181038080) SkeletonFront: got TEvPDiskErrorStateChange;State# NoWrites, PDiskError# Some error reason Marker# BSVSF03 |84.1%| [TM] {BAZEL_UPLOAD} ydb/core/tx/scheme_board/ut_monitoring/unittest |84.1%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/scheme_board/ut_monitoring/unittest >> TMonitoringTests::ValidActorId [GOOD] |84.1%| [TM] {asan, default-linux-x86_64, release} ydb/library/yql/dq/actors/spilling/ut/unittest >> DqSpillingFileTests::MultipleFileParts [GOOD] |84.1%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/vdisk/hulldb/base/ut/unittest >> TBlobStorageHullDecimal::TestMkDecimal [GOOD] |84.1%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/vdisk/hulldb/base/ut/unittest >> THullDsHeapItTest::HeapBackwardIteratorAllEntities [GOOD] >> TBsHuge::Simple [GOOD] >> TBsHuge::SimpleErasureNone >> 
TCowBTreeTest::DuplicateKeysInplace [GOOD] >> TCowBTreeTest::DuplicateKeysThreadSafe >> THullDsHeapItTest::HeapAppendixTreeBackwardIteratorBenchmark [GOOD] |84.1%| [TM] {BAZEL_UPLOAD} ydb/core/blobstorage/ut_blobstorage/ut_read_only_pdisk/unittest |84.1%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/vdisk/repl/ut/unittest |84.1%| [TM] {BAZEL_UPLOAD} ydb/library/yql/dq/actors/spilling/ut/unittest |84.1%| [TM] {BAZEL_UPLOAD} ydb/core/tx/scheme_board/ut_monitoring/unittest |84.1%| [TM] {BAZEL_UPLOAD} ydb/core/blobstorage/vdisk/hulldb/base/ut/unittest |84.1%| [TM] {BAZEL_UPLOAD} ydb/core/blobstorage/vdisk/hulldb/base/ut/unittest ------- [TM] {asan, default-linux-x86_64, release} ydb/core/mind/bscontroller/ut_selfheal/unittest >> BsControllerTest::SelfHealMirror3dc [GOOD] Test command err: 2026-01-08T15:24:08.756877Z 1 00h00m00.000000s :BS_NODE DEBUG: [1] Bootstrap 2026-01-08T15:24:08.756928Z 1 00h00m00.000000s :BS_NODE DEBUG: [1] Connect 2026-01-08T15:24:08.757028Z 2 00h00m00.000000s :BS_NODE DEBUG: [2] Bootstrap 2026-01-08T15:24:08.757054Z 2 00h00m00.000000s :BS_NODE DEBUG: [2] Connect 2026-01-08T15:24:08.757097Z 3 00h00m00.000000s :BS_NODE DEBUG: [3] Bootstrap 2026-01-08T15:24:08.757120Z 3 00h00m00.000000s :BS_NODE DEBUG: [3] Connect 2026-01-08T15:24:08.757163Z 4 00h00m00.000000s :BS_NODE DEBUG: [4] Bootstrap 2026-01-08T15:24:08.757185Z 4 00h00m00.000000s :BS_NODE DEBUG: [4] Connect 2026-01-08T15:24:08.757219Z 5 00h00m00.000000s :BS_NODE DEBUG: [5] Bootstrap 2026-01-08T15:24:08.757241Z 5 00h00m00.000000s :BS_NODE DEBUG: [5] Connect 2026-01-08T15:24:08.757277Z 6 00h00m00.000000s :BS_NODE DEBUG: [6] Bootstrap 2026-01-08T15:24:08.757300Z 6 00h00m00.000000s :BS_NODE DEBUG: [6] Connect 2026-01-08T15:24:08.757336Z 7 00h00m00.000000s :BS_NODE DEBUG: [7] Bootstrap 2026-01-08T15:24:08.757370Z 7 00h00m00.000000s :BS_NODE DEBUG: [7] Connect 2026-01-08T15:24:08.757407Z 8 00h00m00.000000s :BS_NODE DEBUG: [8] Bootstrap 2026-01-08T15:24:08.757462Z 8 00h00m00.000000s :BS_NODE DEBUG: [8] Connect 2026-01-08T15:24:08.757511Z 9 00h00m00.000000s :BS_NODE DEBUG: [9] Bootstrap 2026-01-08T15:24:08.757536Z 9 00h00m00.000000s :BS_NODE DEBUG: [9] Connect 2026-01-08T15:24:08.757635Z 10 00h00m00.000000s :BS_NODE DEBUG: [10] Bootstrap 2026-01-08T15:24:08.757659Z 10 00h00m00.000000s :BS_NODE DEBUG: [10] Connect 2026-01-08T15:24:08.757693Z 11 00h00m00.000000s :BS_NODE DEBUG: [11] Bootstrap 2026-01-08T15:24:08.757716Z 11 00h00m00.000000s :BS_NODE DEBUG: [11] Connect 2026-01-08T15:24:08.757750Z 12 00h00m00.000000s :BS_NODE DEBUG: [12] Bootstrap 2026-01-08T15:24:08.757772Z 12 00h00m00.000000s :BS_NODE DEBUG: [12] Connect 2026-01-08T15:24:08.757826Z 13 00h00m00.000000s :BS_NODE DEBUG: [13] Bootstrap 2026-01-08T15:24:08.757849Z 13 00h00m00.000000s :BS_NODE DEBUG: [13] Connect 2026-01-08T15:24:08.757889Z 14 00h00m00.000000s :BS_NODE DEBUG: [14] Bootstrap 2026-01-08T15:24:08.757911Z 14 00h00m00.000000s :BS_NODE DEBUG: [14] Connect 2026-01-08T15:24:08.757951Z 15 00h00m00.000000s :BS_NODE DEBUG: [15] Bootstrap 2026-01-08T15:24:08.757986Z 15 00h00m00.000000s :BS_NODE DEBUG: [15] Connect 2026-01-08T15:24:08.758027Z 16 00h00m00.000000s :BS_NODE DEBUG: [16] Bootstrap 2026-01-08T15:24:08.758050Z 16 00h00m00.000000s :BS_NODE DEBUG: [16] Connect 2026-01-08T15:24:08.758087Z 17 00h00m00.000000s :BS_NODE DEBUG: [17] Bootstrap 2026-01-08T15:24:08.758108Z 17 00h00m00.000000s :BS_NODE DEBUG: [17] Connect 2026-01-08T15:24:08.758145Z 18 00h00m00.000000s :BS_NODE DEBUG: [18] Bootstrap 2026-01-08T15:24:08.758167Z 18 
00h00m00.000000s :BS_NODE DEBUG: [18] Connect 2026-01-08T15:24:08.758205Z 19 00h00m00.000000s :BS_NODE DEBUG: [19] Bootstrap 2026-01-08T15:24:08.758227Z 19 00h00m00.000000s :BS_NODE DEBUG: [19] Connect 2026-01-08T15:24:08.758263Z 20 00h00m00.000000s :BS_NODE DEBUG: [20] Bootstrap 2026-01-08T15:24:08.758284Z 20 00h00m00.000000s :BS_NODE DEBUG: [20] Connect 2026-01-08T15:24:08.758340Z 21 00h00m00.000000s :BS_NODE DEBUG: [21] Bootstrap 2026-01-08T15:24:08.758362Z 21 00h00m00.000000s :BS_NODE DEBUG: [21] Connect 2026-01-08T15:24:08.758400Z 22 00h00m00.000000s :BS_NODE DEBUG: [22] Bootstrap 2026-01-08T15:24:08.758422Z 22 00h00m00.000000s :BS_NODE DEBUG: [22] Connect 2026-01-08T15:24:08.758455Z 23 00h00m00.000000s :BS_NODE DEBUG: [23] Bootstrap 2026-01-08T15:24:08.758500Z 23 00h00m00.000000s :BS_NODE DEBUG: [23] Connect 2026-01-08T15:24:08.758542Z 24 00h00m00.000000s :BS_NODE DEBUG: [24] Bootstrap 2026-01-08T15:24:08.758564Z 24 00h00m00.000000s :BS_NODE DEBUG: [24] Connect 2026-01-08T15:24:08.758599Z 25 00h00m00.000000s :BS_NODE DEBUG: [25] Bootstrap 2026-01-08T15:24:08.758624Z 25 00h00m00.000000s :BS_NODE DEBUG: [25] Connect 2026-01-08T15:24:08.758670Z 26 00h00m00.000000s :BS_NODE DEBUG: [26] Bootstrap 2026-01-08T15:24:08.758694Z 26 00h00m00.000000s :BS_NODE DEBUG: [26] Connect 2026-01-08T15:24:08.758729Z 27 00h00m00.000000s :BS_NODE DEBUG: [27] Bootstrap 2026-01-08T15:24:08.758757Z 27 00h00m00.000000s :BS_NODE DEBUG: [27] Connect 2026-01-08T15:24:08.758797Z 28 00h00m00.000000s :BS_NODE DEBUG: [28] Bootstrap 2026-01-08T15:24:08.758819Z 28 00h00m00.000000s :BS_NODE DEBUG: [28] Connect 2026-01-08T15:24:08.758857Z 29 00h00m00.000000s :BS_NODE DEBUG: [29] Bootstrap 2026-01-08T15:24:08.758878Z 29 00h00m00.000000s :BS_NODE DEBUG: [29] Connect 2026-01-08T15:24:08.758925Z 30 00h00m00.000000s :BS_NODE DEBUG: [30] Bootstrap 2026-01-08T15:24:08.758948Z 30 00h00m00.000000s :BS_NODE DEBUG: [30] Connect 2026-01-08T15:24:08.758986Z 31 00h00m00.000000s :BS_NODE DEBUG: [31] Bootstrap 2026-01-08T15:24:08.759018Z 31 00h00m00.000000s :BS_NODE DEBUG: [31] Connect 2026-01-08T15:24:08.759060Z 32 00h00m00.000000s :BS_NODE DEBUG: [32] Bootstrap 2026-01-08T15:24:08.759082Z 32 00h00m00.000000s :BS_NODE DEBUG: [32] Connect 2026-01-08T15:24:08.759115Z 33 00h00m00.000000s :BS_NODE DEBUG: [33] Bootstrap 2026-01-08T15:24:08.759136Z 33 00h00m00.000000s :BS_NODE DEBUG: [33] Connect 2026-01-08T15:24:08.759182Z 34 00h00m00.000000s :BS_NODE DEBUG: [34] Bootstrap 2026-01-08T15:24:08.759207Z 34 00h00m00.000000s :BS_NODE DEBUG: [34] Connect 2026-01-08T15:24:08.759251Z 35 00h00m00.000000s :BS_NODE DEBUG: [35] Bootstrap 2026-01-08T15:24:08.759277Z 35 00h00m00.000000s :BS_NODE DEBUG: [35] Connect 2026-01-08T15:24:08.759311Z 36 00h00m00.000000s :BS_NODE DEBUG: [36] Bootstrap 2026-01-08T15:24:08.759333Z 36 00h00m00.000000s :BS_NODE DEBUG: [36] Connect 2026-01-08T15:24:08.803867Z 1 00h00m00.000000s :BS_NODE DEBUG: [1] ClientConnected Sender# [1:2719:59] Status# ERROR ClientId# [1:2719:59] ServerId# [0:0:0] PipeClient# [1:2719:59] 2026-01-08T15:24:08.805506Z 2 00h00m00.000000s :BS_NODE DEBUG: [2] ClientConnected Sender# [2:2720:41] Status# ERROR ClientId# [2:2720:41] ServerId# [0:0:0] PipeClient# [2:2720:41] 2026-01-08T15:24:08.805573Z 3 00h00m00.000000s :BS_NODE DEBUG: [3] ClientConnected Sender# [3:2721:41] Status# ERROR ClientId# [3:2721:41] ServerId# [0:0:0] PipeClient# [3:2721:41] 2026-01-08T15:24:08.805615Z 4 00h00m00.000000s :BS_NODE DEBUG: [4] ClientConnected Sender# [4:2722:41] Status# ERROR ClientId# [4:2722:41] ServerId# [0:0:0] 
PipeClient# [4:2722:41] 2026-01-08T15:24:08.805663Z 5 00h00m00.000000s :BS_NODE DEBUG: [5] ClientConnected Sender# [5:2723:41] Status# ERROR ClientId# [5:2723:41] ServerId# [0:0:0] PipeClient# [5:2723:41] 2026-01-08T15:24:08.805705Z 6 00h00m00.000000s :BS_NODE DEBUG: [6] ClientConnected Sender# [6:2724:41] Status# ERROR ClientId# [6:2724:41] ServerId# [0:0:0] PipeClient# [6:2724:41] 2026-01-08T15:24:08.805749Z 7 00h00m00.000000s :BS_NODE DEBUG: [7] ClientConnected Sender# [7:2725:41] Status# ERROR ClientId# [7:2725:41] ServerId# [0:0:0] PipeClient# [7:2725:41] 2026-01-08T15:24:08.805789Z 8 00h00m00.000000s :BS_NODE DEBUG: [8] ClientConnected Sender# [8:2726:41] Status# ERROR ClientId# [8:2726:41] ServerId# [0:0:0] PipeClient# [8:2726:41] 2026-01-08T15:24:08.805835Z 9 00h00m00.000000s :BS_NODE DEBUG: [9] ClientConnected Sender# [9:2727:41] Status# ERROR ClientId# [9:2727:41] ServerId# [0:0:0] PipeClient# [9:2727:41] 2026-01-08T15:24:08.805875Z 10 00h00m00.000000s :BS_NODE DEBUG: [10] ClientConnected Sender# [10:2728:41] Status# ERROR ClientId# [10:2728:41] ServerId# [0:0:0] PipeClient# [10:2728:41] 2026-01-08T15:24:08.805912Z 11 00h00m00.000000s :BS_NODE DEBUG: [11] ClientConnected Sender# [11:2729:41] Status# ERROR ClientId# [11:2729:41] ServerId# [0:0:0] PipeClient# [11:2729:41] 2026-01-08T15:24:08.805948Z 12 00h00m00.000000s :BS_NODE DEBUG: [12] ClientConnected Sender# [12:2730:41] Status# ERROR ClientId# [12:2730:41] ServerId# [0:0:0] PipeClient# [12:2730:41] 2026-01-08T15:24:08.805996Z 13 00h00m00.000000s :BS_NODE DEBUG: [13] ClientConnected Sender# [13:2731:41] Status# ERROR ClientId# [13:2731:41] ServerId# [0:0:0] PipeClient# [13:2731:41] 2026-01-08T15:24:08.806066Z 14 00h00m00.000000s :BS_NODE DEBUG: [14] ClientConnected Sender# [14:2732:41] Status# ERROR ClientId# [14:2732:41] ServerId# [0:0:0] PipeClient# [14:2732:41] 2026-01-08T15:24:08.806122Z 15 00h00m00.000000s :BS_NODE DEBUG: [15] ClientConnected Sender# [15:2733:41] Status# ERROR ClientId# [15:2733:41] ServerId# [0:0:0] PipeClient# [15:2733:41] 2026-01-08T15:24:08.806304Z 16 00h00m00.000000s :BS_NODE DEBUG: [16] ClientConnected Sender# [16:2734:41] Status# ERROR ClientId# [16:2734:41] ServerId# [0:0:0] PipeClient# [16:2734:41] 2026-01-08T15:24:08.806373Z 17 00h00m00.000000s :BS_NODE DEBUG: [17] ClientConnected Sender# [17:2735:41] Status# ERROR ClientId# [17:2735:41] ServerId# [0:0:0] PipeClient# [17:2735:41] 2026-01-08T15:24:08.806429Z 18 00h00m00.000000s :BS_NODE DEBUG: [18] ClientConnected Sender# [18:2736:41] Status# ERROR ClientId# [18:2736:41] ServerId# [0:0:0] PipeClient# [18:2736:41] 2026-01-08T15:24:08.806478Z 19 00h00m00.000000s :BS_NODE DEBUG: [19] ClientConnected Sender# [19:2737:41] Status# ERROR ClientId# [19:2737:41] ServerId# [0:0:0] PipeClient# [19:2737:41] 2026-01-08T15:24:08.806518Z 20 00h00m00.000000s :BS_NODE DEBUG: [20] ClientConnected Sender# [20:2738:41] Status# ERROR ClientId# [20:2738:41] ServerId# [0:0:0] PipeClient# [20:2738:41] 2026-01-08T15:24:08.806560Z 21 00h00m00.000000s :BS_NODE DEBUG: [21] ClientConnected Sender# [21:2739:41] Status# ERROR ClientId# [21:2739:41] ServerId# [0:0:0] PipeClient# [21:2739:41] 2026-01-08T15:24:08.806621Z 22 00h00m00.000000s :BS_NODE DEBUG: [22] ClientConnected Sender# [22:2740:41] Status# ERROR ClientId# [22:2740:41] ServerId# [0:0:0] PipeClient# [22:2740:41] 2026-01-08T15:24:08.806669Z 23 00h00m00.000000s :BS_NODE DEBUG: [23] ClientConnected Sender# [23:2741:41] Status# ERROR ClientId# [23:2741:41] ServerId# [0:0:0] PipeClient# [23:2741:41] 
2026-01-08T15:24:08.806709Z 24 00h00m00.000000s :BS_NODE DEBUG: [24] ClientConnected Sender# [24:2742:41] Status# ERROR ClientId# [24:2742:41] ServerId# [0:0:0] PipeClient# [24:2742:41] 2026-01-08T15:24:08.806751Z 25 00h00m00.000000s :BS_NODE DEBUG: [25] ClientConnected Sender# [25:2743:41] Status# ERROR ClientId# [25:2743:41] ServerId# [0:0:0] PipeClient# [25:2743:41] 2026-01-08T15:24:08.806794Z 26 00h00m00.000000s :BS_NODE DEBUG: [26] ClientConnected Sender# [26:2744:41] Status# ERROR ClientId# [26:2744:41] ServerId# [0:0:0] PipeClient# [26:2744:41] 2026-01-08T15:24:08.806848Z 27 00h00m00.000000s :BS_NODE DEBUG: [27] ClientConnected Sender# [27:2745:41] Status# ERROR ClientId# [27:2745:41] ServerId# [0:0:0] PipeClient# [27:2745:41] 2026-01-08T15:24:08.806892Z 28 00h00m00.000000s :BS_NODE DEBUG: [28] ClientConnected Sender# [28:2746:41] Status# ERROR ClientId# [28:2746:41] ServerId# [0:0:0] PipeClient# [28:2746:41] 2026-01-08T15:24:08.806958Z 29 00h00m00.000000s :BS_NODE DEBUG: [29] ClientConnected Sender# [29:2747:41] Status# ERROR ClientId# [29:2747:41] ServerId# [0:0:0] PipeClient# [29:2747:41] 2026-01-08T15:24:08.807020Z 30 00h00m00.000000s :BS_NODE DEBUG: [30] ClientConnected Sender# [30:2748:41] Status# ERROR ClientId# [30:2748:41] ServerId# [0:0:0] PipeClient# [30:2748:41] 2026-01-08T15:24:08.807090Z 31 00h00m00.000000s :BS_NODE DEBUG: [31] ClientConnected Sender# [31:2749:41] Status# ERROR ClientId# [31:2749:41] ServerId# [0:0:0] PipeClient# [31:2749:41] 2026-01-08T15:24:08.807136Z 32 00h00m00.000000s :BS_NODE DEBUG: [32] ClientConnected Sender# [32:2750:41] Status# ERROR ClientId# [32:2750:41] ServerId# [0:0:0] PipeClient# [32:2750:41] 2026-01-08T15:24:08.807177Z 33 00h00m00.000000s :BS_NODE DEBUG: [33] ClientConnected Sender# [33:2751:41] Status# ERROR ClientId# [33:2751:41] ServerId# [0:0:0] PipeClient# [33:2751:41] 2026-01-08T15:24:08.807217Z 34 00h00m00.000000s :BS_NODE DEBUG: [34] ClientConnected Sender# [34:2752:41] Status# ERROR ClientId# [34:2752:41] ServerId# [0:0:0] PipeClient# [34:2752:41] 2026-01-08T15:24:08.807262Z 35 00h00m00.000000s :BS_NODE DEBUG: [35] ClientConnected Sender# [35:2753:41] Status# ERROR ClientId# [35:2753:41 ... 
# [8000004b:3:2:2:0] DiskIsOk# true 2026-01-08T15:24:39.859869Z 1 05h45m00.123040s :BS_SELFHEAL INFO: {BSSH09@self_heal.cpp:214} Reassigner succeeded GroupId# 2147483723 Items# [8000004b:3:1:1:0]: 15:1002:1008 -> 15:1003:1008 ConfigTxSeqNo# 523 2026-01-08T15:24:39.859922Z 1 05h45m00.123040s :BS_SELFHEAL DEBUG: {BSSH08@self_heal.cpp:225} Reassigner finished GroupId# 2147483723 Success# true 2026-01-08T15:24:39.860088Z 18 05h45m00.123040s :BS_NODE DEBUG: [18] NodeServiceSetUpdate 2026-01-08T15:24:39.860157Z 18 05h45m00.123040s :BS_NODE DEBUG: [18] VDiskId# [8000004b:3:1:0:0] -> [8000004b:4:1:0:0] 2026-01-08T15:24:39.860266Z 36 05h45m00.123040s :BS_NODE DEBUG: [36] NodeServiceSetUpdate 2026-01-08T15:24:39.860316Z 36 05h45m00.123040s :BS_NODE DEBUG: [36] VDiskId# [8000004b:3:2:2:0] -> [8000004b:4:2:2:0] 2026-01-08T15:24:39.860406Z 6 05h45m00.123040s :BS_NODE DEBUG: [6] NodeServiceSetUpdate 2026-01-08T15:24:39.860456Z 6 05h45m00.123040s :BS_NODE DEBUG: [6] VDiskId# [8000004b:3:0:0:0] -> [8000004b:4:0:0:0] 2026-01-08T15:24:39.860550Z 24 05h45m00.123040s :BS_NODE DEBUG: [24] NodeServiceSetUpdate 2026-01-08T15:24:39.860599Z 24 05h45m00.123040s :BS_NODE DEBUG: [24] VDiskId# [8000004b:3:1:2:0] -> [8000004b:4:1:2:0] 2026-01-08T15:24:39.860715Z 9 05h45m00.123040s :BS_NODE DEBUG: [9] NodeServiceSetUpdate 2026-01-08T15:24:39.862365Z 9 05h45m00.123040s :BS_NODE DEBUG: [9] VDiskId# [8000004b:3:0:1:0] -> [8000004b:4:0:1:0] 2026-01-08T15:24:39.870380Z 12 05h45m00.123040s :BS_NODE DEBUG: [12] NodeServiceSetUpdate 2026-01-08T15:24:39.870476Z 12 05h45m00.123040s :BS_NODE DEBUG: [12] VDiskId# [8000004b:3:0:2:0] -> [8000004b:4:0:2:0] 2026-01-08T15:24:39.870625Z 30 05h45m00.123040s :BS_NODE DEBUG: [30] NodeServiceSetUpdate 2026-01-08T15:24:39.870678Z 30 05h45m00.123040s :BS_NODE DEBUG: [30] VDiskId# [8000004b:3:2:0:0] -> [8000004b:4:2:0:0] 2026-01-08T15:24:39.870790Z 15 05h45m00.123040s :BS_NODE DEBUG: [15] NodeServiceSetUpdate 2026-01-08T15:24:39.870835Z 15 05h45m00.123040s :BS_NODE DEBUG: [15] VDiskId# [8000004b:4:1:1:0] PDiskId# 1003 VSlotId# 1008 created 2026-01-08T15:24:39.870929Z 15 05h45m00.123040s :BS_NODE DEBUG: [15] VDiskId# [8000004b:4:1:1:0] status changed to INIT_PENDING 2026-01-08T15:24:39.871045Z 33 05h45m00.123040s :BS_NODE DEBUG: [33] NodeServiceSetUpdate 2026-01-08T15:24:39.871095Z 33 05h45m00.123040s :BS_NODE DEBUG: [33] VDiskId# [8000004b:3:2:1:0] -> [8000004b:4:2:1:0] 2026-01-08T15:24:39.871430Z 1 05h45m00.123040s :BS_SELFHEAL DEBUG: {BSSH01@self_heal.cpp:76} Reassigner starting GroupId# 2147483757 2026-01-08T15:24:39.872181Z 1 05h45m00.123040s :BS_SELFHEAL DEBUG: {BSSH03@self_heal.cpp:116} Reassigner TEvVStatusResult GroupId# 2147483757 Status# OK JoinedGroup# true Replicated# true 2026-01-08T15:24:39.872226Z 1 05h45m00.123040s :BS_SELFHEAL DEBUG: {BSSH02@self_heal.cpp:101} Reassigner ProcessVDiskReply GroupId# 2147483757 VDiskId# [8000006d:1:0:0:0] DiskIsOk# true 2026-01-08T15:24:39.872265Z 1 05h45m00.123040s :BS_SELFHEAL DEBUG: {BSSH03@self_heal.cpp:116} Reassigner TEvVStatusResult GroupId# 2147483757 Status# OK JoinedGroup# true Replicated# true 2026-01-08T15:24:39.872295Z 1 05h45m00.123040s :BS_SELFHEAL DEBUG: {BSSH02@self_heal.cpp:101} Reassigner ProcessVDiskReply GroupId# 2147483757 VDiskId# [8000006d:1:0:1:0] DiskIsOk# true 2026-01-08T15:24:39.872324Z 1 05h45m00.123040s :BS_SELFHEAL DEBUG: {BSSH03@self_heal.cpp:116} Reassigner TEvVStatusResult GroupId# 2147483757 Status# OK JoinedGroup# true Replicated# true 2026-01-08T15:24:39.872353Z 1 05h45m00.123040s :BS_SELFHEAL DEBUG: 
{BSSH02@self_heal.cpp:101} Reassigner ProcessVDiskReply GroupId# 2147483757 VDiskId# [8000006d:1:0:2:0] DiskIsOk# true 2026-01-08T15:24:39.872381Z 1 05h45m00.123040s :BS_SELFHEAL DEBUG: {BSSH03@self_heal.cpp:116} Reassigner TEvVStatusResult GroupId# 2147483757 Status# OK JoinedGroup# true Replicated# true 2026-01-08T15:24:39.872409Z 1 05h45m00.123040s :BS_SELFHEAL DEBUG: {BSSH02@self_heal.cpp:101} Reassigner ProcessVDiskReply GroupId# 2147483757 VDiskId# [8000006d:1:1:0:0] DiskIsOk# true 2026-01-08T15:24:39.872436Z 1 05h45m00.123040s :BS_SELFHEAL DEBUG: {BSSH03@self_heal.cpp:116} Reassigner TEvVStatusResult GroupId# 2147483757 Status# OK JoinedGroup# true Replicated# true 2026-01-08T15:24:39.872465Z 1 05h45m00.123040s :BS_SELFHEAL DEBUG: {BSSH02@self_heal.cpp:101} Reassigner ProcessVDiskReply GroupId# 2147483757 VDiskId# [8000006d:1:1:2:0] DiskIsOk# true 2026-01-08T15:24:39.872494Z 1 05h45m00.123040s :BS_SELFHEAL DEBUG: {BSSH03@self_heal.cpp:116} Reassigner TEvVStatusResult GroupId# 2147483757 Status# OK JoinedGroup# true Replicated# true 2026-01-08T15:24:39.872521Z 1 05h45m00.123040s :BS_SELFHEAL DEBUG: {BSSH02@self_heal.cpp:101} Reassigner ProcessVDiskReply GroupId# 2147483757 VDiskId# [8000006d:1:2:0:0] DiskIsOk# true 2026-01-08T15:24:39.872549Z 1 05h45m00.123040s :BS_SELFHEAL DEBUG: {BSSH03@self_heal.cpp:116} Reassigner TEvVStatusResult GroupId# 2147483757 Status# OK JoinedGroup# true Replicated# true 2026-01-08T15:24:39.872577Z 1 05h45m00.123040s :BS_SELFHEAL DEBUG: {BSSH02@self_heal.cpp:101} Reassigner ProcessVDiskReply GroupId# 2147483757 VDiskId# [8000006d:1:2:1:0] DiskIsOk# true 2026-01-08T15:24:39.872604Z 1 05h45m00.123040s :BS_SELFHEAL DEBUG: {BSSH03@self_heal.cpp:116} Reassigner TEvVStatusResult GroupId# 2147483757 Status# OK JoinedGroup# true Replicated# true 2026-01-08T15:24:39.872631Z 1 05h45m00.123040s :BS_SELFHEAL DEBUG: {BSSH02@self_heal.cpp:101} Reassigner ProcessVDiskReply GroupId# 2147483757 VDiskId# [8000006d:1:2:2:0] DiskIsOk# true 2026-01-08T15:24:39.887099Z 1 05h45m00.123552s :BS_SELFHEAL INFO: {BSSH09@self_heal.cpp:214} Reassigner succeeded GroupId# 2147483757 Items# [8000006d:1:1:1:0]: 15:1002:1006 -> 21:1000:1014 ConfigTxSeqNo# 524 2026-01-08T15:24:39.887154Z 1 05h45m00.123552s :BS_SELFHEAL DEBUG: {BSSH08@self_heal.cpp:225} Reassigner finished GroupId# 2147483757 Success# true 2026-01-08T15:24:39.887327Z 18 05h45m00.123552s :BS_NODE DEBUG: [18] NodeServiceSetUpdate 2026-01-08T15:24:39.887410Z 18 05h45m00.123552s :BS_NODE DEBUG: [18] VDiskId# [8000006d:1:1:2:0] -> [8000006d:2:1:2:0] 2026-01-08T15:24:39.887518Z 36 05h45m00.123552s :BS_NODE DEBUG: [36] NodeServiceSetUpdate 2026-01-08T15:24:39.887568Z 36 05h45m00.123552s :BS_NODE DEBUG: [36] VDiskId# [8000006d:1:2:0:0] -> [8000006d:2:2:0:0] 2026-01-08T15:24:39.887661Z 3 05h45m00.123552s :BS_NODE DEBUG: [3] NodeServiceSetUpdate 2026-01-08T15:24:39.887710Z 3 05h45m00.123552s :BS_NODE DEBUG: [3] VDiskId# [8000006d:1:0:1:0] -> [8000006d:2:0:1:0] 2026-01-08T15:24:39.887799Z 21 05h45m00.123552s :BS_NODE DEBUG: [21] NodeServiceSetUpdate 2026-01-08T15:24:39.887837Z 21 05h45m00.123552s :BS_NODE DEBUG: [21] VDiskId# [8000006d:2:1:1:0] PDiskId# 1000 VSlotId# 1014 created 2026-01-08T15:24:39.887907Z 21 05h45m00.123552s :BS_NODE DEBUG: [21] VDiskId# [8000006d:2:1:1:0] status changed to INIT_PENDING 2026-01-08T15:24:39.888007Z 6 05h45m00.123552s :BS_NODE DEBUG: [6] NodeServiceSetUpdate 2026-01-08T15:24:39.888053Z 6 05h45m00.123552s :BS_NODE DEBUG: [6] VDiskId# [8000006d:1:0:2:0] -> [8000006d:2:0:2:0] 2026-01-08T15:24:39.888137Z 
24 05h45m00.123552s :BS_NODE DEBUG: [24] NodeServiceSetUpdate 2026-01-08T15:24:39.888179Z 24 05h45m00.123552s :BS_NODE DEBUG: [24] VDiskId# [8000006d:1:1:0:0] -> [8000006d:2:1:0:0] 2026-01-08T15:24:39.888263Z 27 05h45m00.123552s :BS_NODE DEBUG: [27] NodeServiceSetUpdate 2026-01-08T15:24:39.888313Z 27 05h45m00.123552s :BS_NODE DEBUG: [27] VDiskId# [8000006d:1:2:1:0] -> [8000006d:2:2:1:0] 2026-01-08T15:24:39.888393Z 12 05h45m00.123552s :BS_NODE DEBUG: [12] NodeServiceSetUpdate 2026-01-08T15:24:39.888438Z 12 05h45m00.123552s :BS_NODE DEBUG: [12] VDiskId# [8000006d:1:0:0:0] -> [8000006d:2:0:0:0] 2026-01-08T15:24:39.888524Z 30 05h45m00.123552s :BS_NODE DEBUG: [30] NodeServiceSetUpdate 2026-01-08T15:24:39.888571Z 30 05h45m00.123552s :BS_NODE DEBUG: [30] VDiskId# [8000006d:1:2:2:0] -> [8000006d:2:2:2:0] 2026-01-08T15:24:39.888635Z 15 05h45m00.123552s :BS_NODE DEBUG: [15] NodeServiceSetUpdate 2026-01-08T15:24:39.889885Z 21 05h45m01.504504s :BS_NODE DEBUG: [21] VDiskId# [8000001d:2:1:1:0] status changed to REPLICATING 2026-01-08T15:24:39.894690Z 21 05h45m02.002016s :BS_NODE DEBUG: [21] VDiskId# [8000000d:2:1:1:0] status changed to REPLICATING 2026-01-08T15:24:39.895313Z 14 05h45m02.402528s :BS_NODE DEBUG: [14] VDiskId# [8000007d:2:1:1:0] status changed to REPLICATING 2026-01-08T15:24:39.895699Z 21 05h45m03.979480s :BS_NODE DEBUG: [21] VDiskId# [8000003d:2:1:1:0] status changed to REPLICATING 2026-01-08T15:24:39.896248Z 21 05h45m04.943456s :BS_NODE DEBUG: [21] VDiskId# [8000005d:2:1:1:0] status changed to REPLICATING 2026-01-08T15:24:39.897829Z 21 05h45m05.224968s :BS_NODE DEBUG: [21] VDiskId# [8000004d:2:1:1:0] status changed to REPLICATING 2026-01-08T15:24:39.902640Z 21 05h45m05.376552s :BS_NODE DEBUG: [21] VDiskId# [8000006d:2:1:1:0] status changed to REPLICATING 2026-01-08T15:24:39.903379Z 21 05h45m05.494992s :BS_NODE DEBUG: [21] VDiskId# [8000002d:2:1:1:0] status changed to REPLICATING 2026-01-08T15:24:39.904152Z 15 05h45m05.710040s :BS_NODE DEBUG: [15] VDiskId# [8000004b:4:1:1:0] status changed to REPLICATING 2026-01-08T15:24:39.905060Z 21 05h45m13.224968s :BS_NODE DEBUG: [21] VDiskId# [8000004d:2:1:1:0] status changed to READY 2026-01-08T15:24:39.914591Z 15 05h45m13.225480s :BS_NODE DEBUG: [15] NodeServiceSetUpdate 2026-01-08T15:24:39.914658Z 15 05h45m13.225480s :BS_NODE DEBUG: [15] VDiskId# [8000004d:1:1:1:0] destroyed 2026-01-08T15:24:39.915730Z 21 05h45m20.473480s :BS_NODE DEBUG: [21] VDiskId# [8000003d:2:1:1:0] status changed to READY 2026-01-08T15:24:39.917100Z 15 05h45m20.473992s :BS_NODE DEBUG: [15] NodeServiceSetUpdate 2026-01-08T15:24:39.917152Z 15 05h45m20.473992s :BS_NODE DEBUG: [15] VDiskId# [8000003d:1:1:1:0] destroyed 2026-01-08T15:24:39.917293Z 21 05h45m20.487016s :BS_NODE DEBUG: [21] VDiskId# [8000000d:2:1:1:0] status changed to READY 2026-01-08T15:24:39.918398Z 15 05h45m20.487528s :BS_NODE DEBUG: [15] NodeServiceSetUpdate 2026-01-08T15:24:39.918446Z 15 05h45m20.487528s :BS_NODE DEBUG: [15] VDiskId# [8000000d:1:1:1:0] destroyed 2026-01-08T15:24:39.918969Z 15 05h45m26.553040s :BS_NODE DEBUG: [15] VDiskId# [8000004b:4:1:1:0] status changed to READY 2026-01-08T15:24:39.920056Z 15 05h45m26.553552s :BS_NODE DEBUG: [15] NodeServiceSetUpdate 2026-01-08T15:24:39.920106Z 15 05h45m26.553552s :BS_NODE DEBUG: [15] VDiskId# [8000004b:3:1:1:0] destroyed 2026-01-08T15:24:39.920271Z 21 05h45m27.295992s :BS_NODE DEBUG: [21] VDiskId# [8000002d:2:1:1:0] status changed to READY 2026-01-08T15:24:39.921472Z 15 05h45m27.296504s :BS_NODE DEBUG: [15] NodeServiceSetUpdate 2026-01-08T15:24:39.921519Z 15 
05h45m27.296504s :BS_NODE DEBUG: [15] VDiskId# [8000002d:1:1:1:0] destroyed 2026-01-08T15:24:39.921640Z 21 05h45m27.827456s :BS_NODE DEBUG: [21] VDiskId# [8000005d:2:1:1:0] status changed to READY 2026-01-08T15:24:39.926895Z 15 05h45m27.827968s :BS_NODE DEBUG: [15] NodeServiceSetUpdate 2026-01-08T15:24:39.926945Z 15 05h45m27.827968s :BS_NODE DEBUG: [15] VDiskId# [8000005d:1:1:1:0] destroyed 2026-01-08T15:24:39.928118Z 14 05h45m32.159528s :BS_NODE DEBUG: [14] VDiskId# [8000007d:2:1:1:0] status changed to READY 2026-01-08T15:24:39.928891Z 15 05h45m32.160040s :BS_NODE DEBUG: [15] NodeServiceSetUpdate 2026-01-08T15:24:39.928936Z 15 05h45m32.160040s :BS_NODE DEBUG: [15] VDiskId# [8000007d:1:1:1:0] destroyed 2026-01-08T15:24:39.929160Z 21 05h45m33.010552s :BS_NODE DEBUG: [21] VDiskId# [8000006d:2:1:1:0] status changed to READY 2026-01-08T15:24:39.930208Z 15 05h45m33.011064s :BS_NODE DEBUG: [15] NodeServiceSetUpdate 2026-01-08T15:24:39.930254Z 15 05h45m33.011064s :BS_NODE DEBUG: [15] VDiskId# [8000006d:1:1:1:0] destroyed 2026-01-08T15:24:39.939706Z 21 05h45m35.625504s :BS_NODE DEBUG: [21] VDiskId# [8000001d:2:1:1:0] status changed to READY 2026-01-08T15:24:39.941073Z 15 05h45m35.626016s :BS_NODE DEBUG: [15] NodeServiceSetUpdate 2026-01-08T15:24:39.941131Z 15 05h45m35.626016s :BS_NODE DEBUG: [15] VDiskId# [8000001d:1:1:1:0] destroyed |84.1%| [TM] {BAZEL_UPLOAD} ydb/core/blobstorage/vdisk/repl/ut/unittest |84.1%| [TM] {BAZEL_UPLOAD} ydb/core/mind/bscontroller/ut_selfheal/unittest |84.1%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/ut_blobstorage/ut_stop_pdisk/unittest |84.1%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/ut_blobstorage/ut_stop_pdisk/unittest |84.1%| [TM] {BAZEL_UPLOAD} ydb/core/blobstorage/ut_blobstorage/ut_stop_pdisk/unittest |84.2%| [TM] {BAZEL_UPLOAD} ydb/core/blobstorage/ut_blobstorage/ut_stop_pdisk/unittest |84.2%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/ut_blobstorage/ut_stop_pdisk/unittest >> BSCStopPDisk::PDiskStop |84.1%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/vdisk/hulldb/base/ut/unittest >> THullDsHeapItTest::HeapAppendixTreeBackwardIteratorBenchmark [GOOD] |84.2%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/ut_blobstorage/ut_stop_pdisk/unittest |84.2%| [TM] {BAZEL_UPLOAD} ydb/core/blobstorage/ut_blobstorage/ut_stop_pdisk/unittest |84.2%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/tools/query_replay/ydb_query_replay |84.2%| [TM] {BAZEL_UPLOAD} ydb/core/blobstorage/vdisk/hulldb/base/ut/unittest |84.2%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/tools/query_replay/ydb_query_replay |84.2%| [LD] {RESULT} $(B)/ydb/tools/query_replay/ydb_query_replay |84.2%| [TM] {BAZEL_UPLOAD} ydb/core/blobstorage/ut_blobstorage/ut_stop_pdisk/unittest ------- [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/ut_blobstorage/ut_read_only_vdisk/unittest >> ReadOnlyVDisk::TestSync [GOOD] Test command err: RandomSeed# 17375604076748225812 Setting VDisk read-only to 1 for position 0 Invoking SetVDiskReadOnly for vdisk [82000000:1:0:0:0] Setting VDisk read-only to 1 for position 1 Invoking SetVDiskReadOnly for vdisk [82000000:1:0:1:0] SEND TEvPut with key [1:1:0:0:0:131072:0] 2026-01-08T15:24:11.713411Z 1 00h02m00.100000s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) Unavailable in read-only Sender# [1:8828:947] 2026-01-08T15:24:11.713888Z 2 00h02m00.100000s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:1:0]: (2181038080) Unavailable in 
read-only Sender# [1:8835:954] TEvPutResult: TEvPutResult {Id# [1:1:0:0:0:131072:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999988} Setting VDisk read-only to 0 for position 0 Invoking SetVDiskReadOnly for vdisk [82000000:1:0:0:0] Setting VDisk read-only to 0 for position 1 Invoking SetVDiskReadOnly for vdisk [82000000:1:0:1:0] Setting VDisk read-only to 1 for position 1 Invoking SetVDiskReadOnly for vdisk [82000000:1:0:1:0] Setting VDisk read-only to 1 for position 2 Invoking SetVDiskReadOnly for vdisk [82000000:1:0:2:0] SEND TEvPut with key [1:1:1:0:0:32768:0] 2026-01-08T15:24:15.899121Z 3 00h06m00.210512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:2:0]: (2181038080) Unavailable in read-only Sender# [1:8842:961] 2026-01-08T15:24:15.899313Z 2 00h06m00.210512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:1:0]: (2181038080) Unavailable in read-only Sender# [1:8835:954] TEvPutResult: TEvPutResult {Id# [1:1:1:0:0:32768:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999988} Setting VDisk read-only to 0 for position 1 Invoking SetVDiskReadOnly for vdisk [82000000:1:0:1:0] Setting VDisk read-only to 0 for position 2 Invoking SetVDiskReadOnly for vdisk [82000000:1:0:2:0] Setting VDisk read-only to 1 for position 2 Invoking SetVDiskReadOnly for vdisk [82000000:1:0:2:0] Setting VDisk read-only to 1 for position 3 Invoking SetVDiskReadOnly for vdisk [82000000:1:0:3:0] SEND TEvPut with key [1:1:2:0:0:131072:0] TEvPutResult: TEvPutResult {Id# [1:1:2:0:0:131072:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999988} Setting VDisk read-only to 0 for position 2 Invoking SetVDiskReadOnly for vdisk [82000000:1:0:2:0] Setting VDisk read-only to 0 for position 3 Invoking SetVDiskReadOnly for vdisk [82000000:1:0:3:0] Setting VDisk read-only to 1 for position 3 Invoking SetVDiskReadOnly for vdisk [82000000:1:0:3:0] Setting VDisk read-only to 1 for position 4 Invoking SetVDiskReadOnly for vdisk [82000000:1:0:4:0] SEND TEvPut with key [1:1:3:0:0:32768:0] 2026-01-08T15:24:22.885935Z 5 00h14m00.361536s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:4:0]: (2181038080) Unavailable in read-only Sender# [1:8856:975] 2026-01-08T15:24:22.886039Z 4 00h14m00.361536s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:3:0]: (2181038080) Unavailable in read-only Sender# [1:8849:968] TEvPutResult: TEvPutResult {Id# [1:1:3:0:0:32768:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999988} Setting VDisk read-only to 0 for position 3 Invoking SetVDiskReadOnly for vdisk [82000000:1:0:3:0] Setting VDisk read-only to 0 for position 4 Invoking SetVDiskReadOnly for vdisk [82000000:1:0:4:0] Setting VDisk read-only to 1 for position 4 Invoking SetVDiskReadOnly for vdisk [82000000:1:0:4:0] Setting VDisk read-only to 1 for position 5 Invoking SetVDiskReadOnly for vdisk [82000000:1:0:5:0] SEND TEvPut with key [1:1:4:0:0:131072:0] 2026-01-08T15:24:29.054138Z 6 00h18m00.412560s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:5:0]: (2181038080) Unavailable in read-only Sender# [1:8863:982] 2026-01-08T15:24:29.054238Z 5 00h18m00.412560s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:4:0]: (2181038080) Unavailable in read-only Sender# [1:8856:975] TEvPutResult: TEvPutResult {Id# [1:1:4:0:0:131072:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999988} Setting VDisk read-only to 0 for position 4 Invoking SetVDiskReadOnly for vdisk [82000000:1:0:4:0] Setting VDisk read-only to 0 for position 5 Invoking SetVDiskReadOnly for vdisk 
[82000000:1:0:5:0] Setting VDisk read-only to 1 for position 5 Invoking SetVDiskReadOnly for vdisk [82000000:1:0:5:0] Setting VDisk read-only to 1 for position 6 Invoking SetVDiskReadOnly for vdisk [82000000:1:0:6:0] SEND TEvPut with key [1:1:5:0:0:32768:0] 2026-01-08T15:24:33.803969Z 7 00h22m00.500000s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:6:0]: (2181038080) Unavailable in read-only Sender# [1:8870:989] 2026-01-08T15:24:33.804072Z 6 00h22m00.500000s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:5:0]: (2181038080) Unavailable in read-only Sender# [1:8863:982] TEvPutResult: TEvPutResult {Id# [1:1:5:0:0:32768:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999988} Setting VDisk read-only to 0 for position 5 Invoking SetVDiskReadOnly for vdisk [82000000:1:0:5:0] Setting VDisk read-only to 0 for position 6 Invoking SetVDiskReadOnly for vdisk [82000000:1:0:6:0] Setting VDisk read-only to 1 for position 6 Invoking SetVDiskReadOnly for vdisk [82000000:1:0:6:0] Setting VDisk read-only to 1 for position 0 Invoking SetVDiskReadOnly for vdisk [82000000:1:0:0:0] SEND TEvPut with key [1:1:6:0:0:131072:0] 2026-01-08T15:24:40.012446Z 7 00h26m00.561536s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:6:0]: (2181038080) Unavailable in read-only Sender# [1:8870:989] TEvPutResult: TEvPutResult {Id# [1:1:6:0:0:131072:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999988} Setting VDisk read-only to 0 for position 6 Invoking SetVDiskReadOnly for vdisk [82000000:1:0:6:0] Setting VDisk read-only to 0 for position 0 Invoking SetVDiskReadOnly for vdisk [82000000:1:0:0:0] === Read all 7 blob(s) === SEND TEvGet with key [1:1:0:0:0:131072:0] TEvGetResult: TEvGetResult {Status# OK ResponseSz# 1 {[1:1:0:0:0:131072:0] OK Size# 131072 RequestedSize# 131072}} SEND TEvGet with key [1:1:1:0:0:32768:0] TEvGetResult: TEvGetResult {Status# OK ResponseSz# 1 {[1:1:1:0:0:32768:0] OK Size# 32768 RequestedSize# 32768}} SEND TEvGet with key [1:1:2:0:0:131072:0] TEvGetResult: TEvGetResult {Status# OK ResponseSz# 1 {[1:1:2:0:0:131072:0] OK Size# 131072 RequestedSize# 131072}} SEND TEvGet with key [1:1:3:0:0:32768:0] TEvGetResult: TEvGetResult {Status# OK ResponseSz# 1 {[1:1:3:0:0:32768:0] OK Size# 32768 RequestedSize# 32768}} SEND TEvGet with key [1:1:4:0:0:131072:0] TEvGetResult: TEvGetResult {Status# OK ResponseSz# 1 {[1:1:4:0:0:131072:0] OK Size# 131072 RequestedSize# 131072}} SEND TEvGet with key [1:1:5:0:0:32768:0] TEvGetResult: TEvGetResult {Status# OK ResponseSz# 1 {[1:1:5:0:0:32768:0] OK Size# 32768 RequestedSize# 32768}} SEND TEvGet with key [1:1:6:0:0:131072:0] TEvGetResult: TEvGetResult {Status# OK ResponseSz# 1 {[1:1:6:0:0:131072:0] OK Size# 131072 RequestedSize# 131072}} |84.2%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/ut_blobstorage/ut_stop_pdisk/unittest |84.2%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/ut_blobstorage/ut_stop_pdisk/unittest >> DqSpillingFileTests::Write_FileSizeLimitExceeded |84.2%| [TM] {BAZEL_UPLOAD} ydb/core/blobstorage/ut_blobstorage/ut_stop_pdisk/unittest |84.2%| [TM] {BAZEL_UPLOAD} ydb/core/blobstorage/ut_blobstorage/ut_read_only_vdisk/unittest >> DqSpillingFileTests::Write_FileSizeLimitExceeded [GOOD] |84.2%| [TA] $(B)/ydb/core/tx/scheme_board/ut_monitoring/test-results/unittest/{meta.json ... 
results_accumulator.log} |84.2%| [TM] {BAZEL_UPLOAD} ydb/core/blobstorage/ut_blobstorage/ut_stop_pdisk/unittest |84.2%| [TA] {BAZEL_UPLOAD} $(B)/ydb/core/tx/scheme_board/ut_monitoring/test-results/unittest/{meta.json ... results_accumulator.log} |84.2%| [TA] $(B)/ydb/core/wrappers/ut/test-results/unittest/{meta.json ... results_accumulator.log} |84.2%| [TA] {RESULT} $(B)/ydb/core/tx/scheme_board/ut_monitoring/test-results/unittest/{meta.json ... results_accumulator.log} |84.2%| [TA] {RESULT} $(B)/ydb/core/wrappers/ut/test-results/unittest/{meta.json ... results_accumulator.log} |84.2%| [TA] {BAZEL_UPLOAD} $(B)/ydb/core/wrappers/ut/test-results/unittest/{meta.json ... results_accumulator.log} |84.2%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/ut_blobstorage/ut_stop_pdisk/unittest >> TBsHuge::SimpleErasureNone [GOOD] >> TBsLocalRecovery::ChaoticWriteRestart |84.2%| [TM] {BAZEL_UPLOAD} ydb/core/blobstorage/ut_blobstorage/ut_stop_pdisk/unittest |84.2%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/ut_blobstorage/ut_stop_pdisk/unittest |84.2%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/vdisk/repl/ut/unittest >> TQueryResultSizeTrackerTest::CheckAll [GOOD] >> TCowBTreeTest::DuplicateKeysThreadSafe [GOOD] >> TCowBTreeTest::IteratorDestructor [GOOD] >> TCowBTreeTest::Concurrent |84.2%| [TA] $(B)/ydb/core/mind/bscontroller/ut_selfheal/test-results/unittest/{meta.json ... results_accumulator.log} >> Donor::SlayAfterWiping >> TActorTest::TestStateSwitch [GOOD] |84.2%| [TM] {BAZEL_UPLOAD} ydb/core/blobstorage/ut_blobstorage/ut_stop_pdisk/unittest ------- [TM] {asan, default-linux-x86_64, release} ydb/library/yql/dq/actors/spilling/ut/unittest >> DqSpillingFileTests::Write_FileSizeLimitExceeded [GOOD] Test command err: 2026-01-08T15:24:45.786898Z :KQP_COMPUTE ERROR: spilling_file.cpp:412: [Write] File size limit exceeded. From: [1:5:2052], blobId: 2, bytes: 50 |84.2%| [TM] {BAZEL_UPLOAD} ydb/core/blobstorage/vdisk/repl/ut/unittest |84.2%| [TA] {BAZEL_UPLOAD} $(B)/ydb/core/mind/bscontroller/ut_selfheal/test-results/unittest/{meta.json ... results_accumulator.log} |84.2%| [TM] {BAZEL_UPLOAD} ydb/library/yql/dq/actors/spilling/ut/unittest |84.2%| [TA] $(B)/ydb/core/blobstorage/vdisk/hulldb/base/ut/test-results/unittest/{meta.json ... results_accumulator.log} >> TActorTest::TestDie [GOOD] >> TActorTest::TestFilteredGrab >> TActorTest::TestHandleEvent [GOOD] >> TActorTest::TestGetCtxTime >> TCowBTreeTest::Concurrent [GOOD] |84.2%| [TA] {RESULT} $(B)/ydb/core/mind/bscontroller/ut_selfheal/test-results/unittest/{meta.json ... results_accumulator.log} |84.2%| [TA] {BAZEL_UPLOAD} $(B)/ydb/core/blobstorage/vdisk/hulldb/base/ut/test-results/unittest/{meta.json ... results_accumulator.log} |84.2%| [TA] {RESULT} $(B)/ydb/core/blobstorage/vdisk/hulldb/base/ut/test-results/unittest/{meta.json ... 
results_accumulator.log} |84.2%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/ut_blobstorage/ut_stop_pdisk/unittest >> TActorTest::TestWaitFor >> TActorTest::TestGetCtxTime [GOOD] >> BSCStopPDisk::PDiskStop [GOOD] >> TBlobStorageHullCompactDeferredQueueTest::Basic >> TCowBTreeTest::Alignment [GOOD] >> TActorTest::TestWaitFor [GOOD] >> TActorTest::TestFilteredGrab [GOOD] |84.2%| [TM] {BAZEL_UPLOAD} ydb/core/blobstorage/ut_blobstorage/ut_stop_pdisk/unittest |84.2%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/ut_blobstorage/ut_donor/unittest |84.2%| [TM] {BAZEL_UPLOAD} ydb/core/blobstorage/ut_blobstorage/ut_donor/unittest >> ReadBatcher::Range >> TQueryResultSizeTrackerTest::CheckOnlyQueryResult >> TQueryResultSizeTrackerTest::SerializeDeserializeMaxPtotobufSizeMinusOne |84.2%| [TM] {asan, default-linux-x86_64, release} ydb/core/testlib/actors/ut/unittest >> TActorTest::TestStateSwitch [GOOD] |84.2%| [TM] {BAZEL_UPLOAD} ydb/core/testlib/actors/ut/unittest >> TQueryResultSizeTrackerTest::CheckOnlyQueryResult [GOOD] >> SysViewQueryHistory::TopDurationAdd [GOOD] |84.2%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/vdisk/query/ut/unittest >> TQueryResultSizeTrackerTest::CheckAll [GOOD] |84.2%| [TM] {BAZEL_UPLOAD} ydb/core/blobstorage/vdisk/query/ut/unittest |84.2%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/vdisk/query/ut/unittest >> TBsVDiskOutOfSpace::WriteUntilYellowZone [GOOD] >> TBsVDiskRange::RangeGetFromEmptyDB >> TBlobStorageBarriersTreeTest::Tree [GOOD] |84.2%| [TM] {BAZEL_UPLOAD} ydb/core/blobstorage/vdisk/query/ut/unittest |84.2%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/vdisk/query/ut/unittest |84.2%| [TM] {asan, default-linux-x86_64, release} ydb/core/testlib/actors/ut/unittest >> TActorTest::TestGetCtxTime [GOOD] |84.2%| [TM] {BAZEL_UPLOAD} ydb/core/blobstorage/vdisk/query/ut/unittest |84.3%| [TM] {BAZEL_UPLOAD} ydb/core/testlib/actors/ut/unittest |84.2%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/vdisk/query/ut/unittest >> TQueryResultSizeTrackerTest::CheckOnlyQueryResult [GOOD] |84.3%| [TM] {asan, default-linux-x86_64, release} ydb/core/testlib/actors/ut/unittest >> TActorTest::TestFilteredGrab [GOOD] |84.2%| [TM] {BAZEL_UPLOAD} ydb/core/blobstorage/vdisk/query/ut/unittest ------- [TM] {asan, default-linux-x86_64, release} ydb/core/testlib/actors/ut/unittest >> TActorTest::TestWaitFor [GOOD] Test command err: ... waiting for value = 42 ... 
waiting for value = 42 (done) |84.3%| [TM] {BAZEL_UPLOAD} ydb/core/testlib/actors/ut/unittest |84.3%| [TM] {BAZEL_UPLOAD} ydb/core/testlib/actors/ut/unittest |84.3%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/vdisk/repl/ut/unittest >> SysViewQueryHistory::AggrMergeDedup [GOOD] |84.3%| [TM] {BAZEL_UPLOAD} ydb/core/blobstorage/vdisk/repl/ut/unittest >> TBlobStorageBarriersTreeTest::MemViewSnapshots [GOOD] |84.3%| [TM] {asan, default-linux-x86_64, release} ydb/core/sys_view/service/ut/unittest >> SysViewQueryHistory::TopDurationAdd [GOOD] |84.3%| [TM] {BAZEL_UPLOAD} ydb/core/sys_view/service/ut/unittest ------- [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/ut_blobstorage/ut_stop_pdisk/unittest >> BSCStopPDisk::PDiskStop [GOOD] Test command err: RandomSeed# 5489188717509373559 |84.3%| [TM] {BAZEL_UPLOAD} ydb/core/blobstorage/ut_blobstorage/ut_stop_pdisk/unittest |84.3%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/vdisk/hulldb/barriers/ut/unittest >> TBlobStorageBarriersTreeTest::Tree [GOOD] |84.3%| [TM] {BAZEL_UPLOAD} ydb/core/blobstorage/vdisk/hulldb/barriers/ut/unittest |84.3%| [TM] {asan, default-linux-x86_64, release} ydb/core/sys_view/service/ut/unittest >> SysViewQueryHistory::AggrMergeDedup [GOOD] |84.3%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/vdisk/hulldb/barriers/ut/unittest |84.3%| [TM] {BAZEL_UPLOAD} ydb/core/sys_view/service/ut/unittest |84.3%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/vdisk/hulldb/barriers/ut/unittest |84.3%| [TM] {BAZEL_UPLOAD} ydb/core/blobstorage/vdisk/hulldb/barriers/ut/unittest >> SysViewQueryHistory::AddDedup [GOOD] >> SysViewQueryHistory::AddDedup2 [GOOD] |84.3%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/vdisk/hulldb/barriers/ut/unittest >> TBlobStorageBarriersTreeTest::MemViewSnapshots [GOOD] |84.3%| [TM] {BAZEL_UPLOAD} ydb/core/blobstorage/vdisk/hulldb/barriers/ut/unittest |84.3%| [TM] {BAZEL_UPLOAD} ydb/core/blobstorage/vdisk/hulldb/barriers/ut/unittest >> Scheme::YqlTypesMustBeDefined [GOOD] >> SchemeBorders::Full >> Donor::SkipBadDonor >> SchemeBorders::Full [GOOD] |84.3%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/dsproxy/ut_strategy/unittest |84.3%| [TM] {BAZEL_UPLOAD} ydb/core/blobstorage/dsproxy/ut_strategy/unittest >> TBsVDiskRange::RangeGetFromEmptyDB [GOOD] >> TBsVDiskRange::Simple3PutRangeGetAllBackwardFresh |84.3%| [TM] {asan, default-linux-x86_64, release} ydb/core/sys_view/service/ut/unittest >> SysViewQueryHistory::AddDedup2 [GOOD] |84.3%| [TA] $(B)/ydb/core/blobstorage/ut_blobstorage/ut_stop_pdisk/test-results/unittest/{meta.json ... results_accumulator.log} >> Scheme::EmptyOwnedCellVec [GOOD] >> Scheme::NonEmptyOwnedCellVec [GOOD] |84.3%| [TM] {BAZEL_UPLOAD} ydb/core/sys_view/service/ut/unittest |84.3%| [TA] {BAZEL_UPLOAD} $(B)/ydb/core/blobstorage/ut_blobstorage/ut_stop_pdisk/test-results/unittest/{meta.json ... results_accumulator.log} |84.3%| [TA] {RESULT} $(B)/ydb/core/blobstorage/ut_blobstorage/ut_stop_pdisk/test-results/unittest/{meta.json ... 
results_accumulator.log} ------- [TM] {asan, default-linux-x86_64, release} ydb/core/util/ut/unittest >> TCowBTreeTest::Alignment [GOOD] Test command err: Producer 0 worked for 0.3343159203 seconds Producer 1 worked for 0.3246678173 seconds Consumer 0 worked for 0.3617820121 seconds on a snapshot of size 20000 Consumer 1 worked for 0.3736659953 seconds on a snapshot of size 40000 Consumer 2 worked for 0.5378420531 seconds on a snapshot of size 60000 Consumer 3 worked for 0.6229428579 seconds on a snapshot of size 80000 Consumers had 1199994 successful seeks |84.3%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/vdisk/hulldb/barriers/ut/unittest |84.3%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/backpressure/ut/unittest >> Scheme::TSerializedCellVec [GOOD] >> Scheme::UnsafeAppend [GOOD] |84.3%| [TM] {BAZEL_UPLOAD} ydb/core/util/ut/unittest |84.3%| [TM] {BAZEL_UPLOAD} ydb/core/blobstorage/vdisk/hulldb/barriers/ut/unittest |84.3%| [TM] {BAZEL_UPLOAD} ydb/core/blobstorage/backpressure/ut/unittest >> Scheme::EmptyCell [GOOD] >> Scheme::CompareWithNullSemantics [GOOD] >> TCircleBufTest::SimpleTest [GOOD] >> TCircleBufTest::PtrTest [GOOD] >> TBlobStorageSyncNeighborsTest::IterateOverAllDisks [GOOD] >> TLsnAllocTrackerTests::Test1 [GOOD] >> TLsnMngrTests::AllocLsnForLocalUse >> TBlobStorageSyncNeighborsTest::SerDes [GOOD] >> TBlobStorageSyncNeighborsTest::CheckVDiskIterators [GOOD] >> TCircleBufStringStreamTest::TestAligned [GOOD] >> Scheme::CellVecTryParse |84.3%| [TS] {asan, default-linux-x86_64, release} ydb/core/scheme/ut/unittest >> SchemeBorders::Full [GOOD] |84.3%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/vdisk/hulldb/barriers/ut/unittest >> Scheme::CellVecTryParse [GOOD] >> Scheme::CompareOrder [GOOD] >> Scheme::CompareUuidCells [GOOD] >> BSCReadOnlyPDisk::RestartAndReadOnlyConsecutive [GOOD] >> Scheme::NullCell [GOOD] >> Scheme::NotEmptyCell [GOOD] >> TypesProto::DecimalNoTypeInfo [GOOD] >> TypesProto::Decimal35 [GOOD] |84.3%| [TS] {BAZEL_UPLOAD} ydb/core/scheme/ut/unittest |84.3%| [TM] {BAZEL_UPLOAD} ydb/core/blobstorage/vdisk/hulldb/barriers/ut/unittest |84.3%| [TS] {asan, default-linux-x86_64, release} ydb/core/scheme/ut/unittest >> Scheme::NonEmptyOwnedCellVec [GOOD] |84.3%| [TS] {BAZEL_UPLOAD} ydb/core/scheme/ut/unittest ------- [TS] {asan, default-linux-x86_64, release} ydb/core/scheme/ut/unittest >> Scheme::UnsafeAppend [GOOD] Test command err: Serialize: 0.001090s Cells constructor: 0.003310s Parse: 0.000174s Copy: 0.000151s Move: 0.000112s |84.3%| [TS] {BAZEL_UPLOAD} ydb/core/scheme/ut/unittest >> TVDiskConfigTest::RtmrProblem1 [GOOD] >> TVDiskConfigTest::RtmrProblem2 [GOOD] >> TVDiskConfigTest::ThreeLevels [GOOD] |84.3%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/vdisk/common/ut/unittest >> TCircleBufStringStreamTest::TestAligned [GOOD] |84.3%| [TS] {asan, default-linux-x86_64, release} ydb/core/scheme/ut/unittest >> Scheme::CompareWithNullSemantics [GOOD] >> Scheme::OwnedCellVecFromSerialized [GOOD] |84.3%| [TS] {asan, default-linux-x86_64, release} ydb/core/scheme/ut/unittest >> Scheme::CompareUuidCells [GOOD] >> Scheme::TSerializedCellMatrix [GOOD] |84.3%| [TM] {BAZEL_UPLOAD} ydb/core/blobstorage/vdisk/common/ut/unittest |84.3%| [TA] $(B)/ydb/core/sys_view/service/ut/test-results/unittest/{meta.json ... 
results_accumulator.log} |84.3%| [TS] {BAZEL_UPLOAD} ydb/core/scheme/ut/unittest |84.4%| [TS] {BAZEL_UPLOAD} ydb/core/scheme/ut/unittest |84.3%| [TS] {asan, default-linux-x86_64, release} ydb/core/scheme/ut/unittest >> Scheme::NotEmptyCell [GOOD] |84.3%| [TS] {asan, default-linux-x86_64, release} ydb/core/scheme/ut/unittest >> TypesProto::Decimal35 [GOOD] |84.4%| [TA] {BAZEL_UPLOAD} $(B)/ydb/core/sys_view/service/ut/test-results/unittest/{meta.json ... results_accumulator.log} >> TBsVDiskRepl3::ReplEraseDiskRestoreMultipart [GOOD] >> TBsVDiskRepl3::AnubisTest [GOOD] >> TBsVDiskRepl3::ReplPerf |84.4%| [TA] {RESULT} $(B)/ydb/core/sys_view/service/ut/test-results/unittest/{meta.json ... results_accumulator.log} |84.4%| [TS] {BAZEL_UPLOAD} ydb/core/scheme/ut/unittest |84.4%| [TS] {BAZEL_UPLOAD} ydb/core/scheme/ut/unittest |84.4%| [TS] {asan, default-linux-x86_64, release} ydb/core/scheme/ut/unittest >> Scheme::TSerializedCellMatrix [GOOD] |84.4%| [TS] {BAZEL_UPLOAD} ydb/core/scheme/ut/unittest >> DqSpillingFileTests::FdCounterSingleFile >> DqSpillingFileTests::SingleFilePart >> DqSpillingFileTests::FdCounterSingleFile [GOOD] >> DqSpillingFileTests::FdCounterMultiFile |84.4%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/vdisk/common/ut/unittest >> TVDiskConfigTest::ThreeLevels [GOOD] |84.4%| [TM] {BAZEL_UPLOAD} ydb/core/blobstorage/vdisk/common/ut/unittest >> DqSpillingFileTests::SingleFilePart [GOOD] >> DqSpillingFileTests::StartError [GOOD] |84.4%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/blobstorage/ut_blobstorage/ydb-core-blobstorage-ut_blobstorage |84.4%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/blobstorage/ut_blobstorage/ydb-core-blobstorage-ut_blobstorage |84.4%| [LD] {RESULT} $(B)/ydb/core/blobstorage/ut_blobstorage/ydb-core-blobstorage-ut_blobstorage |84.4%| [TA] $(B)/ydb/core/blobstorage/backpressure/ut/test-results/unittest/{meta.json ... results_accumulator.log} ------- [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/ut_blobstorage/ut_read_only_pdisk/unittest >> BSCReadOnlyPDisk::RestartAndReadOnlyConsecutive [GOOD] Test command err: RandomSeed# 13823896289904566964 >> Donor::SlayAfterWiping [GOOD] >> DqSpillingFileTests::Simple >> AuthDatabaseAdmin::PassOnOwnerMatchUserSidWithGroup [GOOD] >> AuthTokenAllowed::FailOnListAndEmptyToken [GOOD] >> AuthDatabaseAdmin::PassOnOwnerMatchUserSid [GOOD] >> DqSpillingFileTests::FdCounterMultiFile [GOOD] >> TBsVDiskRange::Simple3PutRangeGetAllBackwardFresh [GOOD] >> TLsnMngrTests::AllocLsnForLocalUse [GOOD] >> TBsVDiskRange::Simple3PutRangeGetAllBackwardCompaction >> DqSpillingFileTests::Simple [GOOD] >> TLsnMngrTests::AllocLsnForLocalUse2Threads |84.4%| [TA] $(B)/ydb/core/blobstorage/vdisk/hulldb/barriers/ut/test-results/unittest/{meta.json ... results_accumulator.log} |84.4%| [TA] {BAZEL_UPLOAD} $(B)/ydb/core/blobstorage/backpressure/ut/test-results/unittest/{meta.json ... results_accumulator.log} |84.4%| [TM] {BAZEL_UPLOAD} ydb/core/blobstorage/ut_blobstorage/ut_read_only_pdisk/unittest |84.4%| [TA] {RESULT} $(B)/ydb/core/blobstorage/backpressure/ut/test-results/unittest/{meta.json ... results_accumulator.log} |84.4%| [TA] {BAZEL_UPLOAD} $(B)/ydb/core/blobstorage/vdisk/hulldb/barriers/ut/test-results/unittest/{meta.json ... results_accumulator.log} |84.4%| [TA] {RESULT} $(B)/ydb/core/blobstorage/vdisk/hulldb/barriers/ut/test-results/unittest/{meta.json ... 
results_accumulator.log} >> DqSpillingFileTests::ThreadPoolQueueOverflow ------- [TM] {asan, default-linux-x86_64, release} ydb/library/yql/dq/actors/spilling/ut/unittest >> DqSpillingFileTests::StartError [GOOD] Test command err: 2026-01-08T15:24:52.684743Z :KQP_COMPUTE ERROR: spilling_file.cpp:239: (TIoSystemError) (Error 13: Permission denied) util/folder/path.cpp:424: could not create directory /nonexistent 2026-01-08T15:24:52.684868Z :KQP_COMPUTE ERROR: spilling_file.cpp:278: Service is broken, send error to client [1:5:2052] 2026-01-08T15:24:52.685140Z :KQP_COMPUTE ERROR: spilling_file.cpp:278: Service is broken, send error to client [1:5:2052] 2026-01-08T15:24:52.685217Z :KQP_COMPUTE ERROR: spilling_file.cpp:278: Service is broken, send error to client [1:5:2052] |84.4%| [TM] {asan, default-linux-x86_64, release} ydb/library/yql/dq/actors/spilling/ut/unittest >> DqSpillingFileTests::FdCounterMultiFile [GOOD] >> DqSpillingFileTests::NoSpillingService >> DqSpillingFileTests::ThreadPoolQueueOverflow [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/ut_blobstorage/ut_donor/unittest >> Donor::SlayAfterWiping [GOOD] Test command err: RandomSeed# 8316752343905245119 2026-01-08T15:24:50.580297Z 1 00h01m14.361024s :BS_SYNCER ERROR: PDiskId# 1001 VDISK[82000000:_:0:0:0]: (2181038080) TVDiskGuidRecoveryActor: DECISION: [Decision# LostData SubsequentFailure# 0] 2026-01-08T15:24:50.582203Z 1 00h01m14.361024s :BS_SYNCER ERROR: PDiskId# 1001 VDISK[82000000:_:0:0:0]: (2181038080) TVDiskGuidRecoveryActor: FINISH: [Decision# LostData Guid# 10279631000478962042] 2026-01-08T15:24:50.601955Z 1 00h01m14.361024s :BS_SYNCER ERROR: PDiskId# 1001 VDISK[82000000:_:0:0:0]: (2181038080) THullOsirisActor: FINISH: BlobsResurrected# 0 PartsResurrected# 0 |84.4%| [TM] {BAZEL_UPLOAD} ydb/library/yql/dq/actors/spilling/ut/unittest |84.4%| [TM] {BAZEL_UPLOAD} ydb/library/yql/dq/actors/spilling/ut/unittest |84.4%| [TM] {BAZEL_UPLOAD} ydb/core/blobstorage/ut_blobstorage/ut_donor/unittest >> DqSpillingFileTests::NoSpillingService [GOOD] >> DqSpillingFileTests::ReadError |84.4%| [TM] {asan, default-linux-x86_64, release} ydb/library/yql/dq/actors/spilling/ut/unittest >> DqSpillingFileTests::Simple [GOOD] |84.4%| [TM] {BAZEL_UPLOAD} ydb/library/yql/dq/actors/spilling/ut/unittest >> DqSpillingFileTests::ReadError [GOOD] |84.4%| [TA] $(B)/ydb/core/scheme/ut/test-results/unittest/{meta.json ... results_accumulator.log} |84.4%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/vdisk/common/ut/unittest >> TLsnMngrTests::AllocLsnForLocalUse [GOOD] |84.4%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/vdisk/hullop/ut/unittest |84.4%| [TM] {asan, default-linux-x86_64, release} ydb/core/base/ut_auth/unittest >> AuthDatabaseAdmin::PassOnOwnerMatchUserSid [GOOD] >> AuthDatabaseAdmin::FailOnOwnerAndTokenWithEmptyUserSid [GOOD] >> AuthDatabaseAdmin::FailOnOwnerAndTokenWithEmptyUserSidAndGroups [GOOD] >> AuthDatabaseAdmin::PassOnOwnerMatchGroupSid [GOOD] |84.4%| [TM] {asan, default-linux-x86_64, release} ydb/library/yql/dq/actors/spilling/ut/unittest >> DqSpillingFileTests::SingleFilePart [GOOD] |84.4%| [TA] {BAZEL_UPLOAD} $(B)/ydb/core/scheme/ut/test-results/unittest/{meta.json ... 
results_accumulator.log} |84.4%| [TM] {BAZEL_UPLOAD} ydb/core/blobstorage/vdisk/hullop/ut/unittest |84.4%| [TM] {BAZEL_UPLOAD} ydb/core/blobstorage/vdisk/common/ut/unittest |84.4%| [TM] {BAZEL_UPLOAD} ydb/core/base/ut_auth/unittest |84.4%| [TM] {BAZEL_UPLOAD} ydb/library/yql/dq/actors/spilling/ut/unittest >> ReadBatcher::ReadBatcher |84.4%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/vdisk/hullop/ut/unittest ------- [TM] {asan, default-linux-x86_64, release} ydb/library/yql/dq/actors/spilling/ut/unittest >> DqSpillingFileTests::ThreadPoolQueueOverflow [GOOD] Test command err: 2026-01-08T15:24:53.712885Z :KQP_COMPUTE ERROR: spilling_file.cpp:476: [Write] Can not run operation [Write] Can not run operation 2026-01-08T15:24:53.713033Z :KQP_COMPUTE ERROR: spilling_file.cpp:476: [Write] Can not run operation [Write] Can not run operation 2026-01-08T15:24:53.713101Z :KQP_COMPUTE ERROR: spilling_file.cpp:476: [Write] Can not run operation [Write] Can not run operation 2026-01-08T15:24:53.713202Z :KQP_COMPUTE ERROR: spilling_file.cpp:476: [Write] Can not run operation 2026-01-08T15:24:53.713259Z :KQP_COMPUTE ERROR: spilling_file.cpp:476: [Write] Can not run operation [Write] Can not run operation [Write] Can not run operation 2026-01-08T15:24:53.713337Z :KQP_COMPUTE ERROR: spilling_file.cpp:476: [Write] Can not run operation [Write] Can not run operation 2026-01-08T15:24:53.713404Z :KQP_COMPUTE ERROR: spilling_file.cpp:476: [Write] Can not run operation [Write] Can not run operation 2026-01-08T15:24:53.716342Z :KQP_COMPUTE ERROR: spilling_file.cpp:476: [Write] Can not run operation [Write] Can not run operation 2026-01-08T15:24:53.716459Z :KQP_COMPUTE ERROR: spilling_file.cpp:476: [Write] Can not run operation [Write] Can not run operation 2026-01-08T15:24:53.716543Z :KQP_COMPUTE ERROR: spilling_file.cpp:476: [Write] Can not run operation [Write] Can not run operation 2026-01-08T15:24:53.716638Z :KQP_COMPUTE ERROR: spilling_file.cpp:476: [Write] Can not run operation [Write] Can not run operation 2026-01-08T15:24:53.716708Z :KQP_COMPUTE ERROR: spilling_file.cpp:476: [Write] Can not run operation [Write] Can not run operation 2026-01-08T15:24:53.716772Z :KQP_COMPUTE ERROR: spilling_file.cpp:476: [Write] Can not run operation [Write] Can not run operation 2026-01-08T15:24:53.716841Z :KQP_COMPUTE ERROR: spilling_file.cpp:476: [Write] Can not run operation [Write] Can not run operation 2026-01-08T15:24:53.716935Z :KQP_COMPUTE ERROR: spilling_file.cpp:476: [Write] Can not run operation [Write] Can not run operation 2026-01-08T15:24:53.717006Z :KQP_COMPUTE ERROR: spilling_file.cpp:476: [Write] Can not run operation [Write] Can not run operation 2026-01-08T15:24:53.717079Z :KQP_COMPUTE ERROR: spilling_file.cpp:476: [Write] Can not run operation [Write] Can not run operation 2026-01-08T15:24:53.717153Z :KQP_COMPUTE ERROR: spilling_file.cpp:476: [Write] Can not run operation [Write] Can not run operation 2026-01-08T15:24:53.717242Z :KQP_COMPUTE ERROR: spilling_file.cpp:476: [Write] Can not run operation [Write] Can not run operation 2026-01-08T15:24:53.717315Z :KQP_COMPUTE ERROR: spilling_file.cpp:476: [Write] Can not run operation [Write] Can not run operation 2026-01-08T15:24:53.717406Z :KQP_COMPUTE ERROR: spilling_file.cpp:476: [Write] Can not run operation [Write] Can not run operation 2026-01-08T15:24:53.717468Z :KQP_COMPUTE ERROR: spilling_file.cpp:476: [Write] Can not run operation [Write] Can not run operation 2026-01-08T15:24:53.717552Z :KQP_COMPUTE ERROR: 
spilling_file.cpp:476: [Write] Can not run operation [Write] Can not run operation 2026-01-08T15:24:53.717619Z :KQP_COMPUTE ERROR: spilling_file.cpp:476: [Write] Can not run operation [Write] Can not run operation 2026-01-08T15:24:53.717714Z :KQP_COMPUTE ERROR: spilling_file.cpp:476: [Write] Can not run operation [Write] Can not run operation 2026-01-08T15:24:53.717781Z :KQP_COMPUTE ERROR: spilling_file.cpp:476: [Write] Can not run operation [Write] Can not run operation 2026-01-08T15:24:53.717859Z :KQP_COMPUTE ERROR: spilling_file.cpp:476: [Write] Can not run operation [Write] Can not run operation 2026-01-08T15:24:53.717934Z :KQP_COMPUTE ERROR: spilling_file.cpp:476: [Write] Can not run operation [Write] Can not run operation 2026-01-08T15:24:53.718014Z :KQP_COMPUTE ERROR: spilling_file.cpp:476: [Write] Can not run operation [Write] Can not run operation 2026-01-08T15:24:53.718099Z :KQP_COMPUTE ERROR: spilling_file.cpp:476: [Write] Can not run operation [Write] Can not run operation 2026-01-08T15:24:53.718193Z :KQP_COMPUTE ERROR: spilling_file.cpp:476: [Write] Can not run operation [Write] Can not run operation 2026-01-08T15:24:53.718266Z :KQP_COMPUTE ERROR: spilling_file.cpp:476: [Write] Can not run operation [Write] Can not run operation 2026-01-08T15:24:53.718409Z :KQP_COMPUTE ERROR: spilling_file.cpp:476: [Write] Can not run operation [Write] Can not run operation 2026-01-08T15:24:53.718536Z :KQP_COMPUTE ERROR: spilling_file.cpp:476: [Write] Can not run operation [Write] Can not run operation 2026-01-08T15:24:53.718608Z :KQP_COMPUTE ERROR: spilling_file.cpp:476: [Write] Can not run operation [Write] Can not run operation 2026-01-08T15:24:53.718671Z :KQP_COMPUTE ERROR: spilling_file.cpp:476: [Write] Can not run operation [Write] Can not run operation 2026-01-08T15:24:53.718750Z :KQP_COMPUTE ERROR: spilling_file.cpp:476: [Write] Can not run operation [Write] Can not run operation 2026-01-08T15:24:53.718821Z :KQP_COMPUTE ERROR: spilling_file.cpp:476: [Write] Can not run operation [Write] Can not run operation 2026-01-08T15:24:53.718900Z :KQP_COMPUTE ERROR: spilling_file.cpp:476: [Write] Can not run operation [Write] Can not run operation 2026-01-08T15:24:53.718981Z :KQP_COMPUTE ERROR: spilling_file.cpp:476: [Write] Can not run operation [Write] Can not run operation 2026-01-08T15:24:53.719071Z :KQP_COMPUTE ERROR: spilling_file.cpp:476: [Write] Can not run operation [Write] Can not run operation 2026-01-08T15:24:53.719141Z :KQP_COMPUTE ERROR: spilling_file.cpp:476: [Write] Can not run operation [Write] Can not run operation 2026-01-08T15:24:53.719223Z :KQP_COMPUTE ERROR: spilling_file.cpp:476: [Write] Can not run operation [Write] Can not run operation 2026-01-08T15:24:53.719283Z :KQP_COMPUTE ERROR: spilling_file.cpp:476: [Write] Can not run operation [Write] Can not run operation 2026-01-08T15:24:53.719343Z :KQP_COMPUTE ERROR: spilling_file.cpp:476: [Write] Can not run operation [Write] Can not run operation 2026-01-08T15:24:53.719407Z :KQP_COMPUTE ERROR: spilling_file.cpp:476: [Write] Can not run operation 2026-01-08T15:24:53.719496Z :KQP_COMPUTE ERROR: spilling_file.cpp:476: [Write] Can not run operation [Write] Can not run operation [Write] Can not run operation 2026-01-08T15:24:53.719600Z :KQP_COMPUTE ERROR: spilling_file.cpp:476: [Write] Can not run operation [Write] Can not run operation 2026-01-08T15:24:53.719676Z :KQP_COMPUTE ERROR: spilling_file.cpp:476: [Write] Can not run operation [Write] Can not run operation 2026-01-08T15:24:53.719750Z :KQP_COMPUTE ERROR: 
spilling_file.cpp:476: [Write] Can not run operation [Write] Can not run operation 2026-01-08T15:24:53.719814Z :KQP_COMPUTE ERROR: spilling_file.cpp:476: [Write] Can not run operation [Write] Can not run operation 2026-01-08T15:24:53.719897Z :KQP_COMPUTE ERROR: spilling_file.cpp:476: [Write] Can not run operation [Write] Can not run operation 2026-01-08T15:24:53.719963Z :KQP_COMPUTE ERROR: spilling_file.cpp:476: [Write] Can not run operation [Write] Can not run operation 2026-01-08T15:24:53.720034Z :KQP_COMPUTE ERROR: spilling_file.cpp:476: [Write] Can not run operation [Write] Can not run operation 2026-01-08T15:24:53.720112Z :KQP_COMPUTE ERROR: spilling_file.cpp:476: [Write] Can not run operation [Write] Can not run operation 2026-01-08T15:24:53.720182Z :KQP_COMPUTE ERROR: spilling_file.cpp:476: [Write] Can not run operation 2026-01-08T15:24:53.720231Z :KQP_COMPUTE ERROR: spilling_file.cpp:476: [Write] Can not run operation [Write] Can not run operation [Write] Can not run operation 2026-01-08T15:24:53.720313Z :KQP_COMPUTE ERROR: spilling_file.cpp:476: [Write] Can not run operation [Write] Can not run operation 2026-01-08T15:24:53.720400Z :KQP_COMPUTE ERROR: spilling_file.cpp:476: [Write] Can not run operation [Write] Can not run operation 2026-01-08T15:24:53.720501Z :KQP_COMPUTE ERROR: spilling_file.cpp:476: [Write] Can not run operation [Write] Can not run operation 2026-01-08T15:24:53.720581Z :KQP_COMPUTE ERROR: spilling_file.cpp:476: [Write] Can not run operation [Write] Can not run operation 2026-01-08T15:24:53.720660Z :KQP_COMPUTE ERROR: spilling_file.cpp:476: [Write] Can not run operation [Write] Can not run operation 2026-01-08T15:24:53.720748Z :KQP_COMPUTE ERROR: spilling_file.cpp:476: [Write] Can not run operation [Write] Can not run operation 2026-01-08T15:24:53.720810Z :KQP_COMPUTE ERROR: spilling_file.cpp:476: [Write] Can not run operation [Write] Can not run operation 2026-01-08T15:24:53.720890Z :KQP_COMPUTE ERROR: spilling_file.cpp:476: [Write] Can not run operation [Write] Can not run operation 2026-01-08T15:24:53.720957Z :KQP_COMPUTE ERROR: spilling_file.cpp:476: [Write] Can not run operation 2026-01-08T15:24:53.721005Z :KQP_COMPUTE ERROR: spilling_file.cpp:476: [Write] Can not run operation [Write] Can not run operation [Write] Can not run operation 2026-01-08T15:24:53.721078Z :KQP_COMPUTE ERROR: spilling_file.cpp:476: [Write] Can not run operation [Write] Can not run operation 2026-01-08T15:24:53.721152Z :KQP_COMPUTE ERROR: spilling_file.cpp:476: [Write] Can not run operation [Write] Can not run operation 2026-01-08T15:24:53.721215Z :KQP_COMPUTE ERROR: spilling_file.cpp:476: [Write] Can not run operation [Write] Can not run operation 2026-01-08T15:24:53.721290Z :KQP_COMPUTE ERROR: spilling_file.cpp:476: [Write] Can not run operation [Write] Can not run operation 2026-01-08T15:24:53.721376Z :KQP_COMPUTE ERROR: spilling_file.cpp:476: [Write] Can not run operation [Write] Can not run operation 2026-01-08T15:24:53.721465Z :KQP_COMPUTE ERROR: spilling_file.cpp:476: [Write] Can not run operation 2026-01-08T15:24:53.721523Z :KQP_COMPUTE ERROR: spilling_file.cpp:476: [Write] Can not run operation [Write] Can not run operation [Write] Can not run operation 2026-01-08T15:24:53.721624Z :KQP_COMPUTE ERROR: spilling_file.cpp:476: [Write] Can not run operation [Write] Can not run operation 2026-01-08T15:24:53.721689Z :KQP_COMPUTE ERROR: spilling_file.cpp:476: [Write] Can not run operation [Write] Can not run operation 2026-01-08T15:24:53.721747Z :KQP_COMPUTE ERROR: 
spilling_file.cpp:476: [Write] Can not run operation [Write] Can not run operation 2026-01-08T15:24:53.721828Z :KQP_COMPUTE ERROR: spilling_file.cpp:476: [Write] Can not run operation [Write] Can not run operation 2026-01-08T15:24:53.721915Z :KQP_COMPUTE ERROR: spilling_file.cpp:476: [Write] Can not run operation [Write] Can not run operation 2026-01-08T15:24:53.722001Z :KQP_COMPUTE ERROR: spilling_file.cpp:476: [Write] Can not run operation [Write] Can not run operation 2026-01-08T15:24:53.722089Z :KQP_COMPUTE ERROR: spilling_file.cpp:476: [Write] Can not run operation [Write] Can not run operation 2026-01-08T15:24:53.722150Z :KQP_COMPUTE ERROR: spilling_file.cpp:476: [Write] Can not run operation [Write] Can not run operation 2026-01-08T15:24:53.722225Z :KQP_COMPUTE ERROR: spilling_file.cpp:476: [Write] Can not run operation [Write] Can not run operation 2026-01-08T15:24:53.722312Z :KQP_COMPUTE ERROR: spilling_file.cpp:476: [Write] Can not run operation [Write] Can not run operation 2026-01-08T15:24:53.729072Z :KQP_COMPUTE ERROR: spilling_file.cpp:476: [Write] Can not run operation [Write] Can not run operation 2026-01-08T15:24:53.729221Z :KQP_COMPUTE ERROR: spilling_file.cpp:476: [Write] Can not run operation [Write] Can not run operation 2026-01-08T15:24:53.729326Z :KQP_COMPUTE ERROR: spilling_file.cpp:476: [Write] Can not run operation 2026-01-08T15:24:53.729384Z :KQP_COMPUTE ERROR: spilling_file.cpp:476: [Write] Can not run operation [Write] Can not run operation [Write] Can not run operation 2026-01-08T15:24:53.729458Z :KQP_COMPUTE ERROR: spilling_file.cpp:476: [Write] Can not run operation [Write] Can not run operation 2026-01-08T15:24:53.729537Z :KQP_COMPUTE ERROR: spilling_file.cpp:476: [Write] Can not run operation [Write] Can not run operation 2026-01-08T15:24:53.729618Z :KQP_COMPUTE ERROR: spilling_file.cpp:476: [Write] Can not run operation 2026-01-08T15:24:53.729705Z :KQP_COMPUTE ERROR: spilling_file.cpp:476: [Write] Can not run operation [Write] Can not run operation [Write] Can not run operation 2026-01-08T15:24:53.729808Z :KQP_COMPUTE ERROR: spilling_file.cpp:476: [Write] Can not run operation [Write] Can not run operation 2026-01-08T15:24:53.729901Z :KQP_COMPUTE ERROR: spilling_file.cpp:476: [Write] Can not run operation [Write] Can not run operation 2026-01-08T15:24:53.729996Z :KQP_COMPUTE ERROR: spilling_file.cpp:476: [Write] Can not run operation [Write] Can not run operation 2026-01-08T15:24:53.730063Z :KQP_COMPUTE ERROR: spilling_file.cpp:476: [Write] Can not run operation [Write] Can not run operation 2026-01-08T15:24:53.730219Z :KQP_COMPUTE ERROR: spilling_file.cpp:357: [CloseFile] Can not run operation |84.4%| [TM] {BAZEL_UPLOAD} ydb/core/blobstorage/vdisk/hullop/ut/unittest >> TLsnMngrTests::AllocLsnForLocalUse2Threads [GOOD] >> TLsnMngrTests::AllocLsnForLocalUse10Threads ------- [TM] {asan, default-linux-x86_64, release} ydb/library/yql/dq/actors/spilling/ut/unittest >> DqSpillingFileTests::ReadError [GOOD] Test command err: 2026-01-08T15:24:54.028071Z :KQP_COMPUTE ERROR: spilling_file.cpp:968: [Read async] file: /home/runner/.ya/build/build_root/663n/0024d7/ydb/library/yql/dq/actors/spilling/ut/test-results/unittest/testing_out_stuff/chunk3/dq_spilling_42132/node_1_51effdf3-317b2f78-bfa23708-eb23de1e/1_test_0, blobId: 0, offset: 0, error: (Error 2: No such file or directory) util/system/file.cpp:936: can't open 
"/home/runner/.ya/build/build_root/663n/0024d7/ydb/library/yql/dq/actors/spilling/ut/test-results/unittest/testing_out_stuff/chunk3/dq_spilling_42132/node_1_51effdf3-317b2f78-bfa23708-eb23de1e/1_test_0" with mode RdOnly (0x00000008) |84.4%| [TA] $(B)/ydb/core/util/ut/test-results/unittest/{meta.json ... results_accumulator.log} |84.4%| [TM] {BAZEL_UPLOAD} ydb/library/yql/dq/actors/spilling/ut/unittest |84.4%| [TA] {RESULT} $(B)/ydb/core/scheme/ut/test-results/unittest/{meta.json ... results_accumulator.log} |84.4%| [TM] {asan, default-linux-x86_64, release} ydb/core/base/ut_auth/unittest >> AuthDatabaseAdmin::PassOnOwnerMatchGroupSid [GOOD] |84.4%| [TM] {BAZEL_UPLOAD} ydb/library/yql/dq/actors/spilling/ut/unittest |84.4%| [TM] {asan, default-linux-x86_64, release} ydb/library/yql/dq/actors/spilling/ut/unittest >> DqSpillingFileTests::NoSpillingService [GOOD] >> BSCReadOnlyPDisk::ReadOnlyNotAllowed [GOOD] |84.4%| [TA] {BAZEL_UPLOAD} $(B)/ydb/core/util/ut/test-results/unittest/{meta.json ... results_accumulator.log} |84.4%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/vdisk/hullop/ut/unittest |84.4%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/vdisk/hullop/ut/unittest |84.5%| [TM] {BAZEL_UPLOAD} ydb/core/base/ut_auth/unittest |84.5%| [TM] {BAZEL_UPLOAD} ydb/library/yql/dq/actors/spilling/ut/unittest >> Donor::SkipBadDonor [GOOD] |84.5%| [TM] {BAZEL_UPLOAD} ydb/core/blobstorage/vdisk/hullop/ut/unittest |84.5%| [TM] {BAZEL_UPLOAD} ydb/core/blobstorage/vdisk/hullop/ut/unittest |84.5%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/vdisk/hullop/ut/unittest |84.5%| [TM] {BAZEL_UPLOAD} ydb/core/blobstorage/vdisk/hullop/ut/unittest >> TBsVDiskRange::Simple3PutRangeGetAllBackwardCompaction [GOOD] >> TBsLocalRecovery::ChaoticWriteRestartHugeXXX [GOOD] >> TBsLocalRecovery::ChaoticWriteRestartHugeIncreased |84.5%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/vdisk/hullop/ut/unittest >> TActorTest::TestWaitFuture |84.5%| [TM] {BAZEL_UPLOAD} ydb/core/blobstorage/vdisk/hullop/ut/unittest >> TActorTest::TestWaitFuture [GOOD] >> TQueryResultSizeTrackerTest::SerializeDeserializeMaxPtotobufSizePlusOne >> ReadBatcher::ReadBatcher [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/ut_blobstorage/ut_read_only_pdisk/unittest >> BSCReadOnlyPDisk::ReadOnlyNotAllowed [GOOD] Test command err: RandomSeed# 6567333072076834343 |84.5%| [TM] {BAZEL_UPLOAD} ydb/core/blobstorage/ut_blobstorage/ut_read_only_pdisk/unittest |84.5%| [TM] {asan, default-linux-x86_64, release} ydb/core/testlib/actors/ut/unittest >> TActorTest::TestWaitFuture [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/ut_blobstorage/ut_donor/unittest >> Donor::SkipBadDonor [GOOD] Test command err: RandomSeed# 13334472414930119051 2026-01-08T15:24:53.555930Z 1 00h01m14.361024s :BS_SYNCER ERROR: PDiskId# 1001 VDISK[82000000:_:0:0:0]: (2181038080) TVDiskGuidRecoveryActor: DECISION: [Decision# LostData SubsequentFailure# 0] 2026-01-08T15:24:53.557696Z 1 00h01m14.361024s :BS_SYNCER ERROR: PDiskId# 1001 VDISK[82000000:_:0:0:0]: (2181038080) TVDiskGuidRecoveryActor: FINISH: [Decision# LostData Guid# 2819063126599934214] 2026-01-08T15:24:53.588195Z 1 00h01m14.361024s :BS_SYNCER ERROR: PDiskId# 1001 VDISK[82000000:_:0:0:0]: (2181038080) THullOsirisActor: FINISH: BlobsResurrected# 0 PartsResurrected# 0 >> TDelayedResponsesTests::Test [GOOD] |84.5%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/ut_vdisk/unittest >> 
TBsVDiskRange::Simple3PutRangeGetAllBackwardCompaction [GOOD] >> ConsoleDumper::Basic |84.5%| [TM] {BAZEL_UPLOAD} ydb/core/blobstorage/ut_blobstorage/ut_donor/unittest |84.5%| [TM] {BAZEL_UPLOAD} ydb/core/testlib/actors/ut/unittest |84.5%| [TM] {BAZEL_UPLOAD} ydb/core/blobstorage/ut_vdisk/unittest >> ConsoleDumper::Basic [GOOD] >> ConsoleDumper::CoupleMerge >> BSCReadOnlyPDisk::ReadOnlySlay [GOOD] >> ConsoleDumper::CoupleMerge [GOOD] >> ConsoleDumper::CoupleOverwrite [GOOD] >> ConsoleDumper::CoupleMergeOverwriteRepeated [GOOD] >> ConsoleDumper::ReverseMerge [GOOD] >> ConsoleDumper::ReverseOverwrite [GOOD] >> ConsoleDumper::ReverseMergeOverwriteRepeated >> ConsoleDumper::ReverseMergeOverwriteRepeated [GOOD] >> ConsoleDumper::Different [GOOD] >> ConsoleDumper::SimpleNode [GOOD] >> ConsoleDumper::JoinSimilar [GOOD] >> ConsoleDumper::DontJoinDifferent [GOOD] >> ConsoleDumper::SimpleTenant [GOOD] >> ConsoleDumper::SimpleNodeTenant [GOOD] >> ConsoleDumper::SimpleHostId >> DataGeneratorState::PortionProcessing |84.5%| [TA] $(B)/ydb/core/base/ut_auth/test-results/unittest/{meta.json ... results_accumulator.log} >> DataGeneratorState::PortionProcessing [GOOD] >> DataGeneratorState::SaveLoad [GOOD] >> ConsoleDumper::SimpleHostId [GOOD] >> ConsoleDumper::SimpleNodeId [GOOD] >> ConsoleDumper::DontJoinNodeTenant [GOOD] >> ConsoleDumper::JoinMultipleSimple [GOOD] >> ConsoleDumper::MergeNode [GOOD] >> ConsoleDumper::MergeOverwriteRepeatedNode [GOOD] >> ConsoleDumper::Ordering [GOOD] >> ConsoleDumper::IgnoreUnmanagedItems [GOOD] >> IncompatibilityRules::BasicPatternMatching [GOOD] >> IncompatibilityRules::EmptyLabelMatching [GOOD] >> IncompatibilityRules::UnsetLabelMatching [GOOD] >> IncompatibilityRules::AddAndRemoveRules [GOOD] >> IncompatibilityRules::RuleOverride [GOOD] >> IncompatibilityRules::SimpleIncompatibilityCheck [GOOD] >> IncompatibilityRules::DisableRules [GOOD] >> IncompatibilityRules::MergeRules [GOOD] >> IncompatibilityRules::ParseEmptyOverrides [GOOD] >> IncompatibilityRules::ParseDisableRules [GOOD] >> IncompatibilityRules::ParseCustomRules [GOOD] >> IncompatibilityRules::ParseUnsetMarker [GOOD] >> IncompatibilityRules::ParseEmptyMarker [GOOD] >> IncompatibilityRules::DeterministicOrdering [GOOD] >> IncompatibilityRules::ComplexMultiPatternRule [GOOD] >> IncompatibilityRules::IntegrationWithResolveAll [GOOD] >> IncompatibilityRules::CheckLabelsMapCompatibility [GOOD] >> IncompatibilityRules::CheckLabelsMapWithUnsetMarker [GOOD] >> IncompatibilityRules::ValueInOperator |84.5%| [TA] {BAZEL_UPLOAD} $(B)/ydb/core/base/ut_auth/test-results/unittest/{meta.json ... 
results_accumulator.log} >> IncompatibilityRules::ValueInOperator [GOOD] >> IncompatibilityRules::ValueInMultipleValues [GOOD] >> IncompatibilityRules::NegatedFlag [GOOD] >> IncompatibilityRules::NegatedWithValueIn [GOOD] >> IncompatibilityRules::ComplexRuleWithNegation [GOOD] >> IncompatibilityRules::MonostateMatchesAnyValue [GOOD] >> IncompatibilityRules::RealWorldScenario_DynamicNodesValidation [GOOD] >> IncompatibilityRules::ComplexValueInNegationCombination [GOOD] >> IncompatibilityRules::EdgeCase_EmptyValueIn [GOOD] >> IncompatibilityRules::EdgeCase_MultipleUnsetEmptyInValueIn [GOOD] >> IncompatibilityRules::BuiltInRules_RequiredLabels [GOOD] >> IncompatibilityRules::BuiltInRules_StaticNodes [GOOD] >> IncompatibilityRules::BuiltInRules_DynamicNodes [GOOD] >> IncompatibilityRules::BuiltInRules_CloudNodes [GOOD] >> IncompatibilityRules::BuiltInRules_SlotNodes [GOOD] >> IncompatibilityRules::BuiltInRules_DisableSpecificRules [GOOD] >> IncompatibilityRules::BuiltInRules_DisableAll |84.5%| [TA] {RESULT} $(B)/ydb/core/base/ut_auth/test-results/unittest/{meta.json ... results_accumulator.log} |84.5%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/vdisk/hullop/ut/unittest >> ReadBatcher::ReadBatcher [GOOD] >> IncompatibilityRules::BuiltInRules_DisableAll [GOOD] >> YamlConfig::CollectLabels [GOOD] >> YamlConfig::MaterializeSpecificConfig [GOOD] >> YamlConfig::MaterializeAllConfigSimple >> YamlConfig::MaterializeAllConfigSimple [GOOD] >> YamlConfig::MaterializeAllConfigs |84.5%| [TM] {BAZEL_UPLOAD} ydb/core/blobstorage/vdisk/hullop/ut/unittest >> TAllocatorSuite128::SlotPoolLimit |84.5%| [TA] {RESULT} $(B)/ydb/core/util/ut/test-results/unittest/{meta.json ... results_accumulator.log} >> TAllocatorSuite128::SlotPoolLimit [FAIL] >> TAllocatorSuite128::SlotPoolHugeAlloc >> TAllocatorSuite::AllocMemoryManually [FAIL] >> TIbvSuite::ListDevice [GOOD] >> TAllocatorSuite/WithAllPools::AllocMemoryWithMemPool/SlotMemPool |84.5%| [TA] $(B)/ydb/library/yql/dq/actors/spilling/ut/test-results/unittest/{meta.json ... results_accumulator.log} >> BlobTest::Flags_HasPartData [GOOD] >> BlobTest::Flags_HasWriteTimestamp [GOOD] >> BlobTest::Flags_HasCreateTimestamp [GOOD] >> BlobTest::Flags_HasUncompressedSize [GOOD] >> BlobTest::Flags_HasKinesisData [GOOD] >> ClientBlobSerialization::EmptyPayload [GOOD] >> ClientBlobSerialization::SerializeAndDeserializeAllScenarios |84.5%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/vdisk/hullop/ut/unittest >> TDelayedResponsesTests::Test [GOOD] |84.5%| [TA] {BAZEL_UPLOAD} $(B)/ydb/library/yql/dq/actors/spilling/ut/test-results/unittest/{meta.json ... results_accumulator.log} |84.5%| [TS] {asan, default-linux-x86_64, release} ydb/library/workload/benchmark_base/ut/unittest >> DataGeneratorState::SaveLoad [GOOD] |84.5%| [TM] {BAZEL_UPLOAD} ydb/core/blobstorage/vdisk/hullop/ut/unittest |84.5%| [TA] {RESULT} $(B)/ydb/library/yql/dq/actors/spilling/ut/test-results/unittest/{meta.json ... results_accumulator.log} |84.5%| [TS] {BAZEL_UPLOAD} ydb/library/workload/benchmark_base/ut/unittest |84.5%| [TS] {RESULT} ydb/library/workload/benchmark_base/ut/unittest >> YamlConfig::MaterializeAllConfigs [GOOD] >> YamlConfig::AppendVolatileConfig |84.5%| [TA] $(B)/ydb/core/testlib/actors/ut/test-results/unittest/{meta.json ... 
results_accumulator.log} ------- [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/ut_blobstorage/ut_read_only_pdisk/unittest >> BSCReadOnlyPDisk::ReadOnlySlay [GOOD] Test command err: RandomSeed# 13190487264679908910 2026-01-08T15:24:43.120223Z 1 00h01m14.361536s :BS_SYNCER ERROR: PDiskId# 1001 VDISK[82000000:_:0:0:0]: (2181038080) TVDiskGuidRecoveryActor: DECISION: [Decision# LostData SubsequentFailure# 0] 2026-01-08T15:24:43.122903Z 1 00h01m14.361536s :BS_SYNCER ERROR: PDiskId# 1001 VDISK[82000000:_:0:0:0]: (2181038080) TVDiskGuidRecoveryActor: FINISH: [Decision# LostData Guid# 13228897087932561749] 2026-01-08T15:24:43.167024Z 1 00h01m14.361536s :BS_SYNCER ERROR: PDiskId# 1001 VDISK[82000000:_:0:0:0]: (2181038080) THullOsirisActor: FINISH: BlobsResurrected# 0 PartsResurrected# 0 >> TAllocatorSuite128::SlotPoolTinyAlloc >> YamlConfig::AppendVolatileConfig [GOOD] >> YamlConfig::AppendAndResolve [GOOD] >> YamlConfig::GetMetadata [GOOD] >> YamlConfig::ReplaceMetadata [GOOD] >> YamlConfigParser::Iterate [GOOD] >> YamlConfigParser::ProtoBytesFieldDoesNotDecodeBase64 [GOOD] >> YamlConfigParser::PdiskCategoryFromString [GOOD] >> YamlConfigParser::AllowDefaultHostConfigId [GOOD] >> YamlConfigParser::IncorrectHostConfigIdFails [GOOD] >> YamlConfigParser::NoMixedHostConfigIds [GOOD] >> YamlConfigProto2Yaml::StorageConfig [GOOD] >> YamlConfigResolveUnique::NotUniqueSelectors >> FormatCSV::Instants [GOOD] >> FormatCSV::EmptyData [GOOD] >> FormatCSV::Common >> TSubgroupPartLayoutTest::CountEffectiveReplicas3of4 [GOOD] >> TSubgroupPartLayoutTest::CountEffectiveReplicas4of4 >> TAllocatorSuite128::SlotPoolTinyAlloc [FAIL] >> TAllocatorSuite128::SlotPoolHugeAfterTinyAlloc [FAIL] >> TAllocatorSuite128::AllocationWithReclaimFourThreads >> YamlConfigResolveUnique::NotUniqueSelectors [GOOD] >> YamlConfigResolveUnique::AllTestConfigs >> FormatCSV::Common [GOOD] >> FormatCSV::Strings [GOOD] >> FormatCSV::Nulls [GOOD] |84.5%| [TA] {BAZEL_UPLOAD} $(B)/ydb/core/testlib/actors/ut/test-results/unittest/{meta.json ... results_accumulator.log} |84.5%| [TM] {BAZEL_UPLOAD} ydb/core/blobstorage/ut_blobstorage/ut_read_only_pdisk/unittest >> MetaCache::BasicForwarding |84.5%| [TA] {RESULT} $(B)/ydb/core/testlib/actors/ut/test-results/unittest/{meta.json ... 
results_accumulator.log} >> TAllocatorSuite/WithAllPools::AllocMemoryWithMemPool/DummyMemPool >> TBsVDiskManyPutGetCheckSize::ManyPutGetCheckSize [GOOD] >> MetaCache::BasicForwarding [GOOD] >> MetaCache::TimeoutFallback [GOOD] >> YamlConfigResolveUnique::AllTestConfigs [GOOD] >> ConfigValidation::SameStaticGroup [GOOD] >> ConfigValidation::StaticGroupSizesGrow [GOOD] >> ConfigValidation::StaticGroupSizesShrink [GOOD] >> ConfigValidation::VDiskChanged [GOOD] >> ConfigValidation::TooManyVDiskChanged [GOOD] >> DatabaseConfigValidation::AllowedFields >> TVDiskDefrag::HugeHeapDefragmentationRequired [GOOD] >> DatabaseConfigValidation::AllowedFields [GOOD] >> DatabaseConfigValidation::NotAllowedFields [GOOD] >> StateStorageConfigValidation::Empty [GOOD] >> StateStorageConfigValidation::Good [GOOD] >> StateStorageConfigValidation::NToSelect [GOOD] >> StateStorageConfigValidation::WriteOnly [GOOD] >> StateStorageConfigValidation::Disabled [GOOD] >> StateStorageConfigValidation::DisabledGood [GOOD] >> StateStorageConfigValidation::CanDisableAndChange [GOOD] >> StateStorageConfigValidation::CanChangeDisabled [GOOD] >> StateStorageConfigValidation::ChangesNotAllowed [GOOD] >> StateStorageConfigValidation::ValidateConfigSelfManagement [GOOD] >> StateStorageConfigValidation::ValidateConfigDomainEmpty [GOOD] >> StateStorageConfigValidation::ValidateConfigSSId [GOOD] >> StateStorageConfigValidation::ValidateConfigBad [GOOD] >> StateStorageConfigValidation::ValidateConfigValidatesStateStorage [GOOD] >> StateStorageConfigValidation::ValidateConfigGood [GOOD] >> StateStorageConfigValidation::ValidateConfigExplicitGood [GOOD] >> StateStorageConfigValidation::ValidateConfigExplicitBad [GOOD] >> DoubleIndexedTests::TestErase [GOOD] >> DoubleIndexedTests::TestFind [GOOD] >> DoubleIndexedTests::TestMerge [GOOD] >> DoubleIndexedTests::TestUpsertByBothKeys [GOOD] >> DoubleIndexedTests::TestUpsertBySingleKey [GOOD] >> TTimeGridTest::TimeGrid [GOOD] >> TStreamRequestUnitsCalculatorTest::Basic [GOOD] ------- [TS] {asan, default-linux-x86_64, release} ydb/core/io_formats/arrow/scheme/ut/unittest >> FormatCSV::Nulls [GOOD] Test command err: 12000000 Cannot read CSV: no columns specified Cannot read CSV: Invalid: Empty CSV file d'Artagnan '"' Jeanne d'Arc "'" 'd'Artagnan' ''"'' 'Jeanne d'Arc' '"'"' d'Artagnan '"' Jeanne d'Arc "'" src: ,"","" ,"","" ,, parsed: ᴺᵁᴸᴸ,, ᴺᵁᴸᴸ,, ᴺᵁᴸᴸ,ᴺᵁᴸᴸ,ᴺᵁᴸᴸ src: ,"","" ,"","" ,, parsed: ᴺᵁᴸᴸ,, ᴺᵁᴸᴸ,, ᴺᵁᴸᴸ,ᴺᵁᴸᴸ,ᴺᵁᴸᴸ src: \N,"","" \N,"\N","\N" \N,\N,\N parsed: ᴺᵁᴸᴸ,, ᴺᵁᴸᴸ,\N,\N ᴺᵁᴸᴸ,ᴺᵁᴸᴸ,ᴺᵁᴸᴸ src: NULL,"","" NULL,"NULL","NULL" NULL,NULL,NULL parsed: ᴺᵁᴸᴸ,, ᴺᵁᴸᴸ,NULL,NULL ᴺᵁᴸᴸ,ᴺᵁᴸᴸ,ᴺᵁᴸᴸ |84.5%| [TS] {BAZEL_UPLOAD} ydb/core/io_formats/arrow/scheme/ut/unittest >> CpuCountTest::TestWithoutSharedThread [GOOD] >> Mvp::OpenIdConnectRequestWithIamTokenYandex >> TAllocatorSuite/WithAllPools::AllocMemoryWithMemPoolAsync/SlotMemPool |84.5%| [TS] {RESULT} ydb/core/io_formats/arrow/scheme/ut/unittest >> Mvp::OpenIdConnectRequestWithIamTokenYandex [GOOD] >> Mvp::OpenIdConnectRequestWithIamTokenNebius [GOOD] >> CpuCountTest::TestWithSharedThread [GOOD] >> Mvp::OpenIdConnectNonAuthorizeRequestWithOptionMethodYandex [GOOD] >> HarmonizerTests::TestHarmonizerCreation [GOOD] >> Mvp::OpenIdConnectNonAuthorizeRequestWithOptionMethodNebius [GOOD] >> HarmonizerTests::TestAddPool [GOOD] >> Mvp::OpenIdConnectSessionServiceCheckValidCookieYandex [GOOD] >> HarmonizerTests::TestHarmonize [GOOD] >> HarmonizerTests::TestToNeedyNextToHoggish [GOOD] >> HarmonizerTests::TestToNeedyNextToStarved [GOOD] >> 
Mvp::OpenIdConnectSessionServiceCheckValidCookieNebius >> HarmonizerTests::TestExchangeThreads [GOOD] >> HarmonizerTests::TestThreadCounts [GOOD] >> HarmonizerTests::TestSharedHalfThreads [GOOD] >> HarmonizerTests::TestSharedHalfThreadsStarved [GOOD] >> SharedInfoTests::TestInitialization [GOOD] >> SharedInfoTests::TestPull [GOOD] >> SharedInfoTests::TestCpuConsumptionCalculation [GOOD] >> ValueHistoryTests::TestConstructorAndInitialization [GOOD] >> ValueHistoryTests::TestBufferSizePowerOfTwo [GOOD] >> ValueHistoryTests::TestCircularBufferFilling [GOOD] ------- [TS] {asan, default-linux-x86_64, release} ydb/mvp/meta/ut/unittest >> MetaCache::TimeoutFallback [GOOD] Test command err: 2026-01-08T15:24:59.055014Z :HTTP INFO: http_proxy_acceptor.cpp:90: Listening on http://[::]:18965 2026-01-08T15:24:59.055420Z :HTTP INFO: http_proxy_acceptor.cpp:90: Listening on http://[::]:11492 2026-01-08T15:24:59.055892Z :HTTP DEBUG: http_proxy.cpp:22: Connection created [1:14:2061] 2026-01-08T15:24:59.055949Z :HTTP DEBUG: http_proxy_outgoing.cpp:180: resolving 127.0.0.1:18965 2026-01-08T15:24:59.056146Z :HTTP DEBUG: http_proxy_outgoing.cpp:375: (#11,127.0.0.1:18965) connecting... 2026-01-08T15:24:59.056303Z :HTTP DEBUG: http_proxy_outgoing.cpp:321: (#11,127.0.0.1:18965) outgoing connection opened 2026-01-08T15:24:59.056346Z :HTTP DEBUG: http_proxy_outgoing.cpp:323: (#11,127.0.0.1:18965) <- (GET /server) 2026-01-08T15:24:59.056640Z :HTTP DEBUG: http_proxy_incoming.cpp:97: (#12,[::ffff:127.0.0.1]:40234) incoming connection opened 2026-01-08T15:24:59.056749Z :HTTP DEBUG: http_proxy_incoming.cpp:170: (#12,[::ffff:127.0.0.1]:40234) -> (GET /server) 2026-01-08T15:24:59.056866Z :HTTP DEBUG: meta_cache.cpp:231: Updating ownership http://127.0.0.1:11492 with deadline 2026-01-08T15:25:59.056847Z 2026-01-08T15:24:59.056908Z :HTTP DEBUG: meta_cache.cpp:237: SetRefreshTime "/server" to 2026-01-08T15:25:59.056847Z (+1767885959.056847s) 2026-01-08T15:24:59.056970Z :HTTP DEBUG: meta_cache.cpp:198: IncomingForward /server to http://127.0.0.1:11492 timeout 30.000000s 2026-01-08T15:24:59.057180Z :HTTP DEBUG: http_proxy.cpp:22: Connection created [1:16:2063] 2026-01-08T15:24:59.057212Z :HTTP DEBUG: http_proxy_outgoing.cpp:180: resolving 127.0.0.1:11492 2026-01-08T15:24:59.057300Z :HTTP DEBUG: http_proxy_outgoing.cpp:375: (#13,127.0.0.1:11492) connecting... 
2026-01-08T15:24:59.057379Z :HTTP DEBUG: http_proxy_outgoing.cpp:321: (#13,127.0.0.1:11492) outgoing connection opened 2026-01-08T15:24:59.057408Z :HTTP DEBUG: http_proxy_outgoing.cpp:323: (#13,127.0.0.1:11492) <- (GET /server) 2026-01-08T15:24:59.057555Z :HTTP DEBUG: http_proxy_incoming.cpp:97: (#14,[::ffff:127.0.0.1]:57314) incoming connection opened 2026-01-08T15:24:59.057647Z :HTTP DEBUG: http_proxy_incoming.cpp:170: (#14,[::ffff:127.0.0.1]:57314) -> (GET /server) 2026-01-08T15:24:59.057858Z :HTTP DEBUG: http_proxy_incoming.cpp:304: (#14,[::ffff:127.0.0.1]:57314) <- (200 Found, 6 bytes) 2026-01-08T15:24:59.057952Z :HTTP DEBUG: http_proxy_incoming.cpp:365: (#14,[::ffff:127.0.0.1]:57314) connection closed 2026-01-08T15:24:59.058131Z :HTTP DEBUG: http_proxy_outgoing.cpp:101: (#13,127.0.0.1:11492) -> (200 Found, 6 bytes) 2026-01-08T15:24:59.058212Z :HTTP DEBUG: http_proxy_outgoing.cpp:110: (#13,127.0.0.1:11492) connection closed 2026-01-08T15:24:59.058477Z :HTTP DEBUG: meta_cache.cpp:146: Cache received successfull (200) response for /server 2026-01-08T15:24:59.058644Z :HTTP DEBUG: http_proxy_incoming.cpp:304: (#12,[::ffff:127.0.0.1]:40234) <- (200 Found, 6 bytes) 2026-01-08T15:24:59.058718Z :HTTP DEBUG: http_proxy_incoming.cpp:365: (#12,[::ffff:127.0.0.1]:40234) connection closed 2026-01-08T15:24:59.058808Z :HTTP DEBUG: http_proxy_outgoing.cpp:101: (#11,127.0.0.1:18965) -> (200 Found, 6 bytes) 2026-01-08T15:24:59.058845Z :HTTP DEBUG: http_proxy_outgoing.cpp:110: (#11,127.0.0.1:18965) connection closed 2026-01-08T15:24:59.058922Z :HTTP DEBUG: http_proxy.cpp:131: Connection closed [1:16:2063] 2026-01-08T15:24:59.059037Z :HTTP DEBUG: http_proxy.cpp:131: Connection closed [1:14:2061] 2026-01-08T15:24:59.069366Z :HTTP INFO: http_proxy_acceptor.cpp:90: Listening on http://[::]:10689 2026-01-08T15:24:59.069793Z :HTTP INFO: http_proxy_acceptor.cpp:90: Listening on http://[::]:5780 2026-01-08T15:24:59.070192Z :HTTP DEBUG: http_proxy.cpp:22: Connection created [2:14:2061] 2026-01-08T15:24:59.070253Z :HTTP DEBUG: http_proxy_outgoing.cpp:180: resolving 127.0.0.1:10689 2026-01-08T15:24:59.070440Z :HTTP DEBUG: http_proxy_outgoing.cpp:375: (#11,127.0.0.1:10689) connecting... 2026-01-08T15:24:59.070599Z :HTTP DEBUG: http_proxy_outgoing.cpp:321: (#11,127.0.0.1:10689) outgoing connection opened 2026-01-08T15:24:59.070659Z :HTTP DEBUG: http_proxy_outgoing.cpp:323: (#11,127.0.0.1:10689) <- (GET /server) 2026-01-08T15:24:59.071012Z :HTTP DEBUG: http_proxy_incoming.cpp:97: (#12,[::ffff:127.0.0.1]:49640) incoming connection opened 2026-01-08T15:24:59.071140Z :HTTP DEBUG: http_proxy_incoming.cpp:170: (#12,[::ffff:127.0.0.1]:49640) -> (GET /server) 2026-01-08T15:24:59.071340Z :HTTP DEBUG: meta_cache.cpp:231: Updating ownership http://127.0.0.1:5780 with deadline 2026-01-08T15:34:59.071308Z 2026-01-08T15:24:59.071388Z :HTTP DEBUG: meta_cache.cpp:237: SetRefreshTime "/server" to 2026-01-08T15:34:59.071308Z (+1767886499.071308s) 2026-01-08T15:24:59.071459Z :HTTP DEBUG: meta_cache.cpp:198: IncomingForward /server to http://127.0.0.1:5780 timeout 30.000000s 2026-01-08T15:24:59.071604Z :HTTP DEBUG: http_proxy.cpp:22: Connection created [2:16:2063] 2026-01-08T15:24:59.071659Z :HTTP DEBUG: http_proxy_outgoing.cpp:180: resolving 127.0.0.1:5780 2026-01-08T15:24:59.071778Z :HTTP DEBUG: http_proxy_outgoing.cpp:375: (#13,127.0.0.1:5780) connecting... 
2026-01-08T15:24:59.071881Z :HTTP DEBUG: http_proxy_outgoing.cpp:321: (#13,127.0.0.1:5780) outgoing connection opened 2026-01-08T15:24:59.071938Z :HTTP DEBUG: http_proxy_outgoing.cpp:323: (#13,127.0.0.1:5780) <- (GET /server) 2026-01-08T15:24:59.072064Z :HTTP ERROR: http_proxy_outgoing.cpp:124: (#13,127.0.0.1:5780) connection closed with error: Connection timed out 2026-01-08T15:24:59.072336Z :HTTP WARN: meta_cache.cpp:151: Cache received failed response with error "Connection timed out" for /server - retrying locally 2026-01-08T15:24:59.072422Z :HTTP DEBUG: http_proxy.cpp:131: Connection closed [2:16:2063] 2026-01-08T15:24:59.072554Z :HTTP DEBUG: http_proxy_incoming.cpp:97: (#13,[::ffff:127.0.0.1]:59314) incoming connection opened 2026-01-08T15:24:59.072673Z :HTTP DEBUG: http_proxy_incoming.cpp:170: (#13,[::ffff:127.0.0.1]:59314) -> (GET /server) 2026-01-08T15:24:59.072734Z :HTTP DEBUG: http_proxy_incoming.cpp:208: (#13,[::ffff:127.0.0.1]:59314) connection closed 2026-01-08T15:24:59.083501Z :HTTP DEBUG: http_proxy_incoming.cpp:304: (#12,[::ffff:127.0.0.1]:49640) <- (200 Found, 6 bytes) 2026-01-08T15:24:59.083998Z :HTTP DEBUG: http_proxy_incoming.cpp:365: (#12,[::ffff:127.0.0.1]:49640) connection closed 2026-01-08T15:24:59.084380Z :HTTP DEBUG: http_proxy_outgoing.cpp:101: (#11,127.0.0.1:10689) -> (200 Found, 6 bytes) 2026-01-08T15:24:59.084466Z :HTTP DEBUG: http_proxy_outgoing.cpp:110: (#11,127.0.0.1:10689) connection closed 2026-01-08T15:24:59.084820Z :HTTP DEBUG: http_proxy.cpp:131: Connection closed [2:14:2061] >> TArrowPushDown::SimplePushDown [GOOD] >> TArrowPushDown::FilterEverything [GOOD] >> TArrowPushDown::MatchSeveralRowGroups [GOOD] >> ValueHistoryTests::TestBasicRegistration [GOOD] >> Mvp::OpenIdConnectSessionServiceCheckValidCookieNebius [GOOD] >> ValueHistoryTests::TestRegistrationWithBackwardTime [GOOD] >> ValueHistoryTests::TestRegistrationWithLargeTimeGap [GOOD] >> ValueHistoryTests::TestRegistrationOnSecondBoundary [GOOD] >> ValueHistoryTests::TestAccumulationWithinSecond [GOOD] >> ValueHistoryTests::TestTransitionBetweenSeconds [GOOD] >> ValueHistoryTests::TestGetAvgPartForLastSeconds [GOOD] >> ValueHistoryTests::TestGetAvgPartWithTail [GOOD] >> ValueHistoryTests::TestGetAvgPartWithIncompleteData [GOOD] >> Mvp::OpenIdConnectProxyOnHttpsHost [GOOD] >> ValueHistoryTests::TestGetAvgPartOnBufferBoundary [GOOD] >> Mvp::OpenIdConnectFixLocationHeader >> ValueHistoryTests::TestGetMaxForLastSeconds [GOOD] >> ValueHistoryTests::TestGetMax [GOOD] >> ValueHistoryTests::TestGetMaxWithNegativeValues [GOOD] >> ValueHistoryTests::TestGetMinForLastSeconds [GOOD] >> ValueHistoryTests::TestGetMin [GOOD] >> ValueHistoryTests::TestGetMinWithNegativeValues [GOOD] >> ValueHistoryTests::TestBufferOverflow [GOOD] >> ValueHistoryTests::TestSmallTimeIntervals [GOOD] >> ValueHistoryTests::TestLargeTimeIntervals [GOOD] >> ValueHistoryTests::TestPrecisionOverLongDuration [GOOD] >> ValueHistoryTests::TestRoundingDuringTimestampConversion [GOOD] >> ValueHistoryTests::TestAverageCalculationPrecision [GOOD] >> ParseStats::ParseWithSources [GOOD] >> ParseStats::ParseJustOutput [GOOD] >> ParseStats::ParseMultipleGraphsV1 [GOOD] >> ParseStats::ParseMultipleGraphsV2 [GOOD] >> TBsLocalRecovery::ChaoticWriteRestart [GOOD] >> TBsLocalRecovery::ChaoticWriteRestartHuge [GOOD] |84.5%| [TS] {BAZEL_UPLOAD} ydb/mvp/meta/ut/unittest |84.5%| [TS] {RESULT} ydb/mvp/meta/ut/unittest |84.5%| [TS] {asan, default-linux-x86_64, release} ydb/core/blobstorage/vdisk/defrag/ut/unittest >> 
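The MetaCache::TimeoutFallback trace above exercises both paths of the meta cache: a successful forward of GET /server to the endpoint that owns the path, and a second run where the outgoing connection times out and the cache retries the request locally ("Cache received failed response with error "Connection timed out" for /server - retrying locally"). A minimal sketch of that forward-with-local-fallback shape, with illustrative names (urllib as the transport and the handler signature are assumptions, not the ydb/mvp API):

import urllib.error
import urllib.request

def forward_or_handle_locally(owner_url, handle_locally, timeout_s=30.0):
    # Try the endpoint that owns the cached path first; IncomingForward in
    # the trace above uses a 30 s timeout for the same step.
    try:
        with urllib.request.urlopen(owner_url, timeout=timeout_s) as resp:
            return resp.status, resp.read()
    except (urllib.error.URLError, TimeoutError):
        # Mirrors "retrying locally" in the trace above.
        return handle_locally()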
TVDiskDefrag::HugeHeapDefragmentationRequired [GOOD] >> TBsLocalRecovery::ChaoticWriteRestartHugeDecreased >> TLsnMngrTests::AllocLsnForLocalUse10Threads [GOOD] >> TOutOfSpaceStateTests::TestLocal [GOOD] >> TOutOfSpaceStateTests::TestGlobal [GOOD] |84.5%| [TM] {asan, default-linux-x86_64, release} ydb/core/tablet_flat/ut_large/unittest |84.5%| [TS] {BAZEL_UPLOAD} ydb/core/blobstorage/vdisk/defrag/ut/unittest >> Mvp::OpenIdConnectFixLocationHeader [GOOD] >> Mvp::OpenIdConnectExchangeNebius |84.5%| [TS] {asan, default-linux-x86_64, release} ydb/core/tx/scheme_board/ut_double_indexed/unittest >> DoubleIndexedTests::TestUpsertBySingleKey [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/ut_vdisk/unittest >> TBsVDiskManyPutGetCheckSize::ManyPutGetCheckSize [GOOD] Test command err: 2026-01-08T15:24:58.765696Z :BS_VDISK_GET CRIT: query_base.h:102: PDiskId# 1 VDISK[0:_:0:0:0]: (0) TEvVGetResult: Result message is too large; size# 67108001 orig# {ExtrQuery# [5000:1:0:0:0:100000:1] sh# 257 sz# 99743 c# 0}{ExtrQuery# [5000:1:1:0:0:100000:1] sh# 257 sz# 99743 c# 1}{ExtrQuery# [5000:1:2:0:0:100000:1] sh# 257 sz# 99743 c# 2}{ExtrQuery# [5000:1:3:0:0:100000:1] sh# 257 sz# 99743 c# 3}{ExtrQuery# [5000:1:4:0:0:100000:1] sh# 257 sz# 99743 c# 4}{ExtrQuery# [5000:1:5:0:0:100000:1] sh# 257 sz# 99743 c# 5}{ExtrQuery# [5000:1:6:0:0:100000:1] sh# 257 sz# 99743 c# 6}{ExtrQuery# [5000:1:7:0:0:100000:1] sh# 257 sz# 99743 c# 7}{ExtrQuery# [5000:1:8:0:0:100000:1] sh# 257 sz# 99743 c# 8}{ExtrQuery# [5000:1:9:0:0:100000:1] sh# 257 sz# 99743 c# 9}{ExtrQuery# [5000:1:10:0:0:100000:1] sh# 257 sz# 99743 c# 10}{ExtrQuery# [5000:1:11:0:0:100000:1] sh# 257 sz# 99743 c# 11}{ExtrQuery# [5000:1:12:0:0:100000:1] sh# 257 sz# 99743 c# 12}{ExtrQuery# [5000:1:13:0:0:100000:1] sh# 257 sz# 99743 c# 13}{ExtrQuery# [5000:1:14:0:0:100000:1] sh# 257 sz# 99743 c# 14}{ExtrQuery# [5000:1:15:0:0:100000:1] sh# 257 sz# 99743 c# 15}{ExtrQuery# [5000:1:16:0:0:100000:1] sh# 257 sz# 99743 c# 16}{ExtrQuery# [5000:1:17:0:0:100000:1] sh# 257 sz# 99743 c# 17}{ExtrQuery# [5000:1:18:0:0:100000:1] sh# 257 sz# 99743 c# 18}{ExtrQuery# [5000:1:19:0:0:100000:1] sh# 257 sz# 99743 c# 19}{ExtrQuery# [5000:1:20:0:0:100000:1] sh# 257 sz# 99743 c# 20}{ExtrQuery# [5000:1:21:0:0:100000:1] sh# 257 sz# 99743 c# 21}{ExtrQuery# [5000:1:22:0:0:100000:1] sh# 257 sz# 99743 c# 22}{ExtrQuery# [5000:1:23:0:0:100000:1] sh# 257 sz# 99743 c# 23}{ExtrQuery# [5000:1:24:0:0:100000:1] sh# 257 sz# 99743 c# 24}{ExtrQuery# [5000:1:25:0:0:100000:1] sh# 257 sz# 99743 c# 25}{ExtrQuery# [5000:1:26:0:0:100000:1] sh# 257 sz# 99743 c# 26}{ExtrQuery# [5000:1:27:0:0:100000:1] sh# 257 sz# 99743 c# 27}{ExtrQuery# [5000:1:28:0:0:100000:1] sh# 257 sz# 99743 c# 28}{ExtrQuery# [5000:1:29:0:0:100000:1] sh# 257 sz# 99743 c# 29}{ExtrQuery# [5000:1:30:0:0:100000:1] sh# 257 sz# 99743 c# 30}{ExtrQuery# [5000:1:31:0:0:100000:1] sh# 257 sz# 99743 c# 31}{ExtrQuery# [5000:1:32:0:0:100000:1] sh# 257 sz# 99743 c# 32}{ExtrQuery# [5000:1:33:0:0:100000:1] sh# 257 sz# 99743 c# 33}{ExtrQuery# [5000:1:34:0:0:100000:1] sh# 257 sz# 99743 c# 34}{ExtrQuery# [5000:1:35:0:0:100000:1] sh# 257 sz# 99743 c# 35}{ExtrQuery# [5000:1:36:0:0:100000:1] sh# 257 sz# 99743 c# 36}{ExtrQuery# [5000:1:37:0:0:100000:1] sh# 257 sz# 99743 c# 37}{ExtrQuery# [5000:1:38:0:0:100000:1] sh# 257 sz# 99743 c# 38}{ExtrQuery# [5000:1:39:0:0:100000:1] sh# 257 sz# 99743 c# 39}{ExtrQuery# [5000:1:40:0:0:100000:1] sh# 257 sz# 99743 c# 40}{ExtrQuery# [5000:1:41:0:0:100000:1] sh# 257 sz# 99743 c# 41}{ExtrQuery# 
[5000:1:42:0:0:100000:1] sh# 257 sz# 99743 c# 42}
... {610 ExtrQuery entries for c# 43 through c# 652 elided: each repeats the same pattern, sh# 257 sz# 99743 over blob [5000:1:N:0:0:100000:1], and the source log itself truncates part of this range} ...
{ExtrQuery# [5000:1:653:0:0:100000:1] 
sh# 257 sz# 99743 c# 653}{ExtrQuery# [5000:1:654:0:0:100000:1] sh# 257 sz# 99743 c# 654}{ExtrQuery# [5000:1:655:0:0:100000:1] sh# 257 sz# 99743 c# 655}{ExtrQuery# [5000:1:656:0:0:100000:1] sh# 257 sz# 99743 c# 656}{ExtrQuery# [5000:1:657:0:0:100000:1] sh# 257 sz# 99743 c# 657}{ExtrQuery# [5000:1:658:0:0:100000:1] sh# 257 sz# 99743 c# 658}{ExtrQuery# [5000:1:659:0:0:100000:1] sh# 257 sz# 99743 c# 659}{ExtrQuery# [5000:1:660:0:0:100000:1] sh# 257 sz# 99743 c# 660}{ExtrQuery# [5000:1:661:0:0:100000:1] sh# 257 sz# 99743 c# 661}{ExtrQuery# [5000:1:662:0:0:100000:1] sh# 257 sz# 99743 c# 662}{ExtrQuery# [5000:1:663:0:0:100000:1] sh# 257 sz# 99743 c# 663}{ExtrQuery# [5000:1:664:0:0:100000:1] sh# 257 sz# 99743 c# 664}{ExtrQuery# [5000:1:665:0:0:100000:1] sh# 257 sz# 99743 c# 665}{ExtrQuery# [5000:1:666:0:0:100000:1] sh# 257 sz# 99743 c# 666}{ExtrQuery# [5000:1:667:0:0:100000:1] sh# 257 sz# 99743 c# 667}{ExtrQuery# [5000:1:668:0:0:100000:1] sh# 257 sz# 99743 c# 668}{ExtrQuery# [5000:1:669:0:0:100000:1] sh# 257 sz# 99743 c# 669}{ExtrQuery# [5000:1:670:0:0:100000:1] sh# 257 sz# 99743 c# 670}{ExtrQuery# [5000:1:671:0:0:100000:1] sh# 257 sz# 99743 c# 671}{ExtrQuery# [5000:1:672:0:0:17027:1] sh# 257 sz# 16770 c# 672} {MsgQoS} Notify# 0 Internals# 0 TabletId# 0 AcquireBlockedGeneration# 0 ForceBlockedGeneration# 0}; VDISK CAN NOT REPLY ON TEvVGet REQUEST |84.5%| [TM] {BAZEL_UPLOAD} ydb/core/tablet_flat/ut_large/unittest |84.5%| [TS] {RESULT} ydb/core/blobstorage/vdisk/defrag/ut/unittest |84.5%| [TM] {RESULT} ydb/core/tablet_flat/ut_large/unittest |84.6%| [TS] {asan, default-linux-x86_64, release} ydb/core/config/validation/ut/unittest >> StateStorageConfigValidation::ValidateConfigExplicitBad [GOOD] |84.6%| [TS] {asan, default-linux-x86_64, release} ydb/core/metering/ut/unittest >> TStreamRequestUnitsCalculatorTest::Basic [GOOD] |84.6%| [TS] {RESULT} ydb/core/tx/scheme_board/ut_double_indexed/unittest >> Mvp::OpenIdConnectExchangeNebius [GOOD] >> Mvp::OpenIdConnectSessionServiceCheckAuthorizationFail >> Mirror3of4::ReplicationSmall |84.6%| [TS] {RESULT} ydb/core/metering/ut/unittest |84.6%| [TS] {BAZEL_UPLOAD} ydb/core/tx/scheme_board/ut_double_indexed/unittest |84.6%| [TM] {BAZEL_UPLOAD} ydb/core/blobstorage/ut_vdisk/unittest |84.6%| [TS] {BAZEL_UPLOAD} ydb/core/metering/ut/unittest >> TAllocatorSuite/WithAllPools::AllocMemoryWithMemPoolAsync/DummyMemPool |84.6%| [TS] {BAZEL_UPLOAD} ydb/core/config/validation/ut/unittest ------- [TS] {asan, default-linux-x86_64, release} ydb/library/yaml_config/ut/unittest >> YamlConfigResolveUnique::AllTestConfigs [GOOD] Test command err: host_config: "[{\"drive\":[{\"type\":\"NVME\",\"path\":\"\\/dev\\/disk\\/by-partlabel\\/kikimr_nvme_01\"},{\"type\":\"NVME\",\"path\":\"\\/dev\\/disk\\/by-partlabel\\/kikimr_nvme_02\"}],\"host_config_id\":1},{\"drive\":[{\"type\":\"SSD\",\"path\":\"\\/dev\\/disk\\/by-partlabel\\/kikimr_nvme_01\"}],\"host_config_id\":2}]" "\/dev\/disk\/by-partlabel\/kikimr_nvme_02" host_config: "[{\"drive\":[{\"type\":\"NVME\",\"path\":\"\\/dev\\/disk\\/by-partlabel\\/kikimr_nvme_01\"},{\"type\":\"NVME\",\"path\":\"\\/dev\\/disk\\/by-partlabel\\/kikimr_nvme_02\"}],\"host_config_id\":1},{\"drive\":[{\"type\":\"SSD\",\"path\":\"\\/dev\\/disk\\/by-partlabel\\/kikimr_nvme_01\"}],\"host_config_id\":2}]" host_configs: - host_config_id: 1 drive: - path: /dev/disk/by-partlabel/kikimr_nvme_01 type: NVME expected_slot_count: 9 - path: /dev/disk/by-partlabel/kikimr_nvme_02 type: NVME expected_slot_count: 9 - host_config_id: 2 drive: - path: 
/dev/disk/by-partlabel/kikimr_nvme_01 type: SSD expected_slot_count: 9 hosts: - host: sas8-6954.search.yandex.net port: 19000 host_config_id: 1 - host: sas8-6955.search.yandex.net port: 19000 host_config_id: 2 item_config_generation: 0 |84.6%| [TS] {BAZEL_UPLOAD} ydb/library/yaml_config/ut/unittest >> Mvp::OpenIdConnectSessionServiceCheckAuthorizationFail [GOOD] >> Mvp::OpenIdConnectFullAuthorizationFlow |84.6%| [TM] {asan, default-linux-x86_64, release} ydb/library/actors/core/harmonizer/ut/unittest >> ValueHistoryTests::TestAverageCalculationPrecision [GOOD] |84.6%| [TM] {asan, default-linux-x86_64, release} ydb/core/fq/libs/control_plane_storage/internal/ut/unittest >> ParseStats::ParseMultipleGraphsV2 [GOOD] |84.6%| [TS] {asan, default-linux-x86_64, release} ydb/library/yql/providers/s3/actors/ut/unittest >> TArrowPushDown::MatchSeveralRowGroups [GOOD] |84.6%| [TM] {BAZEL_UPLOAD} ydb/library/actors/core/harmonizer/ut/unittest |84.6%| [TM] {BAZEL_UPLOAD} ydb/core/fq/libs/control_plane_storage/internal/ut/unittest |84.6%| [TS] {BAZEL_UPLOAD} ydb/library/yql/providers/s3/actors/ut/unittest >> ArrowInferenceTest::csv_simple >> Mvp::OpenIdConnectFullAuthorizationFlow [GOOD] >> Mvp::OpenIdConnectFullAuthorizationFlowAjax >> ArrowInferenceTest::csv_simple [GOOD] >> ArrowInferenceTest::tsv_simple [GOOD] >> ArrowInferenceTest::tsv_empty [GOOD] >> ArrowInferenceTest::broken_json |84.6%| [TS] {RESULT} ydb/core/config/validation/ut/unittest |84.6%| [TM] {RESULT} ydb/core/fq/libs/control_plane_storage/internal/ut/unittest |84.6%| [TS] {RESULT} ydb/library/yql/providers/s3/actors/ut/unittest >> ArrowInferenceTest::broken_json [GOOD] >> ArrowInferenceTest::empty_json_each_row [GOOD] >> ArrowInferenceTest::empty_json_list [GOOD] >> ArrowInferenceTest::broken_json_list [GOOD] >> XmlBuilderTest::WritesProperly [GOOD] >> XmlBuilderTest::MacroBuilder [GOOD] |84.6%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/vdisk/common/ut/unittest >> TOutOfSpaceStateTests::TestGlobal [GOOD] |84.6%| [TM] {BAZEL_UPLOAD} ydb/core/blobstorage/vdisk/common/ut/unittest >> Mvp::OpenIdConnectFullAuthorizationFlowAjax [GOOD] >> Mvp::OpenIdConnectWrongStateAuthorizationFlow |84.6%| [TM] {RESULT} ydb/library/actors/core/harmonizer/ut/unittest >> TCollectingS3ListingStrategyTests::IfNoIssuesOccursShouldReturnCollectedPaths [GOOD] >> TCollectingS3ListingStrategyTests::IfThereAreMoreRecordsThanSpecifiedByLimitShouldReturnError [GOOD] >> TCollectingS3ListingStrategyTests::IfAnyIterationReturnIssueThanWholeStrategyShouldReturnIt [GOOD] >> TCollectingS3ListingStrategyTests::IfExceptionIsReturnedFromIteratorThanItShouldCovertItToIssue [GOOD] >> Mvp::OpenIdConnectWrongStateAuthorizationFlow [GOOD] >> Mvp::OpenIdConnectWrongStateAuthorizationFlowAjax |84.6%| [TS] {RESULT} ydb/library/yaml_config/ut/unittest >> Mvp::OpenIdConnectWrongStateAuthorizationFlowAjax [GOOD] >> Mvp::OpenIdConnectSessionServiceCreateAuthorizationFail >> TAllocatorSuite/WithAllPools::AllocMemoryWithMemPoolAsyncRandomOrderDealloc/SlotMemPool >> ExternalDataSourceTest::ValidateName [GOOD] >> ExternalDataSourceTest::ValidatePack [GOOD] >> ExternalDataSourceTest::ValidateAuth [GOOD] >> ExternalDataSourceTest::ValidateParameters [GOOD] >> ExternalDataSourceTest::ValidateHasExternalTable [GOOD] >> ExternalDataSourceTest::ValidateProperties [GOOD] >> ExternalDataSourceTest::ValidateLocation [GOOD] >> ExternalSourceBuilderTest::ValidateName [GOOD] >> ExternalSourceBuilderTest::ValidateAuthWithoutCondition [GOOD] >> 
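For scale, the TEvVGetResult dump above carries 673 results, c# 0 through c# 672: 672 full blobs of sz# 99743 plus a final partial blob of sz# 16770. The quick arithmetic below uses only sizes visible in the log and shows why the reply lands at the reported size# 67108001, within about 1 KiB of 64 MiB; that the VDisk's reply cap sits just below the 64 MiB mark is an assumption on my part, the log only states the message is too large:

# Sizes taken from the TEvVGetResult dump above; the 64 MiB reference is an assumption.
payload = 672 * 99743 + 16770       # blob data: 67044066 bytes
overhead = 67108001 - payload       # 63935 bytes of per-query framing/metadata
print(payload, overhead)
print(64 * 1024 * 1024 - 67108001)  # 863: the reply stops just shy of 64 MiB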
ExternalSourceBuilderTest::ValidateAuthWithCondition [GOOD] >> ExternalSourceBuilderTest::ValidateUnsupportedField [GOOD] >> ExternalSourceBuilderTest::ValidateNonRequiredField [GOOD] >> ExternalSourceBuilderTest::ValidateRequiredField [GOOD] >> ExternalSourceBuilderTest::ValidateNonRequiredFieldValues [GOOD] >> ExternalSourceBuilderTest::ValidateRequiredFieldValues [GOOD] >> ExternalSourceBuilderTest::ValidateRequiredFieldOnCondition [GOOD] >> IcebergDdlTest::HiveCatalogWithS3Test [GOOD] >> IcebergDdlTest::HadoopCatalogWithS3Test [GOOD] >> ObjectStorageTest::SuccessValidation [GOOD] >> ObjectStorageTest::FailedCreate [GOOD] >> ObjectStorageTest::FailedValidation [GOOD] >> ObjectStorageTest::FailedJsonListValidation [GOOD] >> ObjectStorageTest::FailedOptionalTypeValidation [GOOD] >> ObjectStorageTest::WildcardsValidation [GOOD] >> ObjectStorageTest::FailedPartitionedByValidation [GOOD] >> BlobDepotWithTestShard::PlainGroup [GOOD] |84.6%| [TS] {asan, default-linux-x86_64, release} ydb/core/ymq/http/ut/unittest >> XmlBuilderTest::MacroBuilder [GOOD] |84.6%| [TS] {BAZEL_UPLOAD} ydb/core/ymq/http/ut/unittest |84.6%| [TS] {RESULT} ydb/core/ymq/http/ut/unittest ------- [TS] {asan, default-linux-x86_64, release} ydb/core/external_sources/object_storage/inference/ut/gtest >> ArrowInferenceTest::broken_json_list [GOOD] Test command err: {
: Error: couldn't open csv/tsv file, check format and compression parameters: empty file, code: 1001 }
{
: Error: couldn't open json file, check format and compression parameters: empty file, code: 1001 }
{
: Error: couldn't open json file, check format and compression parameters: empty file, code: 1001 }
2026-01-08T15:25:00.943042Z 1 00h00m00.000000s :OBJECT_STORAGE_INFERENCINATOR DEBUG: TArrowInferencinator: [1:6:6]. HandleFileError: {
: Error: couldn't run arrow json chunker for /path/is/neither/real: Invalid: straddling object straddles two block boundaries (try to increase block size?), code: 1001 }
{
: Error: couldn't run arrow json chunker for /path/is/neither/real: Invalid: straddling object straddles two block boundaries (try to increase block size?), code: 1001 }
{
: Error: couldn't open json file, check format and compression parameters: Invalid: JSON parse error: Invalid value. in row 0, code: 1001 } >> Mvp::OpenIdConnectSessionServiceCreateAuthorizationFail [GOOD] >> Mvp::OpenIdConnectSessionServiceCreateAccessTokenInvalid >> TImportantConsumerOffsetTrackerTest::EmptyConsumersList [GOOD] >> TImportantConsumerOffsetTrackerTest::Unbound [GOOD] >> TImportantConsumerOffsetTrackerTest::Expired [GOOD] >> TImportantConsumerOffsetTrackerTest::UnboundAndExpired [GOOD] >> TImportantConsumerOffsetTrackerTest::ReadUnboundAndExpired [GOOD] >> TImportantConsumerOffsetTrackerTest::ReadUnboundAndWaiting300 [GOOD] >> TImportantConsumerOffsetTrackerTest::ReadUnboundAndWaiting400 [GOOD] >> TImportantConsumerOffsetTrackerTest::ReadUnboundAndWaiting500 [GOOD] >> TImportantConsumerOffsetTrackerTest::Waiting400And500 [GOOD] >> TImportantConsumerOffsetTrackerTest::Waiting400AndRead500 [GOOD] >> TImportantConsumerOffsetTrackerTest::ExactMatchSingleConsumerMaxRetention [GOOD] >> TImportantConsumerOffsetTrackerTest::ExactMatchSingleConsumerFiniteRetention [GOOD] >> TImportantConsumerOffsetTrackerTest::ExactMatchMultipleConsumers [GOOD] >> TImportantConsumerOffsetTrackerTest::MultipleExactMatches [GOOD] >> TImportantConsumerOffsetTrackerTest::UnboundKeepNextBlob [GOOD] >> TDeduplicatorTest::AddMessage [GOOD] >> TDeduplicatorTest::AddTwoMessages [GOOD] >> TDeduplicatorTest::AddDeduplicatedMessages [GOOD] >> TDeduplicatorTest::AddTwoMessages_DifferentTime_OneBucket [GOOD] >> TDeduplicatorTest::AddManyMessages_SameTime_DifferentBucket [GOOD] |84.6%| [TS] {BAZEL_UPLOAD} ydb/core/external_sources/object_storage/inference/ut/gtest >> Mvp::OpenIdConnectSessionServiceCreateAccessTokenInvalid [GOOD] >> Mvp::OpenIdConnectSessionServiceCreateAccessTokenInvalidAjax |84.6%| [TS] {asan, default-linux-x86_64, release} ydb/library/yql/providers/s3/provider/ut/unittest >> TCollectingS3ListingStrategyTests::IfExceptionIsReturnedFromIteratorThanItShouldCovertItToIssue [GOOD] |84.6%| [TS] {RESULT} ydb/core/external_sources/object_storage/inference/ut/gtest |84.6%| [TS] {BAZEL_UPLOAD} ydb/library/yql/providers/s3/provider/ut/unittest |84.6%| [TS] {RESULT} ydb/library/yql/providers/s3/provider/ut/unittest |84.6%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/ut_testshard/unittest >> BlobDepotWithTestShard::PlainGroup [GOOD] >> Mvp::OpenIdConnectSessionServiceCreateAccessTokenInvalidAjax [GOOD] >> Mvp::OpenIdConnectSessionServiceCreateOpenIdScopeMissed |84.6%| [TM] {RESULT} ydb/core/blobstorage/ut_testshard/unittest |84.6%| [TM] {BAZEL_UPLOAD} ydb/core/blobstorage/ut_testshard/unittest >> Mvp::OpenIdConnectSessionServiceCreateOpenIdScopeMissed [GOOD] >> Mvp::OpenIdConnectAllowedHostsList >> TAllocatorSuite/WithAllPools::AllocMemoryWithMemPoolAsyncRandomOrderDealloc/DummyMemPool [SKIPPED] >> TAllocatorSuite/WithAllPools::MemRegRcBuf/SlotMemPool [FAIL] >> TAllocatorSuite/WithAllPools::MemRegRcBuf/DummyMemPool [FAIL] >> TAllocatorSuite/WithAllPools::MemRegRcBufSubstr/SlotMemPool [FAIL] >> TAllocatorSuite/WithAllPools::MemRegRcBufSubstr/DummyMemPool [FAIL] >> TAllocatorSuite/WithAllPools::MemRegRope/SlotMemPool [FAIL] >> TAllocatorSuite/WithAllPools::MemRegRope/DummyMemPool [FAIL] >> TAllocatorSuite/WithAllPools::FullChunkAllocationMultiThread/SlotMemPool |84.6%| [TS] {asan, default-linux-x86_64, release} ydb/core/external_sources/ut/unittest >> ObjectStorageTest::FailedPartitionedByValidation [GOOD] >> Mvp::OpenIdConnectAllowedHostsList [GOOD] >> 
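The repeated "straddling object straddles two block boundaries (try to increase block size?)" errors above are Apache Arrow's JSON reader complaining that a single JSON value spans more than one of its fixed-size read blocks. Assuming the inference code drives Arrow the way pyarrow exposes it, the knob the error hints at looks like this (the input is a stand-in, not the test's file):

import io
import pyarrow.json as pa_json

data = b'{"x": 1}\n{"x": 2}\n'  # stand-in for an S3 object
# A block_size larger than the biggest single JSON object avoids the
# "straddling object" error quoted above.
opts = pa_json.ReadOptions(block_size=1 << 20)
table = pa_json.read_json(io.BytesIO(data), read_options=opts)
print(table.num_rows)  # 2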
Mvp::OpenIdConnectHandleNullResponseFromProtectedResource ------- [TS] {asan, default-linux-x86_64, release} ydb/core/persqueue/pqtablet/partition/ut/gtest >> TDeduplicatorTest::AddManyMessages_SameTime_DifferentBucket [GOOD] Test command err: Append WAL: e0000000001|0000000000000001 Append WAL: e0000000001|0000000000000001 Append WAL: e0000000001|0000000000000001 Append WAL: e0000000001|0000000000000001 Append WAL: e0000000001|0000000000000001 Append WAL: e0000000001|0000000000000001 Append WAL: e0000000001|0000000000000002 |84.6%| [TS] {BAZEL_UPLOAD} ydb/core/external_sources/ut/unittest |84.6%| [TS] {BAZEL_UPLOAD} ydb/core/persqueue/pqtablet/partition/ut/gtest |84.6%| [TA] $(B)/ydb/core/blobstorage/vdisk/common/ut/test-results/unittest/{meta.json ... results_accumulator.log} >> Mvp::OpenIdConnectHandleNullResponseFromProtectedResource [GOOD] >> Mvp::OpenIdConnectSessionServiceCreateNotFoundCookie >> Mvp::OpenIdConnectSessionServiceCreateNotFoundCookie [GOOD] >> Mvp::OpenIdConnectSessionServiceCreateGetWrongStateAndWrongCookie |84.6%| [TA] {BAZEL_UPLOAD} $(B)/ydb/core/blobstorage/vdisk/common/ut/test-results/unittest/{meta.json ... results_accumulator.log} >> TMicrosecondsSlidingWindow::Basic [GOOD] >> JsonEnvelopeTest::Simple [GOOD] >> JsonEnvelopeTest::NoReplace [GOOD] >> JsonEnvelopeTest::ArrayItem [GOOD] >> JsonEnvelopeTest::Escape [GOOD] >> JsonEnvelopeTest::BinaryData [GOOD] >> ClientBlobSerialization::SerializeAndDeserializeAllScenarios [GOOD] >> TTypeCodecsTest::TestBoolCodec |84.7%| [TS] {RESULT} ydb/core/persqueue/pqtablet/partition/ut/gtest |84.7%| [TS] {RESULT} ydb/core/external_sources/ut/unittest >> Mvp::OpenIdConnectSessionServiceCreateGetWrongStateAndWrongCookie [GOOD] >> Mvp::OidcImpersonationStartFlow >> TTypeCodecsTest::TestBoolCodec [GOOD] >> TTypeCodecsTest::TestFixedLenCodec [GOOD] >> TTypeCodecsTest::TestVarLenCodec [GOOD] >> TTypeCodecsTest::TestVarIntCodec [GOOD] >> TTypeCodecsTest::TestZigZagCodec [GOOD] >> TTypeCodecsTest::TestDeltaVarIntCodecAndRev [GOOD] >> TTypeCodecsTest::TestDeltaZigZagCodec [GOOD] >> TaskGroup::ImmediateReturn |84.7%| [TA] {RESULT} $(B)/ydb/core/blobstorage/vdisk/common/ut/test-results/unittest/{meta.json ... 
results_accumulator.log} >> TaskGroup::ImmediateReturn [GOOD] >> TaskGroup::TaskDiscardedBeforeStart [GOOD] >> TaskGroup::TaskStartNotRecursive [GOOD] >> TaskGroup::TaskImmediateReturnBeforeNextTask [GOOD] >> TaskGroup::TaskThrowsImmediately [GOOD] >> TaskGroup::TaskThrowsFromCoroutine [GOOD] >> TaskGroup::TaskAwaitedBeforeReturn [GOOD] >> TaskGroup::TaskNotCancelledUntilGroupReturns [GOOD] >> TaskGroup::TaskCancelAfterGroupCancel >> Mvp::OidcImpersonationStartFlow [GOOD] >> Mvp::OidcImpersonationStartNeedServiceAccountId >> TCheckpointCoordinatorTests::ShouldTriggerCheckpointWithSource >> TaskGroup::TaskCancelAfterGroupCancel [GOOD] >> TaskGroup::TaskAwaitCancelThenRetry [GOOD] >> TaskGroup::TestSingleStepTesting >> TaskGroup::TestSingleStepTesting [GOOD] >> Async::AsyncBootstrap [GOOD] >> Async::AwaiterThrowWithoutSuspend [GOOD] >> Async::AwaiterResume [GOOD] >> Async::StdAwaiterResume [GOOD] >> Async::StdAwaiterResumeOutsideActorSystem [GOOD] >> Async::AwaiterPassAwayThenResume >> Mvp::OidcImpersonationStartNeedServiceAccountId [GOOD] >> Mvp::OidcImpersonationStopFlow >> TAllocatorSuite/WithAllPools::FullChunkAllocationMultiThread/DummyMemPool [SKIPPED] >> TAllocatorSuite/WithAllPools::SmallAllocationMultiThread/SlotMemPool >> TCheckpointCoordinatorTests::ShouldTriggerCheckpointWithSource [GOOD] >> TCheckpointCoordinatorTests::ShouldTriggerCheckpointWithSourcesAndWithChannel >> Async::AwaiterPassAwayThenResume [GOOD] >> Async::StdAwaiterPassAwayThenResume [GOOD] >> Async::StdAwaiterPassAwayThenResumeOutsideActorSystem [GOOD] >> Async::AwaiterPassAwayThenCancel [GOOD] >> Async::StdAwaiterPassAwayThenCancel [GOOD] >> Async::StdAwaiterPassAwayThenCancelOutsideActorSystem >> ReadBatcher::Range [GOOD] >> TpchQueries::ScaleFactor >> TCheckpointCoordinatorTests::ShouldTriggerCheckpointWithSourcesAndWithChannel [GOOD] >> TCheckpointCoordinatorTests::ShouldAllSnapshots >> Mvp::OidcImpersonationStopFlow [GOOD] >> Mvp::OidcImpersonatedAccessToProtectedResource >> Async::StdAwaiterPassAwayThenCancelOutsideActorSystem [GOOD] >> Async::AwaiterNotSuspendingAfterPassAway [GOOD] >> Async::StdAwaiterNotSuspendingAfterPassAway [GOOD] >> Async::AwaiterWithoutCancelSupportSuspendAfterPassAwayThenResume [GOOD] >> Async::StdAwaiterWithoutCancelSupportSuspendAfterPassAwayThenResume [GOOD] >> Async::StdAwaiterWithoutCancelSupportSuspendAfterPassAwayThenResumeOutsideActorSystem [GOOD] >> Async::StdAwaiterResumeOutsideActorSystemBeforeStop [GOOD] >> Async::StdAwaiterResumeOutsideActorSystemAfterStop [GOOD] >> Async::StdAwaiterPassAwayThenResumeOutsideActorSystemBeforeStop [GOOD] >> Async::StdAwaiterPassAwayThenResumeOutsideActorSystemAfterStop [GOOD] >> Async::StdAwaiterPassAwayThenConfirmOutsideActorSystemBeforeStop >> TestS3UrlEscape::EscapeEscapedForce [GOOD] >> TestS3UrlEscape::EscapeUnescapeForceRet [GOOD] >> TestS3UrlEscape::EscapeAdditionalSymbols [GOOD] >> TestUrlBuilder::UriOnly [GOOD] >> TestUrlBuilder::Basic [GOOD] >> TestUrlBuilder::BasicWithEncoding [GOOD] >> TestUrlBuilder::BasicWithAdditionalEncoding [GOOD] >> Async::StdAwaiterPassAwayThenConfirmOutsideActorSystemBeforeStop [GOOD] >> Async::StdAwaiterPassAwayThenConfirmOutsideActorSystemAfterStop [GOOD] >> Async::StdAwaiterWithoutCancelSupportSuspendAfterPassAwayThenResumeOutsideActorSystemBeforeStop [GOOD] >> Async::StdAwaiterWithoutCancelSupportSuspendAfterPassAwayThenResumeOutsideActorSystemAfterStop [GOOD] >> Async::StdAwaiterNoResumeOnShutdown [GOOD] |84.7%| [TS] {asan, default-linux-x86_64, release} ydb/core/log_backend/ut/unittest >> 
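The TTypeCodecsTest entries above cover VarInt, ZigZag and delta codecs for PQ blob fields. For reference, the standard ZigZag transform (a generic sketch, not YDB's implementation) maps signed integers to unsigned ones so that values of small magnitude stay small for a following varint encoder:

def zigzag_encode(n, bits=64):
    # 0, -1, 1, -2, 2, ... -> 0, 1, 2, 3, 4, ...
    return ((n << 1) ^ (n >> (bits - 1))) & ((1 << bits) - 1)

def zigzag_decode(u):
    return (u >> 1) ^ -(u & 1)

assert [zigzag_encode(v) for v in (0, -1, 1, -2, 2)] == [0, 1, 2, 3, 4]
assert all(zigzag_decode(zigzag_encode(v)) == v for v in (-12345, -1, 0, 7, 1 << 40))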
JsonEnvelopeTest::BinaryData [GOOD] >> Mvp::OidcImpersonatedAccessToProtectedResource [GOOD] >> Mvp::OidcImpersonatedAccessNotAuthorized >> TCheckpointCoordinatorTests::ShouldAllSnapshots [GOOD] >> TCheckpointCoordinatorTests::Should2Increments1Snapshot >> CallbackCoroutine::Resume [GOOD] >> CallbackCoroutine::Throwing [GOOD] >> Cancellation::CancelWithResult [GOOD] >> Cancellation::CancelWithNoResult [GOOD] >> Cancellation::AlreadyCancelledScope [GOOD] >> Cancellation::AlreadyCancelledCaller [GOOD] >> Cancellation::CancelScopeThenCaller |84.7%| [TS] {asan, default-linux-x86_64, release} ydb/core/persqueue/common/ut/unittest >> TMicrosecondsSlidingWindow::Basic [GOOD] >> Cancellation::CancelScopeThenCaller [GOOD] >> Cancellation::CancelCallerThenScope [GOOD] >> Cancellation::SuspendedScopeTearDown [GOOD] >> Cancellation::CancelledScopeTearDown [GOOD] >> Cancellation::Shielded [GOOD] >> Cancellation::ShieldedTearDown |84.7%| [TS] {BAZEL_UPLOAD} ydb/core/log_backend/ut/unittest |84.7%| [TS] {RESULT} ydb/core/log_backend/ut/unittest |84.7%| [TS] {RESULT} ydb/core/persqueue/common/ut/unittest >> TAllocatorSuite32::SlotPoolLimit >> TCheckpointCoordinatorTests::Should2Increments1Snapshot [GOOD] >> TCheckpointCoordinatorTests::ShouldAbortPreviousCheckpointsIfNodeStateCantBeSaved >> Cancellation::ShieldedTearDown [GOOD] >> Cancellation::ShieldedCancel [GOOD] >> Cancellation::InterceptCancellationVoidThenResume [GOOD] >> Cancellation::InterceptCancellationVoidThenCancel [GOOD] >> Cancellation::InterceptCancellationTrueThenTearDown [GOOD] >> Cancellation::InterceptCancellationFalseThenResume [GOOD] >> Cancellation::InterceptCancellationBoolThrow |84.7%| [TS] {BAZEL_UPLOAD} ydb/core/persqueue/common/ut/unittest ------- [TS] {asan, default-linux-x86_64, release} ydb/core/persqueue/pqtablet/blob/ut/unittest >> TTypeCodecsTest::TestDeltaZigZagCodec [GOOD] Test command err: payloadSize 0 totalSize 100 partsCount 100 partKeySize 0 hashSize 0 partNo 1 === payloadSize 10 totalSize 100 partsCount 10 partKeySize 0 hashSize 0 partNo 0 === payloadSize 10 totalSize 100 partsCount 10 partKeySize 0 hashSize 0 partNo 0 === payloadSize 10 totalSize 100 partsCount 10 partKeySize 100 hashSize 1 partNo 0 === payloadSize 10 totalSize 100 partsCount 10 partKeySize 100 hashSize 1 partNo 0 === payloadSize 10 totalSize 100 partsCount 10 partKeySize 100 hashSize 0 partNo 0 === payloadSize 10 totalSize 100 partsCount 10 partKeySize 100 hashSize 0 partNo 0 === payloadSize 10 totalSize 100 partsCount 10 partKeySize 100 hashSize 100 partNo 0 === payloadSize 10 totalSize 100 partsCount 10 partKeySize 100 hashSize 100 partNo 0 === payloadSize 10 totalSize 100 partsCount 10 partKeySize 100 hashSize 255 partNo 0 === payloadSize 10 totalSize 100 partsCount 10 partKeySize 100 hashSize 255 partNo 0 === payloadSize 10 totalSize 100 partsCount 10 partKeySize 256 hashSize 1 partNo 0 === payloadSize 10 totalSize 100 partsCount 10 partKeySize 256 hashSize 1 partNo 0 === payloadSize 10 totalSize 100 partsCount 10 partKeySize 256 hashSize 0 partNo 0 === payloadSize 10 totalSize 100 partsCount 10 partKeySize 256 hashSize 0 partNo 0 === payloadSize 10 totalSize 100 partsCount 10 partKeySize 256 hashSize 100 partNo 0 === payloadSize 10 totalSize 100 partsCount 10 partKeySize 256 hashSize 100 partNo 0 === payloadSize 10 totalSize 100 partsCount 10 partKeySize 256 hashSize 255 partNo 0 === payloadSize 10 totalSize 100 partsCount 10 partKeySize 256 hashSize 255 partNo 0 === payloadSize 10 totalSize 100 partsCount 10 partKeySize 0 hashSize 0 partNo 
1 ===
(the sweep continues over the same fields, payloadSize up to 524288, totalSize up to 10485760, partsCount 10 and 40, partKeySize 0/100/256, hashSize 0/1/100/255, partNo 0/1/10, each combination printed twice; the bulk of this matrix, already truncated in the source log, is elided here) ... 
Size 1048576 partsCount 40 partKeySize 256 hashSize 1 partNo 1 === payloadSize 524288 totalSize 1048576 partsCount 40 partKeySize 256 hashSize 1 partNo 1 === payloadSize 524288 totalSize 1048576 partsCount 40 partKeySize 256 hashSize 0 partNo 1 === payloadSize 524288 totalSize 1048576 partsCount 40 partKeySize 256 hashSize 0 partNo 1 === payloadSize 524288 totalSize 1048576 partsCount 40 partKeySize 256 hashSize 100 partNo 1 === payloadSize 524288 totalSize 1048576 partsCount 40 partKeySize 256 hashSize 100 partNo 1 === payloadSize 524288 totalSize 1048576 partsCount 40 partKeySize 256 hashSize 255 partNo 1 === payloadSize 524288 totalSize 1048576 partsCount 40 partKeySize 256 hashSize 255 partNo 1 === payloadSize 524288 totalSize 1048576 partsCount 40 partKeySize 0 hashSize 0 partNo 10 === payloadSize 524288 totalSize 1048576 partsCount 40 partKeySize 0 hashSize 0 partNo 10 === payloadSize 524288 totalSize 1048576 partsCount 40 partKeySize 100 hashSize 1 partNo 10 === payloadSize 524288 totalSize 1048576 partsCount 40 partKeySize 100 hashSize 1 partNo 10 === payloadSize 524288 totalSize 1048576 partsCount 40 partKeySize 100 hashSize 0 partNo 10 === payloadSize 524288 totalSize 1048576 partsCount 40 partKeySize 100 hashSize 0 partNo 10 === payloadSize 524288 totalSize 1048576 partsCount 40 partKeySize 100 hashSize 100 partNo 10 === payloadSize 524288 totalSize 1048576 partsCount 40 partKeySize 100 hashSize 100 partNo 10 === payloadSize 524288 totalSize 1048576 partsCount 40 partKeySize 100 hashSize 255 partNo 10 === payloadSize 524288 totalSize 1048576 partsCount 40 partKeySize 100 hashSize 255 partNo 10 === payloadSize 524288 totalSize 1048576 partsCount 40 partKeySize 256 hashSize 1 partNo 10 === payloadSize 524288 totalSize 1048576 partsCount 40 partKeySize 256 hashSize 1 partNo 10 === payloadSize 524288 totalSize 1048576 partsCount 40 partKeySize 256 hashSize 0 partNo 10 === payloadSize 524288 totalSize 1048576 partsCount 40 partKeySize 256 hashSize 0 partNo 10 === payloadSize 524288 totalSize 1048576 partsCount 40 partKeySize 256 hashSize 100 partNo 10 === payloadSize 524288 totalSize 1048576 partsCount 40 partKeySize 256 hashSize 100 partNo 10 === payloadSize 524288 totalSize 1048576 partsCount 40 partKeySize 256 hashSize 255 partNo 10 === payloadSize 524288 totalSize 1048576 partsCount 40 partKeySize 256 hashSize 255 partNo 10 === payloadSize 524288 totalSize 10485760 partsCount 10 partKeySize 0 hashSize 0 partNo 0 === payloadSize 524288 totalSize 10485760 partsCount 10 partKeySize 0 hashSize 0 partNo 0 === payloadSize 524288 totalSize 10485760 partsCount 10 partKeySize 100 hashSize 1 partNo 0 === payloadSize 524288 totalSize 10485760 partsCount 10 partKeySize 100 hashSize 1 partNo 0 === payloadSize 524288 totalSize 10485760 partsCount 10 partKeySize 100 hashSize 0 partNo 0 === payloadSize 524288 totalSize 10485760 partsCount 10 partKeySize 100 hashSize 0 partNo 0 === payloadSize 524288 totalSize 10485760 partsCount 10 partKeySize 100 hashSize 100 partNo 0 === payloadSize 524288 totalSize 10485760 partsCount 10 partKeySize 100 hashSize 100 partNo 0 === payloadSize 524288 totalSize 10485760 partsCount 10 partKeySize 100 hashSize 255 partNo 0 === payloadSize 524288 totalSize 10485760 partsCount 10 partKeySize 100 hashSize 255 partNo 0 === payloadSize 524288 totalSize 10485760 partsCount 10 partKeySize 256 hashSize 1 partNo 0 === payloadSize 524288 totalSize 10485760 partsCount 10 partKeySize 256 hashSize 1 partNo 0 === payloadSize 524288 totalSize 10485760 partsCount 10 partKeySize 
256 hashSize 0 partNo 0 === payloadSize 524288 totalSize 10485760 partsCount 10 partKeySize 256 hashSize 0 partNo 0 === payloadSize 524288 totalSize 10485760 partsCount 10 partKeySize 256 hashSize 100 partNo 0 === payloadSize 524288 totalSize 10485760 partsCount 10 partKeySize 256 hashSize 100 partNo 0 === payloadSize 524288 totalSize 10485760 partsCount 10 partKeySize 256 hashSize 255 partNo 0 === payloadSize 524288 totalSize 10485760 partsCount 10 partKeySize 256 hashSize 255 partNo 0 === payloadSize 524288 totalSize 10485760 partsCount 10 partKeySize 0 hashSize 0 partNo 1 === payloadSize 524288 totalSize 10485760 partsCount 10 partKeySize 0 hashSize 0 partNo 1 === payloadSize 524288 totalSize 10485760 partsCount 10 partKeySize 100 hashSize 1 partNo 1 === payloadSize 524288 totalSize 10485760 partsCount 10 partKeySize 100 hashSize 1 partNo 1 === payloadSize 524288 totalSize 10485760 partsCount 10 partKeySize 100 hashSize 0 partNo 1 === payloadSize 524288 totalSize 10485760 partsCount 10 partKeySize 100 hashSize 0 partNo 1 === payloadSize 524288 totalSize 10485760 partsCount 10 partKeySize 100 hashSize 100 partNo 1 === payloadSize 524288 totalSize 10485760 partsCount 10 partKeySize 100 hashSize 100 partNo 1 === payloadSize 524288 totalSize 10485760 partsCount 10 partKeySize 100 hashSize 255 partNo 1 === payloadSize 524288 totalSize 10485760 partsCount 10 partKeySize 100 hashSize 255 partNo 1 === payloadSize 524288 totalSize 10485760 partsCount 10 partKeySize 256 hashSize 1 partNo 1 === payloadSize 524288 totalSize 10485760 partsCount 10 partKeySize 256 hashSize 1 partNo 1 === payloadSize 524288 totalSize 10485760 partsCount 10 partKeySize 256 hashSize 0 partNo 1 === payloadSize 524288 totalSize 10485760 partsCount 10 partKeySize 256 hashSize 0 partNo 1 === payloadSize 524288 totalSize 10485760 partsCount 10 partKeySize 256 hashSize 100 partNo 1 === payloadSize 524288 totalSize 10485760 partsCount 10 partKeySize 256 hashSize 100 partNo 1 === payloadSize 524288 totalSize 10485760 partsCount 10 partKeySize 256 hashSize 255 partNo 1 === payloadSize 524288 totalSize 10485760 partsCount 10 partKeySize 256 hashSize 255 partNo 1 === payloadSize 524288 totalSize 10485760 partsCount 40 partKeySize 0 hashSize 0 partNo 0 === payloadSize 524288 totalSize 10485760 partsCount 40 partKeySize 0 hashSize 0 partNo 0 === payloadSize 524288 totalSize 10485760 partsCount 40 partKeySize 100 hashSize 1 partNo 0 === payloadSize 524288 totalSize 10485760 partsCount 40 partKeySize 100 hashSize 1 partNo 0 === payloadSize 524288 totalSize 10485760 partsCount 40 partKeySize 100 hashSize 0 partNo 0 === payloadSize 524288 totalSize 10485760 partsCount 40 partKeySize 100 hashSize 0 partNo 0 === payloadSize 524288 totalSize 10485760 partsCount 40 partKeySize 100 hashSize 100 partNo 0 === payloadSize 524288 totalSize 10485760 partsCount 40 partKeySize 100 hashSize 100 partNo 0 === payloadSize 524288 totalSize 10485760 partsCount 40 partKeySize 100 hashSize 255 partNo 0 === payloadSize 524288 totalSize 10485760 partsCount 40 partKeySize 100 hashSize 255 partNo 0 === payloadSize 524288 totalSize 10485760 partsCount 40 partKeySize 256 hashSize 1 partNo 0 === payloadSize 524288 totalSize 10485760 partsCount 40 partKeySize 256 hashSize 1 partNo 0 === payloadSize 524288 totalSize 10485760 partsCount 40 partKeySize 256 hashSize 0 partNo 0 === payloadSize 524288 totalSize 10485760 partsCount 40 partKeySize 256 hashSize 0 partNo 0 === payloadSize 524288 totalSize 10485760 partsCount 40 partKeySize 256 hashSize 100 partNo 0 === 
payloadSize 524288 totalSize 10485760 partsCount 40 partKeySize 256 hashSize 100 partNo 0 === payloadSize 524288 totalSize 10485760 partsCount 40 partKeySize 256 hashSize 255 partNo 0 === payloadSize 524288 totalSize 10485760 partsCount 40 partKeySize 256 hashSize 255 partNo 0 === payloadSize 524288 totalSize 10485760 partsCount 40 partKeySize 0 hashSize 0 partNo 1 === payloadSize 524288 totalSize 10485760 partsCount 40 partKeySize 0 hashSize 0 partNo 1 === payloadSize 524288 totalSize 10485760 partsCount 40 partKeySize 100 hashSize 1 partNo 1 === payloadSize 524288 totalSize 10485760 partsCount 40 partKeySize 100 hashSize 1 partNo 1 === payloadSize 524288 totalSize 10485760 partsCount 40 partKeySize 100 hashSize 0 partNo 1 === payloadSize 524288 totalSize 10485760 partsCount 40 partKeySize 100 hashSize 0 partNo 1 === payloadSize 524288 totalSize 10485760 partsCount 40 partKeySize 100 hashSize 100 partNo 1 === payloadSize 524288 totalSize 10485760 partsCount 40 partKeySize 100 hashSize 100 partNo 1 === payloadSize 524288 totalSize 10485760 partsCount 40 partKeySize 100 hashSize 255 partNo 1 === payloadSize 524288 totalSize 10485760 partsCount 40 partKeySize 100 hashSize 255 partNo 1 === payloadSize 524288 totalSize 10485760 partsCount 40 partKeySize 256 hashSize 1 partNo 1 === payloadSize 524288 totalSize 10485760 partsCount 40 partKeySize 256 hashSize 1 partNo 1 === payloadSize 524288 totalSize 10485760 partsCount 40 partKeySize 256 hashSize 0 partNo 1 === payloadSize 524288 totalSize 10485760 partsCount 40 partKeySize 256 hashSize 0 partNo 1 === payloadSize 524288 totalSize 10485760 partsCount 40 partKeySize 256 hashSize 100 partNo 1 === payloadSize 524288 totalSize 10485760 partsCount 40 partKeySize 256 hashSize 100 partNo 1 === payloadSize 524288 totalSize 10485760 partsCount 40 partKeySize 256 hashSize 255 partNo 1 === payloadSize 524288 totalSize 10485760 partsCount 40 partKeySize 256 hashSize 255 partNo 1 === payloadSize 524288 totalSize 10485760 partsCount 40 partKeySize 0 hashSize 0 partNo 10 === payloadSize 524288 totalSize 10485760 partsCount 40 partKeySize 0 hashSize 0 partNo 10 === payloadSize 524288 totalSize 10485760 partsCount 40 partKeySize 100 hashSize 1 partNo 10 === payloadSize 524288 totalSize 10485760 partsCount 40 partKeySize 100 hashSize 1 partNo 10 === payloadSize 524288 totalSize 10485760 partsCount 40 partKeySize 100 hashSize 0 partNo 10 === payloadSize 524288 totalSize 10485760 partsCount 40 partKeySize 100 hashSize 0 partNo 10 === payloadSize 524288 totalSize 10485760 partsCount 40 partKeySize 100 hashSize 100 partNo 10 === payloadSize 524288 totalSize 10485760 partsCount 40 partKeySize 100 hashSize 100 partNo 10 === payloadSize 524288 totalSize 10485760 partsCount 40 partKeySize 100 hashSize 255 partNo 10 === payloadSize 524288 totalSize 10485760 partsCount 40 partKeySize 100 hashSize 255 partNo 10 === payloadSize 524288 totalSize 10485760 partsCount 40 partKeySize 256 hashSize 1 partNo 10 === payloadSize 524288 totalSize 10485760 partsCount 40 partKeySize 256 hashSize 1 partNo 10 === payloadSize 524288 totalSize 10485760 partsCount 40 partKeySize 256 hashSize 0 partNo 10 === payloadSize 524288 totalSize 10485760 partsCount 40 partKeySize 256 hashSize 0 partNo 10 === payloadSize 524288 totalSize 10485760 partsCount 40 partKeySize 256 hashSize 100 partNo 10 === payloadSize 524288 totalSize 10485760 partsCount 40 partKeySize 256 hashSize 100 partNo 10 === payloadSize 524288 totalSize 10485760 partsCount 40 partKeySize 256 hashSize 255 partNo 10 === payloadSize 
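For orientation, the banner stream condensed above is a plain nested sweep over the listed parameter values. A minimal sketch that reproduces the banner order as observed in this log (a reconstruction for readability only, not the actual fixture in ydb/core/persqueue/pqtablet/blob/ut):

```cpp
#include <cstdio>
#include <utility>
#include <vector>

// Hypothetical reconstruction of the banner generator. Parameter values and
// their ordering are copied from the banners visible in this log; the
// (partKeySize, hashSize) pairs are enumerated together because the log
// shows partKeySize 0 only combined with hashSize 0.
int main() {
    const std::vector<unsigned long long> payloadSizes = {10, 524288};
    const std::vector<unsigned long long> totalSizes = {100, 524288, 1048576, 10485760};
    const std::vector<unsigned> partsCounts = {10, 40};
    const std::vector<unsigned> partNos = {0, 1, 10};  // values seen in this part of the log
    const std::vector<std::pair<unsigned, unsigned>> keyAndHashSizes = {
        {0, 0}, {100, 1}, {100, 0}, {100, 100}, {100, 255},
        {256, 1}, {256, 0}, {256, 100}, {256, 255},
    };

    for (auto payload : payloadSizes)
        for (auto total : totalSizes)
            for (auto parts : partsCounts)
                for (auto partNo : partNos)
                    for (auto [key, hash] : keyAndHashSizes)
                        std::printf("=== payloadSize %llu totalSize %llu partsCount %u"
                                    " partKeySize %u hashSize %u partNo %u ===\n",
                                    payload, total, parts, key, hash, partNo);
}
```

(The double printing in the captured stream is presumably an artifact of how the test's output channels were merged, so the sketch emits each banner once.)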
Size: 128 Create chunk: 0.000040s Read by index: 0.000021s Iterate: 0.000021s
Size: 252 Create chunk: 0.000056s Read by index: 0.000026s Iterate: 0.000027s
Size: 8002 Create chunk: 0.000158s Read by index: 0.000033s Iterate: 0.000096s
Size: 8256 Create chunk: 0.000230s Read by index: 0.000049s Iterate: 0.000115s
Size: 8532 Create chunk: 0.000109s Read by index: 0.000061s Iterate: 0.000061s
Size: 7769 Create chunk: 0.000110s Read by index: 0.000066s Iterate: 0.000062s
Size: 2853 Create chunk: 0.000102s Read by index: 0.000101s Iterate: 0.000059s
Size: 2419 Create chunk: 0.000115s Read by index: 0.000114s Iterate: 0.000060s
Size: 2929 Create chunk: 0.000117s Read by index: 0.000119s Iterate: 0.000076s
Size: 2472 Create chunk: 0.000133s Read by index: 0.000119s Iterate: 0.000060s
Size: 1887 Create chunk: 0.000106s Read by index: 0.000121s Iterate: 0.000062s
Size: 1658 Create chunk: 0.000121s Read by index: 0.000135s Iterate: 0.000063s
Size: 1889 Create chunk: 0.000105s Read by index: 0.000104s Iterate: 0.000073s
Size: 1660 Create chunk: 0.000100s Read by index: 0.000119s Iterate: 0.000062s
Size: 2407 Create chunk: 0.000108s Read by index: 0.000129s Iterate: 0.000063s
Size: 2061 Create chunk: 0.000117s Read by index: 0.000120s Iterate: 0.000063s
|84.7%| [TS] {BAZEL_UPLOAD} ydb/core/persqueue/pqtablet/blob/ut/unittest >> TAllocatorSuite32::SlotPoolLimit [FAIL] >> TAllocatorSuite32::SlotPoolHugeAlloc >> Mvp::OidcImpersonatedAccessNotAuthorized [GOOD] >> Mvp::OpenIdConnectStreamingRequestResponseYandex >> Cancellation::InterceptCancellationBoolThrow [GOOD] >> Cancellation::InterceptCancellationAsyncVoidOnCancelFinishThenResume [GOOD] >> Cancellation::InterceptCancellationAsyncVoidOnCancelFinishThenUnwind [GOOD] >> Cancellation::InterceptCancellationAsyncVoidResumeBeforeOnCancelFinish [GOOD] >> Cancellation::InterceptCancellationAsyncVoidResumeBeforeOnCancelUnwind [GOOD] >> Cancellation::InterceptCancellationAsyncVoidSuspendedTearDown [GOOD] >> Cancellation::InterceptCancellationAsyncVoidThrowsOnCall [GOOD] >> Cancellation::InterceptCancellationAsyncVoidThrowsOnStart >> TpchQueries::ScaleFactor [GOOD] >> TCheckpointCoordinatorTests::ShouldAbortPreviousCheckpointsIfNodeStateCantBeSaved [GOOD] >> Cancellation::InterceptCancellationAsyncVoidThrowsOnStart [GOOD] >> TCheckpointCoordinatorTests::ShouldDoNothingIfNoIngressTasks >> Cancellation::InterceptCancellationAsyncVoidThrowsResumeBeforeStart [GOOD] >> Cancellation::InterceptCancellationAsyncTrue [GOOD] >> Cancellation::InterceptCancellationAsyncFalse [GOOD] >> Continuation::ResumeFromCallback [GOOD] >> Continuation::Resume [GOOD] >> Continuation::DropFromCallback [GOOD] >> Continuation::Drop >> Mvp::OpenIdConnectStreamingRequestResponseYandex [GOOD] >> Mvp::OpenIdConnectStreamingRequestResponseNebius >> Mvp::TokenatorGetMetadataTokenGood >> Continuation::Drop [GOOD] >> Continuation::AlreadyCancelled [GOOD] >> Continuation::Cancel [GOOD] >> Continuation::Throw [GOOD] >> Continuation::NoResumeOnShutdown [GOOD] >> Event::NotifyOne [GOOD] >> Event::NotifyAll [GOOD] >> Event::AlreadyCancelled >> TPGTest::TestLogin |84.7%| [TS] {RESULT} ydb/core/persqueue/pqtablet/blob/ut/unittest >> Mvp::TokenatorGetMetadataTokenGood [GOOD] >> Mvp::TokenatorRefreshMetadataTokenGood >> Event::AlreadyCancelled [GOOD] >> Event::Cancel [GOOD] >> Event::CancelAfterNotify [GOOD] >> Event::Detach [GOOD] >> Event::NoResumeOnShutdown [GOOD] >> Timeout::Timeout >> TPGTest::TestLogin [GOOD]
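The per-chunk timings above (Create chunk / Read by index / Iterate) come from the same persqueue blob unittest. As an illustration of how such a three-phase measurement is usually taken with std::chrono (the chunk type below is just a byte vector standing in for whatever structure the test actually builds, which the log does not show):

```cpp
#include <chrono>
#include <cstdio>
#include <numeric>
#include <vector>

static double Seconds(std::chrono::steady_clock::duration d) {
    return std::chrono::duration<double>(d).count();
}

// Illustrative harness only: time three phases over a buffer of the given
// size and report them in the same shape as the log lines above.
static void BenchChunk(std::size_t size) {
    using Clock = std::chrono::steady_clock;

    const auto t0 = Clock::now();
    std::vector<unsigned char> chunk(size, 0xAB);            // "Create chunk"
    const auto t1 = Clock::now();

    unsigned long sum = 0;
    for (std::size_t i = 0; i < chunk.size(); i += 16) {     // "Read by index"
        sum += chunk[i];
    }
    const auto t2 = Clock::now();

    sum = std::accumulate(chunk.begin(), chunk.end(), sum);  // "Iterate"
    const auto t3 = Clock::now();

    std::printf("Size: %zu Create chunk: %.6fs Read by index: %.6fs Iterate: %.6fs\n",
                size, Seconds(t1 - t0), Seconds(t2 - t1), Seconds(t3 - t2));
    volatile unsigned long sink = sum;  // keep the measured loops observable
    (void)sink;
}

int main() {
    for (std::size_t size : {128, 252, 8002, 8256})
        BenchChunk(size);
}
```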
>> TBlobStorageHullCompactDeferredQueueTest::Basic [GOOD] >> TAllocatorSuite/WithAllPools::SmallAllocationMultiThread/DummyMemPool [SKIPPED] >> TAllocatorSuite/WithAllPools::ThreadsDestroy/SlotMemPool |84.7%| [TS] {asan, default-linux-x86_64, release} ydb/library/yql/providers/s3/common/ut/unittest >> TestUrlBuilder::BasicWithAdditionalEncoding [GOOD] |84.7%| [TS] {BAZEL_UPLOAD} ydb/library/yql/providers/s3/common/ut/unittest |84.7%| [TS] {RESULT} ydb/library/yql/providers/s3/common/ut/unittest >> Mvp::OpenIdConnectStreamingRequestResponseNebius [GOOD] >> Mvp::OidcWhoami200 >> Timeout::Timeout [GOOD] >> Timeout::TimeoutResumeImmediately [GOOD] >> Timeout::TimeoutResumeImmediatelyAfterPassAway [GOOD] >> Timeout::TimeoutUnwindOnSuspendAfterPassAway [GOOD] >> Timeout::ResumeBeforeTimeout >> Timeout::ResumeBeforeTimeout [GOOD] >> Timeout::ResumeAfterTimeout [GOOD] >> Timeout::ResumeAfterTimeoutAndPassAway >> TAllocatorSuite32::SlotPoolHugeAllocAfterSmall >> Mvp::OidcWhoami200 [GOOD] >> Mvp::OidcWhoamiServiceAccount200 >> Timeout::ResumeAfterTimeoutAndPassAway [GOOD] >> Timeout::CancelAfterTimeoutAndPassAway [GOOD] >> Timeout::ResumeAfterPassAwayAndTimeout >> GroupStress::Test [GOOD] >> Timeout::ResumeAfterPassAwayAndTimeout [GOOD] >> Timeout::CancelAfterPassAwayAndTimeout [GOOD] >> Sleep::ZeroDelay [GOOD] >> Sleep::InfiniteDelay [GOOD] >> Sleep::PassAwayBeforeYield >> Sleep::PassAwayBeforeYield [GOOD] >> Sleep::PassAwayAfterYield [GOOD] >> Sleep::Delay [GOOD] >> Sleep::DelayCancelWhileScheduled >> Sleep::DelayCancelWhileScheduled [GOOD] >> Sleep::DelayDestroyedBeforeDelivery [GOOD] >> WaitForEvent::EventDelivered [GOOD] >> WaitForEvent::Cancel [GOOD] >> WaitForEvent::CancelledAlready [GOOD] >> WaitForEvent::CancelThenDeliver [GOOD] >> CachingDnsResolver::UnusableResolver [GOOD] >> CachingDnsResolver::ResolveCaching >> Mvp::OidcWhoamiServiceAccount200 [GOOD] >> Mvp::OidcWhoamiBadIam200 |84.7%| [TS] {asan, default-linux-x86_64, release} ydb/library/workload/tpch/ut/unittest >> TpchQueries::ScaleFactor [GOOD] |84.7%| [TS] {RESULT} ydb/library/workload/tpch/ut/unittest |84.7%| [TS] {BAZEL_UPLOAD} ydb/library/workload/tpch/ut/unittest
------- [TS] {asan, default-linux-x86_64, release} ydb/core/pgproxy/ut/unittest >> TPGTest::TestLogin [GOOD]
Test command err:
2026-01-08T15:25:05.379119Z :PGWIRE INFO: sock_listener.cpp:66: Listening on [::]:5466
2026-01-08T15:25:05.387098Z :PGWIRE DEBUG: pg_connection.cpp:61: (#13,[::1]:47466) incoming connection opened
2026-01-08T15:25:05.387347Z :PGWIRE DEBUG: pg_connection.cpp:241: (#13,[::1]:47466) -> [1] 'i' "Initial" Size(15) protocol(0x00000300) user=user
2026-01-08T15:25:05.387595Z :PGWIRE DEBUG: pg_connection.cpp:241: (#13,[::1]:47466) <- [1] 'R' "Auth" Size(4) OK
>> TAllocatorSuite32::SlotPoolHugeAllocOtherThreadAfterSmall |84.7%| [TS] {BAZEL_UPLOAD} ydb/core/pgproxy/ut/unittest >> Mvp::OidcWhoamiBadIam200 [GOOD] >> Mvp::OidcWhoamiBadYdb200 >> CachingDnsResolver::ResolveCaching [GOOD] >> CachingDnsResolver::ResolveCachingV4 |84.7%| [TS] {RESULT} ydb/core/pgproxy/ut/unittest |84.7%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/ut_group/unittest >> GroupStress::Test [GOOD] |84.7%| [TM] {RESULT} ydb/core/blobstorage/ut_group/unittest |84.7%| [TM] {BAZEL_UPLOAD} ydb/core/blobstorage/ut_group/unittest >> TPartitionKeyRangeSequenceTest::InvalidFullCoverHi [GOOD] >> TPartitionKeyRangeSequenceTest::InvalidOverlapLong [GOOD] >> TPartitionKeyRangeSequenceTest::InvalidContains [GOOD] >> TPartitionKeyRangeSequenceTest::InvalidGap [GOOD]
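The PGWIRE trace above shows one complete login handshake: a client startup ('i' / "Initial") message carrying user=user, answered by an AuthenticationOk ('R' / "Auth") message. The startup packet layout is the public PostgreSQL v3 wire format; a self-contained sketch of serializing it follows (the Size(15)/protocol(0x00000300) rendering above is the proxy's own log formatting, not part of the protocol):

```cpp
#include <arpa/inet.h>  // htonl (POSIX)
#include <cstdint>
#include <cstdio>
#include <cstring>
#include <string>
#include <vector>

// Sketch: serialize a PostgreSQL v3 startup message for the given user.
// Wire layout: int32 total length (including itself), int32 protocol
// version 196608 (3.0 = 0x00030000), then NUL-terminated key/value pairs,
// closed by one extra NUL. With just user=user the payload after the
// length field is 15 bytes, which matches the Size(15) in the trace above.
std::vector<uint8_t> BuildStartupMessage(const std::string& user) {
    std::vector<uint8_t> msg;
    auto putU32 = [&msg](uint32_t v) {
        uint32_t be = htonl(v);
        const uint8_t* p = reinterpret_cast<const uint8_t*>(&be);
        msg.insert(msg.end(), p, p + 4);
    };
    auto putCStr = [&msg](const std::string& s) {
        msg.insert(msg.end(), s.begin(), s.end());
        msg.push_back('\0');
    };

    putU32(0);        // placeholder for total length
    putU32(196608);   // protocol 3.0
    putCStr("user");
    putCStr(user);
    msg.push_back('\0');  // terminator of the parameter list

    uint32_t len = htonl(static_cast<uint32_t>(msg.size()));
    std::memcpy(msg.data(), &len, 4);  // patch in the real length
    return msg;
}

int main() {
    auto msg = BuildStartupMessage("user");
    std::printf("startup packet: %zu bytes\n", msg.size());  // 19 = 4 + 15
}
```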
TPartitionKeyRangeSequenceTest::ValidFivePartitions [GOOD] >> TPartitionKeyRangeSequenceTest::InvalidFullCoverLo [GOOD] >> TPartitionKeyRangeSequenceTest::ValidSinglePartition [GOOD] >> TPartitionKeyRangeSequenceTest::ValidSequence [GOOD] >> TPartitionKeyRangeSequenceTest::InvalidOverlap [GOOD] >> TPartitionKeyRangeSequenceTest::EmptyInput [GOOD] >> TPartitionKeyRangeSequenceTest::InvalidOrder [GOOD] >> CachingDnsResolver::ResolveCachingV4 [GOOD] >> CachingDnsResolver::EventualTimeout >> TAllocatorSuite/WithAllPools::ThreadsDestroy/DummyMemPool >> Mvp::OidcWhoamiBadYdb200 [GOOD] >> Mvp::OidcWhoamiBadYdbServiceAccount200 |84.7%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/vdisk/hullop/ut/unittest >> ReadBatcher::Range [GOOD] |84.7%| [TM] {BAZEL_UPLOAD} ydb/core/blobstorage/vdisk/hullop/ut/unittest >> CachingDnsResolver::EventualTimeout [GOOD] >> CachingDnsResolver::MultipleRequestsAndHosts >> CachingDnsResolver::MultipleRequestsAndHosts [GOOD] >> CachingDnsResolver::DisabledIPv6 [GOOD] >> CachingDnsResolver::DisabledIPv4 >> TestFileCache::Find [GOOD] >> TestFileCache::Evict >> TestFileCache::Add [GOOD] >> TestFileCache::Create [GOOD] >> ActionParsingTest::ToAndFromStringAreConsistent [GOOD] >> ActionParsingTest::ActionsForQueueTest [GOOD] >> ActionParsingTest::BatchActionTest [GOOD] >> ActionParsingTest::ActionsForMessageTest [GOOD] >> ActionParsingTest::FastActionsTest [GOOD] >> HttpCountersTest::CountersAggregationTest [GOOD] >> LazyCounterTest::LazyCounterTest [GOOD] >> LazyCounterTest::AggregationLazyTest >> CachingDnsResolver::DisabledIPv4 [GOOD] >> CachingDnsResolver::PoisonPill [GOOD] >> DnsResolver::ResolveLocalHost [GOOD] >> DnsResolver::ResolveYandexRu >> Mvp::OidcWhoamiBadYdbServiceAccount200 [GOOD] >> Mvp::OidcWhoamiNoInfo500 >> TestFileCache::Evict [GOOD] >> TestFileCache::AcquireRelease [GOOD] >> TestFileCache::Acquire [GOOD] >> TestFileCache::AcquireSingleFile2Times [GOOD] >> TestFileCache::ContainsReleased >> LazyCounterTest::AggregationLazyTest [GOOD] >> LazyCounterTest::AggregationNonLazyTest [GOOD] >> LazyCounterTest::HistogramAggregationTest [GOOD] >> MessageAttributeValidationTest::MessageAttributeValidationTest [GOOD] >> MessageBodyValidationTest::MessageBodyValidationTest [GOOD] >> MeteringCountersTest::CountersAggregationTest [GOOD] >> NameValidationTest::NameValidationTest [GOOD] >> QueueAttributes::BasicStdTest [GOOD] >> QueueAttributes::BasicFifoTest [GOOD] >> QueueAttributes::BasicClampTest [GOOD] >> QueueCountersTest::InsertCountersTest [GOOD] >> QueueCountersTest::RemoveQueueCountersNonLeaderWithoutFolderTest [GOOD] >> QueueCountersTest::RemoveQueueCountersLeaderWithoutFolderTest [GOOD] >> QueueCountersTest::RemoveQueueCountersNonLeaderWithFolderTest [GOOD] >> QueueCountersTest::RemoveQueueCountersLeaderWithFolderTest [GOOD] >> QueueCountersTest::CountersAggregationTest [GOOD] >> QueueCountersTest::CountersAggregationCloudTest >> DnsResolver::ResolveYandexRu [GOOD] >> DnsResolver::GetAddrYandexRu >> TestFileCache::ContainsReleased [GOOD] >> TestFileCache::AddAfterRemoveAcquired [GOOD] >> QueueCountersTest::CountersAggregationCloudTest [GOOD] >> RedrivePolicy::RedrivePolicyValidationTest [GOOD] >> RedrivePolicy::RedrivePolicyToJsonTest [GOOD] >> RedrivePolicy::RedrivePolicyArnValidationTest [GOOD] >> SecureProtobufPrinterTest::MessageBody [GOOD] >> SecureProtobufPrinterTest::Tokens [GOOD] >> StringValidationTest::IsAlphaNumAndPunctuationTest [GOOD] >> UserCountersTest::DisableCountersTest >> DnsResolver::GetAddrYandexRu [GOOD] >> 
UserCountersTest::DisableCountersTest [GOOD] >> DnsResolver::ResolveTimeout >> UserCountersTest::RemoveUserCountersTest [GOOD] >> UserCountersTest::CountersAggregationTest [GOOD] >> TAllocatorSuite32::AllocationRandSizeWithReclaimOneThread [FAIL] >> TAllocatorSuite32::AllocationWithReclaimTwoThreads >> Mvp::OidcWhoamiNoInfo500 [GOOD] >> Mvp::OidcWhoamiForward307 >> HttpProxy::BasicParsing [GOOD] >> HttpProxy::HeaderParsingError_Request [GOOD] >> HttpProxy::HeaderParsingError_Response [GOOD] >> HttpProxy::GetWithSpecifiedContentType [GOOD] >> HttpProxy::BasicParsingChunkedBodyRequest [GOOD] >> HttpProxy::BasicParsingChunkedBigBodyRequest >> TBatchedVecTest::TestToStringInt [GOOD] >> TBatchedVecTest::TestOutputTOutputType [GOOD] >> BufferWithGaps::Basic [GOOD] >> BufferWithGaps::IsReadable [GOOD] >> PtrTest::Test1 [GOOD] >> Mvp::OidcWhoamiForward307 [GOOD] >> Mvp::OidcYandexIgnoresWhoamiExtension >> HttpProxy::BasicParsingChunkedBigBodyRequest [GOOD] >> HttpProxy::BasicParsingBigHeadersRequest [GOOD] >> HttpProxy::BrokenParsingMethod [GOOD] >> HttpProxy::BrokenParsingChunkedBodyRequest [GOOD] >> HttpProxy::BasicPost [GOOD] >> HttpProxy::BasicParsingChunkedBodyResponse [GOOD] >> HttpProxy::InvalidParsingChunkedBody [GOOD] >> HttpProxy::AdvancedParsingChunkedBody [GOOD] >> HttpProxy::CreateCompressedResponse [GOOD] >> HttpProxy::BasicPartialParsing [GOOD] >> HttpProxy::BasicPartialParsingChunkedBody [GOOD] >> HttpProxy::BasicParsingContentLength0 [GOOD] >> HttpProxy::AdvancedParsing [GOOD] >> HttpProxy::AdvancedPartialParsing [GOOD] >> HttpProxy::BasicRenderBodyWithHeadersAndCookies [GOOD] >> HttpProxy::BasicRenderOutgoingResponse [GOOD] >> HttpProxy::BasicRunning4 |84.7%| [TS] {asan, default-linux-x86_64, release} ydb/core/persqueue/public/partition_key_range/ut/unittest >> TPartitionKeyRangeSequenceTest::InvalidOrder [GOOD] |84.7%| [TS] {asan, default-linux-x86_64, release} ydb/library/actors/async/ut/unittest >> WaitForEvent::CancelThenDeliver [GOOD] >> Backpressure::MonteCarlo >> HttpProxy::BasicRunning4 [GOOD] >> HttpProxy::BasicRunning6 >> Mvp::OidcYandexIgnoresWhoamiExtension [GOOD] >> Mvp::GetAddressWithoutPort [GOOD] >> HttpProxy::BasicRunning6 [GOOD] >> HttpProxy::TlsRunning |84.7%| [TS] {BAZEL_UPLOAD} ydb/core/persqueue/public/partition_key_range/ut/unittest |84.7%| [TS] {BAZEL_UPLOAD} ydb/library/actors/async/ut/unittest |84.7%| [TS] {RESULT} ydb/core/persqueue/public/partition_key_range/ut/unittest >> TAllocatorSuite/WithAllPools::PageAlligned/SlotMemPool >> EncryptedFileSerializerTest::SerializeWholeFileAtATime [GOOD] >> EncryptedFileSerializerTest::WrongParametersForSerializer [GOOD] >> EncryptedFileSerializerTest::WrongParametersForDeserializer [GOOD] >> EncryptedFileSerializerTest::SplitOnBlocks >> DnsResolver::ResolveTimeout [GOOD] >> DnsResolver::ResolveGracefulStop [GOOD] >> OnDemandDnsResolver::ResolveLocalHost |84.7%| [TS] {RESULT} ydb/library/actors/async/ut/unittest |84.7%| [TS] {asan, default-linux-x86_64, release} ydb/library/yql/providers/dq/runtime/ut/unittest >> TestFileCache::AddAfterRemoveAcquired [GOOD] >> HttpProxy::TlsRunning [GOOD] >> HttpProxy::TooLongURL >> OnDemandDnsResolver::ResolveLocalHost [GOOD] >> TGenerateQueueIdTests::MakeQueueIdBasic [GOOD] >> TParseParamsTests::CreateUser [GOOD] >> TParseParamsTests::ChangeMessageVisibilityBatchRequest [GOOD] >> TParseParamsTests::DeleteMessageBatchRequest [GOOD] >> TParseParamsTests::MessageBody [GOOD] >> TParseParamsTests::SendMessageBatchRequest [GOOD] >> TParseParamsTests::DeleteQueueBatchRequest [GOOD] >> 
TParseParamsTests::PurgeQueueBatchRequest [GOOD] >> TParseParamsTests::GetQueueAttributesBatchRequest [GOOD] >> TParseParamsTests::UnnumberedAttribute [GOOD] >> TParseParamsTests::UnnumberedAttributeName [GOOD] >> TParseParamsTests::FailsOnInvalidDeduplicationId [GOOD] >> TParseParamsTests::FailsOnInvalidGroupId [GOOD] >> TParseParamsTests::FailsOnInvalidReceiveRequestAttemptId [GOOD] >> TParseParamsTests::FailsOnInvalidMaxNumberOfMessages [GOOD] >> TParseParamsTests::FailsOnInvalidWaitTime [GOOD] >> TParseParamsTests::FailsOnInvalidDelaySeconds [GOOD] >> ArrowTest::Basic [GOOD] >> SizeCalcer::SimpleStrings [GOOD] >> SizeCalcer::DictionaryStrings [GOOD] >> SizeCalcer::ZeroSimpleStrings [GOOD] >> SizeCalcer::ZeroDictionaryStrings [GOOD] >> SizeCalcer::SimpleInt64 [GOOD] >> SizeCalcer::SimpleTimestamp [GOOD] >> SizeCalcer::NestedFixedSizeList |84.7%| [TS] {RESULT} ydb/library/yql/providers/dq/runtime/ut/unittest |84.7%| [TS] {BAZEL_UPLOAD} ydb/library/yql/providers/dq/runtime/ut/unittest
------- [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/vdisk/hullop/ut/unittest >> TBlobStorageHullCompactDeferredQueueTest::Basic [GOOD]
Test command err:
STEP 1
STEP 2
StringToId# 63 numItems# 110271
>> HttpProxy::TooLongURL [GOOD] >> HttpProxy::TooLongHeader >> EncryptedFileSerializerTest::SplitOnBlocks [GOOD] >> EncryptedFileSerializerTest::EmptyFile [GOOD] >> EncryptedFileSerializerTest::ReadPartial [GOOD] >> EncryptedFileSerializerTest::DeleteLastByte [GOOD] >> EncryptedFileSerializerTest::AddByte [GOOD] >> EncryptedFileSerializerTest::RemoveLastBlock [GOOD] >> EncryptedFileSerializerTest::ChangeAnyByte |84.7%| [TM] {BAZEL_UPLOAD} ydb/core/blobstorage/vdisk/hullop/ut/unittest |84.7%| [TS] {asan, default-linux-x86_64, release} ydb/core/ymq/base/ut/unittest >> UserCountersTest::CountersAggregationTest [GOOD] >> HttpProxy::TooLongHeader [GOOD] >> HttpProxy::HeaderWithoutAColon >> SizeCalcer::NestedFixedSizeList [GOOD] >> SizeCalcer::NestedList |84.7%| [TS] {BAZEL_UPLOAD} ydb/core/ymq/base/ut/unittest |84.7%| [TS] {asan, default-linux-x86_64, release} ydb/core/blobstorage/base/ut/gtest >> PtrTest::Test1 [GOOD] >> HttpProxy::HeaderWithoutAColon [GOOD] >> HttpProxy::TooManyRequests >> EncryptedFileSerializerTest::ChangeAnyByte [GOOD] >> SizeCalcer::NestedList [GOOD] >> EncryptedFileSerializerTest::BigHeaderSize [GOOD] >> EncryptedFileSerializerTest::BigBlockSize [GOOD] >> EncryptedFileSerializerTest::RestoreFromState [GOOD] >> EncryptedFileSerializerTest::IVSerialization [GOOD] >> PathsNormalizationTest::NormalizeItemPath [GOOD] >> PathsNormalizationTest::NormalizeItemPrefix [GOOD] >> PathsNormalizationTest::NormalizeExportPrefix [GOOD] >> SizeCalcer::NestedStruct [GOOD] >> SizeCalcer::NestedSparseUnion >> TDqHashCombineTest::TestBlockModeNoInput >> HttpProxy::TooManyRequests [GOOD] >> HttpProxy::ChunkedResponse1 |84.7%| [TS] {BAZEL_UPLOAD} ydb/core/blobstorage/base/ut/gtest |84.7%| [TS] {RESULT} ydb/core/blobstorage/base/ut/gtest |84.7%| [TS] {RESULT} ydb/core/ymq/base/ut/unittest >> SizeCalcer::NestedSparseUnion [GOOD] >> SplitterTest::SimpleNoFrac [GOOD] >> SplitterTest::Simple [GOOD] >> SplitterTest::SimpleBig [GOOD] >> SplitterTest::Generic >> HttpProxy::ChunkedResponse1 [GOOD] >> HttpProxy::ChunkedResponse2 >> TDqHashCombineTest::TestBlockModeNoInput [GOOD] >> TDqHashCombineTest::TestBlockModeSingleRow >> HttpRouter::Basic [GOOD] >> ClosedIntervalSet::Union >> HttpProxy::ChunkedResponse2 [GOOD] >> HttpProxy::ChunkedResponse3 >> TDqHashCombineTest::TestBlockModeSingleRow [GOOD]
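Several HttpProxy cases in this stretch (BasicParsingChunkedBodyRequest, InvalidParsingChunkedBody, ChunkedResponse1-3) exercise HTTP/1.1 chunked transfer framing. The framing itself is public spec; for reference, a minimal decoder over a complete in-memory body (the real parser in ydb/library/actors/http is incremental and actor-driven, which this sketch does not attempt):

```cpp
#include <cstdlib>
#include <optional>
#include <string>

// Minimal decoder for a complete HTTP/1.1 chunked body:
//   <hex-size>\r\n <size bytes> \r\n ... 0\r\n\r\n
// Chunk extensions and trailers are not supported. std::nullopt means
// malformed input, the situation InvalidParsingChunkedBody-style tests
// are about.
std::optional<std::string> DecodeChunked(const std::string& in) {
    std::string out;
    std::size_t pos = 0;
    for (;;) {
        const std::size_t eol = in.find("\r\n", pos);
        if (eol == std::string::npos) return std::nullopt;
        char* end = nullptr;
        const unsigned long size = std::strtoul(in.c_str() + pos, &end, 16);
        if (end == in.c_str() + pos) return std::nullopt;    // no hex digits
        pos = eol + 2;
        if (pos + size + 2 > in.size()) return std::nullopt; // body too short
        if (in.compare(pos + size, 2, "\r\n") != 0) return std::nullopt;
        if (size == 0) return out;                           // final chunk
        out.append(in, pos, size);
        pos += size + 2;
    }
}
```

For example, DecodeChunked("4\r\nWiki\r\n0\r\n\r\n") yields "Wiki", while a body missing its terminating zero chunk yields std::nullopt.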
TDqHashCombineTest::TestBlockModeMultiBlocks >> HttpProxy::ChunkedResponse3 [GOOD] >> HttpProxy::StreamingResponse1 >> Splitter::Simple >> HttpProxy::StreamingResponse1 [GOOD] |84.7%| [TS] {asan, default-linux-x86_64, release} ydb/core/ymq/ut/unittest >> TParseParamsTests::FailsOnInvalidDelaySeconds [GOOD] >> TAllocatorSuite/WithAllPools::PageAlligned/DummyMemPool ------- [TS] {asan, default-linux-x86_64, release} ydb/mvp/oidc_proxy/ut/unittest >> Mvp::GetAddressWithoutPort [GOOD] Test command err: 2026-01-08T15:24:59.635175Z :MVP DEBUG: oidc_protected_page.cpp:119: Forward user request bypass OIDC 2026-01-08T15:24:59.635499Z :MVP DEBUG: oidc_protected_page.cpp:38: Incoming response for protected resource: 200 2026-01-08T15:24:59.664829Z :MVP DEBUG: oidc_protected_page.cpp:119: Forward user request bypass OIDC 2026-01-08T15:24:59.665318Z :MVP DEBUG: oidc_protected_page.cpp:38: Incoming response for protected resource: 200 2026-01-08T15:24:59.674705Z :MVP DEBUG: oidc_protected_page.cpp:119: Forward user request bypass OIDC 2026-01-08T15:24:59.675170Z :MVP DEBUG: oidc_protected_page.cpp:38: Incoming response for protected resource: 204 2026-01-08T15:24:59.685624Z :MVP DEBUG: oidc_protected_page.cpp:119: Forward user request bypass OIDC 2026-01-08T15:24:59.685945Z :MVP DEBUG: oidc_protected_page.cpp:38: Incoming response for protected resource: 204 2026-01-08T15:24:59.698020Z :MVP DEBUG: oidc_protected_page.cpp:119: Forward user request bypass OIDC 2026-01-08T15:24:59.700068Z :MVP DEBUG: oidc_protected_page.cpp:38: Incoming response for protected resource: 204 2026-01-08T15:24:59.712130Z :MVP DEBUG: oidc_protected_page.cpp:119: Forward user request bypass OIDC 2026-01-08T15:24:59.718010Z :MVP DEBUG: oidc_protected_page.cpp:38: Incoming response for protected resource: 204 2026-01-08T15:24:59.778033Z :MVP DEBUG: oidc_protected_page_yandex.cpp:25: SessionService.Check(): OK 2026-01-08T15:24:59.778106Z :MVP DEBUG: oidc_protected_page.cpp:119: Forward user request bypass OIDC 2026-01-08T15:24:59.778413Z :MVP DEBUG: oidc_protected_page.cpp:38: Incoming response for protected resource: 400 2026-01-08T15:24:59.778511Z :MVP DEBUG: oidc_protected_page.cpp:143: Try to send request to HTTPS port 2026-01-08T15:24:59.778554Z :MVP DEBUG: oidc_protected_page.cpp:119: Forward user request bypass OIDC 2026-01-08T15:24:59.778728Z :MVP DEBUG: oidc_protected_page.cpp:38: Incoming response for protected resource: 200 2026-01-08T15:24:59.785316Z :MVP DEBUG: oidc_protected_page_yandex.cpp:25: SessionService.Check(): OK 2026-01-08T15:24:59.785405Z :MVP DEBUG: oidc_protected_page.cpp:119: Forward user request bypass OIDC 2026-01-08T15:24:59.785639Z :MVP DEBUG: oidc_protected_page.cpp:38: Incoming response for protected resource: 400 2026-01-08T15:24:59.840649Z :MVP DEBUG: oidc_protected_page_yandex.cpp:25: SessionService.Check(): OK 2026-01-08T15:24:59.840780Z :MVP DEBUG: oidc_protected_page.cpp:119: Forward user request bypass OIDC 2026-01-08T15:24:59.841194Z :MVP DEBUG: oidc_protected_page.cpp:38: Incoming response for protected resource: 307 2026-01-08T15:24:59.850418Z :MVP DEBUG: oidc_protected_page_yandex.cpp:25: SessionService.Check(): OK 2026-01-08T15:24:59.850508Z :MVP DEBUG: oidc_protected_page.cpp:119: Forward user request bypass OIDC 2026-01-08T15:24:59.850799Z :MVP DEBUG: oidc_protected_page.cpp:38: Incoming response for protected resource: 302 2026-01-08T15:24:59.863172Z :MVP DEBUG: oidc_protected_page_yandex.cpp:25: SessionService.Check(): OK 2026-01-08T15:24:59.863260Z :MVP DEBUG: 
oidc_protected_page.cpp:119: Forward user request bypass OIDC 2026-01-08T15:24:59.863469Z :MVP DEBUG: oidc_protected_page.cpp:38: Incoming response for protected resource: 302 2026-01-08T15:24:59.871138Z :MVP DEBUG: oidc_protected_page_yandex.cpp:25: SessionService.Check(): OK 2026-01-08T15:24:59.871213Z :MVP DEBUG: oidc_protected_page.cpp:119: Forward user request bypass OIDC 2026-01-08T15:24:59.871454Z :MVP DEBUG: oidc_protected_page.cpp:38: Incoming response for protected resource: 302 2026-01-08T15:24:59.878869Z :MVP DEBUG: oidc_protected_page_yandex.cpp:25: SessionService.Check(): OK 2026-01-08T15:24:59.878975Z :MVP DEBUG: oidc_protected_page.cpp:119: Forward user request bypass OIDC 2026-01-08T15:24:59.879275Z :MVP DEBUG: oidc_protected_page.cpp:38: Incoming response for protected resource: 302 2026-01-08T15:24:59.998961Z :MVP DEBUG: oidc_protected_page_nebius.cpp:24: Start OIDC process 2026-01-08T15:24:59.999499Z :MVP DEBUG: openid_connect.cpp:260: Using cookie (__Host_session_cookie_79632E6F617574682E7964622D766965776572: c2Vz****aWU= (CE0CB168)) 2026-01-08T15:24:59.999560Z :MVP DEBUG: oidc_protected_page_nebius.cpp:96: Exchange session token 2026-01-08T15:24:59.999922Z :MVP DEBUG: oidc_protected_page_nebius.cpp:53: Getting access token: 200 OK 2026-01-08T15:24:59.999990Z :MVP DEBUG: oidc_protected_page.cpp:119: Forward user request bypass OIDC 2026-01-08T15:25:00.000201Z :MVP DEBUG: oidc_protected_page.cpp:38: Incoming response for protected resource: 200 2026-01-08T15:25:00.466412Z :MVP DEBUG: oidc_protected_page_yandex.cpp:33: SessionService.Check(): 401 2026-01-08T15:25:00.706403Z :MVP DEBUG: oidc_protected_page_yandex.cpp:33: SessionService.Check(): 400 2026-01-08T15:25:00.707244Z :MVP DEBUG: oidc_session_create.cpp:21: Restore oidc session 2026-01-08T15:25:00.707762Z :MVP DEBUG: oidc_session_create.cpp:71: Incoming response from authorization server: 200 2026-01-08T15:25:00.718043Z :MVP DEBUG: oidc_session_create_yandex.cpp:69: SessionService.Create(): OK 2026-01-08T15:25:00.731208Z :MVP DEBUG: oidc_protected_page_yandex.cpp:25: SessionService.Check(): OK 2026-01-08T15:25:00.731285Z :MVP DEBUG: oidc_protected_page.cpp:119: Forward user request bypass OIDC 2026-01-08T15:25:00.731606Z :MVP DEBUG: oidc_protected_page.cpp:38: Incoming response for protected resource: 200 2026-01-08T15:25:01.001425Z :MVP DEBUG: oidc_protected_page_yandex.cpp:33: SessionService.Check(): 400 2026-01-08T15:25:01.002339Z :MVP DEBUG: oidc_session_create.cpp:21: Restore oidc session 2026-01-08T15:25:01.006810Z :MVP DEBUG: oidc_session_create.cpp:71: Incoming response from authorization server: 200 2026-01-08T15:25:01.026399Z :MVP DEBUG: oidc_session_create_yandex.cpp:69: SessionService.Create(): OK 2026-01-08T15:25:01.044769Z :MVP DEBUG: oidc_protected_page_yandex.cpp:25: SessionService.Check(): OK 2026-01-08T15:25:01.044853Z :MVP DEBUG: oidc_protected_page.cpp:119: Forward user request bypass OIDC 2026-01-08T15:25:01.045213Z :MVP DEBUG: oidc_protected_page.cpp:38: Incoming response for protected resource: 200 2026-01-08T15:25:01.150897Z :MVP DEBUG: oidc_session_create.cpp:21: Restore oidc session 2026-01-08T15:25:01.151153Z :MVP DEBUG: oidc_session_create.cpp:51: Check state failed: Calculated digest is not equal expected digest 2026-01-08T15:25:01.397670Z :MVP DEBUG: oidc_session_create.cpp:21: Restore oidc session 2026-01-08T15:25:01.397881Z :MVP DEBUG: oidc_session_create.cpp:51: Check state failed: Calculated digest is not equal expected digest 2026-01-08T15:25:01.642591Z :MVP DEBUG: 
oidc_session_create.cpp:21: Restore oidc session 2026-01-08T15:25:01.643103Z :MVP DEBUG: oidc_session_create.cpp:71: Incoming response from authorization server: 200 2026-01-08T15:25:01.838429Z :MVP DEBUG: oidc_session_create_yandex.cpp:79: SessionService.Create(): 401 2026-01-08T15:25:02.140850Z :MVP DEBUG: oidc_session_create.cpp:21: Restore oidc session 2026-01-08T15:25:02.141534Z :MVP DEBUG: oidc_session_create.cpp:71: Incoming response from authorization server: 200 2026-01-08T15:25:02.274408Z :MVP DEBUG: oidc_session_create_yandex.cpp:79: SessionService.Create(): 400 2026-01-08T15:25:02.458886Z :MVP DEBUG: oidc_session_create.cpp:21: Restore oidc session 2026-01-08T15:25:02.459533Z :MVP DEBUG: oidc_session_create.cpp:71: Incoming response from authorization server: 200 2026-01-08T15:25:02.582391Z :MVP DEBUG: oidc_session_create_yandex.cpp:79: SessionService.Create(): 400 2026-01-08T15:25:02.827888Z :MVP DEBUG: oidc_session_create.cpp:21: Restore oidc session 2026-01-08T15:25:02.828410Z :MVP DEBUG: oidc_session_create.cpp:71: Incoming response from authorization server: 200 2026-01-08T15:25:02.950409Z :MVP DEBUG: oidc_session_create_yandex.cpp:79: SessionService.Create(): 412 2026-01-08T15:25:03.234403Z :MVP DEBUG: oidc_protected_page_yandex.cpp:33: SessionService.Check(): 400 2026-01-08T15:25:03.265096Z :MVP DEBUG: oidc_protected_page_yandex.cpp:33: SessionService.Check(): 400 2026-01-08T15:25:03.285143Z :MVP DEBUG: oidc_protected_page_yandex.cpp:33: SessionService.Check(): 400 2026-01-08T15:25:03.403777Z :MVP DEBUG: oidc_protected_page.cpp:119: Forward user request bypass OIDC 2026-01-08T15:25:03.404023Z :MVP DEBUG: extension.cpp:14: Can not process request to protected resource: GET /ydb.viewer.page/counters HTTP/1.1 Host: oidcproxy.net Authorization: 2026-01-08T15:25:03.515078Z :MVP DEBUG: oidc_session_create.cpp:21: Restore oidc session 2026-01-08T15:25:03.515278Z :MVP DEBUG: oidc_session_create.cpp:43: Restore oidc context failed: Cannot find cookie ydb_oidc_cookie 2026-01-08T15:25:03.770519Z :MVP DEBUG: oidc_session_create.cpp:21: Restore oidc session 2026-01-08T15:25:03.770792Z :MVP DEBUG: oidc_session_create.cpp:51: Check state failed: Calculated digest is not equal expected digest 2026-01-08T15:25:03.909702Z :MVP DEBUG: oidc_impersonate_start_page_nebius.cpp:23: Start impersonation process 2026-01-08T15:25:03.909803Z :MVP DEBUG: openid_connect.cpp:260: Using cookie (__Host_session_cookie_636C69656E745F6964: c2Vz****aWU= (CE0CB168)) 2026-01-08T15:25:03.909847Z :MVP DEBUG: oidc_impersonate_start_page_nebius.cpp:49: Request impersonated token 2026-01-08T15:25:03.910182Z :MVP DEBUG: oidc_impersonate_start_page_nebius.cpp:100: Incoming response from authorization server: 200 2026-01-08T15:25:03.910281Z :MVP DEBUG: oidc_impersonate_start_page_nebius.cpp:89: Set impersonated cookie: (__Host_impersonated_cookie_636C69656E745F6964: aW1w****bg== (B126DD61)) 2026-01-08T15:25:04.129891Z :MVP DEBUG: oidc_impersonate_start_page_nebius.cpp:23: Start impersonation process 2026-01-08T15:25:04.129997Z :MVP DEBUG: openid_connect.cpp:260: Using cookie (__Host_session_cookie_636C69656E745F6964: c2Vz****aWU= (CE0CB168)) 2026-01-08T15:25:04.363815Z :MVP DEBUG: oidc_cleanup_page.cpp:20: Clear cookie: (__Host_impersonated_cookie_636C69656E745F6964) 2026-01-08T15:25:04.550102Z :MVP DEBUG: oidc_protected_page_nebius.cpp:24: Start OIDC process 2026-01-08T15:25:04.550191Z :MVP DEBUG: openid_connect.cpp:260: Using cookie (__Host_session_cookie_636C69656E745F6964: c2Vz****aWU= (CE0CB168)) 
2026-01-08T15:25:04.550243Z :MVP DEBUG: openid_connect.cpp:260: Using cookie (__Host_impersonated_cookie_636C69656E745F6964: aW1w****ZQ== (1A20D8C0)) 2026-01-08T15:25:04.550284Z :MVP DEBUG: oidc_protected_page_nebius.cpp:107: Exchange impersonated token 2026-01-08T15:25:04.590784Z :MVP DEBUG: oidc_protected_page_nebius.cpp:53: Getting access token: 200 OK 2026-01-08T15:25:04.590869Z :MVP DEBUG: oidc_protected_page.cpp:119: Forward user request bypass OIDC 2026-01-08T15:25:04.591059Z :MVP DEBUG: oidc_protected_page.cpp:38: Incoming response for protected resource: 200 2026-01-08T15:25:04.802676Z :MVP DEBUG: oidc_protected_page_nebius.cpp:24: Start OIDC process 2026-01-08T15:25:04.802792Z :MVP DEBUG: openid_connect.cpp:260: Using cookie (__Host_session_cookie_636C69656E745F6964: c2Vz****aWU= (CE0CB168)) 2026-01-08T15:25:04.802859Z :MVP DEBUG: openid_connect.cpp:260: Using cookie (__Host_impersonated_cookie_636C69656E745F6964: aW1w****ZQ== (1A20D8C0)) 2026-01-08T15:25:04.802908Z :MVP DEBUG: oidc_protected_page_nebius.cpp:107: Exchange impersonated token 2026-01-08T15:25:04.803212Z :MVP DEBUG: oidc_protected_page_nebius.cpp:53: Getting access token: 401 OK 2026-01-08T15:25:04.803249Z :MVP DEBUG: oidc_protected_page_nebius.cpp:65: Getting access token: {"error": "bad_token"} 2026-01-08T15:25:04.803289Z :MVP DEBUG: oidc_protected_page_nebius.cpp:121: Clear impersonated cookie (__Host_impersonated_cookie_636C69656E745F6964) and retry 2026-01-08T15:25:05.017929Z :MVP DEBUG: oidc_protected_page.cpp:119: Forward user request bypass OIDC 2026-01-08T15:25:05.018299Z :MVP DEBUG: oidc_protected_page.cpp:51: Incoming incomplete response for protected resource: 200 2026-01-08T15:25:05.018480Z :MVP DEBUG: oidc_protected_page.cpp:71: Incoming data chunk for protected resource: 59 bytes 2026-01-08T15:25:05.018661Z :MVP DEBUG: oidc_protected_page.cpp:71: Incoming data chunk for protected resource: 59 bytes 2026-01-08T15:25:05.018772Z :MVP DEBUG: oidc_protected_page.cpp:71: Incoming data chunk for protected resource: 14 bytes 2026-01-08T15:25:05.018842Z :MVP DEBUG: oidc_protected_page.cpp:71: Incoming data chunk for protected resource: 0 bytes 2026-01-08T15:25:05.285583Z :MVP DEBUG: oidc_protected_page.cpp:119: Forward user request bypass OIDC 2026-01-08T15:25:05.285838Z :MVP DEBUG: oidc_protected_page.cpp:51: Incoming incomplete response for protected resource: 200 2026-01-08T15:25:05.285976Z :MVP DEBUG: oidc_protected_page.cpp:71: Incoming data chunk for protected resource: 59 bytes 2026-01-08T15:25:05.286062Z :MVP DEBUG: oidc_protected_page.cpp:71: Incoming data chunk for protected resource: 59 bytes 2026-01-08T15:25:05.286126Z :MVP DEBUG: oidc_protected_page.cpp:71: Incoming data chunk for protected resource: 14 bytes 2026-01-08T15:25:05.286219Z :MVP DEBUG: oidc_protected_page.cpp:71: Incoming data chunk for protected resource: 0 bytes 2026-01-08T15:25:05.488063Z :MVP DEBUG: oidc_protected_page.cpp:119: Forward user request bypass OIDC 2026-01-08T15:25:05.615990Z :MVP DEBUG: oidc_protected_page.cpp:38: Incoming response for protected resource: 200 2026-01-08T15:25:05.646390Z :MVP DEBUG: extension_whoami.cpp:33: Whoami Extension Info: OK 2026-01-08T15:25:05.772546Z :MVP DEBUG: oidc_protected_page.cpp:119: Forward user request bypass OIDC 2026-01-08T15:25:05.840265Z :MVP DEBUG: oidc_protected_page.cpp:38: Incoming response for protected resource: 200 2026-01-08T15:25:05.926365Z :MVP DEBUG: extension_whoami.cpp:33: Whoami Extension Info: OK 2026-01-08T15:25:06.076136Z :MVP DEBUG: oidc_protected_page.cpp:119: 
Forward user request bypass OIDC 2026-01-08T15:25:06.114668Z :MVP DEBUG: oidc_protected_page.cpp:38: Incoming response for protected resource: 200 TProfileServiceMock Get: Invalid or missing token: Bearer bad-token 2026-01-08T15:25:06.236172Z :MVP DEBUG: extension_whoami.cpp:40: Whoami Extension Info 401: Invalid or missing token, 2026-01-08T15:25:06.341987Z :MVP DEBUG: oidc_protected_page.cpp:119: Forward user request bypass OIDC 2026-01-08T15:25:06.345816Z :MVP DEBUG: oidc_protected_page.cpp:38: Incoming response for protected resource: 403 2026-01-08T15:25:06.522456Z :MVP DEBUG: extension_whoami.cpp:33: Whoami Extension Info: OK 2026-01-08T15:25:06.618310Z :MVP DEBUG: oidc_protected_page.cpp:119: Forward user request bypass OIDC 2026-01-08T15:25:06.671345Z :MVP DEBUG: oidc_protected_page.cpp:38: Incoming response for protected resource: 403 2026-01-08T15:25:06.754402Z :MVP DEBUG: extension_whoami.cpp:33: Whoami Extension Info: OK 2026-01-08T15:25:06.893071Z :MVP DEBUG: oidc_protected_page.cpp:119: Forward user request bypass OIDC 2026-01-08T15:25:07.079736Z :MVP DEBUG: oidc_protected_page.cpp:38: Incoming response for protected resource: 403 TProfileServiceMock Get: Invalid or missing token: Bearer bad-token 2026-01-08T15:25:07.108540Z :MVP DEBUG: extension_whoami.cpp:40: Whoami Extension Info 401: Invalid or missing token, 2026-01-08T15:25:07.232745Z :MVP DEBUG: oidc_protected_page.cpp:119: Forward user request bypass OIDC 2026-01-08T15:25:07.331306Z :MVP DEBUG: oidc_protected_page.cpp:38: Incoming response for protected resource: 307 2026-01-08T15:25:07.426284Z :MVP DEBUG: oidc_protected_page.cpp:119: Forward user request bypass OIDC 2026-01-08T15:25:07.470712Z :MVP DEBUG: oidc_protected_page.cpp:38: Incoming response for protected resource: 200 |84.7%| [TS] {asan, default-linux-x86_64, release} ydb/library/actors/dnsresolver/ut/unittest >> OnDemandDnsResolver::ResolveLocalHost [GOOD] >> HttpProxy::StreamingResponse2 >> benchmark_actors_async::BM_ManualPingActor/process_time [GOOD] >> benchmark_actors_async::BM_AsyncPingActor/process_time [GOOD] >> benchmark_actors_async::BM_ManualYieldActor/process_time [GOOD] >> benchmark_actors_async::BM_AsyncYieldActor/process_time [GOOD] >> benchmark_actors_async::BM_CallAsync/process_time [GOOD] >> benchmark_actors_async::BM_RescheduleRunnableAsync/process_time [GOOD] >> HttpProxy::StreamingResponse2 [GOOD] >> HttpProxy::StreamingResponse3 >> TRangeTreap::Simple [GOOD] >> TRangeTreap::Sequential |84.7%| [TS] {BAZEL_UPLOAD} ydb/core/ymq/ut/unittest |84.8%| [TS] {BAZEL_UPLOAD} ydb/mvp/oidc_proxy/ut/unittest |84.8%| [TS] {BAZEL_UPLOAD} ydb/library/actors/dnsresolver/ut/unittest >> HttpProxy::StreamingResponse3 [GOOD] >> HttpProxy::StreamingFatResponse1 |84.8%| [TS] {RESULT} ydb/core/ymq/ut/unittest |84.8%| [TS] {RESULT} ydb/library/actors/dnsresolver/ut/unittest >> TPathTests::NormalizeEmpty [GOOD] >> TPathTests::NormalizeSlashes [GOOD] >> TPathTests::NormalizeNoSlashes [GOOD] >> TPathTests::NormalizeWithSlashes [GOOD] >> TPathTests::TestRegexFromWildcards [GOOD] >> HttpProxy::StreamingFatResponse1 [GOOD] >> HttpProxy::StreamingCompressedFatResponse1 >> MetadataConversion::MakeAuthTest [GOOD] >> MetadataConversion::ConvertingExternalSourceMetadata [GOOD] |84.8%| [TS] {RESULT} ydb/mvp/oidc_proxy/ut/unittest >> HttpProxy::StreamingCompressedFatResponse1 [GOOD] >> HttpProxy::StreamingCompressedFatRandomResponse1 >> Splitter::Simple [GOOD] >> Splitter::Small |84.8%| [TA] $(B)/ydb/core/blobstorage/vdisk/hullop/ut/test-results/unittest/{meta.json ... 
results_accumulator.log} >> Splitter::Small [GOOD] >> Splitter::Minimal [GOOD] >> Splitter::Trivial >> PushdownTest::NoFilter >> Splitter::Trivial [GOOD] >> Splitter::BigAndSmall >> HttpProxy::StreamingCompressedFatRandomResponse1 [GOOD] >> HttpProxy::StreamingResponseWithProgress1 |84.8%| [TA] {RESULT} $(B)/ydb/core/blobstorage/vdisk/hullop/ut/test-results/unittest/{meta.json ... results_accumulator.log} |84.8%| [TA] {BAZEL_UPLOAD} $(B)/ydb/core/blobstorage/vdisk/hullop/ut/test-results/unittest/{meta.json ... results_accumulator.log} >> PushdownTest::NoFilter [GOOD] |84.8%| [TS] {asan, default-linux-x86_64, release} ydb/core/fq/libs/test_connection/ut/unittest |84.8%| [TS] {asan, default-linux-x86_64, release} ydb/core/backup/common/ut/unittest >> PathsNormalizationTest::NormalizeExportPrefix [GOOD]
------- [TS] {asan, default-linux-x86_64, release} ydb/library/actors/async/benchmark/benchmark_actors_async >> benchmark_actors_async::BM_RescheduleRunnableAsync/process_time [GOOD]
Test command err:
2026-01-08T15:24:58+00:00
Running /home/runner/.ya/build/build_root/663n/00353a/ydb/library/actors/async/benchmark/benchmark_actors_async
Run on (64 X 2000 MHz CPU s)
CPU Caches:
  L1 Data 32 KiB (x64)
  L1 Instruction 32 KiB (x64)
  L2 Unified 4096 KiB (x32)
  L3 Unified 16384 KiB (x2)
Load Average: 154.07, 78.65, 37.26
***WARNING*** ASLR is enabled, the results may have unreproducible noise in them.
>> PushdownTest::Equal >> HttpProxy::StreamingResponseWithProgress1 [GOOD] >> HttpProxy::RequestAfter307 >> PushdownTest::Equal [GOOD] |84.8%| [TS] {RESULT} ydb/library/actors/async/benchmark/benchmark_actors_async |84.8%| [TS] {RESULT} ydb/core/fq/libs/test_connection/ut/unittest >> Splitter::BigAndSmall [GOOD] >> Splitter::CritSmallPortions >> HttpProxy::RequestAfter307 [GOOD] >> TDqHashCombineTest::TestBlockModeMultiBlocks [GOOD] >> TDqHashCombineTest::TestWideModeNoInput >> TBsLocalRecovery::ChaoticWriteRestartHugeIncreased [GOOD] |84.8%| [TS] {BAZEL_UPLOAD} ydb/core/fq/libs/test_connection/ut/unittest |84.8%| [TS] {asan, default-linux-x86_64, release} ydb/core/public_http/ut/unittest >> HttpRouter::Basic [GOOD] >> ReadOnlyVDisk::TestStorageLoad [GOOD] |84.8%| [TS] {BAZEL_UPLOAD} ydb/core/backup/common/ut/unittest |84.8%| [TS] {BAZEL_UPLOAD} ydb/library/actors/async/benchmark/benchmark_actors_async |84.8%| [TS] {BAZEL_UPLOAD} ydb/core/public_http/ut/unittest >> TDqHashCombineTest::TestWideModeNoInput [GOOD] >> TDqHashCombineTest::TestWideModeSingleRow >> MdbEndpoingGenerator::Legacy [GOOD] >> MdbEndpoingGenerator::Generic_NoTransformHost [GOOD] >> MdbEndpoingGenerator::Generic_WithTransformHost [GOOD] >> PushdownTest::NotEqualInt32Int64 |84.8%| [TS] {RESULT} ydb/core/public_http/ut/unittest |84.8%| [TS] {RESULT} ydb/core/backup/common/ut/unittest |84.8%| [TS] {asan, default-linux-x86_64, release} ydb/library/yql/providers/s3/object_listers/ut/unittest >> TPathTests::TestRegexFromWildcards [GOOD] >> TDqHashCombineTest::TestWideModeSingleRow [GOOD] >> TDqHashCombineTest::TestWideModeMultiRows >> PushdownTest::NotEqualInt32Int64 [GOOD] |84.8%| [TS] {BAZEL_UPLOAD} ydb/library/yql/providers/s3/object_listers/ut/unittest |84.8%| [TS] {RESULT} ydb/library/yql/providers/s3/object_listers/ut/unittest |84.8%| [TS] {asan, default-linux-x86_64, release} ydb/core/kqp/gateway/ut/gtest >> MetadataConversion::ConvertingExternalSourceMetadata [GOOD] >> TAllocatorSuite/WithAllPools::RcBufDetach/SlotMemPool [FAIL] >> TAllocatorSuite/WithAllPools::RcBufDetach/DummyMemPool [FAIL]
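The benchmark_actors_async block above is the standard Google Benchmark preamble (binary path, CPU topology, cache sizes, load average, ASLR warning). The /process_time suffix on the BM_* names reported in this log corresponds to benchmarks registered to measure process CPU time rather than wall time. A minimal registration example against the public google/benchmark API (the actual actor-driving benchmark bodies in ydb/library/actors/async/benchmark are not reproduced here):

```cpp
#include <benchmark/benchmark.h>

// Toy stand-in for the actor ping/yield benchmarks named in the log: the
// real BM_* functions drive actors, while this one only spins a trivial
// loop so the registration pattern is visible.
static void BM_Example(benchmark::State& state) {
    long counter = 0;
    for (auto _ : state) {
        benchmark::DoNotOptimize(++counter);  // trivial measured work
    }
}

// MeasureProcessCPUTime() is what produces the "/process_time" suffix
// seen in the results above.
BENCHMARK(BM_Example)->MeasureProcessCPUTime();

BENCHMARK_MAIN();
```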
>> TAllocatorSuite/WithAllPools::RcBufDetachAfterMut/SlotMemPool [FAIL] >> TAllocatorSuite/WithAllPools::RcBufDetachAfterMut/DummyMemPool [FAIL] >> TAllocatorSuite/WithAllPools::RcBufDetachPageAlign/SlotMemPool [FAIL] >> TAllocatorSuite/WithAllPools::RcBufDetachPageAlign/DummyMemPool [FAIL] >> TAllocatorSuite/WithAllPools::DetachAndDestroySrc/SlotMemPool >> PushdownTest::TrueCoalesce [GOOD] >> Mvp::TokenatorRefreshMetadataTokenGood [GOOD] >> TAllocatorSuite/WithAllPools::DetachAndDestroySrc/SlotMemPool [FAIL] >> TAllocatorSuite/WithAllPools::DetachAndDestroySrc/DummyMemPool [FAIL] >> TRdmaLow/TCqMode::ReadInOneProcessIpV4/POLLING >> PushdownTest::CmpInt16AndInt32 >> ChannelScheduler::PriorityTraffic >> PushdownTest::CmpInt16AndInt32 [GOOD] |84.8%| [TS] {RESULT} ydb/core/kqp/gateway/ut/gtest |84.8%| [TS] {BAZEL_UPLOAD} ydb/core/kqp/gateway/ut/gtest >> ChannelScheduler::PriorityTraffic [GOOD] >> ConnectionChecker::CheckParams >> TSchedulerTest::SimpleFifo [GOOD] >> TSchedulerTest::ReserveForSmall [GOOD] >> TSchedulerTest::OneUserForCluster [GOOD] >> TSchedulerTest::DoNotReserveForSmall [GOOD] >> TSchedulerTest::NewbieFirst [GOOD] >> TSchedulerTest::FifoAfterOneHour [GOOD] >> TSchedulerTest::HalfWorkersForSmall [GOOD] >> TSchedulerTest::Use75PercentForLargeInNonOverload [GOOD] >> TSchedulerTest::UseOnlyHalfForLargeInOverload [GOOD] >> PushdownTest::PartialAnd >> TestIssuesGrouping::ShouldCountEveryIssue [GOOD] >> TestIssuesGrouping::ShouldRemoveOldIssues [GOOD] >> TestIssuesGrouping::ShouldRemoveIfMoreThanMaxIssues [GOOD] >> TestIssuesGrouping::ShouldRemoveTheOldestIfMoreThanMaxIssues [GOOD] >> TestIssuesGrouping::ShouldSaveSubIssues [GOOD] >> ResultReceiver::ReceiveStatus [GOOD] >> ResultReceiver::ReceiveError |84.8%| [TS] {asan, default-linux-x86_64, release} ydb/library/actors/http/ut/unittest >> HttpProxy::RequestAfter307 [GOOD] |84.8%| [TS] {asan, default-linux-x86_64, release} ydb/core/fq/libs/db_id_async_resolver_impl/ut/unittest >> MdbEndpoingGenerator::Generic_WithTransformHost [GOOD] >> PushdownTest::PartialAnd [GOOD] >> PushdownTest::PartialAndOneBranchPushdownable >> ResultReceiver::ReceiveError [GOOD] >> ResultReceiver::WriteQueue [GOOD] >> TDqHashCombineTest::TestWideModeMultiRows [GOOD] >> TDqHashCombineTest::TestWideModeAggregationNoInput |84.8%| [TS] {BAZEL_UPLOAD} ydb/library/actors/http/ut/unittest |84.8%| [TS] {BAZEL_UPLOAD} ydb/core/fq/libs/db_id_async_resolver_impl/ut/unittest >> PushdownTest::PartialAndOneBranchPushdownable [GOOD] >> ConnectionChecker::CheckParams [GOOD] >> ConnectionChecker::CheckReject |84.8%| [TS] {RESULT} ydb/core/fq/libs/db_id_async_resolver_impl/ut/unittest >> PushdownTest::NotNull [GOOD] >> ConnectionChecker::CheckReject [GOOD] >> DynamicProxy::RaceCheck1 |84.8%| [TS] {RESULT} ydb/library/actors/http/ut/unittest
------- [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/ut_blobstorage/ut_read_only_vdisk/unittest >> ReadOnlyVDisk::TestStorageLoad [GOOD]
Test command err:
RandomSeed# 13291375796751881640
Setting VDisk read-only to 1 for position 0
Invoking SetVDiskReadOnly for vdisk [82000000:1:0:0:0]
2026-01-08T15:24:22.760701Z 1 00h02m38.100000s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) Unavailable in read-only Sender# [1:5327:706]
[Roughly 45 further BS_SKELETON ERROR lines condensed: the same "PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) Unavailable in read-only Sender# [1:5327:706]" message repeats with wall-clock timestamps from 15:24:22.763856 through 15:24:26.065115 and simulation timestamps from 00h02m38.100000s through 00h02m43.000000s; the captured section ends mid-entry.]
Unavailable in read-only Sender# [1:5327:706] 2026-01-08T15:24:26.078257Z 1 00h02m43.100000s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) Unavailable in read-only Sender# [1:5327:706] 2026-01-08T15:24:26.078881Z 1 00h02m43.100000s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) Unavailable in read-only Sender# [1:5327:706] 2026-01-08T15:24:26.415677Z 1 00h02m43.200000s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) Unavailable in read-only Sender# [1:5327:706] 2026-01-08T15:24:26.452538Z 1 00h02m43.300000s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) Unavailable in read-only Sender# [1:5327:706] 2026-01-08T15:24:26.484299Z 1 00h02m43.400000s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) Unavailable in read-only Sender# [1:5327:706] 2026-01-08T15:24:26.530702Z 1 00h02m43.500000s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) Unavailable in read-only Sender# [1:5327:706] 2026-01-08T15:24:26.915027Z 1 00h02m43.600000s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) Unavailable in read-only Sender# [1:5327:706] 2026-01-08T15:24:26.956657Z 1 00h02m43.700000s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) Unavailable in read-only Sender# [1:5327:706] 2026-01-08T15:24:27.009930Z 1 00h02m43.800000s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) Unavailable in read-only Sender# [1:5327:706] 2026-01-08T15:24:27.079531Z 1 00h02m43.900000s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) Unavailable in read-only Sender# [1:5327:706] 2026-01-08T15:24:27.165176Z 1 00h02m44.000000s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) Unavailable in read-only Sender# [1:5327:706] 2026-01-08T15:24:27.239466Z 1 00h02m44.100000s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) Unavailable in read-only Sender# [1:5327:706] 2026-01-08T15:24:27.639099Z 1 00h02m44.200000s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) Unavailable in read-only Sender# [1:5327:706] 2026-01-08T15:24:27.688013Z 1 00h02m44.300000s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) Unavailable in read-only Sender# [1:5327:706] 2026-01-08T15:24:27.783913Z 1 00h02m44.500000s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) Unavailable in read-only Sender# [1:5327:706] 2026-01-08T15:24:27.970601Z 1 00h02m44.600000s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) Unavailable in read-only Sender# [1:5327:706] 2026-01-08T15:24:28.004291Z 1 00h02m44.800000s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) Unavailable in read-only Sender# [1:5327:706] 2026-01-08T15:24:28.031055Z 1 00h02m44.900000s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) Unavailable in read-only Sender# [1:5327:706] 2026-01-08T15:24:28.323462Z 1 00h02m45.100000s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) Unavailable in read-only Sender# [1:5327:706] 2026-01-08T15:24:28.341580Z 1 00h02m45.200000s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) Unavailable in read-only Sender# [1:5327:706] 2026-01-08T15:24:28.386401Z 1 00h02m45.300000s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) Unavailable in read-only Sender# [1:5327:706] 2026-01-08T15:24:28.416626Z 1 
00h02m45.500000s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) Unavailable in read-only Sender# [1:5327:706] 2026-01-08T15:24:28.436153Z 1 00h02m45.600000s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) Unavailable in read-only Sender# [1:5327:706] 2026-01-08T15:24:28.719071Z 1 00h02m45.800000s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) Unavailable in read-only Sender# [1:5327:706] 2026-01-08T15:24:28.747411Z 1 00h02m45.900000s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) Unavailable in read-only Sender# [1:5327:706] 2026-01-08T15:24:28.963631Z 1 00h02m46.100000s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) Unavailable in read-only Sender# [ ... k read-only to 0 for position 4 Invoking SetVDiskReadOnly for vdisk [82000000:1:0:4:0] Setting VDisk read-only to 0 for position 5 Invoking SetVDiskReadOnly for vdisk [82000000:1:0:5:0] Setting VDisk read-only to 0 for position 6 Invoking SetVDiskReadOnly for vdisk [82000000:1:0:6:0] 2026-01-08T15:24:54.543973Z 8 00h20m54.412560s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:7:0]: (2181038080) Unavailable in read-only Sender# [1:5376:755] 2026-01-08T15:24:54.547136Z 8 00h20m54.412560s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:7:0]: (2181038080) Unavailable in read-only Sender# [1:5376:755] 2026-01-08T15:24:54.561503Z 8 00h20m54.412560s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:7:0]: (2181038080) Unavailable in read-only Sender# [1:5376:755] 2026-01-08T15:24:54.576150Z 8 00h20m54.412560s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:7:0]: (2181038080) Unavailable in read-only Sender# [1:5376:755] 2026-01-08T15:24:54.577906Z 8 00h20m54.412560s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:7:0]: (2181038080) Unavailable in read-only Sender# [1:5376:755] 2026-01-08T15:24:54.627863Z 8 00h20m54.512560s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:7:0]: (2181038080) Unavailable in read-only Sender# [1:5376:755] 2026-01-08T15:24:54.695380Z 8 00h20m54.612560s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:7:0]: (2181038080) Unavailable in read-only Sender# [1:5376:755] 2026-01-08T15:24:54.838952Z 8 00h20m54.812560s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:7:0]: (2181038080) Unavailable in read-only Sender# [1:5376:755] 2026-01-08T15:24:55.186494Z 8 00h20m54.912560s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:7:0]: (2181038080) Unavailable in read-only Sender# [1:5376:755] 2026-01-08T15:24:55.215565Z 8 00h20m55.112560s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:7:0]: (2181038080) Unavailable in read-only Sender# [1:5376:755] 2026-01-08T15:24:55.264540Z 8 00h20m55.212560s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:7:0]: (2181038080) Unavailable in read-only Sender# [1:5376:755] 2026-01-08T15:24:55.510226Z 8 00h20m55.412560s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:7:0]: (2181038080) Unavailable in read-only Sender# [1:5376:755] 2026-01-08T15:24:55.510736Z 8 00h20m55.412560s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:7:0]: (2181038080) Unavailable in read-only Sender# [1:5376:755] 2026-01-08T15:24:55.521235Z 8 00h20m55.512560s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:7:0]: (2181038080) Unavailable in read-only Sender# [1:5376:755] 2026-01-08T15:24:55.722041Z 8 00h20m55.712560s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:7:0]: (2181038080) Unavailable in read-only Sender# [1:5376:755] 
2026-01-08T15:24:55.737441Z 8 00h20m55.812560s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:7:0]: (2181038080) Unavailable in read-only Sender# [1:5376:755] 2026-01-08T15:24:55.906003Z 8 00h20m56.012560s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:7:0]: (2181038080) Unavailable in read-only Sender# [1:5376:755] 2026-01-08T15:24:55.936049Z 8 00h20m56.112560s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:7:0]: (2181038080) Unavailable in read-only Sender# [1:5376:755] 2026-01-08T15:24:55.998628Z 8 00h20m56.312560s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:7:0]: (2181038080) Unavailable in read-only Sender# [1:5376:755] 2026-01-08T15:24:56.116302Z 8 00h20m56.412560s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:7:0]: (2181038080) Unavailable in read-only Sender# [1:5376:755] 2026-01-08T15:24:56.118832Z 8 00h20m56.412560s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:7:0]: (2181038080) Unavailable in read-only Sender# [1:5376:755] 2026-01-08T15:24:56.137713Z 8 00h20m56.512560s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:7:0]: (2181038080) Unavailable in read-only Sender# [1:5376:755] 2026-01-08T15:24:56.292874Z 8 00h20m56.712560s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:7:0]: (2181038080) Unavailable in read-only Sender# [1:5376:755] 2026-01-08T15:24:56.551788Z 8 00h20m56.812560s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:7:0]: (2181038080) Unavailable in read-only Sender# [1:5376:755] 2026-01-08T15:24:56.584136Z 8 00h20m56.912560s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:7:0]: (2181038080) Unavailable in read-only Sender# [1:5376:755] 2026-01-08T15:24:56.667284Z 8 00h20m57.012560s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:7:0]: (2181038080) Unavailable in read-only Sender# [1:5376:755] 2026-01-08T15:24:56.699351Z 8 00h20m57.112560s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:7:0]: (2181038080) Unavailable in read-only Sender# [1:5376:755] 2026-01-08T15:24:57.037017Z 8 00h20m57.212560s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:7:0]: (2181038080) Unavailable in read-only Sender# [1:5376:755] 2026-01-08T15:24:57.062272Z 8 00h20m57.312560s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:7:0]: (2181038080) Unavailable in read-only Sender# [1:5376:755] 2026-01-08T15:24:57.138056Z 8 00h20m57.412560s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:7:0]: (2181038080) Unavailable in read-only Sender# [1:5376:755] 2026-01-08T15:24:57.139658Z 8 00h20m57.412560s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:7:0]: (2181038080) Unavailable in read-only Sender# [1:5376:755] 2026-01-08T15:24:57.671400Z 8 00h20m57.612560s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:7:0]: (2181038080) Unavailable in read-only Sender# [1:5376:755] 2026-01-08T15:24:57.685417Z 8 00h20m57.712560s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:7:0]: (2181038080) Unavailable in read-only Sender# [1:5376:755] 2026-01-08T15:24:57.717213Z 8 00h20m57.912560s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:7:0]: (2181038080) Unavailable in read-only Sender# [1:5376:755] 2026-01-08T15:24:57.736469Z 8 00h20m58.012560s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:7:0]: (2181038080) Unavailable in read-only Sender# [1:5376:755] 2026-01-08T15:24:57.894225Z 8 00h20m58.212560s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:7:0]: (2181038080) Unavailable in read-only Sender# [1:5376:755] 2026-01-08T15:24:57.913338Z 8 00h20m58.312560s :BS_SKELETON ERROR: PDiskId# 1000 
VDISK[82000000:_:0:7:0]: (2181038080) Unavailable in read-only Sender# [1:5376:755] 2026-01-08T15:24:57.939429Z 8 00h20m58.412560s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:7:0]: (2181038080) Unavailable in read-only Sender# [1:5376:755] 2026-01-08T15:24:57.941084Z 8 00h20m58.412560s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:7:0]: (2181038080) Unavailable in read-only Sender# [1:5376:755] 2026-01-08T15:24:58.026053Z 8 00h20m58.612560s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:7:0]: (2181038080) Unavailable in read-only Sender# [1:5376:755] 2026-01-08T15:24:58.477684Z 8 00h20m58.712560s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:7:0]: (2181038080) Unavailable in read-only Sender# [1:5376:755] 2026-01-08T15:24:58.633584Z 8 00h20m58.912560s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:7:0]: (2181038080) Unavailable in read-only Sender# [1:5376:755] 2026-01-08T15:24:58.650919Z 8 00h20m59.012560s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:7:0]: (2181038080) Unavailable in read-only Sender# [1:5376:755] 2026-01-08T15:24:58.687808Z 8 00h20m59.212560s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:7:0]: (2181038080) Unavailable in read-only Sender# [1:5376:755] 2026-01-08T15:24:58.741819Z 8 00h20m59.312560s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:7:0]: (2181038080) Unavailable in read-only Sender# [1:5376:755] 2026-01-08T15:24:58.755263Z 8 00h20m59.412560s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:7:0]: (2181038080) Unavailable in read-only Sender# [1:5376:755] 2026-01-08T15:24:58.756320Z 8 00h20m59.412560s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:7:0]: (2181038080) Unavailable in read-only Sender# [1:5376:755] 2026-01-08T15:24:58.962220Z 8 00h20m59.612560s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:7:0]: (2181038080) Unavailable in read-only Sender# [1:5376:755] 2026-01-08T15:24:58.978826Z 8 00h20m59.712560s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:7:0]: (2181038080) Unavailable in read-only Sender# [1:5376:755] 2026-01-08T15:24:59.151849Z 8 00h20m59.912560s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:7:0]: (2181038080) Unavailable in read-only Sender# [1:5376:755] 2026-01-08T15:24:59.178744Z 8 00h21m00.012560s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:7:0]: (2181038080) Unavailable in read-only Sender# [1:5376:755] 2026-01-08T15:24:59.346107Z 8 00h21m00.212560s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:7:0]: (2181038080) Unavailable in read-only Sender# [1:5376:755] 2026-01-08T15:24:59.368381Z 8 00h21m00.312560s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:7:0]: (2181038080) Unavailable in read-only Sender# [1:5376:755] 2026-01-08T15:24:59.447799Z 8 00h21m00.412560s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:7:0]: (2181038080) Unavailable in read-only Sender# [1:5376:755] 2026-01-08T15:24:59.449836Z 8 00h21m00.412560s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:7:0]: (2181038080) Unavailable in read-only Sender# [1:5376:755] 2026-01-08T15:24:59.466513Z 8 00h21m00.512560s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:7:0]: (2181038080) Unavailable in read-only Sender# [1:5376:755] 2026-01-08T15:24:59.482907Z 8 00h21m00.612560s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:7:0]: (2181038080) Unavailable in read-only Sender# [1:5376:755] 2026-01-08T15:24:59.499277Z 8 00h21m00.712560s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:7:0]: (2181038080) Unavailable in read-only Sender# [1:5376:755] 
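The code printed in parentheses in these records, 2181038080, is 0x82000000 in hex, the same value as the group ID that appears in hex inside the VDISK[82000000:_:0:0:0] tag, so it reads as the BlobStorage group ID echoed in decimal rather than a separate error status. That reading is an inference from the log alone; the arithmetic itself is easy to check (Python used here purely as a calculator):

# 2181038080 decoded: it equals 0x82000000, the hex group id from the VDISK tag.
# Inference from the log, not a documented statement about the log format.
group_id = 2181038080
assert hex(group_id) == "0x82000000"
assert group_id == 0x82 * 2**24
print(hex(group_id))  # -> 0x82000000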
>> PushdownTest::NotNullForDatetime >> TDqHashCombineTest::TestWideModeAggregationNoInput [GOOD] >> TDqHashCombineTest::TestWideModeAggregationSingleRow ------- [TS] {asan, default-linux-x86_64, release} ydb/mvp/core/ut/unittest >> Mvp::TokenatorRefreshMetadataTokenGood [GOOD] Test command err: 2026-01-08T15:25:05.293176Z :MVP DEBUG: mvp_tokens.cpp:77: Refreshing token metadataTokenName 2026-01-08T15:25:05.293467Z :MVP DEBUG: mvp_tokens.cpp:217: Updating metadata token 2026-01-08T15:25:05.364144Z :MVP DEBUG: mvp_tokens.cpp:77: Refreshing token metadataTokenName 2026-01-08T15:25:05.364396Z :MVP DEBUG: mvp_tokens.cpp:217: Updating metadata token 2026-01-08T15:25:10.365989Z :MVP DEBUG: mvp_tokens.cpp:77: Refreshing token metadataTokenName 2026-01-08T15:25:10.366345Z :MVP DEBUG: mvp_tokens.cpp:217: Updating metadata token
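The Tokenator output above shows the refresh cadence: a refresh and update immediately on start (15:25:05.29), a second pass right after the first update lands (15:25:05.36), then the next cycle roughly five seconds later (15:25:10.36). A minimal sketch of a refresher with that shape, with hypothetical names throughout (refresh_token, PERIOD and the loop are illustrative, not the mvp_tokens.cpp API):

import threading

PERIOD = 5.0  # hypothetical, inferred from the ~5 s gap between refreshes in the log

def refresh_loop(refresh_token, stop: threading.Event):
    # Refresh immediately on start, then once per period until asked to stop.
    while not stop.is_set():
        refresh_token()   # fetch and install a fresh metadata token
        stop.wait(PERIOD)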
------- [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/ut_vdisk/unittest >> TBsLocalRecovery::ChaoticWriteRestartHugeIncreased [GOOD] Test command err: 2026-01-08T15:24:39.645558Z :BS_VDISK_PUT CRIT: blobstorage_skeleton.cpp:637: PDiskId# 1 VDISK[0:_:0:0:0]: (0) TEvVMultiPut: TEvVMultiPut has huge blob# [5000:1:774:0:0:66560:1] Marker# BSVS08
[... the same :BS_VDISK_PUT CRIT record repeats for roughly a hundred further blob IDs of the form [5000:1:N:0:0:66560:1], every blob 66560 bytes and every record tagged Marker# BSVS08, timestamps 2026-01-08T15:24:39.645588Z through 2026-01-08T15:24:39.652244Z ...]
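Every rejected blob ID above carries a payload size of 66560 bytes, which is 65 KiB, i.e. 1 KiB past a 64 KiB boundary; presumably the test deliberately sizes its puts just over the huge-blob cutoff (the exact threshold is not visible in this log, so 64 KiB is an assumption). The arithmetic:

# 66560 bytes = 65 * 1024 = 64 KiB + 1 KiB.
# Assumption: the huge-blob cutoff exercised here is 64 KiB, so these puts just exceed it.
size = 66560
assert size == 65 * 1024 == 64 * 1024 + 1024
print(size / 1024)  # -> 65.0 (KiB)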
|84.8%| [TM] {BAZEL_UPLOAD} ydb/core/blobstorage/ut_vdisk/unittest >> PushdownTest::NotNullForDatetime [GOOD] |84.8%| [TS] {BAZEL_UPLOAD} ydb/mvp/core/ut/unittest |84.8%| [TS] {asan, default-linux-x86_64, release} ydb/library/yql/providers/dq/scheduler/ut/unittest >> TSchedulerTest::UseOnlyHalfForLargeInOverload [GOOD] |84.8%| [TS] {RESULT} ydb/mvp/core/ut/unittest |84.8%| [TM] {BAZEL_UPLOAD} ydb/core/blobstorage/ut_blobstorage/ut_read_only_vdisk/unittest |84.8%| [TS] {BAZEL_UPLOAD} ydb/library/yql/providers/dq/scheduler/ut/unittest >> PushdownTest::IsNull [GOOD] |84.8%| [TS] {RESULT} ydb/library/yql/providers/dq/scheduler/ut/unittest |84.8%| [TS] {asan, default-linux-x86_64, release} ydb/core/tx/sequenceshard/public/ut/unittest |84.8%| [TS] {BAZEL_UPLOAD} ydb/core/tx/sequenceshard/public/ut/unittest >> TRdmaLow/TCqMode::ReadInOneProcessIpV4/EVENT |84.8%| [TS] {RESULT} ydb/core/tx/sequenceshard/public/ut/unittest >> ColumnShardConfigValidation::AcceptDefaultCompression [GOOD] >> ColumnShardConfigValidation::NotAcceptDefaultCompression [GOOD] >> ColumnShardConfigValidation::CorrectPlainCompression [GOOD] >> ColumnShardConfigValidation::NotCorrectPlainCompression [GOOD] >> ColumnShardConfigValidation::CorrectLZ4Compression [GOOD] >> ColumnShardConfigValidation::NotCorrectLZ4Compression [GOOD] >> ColumnShardConfigValidation::CorrectZSTDCompression [GOOD] >> ColumnShardConfigValidation::NotCorrectZSTDCompression [GOOD] >> PushdownTest::StringFieldsNotSupported >> TDqHashCombineTest::TestWideModeAggregationSingleRow [GOOD] >> TDqHashCombineTest::TestWideModeAggregationWithSpilling |84.8%| [TS] {asan, default-linux-x86_64, release} ydb/library/yql/providers/dq/actors/ut/unittest >> ResultReceiver::WriteQueue [GOOD] |84.8%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/viewer/json_handlers_viewer.cpp |84.9%| [TS] {BAZEL_UPLOAD} ydb/library/yql/providers/dq/actors/ut/unittest >> PushdownTest::StringFieldsNotSupported [GOOD] |84.9%| [TS] {RESULT} ydb/library/yql/providers/dq/actors/ut/unittest >> PushdownTest::StringFieldsNotSupported2 >> PushdownTest::StringFieldsNotSupported2 [GOOD] >> PushdownTest::RegexpPushdown >> PushdownTest::RegexpPushdown [GOOD] >> PushdownTest::DecimalPushdownPrecision10Scale0 |84.9%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/viewer/libydb-core-viewer.a >> TRangeTreap::Sequential [GOOD] >> PushdownTest::DecimalPushdownPrecision10Scale0 [GOOD] >> OldFormat::SameVersion [GOOD] >> TRangeTreap::Random >> OldFormat::DefaultRules [GOOD] >> PushdownTest::DecimalPushdownPrecesion4Scale2 [GOOD] >> TRangeTreap::Random [GOOD] >> TDqSolomonWriteActorTest::TestWriteFormat >> OldFormat::PrevYear [GOOD] >> TRangeTreap::EarlyQuit >> OldFormat::Trunk [GOOD] >> OldFormat::UnexpectedTrunk [GOOD] >> OldFormat::TooOld [GOOD] >> OldFormat::OldNbs [GOOD] >> VersionParser::Basic [GOOD] >> YdbVersion::DefaultSameVersion [GOOD] >> YdbVersion::DefaultPrevMajor [GOOD] >> TRdmaLow/TCqMode::ReadInOneProcessIpV6/POLLING |84.9%| [TS] {asan, default-linux-x86_64,
release} ydb/core/config/validation/column_shard_config_validator_ut/unittest >> ColumnShardConfigValidation::NotCorrectZSTDCompression [GOOD] |84.9%| [TA] $(B)/ydb/core/blobstorage/ut_blobstorage/ut_read_only_vdisk/test-results/unittest/{meta.json ... results_accumulator.log} >> YdbVersion::DefaultNextMajor [GOOD] >> BlobDepot::BasicPutAndGet >> YdbVersion::DefaultHotfix [GOOD] >> TBsVDiskRepl3::ReplPerf [GOOD] >> YdbVersion::DefaultCompatible [GOOD] >> YdbVersion::DefaultNextYear [GOOD] >> YdbVersion::DefaultPrevYear [GOOD] >> YdbVersion::DefaultNewMajor [GOOD] >> YdbVersion::DefaultOldMajor [GOOD] >> YdbVersion::DefaultDifferentBuild [GOOD] >> YdbVersion::DefaultDifferentBuildIncompatible [GOOD] >> YdbVersion::DefaulPatchTag [GOOD] >> YdbVersion::LimitOld [GOOD] >> YdbVersion::LimitNew [GOOD] |84.9%| [TS] {BAZEL_UPLOAD} ydb/core/config/validation/column_shard_config_validator_ut/unittest >> YdbVersion::CurrentCanLoadFrom [GOOD] >> YdbVersion::CurrentCanLoadFromAllOlder [GOOD] >> YdbVersion::CurrentCanLoadFromIncompatible [GOOD] >> YdbVersion::CurrentStoresReadableBy [GOOD] >> YdbVersion::StoredReadableBy [GOOD] >> YdbVersion::StoredReadableByIncompatible [GOOD] >> YdbVersion::StoredWithRules [GOOD] >> YdbVersion::StoredWithRulesIncompatible [GOOD] >> YdbVersion::OldNbsStored [GOOD] >> YdbVersion::OldNbsIncompatibleStored [GOOD] >> YdbVersion::NewNbsCurrent [GOOD] >> YdbVersion::NewNbsIncompatibleCurrent [GOOD] >> YdbVersion::OneAcceptedVersion [GOOD] >> YdbVersion::ForbiddenMinor [GOOD] >> YdbVersion::DefaultRulesWithExtraForbidden [GOOD] >> YdbVersion::ExtraAndForbidden [GOOD] >> YdbVersion::SomeRulesAndOtherForbidden [GOOD] >> YdbVersion::Component [GOOD] >> YdbVersion::OtherComponent [GOOD] >> YdbVersion::YDBAndNbs [GOOD] >> YdbVersion::DifferentYdbVersionsWithNBSRules [GOOD] >> YdbVersion::WithPatchAndWithoutPatch [GOOD] >> YdbVersion::AcceptSpecificHotfixWithoutPatch [GOOD] >> YdbVersion::TrunkYDBAndNbs [GOOD] >> YdbVersion::TrunkAndStable [GOOD] >> YdbVersion::CompatibleWithSelf [GOOD] >> YdbVersion::PrintCurrentVersionProto [GOOD] |84.9%| [TA] {BAZEL_UPLOAD} $(B)/ydb/core/blobstorage/ut_blobstorage/ut_read_only_vdisk/test-results/unittest/{meta.json ... 
results_accumulator.log} ------- [TM] {asan, default-linux-x86_64, release} ydb/core/driver_lib/version/ut/unittest >> YdbVersion::PrintCurrentVersionProto [GOOD] Test command err: Application: "ydb" >> ArrowTest::BatchBuilder >> AuthConfigValidation::AcceptValidPasswordComplexity [GOOD] >> AuthConfigValidation::CannotAcceptInvalidPasswordComplexity [GOOD] >> AuthConfigValidation::AcceptValidAccountLockoutConfig [GOOD] |84.9%| [TM] {BAZEL_UPLOAD} ydb/core/driver_lib/version/ut/unittest |84.9%| [TS] {RESULT} ydb/core/config/validation/column_shard_config_validator_ut/unittest >> AuthConfigValidation::CannotAcceptInvalidAccountLockoutConfig [GOOD] >> TestCommon::Empty [GOOD] >> TestCommon::ParseCounterName [GOOD] >> TestCommon::CollectTaskRunnerStatisticsByStage >> TestCommon::CollectTaskRunnerStatisticsByStage [GOOD] >> TestCommon::CollectTaskRunnerStatisticsByTask [GOOD] >> TAsyncComputeActorTest::Empty >> ArrowTest::BatchBuilder [GOOD] >> ArrowTest::ArrowToYdbConverter [GOOD] >> ArrowTest::SortWithCompositeKey >> LongTxServicePublicTypes::LongTxId [GOOD] >> LongTxServicePublicTypes::SnapshotMaxTxId [GOOD] >> LongTxServicePublicTypes::Snapshot [GOOD] >> LongTxServicePublicTypes::SnapshotReadOnly [GOOD] >> TAsyncComputeActorTest::Empty [GOOD] >> ArrowTest::SortWithCompositeKey [GOOD] >> ArrowTest::MergingSortedInputStream [GOOD] >> ArrowTest::MergingSortedInputStreamReversed [GOOD] >> TestFederatedQueryHelpers::TestCheckNestingDepth [GOOD] >> TestFederatedQueryHelpers::TestTruncateIssues [GOOD] >> TestFederatedQueryHelpers::TestValidateResultSetColumns [GOOD] >> ArrowTest::MergingSortedInputStreamReplace [GOOD] >> Splitter::CritSmallPortions [GOOD] >> ArrowTest::MaxVersionFilter >> XdcRdmaTest::SerializeToRope [FAIL] >> Splitter::Crit >> XdcRdmaTest::SendRdma >> TRdmaLow/TCqMode::ReadInOneProcessIpV6/EVENT >> TAsyncComputeActorTest::Basic >> ArrowTest::MaxVersionFilter [GOOD] >> ArrowTest::EqualKeysVersionFilter [GOOD] |84.9%| [TA] {RESULT} $(B)/ydb/core/blobstorage/ut_blobstorage/ut_read_only_vdisk/test-results/unittest/{meta.json ... results_accumulator.log} >> ColumnFilter::MergeFilters [GOOD] >> XdcRdmaTest::SendRdma [FAIL] >> ColumnFilter::CombineFilters [GOOD] >> XdcRdmaTest::SendRdmaWithRegionOffset >> ColumnFilter::ApplyFilterToFilter [GOOD] >> ColumnFilter::FilterSlice [GOOD] >> ColumnFilter::FilterCheckSlice [GOOD] >> ColumnFilter::FilterSlice1 [GOOD] >> ColumnFilter::CutFilter1 [GOOD] >> ColumnFilter::CutFilter2 [GOOD] >> Dictionary::Simple >> XdcRdmaTest::SendRdmaWithRegionOffset [FAIL] ------- [TS] {asan, default-linux-x86_64, release} ydb/library/yql/providers/generic/provider/ut/pushdown/unittest >> PushdownTest::DecimalPushdownPrecesion4Scale2 [GOOD] Test command err: Initial program: ( (let $data_source (DataSource '"generic" '"test_cluster")) (let $empty_lambda (lambda '($arg) (Bool '"true"))) (let $table (MrTableConcat (Key '('table (String '"test_table")))) ) (let $read (Read! world $data_source $table)) (let $map_lambda (lambda '($row) (OptionalIf (Bool '"true") $row ) )) (let $filtered_data (FlatMap (Right! $read) $map_lambda)) (let $resulte_data_sink (DataSink '"result")) (let $result (ResWrite! (Left! $read) $resulte_data_sink (Key) $filtered_data '('('type)))) (return (Commit! $result $resulte_data_sink)) ) Expr: ( (let $1 (Read! world (DataSource '"generic" '"test_cluster") (MrTableConcat (Key '('table (String '"test_table")))))) (let $2 (DataSink '"result")) (let $3 (ResWrite! (Left! $1) $2 (Key) (FlatMap (Right! 
$1) (lambda '($4) (OptionalIf (Bool '"true") $4))) '('('type)))) (return (Commit! $3 $2)) ) Expr: ( (let $1 (Read! world (DataSource '"generic" '"test_cluster") (MrTableConcat (Key '('table (String '"test_table")))))) (let $2 (DataSink '"result")) (let $3 (ResWrite! (Left! $1) $2 (Key) (FlatMap (Right! $1) (lambda '($4) (OptionalIf (Bool '"true") $4))) '('('type)))) (return (Commit! $3 $2)) ) discovered cluster name: test_clusterDescribe table for: ``test_cluster`.`test_table``Expr: ( (let $1 (GenReadTable! world (DataSource '"generic" '"test_cluster") (GenTable '"test_table") (Void) (lambda '($4) (Bool '"true")))) (let $2 (DataSink '"result")) (let $3 (ResWrite! (Left! $1) $2 (Key) (FlatMap (Right! $1) (lambda '($5) (OptionalIf (Bool '"true") $5))) '('('type)))) (return (Commit! $3 $2)) ) Expr: ( (let $1 (GenReadTable! world (DataSource '"generic" '"test_cluster") (GenTable '"test_table") (Void) (lambda '($4) (Bool '"true")))) (let $2 (DataSink '"result")) (let $3 (ResWrite! (Left! $1) $2 (Key) (FlatMap (Right! $1) (lambda '($5) (OptionalIf (Bool '"true") $5))) '('('type)))) (return (Commit! $3 $2)) ) Expr: ( (let $1 (GenReadTable! world (DataSource '"generic" '"test_cluster") (GenTable '"test_table") (Void) (lambda '($4) (Bool '"true")))) (let $2 (DataSink '"result")) (let $3 (ResWrite! (Left! $1) $2 (Key) (FlatMap (Right! $1) (lambda '($5) (OptionalIf (Bool '"true") $5))) '('('type)))) (return (Commit! $3 $2)) ) Expr to optimize: ( (let $1 (Bool '"true")) (let $2 (GenReadTable! world (DataSource '"generic" '"test_cluster") (GenTable '"test_table") (Void) (lambda '($5) $1))) (let $3 (DataSink '"result")) (let $4 (ResWrite! (Left! $2) $3 (Key) (FlatMap (Right! $2) (lambda '($6) (OptionalIf $1 $6))) '('('type)))) (return (Commit! $4 $3)) ) OptionalIf over Bool 'trueExpr: ( (let $1 (GenReadTable! world (DataSource '"generic" '"test_cluster") (GenTable '"test_table") (Void) (lambda '($4) (Bool '"true")))) (let $2 (DataSink '"result")) (let $3 (ResWrite! (Left! $1) $2 (Key) (FlatMap (Right! $1) (lambda '($5) (Just $5))) '('('type)))) (return (Commit! $3 $2)) ) Expr: ( (let $1 (GenReadTable! world (DataSource '"generic" '"test_cluster") (GenTable '"test_table") (Void) (lambda '($4) (Bool '"true")))) (let $2 (DataSink '"result")) (let $3 (ResWrite! (Left! $1) $2 (Key) (FlatMap (Right! $1) (lambda '($5) (Just $5))) '('('type)))) (return (Commit! $3 $2)) ) Expr to optimize: ( (let $1 (GenReadTable! world (DataSource '"generic" '"test_cluster") (GenTable '"test_table") (Void) (lambda '($4) (Bool '"true")))) (let $2 (DataSink '"result")) (let $3 (ResWrite! (Left! $1) $2 (Key) (FlatMap (Right! $1) (lambda '($5) (Just $5))) '('('type)))) (return (Commit! $3 $2)) ) FlatMap with JustExpr: ( (let $1 (GenReadTable! world (DataSource '"generic" '"test_cluster") (GenTable '"test_table") (Void) (lambda '($4) (Bool '"true")))) (let $2 (DataSink '"result")) (let $3 (ResWrite! (Left! $1) $2 (Key) (Right! $1) '('('type)))) (return (Commit! $3 $2)) ) Expr to optimize: ( (let $1 (GenReadTable! world (DataSource '"generic" '"test_cluster") (GenTable '"test_table") (Void) (lambda '($4) (Bool '"true")))) (let $2 (DataSink '"result")) (let $3 (ResWrite! (Left! $1) $2 (Key) (Right! $1) '('('type)))) (return (Commit! $3 $2)) ) PhysicalOptimizer-TrimReadWorldExpr: ( (let $1 (DataSink '"result")) (let $2 (ResWrite! world $1 (Key) (Right! (GenReadTable! world (DataSource '"generic" '"test_cluster") (GenTable '"test_table") (Void) (lambda '($3) (Bool '"true")))) '('('type)))) (return (Commit! 
$2 $1)) ) Expr to optimize: ( (let $1 (DataSink '"result")) (let $2 (ResWrite! world $1 (Key) (Right! (GenReadTable! world (DataSource '"generic" '"test_cluster") (GenTable '"test_table") (Void) (lambda '($3) (Bool '"true")))) '('('type)))) (return (Commit! $2 $1)) ) ResPullExpr: ( (let $1 (DataSink '"result")) (let $2 (ResPull! world $1 (Key) (Right! (GenReadTable! world (DataSource '"generic" '"test_cluster") (GenTable '"test_table") (Void) (lambda '($3) (Bool '"true")))) '('('type)) '"generic")) (return (Commit! $2 $1)) ) Expr to optimize: ( (let $1 (DataSink '"result")) (let $2 (ResPull! world $1 (Key) (Right! (GenReadTable! world (DataSource '"generic" '"test_cluster") (GenTable '"test_table") (Void) (lambda '($3) (Bool '"true")))) '('('type)) '"generic")) (return (Commit! $2 $1)) ) Optimized expr: ( (let $1 (DataSink '"result")) (let $2 (ResPull! world $1 (Key) (Right! (GenReadTable! world (DataSource '"generic" '"test_cluster") (GenTable '"test_table") (Void) (lambda '($3) (Bool '"true")))) '('('type)) '"generic")) (return (Commit! $2 $1)) ) Filling source settings: cluster: test_cluster, table: test_table, endpoint: host: "host" port: 42BuildGenericDqSourceSettingsBuilt settings: ( (let $1 (DataSink '"result")) (let $2 '('"col_bool" '"col_date" '"col_datetime" '"col_decimal_precision10_scale0" '"col_decimal_precision4_scale2" '"col_double" '"col_dynumber" '"col_float" '"col_int16" '"col_int32" '"col_int64" '"col_int8" '"col_interval" '"col_json" '"col_json_document" '"col_optional_bool" '"col_optional_date" '"col_optional_datetime" '"col_optional_double" '"col_optional_dynumber" '"col_optional_float" '"col_optional_int16" '"col_optional_int32" '"col_optional_int64" '"col_optional_int8" '"col_optional_interval" '"col_optional_json" '"col_optional_json_document" '"col_optional_string" '"col_optional_timestamp" '"col_optional_tz_date" '"col_optional_tz_datetime" '"col_optional_tz_timestamp" '"col_optional_uint16" '"col_optional_uint32" '"col_optional_uint64" '"col_optional_uint8" '"col_optional_utf8" '"col_optional_uuid" '"col_optional_yson" '"col_string" '"col_timestamp" '"col_tz_date" '"col_tz_datetime" '"col_tz_timestamp" '"col_uint16" '"col_uint32" '"col_uint64" '"col_uint8" '"col_utf8" '"col_uuid" '"col_yson")) (let $3 (GenSourceSettings world '"test_cluster" '"test_table" (SecureParam '"cluster:default_test_cluster") $2 (lambda '($34) (Bool '"true")))) (let $4 (DataType 'Bool)) (let $5 (DataType 'Date)) (let $6 (DataType 'Datetime)) (let $7 (DataType 'Decimal '10 '0)) (let $8 (DataType 'Decimal '4 '2)) (let $9 (DataType 'Double)) (let $10 (DataType 'DyNumber)) (let $11 (DataType 'Float)) (let $12 (DataType 'Int16)) (let $13 (DataType 'Int32)) (let $14 (DataType 'Int64)) (let $15 (DataType 'Int8)) (let $16 (DataType 'Interval)) (let $17 (DataType 'Json)) (let $18 (DataType 'JsonDocument)) (let $19 (DataType 'String)) (let $20 (DataType 'Timestamp)) (let $21 (DataType 'TzDate)) (let $22 (DataType 'TzDatetime)) (let $23 (DataType 'TzTimestamp)) (let $24 (DataType 'Uint16)) (let $25 (DataType 'Uint32)) (let $26 (DataType 'Uint64)) (let $27 (DataType 'Uint8)) (let $28 (DataType 'Utf8)) (let $29 (DataType 'Uuid)) (let $30 (DataType 'Yson)) (let $31 (StructType '('"col_bool" $4) '('"col_date" $5) '('"col_datetime" $6) '('"col_decimal_precision10_scale0" $7) '('"col_decimal_precision4_scale2" $8) '('"col_double" $9) '('"col_dynumber" $10) '('"col_float" $11) '('"col_int16" $12) '('"col_int32" $13) '('"col_int64" $14) '('"col_int8" $15) '('"col_interval" $16) '('"col_json" $17) 
'('"col_json_document" $18) '('"col_optional_bool" (OptionalType $4)) '('"col_optional_date" (OptionalType $5)) '('"col_optional_datetime" (OptionalType $6)) '('"col_optional_double" (OptionalType $9)) '('"col_optional_dynumber" (OptionalType $10)) '('"col_optional_float" (OptionalType $11)) '('"col_optional_int16" (OptionalType $12)) '('"col_optional_int32" (OptionalType $13)) '('"col_optional_int64" (OptionalType $14)) '('"col_optional_int8" (OptionalType $15)) '('"col_optional_interval" (OptionalType $16)) '('"col_optional_json" (OptionalType $17)) '('"col_optional_json_document" (OptionalType $18)) '('"col_optional_string" (OptionalType $19)) '('"col_optional_timestamp" (OptionalType $20)) '('"col_optional_tz_date" (OptionalType $21)) '('"col_optional_tz_datetime" (OptionalType $22)) '('"col_optional_tz_timestamp" (OptionalType $23)) '('"col_optional_uint16" (OptionalType $24)) '('"col_optional_uint32" (OptionalType $25)) '('"col_optional_uint64" (OptionalType $26)) '('"col_optional_uint8" (OptionalType $27)) '('"col_optional_utf8" (OptionalType $28)) '('"col_optional_uuid" (OptionalType $29)) '('"col_optional_yson" (OptionalType $30)) '('"col_string" $19) '('"col_timestamp" $20) '('"col_tz_date" $21) '('"col_tz_datetime" $22) '('"col_tz_timestamp" $23) '('"col_uint16" $24) '('"col_uint32" $25) '('"col_uint64" $26) '('"col_uint8" $27) '('"col_utf8" $28) '('"col_uuid" $29) '('"col_yson" $30))) (let $32 (DqSourceWrap $3 (DataSource '"generic" '"test_cluster") $31)) (let $33 (ResPull! world $1 (Key) $32 '('('type)) '"generic")) (return (Commit! $33 $1)) ) Dq source filter settings: GenericConfiguration::AddCluster: name = test_cluster, kind = POSTGRESQL, database name = database, database id = , endpoint = { host: "host" port: 42 }, use tls = 0, protocol = NATIVE Initial program: ( (let $data_source (DataSource '"generic" '"test_cluster")) (let $empty_lambda (lambda '($arg) (Bool '"true"))) (let $table (MrTableConcat (Key '('table (String '"test_table")))) ) (let $read (Read! world $data_source $table)) (let $map_lambda (lambda '($row) (OptionalIf (== (Member $row '"col_int16") (Int16 '42)) $row ) )) (let $filtered_data (FlatMap (Right! $read) $map_lambda)) (let $resulte_data_sink (DataSink '"result")) (let $result (ResWrite! (Left! $read) $resulte_data_sink (Key) $filtered_data '('('type)))) (return (Commit! $result $resulte_data_sink)) ) Expr: ( (let $1 (Read! world (DataSource '"generic" '"test_cluster") (MrTableConcat (Key '('table (String '"test_table")))))) (let $2 (DataSink '"result")) (let $3 (ResWrite! (Left! $1) $2 (Key) (FlatMap (Right! $1) (lambda '($4) (OptionalIf (== (Member $4 '"col_int16") (Int16 '42)) $4))) '('('type)))) (return (Commit! $3 $2)) ) Expr: ( (let $1 (Read! world (DataSource '"generic" '"test_cluster") (MrTableConcat (Key '('table (String '"test_table")))))) (let $2 (DataSink '"result")) (let $3 (ResWrite! (Left! $1) $2 (Key) (FlatMap (Right! $1) (lambda '($4) (OptionalIf (== (Member $4 '"col_int16") (Int16 '42)) $4))) '('('type)))) (return (Commit! $3 $2)) ) discovered cluster name: test_clusterDescribe table for: ``test_cluster`.`test_table``Expr: ( (let $1 (GenReadTable! world (DataSource '"generic" '"test_cluster") (GenTable '"test_table") (Void) (lambda '($4) (Bool '"true")))) (let $2 (DataSink '"result")) (let $3 (ResWrite! (Left! $1) $2 (Key) (FlatMap (Right! $1) (lambda '($5) (OptionalIf (== (Member $5 '"col_int16") (Int16 '42)) $5))) '('('type)))) (return (Commit! $3 $2)) ) Expr: ( (let $1 (GenReadTable! 
world (DataSource '"generic" '"test_cluster") (GenTable '"test_table") (Void) (lambda '($4) (Bool '"true")))) (let $2 (DataSink '"result")) (let $3 (ResWrite! (Left! $1) $2 (Key) (FlatMap (Right! $1) (lambda '($5) (OptionalIf (== (Member $5 '"col_int16") (Int16 '42)) $5))) '('('type)))) (return (Commit! $3 $2)) ) Expr: ( (let $1 (GenReadTable! world (DataSource '"generic" '"test_cluster") (GenTable '"test_table") (Void) (lambda '($4) (Bool '"true")))) (let $2 (DataSink '"result")) (let $3 (ResWrite! (Left! $1) $2 (Key) (FlatMap (Right! $1) (lambda '($5) (OptionalIf (== (Member $5 '"col_int ... ) '('"col_optional_datetime" (OptionalType $8)) '('"col_optional_double" (OptionalType $11)) '('"col_optional_dynumber" (OptionalType $12)) '('"col_optional_float" (OptionalType $13)) '('"col_optional_int16" (OptionalType $14)) '('"col_optional_int32" (OptionalType $15)) '('"col_optional_int64" (OptionalType $16)) '('"col_optional_int8" (OptionalType $17)) '('"col_optional_interval" (OptionalType $18)) '('"col_optional_json" (OptionalType $19)) '('"col_optional_json_document" (OptionalType $20)) '('"col_optional_string" (OptionalType $21)) '('"col_optional_timestamp" (OptionalType $22)) '('"col_optional_tz_date" (OptionalType $23)) '('"col_optional_tz_datetime" (OptionalType $24)) '('"col_optional_tz_timestamp" (OptionalType $25)) '('"col_optional_uint16" (OptionalType $26)) '('"col_optional_uint32" (OptionalType $27)) '('"col_optional_uint64" (OptionalType $28)) '('"col_optional_uint8" (OptionalType $29)) '('"col_optional_utf8" (OptionalType $30)) '('"col_optional_uuid" (OptionalType $31)) '('"col_optional_yson" (OptionalType $32)) '('"col_string" $21) '('"col_timestamp" $22) '('"col_tz_date" $23) '('"col_tz_datetime" $24) '('"col_tz_timestamp" $25) '('"col_uint16" $26) '('"col_uint32" $27) '('"col_uint64" $28) '('"col_uint8" $29) '('"col_utf8" $30) '('"col_uuid" $31) '('"col_yson" $32))) (let $34 (DqSourceWrap $5 (DataSource '"generic" '"test_cluster") $33)) (let $35 (ResWrite! world $1 (Key) (FlatMap $34 (lambda '($37) (OptionalIf (== (Member $37 $3) $4) $37))) '('('type)))) (return (Commit! $35 $1)) ) Dq source filter settings: filter_typed { comparison { operation: EQ left_value { column: "col_decimal_precision10_scale0" } right_value { typed_value { type { decimal_type { precision: 10 } } value { bytes_value: "\001\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000" } } } } } GenericConfiguration::AddCluster: name = test_cluster, kind = POSTGRESQL, database name = database, database id = , endpoint = { host: "host" port: 42 }, use tls = 0, protocol = NATIVE Initial program: ( (let $data_source (DataSource '"generic" '"test_cluster")) (let $empty_lambda (lambda '($arg) (Bool '"true"))) (let $table (MrTableConcat (Key '('table (String '"test_table")))) ) (let $read (Read! world $data_source $table)) (let $map_lambda (lambda '($row) (OptionalIf (== (Member $row '"col_decimal_precision4_scale2") (Decimal '"-22.22" '"4" '"2") ) $row ) )) (let $filtered_data (FlatMap (Right! $read) $map_lambda)) (let $resulte_data_sink (DataSink '"result")) (let $result (ResWrite! (Left! $read) $resulte_data_sink (Key) $filtered_data '('('type)))) (return (Commit! $result $resulte_data_sink)) ) Expr: ( (let $1 (Read! world (DataSource '"generic" '"test_cluster") (MrTableConcat (Key '('table (String '"test_table")))))) (let $2 (DataSink '"result")) (let $3 (ResWrite! (Left! $1) $2 (Key) (FlatMap (Right! 
$1) (lambda '($4) (block '( (let $5 (Decimal '"-22.22" '"4" '"2")) (return (OptionalIf (== (Member $4 '"col_decimal_precision4_scale2") $5) $4)) )))) '('('type)))) (return (Commit! $3 $2)) ) Expr: ( (let $1 (Read! world (DataSource '"generic" '"test_cluster") (MrTableConcat (Key '('table (String '"test_table")))))) (let $2 (DataSink '"result")) (let $3 (ResWrite! (Left! $1) $2 (Key) (FlatMap (Right! $1) (lambda '($4) (block '( (let $5 (Decimal '"-22.22" '"4" '"2")) (return (OptionalIf (== (Member $4 '"col_decimal_precision4_scale2") $5) $4)) )))) '('('type)))) (return (Commit! $3 $2)) ) discovered cluster name: test_clusterDescribe table for: ``test_cluster`.`test_table``Expr: ( (let $1 (GenReadTable! world (DataSource '"generic" '"test_cluster") (GenTable '"test_table") (Void) (lambda '($4) (Bool '"true")))) (let $2 (DataSink '"result")) (let $3 (ResWrite! (Left! $1) $2 (Key) (FlatMap (Right! $1) (lambda '($5) (block '( (let $6 (Decimal '"-22.22" '"4" '"2")) (return (OptionalIf (== (Member $5 '"col_decimal_precision4_scale2") $6) $5)) )))) '('('type)))) (return (Commit! $3 $2)) ) Expr: ( (let $1 (GenReadTable! world (DataSource '"generic" '"test_cluster") (GenTable '"test_table") (Void) (lambda '($4) (Bool '"true")))) (let $2 (DataSink '"result")) (let $3 (ResWrite! (Left! $1) $2 (Key) (FlatMap (Right! $1) (lambda '($5) (block '( (let $6 (Decimal '"-22.22" '"4" '"2")) (return (OptionalIf (== (Member $5 '"col_decimal_precision4_scale2") $6) $5)) )))) '('('type)))) (return (Commit! $3 $2)) ) Expr: ( (let $1 (GenReadTable! world (DataSource '"generic" '"test_cluster") (GenTable '"test_table") (Void) (lambda '($4) (Bool '"true")))) (let $2 (DataSink '"result")) (let $3 (ResWrite! (Left! $1) $2 (Key) (FlatMap (Right! $1) (lambda '($5) (block '( (let $6 (Decimal '"-22.22" '"4" '"2")) (return (OptionalIf (== (Member $5 '"col_decimal_precision4_scale2") $6) $5)) )))) '('('type)))) (return (Commit! $3 $2)) ) Expr to optimize: ( (let $1 (GenReadTable! world (DataSource '"generic" '"test_cluster") (GenTable '"test_table") (Void) (lambda '($4) (Bool '"true")))) (let $2 (DataSink '"result")) (let $3 (ResWrite! (Left! $1) $2 (Key) (FlatMap (Right! $1) (lambda '($5) (block '( (let $6 (Decimal '"-22.22" '"4" '"2")) (return (OptionalIf (== (Member $5 '"col_decimal_precision4_scale2") $6) $5)) )))) '('('type)))) (return (Commit! $3 $2)) ) PhysicalOptimizer-TrimReadWorldPush filter lambda: ( (return (lambda '($1) (block '( (let $2 (Decimal '"-22.22" '"4" '"2")) (return (== (Member $1 '"col_decimal_precision4_scale2") $2)) )))) ) PhysicalOptimizer-PushFilterToReadTableExpr: ( (let $1 (DataSink '"result")) (let $2 '"col_decimal_precision4_scale2") (let $3 (Decimal '"-22.22" '"4" '"2")) (let $4 (ResWrite! world $1 (Key) (FlatMap (Right! (GenReadTable! world (DataSource '"generic" '"test_cluster") (GenTable '"test_table") (Void) (lambda '($5) (== (Member $5 $2) $3)))) (lambda '($6) (OptionalIf (== (Member $6 $2) $3) $6))) '('('type)))) (return (Commit! $4 $1)) ) Expr: ( (let $1 (DataSink '"result")) (let $2 '"col_decimal_precision4_scale2") (let $3 (Decimal '"-22.22" '"4" '"2")) (let $4 (ResWrite! world $1 (Key) (FlatMap (Right! (GenReadTable! world (DataSource '"generic" '"test_cluster") (GenTable '"test_table") (Void) (lambda '($5) (== (Member $5 $2) $3)))) (lambda '($6) (OptionalIf (== (Member $6 $2) $3) $6))) '('('type)))) (return (Commit! $4 $1)) ) Expr to optimize: ( (let $1 (DataSink '"result")) (let $2 '"col_decimal_precision4_scale2") (let $3 (Decimal '"-22.22" '"4" '"2")) (let $4 (ResWrite! 
world $1 (Key) (FlatMap (Right! (GenReadTable! world (DataSource '"generic" '"test_cluster") (GenTable '"test_table") (Void) (lambda '($5) (== (Member $5 $2) $3)))) (lambda '($6) (OptionalIf (== (Member $6 $2) $3) $6))) '('('type)))) (return (Commit! $4 $1)) ) Push filter. Lambda is already not emptyOptimized expr: ( (let $1 (DataSink '"result")) (let $2 '"col_decimal_precision4_scale2") (let $3 (Decimal '"-22.22" '"4" '"2")) (let $4 (ResWrite! world $1 (Key) (FlatMap (Right! (GenReadTable! world (DataSource '"generic" '"test_cluster") (GenTable '"test_table") (Void) (lambda '($5) (== (Member $5 $2) $3)))) (lambda '($6) (OptionalIf (== (Member $6 $2) $3) $6))) '('('type)))) (return (Commit! $4 $1)) ) Filling source settings: cluster: test_cluster, table: test_table, endpoint: host: "host" port: 42BuildGenericDqSourceSettingsBuilt settings: ( (let $1 (DataSink '"result")) (let $2 '('"col_bool" '"col_date" '"col_datetime" '"col_decimal_precision10_scale0" '"col_decimal_precision4_scale2" '"col_double" '"col_dynumber" '"col_float" '"col_int16" '"col_int32" '"col_int64" '"col_int8" '"col_interval" '"col_json" '"col_json_document" '"col_optional_bool" '"col_optional_date" '"col_optional_datetime" '"col_optional_double" '"col_optional_dynumber" '"col_optional_float" '"col_optional_int16" '"col_optional_int32" '"col_optional_int64" '"col_optional_int8" '"col_optional_interval" '"col_optional_json" '"col_optional_json_document" '"col_optional_string" '"col_optional_timestamp" '"col_optional_tz_date" '"col_optional_tz_datetime" '"col_optional_tz_timestamp" '"col_optional_uint16" '"col_optional_uint32" '"col_optional_uint64" '"col_optional_uint8" '"col_optional_utf8" '"col_optional_uuid" '"col_optional_yson" '"col_string" '"col_timestamp" '"col_tz_date" '"col_tz_datetime" '"col_tz_timestamp" '"col_uint16" '"col_uint32" '"col_uint64" '"col_uint8" '"col_utf8" '"col_uuid" '"col_yson")) (let $3 '"col_decimal_precision4_scale2") (let $4 (Decimal '"-22.22" '"4" '"2")) (let $5 (GenSourceSettings world '"test_cluster" '"test_table" (SecureParam '"cluster:default_test_cluster") $2 (lambda '($36) (== (Member $36 $3) $4)))) (let $6 (DataType 'Bool)) (let $7 (DataType 'Date)) (let $8 (DataType 'Datetime)) (let $9 (DataType 'Decimal '10 '0)) (let $10 (DataType 'Decimal '4 '2)) (let $11 (DataType 'Double)) (let $12 (DataType 'DyNumber)) (let $13 (DataType 'Float)) (let $14 (DataType 'Int16)) (let $15 (DataType 'Int32)) (let $16 (DataType 'Int64)) (let $17 (DataType 'Int8)) (let $18 (DataType 'Interval)) (let $19 (DataType 'Json)) (let $20 (DataType 'JsonDocument)) (let $21 (DataType 'String)) (let $22 (DataType 'Timestamp)) (let $23 (DataType 'TzDate)) (let $24 (DataType 'TzDatetime)) (let $25 (DataType 'TzTimestamp)) (let $26 (DataType 'Uint16)) (let $27 (DataType 'Uint32)) (let $28 (DataType 'Uint64)) (let $29 (DataType 'Uint8)) (let $30 (DataType 'Utf8)) (let $31 (DataType 'Uuid)) (let $32 (DataType 'Yson)) (let $33 (StructType '('"col_bool" $6) '('"col_date" $7) '('"col_datetime" $8) '('"col_decimal_precision10_scale0" $9) '('"col_decimal_precision4_scale2" $10) '('"col_double" $11) '('"col_dynumber" $12) '('"col_float" $13) '('"col_int16" $14) '('"col_int32" $15) '('"col_int64" $16) '('"col_int8" $17) '('"col_interval" $18) '('"col_json" $19) '('"col_json_document" $20) '('"col_optional_bool" (OptionalType $6)) '('"col_optional_date" (OptionalType $7)) '('"col_optional_datetime" (OptionalType $8)) '('"col_optional_double" (OptionalType $11)) '('"col_optional_dynumber" (OptionalType $12)) 
'('"col_optional_float" (OptionalType $13)) '('"col_optional_int16" (OptionalType $14)) '('"col_optional_int32" (OptionalType $15)) '('"col_optional_int64" (OptionalType $16)) '('"col_optional_int8" (OptionalType $17)) '('"col_optional_interval" (OptionalType $18)) '('"col_optional_json" (OptionalType $19)) '('"col_optional_json_document" (OptionalType $20)) '('"col_optional_string" (OptionalType $21)) '('"col_optional_timestamp" (OptionalType $22)) '('"col_optional_tz_date" (OptionalType $23)) '('"col_optional_tz_datetime" (OptionalType $24)) '('"col_optional_tz_timestamp" (OptionalType $25)) '('"col_optional_uint16" (OptionalType $26)) '('"col_optional_uint32" (OptionalType $27)) '('"col_optional_uint64" (OptionalType $28)) '('"col_optional_uint8" (OptionalType $29)) '('"col_optional_utf8" (OptionalType $30)) '('"col_optional_uuid" (OptionalType $31)) '('"col_optional_yson" (OptionalType $32)) '('"col_string" $21) '('"col_timestamp" $22) '('"col_tz_date" $23) '('"col_tz_datetime" $24) '('"col_tz_timestamp" $25) '('"col_uint16" $26) '('"col_uint32" $27) '('"col_uint64" $28) '('"col_uint8" $29) '('"col_utf8" $30) '('"col_uuid" $31) '('"col_yson" $32))) (let $34 (DqSourceWrap $5 (DataSource '"generic" '"test_cluster") $33)) (let $35 (ResWrite! world $1 (Key) (FlatMap $34 (lambda '($37) (OptionalIf (== (Member $37 $3) $4) $37))) '('('type)))) (return (Commit! $35 $1)) ) Dq source filter settings: filter_typed { comparison { operation: EQ left_value { column: "col_decimal_precision4_scale2" } right_value { typed_value { type { decimal_type { precision: 4 scale: 2 } } value { bytes_value: "R\367\377\377\377\377\377\377\377\377\377\377\377\377\377\377" } } } } } >> XdcRdmaTest::SendRdmaWithGlueWithRegionOffset |84.9%| [TS] {BAZEL_UPLOAD} ydb/library/yql/providers/generic/provider/ut/pushdown/unittest >> UtilString::ShrinkToFit [GOOD] >> TMemoryPoolTest::Transactions [GOOD] >> TMemoryPoolTest::AppendString [GOOD] >> TMemoryPoolTest::TransactionsWithAlignment [GOOD] >> TMemoryPoolTest::AllocOneByte [GOOD] >> TMemoryPoolTest::LongRollback [GOOD] >> XdcRdmaTest::SendRdmaWithGlueWithRegionOffset [FAIL] >> XdcRdmaTest::SendRdmaWithGlue |84.9%| [TS] {asan, default-linux-x86_64, release} ydb/core/config/validation/auth_config_validator_ut/unittest >> AuthConfigValidation::CannotAcceptInvalidAccountLockoutConfig [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/ut_vdisk/unittest >> TBsVDiskRepl3::ReplPerf [GOOD] Test command err: 2026-01-08T15:24:39.703245Z :BS_SYNCER ERROR: guid_recovery.cpp:714: PDiskId# 4 VDISK[0:_:0:1:1]: (0) TVDiskGuidRecoveryActor: DECISION: [Decision# LostData SubsequentFailure# 0] 2026-01-08T15:24:39.747766Z :BS_SYNCER ERROR: guid_recovery.cpp:767: PDiskId# 4 VDISK[0:_:0:1:1]: (0) TVDiskGuidRecoveryActor: FINISH: [Decision# LostData Guid# 10394926400492387950] 2026-01-08T15:24:40.798709Z :BS_SYNCER ERROR: blobstorage_osiris.cpp:203: PDiskId# 4 VDISK[0:_:0:1:1]: (0) THullOsirisActor: FINISH: BlobsResurrected# 0 PartsResurrected# 0 2026-01-08T15:24:50.531677Z :BS_SYNCER ERROR: guid_recovery.cpp:714: PDiskId# 4 VDISK[0:_:0:3:0]: (0) TVDiskGuidRecoveryActor: DECISION: [Decision# LostData SubsequentFailure# 0] 2026-01-08T15:24:50.690537Z :BS_SYNCER ERROR: guid_recovery.cpp:767: PDiskId# 4 VDISK[0:_:0:3:0]: (0) TVDiskGuidRecoveryActor: FINISH: [Decision# LostData Guid# 3328025737782936453] 2026-01-08T15:24:50.794834Z :BS_SYNCER ERROR: blobstorage_osiris.cpp:203: PDiskId# 4 VDISK[0:_:0:3:0]: (0) THullOsirisActor: FINISH: BlobsResurrected# 0 
PartsResurrected# 0 2026-01-08T15:25:04.780139Z :BS_SYNCER ERROR: guid_recovery.cpp:714: PDiskId# 4 VDISK[0:_:0:1:1]: (0) TVDiskGuidRecoveryActor: DECISION: [Decision# LostData SubsequentFailure# 0] 2026-01-08T15:25:04.862863Z :BS_SYNCER ERROR: guid_recovery.cpp:767: PDiskId# 4 VDISK[0:_:0:1:1]: (0) TVDiskGuidRecoveryActor: FINISH: [Decision# LostData Guid# 17430886428065972962] 2026-01-08T15:25:05.973794Z :BS_SYNCER ERROR: blobstorage_osiris.cpp:203: PDiskId# 4 VDISK[0:_:0:1:1]: (0) THullOsirisActor: FINISH: BlobsResurrected# 0 PartsResurrected# 0 |84.9%| [TS] {BAZEL_UPLOAD} ydb/core/config/validation/auth_config_validator_ut/unittest >> XdcRdmaTest::SendRdmaWithGlue [FAIL] >> XdcRdmaTest::SendRdmaWithMultiGlue |84.9%| [TM] {BAZEL_UPLOAD} ydb/core/blobstorage/ut_vdisk/unittest |84.9%| [TS] {asan, default-linux-x86_64, release} ydb/library/yql/providers/dq/provider/ut/unittest >> TestCommon::CollectTaskRunnerStatisticsByTask [GOOD] >> XdcRdmaTest::SendRdmaWithMultiGlue [FAIL] >> XdcRdmaTest/XdcRdmaTestCqMode::SendMix/POLLING >> BlobDepot::BasicPutAndGet [GOOD] >> BlobDepot::TestBlockedEvGetRequest |84.9%| [TS] {BAZEL_UPLOAD} ydb/library/yql/providers/dq/provider/ut/unittest >> GroupedMemoryLimiter::Simplest [GOOD] >> GroupedMemoryLimiter::Simple [GOOD] >> GroupedMemoryLimiter::CommonUsage [GOOD] >> GroupedMemoryLimiter::Update [GOOD] |84.9%| [TM] {asan, default-linux-x86_64, release} ydb/core/tablet_flat/ut_util/unittest >> TMemoryPoolTest::LongRollback [GOOD] |84.9%| [TS] {asan, default-linux-x86_64, release} ydb/core/tx/long_tx_service/public/ut/unittest >> LongTxServicePublicTypes::SnapshotReadOnly [GOOD] |84.9%| [TM] {BAZEL_UPLOAD} ydb/core/tablet_flat/ut_util/unittest |84.9%| [TS] {BAZEL_UPLOAD} ydb/core/tx/long_tx_service/public/ut/unittest |84.9%| [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/federated_query/ut/unittest >> TestFederatedQueryHelpers::TestValidateResultSetColumns [GOOD] |84.9%| [TM] {BAZEL_UPLOAD} ydb/core/kqp/federated_query/ut/unittest |84.9%| [TM] {RESULT} ydb/core/driver_lib/version/ut/unittest >> TRdmaLow/TCqMode::ReadInOneProcessWithQpInterruption/POLLING >> XdcRdmaTest/XdcRdmaTestCqMode::SendMix/POLLING [FAIL] >> XdcRdmaTest/XdcRdmaTestCqMode::SendMix/EVENT >> YtLookupActor::Lookup |84.9%| [TS] {RESULT} ydb/library/yql/providers/generic/provider/ut/pushdown/unittest |84.9%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/viewer/libydb-core-viewer.a ------- [TS] {asan, default-linux-x86_64, release} ydb/core/tx/limiter/grouped_memory/ut/unittest >> GroupedMemoryLimiter::Update [GOOD] Test command err: FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2700;fline=manager.cpp:16;event=register_group;external_process_id=0;external_group_id=1;size=1;external_scope_id=0; FALLBACK_ACTOR_LOGGING;priority=INFO;component=2700;fline=process.h:157;event=register_group;external_group_id=1;min_group=1; FALLBACK_ACTOR_LOGGING;priority=INFO;component=2700;fline=allocation.cpp:17;event=add;id=1;stage=GLOBAL; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2700;fline=allocation.cpp:27;event=allocated;allocation_id=1;stage=GLOBAL; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2700;fline=stage_features.cpp:66;name=GLOBAL;event=allocate;usage=50;delta=50; FALLBACK_ACTOR_LOGGING;priority=INFO;component=2700;fline=allocation.cpp:17;event=add;id=2;stage=GLOBAL; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2700;fline=allocation.cpp:27;event=allocated;allocation_id=2;stage=GLOBAL; 
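The two filter_typed dumps above (for col_decimal_precision10_scale0 and col_decimal_precision4_scale2) show where the PhysicalOptimizer-PushFilterToReadTable rewrite ends up: the comparison is lifted out of the OptionalIf lambda into the GenReadTable! read lambda and then serialized into the connector's filter proto, with decimal literals encoded as value * 10^scale in a 16-byte little-endian two's-complement integer. The precision-10 bytes_value "\001\000...\000" decodes back to 1; for -22.22 at scale 2 the scaled value is -2222 = 0x...FFF752, i.e. bytes_value "R\367\377...\377" (0x52 is ASCII 'R'). A minimal sketch of that encoding — not YDB's own code, and __int128 assumes GCC/Clang:

    #include <array>
    #include <cstdint>
    #include <cstdio>

    // Encode an already-scaled decimal (value * 10^scale) as a 128-bit
    // two's-complement integer in little-endian byte order -- the layout
    // seen in the bytes_value fields of the filter_typed protos above.
    std::array<uint8_t, 16> EncodeDecimal128(__int128 scaled) {
        std::array<uint8_t, 16> out{};
        for (int i = 0; i < 16; ++i) {
            out[i] = static_cast<uint8_t>(scaled & 0xFF); // low byte first
            scaled >>= 8; // arithmetic shift on GCC/Clang keeps the sign
        }
        return out;
    }

    int main() {
        for (uint8_t b : EncodeDecimal128(-2222)) std::printf("%02x ", b);
        std::printf(" <- -22.22 at scale 2: 52 f7 ff ... ff\n");
        for (uint8_t b : EncodeDecimal128(1)) std::printf("%02x ", b);
        std::printf(" <- 1 at scale 0: 01 00 ... 00\n");
    }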
FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2700;fline=stage_features.cpp:66;name=GLOBAL;event=allocate;usage=100;delta=50; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2700;fline=manager.cpp:16;event=register_group;external_process_id=0;external_group_id=2;size=1;external_scope_id=0; FALLBACK_ACTOR_LOGGING;priority=INFO;component=2700;fline=process.h:157;event=register_group;external_group_id=2;min_group=1; FALLBACK_ACTOR_LOGGING;priority=INFO;component=2700;fline=allocation.cpp:17;event=add;id=3;stage=GLOBAL; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2700;fline=stage_features.cpp:89;name=GLOBAL;event=free;usage=50;delta=50; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2700;fline=process.h:135;event=allocation_unregister;allocation_id=1;wait=0;external_group_id=1;allocation_status=Allocated; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2700;fline=allocation.cpp:62;event=destroy;allocation_id=1;stage=GLOBAL; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2700;fline=group.cpp:27;event=try_allocation;limit=0;external_process_id=0;external_scope_id=0;forced_external_group_id=1;is_priority_process=1; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2700;fline=group.cpp:27;event=try_allocation;limit=1;external_process_id=0;external_scope_id=0;forced_external_group_id=1;is_priority_process=1; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2700;fline=group.cpp:43;event=common_forced_group;count=1;external_group_id=2; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2700;fline=allocation.cpp:27;event=allocated;allocation_id=3;stage=GLOBAL; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2700;fline=stage_features.cpp:66;name=GLOBAL;event=allocate;usage=100;delta=50; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2700;fline=process.h:135;event=allocation_unregister;allocation_id=3;wait=0;external_group_id=2;allocation_status=Allocated; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2700;fline=allocation.cpp:62;event=destroy;allocation_id=3;stage=GLOBAL; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2700;fline=process.h:135;event=allocation_unregister;allocation_id=2;wait=0;external_group_id=1;allocation_status=Allocated; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2700;fline=allocation.cpp:62;event=destroy;allocation_id=2;stage=GLOBAL; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2700;fline=manager.cpp:26;event=unregister_group;external_process_id=0;external_group_id=1;size=1; FALLBACK_ACTOR_LOGGING;priority=INFO;component=2700;fline=process.h:145;event=remove_group;external_group_id=1;min_group=2; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2700;fline=group.cpp:27;event=try_allocation;limit=0;external_process_id=0;external_scope_id=0;forced_external_group_id=2;is_priority_process=1; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2700;fline=manager.cpp:26;event=unregister_group;external_process_id=0;external_group_id=2;size=1; FALLBACK_ACTOR_LOGGING;priority=INFO;component=2700;fline=process.h:145;event=remove_group;external_group_id=2;min_group=NO_VALUE_OPTIONAL; FALLBACK_ACTOR_LOGGING;priority=INFO;component=2700;fline=process.h:73;event=scope_cleaned;process_id=0;external_scope_id=0; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2700;fline=stage_features.cpp:89;name=GLOBAL;event=free;usage=50;delta=50; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2700;fline=stage_features.cpp:89;name=GLOBAL;event=free;usage=0;delta=50; 
FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2700;fline=manager.cpp:16;event=register_group;external_process_id=0;external_group_id=1;size=1;external_scope_id=0; FALLBACK_ACTOR_LOGGING;priority=INFO;component=2700;fline=process.h:157;event=register_group;external_group_id=1;min_group=1; FALLBACK_ACTOR_LOGGING;priority=INFO;component=2700;fline=allocation.cpp:17;event=add;id=4;stage=GLOBAL; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2700;fline=allocation.cpp:27;event=allocated;allocation_id=4;stage=GLOBAL; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2700;fline=stage_features.cpp:66;name=GLOBAL;event=allocate;usage=10;delta=10; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2700;fline=manager.cpp:16;event=register_group;external_process_id=0;external_group_id=2;size=1;external_scope_id=0; FALLBACK_ACTOR_LOGGING;priority=INFO;component=2700;fline=process.h:157;event=register_group;external_group_id=2;min_group=1; FALLBACK_ACTOR_LOGGING;priority=INFO;component=2700;fline=allocation.cpp:17;event=add;id=5;stage=GLOBAL; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2700;fline=manager.cpp:16;event=register_group;external_process_id=0;external_group_id=3;size=1;external_scope_id=0; FALLBACK_ACTOR_LOGGING;priority=INFO;component=2700;fline=process.h:157;event=register_group;external_group_id=3;min_group=1; FALLBACK_ACTOR_LOGGING;priority=INFO;component=2700;fline=allocation.cpp:17;event=add;id=6;stage=GLOBAL; FALLBACK_ACTOR_LOGGING;priority=INFO;component=2700;fline=allocation.cpp:17;event=add;id=7;stage=GLOBAL; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2700;fline=allocation.cpp:27;event=allocated;allocation_id=7;stage=GLOBAL; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2700;fline=stage_features.cpp:66;name=GLOBAL;event=allocate;usage=1010;delta=1000; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2700;fline=process.h:135;event=allocation_unregister;allocation_id=7;wait=0;external_group_id=1;allocation_status=Allocated; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2700;fline=allocation.cpp:62;event=destroy;allocation_id=7;stage=GLOBAL; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2700;fline=group.cpp:27;event=try_allocation;limit=0;external_process_id=0;external_scope_id=0;forced_external_group_id=1;is_priority_process=1; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2700;fline=manager.cpp:26;event=unregister_group;external_process_id=0;external_group_id=1;size=1; FALLBACK_ACTOR_LOGGING;priority=INFO;component=2700;fline=process.h:145;event=remove_group;external_group_id=1;min_group=2; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2700;fline=group.cpp:27;event=try_allocation;limit=0;external_process_id=0;external_scope_id=0;forced_external_group_id=2;is_priority_process=1; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2700;fline=group.cpp:40;event=forced_group;count=1;external_group_id=2; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2700;fline=allocation.cpp:27;event=allocated;allocation_id=5;stage=GLOBAL; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2700;fline=stage_features.cpp:66;name=GLOBAL;event=allocate;usage=2010;delta=1000; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2700;fline=process.h:135;event=allocation_unregister;allocation_id=4;wait=0;external_group_id=1;allocation_status=Allocated; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2700;fline=allocation.cpp:62;event=destroy;allocation_id=4;stage=GLOBAL; 
FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2700;fline=group.cpp:27;event=try_allocation;limit=0;external_process_id=0;external_scope_id=0;forced_external_group_id=2;is_priority_process=1; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2700;fline=manager.cpp:26;event=unregister_group;external_process_id=0;external_group_id=2;size=1; FALLBACK_ACTOR_LOGGING;priority=INFO;component=2700;fline=process.h:145;event=remove_group;external_group_id=2;min_group=3; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2700;fline=group.cpp:27;event=try_allocation;limit=0;external_process_id=0;external_scope_id=0;forced_external_group_id=3;is_priority_process=1; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2700;fline=group.cpp:40;event=forced_group;count=1;external_group_id=3; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2700;fline=allocation.cpp:27;event=allocated;allocation_id=6;stage=GLOBAL; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2700;fline=stage_features.cpp:66;name=GLOBAL;event=allocate;usage=3010;delta=1000; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2700;fline=process.h:135;event=allocation_unregister;allocation_id=5;wait=0;external_group_id=2;allocation_status=Allocated; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2700;fline=allocation.cpp:62;event=destroy;allocation_id=5;stage=GLOBAL; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2700;fline=manager.cpp:26;event=unregister_group;external_process_id=0;external_group_id=3;size=1; FALLBACK_ACTOR_LOGGING;priority=INFO;component=2700;fline=process.h:145;event=remove_group;external_group_id=3;min_group=NO_VALUE_OPTIONAL; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2700;fline=process.h:135;event=allocation_unregister;allocation_id=6;wait=0;external_group_id=3;allocation_status=Allocated; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2700;fline=allocation.cpp:62;event=destroy;allocation_id=6;stage=GLOBAL; FALLBACK_ACTOR_LOGGING;priority=INFO;component=2700;fline=process.h:73;event=scope_cleaned;process_id=0;external_scope_id=0; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2700;fline=stage_features.cpp:89;name=GLOBAL;event=free;usage=2010;delta=1000; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2700;fline=stage_features.cpp:89;name=GLOBAL;event=free;usage=1010;delta=1000; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2700;fline=stage_features.cpp:89;name=GLOBAL;event=free;usage=10;delta=1000; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2700;fline=stage_features.cpp:89;name=GLOBAL;event=free;usage=0;delta=10; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2700;fline=manager.cpp:16;event=register_group;external_process_id=0;external_group_id=1;size=1;external_scope_id=0; FALLBACK_ACTOR_LOGGING;priority=INFO;component=2700;fline=process.h:157;event=register_group;external_group_id=1;min_group=1; FALLBACK_ACTOR_LOGGING;priority=INFO;component=2700;fline=allocation.cpp:17;event=add;id=8;stage=GLOBAL; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2700;fline=allocation.cpp:27;event=allocated;allocation_id=8;stage=GLOBAL; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2700;fline=stage_features.cpp:66;name=GLOBAL;event=allocate;usage=1000;delta=1000; FALLBACK_ACTOR_LOGGING;priority=INFO;component=2700;fline=allocation.cpp:17;event=add;id=9;stage=GLOBAL; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2700;fline=allocation.cpp:27;event=allocated;allocation_id=9;stage=GLOBAL; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2700;fline=stage_features.cpp:66;name=GLOBAL;event=allocate;usage=2000;delta=1000; 
FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2700;fline=manager.cpp:16;event=register_group;external_process_id=0;external_group_id=2;size=1;external_scope_id=0; FALLBACK_ACTOR_LOGGING;priority=INFO;component=2700;fline=process.h:157;event=register_group;external_group_id=2;min_group=1; FALLBACK_ACTOR_LOGGING;priority=INFO;component=2700;fline=allocation.cpp:17;event=add;id=10;stage=GLOBAL; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2700;fline=manager.cpp:16;event=register_group;external_process_id=0;external_group_id=3;size=1;external_scope_id=0; FALLBACK_ACTOR_LOGGING;priority=INFO;component=2700;fline=process.h:157;event=register_group;external_group_id=3;min_group=1; FALLBACK_ACTOR_LOGGING;priority=INFO;component=2700;fline=allocation.cpp:17;event=add;id=11;stage=GLOBAL; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2700;fline=manager.cpp:26;event=unregister_group;external_process_id=0;external_group_id=1;size=1; FALLBACK_ACTOR_LOGGING;priority=INFO;component=2700;fline=process.h:145;event=remove_group;external_group_id=1;min_group=2; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2700;fline=group.cpp:27;event=try_allocation;limit=0;external_process_id=0;external_scope_id=0;forced_external_group_id=2;is_priority_process=1; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2700;fline=group.cpp:40;event=forced_group;count=1;external_group_id=2; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2700;fline=allocation.cpp:27;event=allocated;allocation_id=10;stage=GLOBAL; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2700;fline=stage_features.cpp:66;name=GLOBAL;event=allocate;usage=3000;delta=1000; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2700;fline=process.h:135;event=allocation_unregister;allocation_id=9;wait=0;external_group_id=1;allocation_status=Allocated; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2700;fline=allocation.cpp:62;event=destroy;allocation_id=9;stage=GLOBAL; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2700;fline=group.cpp:27;event=try_allocation;limit=0;external_process_id=0;external_scope_id=0;forced_external_group_id=2;is_priority_process=1; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2700;fline=manager.cpp:26;event=unregister_group;external_process_id=0;external_group_id=2;size=1; FALLBACK_ACTOR_LOGGING;priority=INFO;component=2700;fline=process.h:145;event=remove_group;external_group_id=2;min_group=3; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2700;fline=group.cpp:27;event=try_allocation;limit=0;external_process_id=0;external_scope_id=0;forced_external_group_id=3;is_priority_process=1; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2700;fline=group.cpp:40;event=forced_group;count=1;external_group_id=3; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2700;fline=allocation.cpp:27;event=allocated;allocation_id=11;stage=GLOBAL; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2700;fline=stage_features.cpp:66;name=GLOBAL;event=allocate;usage=4000;delta=1000; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2700;fline=process.h:135;event=allocation_unregister;allocation_id=10;wait=0;external_group_id=2;allocation_status=Allocated; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2700;fline=allocation.cpp:62;event=destroy;allocation_id=10;stage=GLOBAL; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2700;fline=manager.cpp:26;event=unregister_group;external_process_id=0;external_group_id=3;size=1; FALLBACK_ACTOR_LOGGING;priority=INFO;component=2700;fline=process.h:145;event=remove_group;external_group_id=3;min_group=NO_VALUE_OPTIONAL; 
FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2700;fline=process.h:135;event=allocation_unregister;allocation_id=11;wait=0;external_group_id=3;allocation_status=Allocated; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2700;fline=allocation.cpp:62;event=destroy;allocation_id=11;stage=GLOBAL; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2700;fline=process.h:135;event=allocation_unregister;allocation_id=8;wait=0;external_group_id=1;allocation_status=Allocated; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2700;fline=allocation.cpp:62;event=destroy;allocation_id=8;stage=GLOBAL; FALLBACK_ACTOR_LOGGING;priority=INFO;component=2700;fline=process.h:73;event=scope_cleaned;process_id=0;external_scope_id=0; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2700;fline=stage_features.cpp:89;name=GLOBAL;event=free;usage=3000;delta=1000; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2700;fline=stage_features.cpp:89;name=GLOBAL;event=free;usage=2000;delta=1000; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2700;fline=stage_features.cpp:89;name=GLOBAL;event=free;usage=1000;delta=1000; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2700;fline=stage_features.cpp:89;name=GLOBAL;event=free;usage=0;delta=1000; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2700;fline=manager.cpp:16;event=register_group;external_process_id=0;external_group_id=1;size=1;external_scope_id=0; FALLBACK_ACTOR_LOGGING;priority=INFO;component=2700;fline=process.h:157;event=register_group;external_group_id=1;min_group=1; FALLBACK_ACTOR_LOGGING;priority=INFO;component=2700;fline=allocation.cpp:17;event=add;id=12;stage=GLOBAL; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2700;fline=allocation.cpp:27;event=allocated;allocation_id=12;stage=GLOBAL; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2700;fline=stage_features.cpp:66;name=GLOBAL;event=allocate;usage=1000;delta=1000; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2700;fline=manager.cpp:16;event=register_group;external_process_id=0;external_group_id=3;size=1;external_scope_id=0; FALLBACK_ACTOR_LOGGING;priority=INFO;component=2700;fline=process.h:157;event=register_group;external_group_id=3;min_group=1; FALLBACK_ACTOR_LOGGING;priority=INFO;component=2700;fline=allocation.cpp:17;event=add;id=13;stage=GLOBAL; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2700;fline=stage_features.cpp:100;name=GLOBAL;event=update;usage=1000;waiting=10;allocated=1;from=1000;to=10; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2700;fline=group.cpp:27;event=try_allocation;limit=0;external_process_id=0;external_scope_id=0;forced_external_group_id=1;is_priority_process=1; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2700;fline=group.cpp:27;event=try_allocation;limit=1;external_process_id=0;external_scope_id=0;forced_external_group_id=1;is_priority_process=1; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2700;fline=group.cpp:43;event=common_forced_group;count=1;external_group_id=3; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2700;fline=allocation.cpp:27;event=allocated;allocation_id=13;stage=GLOBAL; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2700;fline=stage_features.cpp:66;name=GLOBAL;event=allocate;usage=20;delta=10; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2700;fline=manager.cpp:26;event=unregister_group;external_process_id=0;external_group_id=3;size=1; FALLBACK_ACTOR_LOGGING;priority=INFO;component=2700;fline=process.h:145;event=remove_group;external_group_id=3;min_group=1; 
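The FALLBACK_ACTOR_LOGGING records here trace the grouped memory limiter's accounting for the GLOBAL stage: each admitted allocation raises usage by its delta (event=allocate;usage=...;delta=...), frees walk it back down symmetrically, a request that does not fit is parked as waiting, and group teardown admits waiters through the forced-group path (event=forced_group / common_forced_group) so the oldest group can always progress — which is why usage may overshoot the limit after the update from=1000;to=10. A toy model of that bookkeeping (illustrative names, not the real TStageFeatures interface):

    #include <cstdint>
    #include <cstdio>

    // Toy stage accountant mirroring the allocate/free records above:
    // usage grows by delta on admit, shrinks on free; over-limit requests
    // wait unless forced (the path taken when an older group completes).
    struct Stage {
        uint64_t Usage = 0;
        uint64_t Limit;
        explicit Stage(uint64_t limit) : Limit(limit) {}

        bool TryAllocate(uint64_t delta, bool forced) {
            if (!forced && Usage + delta > Limit)
                return false; // parked as a waiting allocation
            Usage += delta;
            std::printf("event=allocate;usage=%llu;delta=%llu\n",
                        (unsigned long long)Usage, (unsigned long long)delta);
            return true;
        }
        void Free(uint64_t delta) {
            Usage -= delta;
            std::printf("event=free;usage=%llu;delta=%llu\n",
                        (unsigned long long)Usage, (unsigned long long)delta);
        }
    };

    int main() {
        Stage global(/*limit=*/100);
        global.TryAllocate(50, false);               // usage=50
        global.TryAllocate(50, false);               // usage=100
        if (!global.TryAllocate(50, false))          // over limit: waits
            global.TryAllocate(50, /*forced=*/true); // forced group admits it
        global.Free(50); global.Free(50); global.Free(50);
    }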
FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2700;fline=process.h:135;event=allocation_unregister;allocation_id=13;wait=0;external_group_id=3;allocation_status=Allocated; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2700;fline=allocation.cpp:62;event=destroy;allocation_id=13;stage=GLOBAL; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2700;fline=manager.cpp:26;event=unregister_group;external_process_id=0;external_group_id=1;size=1; FALLBACK_ACTOR_LOGGING;priority=INFO;component=2700;fline=process.h:145;event=remove_group;external_group_id=1;min_group=NO_VALUE_OPTIONAL; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2700;fline=process.h:135;event=allocation_unregister;allocation_id=12;wait=0;external_group_id=1;allocation_status=Allocated; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2700;fline=allocation.cpp:62;event=destroy;allocation_id=12;stage=GLOBAL; FALLBACK_ACTOR_LOGGING;priority=INFO;component=2700;fline=process.h:73;event=scope_cleaned;process_id=0;external_scope_id=0; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2700;fline=stage_features.cpp:89;name=GLOBAL;event=free;usage=10;delta=10; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2700;fline=stage_features.cpp:89;name=GLOBAL;event=free;usage=0;delta=10; |84.9%| [TS] {RESULT} ydb/core/config/validation/auth_config_validator_ut/unittest |84.9%| [TS] {BAZEL_UPLOAD} ydb/core/tx/limiter/grouped_memory/ut/unittest |84.9%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/security/ticket_parser_ut.cpp >> BlobDepot::TestBlockedEvGetRequest [GOOD] >> BlobDepot::BasicRange |84.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/viewer/json_handlers_viewer.cpp |84.9%| [TS] {RESULT} ydb/library/yql/providers/dq/provider/ut/unittest >> YtLookupActor::Lookup [GOOD] >> YtLookupActor::LookupMultiMatches [GOOD] >> test.py::test[solomon-BadDownsamplingAggregation-] |84.9%| [TM] {RESULT} ydb/core/tablet_flat/ut_util/unittest |84.9%| [TS] {RESULT} ydb/core/tx/long_tx_service/public/ut/unittest |84.9%| [TM] {RESULT} ydb/core/kqp/federated_query/ut/unittest |84.9%| [TS] {RESULT} ydb/core/tx/limiter/grouped_memory/ut/unittest |84.9%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/security/ticket_parser_ut.cpp >> XdcRdmaTest/XdcRdmaTestCqMode::SendMix/EVENT [FAIL] >> XdcRdmaTest/XdcRdmaTestCqMode::SendMixBig/POLLING >> TRdmaLow/TCqMode::ReadInOneProcessWithQpInterruption/EVENT >> XdcRdmaTest/XdcRdmaTestCqMode::SendMixBig/POLLING [FAIL] >> XdcRdmaTest/XdcRdmaTestCqMode::SendMixBig/EVENT ------- [TS] {asan, default-linux-x86_64, release} ydb/library/yql/providers/yt/actors/ut/unittest >> YtLookupActor::LookupMultiMatches [GOOD] Test command err: 2026-01-08 15:25:16.305 INFO ydb-library-yql-providers-yt-actors-ut(pid=46696, tid=0x00007F0F59854D40) [default] storage.cpp:189: FileStorage initialized in "/home/runner/.ya/build/build_root/663n/001814/r3tmp/tmp3afCw8/", temporary dir: "/home/runner/.ya/build/build_root/663n/001814/r3tmp/tmp3afCw8/46696", locks dir:"/home/runner/.ya/build/build_root/663n/001814/r3tmp/tmp3afCw8/locks", files: 0, total size: 0 2026-01-08 15:25:16.305 INFO ydb-library-yql-providers-yt-actors-ut(pid=46696, tid=0x00007F0F59854D40) [YT] yql_yt_lookup_actor.cpp:105: New Yt proivider lookup source actor(ActorId=[1:4:2051]) for cluster=Plato, table=Lookup 2026-01-08 15:25:16.315 DEBUG ydb-library-yql-providers-yt-actors-ut(pid=46696, tid=0x00007F0F59854D40) [YT] yql_yt_lookup_actor.cpp:179: ActorId=[1:4:2051] Got LookupRequest for 4 keys 2026-01-08 15:25:16.343 INFO ydb-library-yql-providers-yt-actors-ut(pid=46696, tid=0x00007F0F59854D40) 
[default] storage.cpp:189: FileStorage initialized in "/home/runner/.ya/build/build_root/663n/001814/r3tmp/tmpm3016b/", temporary dir: "/home/runner/.ya/build/build_root/663n/001814/r3tmp/tmpm3016b/46696", locks dir:"/home/runner/.ya/build/build_root/663n/001814/r3tmp/tmpm3016b/locks", files: 0, total size: 0 2026-01-08 15:25:16.345 INFO ydb-library-yql-providers-yt-actors-ut(pid=46696, tid=0x00007F0F59854D40) [YT] yql_yt_lookup_actor.cpp:105: New Yt proivider lookup source actor(ActorId=[2:4:2051]) for cluster=Plato, table=Lookup 2026-01-08 15:25:16.351 DEBUG ydb-library-yql-providers-yt-actors-ut(pid=46696, tid=0x00007F0F59854D40) [YT] yql_yt_lookup_actor.cpp:179: ActorId=[2:4:2051] Got LookupRequest for 4 keys >> Init::TWithDefaultParser [GOOD] >> XdcRdmaTest/XdcRdmaTestCqMode::SendMixBig/EVENT [FAIL] >> StaticNodeSelectorsInit::TestStaticNodeSelectorForActorSystem |84.9%| [TS] {BAZEL_UPLOAD} ydb/library/yql/providers/yt/actors/ut/unittest >> StaticNodeSelectorsInit::TestStaticNodeSelectorForActorSystem [GOOD] >> StaticNodeSelectorsInit::TestStaticNodeSelectorWithAnotherLabel >> StaticNodeSelectorsInit::TestStaticNodeSelectorWithAnotherLabel [GOOD] >> StaticNodeSelectorsInit::TestStaticNodeSelectorInheritance [GOOD] >> StaticNodeSelectorsInit::TestStaticNodeSelectorByNodeId >> TTraceId::GenerateTraceIdBench >> StaticNodeSelectorsInit::TestStaticNodeSelectorByNodeId [GOOD] >> StaticNodeSelectorsInit::TestStaticNodeSelectorByNodeHost [GOOD] >> StaticNodeSelectorsInit::TestStaticNodeSelectorByNodeKind >> TTraceId::GenerateTraceIdBench [GOOD] >> TTraceId::OpenTelemetryHeaderParser [GOOD] >> TBsLocalRecovery::ChaoticWriteRestartHugeDecreased [GOOD] >> StaticNodeSelectorsInit::TestStaticNodeSelectorByNodeKind [GOOD] >> XdsBootstrapConfig::CanSetHostnameAndDataCenterFromYdbNode >> XdsBootstrapConfig::CanSetHostnameAndDataCenterFromYdbNode [GOOD] >> XdsBootstrapConfig::CanSetDataCenterFromYdbNodeArgument >> TRangeTreap::EarlyQuit [GOOD] >> XdsBootstrapConfig::CanSetDataCenterFromYdbNodeArgument [GOOD] >> XdsBootstrapConfig::CanCheckThatXdsBootstrapIsAbsent [GOOD] >> XdsBootstrapConfig::CanUseNodeIdFromYamlConfig >> XdsBootstrapConfig::CanUseNodeIdFromYamlConfig [GOOD] >> DQCBO::Empty [GOOD] >> DQCBO::JoinSearch2Rels [GOOD] >> DQCBO::JoinSearch3Rels [GOOD] >> DQCBO::JoinSearchYQL19363 [GOOD] >> DQCBO::JoinSearchYT24403 [GOOD] >> DQCBO::RelCollector [GOOD] >> DQCBO::RelCollectorBrokenEquiJoin [GOOD] >> DQCBO::DqOptimizeEquiJoinWithCostsNative [GOOD] >> DQCBO::DqOptimizeEquiJoinWithCostsPG >> DQCBO::DqOptimizeEquiJoinWithCostsPG [GOOD] >> HypergraphBuild::SimpleChain3NodesTransitiveClosure [GOOD] >> HypergraphBuild::SimpleChain4NodesTransitiveClosure >> HypergraphBuild::SimpleChain4NodesTransitiveClosure [GOOD] >> HypergraphBuild::SimpleChain5NodesTransitiveClosure [GOOD] >> HypergraphBuild::ComplexTransitiveClosure [GOOD] >> HypergraphBuild::SimpleDimpleJoin [GOOD] >> HypergraphBuild::AnyJoinWithTransitiveClosure [GOOD] >> HypergraphBuild::AnyJoinConstraints1 [GOOD] >> HypergraphBuild::AnyJoinConstraints2 [GOOD] >> HypergraphBuild::AnyJoinConstraints3 [GOOD] >> HypergraphBuild::IsReorderableConstraint [GOOD] >> HypergraphBuild::JoinKindConflictSimple [GOOD] >> HypergraphBuild::CrossJoinEliminationSimple [GOOD] >> HypergraphBuild::CrossJoinEliminationComplex [GOOD] >> HypergraphBuild::SimpleCycle [GOOD] >> HypergraphBuild::TransitiveClosurePlusCycle [GOOD] >> HypergraphBuild::CondsThatMayCauseATransitiveClosureButTheyMustNot [GOOD] >> HypergraphBuild::TransitiveClosureManyCondsBetweenJoin [GOOD] 
>> HypergraphBuild::ManyCondsBetweenJoinForTransitiveClosure [GOOD] >> HypergraphBuild::TwoJoinsTwoTheSameKeyInCyclicGraphTransitiveClosure [GOOD] >> HypergraphBuild::JoinTopologiesBenchmark [GOOD] >> ClosedIntervalSet::Union [GOOD] >> ClosedIntervalSet::Difference ------- [TS] {asan, default-linux-x86_64, release} ydb/library/actors/wilson/ut/unittest >> TTraceId::OpenTelemetryHeaderParser [GOOD] Test command err: 14057937409695561499 188462 uS per test, 18 ns per one traceId generation |84.9%| [TS] {BAZEL_UPLOAD} ydb/library/actors/wilson/ut/unittest >> InterestingOrderingsShuffle::TwoOneItemEquivOnly [GOOD] >> InterestingOrderingsShuffle::ManyOneItemEquivOnly [GOOD] >> InterestingOrderingsShuffle::ConsideringOldFDs [GOOD] >> InterestingOrderingsShuffle::Join64ChainImitation >> InterestingOrderingsShuffle::Join64ChainImitation [GOOD] >> InterestingOrderingsShuffle::ManyItems [GOOD] >> InterestingOrderingsShuffle::PruningFDs [GOOD] >> InterestingOrderingsSorting::PrefixClosure [GOOD] >> InterestingOrderingsSorting::SimpleImplicationFD [GOOD] >> InterestingOrderingsSorting::EquivWithImplicationFDs [GOOD] >> InterestingOrderingsSorting::EquivReplaceElements [GOOD] >> InterestingOrderingsSorting::ComplexPrefixClosure [GOOD] >> InterestingOrderingsSorting::ConstantFD [GOOD] >> InterestingOrderingsSorting::ComplexOrderingsWithMultipleImplicationsAndEquivalences |84.9%| [TS] {asan, default-linux-x86_64, release} ydb/core/config/init/ut/unittest >> XdsBootstrapConfig::CanUseNodeIdFromYamlConfig [GOOD] |84.9%| [TS] {BAZEL_UPLOAD} ydb/core/config/init/ut/unittest >> BlobDepot::BasicRange [GOOD] >> BlobDepot::BasicDiscover ------- [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/ut_vdisk/unittest >> TBsLocalRecovery::ChaoticWriteRestartHugeDecreased [GOOD] Test command err: 2026-01-08T15:24:43.388346Z :BS_PDISK ERROR: {BPD01@blobstorage_pdisk_impl.cpp:3072} PDiskId# 1 ownerId# 9 invalid OwnerRound, got# 101 expected# 151 error in TLogWrite for ownerId# 9 ownerRound# 101 lsn# 14 PDiskId# 1 2026-01-08T15:24:46.006871Z :BS_PDISK ERROR: {BPD01@blobstorage_pdisk_impl.cpp:3072} PDiskId# 1 ownerId# 3 invalid OwnerRound, got# 101 expected# 151 error in TLogWrite for ownerId# 3 ownerRound# 101 lsn# 11 PDiskId# 1 |84.9%| [TM] {BAZEL_UPLOAD} ydb/core/blobstorage/ut_vdisk/unittest ------- [TM] {asan, default-linux-x86_64, release} ydb/library/actors/interconnect/ut_rdma/gtest >> XdcRdmaTest/XdcRdmaTestCqMode::SendMixBig/EVENT [FAIL] Test command err: ydb/library/actors/interconnect/ut_rdma/rdma_xdc_ut.cpp:272: Value of: serializedRope.has_value() Actual: false Expected: true C++ exception with description "bad_optional_access" thrown in the test body. 2026-01-08T15:25:14.577395Z :INTERCONNECT INFO: interconnect_tcp_proxy.cpp:43: Proxy [2:7593012184579427506:2048] [node 1] ICP01 ready to work C++ exception with description "bad_optional_access" thrown in the test body. 2026-01-08T15:25:14.694635Z :INTERCONNECT INFO: interconnect_tcp_proxy.cpp:43: Proxy [2:7593012183846908699:2048] [node 1] ICP01 ready to work 2026-01-08T15:25:14.682587Z :INTERCONNECT INFO: interconnect_tcp_proxy.cpp:43: Proxy [1:7593012184197717289:2048] [node 2] ICP01 ready to work C++ exception with description "bad_optional_access" thrown in the test body. 2026-01-08T15:25:14.810730Z :INTERCONNECT INFO: interconnect_tcp_proxy.cpp:43: Proxy [2:7593012187903050811:2048] [node 1] ICP01 ready to work C++ exception with description "bad_optional_access" thrown in the test body. 
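The XdcRdmaTest failures above all share one shape: a non-fatal expectation at rdma_xdc_ut.cpp:272 fails (Value of: serializedRope.has_value() / Actual: false), the test body keeps executing, and a later dereference of the empty std::optional throws, which gtest reports as "C++ exception with description "bad_optional_access" thrown in the test body". A minimal reproduction of that pattern — hypothetical test code, not the actual rdma_xdc_ut.cpp:

    #include <gtest/gtest.h>
    #include <optional>

    // Stand-in for the serialization step; returns nullopt on failure.
    std::optional<int> TrySerialize(bool ok) {
        return ok ? std::optional<int>(42) : std::nullopt;
    }

    TEST(OptionalPattern, ExpectContinuesThenThrows) {
        auto rope = TrySerialize(false);
        EXPECT_TRUE(rope.has_value()); // failure recorded, body continues...
        // rope.value();               // ...and this would throw
                                       // std::bad_optional_access
    }

    TEST(OptionalPattern, AssertStopsBeforeDereference) {
        auto rope = TrySerialize(false);
        ASSERT_TRUE(rope.has_value()); // fatal: returns from the test here
        EXPECT_EQ(*rope, 42);          // never reached with an empty optional
    }

ASSERT_* aborts only the current test function, so it is the usual guard when later lines cannot cope with the failed precondition.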
2026-01-08T15:25:14.886828Z :INTERCONNECT INFO: interconnect_tcp_proxy.cpp:43: Proxy [1:7593012184378770179:2048] [node 2] ICP01 ready to work 2026-01-08T15:25:14.914664Z :INTERCONNECT INFO: interconnect_tcp_proxy.cpp:43: Proxy [2:7593012187630092341:2048] [node 1] ICP01 ready to work C++ exception with description "bad_optional_access" thrown in the test body. 2026-01-08T15:25:14.978379Z :INTERCONNECT INFO: interconnect_tcp_proxy.cpp:43: Proxy [1:7593012187472943348:2048] [node 2] ICP01 ready to work 2026-01-08T15:25:14.982846Z :INTERCONNECT INFO: interconnect_tcp_proxy.cpp:43: Proxy [2:7593012184448023566:2048] [node 1] ICP01 ready to work C++ exception with description "bad_optional_access" thrown in the test body. 2026-01-08T15:25:16.026789Z :INTERCONNECT INFO: interconnect_tcp_proxy.cpp:43: Proxy [1:7593012190061613591:2048] [node 2] ICP01 ready to work 2026-01-08T15:25:16.034134Z :INTERCONNECT INFO: interconnect_tcp_proxy.cpp:43: Proxy [2:7593012195454196922:2048] [node 1] ICP01 ready to work C++ exception with description "bad_optional_access" thrown in the test body. 2026-01-08T15:25:17.095760Z :INTERCONNECT INFO: interconnect_tcp_proxy.cpp:43: Proxy [2:7593012197018120924:2048] [node 1] ICP01 ready to work C++ exception with description "bad_optional_access" thrown in the test body. 2026-01-08T15:25:17.250592Z :INTERCONNECT INFO: interconnect_tcp_proxy.cpp:43: Proxy [2:7593012196793818627:2048] [node 1] ICP01 ready to work C++ exception with description "bad_optional_access" thrown in the test body. ================================================================= ==46551==ERROR: LeakSanitizer: detected memory leaks Direct leak of 528 byte(s) in 6 object(s) allocated from: #0 0x000000c0d85d in operator new(unsigned long) /-S/contrib/libs/clang20-rt/lib/asan/asan_new_delete.cpp:86:3 #1 0x000000b0d674 in TEventsForTest::Generate(unsigned int, NInterconnect::NRdma::IMemPool*) /-S/ydb/library/actors/interconnect/ut_rdma/rdma_xdc_ut.cpp:120:23 #2 0x000000adb8de in TEventsForTest /-S/ydb/library/actors/interconnect/ut_rdma/rdma_xdc_ut.cpp:110:9 #3 0x000000adb8de in XdcRdmaTestCqMode_SendMixBig_Test::TestBody() /-S/ydb/library/actors/interconnect/ut_rdma/rdma_xdc_ut.cpp:466:20 #4 0x000000fcd0e4 in HandleSehExceptionsInMethodIfSupported /-S/contrib/restricted/googletest/googletest/src/gtest.cc:2664:10 #5 0x000000fcd0e4 in void testing::internal::HandleExceptionsInMethodIfSupported(testing::Test*, void (testing::Test::*)(), char const*) /-S/contrib/restricted/googletest/googletest/src/gtest.cc:2700:14 #6 0x000000fcce10 in testing::Test::Run() /-S/contrib/restricted/googletest/googletest/src/gtest.cc:2739:5 #7 0x000000fcf695 in testing::TestInfo::Run() /-S/contrib/restricted/googletest/googletest/src/gtest.cc:2885:11 #8 0x000000fd2541 in testing::TestSuite::Run() /-S/contrib/restricted/googletest/googletest/src/gtest.cc:3063:30 #9 0x000000ffd58e in testing::internal::UnitTestImpl::RunAllTests() /-S/contrib/restricted/googletest/googletest/src/gtest.cc:6054:44 #10 0x000000ffbf84 in HandleSehExceptionsInMethodIfSupported /-S/contrib/restricted/googletest/googletest/src/gtest.cc:2664:10 #11 0x000000ffbf84 in bool testing::internal::HandleExceptionsInMethodIfSupported(testing::internal::UnitTestImpl*, bool (testing::internal::UnitTestImpl::*)(), char const*) /-S/contrib/restricted/googletest/googletest/src/gtest.cc:2700:14 #12 0x000000ffbaf6 in testing::UnitTest::Run() /-S/contrib/restricted/googletest/googletest/src/gtest.cc:5594:10 #13 0x000001057e9f in RUN_ALL_TESTS 
/-S/contrib/restricted/googletest/googletest/include/gtest/gtest.h:2334:73 #14 0x000001057e9f in NGTest::Main(int, char**) /-S/library/cpp/testing/gtest/main.cpp:274:12 #15 0x7f5deb5ebd8f (/lib/x86_64-linux-gnu/libc.so.6+0x29d8f) (BuildId: 4f7b0c955c3d81d7cac1501a2498b69d1d82bfe7) Direct leak of 176 byte(s) in 2 object(s) allocated from: #0 0x000000c0d85d in operator new(unsigned long) /-S/contrib/libs/clang20-rt/lib/asan/asan_new_delete.cpp:86:3 #1 0x000000ac5c7c in MakeTestEvent(unsigned long, NInterconnect::NRdma::IMemPool*, bool, bool) /-S/ydb/library/actors/interconnect/ut_rdma/rdma_xdc_ut.cpp:181:15 #2 0x000000ad9931 in XdcRdmaTestCqMode_SendMix_Test::TestBody() /-S/ydb/library/actors/interconnect/ut_rdma/rdma_xdc_ut.cpp:452:20 #3 0x000000fcd0e4 in HandleSehExceptionsInMethodIfSupported /-S/contrib/restricted/googletest/googletest/src/gtest.cc:2664:10 #4 0x000000fcd0e4 in void testing::internal::HandleExceptionsInMethodIfSupported(testing::Test*, void (testing::Test::*)(), char const*) /-S/contrib/restricted/googletest/googletest/src/gtest.cc:2700:14 #5 0x000000fcce10 in testing::Test::Run() /-S/contrib/restricted/googletest/googletest/src/gtest.cc:2739:5 #6 0x000000fcf695 in testing::TestInfo::Run() /-S/contrib/restricted/googletest/googletest/src/gtest.cc:2885:11 #7 0x000000fd2541 in testing::TestSuite::Run() /-S/contrib/restricted/googletest/googletest/src/gtest.cc:3063:30 #8 0x000000ffd58e in testing::internal::UnitTestImpl::RunAllTests() /-S/contrib/restricted/googletest/googletest/src/gtest.cc:6054:44 #9 0x000000ffbf84 in HandleSehExceptionsInMethodIfSupported /-S/contrib/restricted/googletest/googletest/src/gtest.cc:2664:10 #10 0x000000ffbf84 in bool testing::internal::HandleExceptionsInMethodIfSupported(testing::internal::UnitTestImpl*, bool (testing::internal::UnitTestImpl::*)(), char const*) /-S/contrib/restricted/googletest/googletest/src/gtest.cc:2700:14 #11 0x000000ffbaf6 in testing::UnitTest::Run() /-S/contrib/restricted/googletest/googletest/src/gtest.cc:5594:10 #12 0x000001057e9f in RUN_ALL_TESTS /-S/contrib/restricted/googletest/googletest/include/gtest/gtest.h:2334:73 #13 0x000001057e9f in NGTest::Main(int, char**) /-S/library/cpp/testing/gtest/main.cpp:274:12 #14 0x7f5deb5ebd8f (/lib/x86_64-linux-gnu/libc.so.6+0x29d8f) (BuildId: 4f7b0c955c3d81d7cac1501a2498b69d1d82bfe7) Direct leak of 88 byte(s) in 1 object(s) allocated from: #0 0x000000c0d85d in operator new(unsigned long) /-S/contrib/libs/clang20-rt/lib/asan/asan_new_delete.cpp:86:3 #1 0x000000ac3b6b in MakeMultuGlueTestEvent(unsigned long, NInterconnect::NRdma::IMemPool*) /-S/ydb/library/actors/interconnect/ut_rdma/rdma_xdc_ut.cpp:160:15 #2 0x000000ad84f1 in XdcRdmaTest_SendRdmaWithMultiGlue_Test::TestBody() /-S/ydb/library/actors/interconnect/ut_rdma/rdma_xdc_ut.cpp:405:16 #3 0x000000fcd0e4 in HandleSehExceptionsInMethodIfSupported /-S/contrib/restricted/googletest/googletest/src/gtest.cc:2664:10 #4 0x000000fcd0e4 in void testing::internal::HandleExceptionsInMethodIfSupported(testing::Test*, void (testing::Test::*)(), char const*) /-S/contrib/restricted/googletest/googletest/src/gtest.cc:2700:14 #5 0x000000fcce10 in testing::Test::Run() /-S/contrib/restricted/googletest/googletest/src/gtest.cc:2739:5 #6 0x000000fcf695 in testing::TestInfo::Run() /-S/contrib/restricted/googletest/googletest/src/gtest.cc:2885:11 #7 0x000000fd2541 in testing::TestSuite::Run() /-S/contrib/restricted/googletest/googletest/src/gtest.cc:3063:30 #8 0x000000ffd58e in testing::internal::UnitTestImpl::RunAllTests() 
/-S/contrib/restricted/googletest/googletest/src/gtest.cc:6054:44 #9 0x000000ffbf84 in HandleSehExceptionsInMethodIfSupported /-S/contrib/restricted/googletest/googletest/src/gtest.cc:2664:10 #10 0x000000ffbf84 in bool testing::internal::HandleExceptionsInMethodIfSupported(testing::internal::UnitTestImpl*, bool (testing::internal::UnitTestImpl::*)(), char const*) /-S/contrib/restricted/googletest/googletest/src/gtest.cc:2700:14 #11 0x000000ffbaf6 in testing::UnitTest::Run() /-S/contrib/restricted/googletest/googletest/src/gtest.cc:5594:10 #12 0x000001057e9f in RUN_ALL_TESTS /-S/contrib/restricted/googletest/googletest/include/gtest/gtest.h:2334:73 #13 0x000001057e9f in NGTest::Main(int, char**) /-S/library/cpp/testing/gtest/main.cpp:274:12 #14 0x7f5deb5ebd8f (/lib/x86_64-linux-gnu/libc.so.6+0x29d8f) (BuildId: 4f7b0c955c3d81d7cac1501a2498b69d1d82bfe7) Direct leak of 88 byte(s) in 1 object(s) allocated from: #0 0x000000c0d85d in operator new(unsigned long) /-S/contrib/libs/clang20-rt/lib/asan/asan_new_delete.cpp:86:3 #1 0x000000ac5c7c in MakeTestEvent(unsigned long, NInterconnect::NRdma::IMemPool*, bool, bool) /-S/ydb/library/actors/interconnect/ut_rdma/rdma_xdc_ut.cpp:181:15 #2 0x000000ad3a01 in XdcRdmaTest_SendRdma_Test::TestBody() /-S/ydb/library/actors/interconnect/ut_rdma/rdma_xdc_ut.cpp:311:16 #3 0x000000fcd0e4 in HandleSehExceptionsInMethodIfSupported /-S/contrib/restricted/googletest/googletest/src/gtest.cc:2664:10 #4 0x000000fcd0e4 in void testing::internal::HandleExceptionsInMethodIfSupported(testing::Test*, void (testing::Test::*)(), char const*) /-S/contrib/restricted/googletest/googletest/src/gtest.cc:2700:14 #5 0x000000fcce10 in testing::Test::Run() /-S/contrib/restricted/googletest/googletest/src/gtest.cc:2739:5 #6 0x000000fcf695 in testing::TestInfo::Run() /-S/contrib/restricted/googletest/googletest/src/gtest.cc:2885:11 #7 0x000000fd2541 in testing::TestSuite::Run() /-S/contrib/restricted/googletest/googletest/src/gtest.cc:3063:30 #8 0x000000ffd58e in testing::internal::UnitTestImpl::RunAllTests() /-S/contrib/restricted/googletest/googletest/src/gtest.cc:6054:44 #9 0x000000ffbf84 in HandleSehExceptionsInMethodIfSupported /-S/contrib/restricted/googletest/googletest/src/gtest.cc:2664:10 #10 0x000000ffbf84 in bool testing::internal::HandleExceptionsInMethodIfSupported(testing::internal::UnitTestImpl*, bool (testing::internal::UnitTestImpl::*)(), char const*) /-S/contrib/restricted/googletest/googletest/src/gtest.cc:2700:14 #11 0x000000ffbaf6 in testing::UnitTest::Run() /-S/contrib/restricted/googletest/googletest/src/gtest.cc:5594:10 #12 0x000001057e9f in RUN_ALL_TESTS /-S/contrib/restricted/googletest/googletest/include/gtest/gtest.h:2334:73 #13 0x000001057e9f in NGTest::Main(int, char**) /-S/library/cpp/testing/gtest/main.cpp:274:12 #14 0x7f5deb5ebd8f (/lib/x86_64-linux-gnu/libc.so.6+0x29d8f) (BuildId: 4f7b0c955c3d81d7cac1501a2498b69d1d82bfe7) Direct leak of 88 byte(s) in 1 object(s) allocated from: #0 0x000000c0d85d in operator new(unsigned long) /-S/contrib/libs/clang20-rt/lib/asan/asan_new_delete.cpp:86:3 #1 0x000000ac5c7c in MakeTestEvent(unsigned long, NInterconnect::NRdma::IMemPool*, bool, bool) /-S/ydb/library/a ... 
s/protobuf/src/google/protobuf/arenastring.h:371:17 #2 0x00000160b0a3 in google::protobuf::internal::ArenaStringPtr::Set(TBasicString>&&, google::protobuf::Arena*) /-S/contrib/libs/protobuf/src/google/protobuf/arenastring.cc:171:5 #3 0x000000ac3cfe in Set /-S/contrib/libs/protobuf/src/google/protobuf/arenastring.h:421:3 #4 0x000000ac3cfe in SetBytes /-S/contrib/libs/protobuf/src/google/protobuf/arenastring.h:446:3 #5 0x000000ac3cfe in set_buffer /-B/ydb/library/actors/interconnect/ut/protos/interconnect_test.pb.h:1967:18 #6 0x000000ac3cfe in SetBuffer /-B/ydb/library/actors/interconnect/ut/protos/interconnect_test.pb.h:1428:46 #7 0x000000ac3cfe in MakeMultuGlueTestEvent(unsigned long, NInterconnect::NRdma::IMemPool*) /-S/ydb/library/actors/interconnect/ut_rdma/rdma_xdc_ut.cpp:162:16 #8 0x000000ad84f1 in XdcRdmaTest_SendRdmaWithMultiGlue_Test::TestBody() /-S/ydb/library/actors/interconnect/ut_rdma/rdma_xdc_ut.cpp:405:16 #9 0x000000fcd0e4 in HandleSehExceptionsInMethodIfSupported /-S/contrib/restricted/googletest/googletest/src/gtest.cc:2664:10 #10 0x000000fcd0e4 in void testing::internal::HandleExceptionsInMethodIfSupported(testing::Test*, void (testing::Test::*)(), char const*) /-S/contrib/restricted/googletest/googletest/src/gtest.cc:2700:14 #11 0x000000fcce10 in testing::Test::Run() /-S/contrib/restricted/googletest/googletest/src/gtest.cc:2739:5 #12 0x000000fcf695 in testing::TestInfo::Run() /-S/contrib/restricted/googletest/googletest/src/gtest.cc:2885:11 #13 0x000000fd2541 in testing::TestSuite::Run() /-S/contrib/restricted/googletest/googletest/src/gtest.cc:3063:30 #14 0x000000ffd58e in testing::internal::UnitTestImpl::RunAllTests() /-S/contrib/restricted/googletest/googletest/src/gtest.cc:6054:44 #15 0x000000ffbf84 in HandleSehExceptionsInMethodIfSupported /-S/contrib/restricted/googletest/googletest/src/gtest.cc:2664:10 #16 0x000000ffbf84 in bool testing::internal::HandleExceptionsInMethodIfSupported(testing::internal::UnitTestImpl*, bool (testing::internal::UnitTestImpl::*)(), char const*) /-S/contrib/restricted/googletest/googletest/src/gtest.cc:2700:14 #17 0x000000ffbaf6 in testing::UnitTest::Run() /-S/contrib/restricted/googletest/googletest/src/gtest.cc:5594:10 #18 0x000001057e9f in RUN_ALL_TESTS /-S/contrib/restricted/googletest/googletest/include/gtest/gtest.h:2334:73 #19 0x000001057e9f in NGTest::Main(int, char**) /-S/library/cpp/testing/gtest/main.cpp:274:12 #20 0x7f5deb5ebd8f (/lib/x86_64-linux-gnu/libc.so.6+0x29d8f) (BuildId: 4f7b0c955c3d81d7cac1501a2498b69d1d82bfe7) Indirect leak of 8 byte(s) in 1 object(s) allocated from: #0 0x000000c0d85d in operator new(unsigned long) /-S/contrib/libs/clang20-rt/lib/asan/asan_new_delete.cpp:86:3 #1 0x00000160b0a3 in NewString > > /-S/contrib/libs/protobuf/src/google/protobuf/arenastring.h:371:17 #2 0x00000160b0a3 in google::protobuf::internal::ArenaStringPtr::Set(TBasicString>&&, google::protobuf::Arena*) /-S/contrib/libs/protobuf/src/google/protobuf/arenastring.cc:171:5 #3 0x000000ac5e01 in Set /-S/contrib/libs/protobuf/src/google/protobuf/arenastring.h:421:3 #4 0x000000ac5e01 in SetBytes /-S/contrib/libs/protobuf/src/google/protobuf/arenastring.h:446:3 #5 0x000000ac5e01 in set_buffer /-B/ydb/library/actors/interconnect/ut/protos/interconnect_test.pb.h:1967:18 #6 0x000000ac5e01 in SetBuffer /-B/ydb/library/actors/interconnect/ut/protos/interconnect_test.pb.h:1428:46 #7 0x000000ac5e01 in MakeTestEvent(unsigned long, NInterconnect::NRdma::IMemPool*, bool, bool) /-S/ydb/library/actors/interconnect/ut_rdma/rdma_xdc_ut.cpp:183:16 #8 
0x000000ad7778 in XdcRdmaTest_SendRdmaWithGlue_Test::TestBody() /-S/ydb/library/actors/interconnect/ut_rdma/rdma_xdc_ut.cpp:381:16 #9 0x000000fcd0e4 in HandleSehExceptionsInMethodIfSupported /-S/contrib/restricted/googletest/googletest/src/gtest.cc:2664:10 #10 0x000000fcd0e4 in void testing::internal::HandleExceptionsInMethodIfSupported(testing::Test*, void (testing::Test::*)(), char const*) /-S/contrib/restricted/googletest/googletest/src/gtest.cc:2700:14 #11 0x000000fcce10 in testing::Test::Run() /-S/contrib/restricted/googletest/googletest/src/gtest.cc:2739:5 #12 0x000000fcf695 in testing::TestInfo::Run() /-S/contrib/restricted/googletest/googletest/src/gtest.cc:2885:11 #13 0x000000fd2541 in testing::TestSuite::Run() /-S/contrib/restricted/googletest/googletest/src/gtest.cc:3063:30 #14 0x000000ffd58e in testing::internal::UnitTestImpl::RunAllTests() /-S/contrib/restricted/googletest/googletest/src/gtest.cc:6054:44 #15 0x000000ffbf84 in HandleSehExceptionsInMethodIfSupported /-S/contrib/restricted/googletest/googletest/src/gtest.cc:2664:10 #16 0x000000ffbf84 in bool testing::internal::HandleExceptionsInMethodIfSupported(testing::internal::UnitTestImpl*, bool (testing::internal::UnitTestImpl::*)(), char const*) /-S/contrib/restricted/googletest/googletest/src/gtest.cc:2700:14 #17 0x000000ffbaf6 in testing::UnitTest::Run() /-S/contrib/restricted/googletest/googletest/src/gtest.cc:5594:10 #18 0x000001057e9f in RUN_ALL_TESTS /-S/contrib/restricted/googletest/googletest/include/gtest/gtest.h:2334:73 #19 0x000001057e9f in NGTest::Main(int, char**) /-S/library/cpp/testing/gtest/main.cpp:274:12 #20 0x7f5deb5ebd8f (/lib/x86_64-linux-gnu/libc.so.6+0x29d8f) (BuildId: 4f7b0c955c3d81d7cac1501a2498b69d1d82bfe7) Indirect leak of 8 byte(s) in 1 object(s) allocated from: #0 0x000000c0d85d in operator new(unsigned long) /-S/contrib/libs/clang20-rt/lib/asan/asan_new_delete.cpp:86:3 #1 0x00000160b0a3 in NewString > > /-S/contrib/libs/protobuf/src/google/protobuf/arenastring.h:371:17 #2 0x00000160b0a3 in google::protobuf::internal::ArenaStringPtr::Set(TBasicString>&&, google::protobuf::Arena*) /-S/contrib/libs/protobuf/src/google/protobuf/arenastring.cc:171:5 #3 0x000000ac5e01 in Set /-S/contrib/libs/protobuf/src/google/protobuf/arenastring.h:421:3 #4 0x000000ac5e01 in SetBytes /-S/contrib/libs/protobuf/src/google/protobuf/arenastring.h:446:3 #5 0x000000ac5e01 in set_buffer /-B/ydb/library/actors/interconnect/ut/protos/interconnect_test.pb.h:1967:18 #6 0x000000ac5e01 in SetBuffer /-B/ydb/library/actors/interconnect/ut/protos/interconnect_test.pb.h:1428:46 #7 0x000000ac5e01 in MakeTestEvent(unsigned long, NInterconnect::NRdma::IMemPool*, bool, bool) /-S/ydb/library/actors/interconnect/ut_rdma/rdma_xdc_ut.cpp:183:16 #8 0x000000ad5c74 in XdcRdmaTest_SendRdmaWithRegionOffset_Test::TestBody() /-S/ydb/library/actors/interconnect/ut_rdma/rdma_xdc_ut.cpp:333:16 #9 0x000000fcd0e4 in HandleSehExceptionsInMethodIfSupported /-S/contrib/restricted/googletest/googletest/src/gtest.cc:2664:10 #10 0x000000fcd0e4 in void testing::internal::HandleExceptionsInMethodIfSupported(testing::Test*, void (testing::Test::*)(), char const*) /-S/contrib/restricted/googletest/googletest/src/gtest.cc:2700:14 #11 0x000000fcce10 in testing::Test::Run() /-S/contrib/restricted/googletest/googletest/src/gtest.cc:2739:5 #12 0x000000fcf695 in testing::TestInfo::Run() /-S/contrib/restricted/googletest/googletest/src/gtest.cc:2885:11 #13 0x000000fd2541 in testing::TestSuite::Run() /-S/contrib/restricted/googletest/googletest/src/gtest.cc:3063:30 
#14 0x000000ffd58e in testing::internal::UnitTestImpl::RunAllTests() /-S/contrib/restricted/googletest/googletest/src/gtest.cc:6054:44 #15 0x000000ffbf84 in HandleSehExceptionsInMethodIfSupported /-S/contrib/restricted/googletest/googletest/src/gtest.cc:2664:10 #16 0x000000ffbf84 in bool testing::internal::HandleExceptionsInMethodIfSupported(testing::internal::UnitTestImpl*, bool (testing::internal::UnitTestImpl::*)(), char const*) /-S/contrib/restricted/googletest/googletest/src/gtest.cc:2700:14 #17 0x000000ffbaf6 in testing::UnitTest::Run() /-S/contrib/restricted/googletest/googletest/src/gtest.cc:5594:10 #18 0x000001057e9f in RUN_ALL_TESTS /-S/contrib/restricted/googletest/googletest/include/gtest/gtest.h:2334:73 #19 0x000001057e9f in NGTest::Main(int, char**) /-S/library/cpp/testing/gtest/main.cpp:274:12 #20 0x7f5deb5ebd8f (/lib/x86_64-linux-gnu/libc.so.6+0x29d8f) (BuildId: 4f7b0c955c3d81d7cac1501a2498b69d1d82bfe7) Indirect leak of 8 byte(s) in 1 object(s) allocated from: #0 0x000000c0d85d in operator new(unsigned long) /-S/contrib/libs/clang20-rt/lib/asan/asan_new_delete.cpp:86:3 #1 0x00000160b0a3 in NewString > > /-S/contrib/libs/protobuf/src/google/protobuf/arenastring.h:371:17 #2 0x00000160b0a3 in google::protobuf::internal::ArenaStringPtr::Set(TBasicString>&&, google::protobuf::Arena*) /-S/contrib/libs/protobuf/src/google/protobuf/arenastring.cc:171:5 #3 0x000000ac5e01 in Set /-S/contrib/libs/protobuf/src/google/protobuf/arenastring.h:421:3 #4 0x000000ac5e01 in SetBytes /-S/contrib/libs/protobuf/src/google/protobuf/arenastring.h:446:3 #5 0x000000ac5e01 in set_buffer /-B/ydb/library/actors/interconnect/ut/protos/interconnect_test.pb.h:1967:18 #6 0x000000ac5e01 in SetBuffer /-B/ydb/library/actors/interconnect/ut/protos/interconnect_test.pb.h:1428:46 #7 0x000000ac5e01 in MakeTestEvent(unsigned long, NInterconnect::NRdma::IMemPool*, bool, bool) /-S/ydb/library/actors/interconnect/ut_rdma/rdma_xdc_ut.cpp:183:16 #8 0x000000ad3a01 in XdcRdmaTest_SendRdma_Test::TestBody() /-S/ydb/library/actors/interconnect/ut_rdma/rdma_xdc_ut.cpp:311:16 #9 0x000000fcd0e4 in HandleSehExceptionsInMethodIfSupported /-S/contrib/restricted/googletest/googletest/src/gtest.cc:2664:10 #10 0x000000fcd0e4 in void testing::internal::HandleExceptionsInMethodIfSupported(testing::Test*, void (testing::Test::*)(), char const*) /-S/contrib/restricted/googletest/googletest/src/gtest.cc:2700:14 #11 0x000000fcce10 in testing::Test::Run() /-S/contrib/restricted/googletest/googletest/src/gtest.cc:2739:5 #12 0x000000fcf695 in testing::TestInfo::Run() /-S/contrib/restricted/googletest/googletest/src/gtest.cc:2885:11 #13 0x000000fd2541 in testing::TestSuite::Run() /-S/contrib/restricted/googletest/googletest/src/gtest.cc:3063:30 #14 0x000000ffd58e in testing::internal::UnitTestImpl::RunAllTests() /-S/contrib/restricted/googletest/googletest/src/gtest.cc:6054:44 #15 0x000000ffbf84 in HandleSehExceptionsInMethodIfSupported /-S/contrib/restricted/googletest/googletest/src/gtest.cc:2664:10 #16 0x000000ffbf84 in bool testing::internal::HandleExceptionsInMethodIfSupported(testing::internal::UnitTestImpl*, bool (testing::internal::UnitTestImpl::*)(), char const*) /-S/contrib/restricted/googletest/googletest/src/gtest.cc:2700:14 #17 0x000000ffbaf6 in testing::UnitTest::Run() /-S/contrib/restricted/googletest/googletest/src/gtest.cc:5594:10 #18 0x000001057e9f in RUN_ALL_TESTS /-S/contrib/restricted/googletest/googletest/include/gtest/gtest.h:2334:73 #19 0x000001057e9f in NGTest::Main(int, char**) 
/-S/library/cpp/testing/gtest/main.cpp:274:12 #20 0x7f5deb5ebd8f (/lib/x86_64-linux-gnu/libc.so.6+0x29d8f) (BuildId: 4f7b0c955c3d81d7cac1501a2498b69d1d82bfe7) SUMMARY: AddressSanitizer: 22048 byte(s) leaked in 49 allocation(s). >> InterestingOrderingsSorting::ComplexOrderingsWithMultipleImplicationsAndEquivalences [GOOD] >> InterestingOrderingsSorting::TPCH8 [GOOD] >> InterestingOrderingsSorting::DifferentDirections [GOOD] >> InterestingOrderingsSorting::DifferentDirectionsEquivWithImplication [GOOD] >> InterestingOrderingsSorting::IncompatibleDirsSortingsPrefixClosure [GOOD] >> TRUCalculatorTests::TestReadTable [GOOD] >> TRUCalculatorTests::TestBulkUpsert [GOOD] |84.9%| [TM] {BAZEL_UPLOAD} ydb/library/actors/interconnect/ut_rdma/gtest ------- [TM] {asan, default-linux-x86_64, release} ydb/library/actors/interconnect/rdma/ut/gtest >> TRdmaLow/TCqMode::ReadInOneProcessWithQpInterruption/EVENT Test command err: ydb/library/actors/interconnect/rdma/ut/allocator_ut.cpp:35: Value of: mr Actual: false Expected: true unable to register memory region found: uverbs0 rxe_lo /sys/class/infiniband_verbs/uverbs0 port 1 speed: 1 port: 1 0 port: 1 1 port: 1 2 port: 1 3 port: 1 4 port: 1 5 port: 1 6 port: 1 7 port: 1 8 port: 1 9 port: 1 10 port: 1 11 port: 1 12 port: 1 13 port: 1 14 port: 1 15 port: 1 16 port: 1 17 port: 1 18 port: 1 19 port: 1 20 port: 1 21 port: 1 22 port: 1 23 port: 1 24 port: 1 25 port: 1 26 port: 1 27 port: 1 28 port: 1 29 port: 1 30 port: 1 31 port: 1 32 port: 1 33 port: 1 34 port: 1 35 port: 1 36 port: 1 37 port: 1 38 port: 1 39 port: 1 40 port: 1 41 port: 1 42 port: 1 43 port: 1 44 port: 1 45 port: 1 46 port: 1 47 port: 1 48 port: 1 49 port: 1 50 port: 1 51 port: 1 52 port: 1 53 port: 1 54 port: 1 55 port: 1 56 port: 1 57 port: 1 58 port: 1 59 port: 1 60 port: 1 61 port: 1 62 port: 1 63 port: 1 64 port: 1 65 port: 1 66 port: 1 67 port: 1 68 port: 1 69 port: 1 70 port: 1 71 port: 1 72 port: 1 73 port: 1 74 port: 1 75 port: 1 76 port: 1 77 port: 1 78 port: 1 79 port: 1 80 port: 1 81 port: 1 82 port: 1 83 port: 1 84 port: 1 85 port: 1 86 port: 1 87 port: 1 88 port: 1 89 port: 1 90 port: 1 91 port: 1 92 port: 1 93 port: 1 94 port: 1 95 port: 1 96 port: 1 97 port: 1 98 port: 1 99 port: 1 100 port: 1 101 port: 1 102 port: 1 103 port: 1 104 port: 1 105 port: 1 106 port: 1 107 port: 1 108 port: 1 109 port: 1 110 port: 1 111 port: 1 112 port: 1 113 port: 1 114 port: 1 115 port: 1 116 port: 1 117 port: 1 118 port: 1 119 port: 1 120 port: 1 121 port: 1 122 port: 1 123 port: 1 124 port: 1 125 port: 1 126 port: 1 127 port: 1 128 port: 1 129 port: 1 130 port: 1 131 port: 1 132 port: 1 133 port: 1 134 port: 1 135 port: 1 136 port: 1 137 port: 1 138 port: 1 139 port: 1 140 port: 1 141 port: 1 142 port: 1 143 port: 1 144 port: 1 145 port: 1 146 port: 1 147 port: 1 148 port: 1 149 port: 1 150 port: 1 151 port: 1 152 port: 1 153 port: 1 154 port: 1 155 port: 1 156 port: 1 157 port: 1 158 port: 1 159 port: 1 160 port: 1 161 port: 1 162 port: 1 163 port: 1 164 port: 1 165 port: 1 166 port: 1 167 port: 1 168 port: 1 169 port: 1 170 port: 1 171 port: 1 172 port: 1 173 port: 1 174 port: 1 175 port: 1 176 port: 1 177 port: 1 178 port: 1 179 port: 1 180 port: 1 181 port: 1 182 port: 1 183 port: 1 184 port: 1 185 port: 1 186 port: 1 187 port: 1 188 port: 1 189 port: 1 190 port: 1 191 port: 1 192 port: 1 193 port: 1 194 port: 1 195 port: 1 196 port: 1 197 port: 1 198 port: 1 199 port: 1 200 port: 1 201 port: 1 202 port: 1 203 port: 1 204 port: 1 205 port: 1 206 port: 1 207 port: 1 208 port: 1 209 port: 1 210 
port: 1 211 [ ... identical "port: 1 N" device-probe entries for N = 212..801 elided ... ] port: 1 802
port: 1 803 port: 1 804 port: 1 805 port: 1 806 port: 1 807 port: 1 808 port: 1 809 port: 1 810 port: 1 811 port: 1 812 port: 1 813 port: 1 814 port: 1 815 port: 1 816 port: 1 817 port: 1 818 port: 1 819 port: 1 820 port: 1 821 port: 1 822 port: 1 823 port: 1 824 port: 1 825 port: 1 826 port: 1 827 port: 1 828 port: 1 829 port: 1 830 port: 1 831 port: 1 832 port: 1 833 port: 1 834 port: 1 835 port: 1 836 port: 1 837 port: 1 838 port: 1 839 port: 1 840 port: 1 841 port: 1 842 port: 1 843 port: 1 844 port: 1 845 port: 1 846 port: 1 847 port: 1 848 port: 1 849 port: 1 850 port: 1 851 port: 1 852 port: 1 853 port: 1 854 port: 1 855 port: 1 856 port: 1 857 port: 1 858 port: 1 859 port: 1 860 port: 1 861 port: 1 862 port: 1 863 port: 1 864 port: 1 865 port: 1 866 port: 1 867 port: 1 868 port: 1 869 port: 1 870 port: 1 871 port: 1 872 port: 1 873 port: 1 874 port: 1 875 port: 1 876 port: 1 877 port: 1 878 port: 1 879 port: 1 880 port: 1 881 port: 1 882 port: 1 883 port: 1 884 port: 1 885 port: 1 886 port: 1 887 port: 1 888 port: 1 889 port: 1 890 port: 1 891 port: 1 892 port: 1 893 port: 1 894 port: 1 895 port: 1 896 port: 1 897 port: 1 898 port: 1 899 port: 1 900 port: 1 901 port: 1 902 port: 1 903 port: 1 904 port: 1 905 port: 1 906 port: 1 907 port: 1 908 port: 1 909 port: 1 910 port: 1 911 port: 1 912 port: 1 913 port: 1 914 por ... nect-rdma-ut+0xa8d028) (BuildId: 5f8f7b7d5a52dc74928fd3e854dece32923b4a29) ==46056==Register values: rax = 0x0000000000000010 rbx = 0x00007ffc6bf34120 rcx = 0x0000000000000002 rdx = 0x0000000000000001 rdi = 0x0000000000000000 rsi = 0x0000000000000000 rbp = 0x00007ffc6bf34110 rsp = 0x00007ffc6bf34110 r8 = 0x0000000000001000 r9 = 0x00007dd3cf0ea000 r10 = 0x00007fffffffff01 r11 = 0x0000000000000001 r12 = 0x00007b93ce173000 r13 = 0x00007b93ce173020 r14 = 0x00007b93ce173060 r15 = 0x00000f72f9c26600 AddressSanitizer can not provide additional info. SUMMARY: AddressSanitizer: SEGV /-S/util/generic/ptr.h:592:16 in Get ==46056==ABORTING Using verbs context: {device_name# rxe_lo port_num# 1 gid_index# 2 gid# ::1}, on addr: ::1 AddressSanitizer:DEADLYSIGNAL ================================================================= ==46550==ERROR: AddressSanitizer: SEGV on unknown address 0x000000000010 (pc 0x000001bd018c bp 0x7ffce514ce30 sp 0x7ffce514ce30 T0) ==46550==The signal is caused by a READ memory access. ==46550==Hint: address points to the zero page. 
#0 0x000001bd018c in Get /-S/util/generic/ptr.h:592:16 #1 0x000001bd018c in AsT /-S/util/generic/ptr.h:153:50 #2 0x000001bd018c in operator-> /-S/util/generic/ptr.h:118:18 #3 0x000001bd018c in NInterconnect::NRdma::TMemRegion::GetAddr() const /-S/ydb/library/actors/interconnect/rdma/mem_pool.cpp:222:20 #4 0x000000b0ddf7 in AllocSourceRegion /-S/ydb/library/actors/interconnect/rdma/ut/rdma_low_ut.cpp:29:17 #5 0x000000b0ddf7 in DoReadInOneProcess(TBasicString>, NInterconnect::NRdma::ECqMode) /-S/ydb/library/actors/interconnect/rdma/ut/rdma_low_ut.cpp:38:17 #6 0x000000b0f6fe in TCqMode_ReadInOneProcessIpV6_Test::TestBody() /-S/ydb/library/actors/interconnect/rdma/ut/rdma_low_ut.cpp:51:5 #7 0x000000f7c784 in HandleSehExceptionsInMethodIfSupported /-S/contrib/restricted/googletest/googletest/src/gtest.cc:2664:10 #8 0x000000f7c784 in void testing::internal::HandleExceptionsInMethodIfSupported(testing::Test*, void (testing::Test::*)(), char const*) /-S/contrib/restricted/googletest/googletest/src/gtest.cc:2700:14 #9 0x000000f7c4b0 in testing::Test::Run() /-S/contrib/restricted/googletest/googletest/src/gtest.cc:2739:5 #10 0x000000f7ed35 in testing::TestInfo::Run() /-S/contrib/restricted/googletest/googletest/src/gtest.cc:2885:11 #11 0x000000f81be1 in testing::TestSuite::Run() /-S/contrib/restricted/googletest/googletest/src/gtest.cc:3063:30 #12 0x000000facc2e in testing::internal::UnitTestImpl::RunAllTests() /-S/contrib/restricted/googletest/googletest/src/gtest.cc:6054:44 #13 0x000000fab624 in HandleSehExceptionsInMethodIfSupported /-S/contrib/restricted/googletest/googletest/src/gtest.cc:2664:10 #14 0x000000fab624 in bool testing::internal::HandleExceptionsInMethodIfSupported(testing::internal::UnitTestImpl*, bool (testing::internal::UnitTestImpl::*)(), char const*) /-S/contrib/restricted/googletest/googletest/src/gtest.cc:2700:14 #15 0x000000fab196 in testing::UnitTest::Run() /-S/contrib/restricted/googletest/googletest/src/gtest.cc:5594:10 #16 0x00000100753f in RUN_ALL_TESTS /-S/contrib/restricted/googletest/googletest/include/gtest/gtest.h:2334:73 #17 0x00000100753f in NGTest::Main(int, char**) /-S/library/cpp/testing/gtest/main.cpp:274:12 #18 0x7f7d07024d8f (/lib/x86_64-linux-gnu/libc.so.6+0x29d8f) (BuildId: 4f7b0c955c3d81d7cac1501a2498b69d1d82bfe7) #19 0x7f7d07024e3f in __libc_start_main (/lib/x86_64-linux-gnu/libc.so.6+0x29e3f) (BuildId: 4f7b0c955c3d81d7cac1501a2498b69d1d82bfe7) #20 0x000000a8d028 in _start (/home/runner/.ya/build/build_root/663n/00212b/ydb/library/actors/interconnect/rdma/ut/ydb-library-actors-interconnect-rdma-ut+0xa8d028) (BuildId: 5f8f7b7d5a52dc74928fd3e854dece32923b4a29) ==46550==Register values: rax = 0x0000000000000010 rbx = 0x00007ffce514ce40 rcx = 0x0000000000000002 rdx = 0x0000000000000001 rdi = 0x0000000000000000 rsi = 0x0000000000000000 rbp = 0x00007ffce514ce30 rsp = 0x00007ffce514ce30 r8 = 0x0000000000001000 r9 = 0x00007dbd061ea000 r10 = 0x00007fffffffff01 r11 = 0x0000000000000001 r12 = 0x00007b7d05273000 r13 = 0x00007b7d05273020 r14 = 0x00007b7d05273060 r15 = 0x00000f7020a46600 AddressSanitizer can not provide additional info. 
SUMMARY: AddressSanitizer: SEGV /-S/util/generic/ptr.h:592:16 in Get ==46550==ABORTING Using verbs context: {device_name# rxe_lo port_num# 1 gid_index# 1 gid# ::ffff:127.0.0.1}, on addr: 127.0.0.1 AddressSanitizer:DEADLYSIGNAL ================================================================= ==46672==ERROR: AddressSanitizer: SEGV on unknown address 0x000000000010 (pc 0x000001bd018c bp 0x7ffea8cac190 sp 0x7ffea8cac190 T0) ==46672==The signal is caused by a READ memory access. ==46672==Hint: address points to the zero page. #0 0x000001bd018c in Get /-S/util/generic/ptr.h:592:16 #1 0x000001bd018c in AsT /-S/util/generic/ptr.h:153:50 #2 0x000001bd018c in operator-> /-S/util/generic/ptr.h:118:18 #3 0x000001bd018c in NInterconnect::NRdma::TMemRegion::GetAddr() const /-S/ydb/library/actors/interconnect/rdma/mem_pool.cpp:222:20 #4 0x000000b103ca in AllocSourceRegion /-S/ydb/library/actors/interconnect/rdma/ut/rdma_low_ut.cpp:29:17 #5 0x000000b103ca in TCqMode_ReadInOneProcessWithQpInterruption_Test::TestBody() /-S/ydb/library/actors/interconnect/rdma/ut/rdma_low_ut.cpp:72:21 #6 0x000000f7c784 in HandleSehExceptionsInMethodIfSupported /-S/contrib/restricted/googletest/googletest/src/gtest.cc:2664:10 #7 0x000000f7c784 in void testing::internal::HandleExceptionsInMethodIfSupported(testing::Test*, void (testing::Test::*)(), char const*) /-S/contrib/restricted/googletest/googletest/src/gtest.cc:2700:14 #8 0x000000f7c4b0 in testing::Test::Run() /-S/contrib/restricted/googletest/googletest/src/gtest.cc:2739:5 #9 0x000000f7ed35 in testing::TestInfo::Run() /-S/contrib/restricted/googletest/googletest/src/gtest.cc:2885:11 #10 0x000000f81be1 in testing::TestSuite::Run() /-S/contrib/restricted/googletest/googletest/src/gtest.cc:3063:30 #11 0x000000facc2e in testing::internal::UnitTestImpl::RunAllTests() /-S/contrib/restricted/googletest/googletest/src/gtest.cc:6054:44 #12 0x000000fab624 in HandleSehExceptionsInMethodIfSupported /-S/contrib/restricted/googletest/googletest/src/gtest.cc:2664:10 #13 0x000000fab624 in bool testing::internal::HandleExceptionsInMethodIfSupported(testing::internal::UnitTestImpl*, bool (testing::internal::UnitTestImpl::*)(), char const*) /-S/contrib/restricted/googletest/googletest/src/gtest.cc:2700:14 #14 0x000000fab196 in testing::UnitTest::Run() /-S/contrib/restricted/googletest/googletest/src/gtest.cc:5594:10 #15 0x00000100753f in RUN_ALL_TESTS /-S/contrib/restricted/googletest/googletest/include/gtest/gtest.h:2334:73 #16 0x00000100753f in NGTest::Main(int, char**) /-S/library/cpp/testing/gtest/main.cpp:274:12 #17 0x7f481236fd8f (/lib/x86_64-linux-gnu/libc.so.6+0x29d8f) (BuildId: 4f7b0c955c3d81d7cac1501a2498b69d1d82bfe7) #18 0x7f481236fe3f in __libc_start_main (/lib/x86_64-linux-gnu/libc.so.6+0x29e3f) (BuildId: 4f7b0c955c3d81d7cac1501a2498b69d1d82bfe7) #19 0x000000a8d028 in _start (/home/runner/.ya/build/build_root/663n/00212b/ydb/library/actors/interconnect/rdma/ut/ydb-library-actors-interconnect-rdma-ut+0xa8d028) (BuildId: 5f8f7b7d5a52dc74928fd3e854dece32923b4a29) ==46672==Register values: rax = 0x0000000000000010 rbx = 0x00007ffea8cac1a0 rcx = 0x0000000000000002 rdx = 0x0000000000000001 rdi = 0x0000000000000000 rsi = 0x0000000000000000 rbp = 0x00007ffea8cac190 rsp = 0x00007ffea8cac190 r8 = 0x0000000000001000 r9 = 0x00007d88114ea000 r10 = 0x00007fffffffff01 r11 = 0x0000000000000001 r12 = 0x00000f69820d3e00 r13 = 0x00000f69820d3e00 r14 = 0x00007b48106df150 r15 = 0x00007b48106df2a0 AddressSanitizer can not provide additional info. 
SUMMARY: AddressSanitizer: SEGV /-S/util/generic/ptr.h:592:16 in Get ==46672==ABORTING Using verbs context: {device_name# rxe_lo port_num# 1 gid_index# 1 gid# ::ffff:127.0.0.1}, on addr: 127.0.0.1 AddressSanitizer:DEADLYSIGNAL ================================================================= ==47082==ERROR: AddressSanitizer: SEGV on unknown address 0x000000000010 (pc 0x000001bd018c bp 0x7ffd3686c6b0 sp 0x7ffd3686c6b0 T0) ==47082==The signal is caused by a READ memory access. ==47082==Hint: address points to the zero page. #0 0x000001bd018c in Get /-S/util/generic/ptr.h:592:16 #1 0x000001bd018c in AsT /-S/util/generic/ptr.h:153:50 #2 0x000001bd018c in operator-> /-S/util/generic/ptr.h:118:18 #3 0x000001bd018c in NInterconnect::NRdma::TMemRegion::GetAddr() const /-S/ydb/library/actors/interconnect/rdma/mem_pool.cpp:222:20 #4 0x000000b103ca in AllocSourceRegion /-S/ydb/library/actors/interconnect/rdma/ut/rdma_low_ut.cpp:29:17 #5 0x000000b103ca in TCqMode_ReadInOneProcessWithQpInterruption_Test::TestBody() /-S/ydb/library/actors/interconnect/rdma/ut/rdma_low_ut.cpp:72:21 #6 0x000000f7c784 in HandleSehExceptionsInMethodIfSupported /-S/contrib/restricted/googletest/googletest/src/gtest.cc:2664:10 #7 0x000000f7c784 in void testing::internal::HandleExceptionsInMethodIfSupported(testing::Test*, void (testing::Test::*)(), char const*) /-S/contrib/restricted/googletest/googletest/src/gtest.cc:2700:14 #8 0x000000f7c4b0 in testing::Test::Run() /-S/contrib/restricted/googletest/googletest/src/gtest.cc:2739:5 #9 0x000000f7ed35 in testing::TestInfo::Run() /-S/contrib/restricted/googletest/googletest/src/gtest.cc:2885:11 #10 0x000000f81be1 in testing::TestSuite::Run() /-S/contrib/restricted/googletest/googletest/src/gtest.cc:3063:30 #11 0x000000facc2e in testing::internal::UnitTestImpl::RunAllTests() /-S/contrib/restricted/googletest/googletest/src/gtest.cc:6054:44 #12 0x000000fab624 in HandleSehExceptionsInMethodIfSupported /-S/contrib/restricted/googletest/googletest/src/gtest.cc:2664:10 #13 0x000000fab624 in bool testing::internal::HandleExceptionsInMethodIfSupported(testing::internal::UnitTestImpl*, bool (testing::internal::UnitTestImpl::*)(), char const*) /-S/contrib/restricted/googletest/googletest/src/gtest.cc:2700:14 #14 0x000000fab196 in testing::UnitTest::Run() /-S/contrib/restricted/googletest/googletest/src/gtest.cc:5594:10 #15 0x00000100753f in RUN_ALL_TESTS /-S/contrib/restricted/googletest/googletest/include/gtest/gtest.h:2334:73 #16 0x00000100753f in NGTest::Main(int, char**) /-S/library/cpp/testing/gtest/main.cpp:274:12 #17 0x7fb352aebd8f (/lib/x86_64-linux-gnu/libc.so.6+0x29d8f) (BuildId: 4f7b0c955c3d81d7cac1501a2498b69d1d82bfe7) #18 0x7fb352aebe3f in __libc_start_main (/lib/x86_64-linux-gnu/libc.so.6+0x29e3f) (BuildId: 4f7b0c955c3d81d7cac1501a2498b69d1d82bfe7) #19 0x000000a8d028 in _start (/home/runner/.ya/build/build_root/663n/00212b/ydb/library/actors/interconnect/rdma/ut/ydb-library-actors-interconnect-rdma-ut+0xa8d028) (BuildId: 5f8f7b7d5a52dc74928fd3e854dece32923b4a29) ==47082==Register values: rax = 0x0000000000000010 rbx = 0x00007ffd3686c6c0 rcx = 0x0000000000000002 rdx = 0x0000000000000001 rdi = 0x0000000000000000 rsi = 0x0000000000000000 rbp = 0x00007ffd3686c6b0 rsp = 0x00007ffd3686c6b0 r8 = 0x0000000000001000 r9 = 0x00007df351bea000 r10 = 0x00007fffffffff01 r11 = 0x0000000000000001 r12 = 0x00000f76ea1b3e00 r13 = 0x00000f76ea1b3e00 r14 = 0x00007bb350ddf150 r15 = 0x00007bb350ddf2a0 AddressSanitizer can not provide additional info. 
SUMMARY: AddressSanitizer: SEGV /-S/util/generic/ptr.h:592:16 in Get ==47082==ABORTING |84.9%| [TM] {BAZEL_UPLOAD} ydb/library/actors/interconnect/rdma/ut/gtest ------- [TM] {asan, default-linux-x86_64, release} ydb/library/range_treap/ut_range_treap/unittest >> TRangeTreap::EarlyQuit [GOOD] Test command err: NOTE: building treap of size 1000000 got height 50 and needed 1000000 ops (1000000 inserts 0 updates 0 deletes) and 32085125 comparisons (32.085125 per op) NOTE: building treap of size 8806 got height 28 and needed 10941 ops (9747 inserts 253 updates 941 deletes) and 220193 comparisons (20.12549127 per op) Checking point 1765 ... found 1320 ranges, needed 3487 comparisons (2.641666667 per range) Checking point 8950 ... found 2345 ranges, needed 8322 comparisons (3.548827292 per range) Checking point 2145 ... found 1521 ranges, needed 4195 comparisons (2.758053912 per range) Checking point 4956 ... found 2219 ranges, needed 8260 comparisons (3.722397476 per range) Checking point 9376 ... found 2346 ranges, needed 8258 comparisons (3.520034101 per range) Checking point 3912 ... found 2123 ranges, needed 7240 comparisons (3.410268488 per range) Checking point 3465 ... found 2042 ranges, needed 6595 comparisons (3.229676787 per range) Checking point 654 ... found 532 ranges, needed 1276 comparisons (2.398496241 per range) Checking point 6146 ... found 2215 ranges, needed 8263 comparisons (3.730474041 per range) Checking point 9845 ... found 2309 ranges, needed 8066 comparisons (3.493287137 per range) |84.9%| [TM] {BAZEL_UPLOAD} ydb/library/range_treap/ut_range_treap/unittest >> BSCRestartPDisk::RestartOneByOneWithReconnects [GOOD] |84.9%| [TS] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_ru_calculator/unittest >> TRUCalculatorTests::TestBulkUpsert [GOOD] |84.9%| [TS] {BAZEL_UPLOAD} ydb/core/tx/schemeshard/ut_ru_calculator/unittest >> BSCRestartPDisk::RestartOneByOne [GOOD] ------- [TS] {asan, default-linux-x86_64, release} ydb/library/yql/dq/opt/ut/unittest >> InterestingOrderingsSorting::IncompatibleDirsSortingsPrefixClosure [GOOD] Test command err: Input: Rels: [{rows: 1,cost: 0,vars: [(1,1)]}, {rows: 1,cost: 0,vars: [(2,1)]}] EqClasses: [[(1,1),(2,1)]] Input: 1: {RELOPTINFO :reloptkind 0 :relids (b 1) :rows 1 :consider_startup false :consider_param_startup false :consider_parallel false :reltarget {PATHTARGET :exprs ( {VAR :varno 1 :varattno 1 :vartype 25 :vartypmod -1 :varcollid 0 :varnullingrels (b) :varlevelsup 0 :varnosyn 1 :varattnosyn 1 :location -1 } ) :sortgrouprefs <> :cost.startup 0 :cost.per_tuple 0 :width 8 :has_volatile_expr 0 } :pathlist ( {PATH :pathtype 318 :parent_relids (b 1) :required_outer (b) :parallel_aware false :parallel_safe false :parallel_workers 0 :rows 1 :startup_cost 0 :total_cost 0 :pathkeys <> } ) :ppilist <> :partial_pathlist <> :cheapest_startup_path <> :cheapest_total_path {PATH :pathtype 318 :parent_relids (b 1) :required_outer (b) :parallel_aware false :parallel_safe false :parallel_workers 0 :rows 1 :startup_cost 0 :total_cost 0 :pathkeys <> } :cheapest_unique_path <> :cheapest_parameterized_paths <> :direct_lateral_relids (b) :lateral_relids (b) :relid 1 :reltablespace 0 :rtekind 0 :min_attr 0 :max_attr 1 :nulling_relids (b) :lateral_vars <> :lateral_referencers (b) :indexlist <> :statlist <> :pages 1 :tuples 1 :allvisfrac 1 :eclass_indexes (b) :subroot <> :subplan_params <> :rel_parallel_workers -1 :amflags 1 :serverid 0 :userid 0 :useridiscurrent false :unique_for_rels <> :non_unique_for_rels <> :baserestrictinfo <> 
:baserestrictcost.startup 0 :baserestrictcost.per_tuple 0 :baserestrict_min_security 0 :joininfo <> :has_eclass_joins false :consider_partitionwise_join false :top_parent_relids (b) :nparts 0 :partbounds_merged false :partition_qual <> :live_parts (b) :all_partrels (b) } Input: 2: {RELOPTINFO :reloptkind 0 :relids (b 2) :rows 1 :consider_startup false :consider_param_startup false :consider_parallel false :reltarget {PATHTARGET :exprs ( {VAR :varno 2 :varattno 1 :vartype 25 :vartypmod -1 :varcollid 0 :varnullingrels (b) :varlevelsup 0 :varnosyn 2 :varattnosyn 1 :location -1 } ) :sortgrouprefs <> :cost.startup 0 :cost.per_tuple 0 :width 8 :has_volatile_expr 0 } :pathlist ( {PATH :pathtype 318 :parent_relids (b 2) :required_outer (b) :parallel_aware false :parallel_safe false :parallel_workers 0 :rows 1 :startup_cost 0 :total_cost 0 :pathkeys <> } ) :ppilist <> :partial_pathlist <> :cheapest_startup_path <> :cheapest_total_path {PATH :pathtype 318 :parent_relids (b 2) :required_outer (b) :parallel_aware false :parallel_safe false :parallel_workers 0 :rows 1 :startup_cost 0 :total_cost 0 :pathkeys <> } :cheapest_unique_path <> :cheapest_parameterized_paths <> :direct_lateral_relids (b) :lateral_relids (b) :relid 2 :reltablespace 0 :rtekind 0 :min_attr 0 :max_attr 1 :nulling_relids (b) :lateral_vars <> :lateral_referencers (b) :indexlist <> :statlist <> :pages 1 :tuples 1 :allvisfrac 1 :eclass_indexes (b) :subroot <> :subplan_params <> :rel_parallel_workers -1 :amflags 1 :serverid 0 :userid 0 :useridiscurrent false :unique_for_rels <> :non_unique_for_rels <> :baserestrictinfo <> :baserestrictcost.startup 0 :baserestrictcost.per_tuple 0 :baserestrict_min_security 0 :joininfo <> :has_eclass_joins false :consider_partitionwise_join false :top_parent_relids (b) :nparts 0 :partbounds_merged false :partition_qual <> :live_parts (b) :all_partrels (b) } Context: : {PLANNERINFO :parse <> :glob <> :query_level 1 :plan_params <> :outer_params (b) :simple_rel_array ( <> {RELOPTINFO :reloptkind 0 :relids (b 1) :rows 1 :consider_startup false :consider_param_startup false :consider_parallel false :reltarget {PATHTARGET :exprs ( {VAR :varno 1 :varattno 1 :vartype 25 :vartypmod -1 :varcollid 0 :varnullingrels (b) :varlevelsup 0 :varnosyn 1 :varattnosyn 1 :location -1 } ) :sortgrouprefs <> :cost.startup 0 :cost.per_tuple 0 :width 8 :has_volatile_expr 0 } :pathlist ( {PATH :pathtype 318 :parent_relids (b 1) :required_outer (b) :parallel_aware false :parallel_safe false :parallel_workers 0 :rows 1 :startup_cost 0 :total_cost 0 :pathkeys <> } ) :ppilist <> :partial_pathlist <> :cheapest_startup_path <> :cheapest_total_path {PATH :pathtype 318 :parent_relids (b 1) :required_outer (b) :parallel_aware false :parallel_safe false :parallel_workers 0 :rows 1 :startup_cost 0 :total_cost 0 :pathkeys <> } :cheapest_unique_path <> :cheapest_parameterized_paths <> :direct_lateral_relids (b) :lateral_relids (b) :relid 1 :reltablespace 0 :rtekind 0 :min_attr 0 :max_attr 1 :nulling_relids (b) :lateral_vars <> :lateral_referencers (b) :indexlist <> :statlist <> :pages 1 :tuples 1 :allvisfrac 1 :eclass_indexes (b 0) :subroot <> :subplan_params <> :rel_parallel_workers -1 :amflags 1 :serverid 0 :userid 0 :useridiscurrent false :unique_for_rels <> :non_unique_for_rels <> :baserestrictinfo <> :baserestrictcost.startup 0 :baserestrictcost.per_tuple 0 :baserestrict_min_security 0 :joininfo <> :has_eclass_joins false :consider_partitionwise_join false :top_parent_relids (b) :nparts 0 :partbounds_merged false :partition_qual <> 
:live_parts (b) :all_partrels (b) } {RELOPTINFO :reloptkind 0 :relids (b 2) :rows 1 :consider_startup false :consider_param_startup false :consider_parallel false :reltarget {PATHTARGET :exprs ( {VAR :varno 2 :varattno 1 :vartype 25 :vartypmod -1 :varcollid 0 :varnullingrels (b) :varlevelsup 0 :varnosyn 2 :varattnosyn 1 :location -1 } ) :sortgrouprefs <> :cost.startup 0 :cost.per_tuple 0 :width 8 :has_volatile_expr 0 } :pathlist ( {PATH :pathtype 318 :parent_relids (b 2) :required_outer (b) :parallel_aware false :parallel_safe false :parallel_workers 0 :rows 1 :startup_cost 0 :total_cost 0 :pathkeys <> } ) :ppilist <> :partial_pathlist <> :cheapest_startup_path <> :cheapest_total_path {PATH :pathtype 318 :parent_relids (b 2) :required_outer (b) :parallel_aware false :parallel_safe false :parallel_workers 0 :rows 1 :startup_cost 0 :total_cost 0 :pathkeys <> } :cheapest_unique_path <> :cheapest_parameterized_paths <> :direct_lateral_relids (b) :lateral_relids (b) :relid 2 :reltablespace 0 :rtekind 0 :min_attr 0 :max_attr 1 :nulling_relids (b) :lateral_vars <> :lateral_referencers (b) :indexlist <> :statlist <> :pages 1 :tuples 1 :allvisfrac 1 :eclass_indexes (b 0) :subroot <> :subplan_params <> :rel_parallel_workers -1 :amflags 1 :serverid 0 :userid 0 :useridiscurrent false :unique_for_rels <> :non_unique_for_rels <> :baserestrictinfo <> :baserestrictcost.startup 0 :baserestrictcost.per_tuple 0 :baserestrict_min_security 0 :joininfo <> :has_eclass_joins false :consider_partitionwise_join false :top_parent_relids (b) :nparts 0 :partbounds_merged false :partition_qual <> :live_parts (b) :all_partrels (b) } ) :simple_rel_array_size 3 :all_baserels (b 1 2) :outer_join_rels (b) :all_query_rels (b) :join_rel_list <> :join_cur_level 0 :init_plans <> :cte_plan_ids <> :multiexpr_params <> :join_domains <> :eq_classes ( {EQUIVALENCECLASS :ec_opfamilies (o 1976) :ec_collation 0 :ec_members ( {EQUIVALENCEMEMBER :em_expr {VAR :varno 1 :varattno 1 :vartype 25 :vartypmod -1 :varcollid 0 :varnullingrels (b) :varlevelsup 0 :varnosyn 1 :varattnosyn 1 :location -1 } :em_relids (b 1) :em_is_const false :em_is_child false :em_datatype 20 :em_jdomain <> } {EQUIVALENCEMEMBER :em_expr {VAR :varno 2 :varattno 1 :vartype 25 :vartypmod -1 :varcollid 0 :varnullingrels (b) :varlevelsup 0 :varnosyn 2 :varattnosyn 1 :location -1 } :em_relids (b 2) :em_is_const false :em_is_child false :em_datatype 20 :em_jdomain <> } ) :ec_sources <> :ec_derives <> :ec_relids (b 1 2) :ec_has_const false :ec_has_volatile false :ec_broken false :ec_sortref 0 :ec_min_security 0 :ec_max_security 0 } ) :ec_merging_done true :canon_pathkeys <> :left_join_clauses <> :right_join_clauses <> :full_join_clauses <> :join_info_list <> :last_rinfo_serial 0 :all_result_relids (b) :leaf_result_relids (b) :append_rel_list <> :row_identity_vars <> :rowMarks <> :placeholder_list <> :fkey_list <> :query_pathkeys <> :group_pathkeys <> :num_groupby_pathkeys 0 :window_pathkeys <> :distinct_pathkeys <> :sort_pathkeys <> :processed_groupClause <> :processed_distinctClause <> :processed_tlist <> :update_colnos <> :minmax_aggs <> :total_table_pages 0 :tuple_fraction 0 :limit_tuples 0 :qual_security_level 0 :hasJoinRTEs false :hasLateralRTEs false :hasHavingQual false :hasPseudoConstantQuals false :hasAlternativeSubPlans false :placeholdersFrozen false :hasRecursion false :agginfos <> :aggtransinfos <> :numOrderedAggs 0 :hasNonPartialAggs false :hasNonSerialAgg ... 
:vartype 25 :vartypmod -1 :varcollid 0 :varnullingrels (b) :varlevelsup 0 :varnosyn 1 :varattnosyn 1 :location -1 } :em_relids (b 1) :em_is_const false :em_is_child false :em_datatype 20 :em_jdomain <> } :right_em {EQUIVALENCEMEMBER :em_expr {VAR :varno 2 :varattno 1 :vartype 25 :vartypmod -1 :varcollid 0 :varnullingrels (b) :varlevelsup 0 :varnosyn 2 :varattnosyn 1 :location -1 } :em_relids (b 2) :em_is_const false :em_is_child false :em_datatype 20 :em_jdomain <> } :outer_is_left true :hashjoinoperator 410 :left_bucketsize -1 :right_bucketsize -1 :left_mcvfreq -1 :right_mcvfreq -1 :left_hasheqoperator 98 :right_hasheqoperator 98 } ) } :cheapest_total_path {NESTPATH :jpath.path.pathtype 335 :parent_relids (b 1 2) :required_outer (b) :jpath.path.parallel_aware false :jpath.path.parallel_safe false :jpath.path.parallel_workers 0 :jpath.path.rows 1 :jpath.path.startup_cost 0 :jpath.path.total_cost 100000.015 :jpath.path.pathkeys <> :jpath.jointype 0 :jpath.inner_unique false :jpath.outerjoinpath {PATH :pathtype 318 :parent_relids (b 1) :required_outer (b) :parallel_aware false :parallel_safe false :parallel_workers 0 :rows 1 :startup_cost 0 :total_cost 0 :pathkeys <> } :jpath.innerjoinpath {MATERIALPATH :path.pathtype 339 :parent_relids (b 2) :required_outer (b) :path.parallel_aware false :path.parallel_safe false :path.parallel_workers 0 :path.rows 1 :path.startup_cost 0 :path.total_cost 0.005 :path.pathkeys <> :subpath {PATH :pathtype 318 :parent_relids (b 2) :required_outer (b) :parallel_aware false :parallel_safe false :parallel_workers 0 :rows 1 :startup_cost 0 :total_cost 0 :pathkeys <> } } :jpath.joinrestrictinfo ( {RESTRICTINFO :clause {OPEXPR :opno 410 :opfuncid 467 :opresulttype 16 :opretset false :opcollid 0 :inputcollid 0 :args ( {VAR :varno 1 :varattno 1 :vartype 25 :vartypmod -1 :varcollid 0 :varnullingrels (b) :varlevelsup 0 :varnosyn 1 :varattnosyn 1 :location -1 } {VAR :varno 2 :varattno 1 :vartype 25 :vartypmod -1 :varcollid 0 :varnullingrels (b) :varlevelsup 0 :varnosyn 2 :varattnosyn 1 :location -1 } ) :location -1 } :is_pushed_down true :can_join true :pseudoconstant false :has_clone false :is_clone false :leakproof false :has_volatile 2 :security_level 0 :num_base_rels 2 :clause_relids (b 1 2) :required_relids (b 1 2) :incompatible_relids (b) :outer_relids (b) :left_relids (b 1) :right_relids (b 2) :orclause <> :rinfo_serial 1 :eval_cost.startup 0 :eval_cost.per_tuple 100000 :norm_selec 1 :outer_selec -1 :mergeopfamilies <> :left_em {EQUIVALENCEMEMBER :em_expr {VAR :varno 1 :varattno 1 :vartype 25 :vartypmod -1 :varcollid 0 :varnullingrels (b) :varlevelsup 0 :varnosyn 1 :varattnosyn 1 :location -1 } :em_relids (b 1) :em_is_const false :em_is_child false :em_datatype 20 :em_jdomain <> } :right_em {EQUIVALENCEMEMBER :em_expr {VAR :varno 2 :varattno 1 :vartype 25 :vartypmod -1 :varcollid 0 :varnullingrels (b) :varlevelsup 0 :varnosyn 2 :varattnosyn 1 :location -1 } :em_relids (b 2) :em_is_const false :em_is_child false :em_datatype 20 :em_jdomain <> } :outer_is_left true :hashjoinoperator 410 :left_bucketsize -1 :right_bucketsize -1 :left_mcvfreq -1 :right_mcvfreq -1 :left_hasheqoperator 98 :right_hasheqoperator 98 } ) } :cheapest_unique_path <> :cheapest_parameterized_paths ( {NESTPATH :jpath.path.pathtype 335 :parent_relids (b 1 2) :required_outer (b) :jpath.path.parallel_aware false :jpath.path.parallel_safe false :jpath.path.parallel_workers 0 :jpath.path.rows 1 :jpath.path.startup_cost 0 :jpath.path.total_cost 100000.015 :jpath.path.pathkeys <> :jpath.jointype 0 
:jpath.inner_unique false :jpath.outerjoinpath {PATH :pathtype 318 :parent_relids (b 1) :required_outer (b) :parallel_aware false :parallel_safe false :parallel_workers 0 :rows 1 :startup_cost 0 :total_cost 0 :pathkeys <> } :jpath.innerjoinpath {MATERIALPATH :path.pathtype 339 :parent_relids (b 2) :required_outer (b) :path.parallel_aware false :path.parallel_safe false :path.parallel_workers 0 :path.rows 1 :path.startup_cost 0 :path.total_cost 0.005 :path.pathkeys <> :subpath {PATH :pathtype 318 :parent_relids (b 2) :required_outer (b) :parallel_aware false :parallel_safe false :parallel_workers 0 :rows 1 :startup_cost 0 :total_cost 0 :pathkeys <> } } :jpath.joinrestrictinfo ( {RESTRICTINFO :clause {OPEXPR :opno 410 :opfuncid 467 :opresulttype 16 :opretset false :opcollid 0 :inputcollid 0 :args ( {VAR :varno 1 :varattno 1 :vartype 25 :vartypmod -1 :varcollid 0 :varnullingrels (b) :varlevelsup 0 :varnosyn 1 :varattnosyn 1 :location -1 } {VAR :varno 2 :varattno 1 :vartype 25 :vartypmod -1 :varcollid 0 :varnullingrels (b) :varlevelsup 0 :varnosyn 2 :varattnosyn 1 :location -1 } ) :location -1 } :is_pushed_down true :can_join true :pseudoconstant false :has_clone false :is_clone false :leakproof false :has_volatile 2 :security_level 0 :num_base_rels 2 :clause_relids (b 1 2) :required_relids (b 1 2) :incompatible_relids (b) :outer_relids (b) :left_relids (b 1) :right_relids (b 2) :orclause <> :rinfo_serial 1 :eval_cost.startup 0 :eval_cost.per_tuple 100000 :norm_selec 1 :outer_selec -1 :mergeopfamilies <> :left_em {EQUIVALENCEMEMBER :em_expr {VAR :varno 1 :varattno 1 :vartype 25 :vartypmod -1 :varcollid 0 :varnullingrels (b) :varlevelsup 0 :varnosyn 1 :varattnosyn 1 :location -1 } :em_relids (b 1) :em_is_const false :em_is_child false :em_datatype 20 :em_jdomain <> } :right_em {EQUIVALENCEMEMBER :em_expr {VAR :varno 2 :varattno 1 :vartype 25 :vartypmod -1 :varcollid 0 :varnullingrels (b) :varlevelsup 0 :varnosyn 2 :varattnosyn 1 :location -1 } :em_relids (b 2) :em_is_const false :em_is_child false :em_datatype 20 :em_jdomain <> } :outer_is_left true :hashjoinoperator 410 :left_bucketsize -1 :right_bucketsize -1 :left_mcvfreq -1 :right_mcvfreq -1 :left_hasheqoperator 98 :right_hasheqoperator 98 } ) } ) :direct_lateral_relids (b) :lateral_relids (b) :relid 0 :reltablespace 0 :rtekind 2 :min_attr 0 :max_attr 0 :nulling_relids (b) :lateral_vars <> :lateral_referencers (b) :indexlist <> :statlist <> :pages 0 :tuples 0 :allvisfrac 0 :eclass_indexes (b) :subroot <> :subplan_params <> :rel_parallel_workers -1 :amflags 0 :serverid 0 :userid 0 :useridiscurrent false :unique_for_rels <> :non_unique_for_rels <> :baserestrictinfo <> :baserestrictcost.startup 0 :baserestrictcost.per_tuple 0 :baserestrict_min_security 4294967295 :joininfo <> :has_eclass_joins false :consider_partitionwise_join false :top_parent_relids (b) :nparts -1 :partbounds_merged false :partition_qual <> :live_parts (b) :all_partrels (b) } Result: Rows: 1.00 TotalCost: 100000.01 { Inner Join Loop Strategy Rels: [1,2] Op: (1,1) = (2,1) { Node Rels: [1] } { Node Rels: [2] } } test is not running for ASAN! 
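The optimizer dump above ends with a nested-loop plan costed at Rows: 1.00, TotalCost: 100000.01. Assuming PostgreSQL-style nested-loop costing (which the NESTPATH/MATERIALPATH/RESTRICTINFO field names suggest, together with PostgreSQL's default cpu_tuple_cost of 0.01), the figure decomposes as:

    total_cost ~= outer path total_cost                  (0)
                + materialized inner path total_cost     (0.005)
                + (clause eval_cost.per_tuple (100000) + cpu_tuple_cost (0.01))
                  * outer_rows (1) * inner_rows (1)
                = 100000.015

which matches jpath.path.total_cost 100000.015 in the dump; the Result line prints the same value with two decimals. The dominant term is the join clause's eval_cost.per_tuple of 100000, presumably a deliberate penalty attached to this clause rather than a measured expression cost.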
Time of fsm construction: 0.000168s Time of fsm construction: 0.000262s Time of fsm construction: 0.000159s Time of fsm construction: 0.100021s ContainsShuffle count: 9 Time of fsm construction: 0.000517s Time of fsm construction: 0.000095s Time of fsm construction: 0.000062s Time of fsm construction: 0.000096s Time of fsm construction: 0.000513s Time of fsm construction: 0.000111s Time of fsm construction: 0.000704s Time of fsm construction: 0.002859s Time of fsm construction: 0.147918s Time of fsm construction: 0.000558s Time of fsm construction: 0.000231s Time of fsm construction: 0.000078s Time of fsm construction: 0.000027s |84.9%| [TS] {BAZEL_UPLOAD} ydb/library/yql/dq/opt/ut/unittest |84.9%| [TA] $(B)/ydb/core/blobstorage/ut_vdisk/test-results/unittest/{meta.json ... results_accumulator.log} >> Splitter::Crit [GOOD] >> Splitter::CritSimple |84.9%| [TA] {BAZEL_UPLOAD} $(B)/ydb/core/blobstorage/ut_vdisk/test-results/unittest/{meta.json ... results_accumulator.log} >> BlobDepot::BasicDiscover [GOOD] >> BlobDepot::BasicBlock ------- [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/ut_blobstorage/ut_restart_pdisk/unittest >> BSCRestartPDisk::RestartOneByOneWithReconnects [GOOD] Test command err: RandomSeed# 6204277131973008540 |84.9%| [TM] {BAZEL_UPLOAD} ydb/core/blobstorage/ut_blobstorage/ut_restart_pdisk/unittest ------- [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/ut_blobstorage/ut_restart_pdisk/unittest >> BSCRestartPDisk::RestartOneByOne [GOOD] Test command err: RandomSeed# 10496063392998785918 |84.9%| [TM] {BAZEL_UPLOAD} ydb/core/blobstorage/ut_blobstorage/ut_restart_pdisk/unittest >> test_transform.py::TestYamlConfigTransformations::test_basic[args0-dump] |84.9%| [TA] $(B)/ydb/core/blobstorage/ut_blobstorage/ut_restart_pdisk/test-results/unittest/{meta.json ... results_accumulator.log} |84.9%| [TA] {BAZEL_UPLOAD} $(B)/ydb/core/blobstorage/ut_blobstorage/ut_restart_pdisk/test-results/unittest/{meta.json ... 
results_accumulator.log} >> BlobDepot::BasicBlock [GOOD] >> BlobDepot::BasicCollectGarbage >> TDqSolomonWriteActorTest::TestWriteFormat [GOOD] >> TDqSolomonWriteActorTest::TestWriteBigBatchMonitoring >> test.py::test[solomon-BadDownsamplingAggregation-] [GOOD] >> test.py::test[solomon-BadDownsamplingDisabled-] >> TQueryResultSizeTrackerTest::SerializeDeserializeMaxPtotobufSizeMinusOne [GOOD] >> Splitter::CritSimple [GOOD] >> TBlobStorageHullFresh::AppendixPerf [GOOD] >> TBlobStorageHullFresh::AppendixPerf_Tune |84.9%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/vdisk/query/ut/unittest >> TQueryResultSizeTrackerTest::SerializeDeserializeMaxPtotobufSizeMinusOne [GOOD] |84.9%| [TM] {BAZEL_UPLOAD} ydb/core/blobstorage/vdisk/query/ut/unittest >> BlobDepot::BasicCollectGarbage [GOOD] >> BlobDepot::VerifiedRandom ------- [TS] {asan, default-linux-x86_64, release} ydb/core/tx/columnshard/splitter/ut/unittest >> Splitter::CritSimple [GOOD] Test command err: FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=5163264;columns=1; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=5163264;columns=1; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=5163264;columns=1; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=5163264;columns=1; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=5163264;columns=1; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=5163264;columns=1; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=5163264;columns=1; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=5163264;columns=1; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=5163264;columns=1; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=5163264;columns=1; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=5163264;columns=1; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=5163264;columns=1; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=5163264;columns=1; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=5163264;columns=1; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=5163264;columns=1; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=5163264;columns=1; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=280384;columns=1; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=280384;columns=1; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=280384;columns=1; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=280384;columns=1; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=280384;columns=1; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=280384;columns=1; 
FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=280384;columns=1; [ ... run of near-identical FALLBACK_ACTOR_LOGGING serialize/parsing entries elided; batch sizes 280384, 280336, 2088936, 5184936, 5163264, 50240, 50200, 7124168, and 132168 bytes (columns=1), each serialize entry mirrored by a parsing entry of the same size ... ] FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=132168;columns=1;
FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=7124168;columns=1; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=132168;columns=1; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=7124168;columns=1; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=132168;columns=1; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=7124168;columns=1; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=132168;columns=1; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=7124168;columns=1; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=132168;columns=1; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=7124168;columns=1; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=132168;columns=1; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=7124168;columns=1; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=132168;columns=1; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=7124168;columns=1; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=132168;columns=1; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=7124168;columns=1; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=132168;columns=1; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=7124168;columns=1; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=132168;columns=1; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=7124168;columns=1; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=132168;columns=1; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=7124168;columns=1; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=132168;columns=1; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=seria ... 
82944;columns=1; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=8905200;columns=1; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=8905200;columns=1; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=8905200;columns=1; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=8905200;columns=1; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=8905200;columns=1; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=8905200;columns=1; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=8905200;columns=1; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=8947912;columns=1; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=7914944;columns=1; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=7914944;columns=1; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=7914944;columns=1; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=7914944;columns=1; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=7914944;columns=1; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=7914944;columns=1; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=7914944;columns=1; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=7914944;columns=1; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=7964832;columns=1; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=71282912;columns=1; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=8905200;columns=1; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=8905200;columns=1; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=8905200;columns=1; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=8905200;columns=1; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=8905200;columns=1; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=8905200;columns=1; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=8905200;columns=1; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=8947912;columns=1; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=7914944;columns=1; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=7914944;columns=1; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=7914944;columns=1; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=7914944;columns=1; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=7914944;columns=1; 
FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=7914944;columns=1; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=7914944;columns=1; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=7914944;columns=1; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=7964800;columns=1; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=7914944;columns=1; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=7914944;columns=1; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=7914944;columns=1; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=7914944;columns=1; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=7914944;columns=1; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=7914944;columns=1; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=7914944;columns=1; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=7914944;columns=1; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=7964832;columns=1; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=7914944;columns=1; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=7914944;columns=1; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=7914944;columns=1; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=7914944;columns=1; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=7914944;columns=1; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=7914944;columns=1; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=7914944;columns=1; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=7914944;columns=1; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=7964832;columns=1; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=7914944;columns=1; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=7914944;columns=1; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=7914944;columns=1; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=7914944;columns=1; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=7914944;columns=1; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=7914944;columns=1; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=7914944;columns=1; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=7914944;columns=1; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=7964832;columns=1; 
FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=7914944;columns=1; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=7914944;columns=1; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=7914944;columns=1; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=7914944;columns=1; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=7914944;columns=1; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=7914944;columns=1; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=7914944;columns=1; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=7914944;columns=1; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=7964832;columns=1; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=7914944;columns=1; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=7914944;columns=1; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=7914944;columns=1; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=7914944;columns=1; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=7914944;columns=1; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=7914944;columns=1; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=7914944;columns=1; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=7914944;columns=1; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=7964832;columns=1; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=7914944;columns=1; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=7914944;columns=1; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=7914944;columns=1; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=7914944;columns=1; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=7914944;columns=1; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=7914944;columns=1; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=7914944;columns=1; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=7914944;columns=1; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=7964832;columns=1; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=7914944;columns=1; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=7914944;columns=1; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=7914944;columns=1; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=7914944;columns=1; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=7914944;columns=1; 
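Each FALLBACK_ACTOR_LOGGING record above is a flat semicolon-separated key=value list (priority, component, fline, event, size, columns), which makes runs like this easy to post-process when triaging. A minimal sketch of a parser for this record shape; the helper name is hypothetical and not taken from the YDB sources:

    #include <map>
    #include <sstream>
    #include <string>

    // Parse one record such as
    //   "priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=5163264;columns=1;"
    // into a key -> value map; fragments without '=' are skipped.
    std::map<std::string, std::string> ParseLogRecord(const std::string& line) {
        std::map<std::string, std::string> fields;
        std::istringstream in(line);
        std::string part;
        while (std::getline(in, part, ';')) {
            auto eq = part.find('=');
            if (eq != std::string::npos) {
                fields[part.substr(0, eq)] = part.substr(eq + 1);
            }
        }
        return fields;
    }

Grouping the parsed records by (event, size) shows the same few payload sizes cycling through serialize (native.cpp:110) and parsing (native.cpp:71).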
|84.9%| [TS] {BAZEL_UPLOAD} ydb/core/tx/columnshard/splitter/ut/unittest
>> SplitterTest::Generic [GOOD]
>> Mirror3of4::ReplicationSmall [GOOD]
>> Mirror3of4::ReplicationHuge
>> test_transform.py::TestYamlConfigTransformations::test_basic[args0-dump] [GOOD]
>> test_transform.py::TestYamlConfigTransformations::test_basic[args0-dump_ds_init]
------- [TS] {asan, default-linux-x86_64, release} ydb/library/formats/arrow/ut/unittest >> SplitterTest::Generic [GOOD]
Test command err: 1056768 528384 264192 6176 8192 2052 16384 16384 3,3,4 33333,33333,33334 10231,10231,10231,10231,10231,10231,10231,10231,10231,10231,10231,10232,10232,10232
|84.9%| [TS] {BAZEL_UPLOAD} ydb/library/formats/arrow/ut/unittest
>> TBlobStorageHullFresh::AppendixPerf_Tune [GOOD]
>> TQueryResultSizeTrackerTest::SerializeDeserializeMaxPtotobufSizePlusOne [GOOD]
|84.9%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/vdisk/hulldb/fresh/ut/unittest >> TBlobStorageHullFresh::AppendixPerf_Tune [GOOD]
|84.9%| [TM] {BAZEL_UPLOAD} ydb/core/blobstorage/vdisk/hulldb/fresh/ut/unittest
>> Dictionary::Simple [GOOD]
>> Dictionary::ComparePayloadAndFull
>> test.py::test[solomon-BadDownsamplingDisabled-] [GOOD]
>> test.py::test[solomon-BadDownsamplingFill-]
|84.9%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/vdisk/query/ut/unittest >> TQueryResultSizeTrackerTest::SerializeDeserializeMaxPtotobufSizePlusOne [GOOD]
|84.9%| [TM] {BAZEL_UPLOAD} ydb/core/blobstorage/vdisk/query/ut/unittest
|84.9%| [TA] $(B)/ydb/core/blobstorage/vdisk/hulldb/fresh/ut/test-results/unittest/{meta.json ... results_accumulator.log}
|84.9%| [TA] {BAZEL_UPLOAD} $(B)/ydb/core/blobstorage/vdisk/hulldb/fresh/ut/test-results/unittest/{meta.json ... results_accumulator.log}
>> BridgeGet::PartRestorationAcrossBridgeOnDiscover [GOOD]
|84.9%| [TA] $(B)/ydb/core/blobstorage/vdisk/query/ut/test-results/unittest/{meta.json ... results_accumulator.log}
|84.9%| [TA] {BAZEL_UPLOAD} $(B)/ydb/core/blobstorage/vdisk/query/ut/test-results/unittest/{meta.json ... results_accumulator.log}
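The SplitterTest::Generic error output above prints counts being split into near-equal chunks: 10 rows as 3,3,4 and 100000 as 33333,33333,33334, with any remainder pushed to the trailing chunks. A sketch of that floor-plus-remainder rule, assuming this is the sizing behavior the printed sequences reflect (the actual splitter in ydb/core/tx/columnshard/splitter handles far more than row counts):

    #include <cstdint>
    #include <vector>

    // Split n rows into k near-equal chunks; the n % k leftover rows go
    // to the last chunks, matching the "3,3,4" pattern printed above.
    std::vector<uint64_t> SplitEvenly(uint64_t n, uint64_t k) {
        std::vector<uint64_t> chunks(k, n / k);
        for (uint64_t i = k - n % k; i < k; ++i) {
            ++chunks[i];
        }
        return chunks;
    }

    // SplitEvenly(10, 3)     -> {3, 3, 4}
    // SplitEvenly(100000, 3) -> {33333, 33333, 33334}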
>> test_transform.py::TestYamlConfigTransformations::test_basic[args0-dump_ds_init] [GOOD]
>> test_transform.py::TestYamlConfigTransformations::test_basic[args1-dump]
>> TDqSolomonWriteActorTest::TestWriteBigBatchMonitoring [GOOD]
>> TDqSolomonWriteActorTest::TestWriteBigBatchSolomon
>> TSubscriberCombinationsTest::CombinationsMigratedPath [GOOD]
>> VDiskBalancing::TestRandom_Mirror3dc [GOOD]
------- [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/ut_blobstorage/ut_bridge/unittest >> BridgeGet::PartRestorationAcrossBridgeOnDiscover [GOOD]
Test command err: RandomSeed# 17416476394093477930 readBody# 1 mask1# 0 mask2# 0 mask3# 0 *** performing bridge discover maxId#[0:0:0:0:0:0:0] readBody# 1 mask1# 0 mask2# 0 mask3# 1 *** performing bridge discover maxId#[100501:1:1:0:0:5:0] *** reading from i# 0 *** reading from i# 1 *** reading from i# 2 readBody# 1 mask1# 0 mask2# 0 mask3# 2 *** performing bridge discover maxId#[100502:2:1:0:0:5:0] *** reading from i# 0 *** reading from i# 1 *** reading from i# 2 readBody# 1 mask1# 0 mask2# 0 mask3# 3 *** performing bridge discover maxId#[100503:2:1:0:0:5:0] *** reading from i# 0 *** reading from i# 1 *** reading from i# 2 readBody# 1 mask1# 0 mask2# 0 mask3# 4 *** performing bridge discover maxId#[100504:3:1:0:0:5:0] *** reading from i# 0 *** reading from i# 1 *** reading from i# 2 readBody# 1 mask1# 0 mask2# 0 mask3# 5 *** performing bridge discover maxId#[100505:3:1:0:0:5:0] *** reading from i# 0 *** reading from i# 1 *** reading from i# 2 readBody# 1 mask1# 0 mask2# 0 mask3# 6 *** performing bridge discover maxId#[100506:3:1:0:0:5:0] *** reading from i# 0 *** reading from i# 1 *** reading from i# 2 readBody# 1 mask1# 0 mask2# 0 mask3# 7 *** performing bridge discover maxId#[100507:3:1:0:0:5:0] *** reading from i# 0 *** reading from i# 1 *** reading from i# 2 readBody# 1 mask1# 0 mask2# 1 mask3# 0 *** performing bridge discover maxId#[100508:1:1:0:0:5:0] *** reading from i# 0 *** reading from i# 1 *** reading from i# 2 readBody# 1 mask1# 0 mask2# 1 mask3# 1 *** performing bridge discover maxId#[100509:1:1:0:0:5:0] *** reading from i# 0 *** reading from i# 1 *** reading from i# 2 readBody# 1 mask1# 0 mask2# 1 mask3# 2 *** performing bridge discover maxId#[100510:2:1:0:0:5:0] *** reading from i# 0 *** reading from i# 1 *** reading from i# 2 readBody# 1 mask1# 0 mask2# 1 mask3# 3 *** performing bridge discover maxId#[100511:2:1:0:0:5:0] *** reading from i# 0 *** reading from i# 1 *** reading from i# 2 readBody# 1 mask1# 0 mask2# 1 mask3# 4 *** performing bridge discover maxId#[100512:3:1:0:0:5:0] *** reading from i# 0 *** reading from i# 1 *** reading from i# 2 readBody# 1 mask1# 0 mask2# 1 mask3# 5 *** performing bridge discover maxId#[100513:3:1:0:0:5:0] *** reading from i# 0 *** reading from i# 1 *** reading from i# 2 readBody# 1 mask1# 0 mask2# 1 mask3# 6 *** performing bridge discover maxId#[100514:3:1:0:0:5:0] *** reading from i# 0 *** reading from i# 1 *** reading from i# 2 readBody# 1 mask1# 0 mask2# 1 mask3# 7 *** performing bridge discover maxId#[100515:3:1:0:0:5:0] *** reading from i# 0 *** reading from i# 1 *** reading from i# 2 readBody# 1 mask1# 0 mask2# 2 mask3# 0 *** performing bridge discover maxId#[100516:2:1:0:0:5:0] *** reading from i# 0 *** reading from i# 1 *** reading from i# 2 readBody# 1 mask1# 0 mask2# 2 mask3# 1 *** performing bridge discover maxId#[100517:2:1:0:0:5:0] *** reading from i# 0 *** reading from i# 1 *** reading from i# 2 readBody# 1 mask1# 0 mask2# 2 mask3# 2 *** performing
bridge discover maxId#[100518:2:1:0:0:5:0] *** reading from i# 0 *** reading from i# 1 *** reading from i# 2 readBody# 1 mask1# 0 mask2# 2 mask3# 3 *** performing bridge discover maxId#[100519:2:1:0:0:5:0] *** reading from i# 0 *** reading from i# 1 *** reading from i# 2 readBody# 1 mask1# 0 mask2# 2 mask3# 4 *** performing bridge discover maxId#[100520:3:1:0:0:5:0] *** reading from i# 0 *** reading from i# 1 *** reading from i# 2 readBody# 1 mask1# 0 mask2# 2 mask3# 5 *** performing bridge discover maxId#[100521:3:1:0:0:5:0] *** reading from i# 0 *** reading from i# 1 *** reading from i# 2 readBody# 1 mask1# 0 mask2# 2 mask3# 6 *** performing bridge discover maxId#[100522:3:1:0:0:5:0] *** reading from i# 0 *** reading from i# 1 *** reading from i# 2 readBody# 1 mask1# 0 mask2# 2 mask3# 7 *** performing bridge discover maxId#[100523:3:1:0:0:5:0] *** reading from i# 0 *** reading from i# 1 *** reading from i# 2 readBody# 1 mask1# 0 mask2# 3 mask3# 0 *** performing bridge discover maxId#[100524:2:1:0:0:5:0] *** reading from i# 0 *** reading from i# 1 *** reading from i# 2 readBody# 1 mask1# 0 mask2# 3 mask3# 1 *** performing bridge discover maxId#[100525:2:1:0:0:5:0] *** reading from i# 0 *** reading from i# 1 *** reading from i# 2 readBody# 1 mask1# 0 mask2# 3 mask3# 2 *** performing bridge discover maxId#[100526:2:1:0:0:5:0] *** reading from i# 0 *** reading from i# 1 *** reading from i# 2 readBody# 1 mask1# 0 mask2# 3 mask3# 3 *** performing bridge discover maxId#[100527:2:1:0:0:5:0] *** reading from i# 0 *** reading from i# 1 *** reading from i# 2 readBody# 1 mask1# 0 mask2# 3 mask3# 4 *** performing bridge discover maxId#[100528:3:1:0:0:5:0] *** reading from i# 0 *** reading from i# 1 *** reading from i# 2 readBody# 1 mask1# 0 mask2# 3 mask3# 5 *** performing bridge discover maxId#[100529:3:1:0:0:5:0] *** reading from i# 0 *** reading from i# 1 *** reading from i# 2 readBody# 1 mask1# 0 mask2# 3 mask3# 6 *** performing bridge discover maxId#[100530:3:1:0:0:5:0] *** reading from i# 0 *** reading from i# 1 *** reading from i# 2 readBody# 1 mask1# 0 mask2# 3 mask3# 7 *** performing bridge discover maxId#[100531:3:1:0:0:5:0] *** reading from i# 0 *** reading from i# 1 *** reading from i# 2 readBody# 1 mask1# 0 mask2# 4 mask3# 0 *** performing bridge discover maxId#[100532:3:1:0:0:5:0] *** reading from i# 0 *** reading from i# 1 *** reading from i# 2 readBody# 1 mask1# 0 mask2# 4 mask3# 1 *** performing bridge discover maxId#[100533:3:1:0:0:5:0] *** reading from i# 0 *** reading from i# 1 *** reading from i# 2 readBody# 1 mask1# 0 mask2# 4 mask3# 2 *** performing bridge discover maxId#[100534:3:1:0:0:5:0] *** reading from i# 0 *** reading from i# 1 *** reading from i# 2 readBody# 1 mask1# 0 mask2# 4 mask3# 3 *** performing bridge discover maxId#[100535:3:1:0:0:5:0] *** reading from i# 0 *** reading from i# 1 *** reading from i# 2 readBody# 1 mask1# 0 mask2# 4 mask3# 4 *** performing bridge discover maxId#[100536:3:1:0:0:5:0] *** reading from i# 0 *** reading from i# 1 *** reading from i# 2 readBody# 1 mask1# 0 mask2# 4 mask3# 5 *** performing bridge discover maxId#[100537:3:1:0:0:5:0] *** reading from i# 0 *** reading from i# 1 *** reading from i# 2 readBody# 1 mask1# 0 mask2# 4 mask3# 6 *** performing bridge discover maxId#[100538:3:1:0:0:5:0] *** reading from i# 0 *** reading from i# 1 *** reading from i# 2 readBody# 1 mask1# 0 mask2# 4 mask3# 7 *** performing bridge discover maxId#[100539:3:1:0:0:5:0] *** reading from i# 0 *** reading from i# 1 *** reading from i# 2 readBody# 1 mask1# 0 
mask2# 5 mask3# 0 *** performing bridge discover maxId#[100540:3:1:0:0:5:0] *** reading from i# 0 *** reading from i# 1 *** reading from i# 2 readBody# 1 mask1# 0 mask2# 5 mask3# 1 *** performing bridge discover maxId#[100541:3:1:0:0:5:0] *** reading from i# 0 *** reading from i# 1 *** reading from i# 2 readBody# 1 mask1# 0 mask2# 5 mask3# 2 *** performing bridge discover maxId#[100542:3:1:0:0:5:0] *** reading from i# 0 *** reading from i# 1 *** reading from i# 2 readBody# 1 mask1# 0 mask2# 5 mask3# 3 *** performing bridge discover maxId#[100543:3:1:0:0:5:0] *** reading from i# 0 *** reading from i# 1 *** reading from i# 2 readBody# 1 mask1# 0 mask2# 5 mask3# 4 *** performing bridge discover maxId#[100544:3:1:0:0:5:0] *** reading from i# 0 *** reading from i# 1 *** reading from i# 2 readBody# 1 mask1# 0 mask2# 5 mask3# 5 *** performing bridge discover maxId#[100545:3:1:0:0:5:0] *** reading from i# 0 *** reading from i# 1 *** reading from i# 2 readBody# 1 mask1# 0 mask2# 5 mask3# 6 *** performing bridge discover maxId#[100546:3:1:0:0:5:0] *** reading from i# 0 *** reading from i# 1 *** reading from i# 2 readBody# 1 mask1# 0 mask2# 5 mask3# 7 *** performing bridge discover maxId#[100547:3:1:0:0:5:0] *** reading from i# 0 *** reading from i# 1 *** reading from i# 2 readBody# 1 mask1# 0 mask2# 6 mask3# 0 *** performing bridge discover maxId#[100548:3:1:0:0:5:0] *** reading from i# 0 *** reading from i# 1 *** reading from i# 2 readBody# 1 mask1# 0 mask2# 6 mask3# 1 *** performing bridge discover maxId#[100549:3:1:0:0:5:0] *** reading from i# 0 *** reading from i# 1 *** reading from i# 2 readBody# 1 mask1# 0 mask2# 6 mask3# 2 *** performing bridge discover maxId#[100550:3:1:0:0:5:0] *** reading from i# 0 *** reading from i# 1 *** reading from i# 2 readBody# 1 mask1# 0 mask2# 6 mask3# 3 *** performing bridge discover maxId#[100551:3:1:0:0:5:0] *** reading from i# 0 *** reading from i# 1 *** reading from i# 2 readBody# 1 mask1# 0 mask2# 6 mask3# 4 *** performing bridge discover maxId#[100552:3:1:0:0:5:0] *** reading from i# 0 *** reading from i# 1 *** reading from i# 2 readBody# 1 mask1# 0 mask2# 6 mask3# 5 *** performing bridge discover maxId#[100553:3:1:0:0:5:0] *** reading from i# 0 *** reading from i# 1 *** reading from i# 2 readBody# 1 mask1# 0 mask2# 6 mask3# 6 *** performing bridge discover maxId#[100554:3:1:0:0:5:0] *** reading from i# 0 *** reading from i# 1 *** reading from i# 2 readBody# 1 mask1# 0 mask2# 6 mask3# 7 *** performing bridge discover maxId#[100555:3:1:0:0:5:0] *** reading from i# 0 *** reading from i# 1 *** reading from i# 2 readBody# 1 mask1# 0 mask2# 7 mask3# 0 *** performing bridge discover maxId#[100556:3:1:0:0:5:0] *** reading from i# 0 *** reading from i# 1 *** reading from i# 2 readBody# 1 mask1# 0 mask2# 7 mask3# 1 *** performing bridge discover maxId#[100557:3:1:0:0:5:0] *** reading from i# 0 *** reading from i# 1 *** reading from i# 2 readBody# 1 mask1# 0 mask2# 7 mask3# 2 *** performing bridge discover maxId#[100558:3:1:0:0:5:0] *** reading from i# 0 *** reading from i# 1 *** reading from i# 2 readBody# 1 mask1# 0 mask2# 7 mask3# 3 *** performing bridge discover maxId#[100559:3:1:0:0:5:0] *** reading from i# 0 *** reading from i# 1 *** reading from i# 2 readBody# 1 mask1# 0 mask2# 7 mask3# 4 *** performing bridge discover maxId#[100560:3:1:0:0:5:0] *** reading from i# 0 *** reading from i# 1 *** reading from i# 2 readBody# 1 mask1# 0 mask2# 7 mask3# 5 *** performing bridge discover maxId#[100561:3:1:0:0:5:0] *** reading from i# 0 *** reading from i# 1 *** reading 
from i# 2 readBody# 1 mask1# 0 mask2# 7 mask3# 6 *** performing bridge discover maxId#[100562:3:1:0:0:5:0] *** reading from i# 0 *** reading from i# 1 *** reading from i# 2 readBody# 1 mask1# 0 mask2# 7 mask3# 7 *** performing bridge discover maxId#[100563:3:1:0:0:5:0] *** reading from i# 0 *** reading from i# 1 *** reading from i# 2 readBody# 1 mask1# 1 mask2# 0 mask3# 0 *** performing bridge discover maxId#[100564:1:1:0:0:5:0] *** reading from i# 0 *** reading from i# 1 *** reading from i# 2 readBody# 1 mask1# 1 mask2# 0 mask3# 1 *** performing bridge discover maxId#[100565:1:1:0:0:5:0] *** reading from i# 0 *** reading from i# 1 *** reading from i# 2 readBody# 1 mask1# 1 mask2# 0 mask3# 2 *** performing bridge discover maxId#[100566:2:1:0:0:5:0] *** reading from i# 0 *** reading from i# 1 *** reading from i# 2 readBody# 1 mask1# 1 mask2# 0 mask3# 3 *** performing bridge discover maxId#[100567:2:1:0:0:5:0] *** reading from i# 0 *** reading from i# 1 *** reading from i# 2 readBody# 1 mask1# 1 mask2# 0 mask3# 4 *** performing bridge discover maxId#[100568:3:1:0:0:5:0] *** reading from i# 0 *** reading from i# 1 *** reading from i# 2 readBody# 1 mask1# 1 mask2# 0 mask3# 5 *** performing bridge discover maxId#[100569:3:1:0:0:5:0] *** reading from i# 0 *** reading from i# 1 *** reading from i# 2 readBody# 1 mask1# 1 mask2# 0 mask3# 6 *** performing bridge discover maxId#[100570:3:1:0:0:5:0] *** reading from i# 0 *** reading from i# 1 *** reading from i# 2 readBody# 1 mask1# 1 mask2# 0 mask3# 7 *** performing bridge discover maxId#[100571:3:1:0:0:5:0] *** reading from i# 0 *** reading from i# 1 *** reading from i# 2 readBody# 1 mask1# 1 mask2# 1 mask3# 0 *** performing bridge discover maxId#[100572:1:1:0:0:5:0] *** reading from i# 0 *** reading from i# 1 *** reading from i# 2 readBody# 1 mask1# 1 mask2# 1 mask3# 1 *** performing bridge discover maxId#[100573:1:1:0:0:5:0] *** reading from i# 0 *** reading from i# ... 
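The run above (elided in the middle) is exhaustive: readBody takes both values and each of the three masks walks 0 through 7, i.e. 2 * 8 * 8 * 8 = 1024 bridge discovers, with mask3 varying fastest. A sketch of the driving loop under that reading; the identifiers are hypothetical, since the test source itself is not part of this log:

    #include <cstdio>

    // Enumerate the combinations in the order the test prints them:
    // readBody 1 then 0; mask1 outermost, mask3 innermost.
    int main() {
        for (int readBody = 1; readBody >= 0; --readBody)
            for (int mask1 = 0; mask1 < 8; ++mask1)
                for (int mask2 = 0; mask2 < 8; ++mask2)
                    for (int mask3 = 0; mask3 < 8; ++mask3)
                        std::printf("readBody# %d mask1# %d mask2# %d mask3# %d\n",
                                    readBody, mask1, mask2, mask3);
        return 0;  // 1024 lines, one per combination
    }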
iscover maxId#[101450:3:1:0:0:5:0] *** reading from i# 0 *** reading from i# 1 *** reading from i# 2 readBody# 0 mask1# 6 mask2# 6 mask3# 7 *** performing bridge discover maxId#[101451:3:1:0:0:5:0] *** reading from i# 0 *** reading from i# 1 *** reading from i# 2 readBody# 0 mask1# 6 mask2# 7 mask3# 0 *** performing bridge discover maxId#[101452:3:1:0:0:5:0] *** reading from i# 0 *** reading from i# 1 *** reading from i# 2 readBody# 0 mask1# 6 mask2# 7 mask3# 1 *** performing bridge discover maxId#[101453:3:1:0:0:5:0] *** reading from i# 0 *** reading from i# 1 *** reading from i# 2 readBody# 0 mask1# 6 mask2# 7 mask3# 2 *** performing bridge discover maxId#[101454:3:1:0:0:5:0] *** reading from i# 0 *** reading from i# 1 *** reading from i# 2 readBody# 0 mask1# 6 mask2# 7 mask3# 3 *** performing bridge discover maxId#[101455:3:1:0:0:5:0] *** reading from i# 0 *** reading from i# 1 *** reading from i# 2 readBody# 0 mask1# 6 mask2# 7 mask3# 4 *** performing bridge discover maxId#[101456:3:1:0:0:5:0] *** reading from i# 0 *** reading from i# 1 *** reading from i# 2 readBody# 0 mask1# 6 mask2# 7 mask3# 5 *** performing bridge discover maxId#[101457:3:1:0:0:5:0] *** reading from i# 0 *** reading from i# 1 *** reading from i# 2 readBody# 0 mask1# 6 mask2# 7 mask3# 6 *** performing bridge discover maxId#[101458:3:1:0:0:5:0] *** reading from i# 0 *** reading from i# 1 *** reading from i# 2 readBody# 0 mask1# 6 mask2# 7 mask3# 7 *** performing bridge discover maxId#[101459:3:1:0:0:5:0] *** reading from i# 0 *** reading from i# 1 *** reading from i# 2 readBody# 0 mask1# 7 mask2# 0 mask3# 0 *** performing bridge discover maxId#[101460:3:1:0:0:5:0] *** reading from i# 0 *** reading from i# 1 *** reading from i# 2 readBody# 0 mask1# 7 mask2# 0 mask3# 1 *** performing bridge discover maxId#[101461:3:1:0:0:5:0] *** reading from i# 0 *** reading from i# 1 *** reading from i# 2 readBody# 0 mask1# 7 mask2# 0 mask3# 2 *** performing bridge discover maxId#[101462:3:1:0:0:5:0] *** reading from i# 0 *** reading from i# 1 *** reading from i# 2 readBody# 0 mask1# 7 mask2# 0 mask3# 3 *** performing bridge discover maxId#[101463:3:1:0:0:5:0] *** reading from i# 0 *** reading from i# 1 *** reading from i# 2 readBody# 0 mask1# 7 mask2# 0 mask3# 4 *** performing bridge discover maxId#[101464:3:1:0:0:5:0] *** reading from i# 0 *** reading from i# 1 *** reading from i# 2 readBody# 0 mask1# 7 mask2# 0 mask3# 5 *** performing bridge discover maxId#[101465:3:1:0:0:5:0] *** reading from i# 0 *** reading from i# 1 *** reading from i# 2 readBody# 0 mask1# 7 mask2# 0 mask3# 6 *** performing bridge discover maxId#[101466:3:1:0:0:5:0] *** reading from i# 0 *** reading from i# 1 *** reading from i# 2 readBody# 0 mask1# 7 mask2# 0 mask3# 7 *** performing bridge discover maxId#[101467:3:1:0:0:5:0] *** reading from i# 0 *** reading from i# 1 *** reading from i# 2 readBody# 0 mask1# 7 mask2# 1 mask3# 0 *** performing bridge discover maxId#[101468:3:1:0:0:5:0] *** reading from i# 0 *** reading from i# 1 *** reading from i# 2 readBody# 0 mask1# 7 mask2# 1 mask3# 1 *** performing bridge discover maxId#[101469:3:1:0:0:5:0] *** reading from i# 0 *** reading from i# 1 *** reading from i# 2 readBody# 0 mask1# 7 mask2# 1 mask3# 2 *** performing bridge discover maxId#[101470:3:1:0:0:5:0] *** reading from i# 0 *** reading from i# 1 *** reading from i# 2 readBody# 0 mask1# 7 mask2# 1 mask3# 3 *** performing bridge discover maxId#[101471:3:1:0:0:5:0] *** reading from i# 0 *** reading from i# 1 *** reading from i# 2 readBody# 0 mask1# 7 mask2# 1 
mask3# 4 *** performing bridge discover maxId#[101472:3:1:0:0:5:0] *** reading from i# 0 *** reading from i# 1 *** reading from i# 2 readBody# 0 mask1# 7 mask2# 1 mask3# 5 *** performing bridge discover maxId#[101473:3:1:0:0:5:0] *** reading from i# 0 *** reading from i# 1 *** reading from i# 2 readBody# 0 mask1# 7 mask2# 1 mask3# 6 *** performing bridge discover maxId#[101474:3:1:0:0:5:0] *** reading from i# 0 *** reading from i# 1 *** reading from i# 2 readBody# 0 mask1# 7 mask2# 1 mask3# 7 *** performing bridge discover maxId#[101475:3:1:0:0:5:0] *** reading from i# 0 *** reading from i# 1 *** reading from i# 2 readBody# 0 mask1# 7 mask2# 2 mask3# 0 *** performing bridge discover maxId#[101476:3:1:0:0:5:0] *** reading from i# 0 *** reading from i# 1 *** reading from i# 2 readBody# 0 mask1# 7 mask2# 2 mask3# 1 *** performing bridge discover maxId#[101477:3:1:0:0:5:0] *** reading from i# 0 *** reading from i# 1 *** reading from i# 2 readBody# 0 mask1# 7 mask2# 2 mask3# 2 *** performing bridge discover maxId#[101478:3:1:0:0:5:0] *** reading from i# 0 *** reading from i# 1 *** reading from i# 2 readBody# 0 mask1# 7 mask2# 2 mask3# 3 *** performing bridge discover maxId#[101479:3:1:0:0:5:0] *** reading from i# 0 *** reading from i# 1 *** reading from i# 2 readBody# 0 mask1# 7 mask2# 2 mask3# 4 *** performing bridge discover maxId#[101480:3:1:0:0:5:0] *** reading from i# 0 *** reading from i# 1 *** reading from i# 2 readBody# 0 mask1# 7 mask2# 2 mask3# 5 *** performing bridge discover maxId#[101481:3:1:0:0:5:0] *** reading from i# 0 *** reading from i# 1 *** reading from i# 2 readBody# 0 mask1# 7 mask2# 2 mask3# 6 *** performing bridge discover maxId#[101482:3:1:0:0:5:0] *** reading from i# 0 *** reading from i# 1 *** reading from i# 2 readBody# 0 mask1# 7 mask2# 2 mask3# 7 *** performing bridge discover maxId#[101483:3:1:0:0:5:0] *** reading from i# 0 *** reading from i# 1 *** reading from i# 2 readBody# 0 mask1# 7 mask2# 3 mask3# 0 *** performing bridge discover maxId#[101484:3:1:0:0:5:0] *** reading from i# 0 *** reading from i# 1 *** reading from i# 2 readBody# 0 mask1# 7 mask2# 3 mask3# 1 *** performing bridge discover maxId#[101485:3:1:0:0:5:0] *** reading from i# 0 *** reading from i# 1 *** reading from i# 2 readBody# 0 mask1# 7 mask2# 3 mask3# 2 *** performing bridge discover maxId#[101486:3:1:0:0:5:0] *** reading from i# 0 *** reading from i# 1 *** reading from i# 2 readBody# 0 mask1# 7 mask2# 3 mask3# 3 *** performing bridge discover maxId#[101487:3:1:0:0:5:0] *** reading from i# 0 *** reading from i# 1 *** reading from i# 2 readBody# 0 mask1# 7 mask2# 3 mask3# 4 *** performing bridge discover maxId#[101488:3:1:0:0:5:0] *** reading from i# 0 *** reading from i# 1 *** reading from i# 2 readBody# 0 mask1# 7 mask2# 3 mask3# 5 *** performing bridge discover maxId#[101489:3:1:0:0:5:0] *** reading from i# 0 *** reading from i# 1 *** reading from i# 2 readBody# 0 mask1# 7 mask2# 3 mask3# 6 *** performing bridge discover maxId#[101490:3:1:0:0:5:0] *** reading from i# 0 *** reading from i# 1 *** reading from i# 2 readBody# 0 mask1# 7 mask2# 3 mask3# 7 *** performing bridge discover maxId#[101491:3:1:0:0:5:0] *** reading from i# 0 *** reading from i# 1 *** reading from i# 2 readBody# 0 mask1# 7 mask2# 4 mask3# 0 *** performing bridge discover maxId#[101492:3:1:0:0:5:0] *** reading from i# 0 *** reading from i# 1 *** reading from i# 2 readBody# 0 mask1# 7 mask2# 4 mask3# 1 *** performing bridge discover maxId#[101493:3:1:0:0:5:0] *** reading from i# 0 *** reading from i# 1 *** reading from i# 
2 readBody# 0 mask1# 7 mask2# 4 mask3# 2 *** performing bridge discover maxId#[101494:3:1:0:0:5:0] *** reading from i# 0 *** reading from i# 1 *** reading from i# 2 readBody# 0 mask1# 7 mask2# 4 mask3# 3 *** performing bridge discover maxId#[101495:3:1:0:0:5:0] *** reading from i# 0 *** reading from i# 1 *** reading from i# 2 readBody# 0 mask1# 7 mask2# 4 mask3# 4 *** performing bridge discover maxId#[101496:3:1:0:0:5:0] *** reading from i# 0 *** reading from i# 1 *** reading from i# 2 readBody# 0 mask1# 7 mask2# 4 mask3# 5 *** performing bridge discover maxId#[101497:3:1:0:0:5:0] *** reading from i# 0 *** reading from i# 1 *** reading from i# 2 readBody# 0 mask1# 7 mask2# 4 mask3# 6 *** performing bridge discover maxId#[101498:3:1:0:0:5:0] *** reading from i# 0 *** reading from i# 1 *** reading from i# 2 readBody# 0 mask1# 7 mask2# 4 mask3# 7 *** performing bridge discover maxId#[101499:3:1:0:0:5:0] *** reading from i# 0 *** reading from i# 1 *** reading from i# 2 readBody# 0 mask1# 7 mask2# 5 mask3# 0 *** performing bridge discover maxId#[101500:3:1:0:0:5:0] *** reading from i# 0 *** reading from i# 1 *** reading from i# 2 readBody# 0 mask1# 7 mask2# 5 mask3# 1 *** performing bridge discover maxId#[101501:3:1:0:0:5:0] *** reading from i# 0 *** reading from i# 1 *** reading from i# 2 readBody# 0 mask1# 7 mask2# 5 mask3# 2 *** performing bridge discover maxId#[101502:3:1:0:0:5:0] *** reading from i# 0 *** reading from i# 1 *** reading from i# 2 readBody# 0 mask1# 7 mask2# 5 mask3# 3 *** performing bridge discover maxId#[101503:3:1:0:0:5:0] *** reading from i# 0 *** reading from i# 1 *** reading from i# 2 readBody# 0 mask1# 7 mask2# 5 mask3# 4 *** performing bridge discover maxId#[101504:3:1:0:0:5:0] *** reading from i# 0 *** reading from i# 1 *** reading from i# 2 readBody# 0 mask1# 7 mask2# 5 mask3# 5 *** performing bridge discover maxId#[101505:3:1:0:0:5:0] *** reading from i# 0 *** reading from i# 1 *** reading from i# 2 readBody# 0 mask1# 7 mask2# 5 mask3# 6 *** performing bridge discover maxId#[101506:3:1:0:0:5:0] *** reading from i# 0 *** reading from i# 1 *** reading from i# 2 readBody# 0 mask1# 7 mask2# 5 mask3# 7 *** performing bridge discover maxId#[101507:3:1:0:0:5:0] *** reading from i# 0 *** reading from i# 1 *** reading from i# 2 readBody# 0 mask1# 7 mask2# 6 mask3# 0 *** performing bridge discover maxId#[101508:3:1:0:0:5:0] *** reading from i# 0 *** reading from i# 1 *** reading from i# 2 readBody# 0 mask1# 7 mask2# 6 mask3# 1 *** performing bridge discover maxId#[101509:3:1:0:0:5:0] *** reading from i# 0 *** reading from i# 1 *** reading from i# 2 readBody# 0 mask1# 7 mask2# 6 mask3# 2 *** performing bridge discover maxId#[101510:3:1:0:0:5:0] *** reading from i# 0 *** reading from i# 1 *** reading from i# 2 readBody# 0 mask1# 7 mask2# 6 mask3# 3 *** performing bridge discover maxId#[101511:3:1:0:0:5:0] *** reading from i# 0 *** reading from i# 1 *** reading from i# 2 readBody# 0 mask1# 7 mask2# 6 mask3# 4 *** performing bridge discover maxId#[101512:3:1:0:0:5:0] *** reading from i# 0 *** reading from i# 1 *** reading from i# 2 readBody# 0 mask1# 7 mask2# 6 mask3# 5 *** performing bridge discover maxId#[101513:3:1:0:0:5:0] *** reading from i# 0 *** reading from i# 1 *** reading from i# 2 readBody# 0 mask1# 7 mask2# 6 mask3# 6 *** performing bridge discover maxId#[101514:3:1:0:0:5:0] *** reading from i# 0 *** reading from i# 1 *** reading from i# 2 readBody# 0 mask1# 7 mask2# 6 mask3# 7 *** performing bridge discover maxId#[101515:3:1:0:0:5:0] *** reading from i# 0 *** 
reading from i# 1 *** reading from i# 2 readBody# 0 mask1# 7 mask2# 7 mask3# 0 *** performing bridge discover maxId#[101516:3:1:0:0:5:0] *** reading from i# 0 *** reading from i# 1 *** reading from i# 2 readBody# 0 mask1# 7 mask2# 7 mask3# 1 *** performing bridge discover maxId#[101517:3:1:0:0:5:0] *** reading from i# 0 *** reading from i# 1 *** reading from i# 2 readBody# 0 mask1# 7 mask2# 7 mask3# 2 *** performing bridge discover maxId#[101518:3:1:0:0:5:0] *** reading from i# 0 *** reading from i# 1 *** reading from i# 2 readBody# 0 mask1# 7 mask2# 7 mask3# 3 *** performing bridge discover maxId#[101519:3:1:0:0:5:0] *** reading from i# 0 *** reading from i# 1 *** reading from i# 2 readBody# 0 mask1# 7 mask2# 7 mask3# 4 *** performing bridge discover maxId#[101520:3:1:0:0:5:0] *** reading from i# 0 *** reading from i# 1 *** reading from i# 2 readBody# 0 mask1# 7 mask2# 7 mask3# 5 *** performing bridge discover maxId#[101521:3:1:0:0:5:0] *** reading from i# 0 *** reading from i# 1 *** reading from i# 2 readBody# 0 mask1# 7 mask2# 7 mask3# 6 *** performing bridge discover maxId#[101522:3:1:0:0:5:0] *** reading from i# 0 *** reading from i# 1 *** reading from i# 2 readBody# 0 mask1# 7 mask2# 7 mask3# 7 *** performing bridge discover maxId#[101523:3:1:0:0:5:0] *** reading from i# 0 *** reading from i# 1 *** reading from i# 2 |84.9%| [TM] {BAZEL_UPLOAD} ydb/core/blobstorage/ut_blobstorage/ut_bridge/unittest >> Dictionary::ComparePayloadAndFull [GOOD] >> Hash::ScalarBinaryHash [GOOD] >> Hash::ScalarCTypeHash [GOOD] >> Hash::ScalarCompositeHash [GOOD] >> Printer::SSAToPrettyString >> Printer::SSAToPrettyString [GOOD] >> ProgramStep::Round0 [GOOD] >> ProgramStep::Round1 >> ProgramStep::Round1 [GOOD] >> ProgramStep::Filter [GOOD] >> ProgramStep::Add [GOOD] >> ProgramStep::Substract [GOOD] >> ProgramStep::Multiply [GOOD] >> ProgramStep::Divide >> ProgramStep::Divide [GOOD] >> ProgramStep::Gcd [GOOD] >> ProgramStep::Lcm [GOOD] >> ProgramStep::Mod [GOOD] >> ProgramStep::ModOrZero [GOOD] >> ProgramStep::Abs [GOOD] >> ProgramStep::Negate [GOOD] >> ProgramStep::Compares ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/scheme_board/ut_subscriber/unittest >> TSubscriberCombinationsTest::CombinationsMigratedPath [GOOD] Test command err: =========== Path: "/root/tenant" PathDescription { Self { PathVersion: 1 } DomainDescription { DomainKey { SchemeShard: 800 PathId: 2 } } } PathId: 2 PathOwnerId: 800 =========== Path: "/root/tenant" PathDescription { Self { PathVersion: 1 } DomainDescription { DomainKey { SchemeShard: 800 PathId: 2 } } } PathId: 2 PathOwnerId: 800 2026-01-08T15:23:48.895108Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:760: [1:3:2050] Handle NKikimrSchemeBoard.TEvHandshake { Owner: 800 Generation: 1 }: sender# [1:36:2066] 2026-01-08T15:23:48.895174Z node 1 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:778: [1:3:2050] Successful handshake: owner# 800, generation# 1 2026-01-08T15:23:48.895335Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:760: [1:6:2053] Handle NKikimrSchemeBoard.TEvHandshake { Owner: 800 Generation: 1 }: sender# [1:37:2067] 2026-01-08T15:23:48.895362Z node 1 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:778: [1:6:2053] Successful handshake: owner# 800, generation# 1 2026-01-08T15:23:48.895426Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:967: [1:3:2050] Handle NKikimrSchemeBoard.TEvCommitGeneration { Owner: 800 Generation: 1 }: sender# [1:36:2066] 2026-01-08T15:23:48.895465Z node 1 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:994: [1:3:2050] Commit generation: 
owner# 800, generation# 1 2026-01-08T15:23:48.895631Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:967: [1:6:2053] Handle NKikimrSchemeBoard.TEvCommitGeneration { Owner: 800 Generation: 1 }: sender# [1:37:2067] 2026-01-08T15:23:48.895659Z node 1 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:994: [1:6:2053] Commit generation: owner# 800, generation# 1 2026-01-08T15:23:48.895742Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:1023: [main][1:39:2069][/root/tenant] Handle {EvResolveReplicasList ReplicaGroups: [{Replicas: [[1:24339059:0], [1:1099535966835:0], [1:2199047594611:0]] WriteOnly: 0 State: 0}]} 2026-01-08T15:23:48.896042Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1070: [1:3:2050] Handle NKikimrSchemeBoard.TEvSubscribe { Path: /root/tenant DomainOwnerId: 1 }: sender# [1:43:2069] 2026-01-08T15:23:48.896081Z node 1 :SCHEME_BOARD_REPLICA INFO: replica.cpp:529: [1:3:2050] Upsert description: path# /root/tenant 2026-01-08T15:23:48.896157Z node 1 :SCHEME_BOARD_REPLICA INFO: replica.cpp:655: [1:3:2050] Subscribe: subscriber# [1:43:2069], path# /root/tenant, domainOwnerId# 1, capabilities# AckNotifications: true 2026-01-08T15:23:48.896254Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1070: [1:6:2053] Handle NKikimrSchemeBoard.TEvSubscribe { Path: /root/tenant DomainOwnerId: 1 }: sender# [1:44:2069] 2026-01-08T15:23:48.896274Z node 1 :SCHEME_BOARD_REPLICA INFO: replica.cpp:529: [1:6:2053] Upsert description: path# /root/tenant 2026-01-08T15:23:48.896296Z node 1 :SCHEME_BOARD_REPLICA INFO: replica.cpp:655: [1:6:2053] Subscribe: subscriber# [1:44:2069], path# /root/tenant, domainOwnerId# 1, capabilities# AckNotifications: true 2026-01-08T15:23:48.896362Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1070: [1:9:2056] Handle NKikimrSchemeBoard.TEvSubscribe { Path: /root/tenant DomainOwnerId: 1 }: sender# [1:45:2069] 2026-01-08T15:23:48.896378Z node 1 :SCHEME_BOARD_REPLICA INFO: replica.cpp:529: [1:9:2056] Upsert description: path# /root/tenant 2026-01-08T15:23:48.896399Z node 1 :SCHEME_BOARD_REPLICA INFO: replica.cpp:655: [1:9:2056] Subscribe: subscriber# [1:45:2069], path# /root/tenant, domainOwnerId# 1, capabilities# AckNotifications: true 2026-01-08T15:23:48.896433Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:359: [replica][1:43:2069][/root/tenant] Handle NKikimrSchemeBoard.TEvNotify { Path: /root/tenant Version: 0 }: sender# [1:3:2050] 2026-01-08T15:23:48.896487Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1098: [1:3:2050] Handle NKikimrSchemeBoard.TEvNotifyAck { Version: 0 }: sender# [1:43:2069] 2026-01-08T15:23:48.896532Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:359: [replica][1:44:2069][/root/tenant] Handle NKikimrSchemeBoard.TEvNotify { Path: /root/tenant Version: 0 }: sender# [1:6:2053] 2026-01-08T15:23:48.896557Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1098: [1:6:2053] Handle NKikimrSchemeBoard.TEvNotifyAck { Version: 0 }: sender# [1:44:2069] 2026-01-08T15:23:48.896585Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:359: [replica][1:45:2069][/root/tenant] Handle NKikimrSchemeBoard.TEvNotify { Path: /root/tenant Version: 0 }: sender# [1:9:2056] 2026-01-08T15:23:48.896616Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1098: [1:9:2056] Handle NKikimrSchemeBoard.TEvNotifyAck { Version: 0 }: sender# [1:45:2069] 2026-01-08T15:23:48.896695Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:814: [main][1:39:2069][/root/tenant] Handle NKikimrSchemeBoard.TEvNotify { Path: /root/tenant Version: 0 }: sender# [1:40:2069] 
2026-01-08T15:23:48.896741Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:814: [main][1:39:2069][/root/tenant] Handle NKikimrSchemeBoard.TEvNotify { Path: /root/tenant Version: 0 }: sender# [1:41:2069] 2026-01-08T15:23:48.896770Z node 1 :SCHEME_BOARD_SUBSCRIBER NOTICE: subscriber.cpp:849: [main][1:39:2069][/root/tenant] Set up state: owner# [1:38:2068], state# { Deleted: 1 Strong: 0 Version: DomainId: AbandonedSchemeShards: there are 0 elements } 2026-01-08T15:23:48.896822Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:814: [main][1:39:2069][/root/tenant] Handle NKikimrSchemeBoard.TEvNotify { Path: /root/tenant Version: 0 }: sender# [1:42:2069] 2026-01-08T15:23:48.896852Z node 1 :SCHEME_BOARD_SUBSCRIBER INFO: subscriber.cpp:867: [main][1:39:2069][/root/tenant] Ignore empty state: owner# [1:38:2068], state# { Deleted: 1 Strong: 0 Version: DomainId: AbandonedSchemeShards: there are 0 elements }, other state# { Deleted: 1 Strong: 0 Version: DomainId: AbandonedSchemeShards: there are 0 elements } =========== !argsLeft.IsDeletion 2026-01-08T15:23:48.897006Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:791: [1:3:2050] Handle NKikimrSchemeBoard.TEvUpdate { Owner: 800 Generation: 1 }: sender# [1:36:2066], cookie# 0, event size# 103 2026-01-08T15:23:48.897033Z node 1 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:831: [1:3:2050] Update description: path# /root/tenant, pathId# [OwnerId: 800, LocalPathId: 2], deletion# false 2026-01-08T15:23:48.897076Z node 1 :SCHEME_BOARD_REPLICA INFO: replica.cpp:559: [1:3:2050] Upsert description: path# /root/tenant, pathId# [OwnerId: 800, LocalPathId: 2], pathDescription# {Status StatusSuccess, Path /root/tenant, PathId [OwnerId: 800, LocalPathId: 2], PathVersion 1, SubdomainPathId [OwnerId: 800, LocalPathId: 2], PathAbandonedTenantsSchemeShards size 0, DescribeSchemeResultSerialized size 60} 2026-01-08T15:23:48.897186Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:359: [replica][1:43:2069][/root/tenant] Handle NKikimrSchemeBoard.TEvNotify { Path: /root/tenant PathId: [OwnerId: 800, LocalPathId: 2] Version: 1 }: sender# [1:3:2050] 2026-01-08T15:23:48.897220Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1098: [1:3:2050] Handle NKikimrSchemeBoard.TEvNotifyAck { Version: 1 }: sender# [1:43:2069] 2026-01-08T15:23:48.897261Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:814: [main][1:39:2069][/root/tenant] Handle NKikimrSchemeBoard.TEvNotify { Path: /root/tenant PathId: [OwnerId: 800, LocalPathId: 2] Version: 1 }: sender# [1:40:2069] 2026-01-08T15:23:48.897305Z node 1 :SCHEME_BOARD_SUBSCRIBER NOTICE: subscriber.cpp:855: [main][1:39:2069][/root/tenant] Update to strong state: owner# [1:38:2068], state# { Deleted: 1 Strong: 0 Version: DomainId: AbandonedSchemeShards: there are 0 elements }, new state# { Deleted: 0 Strong: 1 Version: (PathId: [OwnerId: 800, LocalPathId: 2], Version: 1) DomainId: [OwnerId: 800, LocalPathId: 2] AbandonedSchemeShards: there are 0 elements } =========== argsLeft.GetSuperId() >= argsRight.GetSuperId() =========== Path: "/root/tenant" PathDescription { Self { PathVersion: 1 } DomainDescription { DomainKey { SchemeShard: 800 PathId: 2 } } } PathId: 2 PathOwnerId: 800 =========== Path: "/root/tenant" PathDescription { Self { PathVersion: 1 } DomainDescription { DomainKey { SchemeShard: 800 PathId: 2 } } } PathId: 1 PathOwnerId: 900 2026-01-08T15:23:49.368935Z node 3 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:760: [3:3:2050] Handle NKikimrSchemeBoard.TEvHandshake { Owner: 800 Generation: 1 }: sender# [3:36:2066] 
2026-01-08T15:23:49.369029Z node 3 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:778: [3:3:2050] Successful handshake: owner# 800, generation# 1 2026-01-08T15:23:49.369214Z node 3 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:967: [3:3:2050] Handle NKikimrSchemeBoard.TEvCommitGeneration { Owner: 800 Generation: 1 }: sender# [3:36:2066] 2026-01-08T15:23:49.369252Z node 3 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:994: [3:3:2050] Commit generation: owner# 800, generation# 1 2026-01-08T15:23:49.369303Z node 3 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:760: [3:6:2053] Handle NKikimrSchemeBoard.TEvHandshake { Owner: 900 Generation: 1 }: sender# [3:37:2067] 2026-01-08T15:23:49.369336Z node 3 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:778: [3:6:2053] Successful handshake: owner# 900, generation# 1 2026-01-08T15:23:49.369565Z node 3 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:967: [3:6:2053] Handle NKikimrSchemeBoard.TEvCommitGeneration { Owner: 900 Generation: 1 }: sender# [3:37:2067] 2026-01-08T15:23:49.369615Z node 3 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:994: [3:6:2053] Commit generation: owner# 900, generation# 1 2026-01-08T15:23:49.369726Z node 3 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:1023: [main][3:39:2069][/root/tenant] Handle {EvResolveReplicasList ReplicaGroups: [{Replicas: [[3:24339059:0], [3:1099535966835:0], [3:2199047594611:0]] WriteOnly: 0 State: 0}]} 2026-01-08T15:23:49.370145Z node 3 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1070: [3:3:2050] Handle NKikimrSchemeBoard.TEvSubscribe { Path: /root/tenant DomainOwnerId: 1 }: sender# [3:43:2069] 2026-01-08T15:23:49.370183Z node 3 :SCHEME_BOARD_REPLICA INFO: replica.cpp:529: [3:3:2050] Upsert description: path# /root/tenant 2026-01-08T15:23:49.370282Z node 3 :SCHEME_BOARD_REPLICA INFO: replica.cpp:655: [3:3:2050] Subscribe: subscriber# [3:43:2069], path# /root/tenant, domainOwnerId# 1, capabilities# AckNotifications: true 2026-01-08T15:23:49.370480Z node 3 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1070: [3:6:2053] Handle NKikimrSchemeBoard.TEvSubscribe { Path: /root/tenant DomainOwnerId: 1 }: sender# [3:44:2069] 2026-01-08T15:23:49.370514Z node 3 :SCHEME_BOARD_REPLICA INFO: replica.cpp:529: [3:6:2053] Upsert description: path# /root/tenant 2026-01-08T15:23:49.370568Z node 3 :SCHEME_BOARD_REPLICA INFO: replica.cpp:655: [3:6:2053] Subscribe: subscriber# [3:44:2069], path# /root/tenant, domainOwnerId# 1, capabilities# AckNotifications: true 2026-01-08T15:23:49.370697Z node 3 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1070: [3:9:2056] Handle NKikimrSchemeBoard.TEvSubscribe { Path: /root/tenant DomainOwnerId: 1 }: sender# [3:45:2069] 2026-01-08T15:23:49.370723Z node 3 :SCHEME_BOARD_REPLICA INFO: replica.cpp:529: [3:9:2056] Upsert description: path# /root/tenant 2026-01-08T15:23:49.370762Z node 3 :SCHEME_BOARD_REPLICA INFO: replica.cpp:655: [3:9:2056] Subscribe: subscriber# [3:45:2069], path# /root/tenant, domainOwnerId# 1, capabilities# AckNotifications: true 2026-01-08T15:23:49.370839Z node 3 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:359: [replica][3:43:2069][/root/tenant] Handle NKikimrSchemeBoard.TEvNotify { Path: /root/tenant Version: 0 }: sender# [3:3:2050] 2026-01-08T15:23:49.370895Z node 3 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1098: [3:3:2050] Handle NKikimrSchemeBoard.TEvNotifyAck { Version: 0 }: sender# [3:43:2069] 2026-01-08T15:23:49.370967Z node 3 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:359: [replica][3:44:2069][/root/tenant] Handle NKikimrSchemeBoard.TEvNotify { Path: /root/tenant Version: 0 }: sender# [3:6:2053] 2026-01-08 
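The trace reads as the scheme board protocol between an owner (populator), three replicas, and a subscriber: TEvHandshake plus TEvCommitGeneration pin the owner's generation, TEvSubscribe registers a path subscription, and every TEvNotify is answered with TEvNotifyAck; the main subscriber then merges the per-replica answers ("Set up state", "Ignore empty state"). A compressed sketch of the message shapes, reconstructed only from the fields this log prints and not from the full protobuf definitions:

    #include <cstdint>
    #include <string>

    // Scheme board messages as they appear in the trace above; field sets
    // are reduced to what the log prints.
    struct TEvHandshake        { uint64_t Owner; uint64_t Generation; };
    struct TEvCommitGeneration { uint64_t Owner; uint64_t Generation; };
    struct TEvSubscribe        { std::string Path; uint64_t DomainOwnerId; };
    struct TEvNotify           { std::string Path; uint64_t Version; };
    struct TEvNotifyAck        { uint64_t Version; };

    // Observed order, per replica:
    //   owner      -> replica:    TEvHandshake, then TEvCommitGeneration
    //   subscriber -> replica:    TEvSubscribe   (replica upserts the path)
    //   replica    -> subscriber: TEvNotify      (Version 0 while path unknown)
    //   subscriber -> replica:    TEvNotifyAck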
... 94: [399:3:2050] Commit generation: owner# 910, generation# 1 2026-01-08T15:25:34.502637Z node 399 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:760: [399:6:2053] Handle NKikimrSchemeBoard.TEvHandshake { Owner: 910 Generation: 1 }: sender# [399:37:2067] 2026-01-08T15:25:34.502663Z node 399 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:778: [399:6:2053] Successful handshake: owner# 910, generation# 1 2026-01-08T15:25:34.502825Z node 399 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:967: [399:6:2053] Handle NKikimrSchemeBoard.TEvCommitGeneration { Owner: 910 Generation: 1 }: sender# [399:37:2067] 2026-01-08T15:25:34.502853Z node 399 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:994: [399:6:2053] Commit generation: owner# 910, generation# 1 2026-01-08T15:25:34.502943Z node 399 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:1023: [main][399:39:2069][/Root/Tenant/table_inside] Handle {EvResolveReplicasList ReplicaGroups: [{Replicas: [[399:24339059:0], [399:1099535966835:0], [399:2199047594611:0]] WriteOnly: 0 State: 0}]} 2026-01-08T15:25:34.503339Z node 399 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1070: [399:3:2050] Handle NKikimrSchemeBoard.TEvSubscribe { Path: /Root/Tenant/table_inside DomainOwnerId: 1 }: sender# [399:43:2069] 2026-01-08T15:25:34.503366Z node 399 :SCHEME_BOARD_REPLICA INFO: replica.cpp:529: [399:3:2050] Upsert description: path# /Root/Tenant/table_inside 2026-01-08T15:25:34.503428Z node 399 :SCHEME_BOARD_REPLICA INFO: replica.cpp:655: [399:3:2050] Subscribe: subscriber# [399:43:2069], path# /Root/Tenant/table_inside, domainOwnerId# 1, capabilities# AckNotifications: true 2026-01-08T15:25:34.503564Z node 399 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1070: [399:6:2053] Handle NKikimrSchemeBoard.TEvSubscribe { Path: /Root/Tenant/table_inside DomainOwnerId: 1 }: sender# [399:44:2069] 2026-01-08T15:25:34.503589Z node 399 :SCHEME_BOARD_REPLICA INFO: replica.cpp:529: [399:6:2053] Upsert description: path# /Root/Tenant/table_inside 2026-01-08T15:25:34.503622Z node 399 :SCHEME_BOARD_REPLICA INFO: replica.cpp:655: [399:6:2053] Subscribe: subscriber# [399:44:2069], path# /Root/Tenant/table_inside, domainOwnerId# 1, capabilities# AckNotifications: true 2026-01-08T15:25:34.503742Z node 399 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1070: [399:9:2056] Handle NKikimrSchemeBoard.TEvSubscribe { Path: /Root/Tenant/table_inside DomainOwnerId: 1 }: sender# [399:45:2069] 2026-01-08T15:25:34.503764Z node 399 :SCHEME_BOARD_REPLICA INFO: replica.cpp:529: [399:9:2056] Upsert description: path# /Root/Tenant/table_inside 2026-01-08T15:25:34.503799Z node 399 :SCHEME_BOARD_REPLICA INFO: replica.cpp:655: [399:9:2056] Subscribe: subscriber# [399:45:2069], path# /Root/Tenant/table_inside, domainOwnerId# 1, capabilities# AckNotifications: true 2026-01-08T15:25:34.503854Z node 399 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:359: [replica][399:43:2069][/Root/Tenant/table_inside] Handle NKikimrSchemeBoard.TEvNotify { Path: /Root/Tenant/table_inside Version: 0 }: sender# [399:3:2050] 2026-01-08T15:25:34.503900Z node 399 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1098: [399:3:2050] Handle NKikimrSchemeBoard.TEvNotifyAck { Version: 0 }: sender# [399:43:2069] 2026-01-08T15:25:34.503939Z node 399 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:359: [replica][399:44:2069][/Root/Tenant/table_inside] Handle NKikimrSchemeBoard.TEvNotify { Path: /Root/Tenant/table_inside Version: 0 }: sender# [399:6:2053] 2026-01-08T15:25:34.503976Z node 399 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1098: [399:6:2053] Handle 
NKikimrSchemeBoard.TEvNotifyAck { Version: 0 }: sender# [399:44:2069] 2026-01-08T15:25:34.504016Z node 399 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:359: [replica][399:45:2069][/Root/Tenant/table_inside] Handle NKikimrSchemeBoard.TEvNotify { Path: /Root/Tenant/table_inside Version: 0 }: sender# [399:9:2056] 2026-01-08T15:25:34.504053Z node 399 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1098: [399:9:2056] Handle NKikimrSchemeBoard.TEvNotifyAck { Version: 0 }: sender# [399:45:2069] 2026-01-08T15:25:34.504122Z node 399 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:814: [main][399:39:2069][/Root/Tenant/table_inside] Handle NKikimrSchemeBoard.TEvNotify { Path: /Root/Tenant/table_inside Version: 0 }: sender# [399:40:2069] 2026-01-08T15:25:34.504178Z node 399 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:814: [main][399:39:2069][/Root/Tenant/table_inside] Handle NKikimrSchemeBoard.TEvNotify { Path: /Root/Tenant/table_inside Version: 0 }: sender# [399:41:2069] 2026-01-08T15:25:34.504224Z node 399 :SCHEME_BOARD_SUBSCRIBER NOTICE: subscriber.cpp:849: [main][399:39:2069][/Root/Tenant/table_inside] Set up state: owner# [399:38:2068], state# { Deleted: 1 Strong: 0 Version: DomainId: AbandonedSchemeShards: there are 0 elements } 2026-01-08T15:25:34.504280Z node 399 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:814: [main][399:39:2069][/Root/Tenant/table_inside] Handle NKikimrSchemeBoard.TEvNotify { Path: /Root/Tenant/table_inside Version: 0 }: sender# [399:42:2069] 2026-01-08T15:25:34.504319Z node 399 :SCHEME_BOARD_SUBSCRIBER INFO: subscriber.cpp:867: [main][399:39:2069][/Root/Tenant/table_inside] Ignore empty state: owner# [399:38:2068], state# { Deleted: 1 Strong: 0 Version: DomainId: AbandonedSchemeShards: there are 0 elements }, other state# { Deleted: 1 Strong: 0 Version: DomainId: AbandonedSchemeShards: there are 0 elements } =========== argsLeft.GetSuperId() >= argsRight.GetSuperId() =========== Path: "/Root/Tenant/table_inside" PathDescription { Self { PathVersion: 18446744073709551615 } DomainDescription { DomainKey { SchemeShard: 800 PathId: 333 } } } PathId: 9 PathOwnerId: 910 =========== Path: "/Root/Tenant/table_inside" PathDescription { Self { PathVersion: 18446744073709551615 } DomainDescription { DomainKey { SchemeShard: 800 PathId: 333 } } } PathId: 9 PathOwnerId: 910 2026-01-08T15:25:35.027262Z node 401 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:760: [401:3:2050] Handle NKikimrSchemeBoard.TEvHandshake { Owner: 910 Generation: 1 }: sender# [401:36:2066] 2026-01-08T15:25:35.027316Z node 401 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:778: [401:3:2050] Successful handshake: owner# 910, generation# 1 2026-01-08T15:25:35.027433Z node 401 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:967: [401:3:2050] Handle NKikimrSchemeBoard.TEvCommitGeneration { Owner: 910 Generation: 1 }: sender# [401:36:2066] 2026-01-08T15:25:35.027465Z node 401 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:994: [401:3:2050] Commit generation: owner# 910, generation# 1 2026-01-08T15:25:35.027508Z node 401 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:760: [401:6:2053] Handle NKikimrSchemeBoard.TEvHandshake { Owner: 910 Generation: 1 }: sender# [401:37:2067] 2026-01-08T15:25:35.027536Z node 401 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:778: [401:6:2053] Successful handshake: owner# 910, generation# 1 2026-01-08T15:25:35.027659Z node 401 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:967: [401:6:2053] Handle NKikimrSchemeBoard.TEvCommitGeneration { Owner: 910 Generation: 1 }: sender# [401:37:2067] 2026-01-08T15:25:35.027688Z node 401 
:SCHEME_BOARD_REPLICA NOTICE: replica.cpp:994: [401:6:2053] Commit generation: owner# 910, generation# 1 2026-01-08T15:25:35.027846Z node 401 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:1023: [main][401:39:2069][/Root/Tenant/table_inside] Handle {EvResolveReplicasList ReplicaGroups: [{Replicas: [[401:24339059:0], [401:1099535966835:0], [401:2199047594611:0]] WriteOnly: 0 State: 0}]} 2026-01-08T15:25:35.028239Z node 401 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1070: [401:3:2050] Handle NKikimrSchemeBoard.TEvSubscribe { Path: /Root/Tenant/table_inside DomainOwnerId: 1 }: sender# [401:43:2069] 2026-01-08T15:25:35.028271Z node 401 :SCHEME_BOARD_REPLICA INFO: replica.cpp:529: [401:3:2050] Upsert description: path# /Root/Tenant/table_inside 2026-01-08T15:25:35.028336Z node 401 :SCHEME_BOARD_REPLICA INFO: replica.cpp:655: [401:3:2050] Subscribe: subscriber# [401:43:2069], path# /Root/Tenant/table_inside, domainOwnerId# 1, capabilities# AckNotifications: true 2026-01-08T15:25:35.028477Z node 401 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1070: [401:6:2053] Handle NKikimrSchemeBoard.TEvSubscribe { Path: /Root/Tenant/table_inside DomainOwnerId: 1 }: sender# [401:44:2069] 2026-01-08T15:25:35.028502Z node 401 :SCHEME_BOARD_REPLICA INFO: replica.cpp:529: [401:6:2053] Upsert description: path# /Root/Tenant/table_inside 2026-01-08T15:25:35.028538Z node 401 :SCHEME_BOARD_REPLICA INFO: replica.cpp:655: [401:6:2053] Subscribe: subscriber# [401:44:2069], path# /Root/Tenant/table_inside, domainOwnerId# 1, capabilities# AckNotifications: true 2026-01-08T15:25:35.028664Z node 401 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1070: [401:9:2056] Handle NKikimrSchemeBoard.TEvSubscribe { Path: /Root/Tenant/table_inside DomainOwnerId: 1 }: sender# [401:45:2069] 2026-01-08T15:25:35.028688Z node 401 :SCHEME_BOARD_REPLICA INFO: replica.cpp:529: [401:9:2056] Upsert description: path# /Root/Tenant/table_inside 2026-01-08T15:25:35.028723Z node 401 :SCHEME_BOARD_REPLICA INFO: replica.cpp:655: [401:9:2056] Subscribe: subscriber# [401:45:2069], path# /Root/Tenant/table_inside, domainOwnerId# 1, capabilities# AckNotifications: true 2026-01-08T15:25:35.028785Z node 401 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:359: [replica][401:43:2069][/Root/Tenant/table_inside] Handle NKikimrSchemeBoard.TEvNotify { Path: /Root/Tenant/table_inside Version: 0 }: sender# [401:3:2050] 2026-01-08T15:25:35.028836Z node 401 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1098: [401:3:2050] Handle NKikimrSchemeBoard.TEvNotifyAck { Version: 0 }: sender# [401:43:2069] 2026-01-08T15:25:35.028903Z node 401 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:359: [replica][401:44:2069][/Root/Tenant/table_inside] Handle NKikimrSchemeBoard.TEvNotify { Path: /Root/Tenant/table_inside Version: 0 }: sender# [401:6:2053] 2026-01-08T15:25:35.028945Z node 401 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1098: [401:6:2053] Handle NKikimrSchemeBoard.TEvNotifyAck { Version: 0 }: sender# [401:44:2069] 2026-01-08T15:25:35.028990Z node 401 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:359: [replica][401:45:2069][/Root/Tenant/table_inside] Handle NKikimrSchemeBoard.TEvNotify { Path: /Root/Tenant/table_inside Version: 0 }: sender# [401:9:2056] 2026-01-08T15:25:35.029031Z node 401 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1098: [401:9:2056] Handle NKikimrSchemeBoard.TEvNotifyAck { Version: 0 }: sender# [401:45:2069] 2026-01-08T15:25:35.029107Z node 401 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:814: [main][401:39:2069][/Root/Tenant/table_inside] Handle NKikimrSchemeBoard.TEvNotify 
{ Path: /Root/Tenant/table_inside Version: 0 }: sender# [401:40:2069] 2026-01-08T15:25:35.029174Z node 401 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:814: [main][401:39:2069][/Root/Tenant/table_inside] Handle NKikimrSchemeBoard.TEvNotify { Path: /Root/Tenant/table_inside Version: 0 }: sender# [401:41:2069] 2026-01-08T15:25:35.029224Z node 401 :SCHEME_BOARD_SUBSCRIBER NOTICE: subscriber.cpp:849: [main][401:39:2069][/Root/Tenant/table_inside] Set up state: owner# [401:38:2068], state# { Deleted: 1 Strong: 0 Version: DomainId: AbandonedSchemeShards: there are 0 elements } 2026-01-08T15:25:35.029268Z node 401 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:814: [main][401:39:2069][/Root/Tenant/table_inside] Handle NKikimrSchemeBoard.TEvNotify { Path: /Root/Tenant/table_inside Version: 0 }: sender# [401:42:2069] 2026-01-08T15:25:35.029307Z node 401 :SCHEME_BOARD_SUBSCRIBER INFO: subscriber.cpp:867: [main][401:39:2069][/Root/Tenant/table_inside] Ignore empty state: owner# [401:38:2068], state# { Deleted: 1 Strong: 0 Version: DomainId: AbandonedSchemeShards: there are 0 elements }, other state# { Deleted: 1 Strong: 0 Version: DomainId: AbandonedSchemeShards: there are 0 elements } =========== argsLeft.GetSuperId() >= argsRight.GetSuperId() |84.9%| [TM] {BAZEL_UPLOAD} ydb/core/tx/scheme_board/ut_subscriber/unittest >> ProgramStep::Compares [GOOD] >> ProgramStep::Logic0 [GOOD] >> ProgramStep::Logic1 [GOOD] >> ProgramStep::StartsWith [GOOD] >> ProgramStep::EndsWith [GOOD] >> ProgramStep::MatchSubstring [GOOD] >> ProgramStep::StartsWithIgnoreCase [GOOD] >> ProgramStep::EndsWithIgnoreCase [GOOD] >> ProgramStep::MatchSubstringIgnoreCase >> ProgramStep::MatchSubstringIgnoreCase [GOOD] >> ProgramStep::ScalarTest [GOOD] >> ProgramStep::TestValueFromNull [GOOD] >> ProgramStep::MergeFilterSimple >> ProgramStep::MergeFilterSimple [GOOD] >> ProgramStep::Projection [GOOD] >> ProgramStep::MinMax [GOOD] >> ProgramStep::Sum [GOOD] >> ProgramStep::SumGroupBy [GOOD] >> ProgramStep::SumGroupByNotNull [GOOD] >> ProgramStep::MinMaxSomeGroupBy [GOOD] >> ProgramStep::MinMaxSomeGroupByNotNull [GOOD] >> Slicer::SplitBySizes [GOOD] >> SortableBatchPosition::FindPosition [GOOD] >> SortableBatchPosition::MergingSortedInputStreamReversedWithOneSearchPoint [GOOD] >> SortableBatchPosition::MergingSortedInputStreamReversedWithRangeSearch [GOOD] |84.9%| [TA] $(B)/ydb/core/tx/scheme_board/ut_subscriber/test-results/unittest/{meta.json ... results_accumulator.log} |84.9%| [TA] {BAZEL_UPLOAD} $(B)/ydb/core/tx/scheme_board/ut_subscriber/test-results/unittest/{meta.json ... 
results_accumulator.log} ------- [TS] {asan, default-linux-x86_64, release} ydb/core/formats/arrow/ut/unittest >> SortableBatchPosition::MergingSortedInputStreamReversedWithRangeSearch [GOOD] Test command err: Process: 100000d;/100000; 10000d;/10000; NO_CODEC(poolsize=1024;keylen=1) 0.2021203448 0.2210911404 NO_CODEC(poolsize=1024;keylen=10) 0.1534132783 0.2482180533 NO_CODEC(poolsize=1024;keylen=16) 0.1104676508 0.2045372848 NO_CODEC(poolsize=1024;keylen=32) 0.06592569055 0.1591802296 NO_CODEC(poolsize=1024;keylen=64) 0.03972180035 0.1324717476 NO_CODEC(poolsize=128;keylen=1) 0.2016566193 0.2164784476 NO_CODEC(poolsize=128;keylen=10) 0.07304169975 0.08752922393 NO_CODEC(poolsize=128;keylen=16) 0.05151637558 0.06514358749 NO_CODEC(poolsize=128;keylen=32) 0.02919093319 0.04189888314 NO_CODEC(poolsize=128;keylen=64) 0.01605694811 0.02821124922 NO_CODEC(poolsize=16;keylen=1) 0.2010010074 0.2099570542 NO_CODEC(poolsize=16;keylen=10) 0.0719219365 0.07635285397 NO_CODEC(poolsize=16;keylen=16) 0.05039654131 0.05396013899 NO_CODEC(poolsize=16;keylen=32) 0.02807102527 0.03070808446 NO_CODEC(poolsize=16;keylen=64) 0.01493699686 0.01701612239 NO_CODEC(poolsize=1;keylen=1) 0.2008730831 0.2086845872 NO_CODEC(poolsize=1;keylen=10) 0.07177339648 0.07487027428 NO_CODEC(poolsize=1;keylen=16) 0.0502445638 0.05244238527 NO_CODEC(poolsize=1;keylen=32) 0.02791992658 0.0291982148 NO_CODEC(poolsize=1;keylen=64) 0.01478641518 0.01551089526 NO_CODEC(poolsize=512;keylen=1) 0.2021203448 0.2210911404 NO_CODEC(poolsize=512;keylen=10) 0.1482943606 0.1971260763 NO_CODEC(poolsize=512;keylen=16) 0.1053484084 0.1534129488 NO_CODEC(poolsize=512;keylen=32) 0.0608061115 0.1080222928 NO_CODEC(poolsize=512;keylen=64) 0.03460202321 0.08129402495 NO_CODEC(poolsize=64;keylen=1) 0.2013687897 0.2136153969 NO_CODEC(poolsize=64;keylen=10) 0.07240183504 0.08114272681 NO_CODEC(poolsize=64;keylen=16) 0.05087647028 0.05875304549 NO_CODEC(poolsize=64;keylen=32) 0.02855098581 0.03550414104 NO_CODEC(poolsize=64;keylen=64) 0.01541697597 0.02181403389 lz4(poolsize=1024;keylen=1) 0.006629768257 0.05541610349 lz4(poolsize=1024;keylen=10) 0.04233951498 0.3344832994 lz4(poolsize=1024;keylen=16) 0.05657489465 0.404264214 lz4(poolsize=1024;keylen=32) 0.09037137941 0.5318074361 lz4(poolsize=1024;keylen=64) 0.01074936154 0.1063492063 lz4(poolsize=128;keylen=1) 0.003831111821 0.02881389382 lz4(poolsize=128;keylen=10) 0.00718182175 0.06087121933 lz4(poolsize=128;keylen=16) 0.008735936466 0.07523964551 lz4(poolsize=128;keylen=32) 0.01375268158 0.117441454 lz4(poolsize=128;keylen=64) 0.02262360212 0.1850289108 lz4(poolsize=16;keylen=1) 0.00273442178 0.01820340324 lz4(poolsize=16;keylen=10) 0.003078137332 0.02169239789 lz4(poolsize=16;keylen=16) 0.003266503667 0.02356577168 lz4(poolsize=16;keylen=32) 0.003742685614 0.02844311377 lz4(poolsize=16;keylen=64) 0.004937163375 0.03979647465 lz4(poolsize=1;keylen=1) 0.00251497006 0.01603325416 lz4(poolsize=1;keylen=10) 0.002531395234 0.01628089447 lz4(poolsize=1;keylen=16) 0.002515970516 0.01617933723 lz4(poolsize=1;keylen=32) 0.00251450677 0.01630226314 lz4(poolsize=1;keylen=64) 0.002511620933 0.01653353149 lz4(poolsize=512;keylen=1) 0.005362411291 0.04359726295 lz4(poolsize=512;keylen=10) 0.02347472854 0.1933066062 lz4(poolsize=512;keylen=16) 0.03056053336 0.2426853056 lz4(poolsize=512;keylen=32) 0.04856356058 0.3467897492 lz4(poolsize=512;keylen=64) 0.04102771881 0.3228658321 lz4(poolsize=64;keylen=1) 0.003312844256 0.02372010279 lz4(poolsize=64;keylen=10) 0.004839661617 0.03863241259 lz4(poolsize=64;keylen=16) 
0.005715507689 0.04687204687 lz4(poolsize=64;keylen=32) 0.007821957352 0.06669044223 lz4(poolsize=64;keylen=64) 0.01258912656 0.1073551894 zstd(poolsize=1024;keylen=1) 0.007324840764 0.0754840827 zstd(poolsize=1024;keylen=10) 0.04506846012 0.3776978417 zstd(poolsize=1024;keylen=16) 0.0655640205 0.4694540288 zstd(poolsize=1024;keylen=32) 0.1110720087 0.6098141264 zstd(poolsize=1024;keylen=64) 0.1914108287 0.7447345433 zstd(poolsize=128;keylen=1) 0.003769847609 0.04002713704 zstd(poolsize=128;keylen=10) 0.007456731695 0.07809798271 zstd(poolsize=128;keylen=16) 0.0102539786 0.1029455519 zstd(poolsize=128;keylen=32) 0.01677217062 0.1578947368 zstd(poolsize=128;keylen=64) 0.03005940945 0.2517949988 zstd(poolsize=16;keylen=1) 0.002620896858 0.02794819359 zstd(poolsize=16;keylen=10) 0.002816201441 0.03048416019 zstd(poolsize=16;keylen=16) 0.003368308096 0.03570300158 zstd(poolsize=16;keylen=32) 0.004159808469 0.0434375 zstd(poolsize=16;keylen=64) 0.005779996974 0.05875115349 zstd(poolsize=1;keylen=1) 0.002461243407 0.02626193724 zstd(poolsize=1;keylen=10) 0.002154636612 0.0234375 zstd(poolsize=1;keylen=16) 0.002356872222 0.02519132653 zstd(poolsize=1;keylen=32) 0.002427911996 0.02573879886 zstd(poolsize=1;keylen=64) 0.00258021431 0.02699269609 zstd(poolsize=512;keylen=1) 0.005583027596 0.05848930481 zstd(poolsize=512;keylen=10) 0.0236929438 0.2237078941 zstd(poolsize=512;keylen=16) 0.03443366072 0.2936507937 zstd(poolsize=512;keylen=32) 0.05917328099 0.4212765957 zstd(poolsize=512;keylen=64) 0.1058929843 0.5749553837 zstd(poolsize=64;keylen=1) 0.00319560285 0.03401360544 zstd(poolsize=64;keylen=10) 0.004852093844 0.05176470588 zstd(poolsize=64;keylen=16) 0.00633344236 0.06557881773 zstd(poolsize=64;keylen=32) 0.009647738439 0.09619952494 zstd(poolsize=64;keylen=64) 0.01626771323 0.1514644351 NO_CODEC --1000 ----1 ------1 FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=5168;columns=1; --------5168 / 5296 = 2.416918429% ------10 FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=14168;columns=1; --------14168 / 14296 = 0.8953553442% ------16 FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=20168;columns=1; --------20168 / 20296 = 0.6306661411% ------32 FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=36168;columns=1; --------36168 / 36296 = 0.35265594% ------64 FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=68168;columns=1; --------68168 / 68296 = 0.1874194682% ----16 ------1 FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=5168;columns=1; --------5168 / 5296 = 2.416918429% ------10 FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=14168;columns=1; --------14168 / 14296 = 0.8953553442% ------16 FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=20168;columns=1; --------20168 / 20296 = 0.6306661411% ------32 FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=36168;columns=1; --------36168 / 36296 = 0.35265594% ------64 FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=68168;columns=1; --------68168 / 68296 = 0.1874194682% ----64 ------1 FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=5168;columns=1; 
--------5168 / 5296 = 2.416918429% ------10 FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=14168;columns=1; --------14168 / 14296 = 0.8953553442% ------16 FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=20168;columns=1; --------20168 / 20296 = 0.6306661411% ------32 FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=36168;columns=1; --------36168 / 36296 = 0.35265594% ------64 FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=68168;columns=1; --------68168 / 68296 = 0.1874194682% ----128 ------1 FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=5168;columns=1; --------5168 / 5296 = 2.416918429% ------10 FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=14168;columns=1; --------14168 / 14296 = 0.8953553442% ------16 FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=20168;columns=1; --------20168 / 20296 = 0.6306661411% ------32 FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=36168;columns=1; --------36168 / 36296 = 0.35265594% ------64 FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=68168;columns=1; --------68168 / 68296 = 0.1874194682% ----512 ------1 FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=5168;columns=1; --------5168 / 5296 = 2.416918429% ------10 FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=14168;columns=1; --------14168 / 14296 = 0.8953553442% ------16 FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=20168;columns=1; --------20168 / 20296 = 0.6306661411% ------32 FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=36168;columns=1; --------36168 / 36296 = 0.35265594% ------64 FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=68168;columns=1; --------68168 / 68296 = 0.1874194682% ----1024 ------1 FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=5168;columns=1; --------5168 / 5296 = 2.416918429% ------10 FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=14168;columns=1; --------14168 / 14296 = 0.8953553442% ------16 FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=20168;columns=1; --------20168 / 20296 = 0.6306661411% ------32 FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=36168;columns=1; --------36168 / 36296 = 0.35265594% ------64 FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=68168;columns=1; --------68168 / 68296 = 0.1874194682% --10000 ---- ... 
4->N0->N5[color=red]; }; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=192;columns=1; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=216;columns=1; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=334;fline=abstract.cpp:30;execute=ReserveMemory; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=334;fline=abstract.cpp:30;execute=FetchOriginalData; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=334;fline=abstract.cpp:30;execute=AssembleOriginalData; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=192;columns=1; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=334;fline=abstract.cpp:30;execute=AssembleOriginalData; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=216;columns=1; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=334;fline=abstract.cpp:30;execute=Aggregation; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=334;fline=abstract.cpp:30;execute=Projection; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2101;fline=graph_execute.cpp:162;graph_constructed=digraph program {N7[shape=box, label="N0(0):{\"p\":{\"data\":[{\"name\":\"x\",\"id\":1},{\"name\":\"y\",\"id\":2}]},\"o\":\"0\",\"t\":\"ReserveMemory\"}\n"]; N0[shape=box, label="N4(18):{\"a\":true,\"i\":\"1,2\",\"p\":{\"options\":[\"{3(Min):[1]}\",\"{4(Min):[2]}\"],\"type\":\"AGGREGATION\",\"keys\":[1]},\"o\":\"3,4\",\"t\":\"Aggregation\"}\nREMOVE:2"]; N2 -> N0[label="1"]; N4 -> N0[label="2"]; N2[shape=box, label="N2(9):{\"i\":\"1\",\"p\":{\"address\":{\"name\":\"x\",\"id\":1}},\"o\":\"1\",\"t\":\"AssembleOriginalData\"}\n",style=filled,color="#FFFF88"]; N6 -> N2[label="1"]; N4[shape=box, label="N3(9):{\"i\":\"2\",\"p\":{\"address\":{\"name\":\"y\",\"id\":2}},\"o\":\"2\",\"t\":\"AssembleOriginalData\"}\n",style=filled,color="#FFFF88"]; N6 -> N4[label="1"]; N5[shape=box, label="N5(27):{\"i\":\"1,3,4\",\"t\":\"Projection\"}\n",style=filled,color="#FFAAAA"]; N2 -> N5[label="1"]; N0 -> N5[label="2"]; N6[shape=box, label="N1(4):{\"i\":\"0\",\"p\":{\"data\":[{\"name\":\"x\",\"id\":1},{\"name\":\"y\",\"id\":2}]},\"o\":\"1,2\",\"t\":\"FetchOriginalData\"}\n",style=filled,color="#FFFF88"]; N7 -> N6[label="1"]; N7->N6->N2->N4->N0->N5[color=red]; }; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=192;columns=1; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=192;columns=1; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=334;fline=abstract.cpp:30;execute=ReserveMemory; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=334;fline=abstract.cpp:30;execute=FetchOriginalData; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=334;fline=abstract.cpp:30;execute=AssembleOriginalData; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=192;columns=1; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=334;fline=abstract.cpp:30;execute=AssembleOriginalData; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=192;columns=1; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=334;fline=abstract.cpp:30;execute=Aggregation; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=334;fline=abstract.cpp:30;execute=Projection; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2101;fline=graph_execute.cpp:162;graph_constructed=digraph program {N7[shape=box, 
label="N0(0):{\"p\":{\"data\":[{\"name\":\"x\",\"id\":1},{\"name\":\"y\",\"id\":2}]},\"o\":\"0\",\"t\":\"ReserveMemory\"}\n"]; N0[shape=box, label="N4(18):{\"a\":true,\"i\":\"1,2\",\"p\":{\"options\":[\"{3(Max):[1]}\",\"{4(Max):[2]}\"],\"type\":\"AGGREGATION\",\"keys\":[1]},\"o\":\"3,4\",\"t\":\"Aggregation\"}\nREMOVE:2"]; N2 -> N0[label="1"]; N4 -> N0[label="2"]; N2[shape=box, label="N2(9):{\"i\":\"1\",\"p\":{\"address\":{\"name\":\"x\",\"id\":1}},\"o\":\"1\",\"t\":\"AssembleOriginalData\"}\n",style=filled,color="#FFFF88"]; N6 -> N2[label="1"]; N4[shape=box, label="N3(9):{\"i\":\"2\",\"p\":{\"address\":{\"name\":\"y\",\"id\":2}},\"o\":\"2\",\"t\":\"AssembleOriginalData\"}\n",style=filled,color="#FFFF88"]; N6 -> N4[label="1"]; N5[shape=box, label="N5(27):{\"i\":\"1,3,4\",\"t\":\"Projection\"}\n",style=filled,color="#FFAAAA"]; N2 -> N5[label="1"]; N0 -> N5[label="2"]; N6[shape=box, label="N1(4):{\"i\":\"0\",\"p\":{\"data\":[{\"name\":\"x\",\"id\":1},{\"name\":\"y\",\"id\":2}]},\"o\":\"1,2\",\"t\":\"FetchOriginalData\"}\n",style=filled,color="#FFFF88"]; N7 -> N6[label="1"]; N7->N6->N2->N4->N0->N5[color=red]; }; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=192;columns=1; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=192;columns=1; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=334;fline=abstract.cpp:30;execute=ReserveMemory; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=334;fline=abstract.cpp:30;execute=FetchOriginalData; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=334;fline=abstract.cpp:30;execute=AssembleOriginalData; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=192;columns=1; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=334;fline=abstract.cpp:30;execute=AssembleOriginalData; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=192;columns=1; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=334;fline=abstract.cpp:30;execute=Aggregation; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=334;fline=abstract.cpp:30;execute=Projection; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2101;fline=graph_execute.cpp:162;graph_constructed=digraph program {N7[shape=box, label="N0(0):{\"p\":{\"data\":[{\"name\":\"x\",\"id\":1},{\"name\":\"y\",\"id\":2}]},\"o\":\"0\",\"t\":\"ReserveMemory\"}\n"]; N0[shape=box, label="N4(18):{\"a\":true,\"i\":\"1,2\",\"p\":{\"options\":[\"{3(Some):[1]}\",\"{4(Some):[2]}\"],\"type\":\"AGGREGATION\",\"keys\":[1]},\"o\":\"3,4\",\"t\":\"Aggregation\"}\nREMOVE:2"]; N2 -> N0[label="1"]; N4 -> N0[label="2"]; N2[shape=box, label="N2(9):{\"i\":\"1\",\"p\":{\"address\":{\"name\":\"x\",\"id\":1}},\"o\":\"1\",\"t\":\"AssembleOriginalData\"}\n",style=filled,color="#FFFF88"]; N6 -> N2[label="1"]; N4[shape=box, label="N3(9):{\"i\":\"2\",\"p\":{\"address\":{\"name\":\"y\",\"id\":2}},\"o\":\"2\",\"t\":\"AssembleOriginalData\"}\n",style=filled,color="#FFFF88"]; N6 -> N4[label="1"]; N5[shape=box, label="N5(27):{\"i\":\"1,3,4\",\"t\":\"Projection\"}\n",style=filled,color="#FFAAAA"]; N2 -> N5[label="1"]; N0 -> N5[label="2"]; N6[shape=box, label="N1(4):{\"i\":\"0\",\"p\":{\"data\":[{\"name\":\"x\",\"id\":1},{\"name\":\"y\",\"id\":2}]},\"o\":\"1,2\",\"t\":\"FetchOriginalData\"}\n",style=filled,color="#FFFF88"]; N7 -> N6[label="1"]; N7->N6->N2->N4->N0->N5[color=red]; }; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=192;columns=1; 
FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=192;columns=1; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=334;fline=abstract.cpp:30;execute=ReserveMemory; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=334;fline=abstract.cpp:30;execute=FetchOriginalData; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=334;fline=abstract.cpp:30;execute=AssembleOriginalData; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=192;columns=1; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=334;fline=abstract.cpp:30;execute=AssembleOriginalData; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=192;columns=1; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=334;fline=abstract.cpp:30;execute=Aggregation; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=334;fline=abstract.cpp:30;execute=Projection; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=272;columns=1; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=232;columns=1; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=240;columns=1; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=232;columns=1; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=248;columns=1; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=232;columns=1; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=240;columns=1; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=232;columns=1; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=248;columns=1; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=merger.cpp:141;pos={"sorting":{"sorting_columns":[{"name":"id1","value":"1111"}],"fields":["id1: int32 not null"]},"reverse":true,"position":0,"records_count":1};heap={"data":{"sorting_columns":[{"name":"id1","value":"2222"},{"name":"value","value":"8888"}],"fields":["id1: int32 not null","value: int32 not null"]},"sorting":{"sorting_columns":[{"name":"id1","value":"2222"}],"fields":["id1: int32 not null"]},"reverse":true,"position":1,"records_count":2}; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=merger.cpp:157;pos={"sorting":{"sorting_columns":[{"name":"id1","value":"1111"}],"fields":["id1: int32 not null"]},"reverse":true,"position":0,"records_count":1};heap={"data":{"sorting_columns":[{"name":"id1","value":"1111"},{"name":"value","value":"8888"}],"fields":["id1: int32 not null","value: int32 not null"]},"sorting":{"sorting_columns":[{"name":"id1","value":"1111"}],"fields":["id1: int32 not null"]},"reverse":true,"position":0,"records_count":2}; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=merger.cpp:157;pos={"sorting":{"sorting_columns":[{"name":"id1","value":"1111"}],"fields":["id1: int32 not null"]},"reverse":true,"position":0,"records_count":1};heap={"data":{"sorting_columns":[{"name":"id1","value":"2222"},{"name":"value","value":"7777"}],"fields":["id1: int32 not null","value: int32 not null"]},"sorting":{"sorting_columns":[{"name":"id1","value":"2222"}],"fields":["id1: int32 not null"]},"reverse":true,"position":0,"records_count":1}; 
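The merger.cpp pos=/heap= records above describe a reverse (descending) merge over batches that are each sorted ascending by the key column id1: pos is the current search position in one input, while heap holds the candidate head row of another input, with position/records_count tracking how far each batch has been consumed. A toy model of that reverse k-way merge, using simplified (id1, value) rows taken from the values visible in these logs — an illustration only, not the actual NKikimr merger:

import heapq

# Each batch is sorted ascending by id1; the merge walks them back-to-front,
# mirroring "reverse":true in the position records above.
batches = [
    [(1111, 7777), (2222, 7777)],
    [(1111, 8888), (2222, 8888), (3333, 8888), (4444, 8888)],
]

def merge_reversed(batches):
    # Max-heap via negated keys: entries are (-id1, batch_index, position).
    heap = [(-b[-1][0], i, len(b) - 1) for i, b in enumerate(batches) if b]
    heapq.heapify(heap)
    while heap:
        neg_key, i, pos = heapq.heappop(heap)
        yield batches[i][pos]
        if pos > 0:  # advance this batch toward its front
            heapq.heappush(heap, (-batches[i][pos - 1][0], i, pos - 1))

print(list(merge_reversed(batches)))
# [(4444, 8888), (3333, 8888), (2222, 7777), (2222, 8888),
#  (1111, 7777), (1111, 8888)]
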
FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=merger.cpp:141;pos={"sorting":{"sorting_columns":[{"name":"id1","value":"3333"}],"fields":["id1: int32 not null"]},"reverse":true,"position":0,"records_count":1};heap={"data":{"sorting_columns":[{"name":"id1","value":"4444"},{"name":"value","value":"8888"}],"fields":["id1: int32 not null","value: int32 not null"]},"sorting":{"sorting_columns":[{"name":"id1","value":"4444"}],"fields":["id1: int32 not null"]},"reverse":true,"position":3,"records_count":4}; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=merger.cpp:157;pos={"sorting":{"sorting_columns":[{"name":"id1","value":"3333"}],"fields":["id1: int32 not null"]},"reverse":true,"position":0,"records_count":1};heap={"data":{"sorting_columns":[{"name":"id1","value":"3333"},{"name":"value","value":"8888"}],"fields":["id1: int32 not null","value: int32 not null"]},"sorting":{"sorting_columns":[{"name":"id1","value":"3333"}],"fields":["id1: int32 not null"]},"reverse":true,"position":2,"records_count":4}; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=merger.cpp:157;pos={"sorting":{"sorting_columns":[{"name":"id1","value":"3333"}],"fields":["id1: int32 not null"]},"reverse":true,"position":0,"records_count":1};heap={"data":{"sorting_columns":[{"name":"id1","value":"1111"},{"name":"value","value":"7777"}],"fields":["id1: int32 not null","value: int32 not null"]},"sorting":{"sorting_columns":[{"name":"id1","value":"1111"}],"fields":["id1: int32 not null"]},"reverse":true,"position":0,"records_count":2}; |84.9%| [TS] {BAZEL_UPLOAD} ydb/core/formats/arrow/ut/unittest |84.9%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/fq/libs/row_dispatcher/format_handler/ut/ydb-core-fq-libs-row_dispatcher-format_handler-ut |84.9%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/fq/libs/row_dispatcher/format_handler/ut/ydb-core-fq-libs-row_dispatcher-format_handler-ut |84.9%| [TS] {RESULT} ydb/library/yql/providers/yt/actors/ut/unittest |84.9%| [TS] {RESULT} ydb/library/actors/wilson/ut/unittest |84.9%| [TS] {RESULT} ydb/core/tx/schemeshard/ut_ru_calculator/unittest |84.9%| [TS] {RESULT} ydb/library/yql/dq/opt/ut/unittest |84.9%| [TA] {RESULT} $(B)/ydb/core/blobstorage/vdisk/hulldb/fresh/ut/test-results/unittest/{meta.json ... results_accumulator.log} |85.0%| [TA] {RESULT} $(B)/ydb/core/tx/scheme_board/ut_subscriber/test-results/unittest/{meta.json ... results_accumulator.log} |85.0%| [LD] {RESULT} $(B)/ydb/core/fq/libs/row_dispatcher/format_handler/ut/ydb-core-fq-libs-row_dispatcher-format_handler-ut |85.0%| [TS] {RESULT} ydb/core/formats/arrow/ut/unittest |85.0%| [TA] {RESULT} $(B)/ydb/core/blobstorage/vdisk/query/ut/test-results/unittest/{meta.json ... results_accumulator.log} |85.0%| [TA] {RESULT} $(B)/ydb/core/blobstorage/ut_vdisk/test-results/unittest/{meta.json ... results_accumulator.log} |85.0%| [TA] {RESULT} $(B)/ydb/core/blobstorage/ut_blobstorage/ut_restart_pdisk/test-results/unittest/{meta.json ... 
results_accumulator.log} |85.0%| [TS] {RESULT} ydb/core/tx/columnshard/splitter/ut/unittest |85.0%| [TS] {RESULT} ydb/library/formats/arrow/ut/unittest |85.0%| [TS] {RESULT} ydb/core/config/init/ut/unittest |85.0%| [TM] {RESULT} ydb/library/actors/interconnect/ut_rdma/gtest |85.0%| [TM] {RESULT} ydb/library/actors/interconnect/rdma/ut/gtest |85.0%| [TM] {RESULT} ydb/library/range_treap/ut_range_treap/unittest |85.0%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/kqp/rm_service/ut/ydb-core-kqp-rm_service-ut |85.0%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/kqp/rm_service/ut/ydb-core-kqp-rm_service-ut |85.0%| [LD] {RESULT} $(B)/ydb/core/kqp/rm_service/ut/ydb-core-kqp-rm_service-ut ------- [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/ut_blobstorage/ut_balancing/unittest >> VDiskBalancing::TestRandom_Mirror3dc [GOOD] Test command err: RandomSeed# 6231393709720804718 Step = 0 SEND TEvPut with key [1:1:0:0:0:51943:0] TEvPutResult: TEvPutResult {Id# [1:1:0:0:0:51943:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999988} Step = 1 SEND TEvPut with key [1:1:1:0:0:37868:0] TEvPutResult: TEvPutResult {Id# [1:1:1:0:0:37868:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999988} Step = 2 SEND TEvPut with key [1:1:2:0:0:85877:0] TEvPutResult: TEvPutResult {Id# [1:1:2:0:0:85877:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999988} Step = 3 SEND TEvPut with key [1:1:3:0:0:192081:0] TEvPutResult: TEvPutResult {Id# [1:1:3:0:0:192081:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999988} Step = 4 SEND TEvPut with key [1:1:4:0:0:267203:0] TEvPutResult: TEvPutResult {Id# [1:1:4:0:0:267203:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999988} Stop node 3 2026-01-08T15:22:59.529194Z 1 00h01m00.010512s :PIPE_SERVER ERROR: [72057594037932033] NodeDisconnected NodeId# 4 Step = 5 SEND TEvPut with key [1:1:5:0:0:502135:0] TEvPutResult: TEvPutResult {Id# [1:1:5:0:0:502135:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999988} Step = 6 SEND TEvPut with key [1:1:6:0:0:377427:0] TEvPutResult: TEvPutResult {Id# [1:1:6:0:0:377427:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999988} Stop node 4 2026-01-08T15:22:59.867201Z 1 00h01m10.060512s :PIPE_SERVER ERROR: [72057594037932033] NodeDisconnected NodeId# 5 Step = 7 SEND TEvPut with key [1:1:7:0:0:48850:0] TEvPutResult: TEvPutResult {Id# [1:1:7:0:0:48850:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999988} Step = 8 SEND TEvPut with key [1:1:8:0:0:411812:0] TEvPutResult: TEvPutResult {Id# [1:1:8:0:0:411812:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999988} Step = 9 SEND TEvPut with key [1:1:9:0:0:293766:0] TEvPutResult: TEvPutResult {Id# [1:1:9:0:0:293766:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999988} Start node 3 Step = 10 SEND TEvPut with key [1:1:10:0:0:127358:0] TEvPutResult: TEvPutResult {Id# [1:1:10:0:0:127358:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999988} Step = 11 SEND TEvPut with key [1:1:11:0:0:282945:0] TEvPutResult: TEvPutResult {Id# [1:1:11:0:0:282945:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999963} Step = 12 SEND TEvPut with key [1:1:12:0:0:34864:0] TEvPutResult: TEvPutResult {Id# [1:1:12:0:0:34864:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999988} Step = 13 SEND TEvPut with key [1:1:13:0:0:363096:0] TEvPutResult: TEvPutResult {Id# [1:1:13:0:0:363096:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999988} Step = 14 SEND 
TEvPut with key [1:1:14:0:0:179270:0] TEvPutResult: TEvPutResult {Id# [1:1:14:0:0:179270:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999988} Step = 15 SEND TEvPut with key [1:1:15:0:0:358611:0] TEvPutResult: TEvPutResult {Id# [1:1:15:0:0:358611:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999988} Step = 16 SEND TEvPut with key [1:1:16:0:0:136892:0] TEvPutResult: TEvPutResult {Id# [1:1:16:0:0:136892:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999988} Step = 17 SEND TEvPut with key [1:1:17:0:0:517733:0] TEvPutResult: TEvPutResult {Id# [1:1:17:0:0:517733:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999988} Step = 18 SEND TEvPut with key [1:1:18:0:0:250802:0] TEvPutResult: TEvPutResult {Id# [1:1:18:0:0:250802:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999988} Step = 19 SEND TEvPut with key [1:1:19:0:0:199490:0] TEvPutResult: TEvPutResult {Id# [1:1:19:0:0:199490:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999988} Step = 20 SEND TEvPut with key [1:1:20:0:0:244269:0] TEvPutResult: TEvPutResult {Id# [1:1:20:0:0:244269:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999963} Step = 21 SEND TEvPut with key [1:1:21:0:0:329606:0] TEvPutResult: TEvPutResult {Id# [1:1:21:0:0:329606:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999988} Step = 22 SEND TEvPut with key [1:1:22:0:0:322338:0] TEvPutResult: TEvPutResult {Id# [1:1:22:0:0:322338:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999963} Step = 23 SEND TEvPut with key [1:1:23:0:0:519258:0] TEvPutResult: TEvPutResult {Id# [1:1:23:0:0:519258:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999988} Step = 24 SEND TEvPut with key [1:1:24:0:0:56036:0] TEvPutResult: TEvPutResult {Id# [1:1:24:0:0:56036:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999963} Step = 25 SEND TEvPut with key [1:1:25:0:0:514591:0] TEvPutResult: TEvPutResult {Id# [1:1:25:0:0:514591:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999963} Stop node 7 2026-01-08T15:23:02.655487Z 1 00h01m30.100512s :PIPE_SERVER ERROR: [72057594037932033] NodeDisconnected NodeId# 8 Step = 26 SEND TEvPut with key [1:1:26:0:0:5927:0] TEvPutResult: TEvPutResult {Id# [1:1:26:0:0:5927:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999939} Step = 27 SEND TEvPut with key [1:1:27:0:0:148482:0] TEvPutResult: TEvPutResult {Id# [1:1:27:0:0:148482:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999939} Step = 28 SEND TEvPut with key [1:1:28:0:0:6043:0] TEvPutResult: TEvPutResult {Id# [1:1:28:0:0:6043:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999939} Step = 29 SEND TEvPut with key [1:1:29:0:0:265170:0] TEvPutResult: TEvPutResult {Id# [1:1:29:0:0:265170:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999939} Step = 30 SEND TEvPut with key [1:1:30:0:0:264716:0] TEvPutResult: TEvPutResult {Id# [1:1:30:0:0:264716:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999939} Compact vdisk 3 Step = 31 SEND TEvPut with key [1:1:31:0:0:168116:0] TEvPutResult: TEvPutResult {Id# [1:1:31:0:0:168116:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999927} Step = 32 SEND TEvPut with key [1:1:32:0:0:444749:0] TEvPutResult: TEvPutResult {Id# [1:1:32:0:0:444749:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999927} Step = 33 SEND TEvPut with key [1:1:33:0:0:350254:0] TEvPutResult: TEvPutResult {Id# [1:1:33:0:0:350254:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 
0.999927} Step = 34 SEND TEvPut with key [1:1:34:0:0:145950:0] TEvPutResult: TEvPutResult {Id# [1:1:34:0:0:145950:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999927} Step = 35 SEND TEvPut with key [1:1:35:0:0:358611:0] TEvPutResult: TEvPutResult {Id# [1:1:35:0:0:358611:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999927} Step = 36 SEND TEvPut with key [1:1:36:0:0:139148:0] TEvPutResult: TEvPutResult {Id# [1:1:36:0:0:139148:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999927} Step = 37 SEND TEvPut with key [1:1:37:0:0:200198:0] TEvPutResult: TEvPutResult {Id# [1:1:37:0:0:200198:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999927} Step = 38 SEND TEvPut with key [1:1:38:0:0:185170:0] TEvPutResult: TEvPutResult {Id# [1:1:38:0:0:185170:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999927} Step = 39 SEND TEvPut with key [1:1:39:0:0:297271:0] TEvPutResult: TEvPutResult {Id# [1:1:39:0:0:297271:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999927} Step = 40 SEND TEvPut with key [1:1:40:0:0:419670:0] TEvPutResult: TEvPutResult {Id# [1:1:40:0:0:419670:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999927} Step = 41 SEND TEvPut with key [1:1:41:0:0:218956:0] TEvPutResult: TEvPutResult {Id# [1:1:41:0:0:218956:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999927} Step = 42 SEND TEvPut with key [1:1:42:0:0:154723:0] TEvPutResult: TEvPutResult {Id# [1:1:42:0:0:154723:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999927} Step = 43 SEND TEvPut with key [1:1:43:0:0:13332:0] TEvPutResult: TEvPutResult {Id# [1:1:43:0:0:13332:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999927} Step = 44 SEND TEvPut with key [1:1:44:0:0:448892:0] TEvPutResult: TEvPutResult {Id# [1:1:44:0:0:448892:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999927} Step = 45 SEND TEvPut with key [1:1:45:0:0:103231:0] TEvPutResult: TEvPutResult {Id# [1:1:45:0:0:103231:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999927} Step = 46 SEND TEvPut with key [1:1:46:0:0:295973:0] TEvPutResult: TEvPutResult {Id# [1:1:46:0:0:295973:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999927} Step = 47 SEND TEvPut with key [1:1:47:0:0:402799:0] TEvPutResult: TEvPutResult {Id# [1:1:47:0:0:402799:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999927} Step = 48 SEND TEvPut with key [1:1:48:0:0:165045:0] TEvPutResult: TEvPutResult {Id# [1:1:48:0:0:165045:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999927} Step = 49 SEND TEvPut with key [1:1:49:0:0:360099:0] TEvPutResult: TEvPutResult {Id# [1:1:49:0:0:360099:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999927} Step = 50 SEND TEvPut with key [1:1:50:0:0:97222:0] TEvPutResult: TEvPutResult {Id# [1:1:50:0:0:97222:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999927} Step = 51 SEND TEvPut with key [1:1:51:0:0:303396:0] TEvPutResult: TEvPutResult {Id# [1:1:51:0:0:303396:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999927} Step = 52 SEND TEvPut with key [1:1:52:0:0:304876:0] TEvPutResult: TEvPutResult {Id# [1:1:52:0:0:304876:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999927} Step = 53 SEND TEvPut with key [1:1:53:0:0:375063:0] TEvPutResult: TEvPutResult {Id# [1:1:53:0:0:375063:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999927} Start node 4 Step = 54 SEND TEvPut with key [1:1:54:0:0:288044:0] TEvPutResult: TEvPutResult {Id# 
[1:1:54:0:0:288044:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999878} Step = 55 SEND TEvPut with key [1:1:55:0:0:181559:0] TEvPutResult: TEvPutResult {Id# [1:1:55:0:0:181559:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999927} Step = 56 SEND TEvPut with key [1:1:56:0:0:42993:0] TEvPutResult: TEvPutResult {Id# [1:1:56:0:0:42993:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999878} Step = 57 SEND TEvPut with key [1:1:57:0:0:424399:0] TEvPutResult: TEvPutResult {Id# [1:1:57:0:0:424399:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999927} Step = 58 SEND TEvPut with key [1:1:58:0:0:169341:0] TEvPutResult: TEvPutResult {Id# [1:1:58:0:0:169341:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999915} Step = 59 SEND TEvPut with key [1:1:59:0:0:405932:0] TEvPutResult: TEvPutResult {Id# [1:1:59:0:0:405932:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999915} Step = 60 SEND TEvPut with key [1:1:60:0:0:190148:0] TEvPutResult: TEvPutResult {Id# [1:1:60:0:0:190148:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999927} Stop node 3 2026-01-08T15:23:06.389156Z 1 00h02m00.150512s :PIPE_SERVER ERROR: [72057594037932033] NodeDisconnected NodeId# 4 Wipe node 0 2026-01-08T15:23:06.608353Z 1 00h02m10.161024s :BS_SYNCER ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) TVDiskGuidRecoveryActor: DECISION: [Decision# LostData SubsequentFailure# 0] 2026-01-08T15:23:06.610573Z 1 00h02m10.161024s :BS_SYNCER ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) TVDiskGuidRecoveryActor: FINISH: [Decision# LostData Guid# 4862768860829000883] 2026-01-08T15:23:06.773211Z 1 00h02m15.161024s :BS_PROXY ERROR: StateEstablishingSessions Wakeup TIMEOUT Marker# DSP12 GroupId# 2181038080 EstablishingSessionsStateTs# 130.161024s NumUnconnectedDisks# 3 Step = 61 SEND TEvPut with key [1:1:61:0:0:500240:0] TEvPutResult: TEvPutResult {Id# [1:1:61:0:0:500240:0] Status# ERROR StatusFlags# { } ErrorReason# "Timeout while establishing sessions (DSPE4). GroupId# 2181038080 EstablishingSessionsStateTs# 130.161024s NumUnconnectedDisks# 3" ApproximateFreeSpaceShare# 0} Step = 62 SEND TEvPut with key [1:1:62:0:0:354994:0] TEvPutResult: ... 
:1:945:0:0:76599:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999707} Step = 946 SEND TEvPut with key [1:1:946:0:0:24822:0] TEvPutResult: TEvPutResult {Id# [1:1:946:0:0:24822:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999768} Compact vdisk 2 Step = 947 SEND TEvPut with key [1:1:947:0:0:100167:0] TEvPutResult: TEvPutResult {Id# [1:1:947:0:0:100167:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999695} Step = 948 SEND TEvPut with key [1:1:948:0:0:112126:0] TEvPutResult: TEvPutResult {Id# [1:1:948:0:0:112126:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999707} Step = 949 SEND TEvPut with key [1:1:949:0:0:525378:0] TEvPutResult: TEvPutResult {Id# [1:1:949:0:0:525378:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999707} Step = 950 SEND TEvPut with key [1:1:950:0:0:410875:0] TEvPutResult: TEvPutResult {Id# [1:1:950:0:0:410875:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999695} Step = 951 SEND TEvPut with key [1:1:951:0:0:113503:0] TEvPutResult: TEvPutResult {Id# [1:1:951:0:0:113503:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999707} Step = 952 SEND TEvPut with key [1:1:952:0:0:431140:0] TEvPutResult: TEvPutResult {Id# [1:1:952:0:0:431140:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999707} Step = 953 SEND TEvPut with key [1:1:953:0:0:509293:0] TEvPutResult: TEvPutResult {Id# [1:1:953:0:0:509293:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999707} Stop node 3 2026-01-08T15:24:54.549011Z 1 00h25m01.112048s :PIPE_SERVER ERROR: [72057594037932033] NodeDisconnected NodeId# 4 Step = 954 SEND TEvPut with key [1:1:954:0:0:286395:0] TEvPutResult: TEvPutResult {Id# [1:1:954:0:0:286395:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999731} Stop node 1 2026-01-08T15:24:55.366180Z 1 00h25m11.112560s :PIPE_SERVER ERROR: [72057594037932033] NodeDisconnected NodeId# 2 Step = 955 SEND TEvPut with key [1:1:955:0:0:219270:0] TEvPutResult: TEvPutResult {Id# [1:1:955:0:0:219270:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999792} Start node 1 Step = 956 SEND TEvPut with key [1:1:956:0:0:274971:0] TEvPutResult: TEvPutResult {Id# [1:1:956:0:0:274971:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999792} Step = 957 SEND TEvPut with key [1:1:957:0:0:487884:0] TEvPutResult: TEvPutResult {Id# [1:1:957:0:0:487884:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999707} Start node 3 Step = 958 SEND TEvPut with key [1:1:958:0:0:327302:0] TEvPutResult: TEvPutResult {Id# [1:1:958:0:0:327302:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999695} Step = 959 SEND TEvPut with key [1:1:959:0:0:385917:0] TEvPutResult: TEvPutResult {Id# [1:1:959:0:0:385917:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999695} Step = 960 SEND TEvPut with key [1:1:960:0:0:200998:0] TEvPutResult: TEvPutResult {Id# [1:1:960:0:0:200998:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999695} Step = 961 SEND TEvPut with key [1:1:961:0:0:61147:0] TEvPutResult: TEvPutResult {Id# [1:1:961:0:0:61147:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999695} Step = 962 SEND TEvPut with key [1:1:962:0:0:237906:0] TEvPutResult: TEvPutResult {Id# [1:1:962:0:0:237906:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999695} Step = 963 SEND TEvPut with key [1:1:963:0:0:347273:0] TEvPutResult: TEvPutResult {Id# [1:1:963:0:0:347273:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999695} Step = 964 SEND TEvPut with 
key [1:1:964:0:0:181317:0] TEvPutResult: TEvPutResult {Id# [1:1:964:0:0:181317:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999805} Step = 965 SEND TEvPut with key [1:1:965:0:0:456096:0] TEvPutResult: TEvPutResult {Id# [1:1:965:0:0:456096:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999695} Step = 966 SEND TEvPut with key [1:1:966:0:0:93776:0] TEvPutResult: TEvPutResult {Id# [1:1:966:0:0:93776:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999805} Step = 967 SEND TEvPut with key [1:1:967:0:0:447659:0] TEvPutResult: TEvPutResult {Id# [1:1:967:0:0:447659:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999805} Step = 968 SEND TEvPut with key [1:1:968:0:0:14298:0] TEvPutResult: TEvPutResult {Id# [1:1:968:0:0:14298:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999695} Step = 969 SEND TEvPut with key [1:1:969:0:0:92781:0] TEvPutResult: TEvPutResult {Id# [1:1:969:0:0:92781:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999805} Step = 970 SEND TEvPut with key [1:1:970:0:0:334566:0] TEvPutResult: TEvPutResult {Id# [1:1:970:0:0:334566:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999695} Stop node 0 2026-01-08T15:24:57.576215Z 9 00h25m41.115120s :BS_NODE ERROR: {NW42@node_warden_pipe.cpp:60} Handle(TEvTabletPipe::TEvClientDestroyed) ClientId# [9:116277:348] ServerId# [1:117308:171] TabletId# 72057594037932033 PipeClientId# [9:116277:348] 2026-01-08T15:24:57.576421Z 8 00h25m41.115120s :BS_NODE ERROR: {NW42@node_warden_pipe.cpp:60} Handle(TEvTabletPipe::TEvClientDestroyed) ClientId# [8:146913:17] ServerId# [1:146922:4100] TabletId# 72057594037932033 PipeClientId# [8:146913:17] 2026-01-08T15:24:57.576532Z 7 00h25m41.115120s :BS_NODE ERROR: {NW42@node_warden_pipe.cpp:60} Handle(TEvTabletPipe::TEvClientDestroyed) ClientId# [7:145865:17] ServerId# [1:145874:3973] TabletId# 72057594037932033 PipeClientId# [7:145865:17] 2026-01-08T15:24:57.576645Z 6 00h25m41.115120s :BS_NODE ERROR: {NW42@node_warden_pipe.cpp:60} Handle(TEvTabletPipe::TEvClientDestroyed) ClientId# [6:122918:17] ServerId# [1:122924:1010] TabletId# 72057594037932033 PipeClientId# [6:122918:17] 2026-01-08T15:24:57.576749Z 5 00h25m41.115120s :BS_NODE ERROR: {NW42@node_warden_pipe.cpp:60} Handle(TEvTabletPipe::TEvClientDestroyed) ClientId# [5:142940:17] ServerId# [1:142947:3591] TabletId# 72057594037932033 PipeClientId# [5:142940:17] 2026-01-08T15:24:57.576845Z 4 00h25m41.115120s :BS_NODE ERROR: {NW42@node_warden_pipe.cpp:60} Handle(TEvTabletPipe::TEvClientDestroyed) ClientId# [4:151885:17] ServerId# [1:151894:4701] TabletId# 72057594037932033 PipeClientId# [4:151885:17] 2026-01-08T15:24:57.577009Z 3 00h25m41.115120s :BS_NODE ERROR: {NW42@node_warden_pipe.cpp:60} Handle(TEvTabletPipe::TEvClientDestroyed) ClientId# [3:141851:17] ServerId# [1:141860:3467] TabletId# 72057594037932033 PipeClientId# [3:141851:17] 2026-01-08T15:24:57.577117Z 2 00h25m41.115120s :BS_NODE ERROR: {NW42@node_warden_pipe.cpp:60} Handle(TEvTabletPipe::TEvClientDestroyed) ClientId# [2:150926:17] ServerId# [1:150932:4592] TabletId# 72057594037932033 PipeClientId# [2:150926:17] Step = 971 SEND TEvPut with key [1:1:971:0:0:439384:0] TEvPutResult: TEvPutResult {Id# [1:1:971:0:0:439384:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999792} Step = 972 SEND TEvPut with key [1:1:972:0:0:252551:0] TEvPutResult: TEvPutResult {Id# [1:1:972:0:0:252551:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.99967} Step = 973 SEND TEvPut with key [1:1:973:0:0:39982:0] 
TEvPutResult: TEvPutResult {Id# [1:1:973:0:0:39982:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.99967} Stop node 2 Step = 974 SEND TEvPut with key [1:1:974:0:0:526796:0] TEvPutResult: TEvPutResult {Id# [1:1:974:0:0:526796:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999756} Start node 0 Step = 975 SEND TEvPut with key [1:1:975:0:0:337763:0] TEvPutResult: TEvPutResult {Id# [1:1:975:0:0:337763:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999683} Stop node 2 Step = 976 SEND TEvPut with key [1:1:976:0:0:475740:0] TEvPutResult: TEvPutResult {Id# [1:1:976:0:0:475740:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999756} Step = 977 SEND TEvPut with key [1:1:977:0:0:169780:0] TEvPutResult: TEvPutResult {Id# [1:1:977:0:0:169780:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999683} Step = 978 SEND TEvPut with key [1:1:978:0:0:481535:0] TEvPutResult: TEvPutResult {Id# [1:1:978:0:0:481535:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999683} Step = 979 SEND TEvPut with key [1:1:979:0:0:24668:0] TEvPutResult: TEvPutResult {Id# [1:1:979:0:0:24668:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999756} Step = 980 SEND TEvPut with key [1:1:980:0:0:159890:0] TEvPutResult: TEvPutResult {Id# [1:1:980:0:0:159890:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999683} Step = 981 SEND TEvPut with key [1:1:981:0:0:111300:0] TEvPutResult: TEvPutResult {Id# [1:1:981:0:0:111300:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999756} Step = 982 SEND TEvPut with key [1:1:982:0:0:355914:0] TEvPutResult: TEvPutResult {Id# [1:1:982:0:0:355914:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999731} Step = 983 SEND TEvPut with key [1:1:983:0:0:399106:0] TEvPutResult: TEvPutResult {Id# [1:1:983:0:0:399106:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999756} Step = 984 SEND TEvPut with key [1:1:984:0:0:347759:0] TEvPutResult: TEvPutResult {Id# [1:1:984:0:0:347759:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999756} Step = 985 SEND TEvPut with key [1:1:985:0:0:261994:0] TEvPutResult: TEvPutResult {Id# [1:1:985:0:0:261994:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999731} Step = 986 SEND TEvPut with key [1:1:986:0:0:101043:0] TEvPutResult: TEvPutResult {Id# [1:1:986:0:0:101043:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999756} Step = 987 SEND TEvPut with key [1:1:987:0:0:138774:0] TEvPutResult: TEvPutResult {Id# [1:1:987:0:0:138774:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999731} Step = 988 SEND TEvPut with key [1:1:988:0:0:441913:0] TEvPutResult: TEvPutResult {Id# [1:1:988:0:0:441913:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999683} Step = 989 SEND TEvPut with key [1:1:989:0:0:134469:0] TEvPutResult: TEvPutResult {Id# [1:1:989:0:0:134469:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999731} Step = 990 SEND TEvPut with key [1:1:990:0:0:123825:0] TEvPutResult: TEvPutResult {Id# [1:1:990:0:0:123825:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999731} Step = 991 SEND TEvPut with key [1:1:991:0:0:40387:0] TEvPutResult: TEvPutResult {Id# [1:1:991:0:0:40387:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999683} Step = 992 SEND TEvPut with key [1:1:992:0:0:193000:0] TEvPutResult: TEvPutResult {Id# [1:1:992:0:0:193000:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999731} Stop node 7 2026-01-08T15:25:00.341429Z 1 00h26m21.151536s :PIPE_SERVER ERROR: 
[72057594037932033] NodeDisconnected NodeId# 8 Step = 993 SEND TEvPut with key [1:1:993:0:0:455894:0] TEvPutResult: TEvPutResult {Id# [1:1:993:0:0:455894:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999658} Compact vdisk 0 Step = 994 SEND TEvPut with key [1:1:994:0:0:54378:0] TEvPutResult: TEvPutResult {Id# [1:1:994:0:0:54378:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999744} Compact vdisk 6 Step = 995 SEND TEvPut with key [1:1:995:0:0:487669:0] TEvPutResult: TEvPutResult {Id# [1:1:995:0:0:487669:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999817} Step = 996 SEND TEvPut with key [1:1:996:0:0:194641:0] TEvPutResult: TEvPutResult {Id# [1:1:996:0:0:194641:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999719} Step = 997 SEND TEvPut with key [1:1:997:0:0:74188:0] TEvPutResult: TEvPutResult {Id# [1:1:997:0:0:74188:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999731} Step = 998 SEND TEvPut with key [1:1:998:0:0:136082:0] TEvPutResult: TEvPutResult {Id# [1:1:998:0:0:136082:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999719} Step = 999 SEND TEvPut with key [1:1:999:0:0:145518:0] TEvPutResult: TEvPutResult {Id# [1:1:999:0:0:145518:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999731} Starting nodes Start compaction 1 Start checking |85.0%| [TM] {BAZEL_UPLOAD} ydb/core/blobstorage/ut_blobstorage/ut_balancing/unittest >> TDqHashCombineTest::TestWideModeAggregationWithSpilling [GOOD] >> TDqHashCombineTest::TestWideModeAggregationMultiRowNoSpilling >> test.py::test[solomon-BadDownsamplingFill-] [GOOD] >> test.py::test[solomon-BadDownsamplingInterval-] >> BlobDepot::VerifiedRandom [GOOD] >> BlobDepot::LoadPutAndRead >> test_transform.py::TestYamlConfigTransformations::test_basic[args1-dump] [GOOD] >> test_transform.py::TestYamlConfigTransformations::test_basic[args1-dump_ds_init] >> TestFilterSet::FilterGroup >> KqpRm::SnapshotSharingByExchanger >> KqpRm::Reduce >> KqpRm::ResourceBrokerNotEnoughResources >> KqpRm::NodesMembershipByExchanger >> KqpRm::NotEnoughMemory |85.0%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/cms/ut_sentinel_unstable/ydb-core-cms-ut_sentinel_unstable |85.0%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/cms/ut_sentinel_unstable/ydb-core-cms-ut_sentinel_unstable |85.0%| [LD] {RESULT} $(B)/ydb/core/cms/ut_sentinel_unstable/ydb-core-cms-ut_sentinel_unstable >> TBlobStorageGroupInfoBlobMapTest::BelongsToSubgroupBenchmark [GOOD] >> TBlobStorageGroupInfoBlobMapTest::BasicChecks >> KqpRm::SingleTask |85.0%| [PY] {default-linux-x86_64, release, asan} $(B)/ydb/tests/stability/tool/objcopy_e64be2702e6aadcfe4f62214e0.o |85.0%| [PY] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/tests/stability/tool/objcopy_e64be2702e6aadcfe4f62214e0.o >> KqpRm::ManyTasks >> KqpRm::NotEnoughExecutionUnits >> KqpRm::DisonnectNodes >> KqpRm::SingleSnapshotByExchanger >> KqpRm::ResourceBrokerNotEnoughResources [GOOD] >> KqpRm::NotEnoughMemory [GOOD] >> KqpRm::Reduce [GOOD] >> KqpRm::ManyTasks [GOOD] >> test_transform.py::TestYamlConfigTransformations::test_basic[args1-dump_ds_init] [GOOD] >> test_transform.py::TestYamlConfigTransformations::test_simplified[dump] >> KqpRm::NotEnoughExecutionUnits [GOOD] >> KqpRm::SingleTask [GOOD] >> TSubgroupPartLayoutTest::CountEffectiveReplicas4of4 [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/rm_service/ut/unittest >> KqpRm::NotEnoughMemory [GOOD] Test command err: 2026-01-08T15:25:44.037955Z node 2 :BS_PDISK WARN: 
{BPD01@blobstorage_pdisk_blockdevice_async.cpp:924} Warning# got EIoResult::FileOpenError from IoContext->Setup PDiskId# 1000 2026-01-08T15:25:44.044608Z node 2 :BS_PDISK CRIT: {BPD39@blobstorage_pdisk_impl.cpp:2980} BlockDevice initialization error Details# Can't open file "/home/runner/.ya/build/build_root/663n/003183/r3tmp/tmphsDzxl/pdisk_1.dat": unknown reason, errno# 0. PDiskId# 1000 2026-01-08T15:25:44.045131Z node 2 :BS_PDISK CRIT: {BPD01@blobstorage_pdisk_actor.cpp:267} PDiskId# 1000 bootstrapped to the StateError, reason# Can't open file "/home/runner/.ya/build/build_root/663n/003183/r3tmp/tmphsDzxl/pdisk_1.dat": unknown reason, errno# 0. Can not be initialized Config: {TPDiskConfig Path# "/home/runner/.ya/build/build_root/663n/003183/r3tmp/tmphsDzxl/pdisk_1.dat" ExpectedPath# "" ExpectedSerial# "" PDiskGuid# 13241312864441914067 PDiskId# 1000 PDiskCategory# {Type# DEVICE_TYPE_ROT Kind# 0} MetadataOnly# 0 StartOwnerRound# 2 SectorMap# true EnableSectorEncryption # 0 ChunkSize# 134217728 SectorSize# 4096 StatisticsUpdateIntervalMs# 1000 SchedulerCfg# {TPDiskSchedulerConfig BytesSchedulerWeight# 1 LogWeight# 1 FreshWeight# 2 CompWeight# 7 SyncLogWeight# 15 HugeWeight# 2 FastReadWeight# 1 OtherReadWeight# 1 LoadWeight# 2 LowReadWeight# 1 MaxChunkReadsPerCycle# 16 MaxChunkReadsDurationPerCycleMs# 0.25 MaxChunkWritesPerCycle# 8 MaxChunkWritesDurationPerCycleMs# 1} MinLogChunksTotal# 4 MaxLogChunksPerOwnerMultiplier# 5 MaxLogChunksPerOwnerDivisor# 4 SortFreeChunksPerItems# 100 GetDriveDataSwitch# DoNotTouch WriteCacheSwitch# DoNotTouch DriveModelSeekTimeNs# 8000000 DriveModelSpeedBps# 127000000 DriveModelSpeedBpsMin# 135000000 DriveModelSpeedBpsMax# 200000000 DriveModelBulkWrieBlockSize# 2097152 DriveModelTrimSpeedBps# 0 ReorderingMs# 50 DeviceInFlight# 4 CostLimitNs# 50000000 BufferPoolBufferSizeBytes# 524288 BufferPoolBufferCount# 256 MaxQueuedCompletionActions# 128 ExpectedSlotCount# 0 SlotSizeInUnits# 0 ReserveLogChunksMultiplier# 56 InsaneLogChunksMultiplier# 40 RedLogChunksMultiplier# 30 OrangeLogChunksMultiplier# 20 WarningLogChunksMultiplier# 4 YellowLogChunksMultiplier# 4 MaxMetadataMegabytes# 32 SpaceColorBorder# GREEN CompletionThreadsCount# 1 UseNoopScheduler# false PlainDataChunks# 0 SeparateHugePriorities# 0} PDiskId# 1000 2026-01-08T15:25:44.135572Z node 2 :RESOURCE_BROKER DEBUG: resource_broker.cpp:1115: TResourceBrokerActor bootstrap 2026-01-08T15:25:44.135864Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:1115: TResourceBrokerActor bootstrap 2026-01-08T15:25:44.176063Z node 2 :KQP_RESOURCE_MANAGER DEBUG: kqp_rm_service.cpp:609: Start KqpResourceManagerActor at [2:472:2102] with ResourceBroker at [2:442:2101] 2026-01-08T15:25:44.176197Z node 2 :KQP_RESOURCE_MANAGER DEBUG: kqp_resource_info_exchanger.cpp:121: Start KqpResourceInfoExchangerActor at [2:473:2103] 2026-01-08T15:25:44.176262Z node 1 :KQP_RESOURCE_MANAGER DEBUG: kqp_rm_service.cpp:609: Start KqpResourceManagerActor at [1:470:2350] with ResourceBroker at [1:441:2331] 2026-01-08T15:25:44.176313Z node 1 :KQP_RESOURCE_MANAGER DEBUG: kqp_resource_info_exchanger.cpp:121: Start KqpResourceInfoExchangerActor at [1:471:2351] 2026-01-08T15:25:44.176406Z node 2 :KQP_RESOURCE_MANAGER CRIT: kqp_rm_service.cpp:806: Failed to deliver subscription request to config dispatcher 2026-01-08T15:25:44.176462Z node 2 :KQP_RESOURCE_MANAGER CRIT: kqp_resource_info_exchanger.cpp:410: Failed to deliver subscription request to config dispatcher. 
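The KqpRm test cases in this run (SingleTask, ManyTasks, NotEnoughMemory, NotEnoughExecutionUnits, Reduce) exercise allocate/free bookkeeping against the per-node budget published in the resource-usage records that follow (TotalMemory: 1000, ExecutionUnits: 100). A toy model of that bookkeeping, under deliberately simplified semantics — this is not the kqp_rm_service API:

class ResourcePool:
    """Memory and execution-unit accounting against a fixed node budget;
    the default numbers mirror the published TotalMemory/ExecutionUnits."""

    def __init__(self, total_memory=1000, execution_units=100):
        self.free_memory = total_memory
        self.free_units = execution_units
        self.tasks = {}  # task_id -> (memory, units)

    def allocate(self, task_id, memory, units=1):
        if memory > self.free_memory or units > self.free_units:
            return False  # the NotEnoughMemory / NotEnoughExecutionUnits outcome
        self.free_memory -= memory
        self.free_units -= units
        self.tasks[task_id] = (memory, units)
        return True

    def free(self, task_id):
        memory, units = self.tasks.pop(task_id)
        self.free_memory += memory
        self.free_units += units

pool = ResourcePool()
assert pool.allocate("task-1", memory=100)         # fits the 1000-unit budget
assert not pool.allocate("task-2", memory=10_000)  # rejected: not enough memory
pool.free("task-1")
assert pool.free_memory == 1000                    # freeing restores the budget
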
2026-01-08T15:25:44.176511Z node 1 :KQP_RESOURCE_MANAGER CRIT: kqp_rm_service.cpp:806: Failed to deliver subscription request to config dispatcher
2026-01-08T15:25:44.176530Z node 1 :KQP_RESOURCE_MANAGER CRIT: kqp_resource_info_exchanger.cpp:410: Failed to deliver subscription request to config dispatcher.
2026-01-08T15:25:44.176711Z node 1 :KQP_RESOURCE_MANAGER DEBUG: kqp_rm_service.cpp:923: Don't set KqpProxySharedResources
2026-01-08T15:25:44.211249Z node 1 :KQP_RESOURCE_MANAGER INFO: kqp_rm_service.cpp:939: Send to publish resource usage for reason: data_center update, payload: NodeId: 1 ResourceManagerActorId { RawX1: 7886758914357752171 RawX2: 9223372041149771361 } Timestamp: 1767885944 AvailableComputeActors: 100 UsedMemory: 0 TotalMemory: 1000 Memory { Pool: 1 Available: 1000 } ExecutionUnits: 100
2026-01-08T15:25:44.211513Z node 2 :KQP_RESOURCE_MANAGER DEBUG: kqp_rm_service.cpp:923: Don't set KqpProxySharedResources
2026-01-08T15:25:44.211598Z node 2 :KQP_RESOURCE_MANAGER INFO: kqp_rm_service.cpp:939: Send to publish resource usage for reason: data_center update, payload: NodeId: 2 ResourceManagerActorId { RawX1: 7886758914357752171 RawX2: 9223372045444738657 } Timestamp: 1767885944 AvailableComputeActors: 100 UsedMemory: 0 TotalMemory: 1000 Memory { Pool: 1 Available: 1000 } ExecutionUnits: 100
2026-01-08T15:25:44.211839Z node 2 :KQP_RESOURCE_MANAGER INFO: kqp_resource_info_exchanger.cpp:462: Received tenant pool status for exchanger, serving tenant: /dc-1, board: kqpexch+/dc-1
2026-01-08T15:25:44.211981Z node 2 :KQP_RESOURCE_MANAGER INFO: kqp_rm_service.cpp:763: Received tenant pool status, serving tenant: /dc-1, board: kqprm+/dc-1
2026-01-08T15:25:44.212010Z node 2 :KQP_RESOURCE_MANAGER DEBUG: kqp_rm_service.cpp:923: Don't set KqpProxySharedResources
2026-01-08T15:25:44.212089Z node 2 :KQP_RESOURCE_MANAGER INFO: kqp_rm_service.cpp:939: Send to publish resource usage for reason: tenant updated, payload: NodeId: 2 ResourceManagerActorId { RawX1: 7886758914357752171 RawX2: 9223372045444738657 } Timestamp: 1767885944 AvailableComputeActors: 100 UsedMemory: 0 TotalMemory: 1000 Memory { Pool: 1 Available: 1000 } ExecutionUnits: 100
2026-01-08T15:25:44.212280Z node 1 :KQP_RESOURCE_MANAGER INFO: kqp_rm_service.cpp:763: Received tenant pool status, serving tenant: /dc-1, board: kqprm+/dc-1
2026-01-08T15:25:44.212310Z node 1 :KQP_RESOURCE_MANAGER DEBUG: kqp_rm_service.cpp:923: Don't set KqpProxySharedResources
2026-01-08T15:25:44.212373Z node 1 :KQP_RESOURCE_MANAGER INFO: kqp_rm_service.cpp:939: Send to publish resource usage for reason: tenant updated, payload: NodeId: 1 ResourceManagerActorId { RawX1: 7886758914357752171 RawX2: 9223372041149771361 } Timestamp: 1767885944 AvailableComputeActors: 100 UsedMemory: 0 TotalMemory: 1000 Memory { Pool: 1 Available: 1000 } ExecutionUnits: 100
2026-01-08T15:25:44.212459Z node 1 :KQP_RESOURCE_MANAGER INFO: kqp_resource_info_exchanger.cpp:462: Received tenant pool status for exchanger, serving tenant: /dc-1, board: kqpexch+/dc-1
2026-01-08T15:25:44.212995Z node 1 :KQP_RESOURCE_MANAGER DEBUG: kqp_resource_info_exchanger.cpp:476: Get board info from subscriber, serving tenant: /dc-1, board: kqpexch+/dc-1, with size: 0
2026-01-08T15:25:44.213283Z node 1 :KQP_RESOURCE_MANAGER DEBUG: kqp_resource_info_exchanger.cpp:498: Get board info update from subscriber, serving tenant: /dc-1, board: kqpexch+/dc-1, with size: 1
2026-01-08T15:25:44.213502Z node 1 :KQP_RESOURCE_MANAGER DEBUG: kqp_resource_info_exchanger.cpp:498: Get board info update from subscriber, serving tenant: /dc-1, board: kqpexch+/dc-1, with size: 1
2026-01-08T15:25:44.213989Z node 2 :KQP_RESOURCE_MANAGER DEBUG: kqp_resource_info_exchanger.cpp:476: Get board info from subscriber, serving tenant: /dc-1, board: kqpexch+/dc-1, with size: 1
2026-01-08T15:25:44.214166Z node 2 :KQP_RESOURCE_MANAGER DEBUG: kqp_resource_info_exchanger.cpp:498: Get board info update from subscriber, serving tenant: /dc-1, board: kqpexch+/dc-1, with size: 2
2026-01-08T15:25:44.218400Z node 2 :KQP_RESOURCE_MANAGER DEBUG: kqp_resource_info_exchanger.cpp:523: Get resources info from node: 1
2026-01-08T15:25:44.218693Z node 1 :KQP_RESOURCE_MANAGER DEBUG: kqp_resource_info_exchanger.cpp:523: Get resources info from node: 2
2026-01-08T15:25:44.218881Z node 2 :KQP_RESOURCE_MANAGER DEBUG: kqp_resource_info_exchanger.cpp:523: Get resources info from node: 1
2026-01-08T15:25:44.218955Z node 1 :KQP_RESOURCE_MANAGER DEBUG: kqp_resource_info_exchanger.cpp:523: Get resources info from node: 2
|85.0%| [TM] {BAZEL_UPLOAD} ydb/core/kqp/rm_service/ut/unittest
------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/rm_service/ut/unittest >> KqpRm::Reduce [GOOD]
Test command err:
2026-01-08T15:25:44.178985Z node 2 :BS_PDISK WARN: {BPD01@blobstorage_pdisk_blockdevice_async.cpp:924} Warning# got EIoResult::FileOpenError from IoContext->Setup PDiskId# 1000
2026-01-08T15:25:44.179463Z node 2 :BS_PDISK CRIT: {BPD39@blobstorage_pdisk_impl.cpp:2980} BlockDevice initialization error Details# Can't open file "/home/runner/.ya/build/build_root/663n/0030fd/r3tmp/tmp2zDhLJ/pdisk_1.dat": unknown reason, errno# 0. PDiskId# 1000
2026-01-08T15:25:44.180005Z node 2 :BS_PDISK CRIT: {BPD01@blobstorage_pdisk_actor.cpp:267} PDiskId# 1000 bootstrapped to the StateError, reason# Can't open file "/home/runner/.ya/build/build_root/663n/0030fd/r3tmp/tmp2zDhLJ/pdisk_1.dat": unknown reason, errno# 0. Can not be initialized Config: {TPDiskConfig Path# "/home/runner/.ya/build/build_root/663n/0030fd/r3tmp/tmp2zDhLJ/pdisk_1.dat" ExpectedPath# "" ExpectedSerial# "" PDiskGuid# 11376526019328214524 PDiskId# 1000 PDiskCategory# {Type# DEVICE_TYPE_ROT Kind# 0} MetadataOnly# 0 StartOwnerRound# 2 SectorMap# true EnableSectorEncryption # 0 ChunkSize# 134217728 SectorSize# 4096 StatisticsUpdateIntervalMs# 1000 SchedulerCfg# {TPDiskSchedulerConfig BytesSchedulerWeight# 1 LogWeight# 1 FreshWeight# 2 CompWeight# 7 SyncLogWeight# 15 HugeWeight# 2 FastReadWeight# 1 OtherReadWeight# 1 LoadWeight# 2 LowReadWeight# 1 MaxChunkReadsPerCycle# 16 MaxChunkReadsDurationPerCycleMs# 0.25 MaxChunkWritesPerCycle# 8 MaxChunkWritesDurationPerCycleMs# 1} MinLogChunksTotal# 4 MaxLogChunksPerOwnerMultiplier# 5 MaxLogChunksPerOwnerDivisor# 4 SortFreeChunksPerItems# 100 GetDriveDataSwitch# DoNotTouch WriteCacheSwitch# DoNotTouch DriveModelSeekTimeNs# 8000000 DriveModelSpeedBps# 127000000 DriveModelSpeedBpsMin# 135000000 DriveModelSpeedBpsMax# 200000000 DriveModelBulkWrieBlockSize# 2097152 DriveModelTrimSpeedBps# 0 ReorderingMs# 50 DeviceInFlight# 4 CostLimitNs# 50000000 BufferPoolBufferSizeBytes# 524288 BufferPoolBufferCount# 256 MaxQueuedCompletionActions# 128 ExpectedSlotCount# 0 SlotSizeInUnits# 0 ReserveLogChunksMultiplier# 56 InsaneLogChunksMultiplier# 40 RedLogChunksMultiplier# 30 OrangeLogChunksMultiplier# 20 WarningLogChunksMultiplier# 4 YellowLogChunksMultiplier# 4 MaxMetadataMegabytes# 32 SpaceColorBorder# GREEN CompletionThreadsCount# 1 UseNoopScheduler# false PlainDataChunks# 0 SeparateHugePriorities# 0} PDiskId# 1000
2026-01-08T15:25:44.240551Z node 2 :RESOURCE_BROKER DEBUG: resource_broker.cpp:1115: TResourceBrokerActor bootstrap
2026-01-08T15:25:44.240884Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:1115: TResourceBrokerActor bootstrap
2026-01-08T15:25:44.278439Z node 2 :KQP_RESOURCE_MANAGER DEBUG: kqp_rm_service.cpp:609: Start KqpResourceManagerActor at [2:472:2102] with ResourceBroker at [2:442:2101]
2026-01-08T15:25:44.278607Z node 2 :KQP_RESOURCE_MANAGER DEBUG: kqp_resource_info_exchanger.cpp:121: Start KqpResourceInfoExchangerActor at [2:473:2103]
2026-01-08T15:25:44.278674Z node 1 :KQP_RESOURCE_MANAGER DEBUG: kqp_rm_service.cpp:609: Start KqpResourceManagerActor at [1:470:2350] with ResourceBroker at [1:441:2331]
2026-01-08T15:25:44.278731Z node 1 :KQP_RESOURCE_MANAGER DEBUG: kqp_resource_info_exchanger.cpp:121: Start KqpResourceInfoExchangerActor at [1:471:2351]
2026-01-08T15:25:44.278818Z node 2 :KQP_RESOURCE_MANAGER CRIT: kqp_rm_service.cpp:806: Failed to deliver subscription request to config dispatcher
2026-01-08T15:25:44.278877Z node 2 :KQP_RESOURCE_MANAGER CRIT: kqp_resource_info_exchanger.cpp:410: Failed to deliver subscription request to config dispatcher.
2026-01-08T15:25:44.278933Z node 1 :KQP_RESOURCE_MANAGER CRIT: kqp_rm_service.cpp:806: Failed to deliver subscription request to config dispatcher
2026-01-08T15:25:44.278954Z node 1 :KQP_RESOURCE_MANAGER CRIT: kqp_resource_info_exchanger.cpp:410: Failed to deliver subscription request to config dispatcher.
2026-01-08T15:25:44.279132Z node 1 :KQP_RESOURCE_MANAGER DEBUG: kqp_rm_service.cpp:923: Don't set KqpProxySharedResources
2026-01-08T15:25:44.311553Z node 1 :KQP_RESOURCE_MANAGER INFO: kqp_rm_service.cpp:939: Send to publish resource usage for reason: data_center update, payload: NodeId: 1 ResourceManagerActorId { RawX1: 7886758914357752171 RawX2: 9223372041149771361 } Timestamp: 1767885944 AvailableComputeActors: 100 UsedMemory: 0 TotalMemory: 1000 Memory { Pool: 1 Available: 1000 } ExecutionUnits: 100
2026-01-08T15:25:44.311871Z node 2 :KQP_RESOURCE_MANAGER DEBUG: kqp_rm_service.cpp:923: Don't set KqpProxySharedResources
2026-01-08T15:25:44.311956Z node 2 :KQP_RESOURCE_MANAGER INFO: kqp_rm_service.cpp:939: Send to publish resource usage for reason: data_center update, payload: NodeId: 2 ResourceManagerActorId { RawX1: 7886758914357752171 RawX2: 9223372045444738657 } Timestamp: 1767885944 AvailableComputeActors: 100 UsedMemory: 0 TotalMemory: 1000 Memory { Pool: 1 Available: 1000 } ExecutionUnits: 100
2026-01-08T15:25:44.312211Z node 2 :KQP_RESOURCE_MANAGER INFO: kqp_resource_info_exchanger.cpp:462: Received tenant pool status for exchanger, serving tenant: /dc-1, board: kqpexch+/dc-1
2026-01-08T15:25:44.312359Z node 2 :KQP_RESOURCE_MANAGER INFO: kqp_rm_service.cpp:763: Received tenant pool status, serving tenant: /dc-1, board: kqprm+/dc-1
2026-01-08T15:25:44.312391Z node 2 :KQP_RESOURCE_MANAGER DEBUG: kqp_rm_service.cpp:923: Don't set KqpProxySharedResources
2026-01-08T15:25:44.312479Z node 2 :KQP_RESOURCE_MANAGER INFO: kqp_rm_service.cpp:939: Send to publish resource usage for reason: tenant updated, payload: NodeId: 2 ResourceManagerActorId { RawX1: 7886758914357752171 RawX2: 9223372045444738657 } Timestamp: 1767885944 AvailableComputeActors: 100 UsedMemory: 0 TotalMemory: 1000 Memory { Pool: 1 Available: 1000 } ExecutionUnits: 100
2026-01-08T15:25:44.312666Z node 1 :KQP_RESOURCE_MANAGER INFO: kqp_rm_service.cpp:763: Received tenant pool status, serving tenant: /dc-1, board: kqprm+/dc-1
2026-01-08T15:25:44.312691Z node 1 :KQP_RESOURCE_MANAGER DEBUG: kqp_rm_service.cpp:923: Don't set KqpProxySharedResources
2026-01-08T15:25:44.312744Z node 1 :KQP_RESOURCE_MANAGER INFO: kqp_rm_service.cpp:939: Send to publish resource usage for reason: tenant updated, payload: NodeId: 1 ResourceManagerActorId { RawX1: 7886758914357752171 RawX2: 9223372041149771361 } Timestamp: 1767885944 AvailableComputeActors: 100 UsedMemory: 0 TotalMemory: 1000 Memory { Pool: 1 Available: 1000 } ExecutionUnits: 100
2026-01-08T15:25:44.312846Z node 1 :KQP_RESOURCE_MANAGER INFO: kqp_resource_info_exchanger.cpp:462: Received tenant pool status for exchanger, serving tenant: /dc-1, board: kqpexch+/dc-1
2026-01-08T15:25:44.313346Z node 1 :KQP_RESOURCE_MANAGER DEBUG: kqp_resource_info_exchanger.cpp:476: Get board info from subscriber, serving tenant: /dc-1, board: kqpexch+/dc-1, with size: 0
2026-01-08T15:25:44.313616Z node 1 :KQP_RESOURCE_MANAGER DEBUG: kqp_resource_info_exchanger.cpp:498: Get board info update from subscriber, serving tenant: /dc-1, board: kqpexch+/dc-1, with size: 1
2026-01-08T15:25:44.313806Z node 1 :KQP_RESOURCE_MANAGER DEBUG: kqp_resource_info_exchanger.cpp:498: Get board info update from subscriber, serving tenant: /dc-1, board: kqpexch+/dc-1, with size: 1
2026-01-08T15:25:44.314217Z node 2 :KQP_RESOURCE_MANAGER DEBUG: kqp_resource_info_exchanger.cpp:476: Get board info from subscriber, serving tenant: /dc-1, board: kqpexch+/dc-1, with size: 1
2026-01-08T15:25:44.314423Z node 2 :KQP_RESOURCE_MANAGER DEBUG: kqp_resource_info_exchanger.cpp:498: Get board info update from subscriber, serving tenant: /dc-1, board: kqpexch+/dc-1, with size: 2
2026-01-08T15:25:44.314588Z node 2 :KQP_RESOURCE_MANAGER DEBUG: kqp_resource_info_exchanger.cpp:523: Get resources info from node: 1
2026-01-08T15:25:44.314788Z node 1 :KQP_RESOURCE_MANAGER DEBUG: kqp_resource_info_exchanger.cpp:523: Get resources info from node: 2
2026-01-08T15:25:44.314945Z node 2 :KQP_RESOURCE_MANAGER DEBUG: kqp_resource_info_exchanger.cpp:523: Get resources info from node: 1
2026-01-08T15:25:44.315029Z node 1 :KQP_RESOURCE_MANAGER DEBUG: kqp_resource_info_exchanger.cpp:523: Get resources info from node: 2
2026-01-08T15:25:44.317494Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:393: Submitted new kqp_query task kqp-1-1-1 (1 by [1:470:2350]) priority=0 resources={0, 100}
2026-01-08T15:25:44.317574Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:679: Assigning waiting task kqp-1-1-1 (1 by [1:470:2350]) to queue queue_kqp_resource_manager
2026-01-08T15:25:44.317653Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:637: Allocate resources {0, 100} for task kqp-1-1-1 (1 by [1:470:2350]) from queue queue_kqp_resource_manager
2026-01-08T15:25:44.317705Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:679: Assigning in-fly task kqp-1-1-1 (1 by [1:470:2350]) to queue queue_kqp_resource_manager
2026-01-08T15:25:44.317756Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:711: Updated planned resource usage for queue queue_kqp_resource_manager from 0.000000 to 0.250000 (insert task kqp-1-1-1 (1 by [1:470:2350]))
2026-01-08T15:25:44.317950Z node 1 :KQP_RESOURCE_MANAGER DEBUG: kqp_rm_service.cpp:351: TxId: 1, taskId: 1. Allocated TKqpResourcesRequest{ MemoryPool: 1, Memory: 100ExternalMemory: 0 }
2026-01-08T15:25:44.318145Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:441: Update task kqp-1-1-1 (1 by [1:470:2350]) (priority=0 type=kqp_query resources={0, 30} resubmit=0)
2026-01-08T15:25:44.318186Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:679: Assigning in-fly task kqp-1-1-1 (1 by [1:470:2350]) to queue queue_kqp_resource_manager
2026-01-08T15:25:44.318226Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:711: Updated planned resource usage for queue queue_kqp_resource_manager from 0.000000 to 0.075000 (insert task kqp-1-1-1 (1 by [1:470:2350]))
2026-01-08T15:25:44.318274Z node 1 :KQP_RESOURCE_MANAGER DEBUG: kqp_rm_service.cpp:404: TxId: 1, taskId: 1. Released resources, Memory: 70, Free Tier: 0, ExecutionUnits: 0.
------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/rm_service/ut/unittest >> KqpRm::NotEnoughExecutionUnits [GOOD]
Test command err:
2026-01-08T15:25:44.574991Z node 2 :BS_PDISK WARN: {BPD01@blobstorage_pdisk_blockdevice_async.cpp:924} Warning# got EIoResult::FileOpenError from IoContext->Setup PDiskId# 1000
2026-01-08T15:25:44.575437Z node 2 :BS_PDISK CRIT: {BPD39@blobstorage_pdisk_impl.cpp:2980} BlockDevice initialization error Details# Can't open file "/home/runner/.ya/build/build_root/663n/00317f/r3tmp/tmp3NheRC/pdisk_1.dat": unknown reason, errno# 0. PDiskId# 1000
2026-01-08T15:25:44.577428Z node 2 :BS_PDISK CRIT: {BPD01@blobstorage_pdisk_actor.cpp:267} PDiskId# 1000 bootstrapped to the StateError, reason# Can't open file "/home/runner/.ya/build/build_root/663n/00317f/r3tmp/tmp3NheRC/pdisk_1.dat": unknown reason, errno# 0. Can not be initialized Config: {TPDiskConfig Path# "/home/runner/.ya/build/build_root/663n/00317f/r3tmp/tmp3NheRC/pdisk_1.dat" ExpectedPath# "" ExpectedSerial# "" PDiskGuid# 7348534096962372348 PDiskId# 1000 PDiskCategory# {Type# DEVICE_TYPE_ROT Kind# 0} MetadataOnly# 0 StartOwnerRound# 2 SectorMap# true EnableSectorEncryption # 0 ChunkSize# 134217728 SectorSize# 4096 StatisticsUpdateIntervalMs# 1000 SchedulerCfg# {TPDiskSchedulerConfig BytesSchedulerWeight# 1 LogWeight# 1 FreshWeight# 2 CompWeight# 7 SyncLogWeight# 15 HugeWeight# 2 FastReadWeight# 1 OtherReadWeight# 1 LoadWeight# 2 LowReadWeight# 1 MaxChunkReadsPerCycle# 16 MaxChunkReadsDurationPerCycleMs# 0.25 MaxChunkWritesPerCycle# 8 MaxChunkWritesDurationPerCycleMs# 1} MinLogChunksTotal# 4 MaxLogChunksPerOwnerMultiplier# 5 MaxLogChunksPerOwnerDivisor# 4 SortFreeChunksPerItems# 100 GetDriveDataSwitch# DoNotTouch WriteCacheSwitch# DoNotTouch DriveModelSeekTimeNs# 8000000 DriveModelSpeedBps# 127000000 DriveModelSpeedBpsMin# 135000000 DriveModelSpeedBpsMax# 200000000 DriveModelBulkWrieBlockSize# 2097152 DriveModelTrimSpeedBps# 0 ReorderingMs# 50 DeviceInFlight# 4 CostLimitNs# 50000000 BufferPoolBufferSizeBytes# 524288 BufferPoolBufferCount# 256 MaxQueuedCompletionActions# 128 ExpectedSlotCount# 0 SlotSizeInUnits# 0 ReserveLogChunksMultiplier# 56 InsaneLogChunksMultiplier# 40 RedLogChunksMultiplier# 30 OrangeLogChunksMultiplier# 20 WarningLogChunksMultiplier# 4 YellowLogChunksMultiplier# 4 MaxMetadataMegabytes# 32 SpaceColorBorder# GREEN CompletionThreadsCount# 1 UseNoopScheduler# false PlainDataChunks# 0 SeparateHugePriorities# 0} PDiskId# 1000
2026-01-08T15:25:44.695669Z node 2 :RESOURCE_BROKER DEBUG: resource_broker.cpp:1115: TResourceBrokerActor bootstrap
2026-01-08T15:25:44.695980Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:1115: TResourceBrokerActor bootstrap
2026-01-08T15:25:44.736051Z node 2 :KQP_RESOURCE_MANAGER DEBUG: kqp_rm_service.cpp:609: Start KqpResourceManagerActor at [2:472:2102] with ResourceBroker at [2:442:2101]
2026-01-08T15:25:44.736167Z node 2 :KQP_RESOURCE_MANAGER DEBUG: kqp_resource_info_exchanger.cpp:121: Start KqpResourceInfoExchangerActor at [2:473:2103]
2026-01-08T15:25:44.736230Z node 1 :KQP_RESOURCE_MANAGER DEBUG: kqp_rm_service.cpp:609: Start KqpResourceManagerActor at [1:470:2350] with ResourceBroker at [1:441:2331]
2026-01-08T15:25:44.736286Z node 1 :KQP_RESOURCE_MANAGER DEBUG: kqp_resource_info_exchanger.cpp:121: Start KqpResourceInfoExchangerActor at [1:471:2351]
2026-01-08T15:25:44.736362Z node 2 :KQP_RESOURCE_MANAGER CRIT: kqp_rm_service.cpp:806: Failed to deliver subscription request to config dispatcher
2026-01-08T15:25:44.736406Z node 2 :KQP_RESOURCE_MANAGER CRIT: kqp_resource_info_exchanger.cpp:410: Failed to deliver subscription request to config dispatcher.
2026-01-08T15:25:44.736450Z node 1 :KQP_RESOURCE_MANAGER CRIT: kqp_rm_service.cpp:806: Failed to deliver subscription request to config dispatcher
2026-01-08T15:25:44.736469Z node 1 :KQP_RESOURCE_MANAGER CRIT: kqp_resource_info_exchanger.cpp:410: Failed to deliver subscription request to config dispatcher.
2026-01-08T15:25:44.736621Z node 1 :KQP_RESOURCE_MANAGER DEBUG: kqp_rm_service.cpp:923: Don't set KqpProxySharedResources
2026-01-08T15:25:44.785628Z node 1 :KQP_RESOURCE_MANAGER INFO: kqp_rm_service.cpp:939: Send to publish resource usage for reason: data_center update, payload: NodeId: 1 ResourceManagerActorId { RawX1: 7886758914357752171 RawX2: 9223372041149771361 } Timestamp: 1767885944 AvailableComputeActors: 100 UsedMemory: 0 TotalMemory: 1000 Memory { Pool: 1 Available: 1000 } ExecutionUnits: 100
2026-01-08T15:25:44.785875Z node 2 :KQP_RESOURCE_MANAGER DEBUG: kqp_rm_service.cpp:923: Don't set KqpProxySharedResources
2026-01-08T15:25:44.785952Z node 2 :KQP_RESOURCE_MANAGER INFO: kqp_rm_service.cpp:939: Send to publish resource usage for reason: data_center update, payload: NodeId: 2 ResourceManagerActorId { RawX1: 7886758914357752171 RawX2: 9223372045444738657 } Timestamp: 1767885944 AvailableComputeActors: 100 UsedMemory: 0 TotalMemory: 1000 Memory { Pool: 1 Available: 1000 } ExecutionUnits: 100
2026-01-08T15:25:44.786185Z node 2 :KQP_RESOURCE_MANAGER INFO: kqp_resource_info_exchanger.cpp:462: Received tenant pool status for exchanger, serving tenant: /dc-1, board: kqpexch+/dc-1
2026-01-08T15:25:44.786305Z node 2 :KQP_RESOURCE_MANAGER INFO: kqp_rm_service.cpp:763: Received tenant pool status, serving tenant: /dc-1, board: kqprm+/dc-1
2026-01-08T15:25:44.794419Z node 2 :KQP_RESOURCE_MANAGER DEBUG: kqp_rm_service.cpp:923: Don't set KqpProxySharedResources
2026-01-08T15:25:44.794553Z node 2 :KQP_RESOURCE_MANAGER INFO: kqp_rm_service.cpp:939: Send to publish resource usage for reason: tenant updated, payload: NodeId: 2 ResourceManagerActorId { RawX1: 7886758914357752171 RawX2: 9223372045444738657 } Timestamp: 1767885944 AvailableComputeActors: 100 UsedMemory: 0 TotalMemory: 1000 Memory { Pool: 1 Available: 1000 } ExecutionUnits: 100
2026-01-08T15:25:44.794809Z node 1 :KQP_RESOURCE_MANAGER INFO: kqp_rm_service.cpp:763: Received tenant pool status, serving tenant: /dc-1, board: kqprm+/dc-1
2026-01-08T15:25:44.794836Z node 1 :KQP_RESOURCE_MANAGER DEBUG: kqp_rm_service.cpp:923: Don't set KqpProxySharedResources
2026-01-08T15:25:44.794891Z node 1 :KQP_RESOURCE_MANAGER INFO: kqp_rm_service.cpp:939: Send to publish resource usage for reason: tenant updated, payload: NodeId: 1 ResourceManagerActorId { RawX1: 7886758914357752171 RawX2: 9223372041149771361 } Timestamp: 1767885944 AvailableComputeActors: 100 UsedMemory: 0 TotalMemory: 1000 Memory { Pool: 1 Available: 1000 } ExecutionUnits: 100
2026-01-08T15:25:44.795001Z node 1 :KQP_RESOURCE_MANAGER INFO: kqp_resource_info_exchanger.cpp:462: Received tenant pool status for exchanger, serving tenant: /dc-1, board: kqpexch+/dc-1
2026-01-08T15:25:44.795527Z node 1 :KQP_RESOURCE_MANAGER DEBUG: kqp_resource_info_exchanger.cpp:476: Get board info from subscriber, serving tenant: /dc-1, board: kqpexch+/dc-1, with size: 0
2026-01-08T15:25:44.795781Z node 1 :KQP_RESOURCE_MANAGER DEBUG: kqp_resource_info_exchanger.cpp:498: Get board info update from subscriber, serving tenant: /dc-1, board: kqpexch+/dc-1, with size: 1
2026-01-08T15:25:44.795969Z node 1 :KQP_RESOURCE_MANAGER DEBUG: kqp_resource_info_exchanger.cpp:498: Get board info update from subscriber, serving tenant: /dc-1, board: kqpexch+/dc-1, with size: 1
2026-01-08T15:25:44.796358Z node 2 :KQP_RESOURCE_MANAGER DEBUG: kqp_resource_info_exchanger.cpp:476: Get board info from subscriber, serving tenant: /dc-1, board: kqpexch+/dc-1, with size: 1
2026-01-08T15:25:44.796530Z node 2 :KQP_RESOURCE_MANAGER DEBUG: kqp_resource_info_exchanger.cpp:498: Get board info update from subscriber, serving tenant: /dc-1, board: kqpexch+/dc-1, with size: 2
2026-01-08T15:25:44.796656Z node 2 :KQP_RESOURCE_MANAGER DEBUG: kqp_resource_info_exchanger.cpp:523: Get resources info from node: 1
2026-01-08T15:25:44.796859Z node 1 :KQP_RESOURCE_MANAGER DEBUG: kqp_resource_info_exchanger.cpp:523: Get resources info from node: 2
2026-01-08T15:25:44.797019Z node 2 :KQP_RESOURCE_MANAGER DEBUG: kqp_resource_info_exchanger.cpp:523: Get resources info from node: 1
2026-01-08T15:25:44.797094Z node 1 :KQP_RESOURCE_MANAGER DEBUG: kqp_resource_info_exchanger.cpp:523: Get resources info from node: 2
|85.0%| [TM] {BAZEL_UPLOAD} ydb/core/kqp/rm_service/ut/unittest
|85.0%| [TM] {BAZEL_UPLOAD} ydb/core/kqp/rm_service/ut/unittest
>> KqpRm::NodesMembershipByExchanger [GOOD]
------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/rm_service/ut/unittest >> KqpRm::SingleTask [GOOD]
Test command err:
2026-01-08T15:25:44.507523Z node 2 :BS_PDISK WARN: {BPD01@blobstorage_pdisk_blockdevice_async.cpp:924} Warning# got EIoResult::FileOpenError from IoContext->Setup PDiskId# 1000
2026-01-08T15:25:44.508060Z node 2 :BS_PDISK CRIT: {BPD39@blobstorage_pdisk_impl.cpp:2980} BlockDevice initialization error Details# Can't open file "/home/runner/.ya/build/build_root/663n/003181/r3tmp/tmpLMmdSF/pdisk_1.dat": unknown reason, errno# 0. PDiskId# 1000
2026-01-08T15:25:44.508602Z node 2 :BS_PDISK CRIT: {BPD01@blobstorage_pdisk_actor.cpp:267} PDiskId# 1000 bootstrapped to the StateError, reason# Can't open file "/home/runner/.ya/build/build_root/663n/003181/r3tmp/tmpLMmdSF/pdisk_1.dat": unknown reason, errno# 0. Can not be initialized Config: {TPDiskConfig Path# "/home/runner/.ya/build/build_root/663n/003181/r3tmp/tmpLMmdSF/pdisk_1.dat" ExpectedPath# "" ExpectedSerial# "" PDiskGuid# 1705042517656718275 PDiskId# 1000 PDiskCategory# {Type# DEVICE_TYPE_ROT Kind# 0} MetadataOnly# 0 StartOwnerRound# 2 SectorMap# true EnableSectorEncryption # 0 ChunkSize# 134217728 SectorSize# 4096 StatisticsUpdateIntervalMs# 1000 SchedulerCfg# {TPDiskSchedulerConfig BytesSchedulerWeight# 1 LogWeight# 1 FreshWeight# 2 CompWeight# 7 SyncLogWeight# 15 HugeWeight# 2 FastReadWeight# 1 OtherReadWeight# 1 LoadWeight# 2 LowReadWeight# 1 MaxChunkReadsPerCycle# 16 MaxChunkReadsDurationPerCycleMs# 0.25 MaxChunkWritesPerCycle# 8 MaxChunkWritesDurationPerCycleMs# 1} MinLogChunksTotal# 4 MaxLogChunksPerOwnerMultiplier# 5 MaxLogChunksPerOwnerDivisor# 4 SortFreeChunksPerItems# 100 GetDriveDataSwitch# DoNotTouch WriteCacheSwitch# DoNotTouch DriveModelSeekTimeNs# 8000000 DriveModelSpeedBps# 127000000 DriveModelSpeedBpsMin# 135000000 DriveModelSpeedBpsMax# 200000000 DriveModelBulkWrieBlockSize# 2097152 DriveModelTrimSpeedBps# 0 ReorderingMs# 50 DeviceInFlight# 4 CostLimitNs# 50000000 BufferPoolBufferSizeBytes# 524288 BufferPoolBufferCount# 256 MaxQueuedCompletionActions# 128 ExpectedSlotCount# 0 SlotSizeInUnits# 0 ReserveLogChunksMultiplier# 56 InsaneLogChunksMultiplier# 40 RedLogChunksMultiplier# 30 OrangeLogChunksMultiplier# 20 WarningLogChunksMultiplier# 4 YellowLogChunksMultiplier# 4 MaxMetadataMegabytes# 32 SpaceColorBorder# GREEN CompletionThreadsCount# 1 UseNoopScheduler# false PlainDataChunks# 0 SeparateHugePriorities# 0} PDiskId# 1000
2026-01-08T15:25:44.576862Z node 2 :RESOURCE_BROKER DEBUG: resource_broker.cpp:1115: TResourceBrokerActor bootstrap
2026-01-08T15:25:44.577197Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:1115: TResourceBrokerActor bootstrap
2026-01-08T15:25:44.607443Z node 2 :KQP_RESOURCE_MANAGER DEBUG: kqp_rm_service.cpp:609: Start KqpResourceManagerActor at [2:472:2102] with ResourceBroker at [2:442:2101] 2026-01-08T15:25:44.607579Z node 2 :KQP_RESOURCE_MANAGER DEBUG: kqp_resource_info_exchanger.cpp:121: Start KqpResourceInfoExchangerActor at [2:473:2103] 2026-01-08T15:25:44.607642Z node 1 :KQP_RESOURCE_MANAGER DEBUG: kqp_rm_service.cpp:609: Start KqpResourceManagerActor at [1:470:2350] with ResourceBroker at [1:441:2331] 2026-01-08T15:25:44.607696Z node 1 :KQP_RESOURCE_MANAGER DEBUG: kqp_resource_info_exchanger.cpp:121: Start KqpResourceInfoExchangerActor at [1:471:2351] 2026-01-08T15:25:44.607779Z node 2 :KQP_RESOURCE_MANAGER CRIT: kqp_rm_service.cpp:806: Failed to deliver subscription request to config dispatcher 2026-01-08T15:25:44.607830Z node 2 :KQP_RESOURCE_MANAGER CRIT: kqp_resource_info_exchanger.cpp:410: Failed to deliver subscription request to config dispatcher. 2026-01-08T15:25:44.607881Z node 1 :KQP_RESOURCE_MANAGER CRIT: kqp_rm_service.cpp:806: Failed to deliver subscription request to config dispatcher 2026-01-08T15:25:44.607901Z node 1 :KQP_RESOURCE_MANAGER CRIT: kqp_resource_info_exchanger.cpp:410: Failed to deliver subscription request to config dispatcher. 2026-01-08T15:25:44.608068Z node 1 :KQP_RESOURCE_MANAGER DEBUG: kqp_rm_service.cpp:923: Don't set KqpProxySharedResources 2026-01-08T15:25:44.652689Z node 1 :KQP_RESOURCE_MANAGER INFO: kqp_rm_service.cpp:939: Send to publish resource usage for reason: data_center update, payload: NodeId: 1 ResourceManagerActorId { RawX1: 7886758914357752171 RawX2: 9223372041149771361 } Timestamp: 1767885944 AvailableComputeActors: 100 UsedMemory: 0 TotalMemory: 1000 Memory { Pool: 1 Available: 1000 } ExecutionUnits: 100 2026-01-08T15:25:44.652992Z node 2 :KQP_RESOURCE_MANAGER DEBUG: kqp_rm_service.cpp:923: Don't set KqpProxySharedResources 2026-01-08T15:25:44.653082Z node 2 :KQP_RESOURCE_MANAGER INFO: kqp_rm_service.cpp:939: Send to publish resource usage for reason: data_center update, payload: NodeId: 2 ResourceManagerActorId { RawX1: 7886758914357752171 RawX2: 9223372045444738657 } Timestamp: 1767885944 AvailableComputeActors: 100 UsedMemory: 0 TotalMemory: 1000 Memory { Pool: 1 Available: 1000 } ExecutionUnits: 100 2026-01-08T15:25:44.653349Z node 2 :KQP_RESOURCE_MANAGER INFO: kqp_resource_info_exchanger.cpp:462: Received tenant pool status for exchanger, serving tenant: /dc-1, board: kqpexch+/dc-1 2026-01-08T15:25:44.653493Z node 2 :KQP_RESOURCE_MANAGER INFO: kqp_rm_service.cpp:763: Received tenant pool status, serving tenant: /dc-1, board: kqprm+/dc-1 2026-01-08T15:25:44.653528Z node 2 :KQP_RESOURCE_MANAGER DEBUG: kqp_rm_service.cpp:923: Don't set KqpProxySharedResources 2026-01-08T15:25:44.653616Z node 2 :KQP_RESOURCE_MANAGER INFO: kqp_rm_service.cpp:939: Send to publish resource usage for reason: tenant updated, payload: NodeId: 2 ResourceManagerActorId { RawX1: 7886758914357752171 RawX2: 9223372045444738657 } Timestamp: 1767885944 AvailableComputeActors: 100 UsedMemory: 0 TotalMemory: 1000 Memory { Pool: 1 Available: 1000 } ExecutionUnits: 100 2026-01-08T15:25:44.653857Z node 1 :KQP_RESOURCE_MANAGER INFO: kqp_rm_service.cpp:763: Received tenant pool status, serving tenant: /dc-1, board: kqprm+/dc-1 2026-01-08T15:25:44.653887Z node 1 :KQP_RESOURCE_MANAGER DEBUG: kqp_rm_service.cpp:923: Don't set KqpProxySharedResources 2026-01-08T15:25:44.653952Z node 1 :KQP_RESOURCE_MANAGER INFO: kqp_rm_service.cpp:939: Send to publish resource usage for reason: tenant 
updated, payload: NodeId: 1 ResourceManagerActorId { RawX1: 7886758914357752171 RawX2: 9223372041149771361 } Timestamp: 1767885944 AvailableComputeActors: 100 UsedMemory: 0 TotalMemory: 1000 Memory { Pool: 1 Available: 1000 } ExecutionUnits: 100 2026-01-08T15:25:44.654051Z node 1 :KQP_RESOURCE_MANAGER INFO: kqp_resource_info_exchanger.cpp:462: Received tenant pool status for exchanger, serving tenant: /dc-1, board: kqpexch+/dc-1 2026-01-08T15:25:44.662375Z node 1 :KQP_RESOURCE_MANAGER DEBUG: kqp_resource_info_exchanger.cpp:476: Get board info from subscriber, serving tenant: /dc-1, board: kqpexch+/dc-1, with size: 0 2026-01-08T15:25:44.662735Z node 1 :KQP_RESOURCE_MANAGER DEBUG: kqp_resource_info_exchanger.cpp:498: Get board info update from subscriber, serving tenant: /dc-1, board: kqpexch+/dc-1, with size: 1 2026-01-08T15:25:44.662989Z node 1 :KQP_RESOURCE_MANAGER DEBUG: kqp_resource_info_exchanger.cpp:498: Get board info update from subscriber, serving tenant: /dc-1, board: kqpexch+/dc-1, with size: 1 2026-01-08T15:25:44.663437Z node 2 :KQP_RESOURCE_MANAGER DEBUG: kqp_resource_info_exchanger.cpp:476: Get board info from subscriber, serving tenant: /dc-1, board: kqpexch+/dc-1, with size: 1 2026-01-08T15:25:44.663639Z node 2 :KQP_RESOURCE_MANAGER DEBUG: kqp_resource_info_exchanger.cpp:498: Get board info update from subscriber, serving tenant: /dc-1, board: kqpexch+/dc-1, with size: 2 2026-01-08T15:25:44.663805Z node 2 :KQP_RESOURCE_MANAGER DEBUG: kqp_resource_info_exchanger.cpp:523: Get resources info from node: 1 2026-01-08T15:25:44.664028Z node 1 :KQP_RESOURCE_MANAGER DEBUG: kqp_resource_info_exchanger.cpp:523: Get resources info from node: 2 2026-01-08T15:25:44.664204Z node 2 :KQP_RESOURCE_MANAGER DEBUG: kqp_resource_info_exchanger.cpp:523: Get resources info from node: 1 2026-01-08T15:25:44.664331Z node 1 :KQP_RESOURCE_MANAGER DEBUG: kqp_resource_info_exchanger.cpp:523: Get resources info from node: 2 2026-01-08T15:25:44.670971Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:393: Submitted new kqp_query task kqp-1-2-1 (1 by [1:470:2350]) priority=0 resources={0, 100} 2026-01-08T15:25:44.671056Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:679: Assigning waiting task kqp-1-2-1 (1 by [1:470:2350]) to queue queue_kqp_resource_manager 2026-01-08T15:25:44.671124Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:637: Allocate resources {0, 100} for task kqp-1-2-1 (1 by [1:470:2350]) from queue queue_kqp_resource_manager 2026-01-08T15:25:44.671171Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:679: Assigning in-fly task kqp-1-2-1 (1 by [1:470:2350]) to queue queue_kqp_resource_manager 2026-01-08T15:25:44.671216Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:711: Updated planned resource usage for queue queue_kqp_resource_manager from 0.000000 to 0.250000 (insert task kqp-1-2-1 (1 by [1:470:2350])) 2026-01-08T15:25:44.671408Z node 1 :KQP_RESOURCE_MANAGER DEBUG: kqp_rm_service.cpp:351: TxId: 1, taskId: 2. Allocated TKqpResourcesRequest{ MemoryPool: 1, Memory: 100ExternalMemory: 0 } 2026-01-08T15:25:44.671610Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:528: Finish task kqp-1-2-1 (1 by [1:470:2350]) (release resources {0, 100}) 2026-01-08T15:25:44.671654Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:577: Updated planned resource usage for queue queue_kqp_resource_manager from 0.250000 to 0.000000 (remove task kqp-1-2-1 (1 by [1:470:2350])) 2026-01-08T15:25:44.671700Z node 1 :KQP_RESOURCE_MANAGER DEBUG: kqp_rm_service.cpp:404: TxId: 1, taskId: 2. 
Released resources, Memory: 100, Free Tier: 0, ExecutionUnits: 0. ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/rm_service/ut/unittest >> KqpRm::ResourceBrokerNotEnoughResources [GOOD] Test command err: 2026-01-08T15:25:43.898682Z node 2 :BS_PDISK WARN: {BPD01@blobstorage_pdisk_blockdevice_async.cpp:924} Warning# got EIoResult::FileOpenError from IoContext->Setup PDiskId# 1000 2026-01-08T15:25:43.899140Z node 2 :BS_PDISK CRIT: {BPD39@blobstorage_pdisk_impl.cpp:2980} BlockDevice initialization error Details# Can't open file "/home/runner/.ya/build/build_root/663n/00317a/r3tmp/tmpyupRzD/pdisk_1.dat": unknown reason, errno# 0. PDiskId# 1000 2026-01-08T15:25:43.899632Z node 2 :BS_PDISK CRIT: {BPD01@blobstorage_pdisk_actor.cpp:267} PDiskId# 1000 bootstrapped to the StateError, reason# Can't open file "/home/runner/.ya/build/build_root/663n/00317a/r3tmp/tmpyupRzD/pdisk_1.dat": unknown reason, errno# 0. Can not be initialized Config: {TPDiskConfig Path# "/home/runner/.ya/build/build_root/663n/00317a/r3tmp/tmpyupRzD/pdisk_1.dat" ExpectedPath# "" ExpectedSerial# "" PDiskGuid# 15595878277872351657 PDiskId# 1000 PDiskCategory# {Type# DEVICE_TYPE_ROT Kind# 0} MetadataOnly# 0 StartOwnerRound# 2 SectorMap# true EnableSectorEncryption # 0 ChunkSize# 134217728 SectorSize# 4096 StatisticsUpdateIntervalMs# 1000 SchedulerCfg# {TPDiskSchedulerConfig BytesSchedulerWeight# 1 LogWeight# 1 FreshWeight# 2 CompWeight# 7 SyncLogWeight# 15 HugeWeight# 2 FastReadWeight# 1 OtherReadWeight# 1 LoadWeight# 2 LowReadWeight# 1 MaxChunkReadsPerCycle# 16 MaxChunkReadsDurationPerCycleMs# 0.25 MaxChunkWritesPerCycle# 8 MaxChunkWritesDurationPerCycleMs# 1} MinLogChunksTotal# 4 MaxLogChunksPerOwnerMultiplier# 5 MaxLogChunksPerOwnerDivisor# 4 SortFreeChunksPerItems# 100 GetDriveDataSwitch# DoNotTouch WriteCacheSwitch# DoNotTouch DriveModelSeekTimeNs# 8000000 DriveModelSpeedBps# 127000000 DriveModelSpeedBpsMin# 135000000 DriveModelSpeedBpsMax# 200000000 DriveModelBulkWrieBlockSize# 2097152 DriveModelTrimSpeedBps# 0 ReorderingMs# 50 DeviceInFlight# 4 CostLimitNs# 50000000 BufferPoolBufferSizeBytes# 524288 BufferPoolBufferCount# 256 MaxQueuedCompletionActions# 128 ExpectedSlotCount# 0 SlotSizeInUnits# 0 ReserveLogChunksMultiplier# 56 InsaneLogChunksMultiplier# 40 RedLogChunksMultiplier# 30 OrangeLogChunksMultiplier# 20 WarningLogChunksMultiplier# 4 YellowLogChunksMultiplier# 4 MaxMetadataMegabytes# 32 SpaceColorBorder# GREEN CompletionThreadsCount# 1 UseNoopScheduler# false PlainDataChunks# 0 SeparateHugePriorities# 0} PDiskId# 1000 2026-01-08T15:25:43.968460Z node 2 :RESOURCE_BROKER DEBUG: resource_broker.cpp:1115: TResourceBrokerActor bootstrap 2026-01-08T15:25:43.968780Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:1115: TResourceBrokerActor bootstrap 2026-01-08T15:25:43.998068Z node 2 :KQP_RESOURCE_MANAGER DEBUG: kqp_rm_service.cpp:609: Start KqpResourceManagerActor at [2:472:2102] with ResourceBroker at [2:442:2101] 2026-01-08T15:25:43.998194Z node 2 :KQP_RESOURCE_MANAGER DEBUG: kqp_resource_info_exchanger.cpp:121: Start KqpResourceInfoExchangerActor at [2:473:2103] 2026-01-08T15:25:43.998252Z node 1 :KQP_RESOURCE_MANAGER DEBUG: kqp_rm_service.cpp:609: Start KqpResourceManagerActor at [1:470:2350] with ResourceBroker at [1:441:2331] 2026-01-08T15:25:43.998305Z node 1 :KQP_RESOURCE_MANAGER DEBUG: kqp_resource_info_exchanger.cpp:121: Start KqpResourceInfoExchangerActor at [1:471:2351] 2026-01-08T15:25:43.998417Z node 2 :KQP_RESOURCE_MANAGER CRIT: kqp_rm_service.cpp:806: Failed to deliver subscription 
request to config dispatcher 2026-01-08T15:25:43.998477Z node 2 :KQP_RESOURCE_MANAGER CRIT: kqp_resource_info_exchanger.cpp:410: Failed to deliver subscription request to config dispatcher. 2026-01-08T15:25:43.998539Z node 1 :KQP_RESOURCE_MANAGER CRIT: kqp_rm_service.cpp:806: Failed to deliver subscription request to config dispatcher 2026-01-08T15:25:43.998561Z node 1 :KQP_RESOURCE_MANAGER CRIT: kqp_resource_info_exchanger.cpp:410: Failed to deliver subscription request to config dispatcher. 2026-01-08T15:25:43.998736Z node 1 :KQP_RESOURCE_MANAGER DEBUG: kqp_rm_service.cpp:923: Don't set KqpProxySharedResources 2026-01-08T15:25:44.024828Z node 1 :KQP_RESOURCE_MANAGER INFO: kqp_rm_service.cpp:939: Send to publish resource usage for reason: data_center update, payload: NodeId: 1 ResourceManagerActorId { RawX1: 7886758914357752171 RawX2: 9223372041149771361 } Timestamp: 1767885943 AvailableComputeActors: 100 UsedMemory: 0 TotalMemory: 100000000 Memory { Pool: 1 Available: 100000000 } ExecutionUnits: 100 2026-01-08T15:25:44.025084Z node 2 :KQP_RESOURCE_MANAGER DEBUG: kqp_rm_service.cpp:923: Don't set KqpProxySharedResources 2026-01-08T15:25:44.025177Z node 2 :KQP_RESOURCE_MANAGER INFO: kqp_rm_service.cpp:939: Send to publish resource usage for reason: data_center update, payload: NodeId: 2 ResourceManagerActorId { RawX1: 7886758914357752171 RawX2: 9223372045444738657 } Timestamp: 1767885943 AvailableComputeActors: 100 UsedMemory: 0 TotalMemory: 1000 Memory { Pool: 1 Available: 1000 } ExecutionUnits: 100 2026-01-08T15:25:44.025415Z node 2 :KQP_RESOURCE_MANAGER INFO: kqp_resource_info_exchanger.cpp:462: Received tenant pool status for exchanger, serving tenant: /dc-1, board: kqpexch+/dc-1 2026-01-08T15:25:44.025563Z node 2 :KQP_RESOURCE_MANAGER INFO: kqp_rm_service.cpp:763: Received tenant pool status, serving tenant: /dc-1, board: kqprm+/dc-1 2026-01-08T15:25:44.025594Z node 2 :KQP_RESOURCE_MANAGER DEBUG: kqp_rm_service.cpp:923: Don't set KqpProxySharedResources 2026-01-08T15:25:44.025695Z node 2 :KQP_RESOURCE_MANAGER INFO: kqp_rm_service.cpp:939: Send to publish resource usage for reason: tenant updated, payload: NodeId: 2 ResourceManagerActorId { RawX1: 7886758914357752171 RawX2: 9223372045444738657 } Timestamp: 1767885943 AvailableComputeActors: 100 UsedMemory: 0 TotalMemory: 1000 Memory { Pool: 1 Available: 1000 } ExecutionUnits: 100 2026-01-08T15:25:44.025871Z node 1 :KQP_RESOURCE_MANAGER INFO: kqp_rm_service.cpp:763: Received tenant pool status, serving tenant: /dc-1, board: kqprm+/dc-1 2026-01-08T15:25:44.025896Z node 1 :KQP_RESOURCE_MANAGER DEBUG: kqp_rm_service.cpp:923: Don't set KqpProxySharedResources 2026-01-08T15:25:44.025953Z node 1 :KQP_RESOURCE_MANAGER INFO: kqp_rm_service.cpp:939: Send to publish resource usage for reason: tenant updated, payload: NodeId: 1 ResourceManagerActorId { RawX1: 7886758914357752171 RawX2: 9223372041149771361 } Timestamp: 1767885943 AvailableComputeActors: 100 UsedMemory: 0 TotalMemory: 100000000 Memory { Pool: 1 Available: 100000000 } ExecutionUnits: 100 2026-01-08T15:25:44.026062Z node 1 :KQP_RESOURCE_MANAGER INFO: kqp_resource_info_exchanger.cpp:462: Received tenant pool status for exchanger, serving tenant: /dc-1, board: kqpexch+/dc-1 2026-01-08T15:25:44.032857Z node 1 :KQP_RESOURCE_MANAGER DEBUG: kqp_resource_info_exchanger.cpp:476: Get board info from subscriber, serving tenant: /dc-1, board: kqpexch+/dc-1, with size: 0 2026-01-08T15:25:44.033174Z node 1 :KQP_RESOURCE_MANAGER DEBUG: kqp_resource_info_exchanger.cpp:498: Get board info update from 
subscriber, serving tenant: /dc-1, board: kqpexch+/dc-1, with size: 1 2026-01-08T15:25:44.033396Z node 1 :KQP_RESOURCE_MANAGER DEBUG: kqp_resource_info_exchanger.cpp:498: Get board info update from subscriber, serving tenant: /dc-1, board: kqpexch+/dc-1, with size: 1 2026-01-08T15:25:44.033861Z node 2 :KQP_RESOURCE_MANAGER DEBUG: kqp_resource_info_exchanger.cpp:476: Get board info from subscriber, serving tenant: /dc-1, board: kqpexch+/dc-1, with size: 1 2026-01-08T15:25:44.034061Z node 2 :KQP_RESOURCE_MANAGER DEBUG: kqp_resource_info_exchanger.cpp:498: Get board info update from subscriber, serving tenant: /dc-1, board: kqpexch+/dc-1, with size: 2 2026-01-08T15:25:44.034265Z node 2 :KQP_RESOURCE_MANAGER DEBUG: kqp_resource_info_exchanger.cpp:523: Get resources info from node: 1 2026-01-08T15:25:44.034527Z node 1 :KQP_RESOURCE_MANAGER DEBUG: kqp_resource_info_exchanger.cpp:523: Get resources info from node: 2 2026-01-08T15:25:44.034709Z node 2 :KQP_RESOURCE_MANAGER DEBUG: kqp_resource_info_exchanger.cpp:523: Get resources info from node: 1 2026-01-08T15:25:44.034789Z node 1 :KQP_RESOURCE_MANAGER DEBUG: kqp_resource_info_exchanger.cpp:523: Get resources info from node: 2 2026-01-08T15:25:44.037253Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:393: Submitted new kqp_query task kqp-1-2-1 (1 by [1:470:2350]) priority=0 resources={0, 1000} 2026-01-08T15:25:44.037313Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:679: Assigning waiting task kqp-1-2-1 (1 by [1:470:2350]) to queue queue_kqp_resource_manager 2026-01-08T15:25:44.037374Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:637: Allocate resources {0, 1000} for task kqp-1-2-1 (1 by [1:470:2350]) from queue queue_kqp_resource_manager 2026-01-08T15:25:44.037417Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:679: Assigning in-fly task kqp-1-2-1 (1 by [1:470:2350]) to queue queue_kqp_resource_manager 2026-01-08T15:25:44.037456Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:711: Updated planned resource usage for queue queue_kqp_resource_manager from 0.000000 to 2.500000 (insert task kqp-1-2-1 (1 by [1:470:2350])) 2026-01-08T15:25:44.037627Z node 1 :KQP_RESOURCE_MANAGER DEBUG: kqp_rm_service.cpp:351: TxId: 1, taskId: 2. Allocated TKqpResourcesRequest{ MemoryPool: 1, Memory: 1000ExternalMemory: 0 } 2026-01-08T15:25:44.037698Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:393: Submitted new kqp_query task kqp-1-2-2 (2 by [1:470:2350]) priority=0 resources={0, 100000} 2026-01-08T15:25:44.037746Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:679: Assigning waiting task kqp-1-2-2 (2 by [1:470:2350]) to queue queue_kqp_resource_manager 2026-01-08T15:25:44.037792Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:619: Not enough resources to start task kqp-1-2-2 (2 by [1:470:2350]) 2026-01-08T15:25:44.037823Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:499: Removing task kqp-1-2-2 (2 by [1:470:2350]) 2026-01-08T15:25:44.037896Z node 1 :KQP_RESOURCE_MANAGER NOTICE: kqp_rm_service.cpp:338: TxId: 1, taskId: 2. Not enough memory for query, requested: 100000. 
TxResourcesInfo { TxId: 1, Database: , tx initially granted memory: 0B, tx total memory allocations: 1000B, tx largest successful memory allocation: 1000B, tx last failed memory allocation: 0B, tx total execution units: 0, started at: 2026-01-08T15:25:44.037177Z } |85.0%| [TM] {BAZEL_UPLOAD} ydb/core/kqp/rm_service/ut/unittest |85.0%| [TM] {BAZEL_UPLOAD} ydb/core/kqp/rm_service/ut/unittest ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/rm_service/ut/unittest >> KqpRm::ManyTasks [GOOD] Test command err: 2026-01-08T15:25:44.383318Z node 2 :BS_PDISK WARN: {BPD01@blobstorage_pdisk_blockdevice_async.cpp:924} Warning# got EIoResult::FileOpenError from IoContext->Setup PDiskId# 1000 2026-01-08T15:25:44.383826Z node 2 :BS_PDISK CRIT: {BPD39@blobstorage_pdisk_impl.cpp:2980} BlockDevice initialization error Details# Can't open file "/home/runner/.ya/build/build_root/663n/00318b/r3tmp/tmpLZXXAa/pdisk_1.dat": unknown reason, errno# 0. PDiskId# 1000 2026-01-08T15:25:44.384357Z node 2 :BS_PDISK CRIT: {BPD01@blobstorage_pdisk_actor.cpp:267} PDiskId# 1000 bootstrapped to the StateError, reason# Can't open file "/home/runner/.ya/build/build_root/663n/00318b/r3tmp/tmpLZXXAa/pdisk_1.dat": unknown reason, errno# 0. Can not be initialized Config: {TPDiskConfig Path# "/home/runner/.ya/build/build_root/663n/00318b/r3tmp/tmpLZXXAa/pdisk_1.dat" ExpectedPath# "" ExpectedSerial# "" PDiskGuid# 9911589682238147493 PDiskId# 1000 PDiskCategory# {Type# DEVICE_TYPE_ROT Kind# 0} MetadataOnly# 0 StartOwnerRound# 2 SectorMap# true EnableSectorEncryption # 0 ChunkSize# 134217728 SectorSize# 4096 StatisticsUpdateIntervalMs# 1000 SchedulerCfg# {TPDiskSchedulerConfig BytesSchedulerWeight# 1 LogWeight# 1 FreshWeight# 2 CompWeight# 7 SyncLogWeight# 15 HugeWeight# 2 FastReadWeight# 1 OtherReadWeight# 1 LoadWeight# 2 LowReadWeight# 1 MaxChunkReadsPerCycle# 16 MaxChunkReadsDurationPerCycleMs# 0.25 MaxChunkWritesPerCycle# 8 MaxChunkWritesDurationPerCycleMs# 1} MinLogChunksTotal# 4 MaxLogChunksPerOwnerMultiplier# 5 MaxLogChunksPerOwnerDivisor# 4 SortFreeChunksPerItems# 100 GetDriveDataSwitch# DoNotTouch WriteCacheSwitch# DoNotTouch DriveModelSeekTimeNs# 8000000 DriveModelSpeedBps# 127000000 DriveModelSpeedBpsMin# 135000000 DriveModelSpeedBpsMax# 200000000 DriveModelBulkWrieBlockSize# 2097152 DriveModelTrimSpeedBps# 0 ReorderingMs# 50 DeviceInFlight# 4 CostLimitNs# 50000000 BufferPoolBufferSizeBytes# 524288 BufferPoolBufferCount# 256 MaxQueuedCompletionActions# 128 ExpectedSlotCount# 0 SlotSizeInUnits# 0 ReserveLogChunksMultiplier# 56 InsaneLogChunksMultiplier# 40 RedLogChunksMultiplier# 30 OrangeLogChunksMultiplier# 20 WarningLogChunksMultiplier# 4 YellowLogChunksMultiplier# 4 MaxMetadataMegabytes# 32 SpaceColorBorder# GREEN CompletionThreadsCount# 1 UseNoopScheduler# false PlainDataChunks# 0 SeparateHugePriorities# 0} PDiskId# 1000 2026-01-08T15:25:44.468680Z node 2 :RESOURCE_BROKER DEBUG: resource_broker.cpp:1115: TResourceBrokerActor bootstrap 2026-01-08T15:25:44.469005Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:1115: TResourceBrokerActor bootstrap 2026-01-08T15:25:44.501264Z node 2 :KQP_RESOURCE_MANAGER DEBUG: kqp_rm_service.cpp:609: Start KqpResourceManagerActor at [2:472:2102] with ResourceBroker at [2:442:2101] 2026-01-08T15:25:44.501405Z node 2 :KQP_RESOURCE_MANAGER DEBUG: kqp_resource_info_exchanger.cpp:121: Start KqpResourceInfoExchangerActor at [2:473:2103] 2026-01-08T15:25:44.501477Z node 1 :KQP_RESOURCE_MANAGER DEBUG: kqp_rm_service.cpp:609: Start KqpResourceManagerActor at [1:470:2350] with 
ResourceBroker at [1:441:2331] 2026-01-08T15:25:44.501536Z node 1 :KQP_RESOURCE_MANAGER DEBUG: kqp_resource_info_exchanger.cpp:121: Start KqpResourceInfoExchangerActor at [1:471:2351] 2026-01-08T15:25:44.501617Z node 2 :KQP_RESOURCE_MANAGER CRIT: kqp_rm_service.cpp:806: Failed to deliver subscription request to config dispatcher 2026-01-08T15:25:44.501680Z node 2 :KQP_RESOURCE_MANAGER CRIT: kqp_resource_info_exchanger.cpp:410: Failed to deliver subscription request to config dispatcher. 2026-01-08T15:25:44.501735Z node 1 :KQP_RESOURCE_MANAGER CRIT: kqp_rm_service.cpp:806: Failed to deliver subscription request to config dispatcher 2026-01-08T15:25:44.501758Z node 1 :KQP_RESOURCE_MANAGER CRIT: kqp_resource_info_exchanger.cpp:410: Failed to deliver subscription request to config dispatcher. 2026-01-08T15:25:44.501928Z node 1 :KQP_RESOURCE_MANAGER DEBUG: kqp_rm_service.cpp:923: Don't set KqpProxySharedResources 2026-01-08T15:25:44.562697Z node 1 :KQP_RESOURCE_MANAGER INFO: kqp_rm_service.cpp:939: Send to publish resource usage for reason: data_center update, payload: NodeId: 1 ResourceManagerActorId { RawX1: 7886758914357752171 RawX2: 9223372041149771361 } Timestamp: 1767885944 AvailableComputeActors: 100 UsedMemory: 0 TotalMemory: 1000 Memory { Pool: 1 Available: 1000 } ExecutionUnits: 100 2026-01-08T15:25:44.562961Z node 2 :KQP_RESOURCE_MANAGER DEBUG: kqp_rm_service.cpp:923: Don't set KqpProxySharedResources 2026-01-08T15:25:44.563053Z node 2 :KQP_RESOURCE_MANAGER INFO: kqp_rm_service.cpp:939: Send to publish resource usage for reason: data_center update, payload: NodeId: 2 ResourceManagerActorId { RawX1: 7886758914357752171 RawX2: 9223372045444738657 } Timestamp: 1767885944 AvailableComputeActors: 100 UsedMemory: 0 TotalMemory: 1000 Memory { Pool: 1 Available: 1000 } ExecutionUnits: 100 2026-01-08T15:25:44.563319Z node 2 :KQP_RESOURCE_MANAGER INFO: kqp_resource_info_exchanger.cpp:462: Received tenant pool status for exchanger, serving tenant: /dc-1, board: kqpexch+/dc-1 2026-01-08T15:25:44.563485Z node 2 :KQP_RESOURCE_MANAGER INFO: kqp_rm_service.cpp:763: Received tenant pool status, serving tenant: /dc-1, board: kqprm+/dc-1 2026-01-08T15:25:44.563514Z node 2 :KQP_RESOURCE_MANAGER DEBUG: kqp_rm_service.cpp:923: Don't set KqpProxySharedResources 2026-01-08T15:25:44.563601Z node 2 :KQP_RESOURCE_MANAGER INFO: kqp_rm_service.cpp:939: Send to publish resource usage for reason: tenant updated, payload: NodeId: 2 ResourceManagerActorId { RawX1: 7886758914357752171 RawX2: 9223372045444738657 } Timestamp: 1767885944 AvailableComputeActors: 100 UsedMemory: 0 TotalMemory: 1000 Memory { Pool: 1 Available: 1000 } ExecutionUnits: 100 2026-01-08T15:25:44.563790Z node 1 :KQP_RESOURCE_MANAGER INFO: kqp_rm_service.cpp:763: Received tenant pool status, serving tenant: /dc-1, board: kqprm+/dc-1 2026-01-08T15:25:44.563818Z node 1 :KQP_RESOURCE_MANAGER DEBUG: kqp_rm_service.cpp:923: Don't set KqpProxySharedResources 2026-01-08T15:25:44.563879Z node 1 :KQP_RESOURCE_MANAGER INFO: kqp_rm_service.cpp:939: Send to publish resource usage for reason: tenant updated, payload: NodeId: 1 ResourceManagerActorId { RawX1: 7886758914357752171 RawX2: 9223372041149771361 } Timestamp: 1767885944 AvailableComputeActors: 100 UsedMemory: 0 TotalMemory: 1000 Memory { Pool: 1 Available: 1000 } ExecutionUnits: 100 2026-01-08T15:25:44.563966Z node 1 :KQP_RESOURCE_MANAGER INFO: kqp_resource_info_exchanger.cpp:462: Received tenant pool status for exchanger, serving tenant: /dc-1, board: kqpexch+/dc-1 2026-01-08T15:25:44.564520Z node 1 
:KQP_RESOURCE_MANAGER DEBUG: kqp_resource_info_exchanger.cpp:476: Get board info from subscriber, serving tenant: /dc-1, board: kqpexch+/dc-1, with size: 0 2026-01-08T15:25:44.564802Z node 1 :KQP_RESOURCE_MANAGER DEBUG: kqp_resource_info_exchanger.cpp:498: Get board info update from subscriber, serving tenant: /dc-1, board: kqpexch+/dc-1, with size: 1 2026-01-08T15:25:44.565032Z node 1 :KQP_RESOURCE_MANAGER DEBUG: kqp_resource_info_exchanger.cpp:498: Get board info update from subscriber, serving tenant: /dc-1, board: kqpexch+/dc-1, with size: 1 2026-01-08T15:25:44.565516Z node 2 :KQP_RESOURCE_MANAGER DEBUG: kqp_resource_info_exchanger.cpp:476: Get board info from subscriber, serving tenant: /dc-1, board: kqpexch+/dc-1, with size: 1 2026-01-08T15:25:44.565707Z node 2 :KQP_RESOURCE_MANAGER DEBUG: kqp_resource_info_exchanger.cpp:498: Get board info update from subscriber, serving tenant: /dc-1, board: kqpexch+/dc-1, with size: 2 2026-01-08T15:25:44.565868Z node 2 :KQP_RESOURCE_MANAGER DEBUG: kqp_resource_info_exchanger.cpp:523: Get resources info from node: 1 2026-01-08T15:25:44.566066Z node 1 :KQP_RESOURCE_MANAGER DEBUG: kqp_resource_info_exchanger.cpp:523: Get resources info from node: 2 2026-01-08T15:25:44.566235Z node 2 :KQP_RESOURCE_MANAGER DEBUG: kqp_resource_info_exchanger.cpp:523: Get resources info from node: 1 2026-01-08T15:25:44.574430Z node 1 :KQP_RESOURCE_MANAGER DEBUG: kqp_resource_info_exchanger.cpp:523: Get resources info from node: 2 2026-01-08T15:25:44.576976Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:393: Submitted new kqp_query task kqp-1-1-1 (1 by [1:470:2350]) priority=0 resources={0, 100} 2026-01-08T15:25:44.577051Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:679: Assigning waiting task kqp-1-1-1 (1 by [1:470:2350]) to queue queue_kqp_resource_manager 2026-01-08T15:25:44.577112Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:637: Allocate resources {0, 100} for task kqp-1-1-1 (1 by [1:470:2350]) from queue queue_kqp_resource_manager 2026-01-08T15:25:44.577161Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:679: Assigning in-fly task kqp-1-1-1 (1 by [1:470:2350]) to queue queue_kqp_resource_manager 2026-01-08T15:25:44.577203Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:711: Updated planned resource usage for queue queue_kqp_resource_manager from 0.000000 to 0.250000 (insert task kqp-1-1-1 (1 by [1:470:2350])) 2026-01-08T15:25:44.577374Z node 1 :KQP_RESOURCE_MANAGER DEBUG: kqp_rm_service.cpp:351: TxId: 1, taskId: 1. 
Allocated TKqpResourcesRequest{ MemoryPool: 1, Memory: 100ExternalMemory: 0 } 2026-01-08T15:25:44.577547Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:393: Submitted new kqp_query task kqp-1-2-2 (2 by [1:470:2350]) priority=0 resources={0, 100} 2026-01-08T15:25:44.577578Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:679: Assigning waiting task kqp-1-2-2 (2 by [1:470:2350]) to queue queue_kqp_resource_manager 2026-01-08T15:25:44.577611Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:637: Allocate resources {0, 100} for task kqp-1-2-2 (2 by [1:470:2350]) from queue queue_kqp_resource_manager 2026-01-08T15:25:44.577658Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:679: Assigning in-fly task kqp-1-2-2 (2 by [1:470:2350]) to queue queue_kqp_resource_manager 2026-01-08T15:25:44.577688Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:711: Updated planned resource usage for queue queue_kqp_resource_manager from 0.250000 to 0.500000 (insert task kqp-1-2-2 (2 by [1:470:2350])) 2026-01-08T15:25:44.577720Z node 1 :KQP_RESOURCE_MANAGER DEBUG: kqp_rm_service.cpp:351: TxId: 1, taskId: 2. Allocated TKqpResourcesRequest{ MemoryPool: 1, Memory: 100ExternalMemory: 0 } 2026-01-08T15:25:44.577833Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:393: Submitted new kqp_query task kqp-1-3-3 (3 by [1:470:2350]) priority=0 resources={0, 100} 2026-01-08T15:25:44.577860Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:679: Assigning waiting task kqp-1-3-3 (3 by [1:470:2350]) to queue queue_kqp_resource_manager 2026-01-08T15:25:44.577901Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:637: Allocate resources {0, 100} for task kqp-1-3-3 (3 by [1:470:2350]) from queue queue_kqp_resource_manager 2026-01-08T15:25:44.577924Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:679: Assigning in-fly task kqp-1-3-3 (3 by [1:470:2350]) to queue queue_kqp_resource_manager 2026-01-08T15:25:44.577947Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:711: Updated planned resource usage for queue queue_kqp_resource_manager from 0.500000 to 0.750000 (insert task kqp-1-3-3 (3 by [1:470:2350])) 2026-01-08T15:25:44.577971Z node 1 :KQP_RESOURCE_MANAGER DEBUG: kqp_rm_service.cpp:351: TxId: 1, taskId: 3. Allocated TKqpResourcesRequest{ MemoryPool: 1, Memory: 100ExternalMemory: 0 } 2026-01-08T15:25:44.578060Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:393: Submitted new kqp_query task kqp-1-4-4 (4 by [1:470:2350]) priority=0 resources={0, 100} 2026-01-08T15:25:44.578083Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:679: Assigning waiting task kqp-1-4-4 (4 by [1:470:2350]) to queue queue_kqp_resource_manager 2026-01-08T15:25:44.578109Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:637: Allocate resources {0, 100} for task kqp-1-4-4 (4 by [1:470:2350]) from queue queue_kqp_resource_manager 2026-01-08T15:25:44.578152Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:679: Assigning in-fly task kqp-1-4-4 (4 by [1:470:2350]) to queue queue_kqp_resource_manager 2026-01-08T15:25:44.578177Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:711: Updated planned resource usage for queue queue_kqp_resource_manager from 0.750000 to 1.000000 (insert task kqp-1-4-4 (4 by [1:470:2350])) 2026-01-08T15:25:44.578212Z node 1 :KQP_RESOURCE_MANAGER DEBUG: kqp_rm_service.cpp:351: TxId: 1, taskId: 4. 
Allocated TKqpResourcesRequest{ MemoryPool: 1, Memory: 100ExternalMemory: 0 } 2026-01-08T15:25:44.578309Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:393: Submitted new kqp_query task kqp-1-5-5 (5 by [1:470:2350]) priority=0 resources={0, 100} 2026-01-08T15:25:44.586461Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:679: Assigning waiting task kqp-1-5-5 (5 by [1:470:2350]) to queue queue_kqp_resource_manager 2026-01-08T15:25:44.586520Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:637: Allocate resources {0, 100} for task kqp-1-5-5 (5 by [1:470:2350]) from queue queue_kqp_resource_manager 2026-01-08T15:25:44.586555Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:679: Assigning in-fly task kqp-1-5-5 (5 by [1:470:2350]) to queue queue_kqp_resource_manager 2026-01-08T15:25:44.586591Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:711: Updated planned resource usage for queue queue_kqp_resource_manager from 1.000000 to 1.250000 (insert task kqp-1-5-5 (5 by [1:470:2350])) 2026-01-08T15:25:44.586627Z node 1 :KQP_RESOURCE_MANAGER DEBUG: kqp_rm_service.cpp:351: TxId: 1, taskId: 5. Allocated TKqpResourcesRequest{ MemoryPool: 1, Memory: 100ExternalMemory: 0 } 2026-01-08T15:25:44.586790Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:393: Submitted new kqp_query task kqp-1-6-6 (6 by [1:470:2350]) priority=0 resources={0, 100} 2026-01-08T15:25:44.586820Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:679: Assigning waiting task kqp-1-6-6 (6 by [1:470:2350]) to queue queue_kqp_resource_manager 2026-01-08T15:25:44.586853Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:637: Allocate resources {0, 100} for task kqp-1-6-6 (6 by [1:470:2350]) from queue queue_kqp_resource_manager 2026-01-08T15:25:44.586894Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:679: Assigning in-fly task kqp-1-6-6 (6 by [1:470:2350]) to queue queue_kqp_resource_manager 2026-01-08T15:25:44.586923Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:711: Updated planned resource usage for queue queue_kqp_resource_manager from 1.250000 to 1.500000 (insert task kqp-1-6-6 (6 by [1:470:2350])) 2026-01-08T15:25:44.586956Z node 1 :KQP_RESOURCE_MANAGER DEBUG: kqp_rm_service.cpp:351: TxId: 1, taskId: 6. Allocated TKqpResourcesRequest{ MemoryPool: 1, Memory: 100ExternalMemory: 0 } 2026-01-08T15:25:44.587064Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:393: Submitted new kqp_query task kqp-1-7-7 (7 by [1:470:2350]) priority=0 resources={0, 100} 2026-01-08T15:25:44.587096Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:679: Assigning waiting task kqp-1-7-7 (7 by [1:470:2350]) to queue queue_kqp_resource_manager 2026-01-08T15:25:44.587135Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:637: Allocate resources {0, 100} for task kqp-1-7-7 (7 by [1:470:2350]) from queue queue_kqp_resource_manager 2026-01-08T15:25:44.587164Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:679: Assigning in-fly task kqp-1-7-7 (7 by [1:470:2350]) to queue queue_kqp_resource_manager 2026-01-08T15:25:44.587186Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:711: Updated planned resource usage for queue queue_kqp_resource_manager from 1.500000 to 1.750000 (insert task kqp-1-7-7 (7 by [1:470:2350])) 2026-01-08T15:25:44.587210Z node 1 :KQP_RESOURCE_MANAGER DEBUG: kqp_rm_service.cpp:351: TxId: 1, taskId: 7. 
Allocated TKqpResourcesRequest{ MemoryPool: 1, Memory: 100ExternalMemory: 0 } 2026-01-08T15:25:44.587292Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:393: Submitted new kqp_query task kqp-1-8-8 (8 by [1:470:2350]) priority=0 resources={0, 100} 2026-01-08T15:25:44.587320Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:679: Assigning waiting task kqp-1-8-8 (8 by [1:470:2350]) to queue queue_kqp_resource_manager 2026-01-08T15:25:44.587358Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:637: Allocate resources {0, 100} for task kqp-1-8-8 (8 by [1:470:2350]) from queue queue_kqp_resource_manager 2026-01-08T15:25:44.587404Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:679: Assigning in-fly task kqp-1-8-8 (8 by [1:470:2350]) to queue queue_kqp_resource_manager 2026-01-08T15:25:44.587455Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:711: Updated planned resource usage for queue queue_kqp_resource_manager from 1.750000 to 2.000000 (insert task kqp-1-8-8 (8 by [1:470:2350])) 2026-01-08T15:25:44.587480Z node 1 :KQP_RESOURCE_MANAGER DEBUG: kqp_rm_service.cpp:351: TxId: 1, taskId: 8. Allocated TKqpResourcesRequest{ MemoryPool: 1, Memory: 100ExternalMemory: 0 } 2026-01-08T15:25:44.587556Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:393: Submitted new kqp_query task kqp-1-9-9 (9 by [1:470:2350]) priority=0 resources={0, 100} 2026-01-08T15:25:44.587576Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:679: Assigning waiting task kqp-1-9-9 (9 by [1:470:2350]) to queue queue_kqp_resource_manager 2026-01-08T15:25:44.587605Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:637: Allocate resources {0, 100} for task kqp-1-9-9 (9 by [1:470:2350]) from queue queue_kqp_resource_manager 2026-01-08T15:25:44.587625Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:679: Assigning in-fly task kqp-1-9-9 (9 by [1:470:2350]) to queue queue_kqp_resource_manager 2026-01-08T15:25:44.587649Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:711: Updated planned resource usage for queue queue_kqp_resource_manager from 2.000000 to 2.250000 (insert task kqp-1-9-9 (9 by [1:470:2350])) 2026-01-08T15:25:44.587673Z node 1 :KQP_RESOURCE_MANAGER DEBUG: kqp_rm_service.cpp:351: TxId: 1, taskId: 9. Allocated TKqpResourcesRequest{ MemoryPool: 1, Memory: 100ExternalMemory: 0 } 2026-01-08T15:25:44.587749Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:528: Finish task kqp-1-1-1 (1 by [1:470:2350]) (release resources {0, 100}) 2026-01-08T15:25:44.587794Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:577: Updated planned resource usage for queue queue_kqp_resource_manager from 2.250000 to 2.000000 (remove task kqp-1-1-1 (1 by [1:470:2350])) 2026-01-08T15:25:44.587833Z node 1 :KQP_RESOURCE_MANAGER DEBUG: kqp_rm_service.cpp:404: TxId: 1, taskId: 1. Released resources, Memory: 100, Free Tier: 0, ExecutionUnits: 0. 
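Editor's note: the allocate/finish records above trace the broker's planned-usage bookkeeping for queue_kqp_resource_manager. Every {0, 100} kqp_query task adds 0.25 to the queue's planned usage on insert (0.250000 up to 2.250000 for tasks kqp-1-1-1 through kqp-1-9-9), and the same amount is subtracted when a task finishes. Below is a minimal sketch of that counter, assuming the 100-units-to-0.25 normalization observed here; the divisor of 400, the task keying, and all names are illustrative guesses, not taken from resource_broker.cpp.

```cpp
#include <cstdint>
#include <iostream>
#include <string>
#include <unordered_map>

struct TTaskResources { uint64_t Cpu = 0; uint64_t Memory = 0; };

// Planned usage per queue, normalized so that 100 memory units map to 0.25,
// matching the 0.25 steps in the log. The real broker also tracks CPU,
// per-queue weights and time-decayed "real" usage, which this sketch omits.
class TPlannedUsage {
public:
    void Insert(const std::string& taskId, TTaskResources res) {
        Tasks_[taskId] = res;
        Planned_ += Normalize(res);
    }
    void Finish(const std::string& taskId) {
        auto it = Tasks_.find(taskId);
        if (it == Tasks_.end()) {
            return;
        }
        Planned_ -= Normalize(it->second);
        Tasks_.erase(it);
    }
    double Planned() const { return Planned_; }

private:
    static double Normalize(TTaskResources res) {
        return static_cast<double>(res.Memory) / 400.0; // assumed constant
    }
    std::unordered_map<std::string, TTaskResources> Tasks_;
    double Planned_ = 0.0;
};

int main() {
    TPlannedUsage queue; // queue_kqp_resource_manager
    for (int i = 1; i <= 9; ++i) { // kqp-1-1-1 (pre-excerpt) .. kqp-1-9-9
        queue.Insert("kqp-1-" + std::to_string(i), {0, 100});
    }
    std::cout << queue.Planned() << "\n"; // 2.25, as after inserting task 9
    queue.Finish("kqp-1-1");              // release of kqp-1-1-1
    std::cout << queue.Planned() << "\n"; // 2, matching the remove record
}
```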
|85.0%| [TM] {BAZEL_UPLOAD} ydb/core/kqp/rm_service/ut/unittest >> KqpRm::SingleSnapshotByExchanger [GOOD] >> KqpRm::SnapshotSharingByExchanger [GOOD] >> KqpRm::DisonnectNodes [GOOD] >> TBlobStorageGroupInfoBlobMapTest::BasicChecks [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/groupinfo/ut/unittest >> TSubgroupPartLayoutTest::CountEffectiveReplicas4of4 [GOOD] Test command err: testing erasure block-3-1 main# 0 main# 1 main# 2 main# 3 main# 4 main# 5 main# 6 main# 7 main# 8 main# 9 main# 10 main# 11 main# 12 main# 13 main# 14 main# 15 Checked 256 cases, took 260 us testing erasure stripe-4-2 main# 0 main# 1 main# 2 main# 3 main# 4 main# 5 main# 6 main# 7 main# 8 main# 9 main# 10 main# 11 main# 12 main# 13 main# 14 main# 15 main# 16 main# 17 main# 18 main# 19 main# 20 main# 21 main# 22 main# 23 main# 24 main# 25 main# 26 main# 27 main# 28 main# 29 main# 30 main# 31 main# 32 main# 33 main# 34 main# 35 main# 36 main# 37 main# 38 main# 39 main# 40 main# 41 main# 42 main# 43 main# 44 main# 45 main# 46 main# 47 main# 48 main# 49 main# 50 main# 51 main# 52 main# 53 main# 54 main# 55 main# 56 main# 57 main# 58 main# 59 main# 60 main# 61 main# 62 main# 63 Checked 262144 cases, took 491373 us testing erasure block-2-3 main# 0 main# 1 main# 2 main# 3 main# 4 main# 5 main# 6 main# 7 main# 8 main# 9 main# 10 main# 11 main# 12 main# 13 main# 14 main# 15 main# 16 main# 17 main# 18 main# 19 main# 20 main# 21 main# 22 main# 23 main# 24 main# 25 main# 26 main# 27 main# 28 main# 29 main# 30 main# 31 Checked 1048576 cases, took 1522361 us testing erasure stripe-3-1 main# 0 main# 1 main# 2 main# 3 main# 4 main# 5 main# 6 main# 7 main# 8 main# 9 main# 10 main# 11 main# 12 main# 13 main# 14 main# 15 Checked 256 cases, took 258 us testing erasure stripe-3-2 main# 0 main# 1 main# 2 main# 3 main# 4 main# 5 main# 6 main# 7 main# 8 main# 9 main# 10 main# 11 main# 12 main# 13 main# 14 main# 15 main# 16 main# 17 main# 18 main# 19 main# 20 main# 21 main# 22 main# 23 main# 24 main# 25 main# 26 main# 27 main# 28 main# 29 main# 30 main# 31 Checked 32768 cases, took 23489 us testing erasure stripe-2-3 main# 0 main# 1 main# 2 main# 3 main# 4 main# 5 main# 6 main# 7 main# 8 main# 9 main# 10 main# 11 main# 12 main# 13 main# 14 main# 15 main# 16 main# 17 main# 18 main# 19 main# 20 main# 21 main# 22 main# 23 main# 24 main# 25 main# 26 main# 27 main# 28 main# 29 main# 30 main# 31 Checked 1048576 cases, took 1515674 us |85.0%| [TM] {BAZEL_UPLOAD} ydb/core/blobstorage/groupinfo/ut/unittest >> TSentinelUnstableTests::BSControllerCantChangeStatus ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/rm_service/ut/unittest >> KqpRm::NodesMembershipByExchanger [GOOD] Test command err: 2026-01-08T15:25:43.877897Z node 2 :BS_PDISK WARN: {BPD01@blobstorage_pdisk_blockdevice_async.cpp:924} Warning# got EIoResult::FileOpenError from IoContext->Setup PDiskId# 1000 2026-01-08T15:25:43.884583Z node 2 :BS_PDISK CRIT: {BPD39@blobstorage_pdisk_impl.cpp:2980} BlockDevice initialization error Details# Can't open file "/home/runner/.ya/build/build_root/663n/00317e/r3tmp/tmp9Xajey/pdisk_1.dat": unknown reason, errno# 0. PDiskId# 1000 2026-01-08T15:25:43.885113Z node 2 :BS_PDISK CRIT: {BPD01@blobstorage_pdisk_actor.cpp:267} PDiskId# 1000 bootstrapped to the StateError, reason# Can't open file "/home/runner/.ya/build/build_root/663n/00317e/r3tmp/tmp9Xajey/pdisk_1.dat": unknown reason, errno# 0. 
Can not be initialized Config: {TPDiskConfig Path# "/home/runner/.ya/build/build_root/663n/00317e/r3tmp/tmp9Xajey/pdisk_1.dat" ExpectedPath# "" ExpectedSerial# "" PDiskGuid# 647245231291381833 PDiskId# 1000 PDiskCategory# {Type# DEVICE_TYPE_ROT Kind# 0} MetadataOnly# 0 StartOwnerRound# 2 SectorMap# true EnableSectorEncryption # 0 ChunkSize# 134217728 SectorSize# 4096 StatisticsUpdateIntervalMs# 1000 SchedulerCfg# {TPDiskSchedulerConfig BytesSchedulerWeight# 1 LogWeight# 1 FreshWeight# 2 CompWeight# 7 SyncLogWeight# 15 HugeWeight# 2 FastReadWeight# 1 OtherReadWeight# 1 LoadWeight# 2 LowReadWeight# 1 MaxChunkReadsPerCycle# 16 MaxChunkReadsDurationPerCycleMs# 0.25 MaxChunkWritesPerCycle# 8 MaxChunkWritesDurationPerCycleMs# 1} MinLogChunksTotal# 4 MaxLogChunksPerOwnerMultiplier# 5 MaxLogChunksPerOwnerDivisor# 4 SortFreeChunksPerItems# 100 GetDriveDataSwitch# DoNotTouch WriteCacheSwitch# DoNotTouch DriveModelSeekTimeNs# 8000000 DriveModelSpeedBps# 127000000 DriveModelSpeedBpsMin# 135000000 DriveModelSpeedBpsMax# 200000000 DriveModelBulkWrieBlockSize# 2097152 DriveModelTrimSpeedBps# 0 ReorderingMs# 50 DeviceInFlight# 4 CostLimitNs# 50000000 BufferPoolBufferSizeBytes# 524288 BufferPoolBufferCount# 256 MaxQueuedCompletionActions# 128 ExpectedSlotCount# 0 SlotSizeInUnits# 0 ReserveLogChunksMultiplier# 56 InsaneLogChunksMultiplier# 40 RedLogChunksMultiplier# 30 OrangeLogChunksMultiplier# 20 WarningLogChunksMultiplier# 4 YellowLogChunksMultiplier# 4 MaxMetadataMegabytes# 32 SpaceColorBorder# GREEN CompletionThreadsCount# 1 UseNoopScheduler# false PlainDataChunks# 0 SeparateHugePriorities# 0} PDiskId# 1000 2026-01-08T15:25:43.971438Z node 2 :RESOURCE_BROKER DEBUG: resource_broker.cpp:1115: TResourceBrokerActor bootstrap 2026-01-08T15:25:43.971725Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:1115: TResourceBrokerActor bootstrap 2026-01-08T15:25:44.005562Z node 2 :KQP_RESOURCE_MANAGER DEBUG: kqp_rm_service.cpp:609: Start KqpResourceManagerActor at [2:472:2102] with ResourceBroker at [2:442:2101] 2026-01-08T15:25:44.005683Z node 2 :KQP_RESOURCE_MANAGER DEBUG: kqp_resource_info_exchanger.cpp:121: Start KqpResourceInfoExchangerActor at [2:473:2103] 2026-01-08T15:25:44.005743Z node 1 :KQP_RESOURCE_MANAGER DEBUG: kqp_rm_service.cpp:609: Start KqpResourceManagerActor at [1:470:2350] with ResourceBroker at [1:441:2331] 2026-01-08T15:25:44.005796Z node 1 :KQP_RESOURCE_MANAGER DEBUG: kqp_resource_info_exchanger.cpp:121: Start KqpResourceInfoExchangerActor at [1:471:2351] 2026-01-08T15:25:44.005866Z node 2 :KQP_RESOURCE_MANAGER CRIT: kqp_rm_service.cpp:806: Failed to deliver subscription request to config dispatcher 2026-01-08T15:25:44.005915Z node 2 :KQP_RESOURCE_MANAGER CRIT: kqp_resource_info_exchanger.cpp:410: Failed to deliver subscription request to config dispatcher. 2026-01-08T15:25:44.005962Z node 1 :KQP_RESOURCE_MANAGER CRIT: kqp_rm_service.cpp:806: Failed to deliver subscription request to config dispatcher 2026-01-08T15:25:44.005995Z node 1 :KQP_RESOURCE_MANAGER CRIT: kqp_resource_info_exchanger.cpp:410: Failed to deliver subscription request to config dispatcher. 
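Editor's note: the case counts in the TBlobStorageGroupInfoBlobMapTest erasure sweep above all fit one pattern. With n total disks (data plus parity) and p parity parts, each run checks (2^n)^(p+1) cases: block-3-1 and stripe-3-1 give (2^4)^2 = 256, stripe-3-2 gives (2^5)^3 = 32768, stripe-4-2 gives (2^6)^3 = 262144, and block-2-3 and stripe-2-3 give (2^5)^4 = 1048576. This is only an observation about the printed numbers; the test's actual enumeration logic is not shown in this log.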
2026-01-08T15:25:44.006145Z node 1 :KQP_RESOURCE_MANAGER DEBUG: kqp_rm_service.cpp:923: Don't set KqpProxySharedResources 2026-01-08T15:25:44.024301Z node 1 :KQP_RESOURCE_MANAGER INFO: kqp_rm_service.cpp:939: Send to publish resource usage for reason: data_center update, payload: NodeId: 1 ResourceManagerActorId { RawX1: 7886758914357752171 RawX2: 9223372041149771361 } Timestamp: 1767885943 AvailableComputeActors: 100 UsedMemory: 0 TotalMemory: 1000 Memory { Pool: 1 Available: 1000 } ExecutionUnits: 100 2026-01-08T15:25:44.024558Z node 2 :KQP_RESOURCE_MANAGER DEBUG: kqp_rm_service.cpp:923: Don't set KqpProxySharedResources 2026-01-08T15:25:44.024648Z node 2 :KQP_RESOURCE_MANAGER INFO: kqp_rm_service.cpp:939: Send to publish resource usage for reason: data_center update, payload: NodeId: 2 ResourceManagerActorId { RawX1: 7886758914357752171 RawX2: 9223372045444738657 } Timestamp: 1767885943 AvailableComputeActors: 100 UsedMemory: 0 TotalMemory: 1000 Memory { Pool: 1 Available: 1000 } ExecutionUnits: 100 2026-01-08T15:25:44.024936Z node 2 :KQP_RESOURCE_MANAGER INFO: kqp_resource_info_exchanger.cpp:462: Received tenant pool status for exchanger, serving tenant: /dc-1, board: kqpexch+/dc-1 2026-01-08T15:25:44.025099Z node 2 :KQP_RESOURCE_MANAGER INFO: kqp_rm_service.cpp:763: Received tenant pool status, serving tenant: /dc-1, board: kqprm+/dc-1 2026-01-08T15:25:44.025133Z node 2 :KQP_RESOURCE_MANAGER DEBUG: kqp_rm_service.cpp:923: Don't set KqpProxySharedResources 2026-01-08T15:25:44.025248Z node 2 :KQP_RESOURCE_MANAGER INFO: kqp_rm_service.cpp:939: Send to publish resource usage for reason: tenant updated, payload: NodeId: 2 ResourceManagerActorId { RawX1: 7886758914357752171 RawX2: 9223372045444738657 } Timestamp: 1767885943 AvailableComputeActors: 100 UsedMemory: 0 TotalMemory: 1000 Memory { Pool: 1 Available: 1000 } ExecutionUnits: 100 2026-01-08T15:25:44.025426Z node 1 :KQP_RESOURCE_MANAGER INFO: kqp_rm_service.cpp:763: Received tenant pool status, serving tenant: /dc-1, board: kqprm+/dc-1 2026-01-08T15:25:44.025453Z node 1 :KQP_RESOURCE_MANAGER DEBUG: kqp_rm_service.cpp:923: Don't set KqpProxySharedResources 2026-01-08T15:25:44.025538Z node 1 :KQP_RESOURCE_MANAGER INFO: kqp_rm_service.cpp:939: Send to publish resource usage for reason: tenant updated, payload: NodeId: 1 ResourceManagerActorId { RawX1: 7886758914357752171 RawX2: 9223372041149771361 } Timestamp: 1767885943 AvailableComputeActors: 100 UsedMemory: 0 TotalMemory: 1000 Memory { Pool: 1 Available: 1000 } ExecutionUnits: 100 2026-01-08T15:25:44.025636Z node 1 :KQP_RESOURCE_MANAGER INFO: kqp_resource_info_exchanger.cpp:462: Received tenant pool status for exchanger, serving tenant: /dc-1, board: kqpexch+/dc-1 2026-01-08T15:25:44.026179Z node 1 :KQP_RESOURCE_MANAGER DEBUG: kqp_resource_info_exchanger.cpp:476: Get board info from subscriber, serving tenant: /dc-1, board: kqpexch+/dc-1, with size: 0 2026-01-08T15:25:44.026619Z node 1 :KQP_RESOURCE_MANAGER DEBUG: kqp_resource_info_exchanger.cpp:498: Get board info update from subscriber, serving tenant: /dc-1, board: kqpexch+/dc-1, with size: 1 2026-01-08T15:25:44.026837Z node 1 :KQP_RESOURCE_MANAGER DEBUG: kqp_resource_info_exchanger.cpp:498: Get board info update from subscriber, serving tenant: /dc-1, board: kqpexch+/dc-1, with size: 1 2026-01-08T15:25:44.027296Z node 2 :KQP_RESOURCE_MANAGER DEBUG: kqp_resource_info_exchanger.cpp:476: Get board info from subscriber, serving tenant: /dc-1, board: kqpexch+/dc-1, with size: 1 2026-01-08T15:25:44.027504Z node 2 :KQP_RESOURCE_MANAGER 
DEBUG: kqp_resource_info_exchanger.cpp:498: Get board info update from subscriber, serving tenant: /dc-1, board: kqpexch+/dc-1, with size: 2 2026-01-08T15:25:44.027672Z node 2 :KQP_RESOURCE_MANAGER DEBUG: kqp_resource_info_exchanger.cpp:523: Get resources info from node: 1 2026-01-08T15:25:44.027885Z node 1 :KQP_RESOURCE_MANAGER DEBUG: kqp_resource_info_exchanger.cpp:523: Get resources info from node: 2 2026-01-08T15:25:44.028057Z node 2 :KQP_RESOURCE_MANAGER DEBUG: kqp_resource_info_exchanger.cpp:523: Get resources info from node: 1 2026-01-08T15:25:44.028140Z node 1 :KQP_RESOURCE_MANAGER DEBUG: kqp_resource_info_exchanger.cpp:523: Get resources info from node: 2 2026-01-08T15:25:45.231515Z node 1 :KQP_RESOURCE_MANAGER DEBUG: kqp_rm_service.cpp:423: Schedule Snapshot request 2026-01-08T15:25:45.231634Z node 2 :KQP_RESOURCE_MANAGER DEBUG: kqp_rm_service.cpp:423: Schedule Snapshot request 2026-01-08T15:25:45.232528Z node 1 :KQP_RESOURCE_MANAGER DEBUG: kqp_resource_info_exchanger.cpp:498: Get board info update from subscriber, serving tenant: /dc-1, board: kqpexch+/dc-1, with size: 1 2026-01-08T15:25:45.584280Z node 1 :KQP_RESOURCE_MANAGER DEBUG: kqp_rm_service.cpp:423: Schedule Snapshot request |85.0%| [TM] {BAZEL_UPLOAD} ydb/core/kqp/rm_service/ut/unittest ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/rm_service/ut/unittest >> KqpRm::SingleSnapshotByExchanger [GOOD] Test command err: 2026-01-08T15:25:44.943153Z node 2 :BS_PDISK WARN: {BPD01@blobstorage_pdisk_blockdevice_async.cpp:924} Warning# got EIoResult::FileOpenError from IoContext->Setup PDiskId# 1000 2026-01-08T15:25:44.943623Z node 2 :BS_PDISK CRIT: {BPD39@blobstorage_pdisk_impl.cpp:2980} BlockDevice initialization error Details# Can't open file "/home/runner/.ya/build/build_root/663n/003171/r3tmp/tmpkce7RZ/pdisk_1.dat": unknown reason, errno# 0. PDiskId# 1000 2026-01-08T15:25:44.944128Z node 2 :BS_PDISK CRIT: {BPD01@blobstorage_pdisk_actor.cpp:267} PDiskId# 1000 bootstrapped to the StateError, reason# Can't open file "/home/runner/.ya/build/build_root/663n/003171/r3tmp/tmpkce7RZ/pdisk_1.dat": unknown reason, errno# 0. 
Can not be initialized Config: {TPDiskConfig Path# "/home/runner/.ya/build/build_root/663n/003171/r3tmp/tmpkce7RZ/pdisk_1.dat" ExpectedPath# "" ExpectedSerial# "" PDiskGuid# 5562786923726442832 PDiskId# 1000 PDiskCategory# {Type# DEVICE_TYPE_ROT Kind# 0} MetadataOnly# 0 StartOwnerRound# 2 SectorMap# true EnableSectorEncryption # 0 ChunkSize# 134217728 SectorSize# 4096 StatisticsUpdateIntervalMs# 1000 SchedulerCfg# {TPDiskSchedulerConfig BytesSchedulerWeight# 1 LogWeight# 1 FreshWeight# 2 CompWeight# 7 SyncLogWeight# 15 HugeWeight# 2 FastReadWeight# 1 OtherReadWeight# 1 LoadWeight# 2 LowReadWeight# 1 MaxChunkReadsPerCycle# 16 MaxChunkReadsDurationPerCycleMs# 0.25 MaxChunkWritesPerCycle# 8 MaxChunkWritesDurationPerCycleMs# 1} MinLogChunksTotal# 4 MaxLogChunksPerOwnerMultiplier# 5 MaxLogChunksPerOwnerDivisor# 4 SortFreeChunksPerItems# 100 GetDriveDataSwitch# DoNotTouch WriteCacheSwitch# DoNotTouch DriveModelSeekTimeNs# 8000000 DriveModelSpeedBps# 127000000 DriveModelSpeedBpsMin# 135000000 DriveModelSpeedBpsMax# 200000000 DriveModelBulkWrieBlockSize# 2097152 DriveModelTrimSpeedBps# 0 ReorderingMs# 50 DeviceInFlight# 4 CostLimitNs# 50000000 BufferPoolBufferSizeBytes# 524288 BufferPoolBufferCount# 256 MaxQueuedCompletionActions# 128 ExpectedSlotCount# 0 SlotSizeInUnits# 0 ReserveLogChunksMultiplier# 56 InsaneLogChunksMultiplier# 40 RedLogChunksMultiplier# 30 OrangeLogChunksMultiplier# 20 WarningLogChunksMultiplier# 4 YellowLogChunksMultiplier# 4 MaxMetadataMegabytes# 32 SpaceColorBorder# GREEN CompletionThreadsCount# 1 UseNoopScheduler# false PlainDataChunks# 0 SeparateHugePriorities# 0} PDiskId# 1000 2026-01-08T15:25:45.028894Z node 2 :RESOURCE_BROKER DEBUG: resource_broker.cpp:1115: TResourceBrokerActor bootstrap 2026-01-08T15:25:45.029236Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:1115: TResourceBrokerActor bootstrap 2026-01-08T15:25:45.059642Z node 2 :KQP_RESOURCE_MANAGER DEBUG: kqp_rm_service.cpp:609: Start KqpResourceManagerActor at [2:472:2102] with ResourceBroker at [2:442:2101] 2026-01-08T15:25:45.059779Z node 2 :KQP_RESOURCE_MANAGER DEBUG: kqp_resource_info_exchanger.cpp:121: Start KqpResourceInfoExchangerActor at [2:473:2103] 2026-01-08T15:25:45.059843Z node 1 :KQP_RESOURCE_MANAGER DEBUG: kqp_rm_service.cpp:609: Start KqpResourceManagerActor at [1:470:2350] with ResourceBroker at [1:441:2331] 2026-01-08T15:25:45.059902Z node 1 :KQP_RESOURCE_MANAGER DEBUG: kqp_resource_info_exchanger.cpp:121: Start KqpResourceInfoExchangerActor at [1:471:2351] 2026-01-08T15:25:45.059983Z node 2 :KQP_RESOURCE_MANAGER CRIT: kqp_rm_service.cpp:806: Failed to deliver subscription request to config dispatcher 2026-01-08T15:25:45.060038Z node 2 :KQP_RESOURCE_MANAGER CRIT: kqp_resource_info_exchanger.cpp:410: Failed to deliver subscription request to config dispatcher. 2026-01-08T15:25:45.060089Z node 1 :KQP_RESOURCE_MANAGER CRIT: kqp_rm_service.cpp:806: Failed to deliver subscription request to config dispatcher 2026-01-08T15:25:45.060110Z node 1 :KQP_RESOURCE_MANAGER CRIT: kqp_resource_info_exchanger.cpp:410: Failed to deliver subscription request to config dispatcher. 
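Editor's note on the ResourceManagerActorId payloads in these records: the two RawX2 samples (9223372041149771361 for node 1, 9223372045444738657 for node 2) differ exactly in bits 32..62, which line up with the node ids, suggesting a (flag, node id, hint) packing. A hypothetical decoder follows; the bit layout is inferred from these two values alone, not from the actor-id serialization in the YDB sources.

```cpp
#include <cstdint>
#include <cstdio>

int main() {
    // RawX2 values copied from the "Send to publish resource usage" records.
    const uint64_t rawX2[] = {9223372041149771361ull, 9223372045444738657ull};
    for (uint64_t v : rawX2) {
        uint32_t flag = static_cast<uint32_t>(v >> 63);               // assumed flag bit
        uint32_t node = static_cast<uint32_t>(v >> 32) & 0x7fffffffu; // node id, apparently
        uint32_t hint = static_cast<uint32_t>(v);                     // low 32 bits
        std::printf("flag=%u node=%u hint=%u\n", flag, node, hint);
    }
    // Prints flag=1 node=1 hint=28257 and flag=1 node=2 hint=28257,
    // consistent with the two publishing nodes sharing one service hint.
}
```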
2026-01-08T15:25:45.060283Z node 1 :KQP_RESOURCE_MANAGER DEBUG: kqp_rm_service.cpp:923: Don't set KqpProxySharedResources 2026-01-08T15:25:45.096577Z node 1 :KQP_RESOURCE_MANAGER INFO: kqp_rm_service.cpp:939: Send to publish resource usage for reason: data_center update, payload: NodeId: 1 ResourceManagerActorId { RawX1: 7886758914357752171 RawX2: 9223372041149771361 } Timestamp: 1767885945 AvailableComputeActors: 100 UsedMemory: 0 TotalMemory: 1000 Memory { Pool: 1 Available: 1000 } ExecutionUnits: 100 2026-01-08T15:25:45.096884Z node 2 :KQP_RESOURCE_MANAGER DEBUG: kqp_rm_service.cpp:923: Don't set KqpProxySharedResources 2026-01-08T15:25:45.096988Z node 2 :KQP_RESOURCE_MANAGER INFO: kqp_rm_service.cpp:939: Send to publish resource usage for reason: data_center update, payload: NodeId: 2 ResourceManagerActorId { RawX1: 7886758914357752171 RawX2: 9223372045444738657 } Timestamp: 1767885945 AvailableComputeActors: 100 UsedMemory: 0 TotalMemory: 1000 Memory { Pool: 1 Available: 1000 } ExecutionUnits: 100 2026-01-08T15:25:45.097249Z node 2 :KQP_RESOURCE_MANAGER INFO: kqp_resource_info_exchanger.cpp:462: Received tenant pool status for exchanger, serving tenant: /dc-1, board: kqpexch+/dc-1 2026-01-08T15:25:45.097403Z node 2 :KQP_RESOURCE_MANAGER INFO: kqp_rm_service.cpp:763: Received tenant pool status, serving tenant: /dc-1, board: kqprm+/dc-1 2026-01-08T15:25:45.097434Z node 2 :KQP_RESOURCE_MANAGER DEBUG: kqp_rm_service.cpp:923: Don't set KqpProxySharedResources 2026-01-08T15:25:45.097549Z node 2 :KQP_RESOURCE_MANAGER INFO: kqp_rm_service.cpp:939: Send to publish resource usage for reason: tenant updated, payload: NodeId: 2 ResourceManagerActorId { RawX1: 7886758914357752171 RawX2: 9223372045444738657 } Timestamp: 1767885945 AvailableComputeActors: 100 UsedMemory: 0 TotalMemory: 1000 Memory { Pool: 1 Available: 1000 } ExecutionUnits: 100 2026-01-08T15:25:45.097759Z node 1 :KQP_RESOURCE_MANAGER INFO: kqp_rm_service.cpp:763: Received tenant pool status, serving tenant: /dc-1, board: kqprm+/dc-1 2026-01-08T15:25:45.097788Z node 1 :KQP_RESOURCE_MANAGER DEBUG: kqp_rm_service.cpp:923: Don't set KqpProxySharedResources 2026-01-08T15:25:45.097852Z node 1 :KQP_RESOURCE_MANAGER INFO: kqp_rm_service.cpp:939: Send to publish resource usage for reason: tenant updated, payload: NodeId: 1 ResourceManagerActorId { RawX1: 7886758914357752171 RawX2: 9223372041149771361 } Timestamp: 1767885945 AvailableComputeActors: 100 UsedMemory: 0 TotalMemory: 1000 Memory { Pool: 1 Available: 1000 } ExecutionUnits: 100 2026-01-08T15:25:45.097966Z node 1 :KQP_RESOURCE_MANAGER INFO: kqp_resource_info_exchanger.cpp:462: Received tenant pool status for exchanger, serving tenant: /dc-1, board: kqpexch+/dc-1 2026-01-08T15:25:45.102566Z node 1 :KQP_RESOURCE_MANAGER DEBUG: kqp_resource_info_exchanger.cpp:476: Get board info from subscriber, serving tenant: /dc-1, board: kqpexch+/dc-1, with size: 0 2026-01-08T15:25:45.102919Z node 1 :KQP_RESOURCE_MANAGER DEBUG: kqp_resource_info_exchanger.cpp:498: Get board info update from subscriber, serving tenant: /dc-1, board: kqpexch+/dc-1, with size: 1 2026-01-08T15:25:45.103114Z node 1 :KQP_RESOURCE_MANAGER DEBUG: kqp_resource_info_exchanger.cpp:498: Get board info update from subscriber, serving tenant: /dc-1, board: kqpexch+/dc-1, with size: 1 2026-01-08T15:25:45.103519Z node 2 :KQP_RESOURCE_MANAGER DEBUG: kqp_resource_info_exchanger.cpp:476: Get board info from subscriber, serving tenant: /dc-1, board: kqpexch+/dc-1, with size: 1 2026-01-08T15:25:45.103685Z node 2 :KQP_RESOURCE_MANAGER 
DEBUG: kqp_resource_info_exchanger.cpp:498: Get board info update from subscriber, serving tenant: /dc-1, board: kqpexch+/dc-1, with size: 2 2026-01-08T15:25:45.103842Z node 2 :KQP_RESOURCE_MANAGER DEBUG: kqp_resource_info_exchanger.cpp:523: Get resources info from node: 1 2026-01-08T15:25:45.104042Z node 1 :KQP_RESOURCE_MANAGER DEBUG: kqp_resource_info_exchanger.cpp:523: Get resources info from node: 2 2026-01-08T15:25:45.104185Z node 2 :KQP_RESOURCE_MANAGER DEBUG: kqp_resource_info_exchanger.cpp:523: Get resources info from node: 1 2026-01-08T15:25:45.104260Z node 1 :KQP_RESOURCE_MANAGER DEBUG: kqp_resource_info_exchanger.cpp:523: Get resources info from node: 2 2026-01-08T15:25:45.110967Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:393: Submitted new kqp_query task kqp-1-2-1 (1 by [1:470:2350]) priority=0 resources={0, 100} 2026-01-08T15:25:45.111050Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:679: Assigning waiting task kqp-1-2-1 (1 by [1:470:2350]) to queue queue_kqp_resource_manager 2026-01-08T15:25:45.111116Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:637: Allocate resources {0, 100} for task kqp-1-2-1 (1 by [1:470:2350]) from queue queue_kqp_resource_manager 2026-01-08T15:25:45.111160Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:679: Assigning in-fly task kqp-1-2-1 (1 by [1:470:2350]) to queue queue_kqp_resource_manager 2026-01-08T15:25:45.111204Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:711: Updated planned resource usage for queue queue_kqp_resource_manager from 0.000000 to 0.250000 (insert task kqp-1-2-1 (1 by [1:470:2350])) 2026-01-08T15:25:45.111378Z node 1 :KQP_RESOURCE_MANAGER DEBUG: kqp_rm_service.cpp:351: TxId: 1, taskId: 2. Allocated TKqpResourcesRequest{ MemoryPool: 1, Memory: 100ExternalMemory: 0 } 2026-01-08T15:25:45.111461Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:393: Submitted new kqp_query task kqp-2-1-2 (2 by [1:470:2350]) priority=0 resources={0, 100} 2026-01-08T15:25:45.111503Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:679: Assigning waiting task kqp-2-1-2 (2 by [1:470:2350]) to queue queue_kqp_resource_manager 2026-01-08T15:25:45.111543Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:637: Allocate resources {0, 100} for task kqp-2-1-2 (2 by [1:470:2350]) from queue queue_kqp_resource_manager 2026-01-08T15:25:45.111593Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:679: Assigning in-fly task kqp-2-1-2 (2 by [1:470:2350]) to queue queue_kqp_resource_manager 2026-01-08T15:25:45.111629Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:711: Updated planned resource usage for queue queue_kqp_resource_manager from 0.250000 to 0.500000 (insert task kqp-2-1-2 (2 by [1:470:2350])) 2026-01-08T15:25:45.111758Z node 1 :KQP_RESOURCE_MANAGER DEBUG: kqp_rm_service.cpp:351: TxId: 2, taskId: 1. 
Allocated TKqpResourcesRequest{ MemoryPool: 1, Memory: 100ExternalMemory: 0 } 2026-01-08T15:25:45.111967Z node 1 :KQP_RESOURCE_MANAGER DEBUG: kqp_rm_service.cpp:923: Don't set KqpProxySharedResources 2026-01-08T15:25:45.112100Z node 1 :KQP_RESOURCE_MANAGER INFO: kqp_rm_service.cpp:939: Send to publish resource usage for reason: alloc, payload: NodeId: 1 ResourceManagerActorId { RawX1: 7886758914357752171 RawX2: 9223372041149771361 } Timestamp: 1767885945 AvailableComputeActors: 80 UsedMemory: 200 TotalMemory: 1000 Memory { Pool: 1 Available: 800 } ExecutionUnits: 80 2026-01-08T15:25:45.112376Z node 2 :KQP_RESOURCE_MANAGER DEBUG: kqp_resource_info_exchanger.cpp:523: Get resources info from node: 1 2026-01-08T15:25:46.373907Z node 1 :KQP_RESOURCE_MANAGER DEBUG: kqp_rm_service.cpp:423: Schedule Snapshot request 2026-01-08T15:25:46.374154Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:528: Finish task kqp-1-2-1 (1 by [1:470:2350]) (release resources {0, 100}) 2026-01-08T15:25:46.378783Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:577: Updated planned resource usage for queue queue_kqp_resource_manager from 0.500000 to 0.300050 (remove task kqp-1-2-1 (1 by [1:470:2350])) 2026-01-08T15:25:46.378979Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:582: Updated real resource usage for queue queue_kqp_resource_manager from 0.000000 to 0.100100 2026-01-08T15:25:46.379180Z node 1 :KQP_RESOURCE_MANAGER DEBUG: kqp_rm_service.cpp:404: TxId: 1, taskId: 2. Released resources, Memory: 100, Free Tier: 0, ExecutionUnits: 10. 2026-01-08T15:25:46.379877Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:528: Finish task kqp-2-1-2 (2 by [1:470:2350]) (release resources {0, 100}) 2026-01-08T15:25:46.380266Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:577: Updated planned resource usage for queue queue_kqp_resource_manager from 0.300050 to 0.100100 (remove task kqp-2-1-2 (2 by [1:470:2350])) 2026-01-08T15:25:46.380641Z node 1 :KQP_RESOURCE_MANAGER DEBUG: kqp_rm_service.cpp:404: TxId: 2, taskId: 1. Released resources, Memory: 100, Free Tier: 0, ExecutionUnits: 10. 
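Editor's note: the publish payloads in this test are internally consistent with the per-task costs in the release records. Each of the two in-flight tasks held 100 memory units and 10 execution units, so the "reason: alloc" publish advertises UsedMemory: 200, available memory 1000 - 200 = 800, and 100 - 2*10 = 80 available compute actors and execution units; after both releases, the follow-up publish in the next record returns to 1000 and 100.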
2026-01-08T15:25:46.381901Z node 1 :KQP_RESOURCE_MANAGER DEBUG: kqp_rm_service.cpp:923: Don't set KqpProxySharedResources 2026-01-08T15:25:46.386500Z node 1 :KQP_RESOURCE_MANAGER INFO: kqp_rm_service.cpp:939: Send to publish resource usage for reason: alloc, payload: NodeId: 1 ResourceManagerActorId { RawX1: 7886758914357752171 RawX2: 9223372041149771361 } Timestamp: 1767885946 AvailableComputeActors: 100 UsedMemory: 0 TotalMemory: 1000 Memory { Pool: 1 Available: 1000 } ExecutionUnits: 100 2026-01-08T15:25:46.388377Z node 2 :KQP_RESOURCE_MANAGER DEBUG: kqp_resource_info_exchanger.cpp:523: Get resources info from node: 1 2026-01-08T15:25:46.815711Z node 1 :KQP_RESOURCE_MANAGER DEBUG: kqp_rm_service.cpp:423: Schedule Snapshot request |85.0%| [TM] {BAZEL_UPLOAD} ydb/core/kqp/rm_service/ut/unittest >> TestFilterSet::FilterGroup [GOOD] >> test.py::test[solomon-BadDownsamplingInterval-] [GOOD] >> test.py::test[solomon-Basic-default.txt] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/rm_service/ut/unittest >> KqpRm::SnapshotSharingByExchanger [GOOD] Test command err: 2026-01-08T15:25:43.866809Z node 2 :BS_PDISK WARN: {BPD01@blobstorage_pdisk_blockdevice_async.cpp:924} Warning# got EIoResult::FileOpenError from IoContext->Setup PDiskId# 1000 2026-01-08T15:25:43.867255Z node 2 :BS_PDISK CRIT: {BPD39@blobstorage_pdisk_impl.cpp:2980} BlockDevice initialization error Details# Can't open file "/home/runner/.ya/build/build_root/663n/003182/r3tmp/tmpSoefu4/pdisk_1.dat": unknown reason, errno# 0. PDiskId# 1000 2026-01-08T15:25:43.867704Z node 2 :BS_PDISK CRIT: {BPD01@blobstorage_pdisk_actor.cpp:267} PDiskId# 1000 bootstrapped to the StateError, reason# Can't open file "/home/runner/.ya/build/build_root/663n/003182/r3tmp/tmpSoefu4/pdisk_1.dat": unknown reason, errno# 0. 
Can not be initialized Config: {TPDiskConfig Path# "/home/runner/.ya/build/build_root/663n/003182/r3tmp/tmpSoefu4/pdisk_1.dat" ExpectedPath# "" ExpectedSerial# "" PDiskGuid# 3318425875168398098 PDiskId# 1000 PDiskCategory# {Type# DEVICE_TYPE_ROT Kind# 0} MetadataOnly# 0 StartOwnerRound# 2 SectorMap# true EnableSectorEncryption # 0 ChunkSize# 134217728 SectorSize# 4096 StatisticsUpdateIntervalMs# 1000 SchedulerCfg# {TPDiskSchedulerConfig BytesSchedulerWeight# 1 LogWeight# 1 FreshWeight# 2 CompWeight# 7 SyncLogWeight# 15 HugeWeight# 2 FastReadWeight# 1 OtherReadWeight# 1 LoadWeight# 2 LowReadWeight# 1 MaxChunkReadsPerCycle# 16 MaxChunkReadsDurationPerCycleMs# 0.25 MaxChunkWritesPerCycle# 8 MaxChunkWritesDurationPerCycleMs# 1} MinLogChunksTotal# 4 MaxLogChunksPerOwnerMultiplier# 5 MaxLogChunksPerOwnerDivisor# 4 SortFreeChunksPerItems# 100 GetDriveDataSwitch# DoNotTouch WriteCacheSwitch# DoNotTouch DriveModelSeekTimeNs# 8000000 DriveModelSpeedBps# 127000000 DriveModelSpeedBpsMin# 135000000 DriveModelSpeedBpsMax# 200000000 DriveModelBulkWrieBlockSize# 2097152 DriveModelTrimSpeedBps# 0 ReorderingMs# 50 DeviceInFlight# 4 CostLimitNs# 50000000 BufferPoolBufferSizeBytes# 524288 BufferPoolBufferCount# 256 MaxQueuedCompletionActions# 128 ExpectedSlotCount# 0 SlotSizeInUnits# 0 ReserveLogChunksMultiplier# 56 InsaneLogChunksMultiplier# 40 RedLogChunksMultiplier# 30 OrangeLogChunksMultiplier# 20 WarningLogChunksMultiplier# 4 YellowLogChunksMultiplier# 4 MaxMetadataMegabytes# 32 SpaceColorBorder# GREEN CompletionThreadsCount# 1 UseNoopScheduler# false PlainDataChunks# 0 SeparateHugePriorities# 0} PDiskId# 1000 2026-01-08T15:25:43.936131Z node 2 :RESOURCE_BROKER DEBUG: resource_broker.cpp:1115: TResourceBrokerActor bootstrap 2026-01-08T15:25:43.936425Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:1115: TResourceBrokerActor bootstrap 2026-01-08T15:25:43.954030Z node 2 :KQP_RESOURCE_MANAGER DEBUG: kqp_rm_service.cpp:609: Start KqpResourceManagerActor at [2:472:2102] with ResourceBroker at [2:442:2101] 2026-01-08T15:25:43.954134Z node 2 :KQP_RESOURCE_MANAGER DEBUG: kqp_resource_info_exchanger.cpp:121: Start KqpResourceInfoExchangerActor at [2:473:2103] 2026-01-08T15:25:43.954193Z node 1 :KQP_RESOURCE_MANAGER DEBUG: kqp_rm_service.cpp:609: Start KqpResourceManagerActor at [1:470:2350] with ResourceBroker at [1:441:2331] 2026-01-08T15:25:43.954242Z node 1 :KQP_RESOURCE_MANAGER DEBUG: kqp_resource_info_exchanger.cpp:121: Start KqpResourceInfoExchangerActor at [1:471:2351] 2026-01-08T15:25:43.954309Z node 2 :KQP_RESOURCE_MANAGER CRIT: kqp_rm_service.cpp:806: Failed to deliver subscription request to config dispatcher 2026-01-08T15:25:43.962399Z node 2 :KQP_RESOURCE_MANAGER CRIT: kqp_resource_info_exchanger.cpp:410: Failed to deliver subscription request to config dispatcher. 2026-01-08T15:25:43.962455Z node 1 :KQP_RESOURCE_MANAGER CRIT: kqp_rm_service.cpp:806: Failed to deliver subscription request to config dispatcher 2026-01-08T15:25:43.962476Z node 1 :KQP_RESOURCE_MANAGER CRIT: kqp_resource_info_exchanger.cpp:410: Failed to deliver subscription request to config dispatcher. 
2026-01-08T15:25:43.962646Z node 1 :KQP_RESOURCE_MANAGER DEBUG: kqp_rm_service.cpp:923: Don't set KqpProxySharedResources 2026-01-08T15:25:43.994884Z node 1 :KQP_RESOURCE_MANAGER INFO: kqp_rm_service.cpp:939: Send to publish resource usage for reason: data_center update, payload: NodeId: 1 ResourceManagerActorId { RawX1: 7886758914357752171 RawX2: 9223372041149771361 } Timestamp: 1767885943 AvailableComputeActors: 100 UsedMemory: 0 TotalMemory: 1000 Memory { Pool: 1 Available: 1000 } ExecutionUnits: 100 2026-01-08T15:25:43.995122Z node 2 :KQP_RESOURCE_MANAGER DEBUG: kqp_rm_service.cpp:923: Don't set KqpProxySharedResources 2026-01-08T15:25:43.995193Z node 2 :KQP_RESOURCE_MANAGER INFO: kqp_rm_service.cpp:939: Send to publish resource usage for reason: data_center update, payload: NodeId: 2 ResourceManagerActorId { RawX1: 7886758914357752171 RawX2: 9223372045444738657 } Timestamp: 1767885943 AvailableComputeActors: 100 UsedMemory: 0 TotalMemory: 1000 Memory { Pool: 1 Available: 1000 } ExecutionUnits: 100 2026-01-08T15:25:43.995419Z node 2 :KQP_RESOURCE_MANAGER INFO: kqp_resource_info_exchanger.cpp:462: Received tenant pool status for exchanger, serving tenant: /dc-1, board: kqpexch+/dc-1 2026-01-08T15:25:43.995542Z node 2 :KQP_RESOURCE_MANAGER INFO: kqp_rm_service.cpp:763: Received tenant pool status, serving tenant: /dc-1, board: kqprm+/dc-1 2026-01-08T15:25:43.995569Z node 2 :KQP_RESOURCE_MANAGER DEBUG: kqp_rm_service.cpp:923: Don't set KqpProxySharedResources 2026-01-08T15:25:43.995660Z node 2 :KQP_RESOURCE_MANAGER INFO: kqp_rm_service.cpp:939: Send to publish resource usage for reason: tenant updated, payload: NodeId: 2 ResourceManagerActorId { RawX1: 7886758914357752171 RawX2: 9223372045444738657 } Timestamp: 1767885943 AvailableComputeActors: 100 UsedMemory: 0 TotalMemory: 1000 Memory { Pool: 1 Available: 1000 } ExecutionUnits: 100 2026-01-08T15:25:43.995812Z node 1 :KQP_RESOURCE_MANAGER INFO: kqp_rm_service.cpp:763: Received tenant pool status, serving tenant: /dc-1, board: kqprm+/dc-1 2026-01-08T15:25:43.995833Z node 1 :KQP_RESOURCE_MANAGER DEBUG: kqp_rm_service.cpp:923: Don't set KqpProxySharedResources 2026-01-08T15:25:43.995885Z node 1 :KQP_RESOURCE_MANAGER INFO: kqp_rm_service.cpp:939: Send to publish resource usage for reason: tenant updated, payload: NodeId: 1 ResourceManagerActorId { RawX1: 7886758914357752171 RawX2: 9223372041149771361 } Timestamp: 1767885943 AvailableComputeActors: 100 UsedMemory: 0 TotalMemory: 1000 Memory { Pool: 1 Available: 1000 } ExecutionUnits: 100 2026-01-08T15:25:43.995967Z node 1 :KQP_RESOURCE_MANAGER INFO: kqp_resource_info_exchanger.cpp:462: Received tenant pool status for exchanger, serving tenant: /dc-1, board: kqpexch+/dc-1 2026-01-08T15:25:43.996429Z node 1 :KQP_RESOURCE_MANAGER DEBUG: kqp_resource_info_exchanger.cpp:476: Get board info from subscriber, serving tenant: /dc-1, board: kqpexch+/dc-1, with size: 0 2026-01-08T15:25:43.996676Z node 1 :KQP_RESOURCE_MANAGER DEBUG: kqp_resource_info_exchanger.cpp:498: Get board info update from subscriber, serving tenant: /dc-1, board: kqpexch+/dc-1, with size: 1 2026-01-08T15:25:43.996864Z node 1 :KQP_RESOURCE_MANAGER DEBUG: kqp_resource_info_exchanger.cpp:498: Get board info update from subscriber, serving tenant: /dc-1, board: kqpexch+/dc-1, with size: 1 2026-01-08T15:25:43.997248Z node 2 :KQP_RESOURCE_MANAGER DEBUG: kqp_resource_info_exchanger.cpp:476: Get board info from subscriber, serving tenant: /dc-1, board: kqpexch+/dc-1, with size: 1 2026-01-08T15:25:43.997411Z node 2 :KQP_RESOURCE_MANAGER 
DEBUG: kqp_resource_info_exchanger.cpp:498: Get board info update from subscriber, serving tenant: /dc-1, board: kqpexch+/dc-1, with size: 2 2026-01-08T15:25:43.997540Z node 2 :KQP_RESOURCE_MANAGER DEBUG: kqp_resource_info_exchanger.cpp:523: Get resources info from node: 1 2026-01-08T15:25:43.997741Z node 1 :KQP_RESOURCE_MANAGER DEBUG: kqp_resource_info_exchanger.cpp:523: Get resources info from node: 2 2026-01-08T15:25:43.997880Z node 2 :KQP_RESOURCE_MANAGER DEBUG: kqp_resource_info_exchanger.cpp:523: Get resources info from node: 1 2026-01-08T15:25:43.997953Z node 1 :KQP_RESOURCE_MANAGER DEBUG: kqp_resource_info_exchanger.cpp:523: Get resources info from node: 2 2026-01-08T15:25:45.196214Z node 1 :KQP_RESOURCE_MANAGER DEBUG: kqp_rm_service.cpp:423: Schedule Snapshot request 2026-01-08T15:25:45.196339Z node 2 :KQP_RESOURCE_MANAGER DEBUG: kqp_rm_service.cpp:423: Schedule Snapshot request 2026-01-08T15:25:45.196480Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:393: Submitted new kqp_query task kqp-1-1-1 (1 by [1:470:2350]) priority=0 resources={0, 100} 2026-01-08T15:25:45.196529Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:679: Assigning waiting task kqp-1-1-1 (1 by [1:470:2350]) to queue queue_kqp_resource_manager 2026-01-08T15:25:45.196595Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:637: Allocate resources {0, 100} for task kqp-1-1-1 (1 by [1:470:2350]) from queue queue_kqp_resource_manager 2026-01-08T15:25:45.196641Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:679: Assigning in-fly task kqp-1-1-1 (1 by [1:470:2350]) to queue queue_kqp_resource_manager 2026-01-08T15:25:45.196688Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:711: Updated planned resource usage for queue queue_kqp_resource_manager from 0.000000 to 0.250000 (insert task kqp-1-1-1 (1 by [1:470:2350])) 2026-01-08T15:25:45.196939Z node 1 :KQP_RESOURCE_MANAGER DEBUG: kqp_rm_service.cpp:351: TxId: 1, taskId: 1. Allocated TKqpResourcesRequest{ MemoryPool: 1, Memory: 100ExternalMemory: 0 } 2026-01-08T15:25:45.197047Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:393: Submitted new kqp_query task kqp-2-1-2 (2 by [1:470:2350]) priority=0 resources={0, 100} 2026-01-08T15:25:45.197093Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:679: Assigning waiting task kqp-2-1-2 (2 by [1:470:2350]) to queue queue_kqp_resource_manager 2026-01-08T15:25:45.197133Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:637: Allocate resources {0, 100} for task kqp-2-1-2 (2 by [1:470:2350]) from queue queue_kqp_resource_manager 2026-01-08T15:25:45.197171Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:679: Assigning in-fly task kqp-2-1-2 (2 by [1:470:2350]) to queue queue_kqp_resource_manager 2026-01-08T15:25:45.197220Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:711: Updated planned resource usage for queue queue_kqp_resource_manager from 0.250000 to 0.500000 (insert task kqp-2-1-2 (2 by [1:470:2350])) 2026-01-08T15:25:45.197309Z node 1 :KQP_RESOURCE_MANAGER DEBUG: kqp_rm_service.cpp:351: TxId: 2, taskId: 1. 
Allocated TKqpResourcesRequest{ MemoryPool: 1, Memory: 100ExternalMemory: 0 } 2026-01-08T15:25:45.197393Z node 1 :KQP_RESOURCE_MANAGER DEBUG: kqp_rm_service.cpp:923: Don't set KqpProxySharedResources 2026-01-08T15:25:45.197526Z node 1 :KQP_RESOURCE_MANAGER INFO: kqp_rm_service.cpp:939: Send to publish resource usage for reason: alloc, payload: NodeId: 1 ResourceManagerActorId { RawX1: 7886758914357752171 RawX2: 9223372041149771361 } Timestamp: 1767885944 AvailableComputeActors: 80 UsedMemory: 200 TotalMemory: 1000 Memory { Pool: 1 Available: 800 } ExecutionUnits: 80 2026-01-08T15:25:45.197837Z node 2 :KQP_RESOURCE_MANAGER DEBUG: kqp_resource_info_exchanger.cpp:523: Get resources info from node: 1 2026-01-08T15:25:45.517181Z node 2 :KQP_RESOURCE_MANAGER DEBUG: kqp_rm_service.cpp:423: Schedule Snapshot request 2026-01-08T15:25:45.517321Z node 2 :RESOURCE_BROKER DEBUG: resource_broker.cpp:393: Submitted new kqp_query task kqp-1-1-1 (1 by [2:472:2102]) priority=0 resources={0, 100} 2026-01-08T15:25:45.517381Z node 2 :RESOURCE_BROKER DEBUG: resource_broker.cpp:679: Assigning waiting task kqp-1-1-1 (1 by [2:472:2102]) to queue queue_kqp_resource_manager 2026-01-08T15:25:45.517443Z node 2 :RESOURCE_BROKER DEBUG: resource_broker.cpp:637: Allocate resources {0, 100} for task kqp-1-1-1 (1 by [2:472:2102]) from queue queue_kqp_resource_manager 2026-01-08T15:25:45.517495Z node 2 :RESOURCE_BROKER DEBUG: resource_broker.cpp:679: Assigning in-fly task kqp-1-1-1 (1 by [2:472:2102]) to queue queue_kqp_resource_manager 2026-01-08T15:25:45.517537Z node 2 :RESOURCE_BROKER DEBUG: resource_broker.cpp:711: Updated planned resource usage for queue queue_kqp_resource_manager from 0.000000 to 0.250000 (insert task kqp-1-1-1 (1 by [2:472:2102])) 2026-01-08T15:25:45.517640Z node 2 :KQP_RESOURCE_MANAGER DEBUG: kqp_rm_service.cpp:351: TxId: 1, taskId: 1. Allocated TKqpResourcesRequest{ MemoryPool: 1, Memory: 100ExternalMemory: 0 } 2026-01-08T15:25:45.517707Z node 2 :RESOURCE_BROKER DEBUG: resource_broker.cpp:393: Submitted new kqp_query task kqp-2-2-2 (2 by [2:472:2102]) priority=0 resources={0, 100} 2026-01-08T15:25:45.517747Z node 2 :RESOURCE_BROKER DEBUG: resource_broker.cpp:679: Assigning waiting task kqp-2-2-2 (2 by [2:472:2102]) to queue queue_kqp_resource_manager 2026-01-08T15:25:45.517842Z node 2 :RESOURCE_BROKER DEBUG: resource_broker.cpp:637: Allocate resources {0, 100} for task kqp-2-2-2 (2 by [2:472:2102]) from queue queue_kqp_resource_manager 2026-01-08T15:25:45.517878Z node 2 :RESOURCE_BROKER DEBUG: resource_broker.cpp:679: Assigning in-fly task kqp-2-2-2 (2 by [2:472:2102]) to queue queue_kqp_resource_manager 2026-01-08T15:25:45.517911Z node 2 :RESOURCE_BROKER DEBUG: resource_broker.cpp:711: Updated planned resource usage for queue queue_kqp_resource_manager from 0.250000 to 0.500000 (insert task kqp-2-2-2 (2 by [2:472:2102])) 2026-01-08T15:25:45.517980Z node 2 :KQP_RESOURCE_MANAGER DEBUG: kqp_rm_service.cpp:351: TxId: 2, taskId: 2. 
Allocated TKqpResourcesRequest{ MemoryPool: 1, Memory: 100ExternalMemory: 0 } 2026-01-08T15:25:45.518045Z node 2 :KQP_RESOURCE_MANAGER DEBUG: kqp_rm_service.cpp:923: Don't set KqpProxySharedResources 2026-01-08T15:25:45.518189Z node 2 :KQP_RESOURCE_MANAGER INFO: kqp_rm_service.cpp:939: Send to publish resource usage for reason: alloc, payload: NodeId: 2 ResourceManagerActorId { RawX1: 7886758914357752171 RawX2: 9223372045444738657 } Timestamp: 1767885945 AvailableComputeActors: 80 UsedMemory: 200 TotalMemory: 1000 Memory { Pool: 1 Available: 800 } ExecutionUnits: 80 2026-01-08T15:25:45.522633Z node 1 :KQP_RESOURCE_MANAGER DEBUG: kqp_resource_info_exchanger.cpp:523: Get resources info from node: 2 2026-01-08T15:25:45.890731Z node 1 :KQP_RESOURCE_MANAGER DEBUG: kqp_rm_service.cpp:423: Schedule Snapshot request 2026-01-08T15:25:45.890836Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:528: Finish task kqp-1-1-1 (1 by [1:470:2350]) (release resources {0, 100}) 2026-01-08T15:25:45.890885Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:577: Updated planned resource usage for queue queue_kqp_resource_manager from 0.500000 to 0.350200 (remove task kqp-1-1-1 (1 by [1:470:2350])) 2026-01-08T15:25:45.890919Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:582: Updated real resource usage for queue queue_kqp_resource_manager from 0.000000 to 0.200400 2026-01-08T15:25:45.890959Z node 1 :KQP_RESOURCE_MANAGER DEBUG: kqp_rm_service.cpp:404: TxId: 1, taskId: 1. Released resources, Memory: 100, Free Tier: 0, ExecutionUnits: 10. 2026-01-08T15:25:45.890996Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:528: Finish task kqp-2-1-2 (2 by [1:470:2350]) (release resources {0, 100}) 2026-01-08T15:25:45.891031Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:577: Updated planned resource usage for queue queue_kqp_resource_manager from 0.350200 to 0.200400 (remove task kqp-2-1-2 (2 by [1:470:2350])) 2026-01-08T15:25:45.891067Z node 1 :KQP_RESOURCE_MANAGER DEBUG: kqp_rm_service.cpp:404: TxId: 2, taskId: 1. Released resources, Memory: 100, Free Tier: 0, ExecutionUnits: 10. 2026-01-08T15:25:45.891119Z node 1 :KQP_RESOURCE_MANAGER DEBUG: kqp_rm_service.cpp:923: Don't set KqpProxySharedResources 2026-01-08T15:25:45.891251Z node 1 :KQP_RESOURCE_MANAGER INFO: kqp_rm_service.cpp:939: Send to publish resource usage for reason: alloc, payload: NodeId: 1 ResourceManagerActorId { RawX1: 7886758914357752171 RawX2: 9223372041149771361 } Timestamp: 1767885946 AvailableComputeActors: 100 UsedMemory: 0 TotalMemory: 1000 Memory { Pool: 1 Available: 1000 } ExecutionUnits: 100 2026-01-08T15:25:45.891562Z node 2 :KQP_RESOURCE_MANAGER DEBUG: kqp_resource_info_exchanger.cpp:523: Get resources info from node: 1 2026-01-08T15:25:46.238833Z node 2 :KQP_RESOURCE_MANAGER DEBUG: kqp_rm_service.cpp:423: Schedule Snapshot request 2026-01-08T15:25:46.239264Z node 2 :RESOURCE_BROKER DEBUG: resource_broker.cpp:528: Finish task kqp-1-1-1 (1 by [2:472:2102]) (release resources {0, 100}) 2026-01-08T15:25:46.239471Z node 2 :RESOURCE_BROKER DEBUG: resource_broker.cpp:577: Updated planned resource usage for queue queue_kqp_resource_manager from 0.500000 to 0.350200 (remove task kqp-1-1-1 (1 by [2:472:2102])) 2026-01-08T15:25:46.239867Z node 2 :RESOURCE_BROKER DEBUG: resource_broker.cpp:582: Updated real resource usage for queue queue_kqp_resource_manager from 0.000000 to 0.200400 2026-01-08T15:25:46.240106Z node 2 :KQP_RESOURCE_MANAGER DEBUG: kqp_rm_service.cpp:404: TxId: 1, taskId: 1. 
Released resources, Memory: 100, Free Tier: 0, ExecutionUnits: 10. 2026-01-08T15:25:46.240313Z node 2 :RESOURCE_BROKER DEBUG: resource_broker.cpp:528: Finish task kqp-2-2-2 (2 by [2:472:2102]) (release resources {0, 100}) 2026-01-08T15:25:46.240515Z node 2 :RESOURCE_BROKER DEBUG: resource_broker.cpp:577: Updated planned resource usage for queue queue_kqp_resource_manager from 0.350200 to 0.200400 (remove task kqp-2-2-2 (2 by [2:472:2102])) 2026-01-08T15:25:46.240914Z node 2 :KQP_RESOURCE_MANAGER DEBUG: kqp_rm_service.cpp:404: TxId: 2, taskId: 2. Released resources, Memory: 100, Free Tier: 0, ExecutionUnits: 10. 2026-01-08T15:25:46.241659Z node 2 :KQP_RESOURCE_MANAGER DEBUG: kqp_rm_service.cpp:923: Don't set KqpProxySharedResources 2026-01-08T15:25:46.249163Z node 2 :KQP_RESOURCE_MANAGER INFO: kqp_rm_service.cpp:939: Send to publish resource usage for reason: alloc, payload: NodeId: 2 ResourceManagerActorId { RawX1: 7886758914357752171 RawX2: 9223372045444738657 } Timestamp: 1767885947 AvailableComputeActors: 100 UsedMemory: 0 TotalMemory: 1000 Memory { Pool: 1 Available: 1000 } ExecutionUnits: 100 2026-01-08T15:25:46.251133Z node 1 :KQP_RESOURCE_MANAGER DEBUG: kqp_resource_info_exchanger.cpp:523: Get resources info from node: 2 2026-01-08T15:25:46.609712Z node 1 :KQP_RESOURCE_MANAGER DEBUG: kqp_rm_service.cpp:423: Schedule Snapshot request |85.0%| [TM] {BAZEL_UPLOAD} ydb/core/kqp/rm_service/ut/unittest ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/rm_service/ut/unittest >> KqpRm::DisonnectNodes [GOOD] Test command err: 2026-01-08T15:25:44.730943Z node 2 :BS_PDISK WARN: {BPD01@blobstorage_pdisk_blockdevice_async.cpp:924} Warning# got EIoResult::FileOpenError from IoContext->Setup PDiskId# 1000 2026-01-08T15:25:44.731449Z node 2 :BS_PDISK CRIT: {BPD39@blobstorage_pdisk_impl.cpp:2980} BlockDevice initialization error Details# Can't open file "/home/runner/.ya/build/build_root/663n/003180/r3tmp/tmpSIUU7b/pdisk_1.dat": unknown reason, errno# 0. PDiskId# 1000 2026-01-08T15:25:44.731991Z node 2 :BS_PDISK CRIT: {BPD01@blobstorage_pdisk_actor.cpp:267} PDiskId# 1000 bootstrapped to the StateError, reason# Can't open file "/home/runner/.ya/build/build_root/663n/003180/r3tmp/tmpSIUU7b/pdisk_1.dat": unknown reason, errno# 0. 
Can not be initialized Config: {TPDiskConfig Path# "/home/runner/.ya/build/build_root/663n/003180/r3tmp/tmpSIUU7b/pdisk_1.dat" ExpectedPath# "" ExpectedSerial# "" PDiskGuid# 17350336904204297026 PDiskId# 1000 PDiskCategory# {Type# DEVICE_TYPE_ROT Kind# 0} MetadataOnly# 0 StartOwnerRound# 2 SectorMap# true EnableSectorEncryption # 0 ChunkSize# 134217728 SectorSize# 4096 StatisticsUpdateIntervalMs# 1000 SchedulerCfg# {TPDiskSchedulerConfig BytesSchedulerWeight# 1 LogWeight# 1 FreshWeight# 2 CompWeight# 7 SyncLogWeight# 15 HugeWeight# 2 FastReadWeight# 1 OtherReadWeight# 1 LoadWeight# 2 LowReadWeight# 1 MaxChunkReadsPerCycle# 16 MaxChunkReadsDurationPerCycleMs# 0.25 MaxChunkWritesPerCycle# 8 MaxChunkWritesDurationPerCycleMs# 1} MinLogChunksTotal# 4 MaxLogChunksPerOwnerMultiplier# 5 MaxLogChunksPerOwnerDivisor# 4 SortFreeChunksPerItems# 100 GetDriveDataSwitch# DoNotTouch WriteCacheSwitch# DoNotTouch DriveModelSeekTimeNs# 8000000 DriveModelSpeedBps# 127000000 DriveModelSpeedBpsMin# 135000000 DriveModelSpeedBpsMax# 200000000 DriveModelBulkWrieBlockSize# 2097152 DriveModelTrimSpeedBps# 0 ReorderingMs# 50 DeviceInFlight# 4 CostLimitNs# 50000000 BufferPoolBufferSizeBytes# 524288 BufferPoolBufferCount# 256 MaxQueuedCompletionActions# 128 ExpectedSlotCount# 0 SlotSizeInUnits# 0 ReserveLogChunksMultiplier# 56 InsaneLogChunksMultiplier# 40 RedLogChunksMultiplier# 30 OrangeLogChunksMultiplier# 20 WarningLogChunksMultiplier# 4 YellowLogChunksMultiplier# 4 MaxMetadataMegabytes# 32 SpaceColorBorder# GREEN CompletionThreadsCount# 1 UseNoopScheduler# false PlainDataChunks# 0 SeparateHugePriorities# 0} PDiskId# 1000 2026-01-08T15:25:44.836882Z node 2 :RESOURCE_BROKER DEBUG: resource_broker.cpp:1115: TResourceBrokerActor bootstrap 2026-01-08T15:25:44.837220Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:1115: TResourceBrokerActor bootstrap 2026-01-08T15:25:44.868148Z node 2 :KQP_RESOURCE_MANAGER DEBUG: kqp_rm_service.cpp:609: Start KqpResourceManagerActor at [2:472:2102] with ResourceBroker at [2:442:2101] 2026-01-08T15:25:44.868302Z node 2 :KQP_RESOURCE_MANAGER DEBUG: kqp_resource_info_exchanger.cpp:121: Start KqpResourceInfoExchangerActor at [2:473:2103] 2026-01-08T15:25:44.868375Z node 1 :KQP_RESOURCE_MANAGER DEBUG: kqp_rm_service.cpp:609: Start KqpResourceManagerActor at [1:470:2350] with ResourceBroker at [1:441:2331] 2026-01-08T15:25:44.868430Z node 1 :KQP_RESOURCE_MANAGER DEBUG: kqp_resource_info_exchanger.cpp:121: Start KqpResourceInfoExchangerActor at [1:471:2351] 2026-01-08T15:25:44.868513Z node 2 :KQP_RESOURCE_MANAGER CRIT: kqp_rm_service.cpp:806: Failed to deliver subscription request to config dispatcher 2026-01-08T15:25:44.868565Z node 2 :KQP_RESOURCE_MANAGER CRIT: kqp_resource_info_exchanger.cpp:410: Failed to deliver subscription request to config dispatcher. 2026-01-08T15:25:44.868617Z node 1 :KQP_RESOURCE_MANAGER CRIT: kqp_rm_service.cpp:806: Failed to deliver subscription request to config dispatcher 2026-01-08T15:25:44.868638Z node 1 :KQP_RESOURCE_MANAGER CRIT: kqp_resource_info_exchanger.cpp:410: Failed to deliver subscription request to config dispatcher. 
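Editor's note: the BS_PDISK records above show the same failure shape in every KqpRm test binary in this run: the block device cannot be opened, and instead of crashing, the PDisk actor parks itself in StateError with the reason attached. A minimal sketch of that bootstrap guard follows; the types and flow are illustrative only, not the actual blobstorage_pdisk_actor.cpp code.

```cpp
#include <cstdio>
#include <optional>
#include <string>

enum class EPDiskState { Initial, Ready, Error };

struct TPDiskActorSketch {
    EPDiskState State = EPDiskState::Initial;
    std::string ErrorReason;

    // Stands in for the real device setup; here it always fails the same
    // way the log does (EIoResult::FileOpenError from IoContext->Setup).
    std::optional<std::string> OpenDevice(const std::string& path) {
        return "Can't open file \"" + path + "\": unknown reason, errno# 0.";
    }

    void Bootstrap(const std::string& path) {
        if (auto err = OpenDevice(path)) {
            ErrorReason = *err;
            State = EPDiskState::Error; // "bootstrapped to the StateError"
            std::fprintf(stderr, "PDiskId# 1000 %s\n", ErrorReason.c_str());
            return;
        }
        State = EPDiskState::Ready;
    }
};

int main() {
    TPDiskActorSketch pdisk;
    pdisk.Bootstrap("/nonexistent/pdisk_1.dat");
    return pdisk.State == EPDiskState::Error ? 0 : 1;
}
```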
2026-01-08T15:25:44.868821Z node 1 :KQP_RESOURCE_MANAGER DEBUG: kqp_rm_service.cpp:923: Don't set KqpProxySharedResources 2026-01-08T15:25:44.901121Z node 1 :KQP_RESOURCE_MANAGER INFO: kqp_rm_service.cpp:939: Send to publish resource usage for reason: data_center update, payload: NodeId: 1 ResourceManagerActorId { RawX1: 7886758914357752171 RawX2: 9223372041149771361 } Timestamp: 1767885944 AvailableComputeActors: 100 UsedMemory: 0 TotalMemory: 1000 Memory { Pool: 1 Available: 1000 } ExecutionUnits: 100 2026-01-08T15:25:44.901452Z node 2 :KQP_RESOURCE_MANAGER DEBUG: kqp_rm_service.cpp:923: Don't set KqpProxySharedResources 2026-01-08T15:25:44.901542Z node 2 :KQP_RESOURCE_MANAGER INFO: kqp_rm_service.cpp:939: Send to publish resource usage for reason: data_center update, payload: NodeId: 2 ResourceManagerActorId { RawX1: 7886758914357752171 RawX2: 9223372045444738657 } Timestamp: 1767885944 AvailableComputeActors: 100 UsedMemory: 0 TotalMemory: 1000 Memory { Pool: 1 Available: 1000 } ExecutionUnits: 100 2026-01-08T15:25:44.901811Z node 2 :KQP_RESOURCE_MANAGER INFO: kqp_resource_info_exchanger.cpp:462: Received tenant pool status for exchanger, serving tenant: /dc-1, board: kqpexch+/dc-1 2026-01-08T15:25:44.901958Z node 2 :KQP_RESOURCE_MANAGER INFO: kqp_rm_service.cpp:763: Received tenant pool status, serving tenant: /dc-1, board: kqprm+/dc-1 2026-01-08T15:25:44.901993Z node 2 :KQP_RESOURCE_MANAGER DEBUG: kqp_rm_service.cpp:923: Don't set KqpProxySharedResources 2026-01-08T15:25:44.902089Z node 2 :KQP_RESOURCE_MANAGER INFO: kqp_rm_service.cpp:939: Send to publish resource usage for reason: tenant updated, payload: NodeId: 2 ResourceManagerActorId { RawX1: 7886758914357752171 RawX2: 9223372045444738657 } Timestamp: 1767885944 AvailableComputeActors: 100 UsedMemory: 0 TotalMemory: 1000 Memory { Pool: 1 Available: 1000 } ExecutionUnits: 100 2026-01-08T15:25:44.902276Z node 1 :KQP_RESOURCE_MANAGER INFO: kqp_rm_service.cpp:763: Received tenant pool status, serving tenant: /dc-1, board: kqprm+/dc-1 2026-01-08T15:25:44.902302Z node 1 :KQP_RESOURCE_MANAGER DEBUG: kqp_rm_service.cpp:923: Don't set KqpProxySharedResources 2026-01-08T15:25:44.902397Z node 1 :KQP_RESOURCE_MANAGER INFO: kqp_rm_service.cpp:939: Send to publish resource usage for reason: tenant updated, payload: NodeId: 1 ResourceManagerActorId { RawX1: 7886758914357752171 RawX2: 9223372041149771361 } Timestamp: 1767885944 AvailableComputeActors: 100 UsedMemory: 0 TotalMemory: 1000 Memory { Pool: 1 Available: 1000 } ExecutionUnits: 100 2026-01-08T15:25:44.902497Z node 1 :KQP_RESOURCE_MANAGER INFO: kqp_resource_info_exchanger.cpp:462: Received tenant pool status for exchanger, serving tenant: /dc-1, board: kqpexch+/dc-1 2026-01-08T15:25:44.903032Z node 1 :KQP_RESOURCE_MANAGER DEBUG: kqp_resource_info_exchanger.cpp:476: Get board info from subscriber, serving tenant: /dc-1, board: kqpexch+/dc-1, with size: 0 2026-01-08T15:25:44.903437Z node 1 :KQP_RESOURCE_MANAGER DEBUG: kqp_resource_info_exchanger.cpp:498: Get board info update from subscriber, serving tenant: /dc-1, board: kqpexch+/dc-1, with size: 1 2026-01-08T15:25:44.903649Z node 1 :KQP_RESOURCE_MANAGER DEBUG: kqp_resource_info_exchanger.cpp:498: Get board info update from subscriber, serving tenant: /dc-1, board: kqpexch+/dc-1, with size: 1 2026-01-08T15:25:44.904074Z node 2 :KQP_RESOURCE_MANAGER DEBUG: kqp_resource_info_exchanger.cpp:476: Get board info from subscriber, serving tenant: /dc-1, board: kqpexch+/dc-1, with size: 1 2026-01-08T15:25:44.904291Z node 2 :KQP_RESOURCE_MANAGER 
DEBUG: kqp_resource_info_exchanger.cpp:498: Get board info update from subscriber, serving tenant: /dc-1, board: kqpexch+/dc-1, with size: 2 2026-01-08T15:25:44.904451Z node 2 :KQP_RESOURCE_MANAGER DEBUG: kqp_resource_info_exchanger.cpp:523: Get resources info from node: 1 2026-01-08T15:25:44.904674Z node 1 :KQP_RESOURCE_MANAGER DEBUG: kqp_resource_info_exchanger.cpp:523: Get resources info from node: 2 2026-01-08T15:25:44.904824Z node 2 :KQP_RESOURCE_MANAGER DEBUG: kqp_resource_info_exchanger.cpp:523: Get resources info from node: 1 2026-01-08T15:25:44.904915Z node 1 :KQP_RESOURCE_MANAGER DEBUG: kqp_resource_info_exchanger.cpp:523: Get resources info from node: 2 2026-01-08T15:25:46.118882Z node 1 :KQP_RESOURCE_MANAGER DEBUG: kqp_rm_service.cpp:423: Schedule Snapshot request 2026-01-08T15:25:46.119007Z node 2 :KQP_RESOURCE_MANAGER DEBUG: kqp_rm_service.cpp:423: Schedule Snapshot request 2026-01-08T15:25:46.119546Z node 1 :PIPE_SERVER ERROR: tablet_pipe_server.cpp:228: [72057594037932033] NodeDisconnected NodeId# 2 2026-01-08T15:25:46.119706Z node 1 :PIPE_SERVER ERROR: tablet_pipe_server.cpp:228: [72057594046578946] NodeDisconnected NodeId# 2 2026-01-08T15:25:46.120213Z node 1 :PIPE_SERVER ERROR: tablet_pipe_server.cpp:228: [72057594046447617] NodeDisconnected NodeId# 2 2026-01-08T15:25:46.121063Z node 2 :BS_NODE ERROR: {NW42@node_warden_pipe.cpp:60} Handle(TEvTabletPipe::TEvClientDestroyed) ClientId# [2:62:2076] ServerId# [1:365:2282] TabletId# 72057594037932033 PipeClientId# [2:62:2076] 2026-01-08T15:25:46.121316Z node 2 :TX_PROXY WARN: proxy_impl.cpp:226: actor# [2:152:2089] HANDLE TEvClientDestroyed from tablet# 72057594046447617 2026-01-08T15:25:46.121451Z node 2 :KQP_RESOURCE_MANAGER DEBUG: kqp_resource_info_exchanger.cpp:498: Get board info update from subscriber, serving tenant: /dc-1, board: kqpexch+/dc-1, with size: 2 2026-01-08T15:25:46.121605Z node 1 :KQP_RESOURCE_MANAGER DEBUG: kqp_resource_info_exchanger.cpp:498: Get board info update from subscriber, serving tenant: /dc-1, board: kqpexch+/dc-1, with size: 1 2026-01-08T15:25:46.121784Z node 2 :KQP_RESOURCE_MANAGER INFO: kqp_resource_info_exchanger.cpp:492: Subcriber is not available for info exchanger, serving tenant: /dc-1, board: kqpexch+/dc-1 2026-01-08T15:25:46.121830Z node 2 :KQP_RESOURCE_MANAGER INFO: kqp_resource_info_exchanger.cpp:167: Kill previous info exchanger subscriber for 'kqpexch+/dc-1' at [2:475:2105], reason: tenant updated 2026-01-08T15:25:46.131103Z node 2 :KQP_RESOURCE_MANAGER DEBUG: kqp_resource_info_exchanger.cpp:476: Get board info from subscriber, serving tenant: /dc-1, board: kqpexch+/dc-1, with size: 1 2026-01-08T15:25:46.131354Z node 2 :KQP_RESOURCE_MANAGER DEBUG: kqp_resource_info_exchanger.cpp:498: Get board info update from subscriber, serving tenant: /dc-1, board: kqpexch+/dc-1, with size: 1 2026-01-08T15:25:46.493867Z node 1 :KQP_RESOURCE_MANAGER DEBUG: kqp_rm_service.cpp:423: Schedule Snapshot request |85.0%| [TM] {BAZEL_UPLOAD} ydb/core/kqp/rm_service/ut/unittest
------- [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/groupinfo/ut/unittest >> TBlobStorageGroupInfoBlobMapTest::BasicChecks [GOOD]
Test command err:
None domains 1 new (ns): 187.8841513
None domains 1 old (ns): 88.771611
None domains 9 new (ns): 177.1730308
None domains 9 old (ns): 84.2285468
Mirror3 domains 4 new (ns): 162.4224154
Mirror3 domains 4 old (ns): 74.94551636
Mirror3 domains 9 new (ns): 143.1113998
Mirror3 domains 9 old (ns): 67.29074767
4Plus2Block domains 8 new (ns): 108.2691883
4Plus2Block domains 8 old (ns): 114.3709779
4Plus2Block domains 9 new (ns): 262.3616442
4Plus2Block domains 9 old (ns): 116.9761598
ErasureMirror3of4 domains 8 new (ns): 211.2013401
ErasureMirror3of4 domains 8 old (ns): 154.3671222
ErasureMirror3of4 domains 9 new (ns): 200.2864254
ErasureMirror3of4 domains 9 old (ns): 134.5077406
|85.0%| [TM] {BAZEL_UPLOAD} ydb/core/blobstorage/groupinfo/ut/unittest >> TestFilterSet::DuplicationValidation >> TDqSolomonWriteActorTest::TestWriteBigBatchSolomon [GOOD] >> TDqSolomonWriteActorTest::TestWriteWithTimeseries |85.0%| [TA] $(B)/ydb/core/kqp/rm_service/ut/test-results/unittest/{meta.json ... results_accumulator.log} |85.0%| [TA] {BAZEL_UPLOAD} $(B)/ydb/core/kqp/rm_service/ut/test-results/unittest/{meta.json ... results_accumulator.log} |85.0%| [TA] $(B)/ydb/core/blobstorage/groupinfo/ut/test-results/unittest/{meta.json ... results_accumulator.log} |85.0%| [TA] {BAZEL_UPLOAD} $(B)/ydb/core/blobstorage/groupinfo/ut/test-results/unittest/{meta.json ... results_accumulator.log} |85.0%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/datashard/build_index/ut/ydb-core-tx-datashard-build_index-ut |85.0%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/datashard/build_index/ut/ydb-core-tx-datashard-build_index-ut |85.0%| [TA] {RESULT} $(B)/ydb/core/kqp/rm_service/ut/test-results/unittest/{meta.json ... results_accumulator.log} |85.0%| [TA] {RESULT} $(B)/ydb/core/blobstorage/groupinfo/ut/test-results/unittest/{meta.json ... results_accumulator.log} |85.0%| [LD] {RESULT} $(B)/ydb/core/tx/datashard/build_index/ut/ydb-core-tx-datashard-build_index-ut >> BlobDepot::LoadPutAndRead [GOOD] >> BlobDepot::DecommitPutAndRead |85.0%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/schemeshard/ut_login_large/ydb-core-tx-schemeshard-ut_login_large |85.0%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/schemeshard/ut_login_large/ydb-core-tx-schemeshard-ut_login_large |85.0%| [LD] {RESULT} $(B)/ydb/core/tx/schemeshard/ut_login_large/ydb-core-tx-schemeshard-ut_login_large |85.0%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/driver_lib/run/ut/ydb-core-driver_lib-run-ut |85.0%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/datashard/ut_locks/ydb-core-tx-datashard-ut_locks |85.0%| [LD] {RESULT} $(B)/ydb/core/driver_lib/run/ut/ydb-core-driver_lib-run-ut |85.0%| [LD] {RESULT} $(B)/ydb/core/tx/datashard/ut_locks/ydb-core-tx-datashard-ut_locks |85.0%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/datashard/ut_locks/ydb-core-tx-datashard-ut_locks |85.0%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/driver_lib/run/ut/ydb-core-driver_lib-run-ut >> TestFilterSet::DuplicationValidation [GOOD] >> TestFilterSet::CompilationValidation |85.0%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/kqp/federated_query/ut_service/fast/ydb-core-kqp-federated_query-ut_service-fast |85.0%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/kqp/federated_query/ut_service/fast/ydb-core-kqp-federated_query-ut_service-fast |85.0%| [LD] {RESULT} $(B)/ydb/core/kqp/federated_query/ut_service/fast/ydb-core-kqp-federated_query-ut_service-fast >> TTxDataShardBuildFulltextDictScan::BadRequest >> TDqSolomonWriteActorTest::TestWriteWithTimeseries [GOOD] >> TDqSolomonWriteActorTest::TestCheckpoints >> test_transform.py::TestYamlConfigTransformations::test_simplified[dump] [GOOD] >> test_transform.py::TestYamlConfigTransformations::test_simplified[dump_ds_init] >> TDataShardLocksTest::Points_OneTx >> TDqSolomonWriteActorTest::TestCheckpoints [GOOD] >>
TDqSolomonWriteActorTest::TestShouldReturnAfterCheckpoint >> XdsBootstrapConfigInitializer::CanNotSetEnvIfXdsBootstrapConfigIsAbsent [GOOD] >> TestFilterSet::CompilationValidation [GOOD] >> TDataShardLocksTest::Points_OneTx [GOOD] >> TDataShardLocksTest::Points_ManyTx_RemoveAll >> AutoConfig::GetASPoolsWith2CPUs [GOOD] >> AutoConfig::GetASPoolsWith3CPUs [GOOD] >> AutoConfig::GetServicePoolsWith1CPU [GOOD] >> XdsBootstrapConfigInitializer::CanSetGrpcXdsBootstrapConfigEnvWithSomeNumberOfXdsServers >> TestFilterSet::Watermark >> XdsBootstrapConfigInitializer::CanSetGrpcXdsBootstrapConfigEnvWithSomeNumberOfXdsServers [GOOD] >> XdsBootstrapConfigInitializer::CanNotSetGrpcXdsBootstrapConfigEnvIfVariableAlreadySet [GOOD] >> AutoConfig::GetASPoolsith1CPU [GOOD] >> AutoConfig::GetASPoolsWith4AndMoreCPUs [GOOD] >> XdsBootstrapConfigInitializer::CanSetGrpcXdsBootstrapConfigEnv [GOOD] >> TDataShardLocksTest::MvccTestOooTxDoesntBreakPrecedingReadersLocks [GOOD] >> TDataShardLocksTest::MvccTestOutdatedLocksRemove [GOOD] >> TDataShardLocksTest::MvccTestBreakEdge [GOOD] >> TDataShardLocksTest::MvccTestAlreadyBrokenLocks [GOOD] >> AutoConfig::GetServicePoolsWith2CPUs [GOOD] >> TStateStorageConfig::TestMultiReplicaFailDomains [GOOD] >> TStateStorageConfig::TestReplicaSelectionUniqueCombinations >> AutoConfig::GetServicePoolsWith4AndMoreCPUs [GOOD] >> TDataShardLocksTest::Points_ManyTx_RemoveAll [GOOD] >> TDataShardLocksTest::UseLocksCache >> TDataShardLocksTest::MvccTestWriteBreaksLocks [GOOD] >> TDataShardLocksTest::Points_ManyTx >> AutoConfig::GetServicePoolsWith3CPUs [GOOD] |85.0%| [TS] {asan, default-linux-x86_64, release} ydb/core/driver_lib/run/ut/unittest >> XdsBootstrapConfigInitializer::CanNotSetEnvIfXdsBootstrapConfigIsAbsent [GOOD] |85.0%| [TS] {BAZEL_UPLOAD} ydb/core/driver_lib/run/ut/unittest |85.0%| [TS] {asan, default-linux-x86_64, release} ydb/core/driver_lib/run/ut/unittest >> AutoConfig::GetASPoolsWith3CPUs [GOOD] |85.0%| [TS] {BAZEL_UPLOAD} ydb/core/driver_lib/run/ut/unittest |85.0%| [TS] {asan, default-linux-x86_64, release} ydb/core/driver_lib/run/ut/unittest >> AutoConfig::GetServicePoolsWith1CPU [GOOD] >> BlobDepot::DecommitPutAndRead [GOOD] >> BlobDepot::DecommitVerifiedRandom |85.0%| [TS] {asan, default-linux-x86_64, release} ydb/core/driver_lib/run/ut/unittest >> XdsBootstrapConfigInitializer::CanSetGrpcXdsBootstrapConfigEnvWithSomeNumberOfXdsServers [GOOD] |85.0%| [TS] {BAZEL_UPLOAD} ydb/core/driver_lib/run/ut/unittest |85.0%| [TS] {BAZEL_UPLOAD} ydb/core/driver_lib/run/ut/unittest |85.0%| [TS] {asan, default-linux-x86_64, release} ydb/core/driver_lib/run/ut/unittest >> AutoConfig::GetASPoolsWith4AndMoreCPUs [GOOD] |85.0%| [TS] {BAZEL_UPLOAD} ydb/core/driver_lib/run/ut/unittest >> TDqHashCombineTest::TestWideModeAggregationMultiRowNoSpilling [GOOD] >> TDqHashJoinBasicTest::TestBasicPassthrough-BlockJoin |85.0%| [TS] {asan, default-linux-x86_64, release} ydb/core/driver_lib/run/ut/unittest >> XdsBootstrapConfigInitializer::CanNotSetGrpcXdsBootstrapConfigEnvIfVariableAlreadySet [GOOD] |85.0%| [TS] {BAZEL_UPLOAD} ydb/core/driver_lib/run/ut/unittest |85.0%| [TS] {asan, default-linux-x86_64, release} ydb/core/driver_lib/run/ut/unittest >> AutoConfig::GetServicePoolsWith2CPUs [GOOD] >> TDataShardLocksTest::Points_ManyTx [GOOD] >> TDataShardLocksTest::Points_ManyTx_BreakAll >> TDqHashJoinBasicTest::TestBasicPassthrough-BlockJoin [GOOD] >> TDqHashJoinBasicTest::TestBasicPassthrough+BlockJoin >> DescribeSchemaSecretsService::GetNewValue |85.0%| [TS] {BAZEL_UPLOAD} 
ydb/core/driver_lib/run/ut/unittest |85.0%| [TS] {asan, default-linux-x86_64, release} ydb/core/driver_lib/run/ut/unittest >> AutoConfig::GetServicePoolsWith4AndMoreCPUs [GOOD] |85.0%| [TS] {asan, default-linux-x86_64, release} ydb/core/driver_lib/run/ut/unittest >> XdsBootstrapConfigInitializer::CanSetGrpcXdsBootstrapConfigEnv [GOOD] |85.0%| [TS] {asan, default-linux-x86_64, release} ydb/core/driver_lib/run/ut/unittest >> AutoConfig::GetServicePoolsWith3CPUs [GOOD] |85.0%| [TS] {BAZEL_UPLOAD} ydb/core/driver_lib/run/ut/unittest |85.0%| [TS] {BAZEL_UPLOAD} ydb/core/driver_lib/run/ut/unittest >> TDqHashJoinBasicTest::TestBasicPassthrough+BlockJoin [GOOD] >> TDqHashJoinBasicTest::TestCrossPassthrough-BlockJoin >> test.py::test[solomon-Basic-default.txt] [GOOD] >> test.py::test[solomon-BasicExtractMembers-default.txt] |85.0%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/datashard/ut_locks/unittest >> TDataShardLocksTest::MvccTestAlreadyBrokenLocks [GOOD] |85.0%| [TS] {BAZEL_UPLOAD} ydb/core/driver_lib/run/ut/unittest |85.1%| [TM] {BAZEL_UPLOAD} ydb/core/tx/datashard/ut_locks/unittest >> TDqHashJoinBasicTest::TestCrossPassthrough-BlockJoin [GOOD] >> TDqHashJoinBasicTest::TestCrossPassthrough+BlockJoin >> TDqHashJoinBasicTest::TestCrossPassthrough+BlockJoin [GOOD] >> TDqHashJoinBasicTest::TestMixedKeysPassthrough-BlockJoin >> ClosedIntervalSet::Difference [GOOD] >> ClosedIntervalSet::Contains [GOOD] >> ClosedIntervalSet::EnumInRange >> TDataShardLocksTest::Points_ManyTx_BreakAll [GOOD] >> TDataShardLocksTest::Points_ManyTx_BreakHalf_RemoveHalf |85.0%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/datashard/ut_compaction/ydb-core-tx-datashard-ut_compaction |85.1%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/datashard/ut_compaction/ydb-core-tx-datashard-ut_compaction |85.1%| [LD] {RESULT} $(B)/ydb/core/tx/datashard/ut_compaction/ydb-core-tx-datashard-ut_compaction >> TDqHashJoinBasicTest::TestMixedKeysPassthrough-BlockJoin [GOOD] >> TDqHashJoinBasicTest::TestMixedKeysPassthrough+BlockJoin >> TDqHashJoinBasicTest::TestMixedKeysPassthrough+BlockJoin [GOOD] >> TDqHashJoinBasicTest::TestEmptyFlows-BlockJoin |85.1%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/public/sdk/cpp/src/client/persqueue_public/ut/with_offset_ranges_mode_ut/with_offset_ranges_mode_ut |85.1%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/public/sdk/cpp/src/client/persqueue_public/ut/with_offset_ranges_mode_ut/with_offset_ranges_mode_ut |85.1%| [LD] {RESULT} $(B)/ydb/public/sdk/cpp/src/client/persqueue_public/ut/with_offset_ranges_mode_ut/with_offset_ranges_mode_ut |85.1%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/mediator/ut/ydb-core-tx-mediator-ut >> TDqHashJoinBasicTest::TestEmptyFlows-BlockJoin [GOOD] >> TDqHashJoinBasicTest::TestEmptyFlows+BlockJoin |85.1%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/mediator/ut/ydb-core-tx-mediator-ut |85.1%| [LD] {RESULT} $(B)/ydb/core/tx/mediator/ut/ydb-core-tx-mediator-ut |85.1%| [TA] $(B)/ydb/core/driver_lib/run/ut/test-results/unittest/{meta.json ... results_accumulator.log} |85.1%| [TA] {BAZEL_UPLOAD} $(B)/ydb/core/driver_lib/run/ut/test-results/unittest/{meta.json ... 
results_accumulator.log} >> TDqHashJoinBasicTest::TestEmptyFlows+BlockJoin [GOOD] >> TDqHashJoinBasicTest::TestEmptyLeft-BlockJoin >> TDataShardLocksTest::Points_ManyTx_BreakHalf_RemoveHalf [GOOD] |85.1%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/kqp/ut/scan/ydb-core-kqp-ut-scan |85.1%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/kqp/ut/scan/ydb-core-kqp-ut-scan |85.1%| [TA] {RESULT} $(B)/ydb/core/driver_lib/run/ut/test-results/unittest/{meta.json ... results_accumulator.log} |85.1%| [LD] {RESULT} $(B)/ydb/core/kqp/ut/scan/ydb-core-kqp-ut-scan >> TDqHashJoinBasicTest::TestEmptyLeft-BlockJoin [GOOD] >> TDqHashJoinBasicTest::TestEmptyLeft+BlockJoin >> Mirror3of4::ReplicationHuge [GOOD] >> TDqHashJoinBasicTest::TestEmptyLeft+BlockJoin [GOOD] >> TDqHashJoinBasicTest::TestEmptyRight-BlockJoin >> TDqHashJoinBasicTest::TestEmptyRight-BlockJoin [GOOD] >> TDqHashJoinBasicTest::TestEmptyRight+BlockJoin >> TDqHashJoinBasicTest::TestEmptyRight+BlockJoin [GOOD] >> TDqHashJoinBasicTest::TestInnerRenamesKind-BlockJoin >> TestFilterSet::Watermark [GOOD] >> TDqHashJoinBasicTest::TestInnerRenamesKind-BlockJoin [GOOD] >> TDqHashJoinBasicTest::TestInnerRenamesKind+BlockJoin |85.1%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/datashard/ut_locks/unittest >> TDataShardLocksTest::Points_ManyTx_BreakHalf_RemoveHalf [GOOD] |85.1%| [TM] {BAZEL_UPLOAD} ydb/core/tx/datashard/ut_locks/unittest >> TTxDataShardBuildFulltextDictScan::BadRequest [GOOD] >> TTxDataShardBuildFulltextDictScan::Build-SkipFirst-SkipLast >> TDqHashJoinBasicTest::TestInnerRenamesKind+BlockJoin [GOOD] >> TDqHashJoinBasicTest::TestBlockSpilling >> TestFilterSet::WatermarkWhere >> ClosedIntervalSet::EnumInRange [GOOD] >> ClosedIntervalSet::EnumInRangeReverse >> DataShardCompaction::CompactBorrowed >> test_transform.py::TestYamlConfigTransformations::test_simplified[dump_ds_init] [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/ut_mirror3of4/unittest >> Mirror3of4::ReplicationHuge [GOOD] Test command err: 2026-01-08T15:25:00.422280Z 1 00h00m00.000000s :BS_SKELETON INFO: PDiskId# 1 VDISK[0:_:0:0:0]: (0) SKELETON START Marker# BSVS37 2026-01-08T15:25:00.426737Z 2 00h00m00.000000s :BS_SKELETON INFO: PDiskId# 1 VDISK[0:_:0:1:0]: (0) SKELETON START Marker# BSVS37 2026-01-08T15:25:00.426907Z 3 00h00m00.000000s :BS_SKELETON INFO: PDiskId# 1 VDISK[0:_:0:2:0]: (0) SKELETON START Marker# BSVS37 2026-01-08T15:25:00.427073Z 4 00h00m00.000000s :BS_SKELETON INFO: PDiskId# 1 VDISK[0:_:0:3:0]: (0) SKELETON START Marker# BSVS37 2026-01-08T15:25:00.427236Z 5 00h00m00.000000s :BS_SKELETON INFO: PDiskId# 1 VDISK[0:_:0:4:0]: (0) SKELETON START Marker# BSVS37 2026-01-08T15:25:00.427384Z 6 00h00m00.000000s :BS_SKELETON INFO: PDiskId# 1 VDISK[0:_:0:5:0]: (0) SKELETON START Marker# BSVS37 2026-01-08T15:25:00.427542Z 7 00h00m00.000000s :BS_SKELETON INFO: PDiskId# 1 VDISK[0:_:0:6:0]: (0) SKELETON START Marker# BSVS37 2026-01-08T15:25:00.427696Z 8 00h00m00.000000s :BS_SKELETON INFO: PDiskId# 1 VDISK[0:_:0:7:0]: (0) SKELETON START Marker# BSVS37 2026-01-08T15:25:00.428095Z 1 00h00m00.000000s :BS_LOCALRECOVERY NOTICE: PDiskId# 1 VDISK[0:_:0:0:0]: (0) LocalRecovery START 2026-01-08T15:25:00.428184Z 1 00h00m00.000000s :BS_LOCALRECOVERY DEBUG: PDiskId# 1 VDISK[0:_:0:0:0]: (0) Sending TEvYardInit: pdiskGuid# 9886505328119947244 skeletonid# [1:139:13] selfid# [1:155:22] delay 0.000000 sec 2026-01-08T15:25:00.428245Z 2 00h00m00.000000s :BS_LOCALRECOVERY NOTICE: PDiskId# 1 VDISK[0:_:0:1:0]: (0) LocalRecovery START 
2026-01-08T15:25:00.428282Z 2 00h00m00.000000s :BS_LOCALRECOVERY DEBUG: PDiskId# 1 VDISK[0:_:0:1:0]: (0) Sending TEvYardInit: pdiskGuid# 3461713635198744868 skeletonid# [2:140:11] selfid# [2:156:12] delay 0.000000 sec 2026-01-08T15:25:00.428316Z 3 00h00m00.000000s :BS_LOCALRECOVERY NOTICE: PDiskId# 1 VDISK[0:_:0:2:0]: (0) LocalRecovery START 2026-01-08T15:25:00.428347Z 3 00h00m00.000000s :BS_LOCALRECOVERY DEBUG: PDiskId# 1 VDISK[0:_:0:2:0]: (0) Sending TEvYardInit: pdiskGuid# 6286086673142309166 skeletonid# [3:141:11] selfid# [3:157:12] delay 0.000000 sec 2026-01-08T15:25:00.428376Z 4 00h00m00.000000s :BS_LOCALRECOVERY NOTICE: PDiskId# 1 VDISK[0:_:0:3:0]: (0) LocalRecovery START 2026-01-08T15:25:00.428409Z 4 00h00m00.000000s :BS_LOCALRECOVERY DEBUG: PDiskId# 1 VDISK[0:_:0:3:0]: (0) Sending TEvYardInit: pdiskGuid# 17733286047497499535 skeletonid# [4:142:11] selfid# [4:158:12] delay 0.000000 sec 2026-01-08T15:25:00.428439Z 5 00h00m00.000000s :BS_LOCALRECOVERY NOTICE: PDiskId# 1 VDISK[0:_:0:4:0]: (0) LocalRecovery START 2026-01-08T15:25:00.428470Z 5 00h00m00.000000s :BS_LOCALRECOVERY DEBUG: PDiskId# 1 VDISK[0:_:0:4:0]: (0) Sending TEvYardInit: pdiskGuid# 14949940231141135875 skeletonid# [5:143:11] selfid# [5:159:12] delay 0.000000 sec 2026-01-08T15:25:00.428500Z 6 00h00m00.000000s :BS_LOCALRECOVERY NOTICE: PDiskId# 1 VDISK[0:_:0:5:0]: (0) LocalRecovery START 2026-01-08T15:25:00.428530Z 6 00h00m00.000000s :BS_LOCALRECOVERY DEBUG: PDiskId# 1 VDISK[0:_:0:5:0]: (0) Sending TEvYardInit: pdiskGuid# 15351883288691602723 skeletonid# [6:144:11] selfid# [6:160:12] delay 0.000000 sec 2026-01-08T15:25:00.428561Z 7 00h00m00.000000s :BS_LOCALRECOVERY NOTICE: PDiskId# 1 VDISK[0:_:0:6:0]: (0) LocalRecovery START 2026-01-08T15:25:00.428607Z 7 00h00m00.000000s :BS_LOCALRECOVERY DEBUG: PDiskId# 1 VDISK[0:_:0:6:0]: (0) Sending TEvYardInit: pdiskGuid# 4220329699958026458 skeletonid# [7:145:11] selfid# [7:161:12] delay 0.000000 sec 2026-01-08T15:25:00.428640Z 8 00h00m00.000000s :BS_LOCALRECOVERY NOTICE: PDiskId# 1 VDISK[0:_:0:7:0]: (0) LocalRecovery START 2026-01-08T15:25:00.428673Z 8 00h00m00.000000s :BS_LOCALRECOVERY DEBUG: PDiskId# 1 VDISK[0:_:0:7:0]: (0) Sending TEvYardInit: pdiskGuid# 11607415269302885246 skeletonid# [8:146:11] selfid# [8:162:12] delay 0.000000 sec 2026-01-08T15:25:00.429045Z 1 00h00m00.000000s :BS_PDISK NOTICE: {PDM01@pdisk_mock.cpp:463} PDiskMock[1:1] received TEvYardInit Msg# {EvYardInit ownerRound# 2 VDisk# [0:4294967295:0:0:0] PDiskGuid# 9886505328119947244 CutLogID# [1:139:13] WhiteboardProxyId# [1:122:10] SlotId# 0 GroupSizeInUnits# 0} 2026-01-08T15:25:00.429753Z 1 00h00m00.000000s :BS_PDISK INFO: {PDM02@pdisk_mock.cpp:500} PDiskMock[1:1] sending TEvYardInitResult Msg# {EvYardInitResult Status# OK ErrorReason# "" StatusFlags# None PDiskParams# {{TPDiskParams ownerId# 1 ownerRound# 2 SlotSizeInUnits# 0 ChunkSize# 134217728 AppendBlockSize# 4096 RecommendedReadSize# 45056 SeekTimeUs# 40 ReadSpeedBps# 1048576000 WriteSpeedBps# 1048576000 ReadBlockSize# 65536 WriteBlockSize# 65536 BulkWriteBlockSize# 65536 PrefetchSizeBytes# 209715 GlueRequestDistanceBytes# 41943 IsTinyDisk# 0}} OwnedChunks# {}} Created# true 2026-01-08T15:25:00.429835Z 2 00h00m00.000000s :BS_PDISK NOTICE: {PDM01@pdisk_mock.cpp:463} PDiskMock[2:1] received TEvYardInit Msg# {EvYardInit ownerRound# 2 VDisk# [0:4294967295:0:1:0] PDiskGuid# 3461713635198744868 CutLogID# [2:140:11] WhiteboardProxyId# [2:124:10] SlotId# 0 GroupSizeInUnits# 0} 2026-01-08T15:25:00.429901Z 2 00h00m00.000000s :BS_PDISK INFO: 
{PDM02@pdisk_mock.cpp:500} PDiskMock[2:1] sending TEvYardInitResult Msg# {EvYardInitResult Status# OK ErrorReason# "" StatusFlags# None PDiskParams# {{TPDiskParams ownerId# 1 ownerRound# 2 SlotSizeInUnits# 0 ChunkSize# 134217728 AppendBlockSize# 4096 RecommendedReadSize# 45056 SeekTimeUs# 40 ReadSpeedBps# 1048576000 WriteSpeedBps# 1048576000 ReadBlockSize# 65536 WriteBlockSize# 65536 BulkWriteBlockSize# 65536 PrefetchSizeBytes# 209715 GlueRequestDistanceBytes# 41943 IsTinyDisk# 0}} OwnedChunks# {}} Created# true 2026-01-08T15:25:00.429953Z 3 00h00m00.000000s :BS_PDISK NOTICE: {PDM01@pdisk_mock.cpp:463} PDiskMock[3:1] received TEvYardInit Msg# {EvYardInit ownerRound# 2 VDisk# [0:4294967295:0:2:0] PDiskGuid# 6286086673142309166 CutLogID# [3:141:11] WhiteboardProxyId# [3:126:10] SlotId# 0 GroupSizeInUnits# 0} 2026-01-08T15:25:00.430015Z 3 00h00m00.000000s :BS_PDISK INFO: {PDM02@pdisk_mock.cpp:500} PDiskMock[3:1] sending TEvYardInitResult Msg# {EvYardInitResult Status# OK ErrorReason# "" StatusFlags# None PDiskParams# {{TPDiskParams ownerId# 1 ownerRound# 2 SlotSizeInUnits# 0 ChunkSize# 134217728 AppendBlockSize# 4096 RecommendedReadSize# 45056 SeekTimeUs# 40 ReadSpeedBps# 1048576000 WriteSpeedBps# 1048576000 ReadBlockSize# 65536 WriteBlockSize# 65536 BulkWriteBlockSize# 65536 PrefetchSizeBytes# 209715 GlueRequestDistanceBytes# 41943 IsTinyDisk# 0}} OwnedChunks# {}} Created# true 2026-01-08T15:25:00.430071Z 4 00h00m00.000000s :BS_PDISK NOTICE: {PDM01@pdisk_mock.cpp:463} PDiskMock[4:1] received TEvYardInit Msg# {EvYardInit ownerRound# 2 VDisk# [0:4294967295:0:3:0] PDiskGuid# 17733286047497499535 CutLogID# [4:142:11] WhiteboardProxyId# [4:128:10] SlotId# 0 GroupSizeInUnits# 0} 2026-01-08T15:25:00.430124Z 4 00h00m00.000000s :BS_PDISK INFO: {PDM02@pdisk_mock.cpp:500} PDiskMock[4:1] sending TEvYardInitResult Msg# {EvYardInitResult Status# OK ErrorReason# "" StatusFlags# None PDiskParams# {{TPDiskParams ownerId# 1 ownerRound# 2 SlotSizeInUnits# 0 ChunkSize# 134217728 AppendBlockSize# 4096 RecommendedReadSize# 45056 SeekTimeUs# 40 ReadSpeedBps# 1048576000 WriteSpeedBps# 1048576000 ReadBlockSize# 65536 WriteBlockSize# 65536 BulkWriteBlockSize# 65536 PrefetchSizeBytes# 209715 GlueRequestDistanceBytes# 41943 IsTinyDisk# 0}} OwnedChunks# {}} Created# true 2026-01-08T15:25:00.430180Z 5 00h00m00.000000s :BS_PDISK NOTICE: {PDM01@pdisk_mock.cpp:463} PDiskMock[5:1] received TEvYardInit Msg# {EvYardInit ownerRound# 2 VDisk# [0:4294967295:0:4:0] PDiskGuid# 14949940231141135875 CutLogID# [5:143:11] WhiteboardProxyId# [5:130:10] SlotId# 0 GroupSizeInUnits# 0} 2026-01-08T15:25:00.430235Z 5 00h00m00.000000s :BS_PDISK INFO: {PDM02@pdisk_mock.cpp:500} PDiskMock[5:1] sending TEvYardInitResult Msg# {EvYardInitResult Status# OK ErrorReason# "" StatusFlags# None PDiskParams# {{TPDiskParams ownerId# 1 ownerRound# 2 SlotSizeInUnits# 0 ChunkSize# 134217728 AppendBlockSize# 4096 RecommendedReadSize# 45056 SeekTimeUs# 40 ReadSpeedBps# 1048576000 WriteSpeedBps# 1048576000 ReadBlockSize# 65536 WriteBlockSize# 65536 BulkWriteBlockSize# 65536 PrefetchSizeBytes# 209715 GlueRequestDistanceBytes# 41943 IsTinyDisk# 0}} OwnedChunks# {}} Created# true 2026-01-08T15:25:00.430279Z 6 00h00m00.000000s :BS_PDISK NOTICE: {PDM01@pdisk_mock.cpp:463} PDiskMock[6:1] received TEvYardInit Msg# {EvYardInit ownerRound# 2 VDisk# [0:4294967295:0:5:0] PDiskGuid# 15351883288691602723 CutLogID# [6:144:11] WhiteboardProxyId# [6:132:10] SlotId# 0 GroupSizeInUnits# 0} 2026-01-08T15:25:00.434356Z 6 00h00m00.000000s :BS_PDISK INFO: {PDM02@pdisk_mock.cpp:500} 
PDiskMock[6:1] sending TEvYardInitResult Msg# {EvYardInitResult Status# OK ErrorReason# "" StatusFlags# None PDiskParams# {{TPDiskParams ownerId# 1 ownerRound# 2 SlotSizeInUnits# 0 ChunkSize# 134217728 AppendBlockSize# 4096 RecommendedReadSize# 45056 SeekTimeUs# 40 ReadSpeedBps# 1048576000 WriteSpeedBps# 1048576000 ReadBlockSize# 65536 WriteBlockSize# 65536 BulkWriteBlockSize# 65536 PrefetchSizeBytes# 209715 GlueRequestDistanceBytes# 41943 IsTinyDisk# 0}} OwnedChunks# {}} Created# true 2026-01-08T15:25:00.434432Z 7 00h00m00.000000s :BS_PDISK NOTICE: {PDM01@pdisk_mock.cpp:463} PDiskMock[7:1] received TEvYardInit Msg# {EvYardInit ownerRound# 2 VDisk# [0:4294967295:0:6:0] PDiskGuid# 4220329699958026458 CutLogID# [7:145:11] WhiteboardProxyId# [7:134:10] SlotId# 0 GroupSizeInUnits# 0} 2026-01-08T15:25:00.434523Z 7 00h00m00.000000s :BS_PDISK INFO: {PDM02@pdisk_mock.cpp:500} PDiskMock[7:1] sending TEvYardInitResult Msg# {EvYardInitResult Status# OK ErrorReason# "" StatusFlags# None PDiskParams# {{TPDiskParams ownerId# 1 ownerRound# 2 SlotSizeInUnits# 0 ChunkSize# 134217728 AppendBlockSize# 4096 RecommendedReadSize# 45056 SeekTimeUs# 40 ReadSpeedBps# 1048576000 WriteSpeedBps# 1048576000 ReadBlockSize# 65536 WriteBlockSize# 65536 BulkWriteBlockSize# 65536 PrefetchSizeBytes# 209715 GlueRequestDistanceBytes# 41943 IsTinyDisk# 0}} OwnedChunks# {}} Created# true 2026-01-08T15:25:00.434577Z 8 00h00m00.000000s :BS_PDISK NOTICE: {PDM01@pdisk_mock.cpp:463} PDiskMock[8:1] received TEvYardInit Msg# {EvYardInit ownerRound# 2 VDisk# [0:4294967295:0:7:0] PDiskGuid# 11607415269302885246 CutLogID# [8:146:11] WhiteboardProxyId# [8:136:10] SlotId# 0 GroupSizeInUnits# 0} 2026-01-08T15:25:00.434627Z 8 00h00m00.000000s :BS_PDISK INFO: {PDM02@pdisk_mock.cpp:500} PDiskMock[8:1] sending TEvYardInitResult Msg# {EvYardInitResult Status# OK ErrorReason# "" StatusFlags# None PDiskParams# {{TPDiskParams ownerId# 1 ownerRound# 2 SlotSizeInUnits# 0 ChunkSize# 134217728 AppendBlockSize# 4096 RecommendedReadSize# 45056 SeekTimeUs# 40 ReadSpeedBps# 1048576000 WriteSpeedBps# 1048576000 ReadBlockSize# 65536 WriteBlockSize# 65536 BulkWriteBlockSize# 65536 PrefetchSizeBytes# 209715 GlueRequestDistanceBytes# 41943 IsTinyDisk# 0}} OwnedChunks# {}} Created# true 2026-01-08T15:25:00.436279Z 1 00h00m00.000000s :BS_LOCALRECOVERY NOTICE: PDiskId# 1 VDISK[0:_:0:0:0]: (0) MAX LSNS: LogoBlobs# [ExplicitlySet# true Derived# false Lsn# NotSet] Blocks# [ExplicitlySet# true Derived# false Lsn# NotSet] Barriers# [ExplicitlySet# true Derived# false Lsn# NotSet] SyncLog# 0 2026-01-08T15:25:00.437427Z 2 00h00m00.000000s :BS_LOCALRECOVERY NOTICE: PDiskId# 1 VDISK[0:_:0:1:0]: (0) MAX LSNS: LogoBlobs# [ExplicitlySet# true Derived# false Lsn# NotSet] Blocks# [ExplicitlySet# true Derived# false Lsn# NotSet] Barriers# [ExplicitlySet# true Derived# false Lsn# NotSet] SyncLog# 0 2026-01-08T15:25:00.442414Z 3 00h00m00.000000s :BS_LOCALRECOVERY NOTICE: PDiskId# 1 VDISK[0:_:0:2:0]: (0) MAX LSNS: LogoBlobs# [ExplicitlySet# true Derived# false Lsn# NotSet] Blocks# [ExplicitlySet# true Derived# false Lsn# NotSet] Barriers# [ExplicitlySet# true Derived# false Lsn# NotSet] SyncLog# 0 2026-01-08T15:25:00.443505Z 4 00h00m00.000000s :BS_LOCALRECOVERY NOTICE: PDiskId# 1 VDISK[0:_:0:3:0]: (0) MAX LSNS: LogoBlobs# [ExplicitlySet# true Derived# false Lsn# NotSet] Blocks# [ExplicitlySet# true Derived# false Lsn# NotSet] Barriers# [Explici ... 
g Msg# {EvLog ownerId# 1 ownerRound# 2 Signature# 138 DataSize# 579 Lsn# 25 LsnSegmentStart# 25 Cookie# 0{CommitRecord FirstLsnToKeep# 0 IsStartingPoint# 1 DeleteToDecommitted# 0 CommitChunks# [] DeleteChunks# [] DirtyChunks# []}} VDiskId# [0:4294967295:0:7:0] 2026-01-08T15:26:04.534252Z 8 00h00m00.000000s :BS_PDISK DEBUG: {PDM12@pdisk_mock.cpp:671} PDiskMock[8:1] sending TEvLogResult Msg# {EvLogResult Status# OK ErrorReason# "" StatusFlags# None LogChunkCount# 0{Lsn# 21 Cookie# 9}{Lsn# 22 Cookie# 10}{Lsn# 23 Cookie# 11}{Lsn# 24 Cookie# 12}} Recipient# [8:146:11] 2026-01-08T15:26:04.534282Z 8 00h00m00.000000s :BS_PDISK DEBUG: {PDM12@pdisk_mock.cpp:671} PDiskMock[8:1] sending TEvLogResult Msg# {EvLogResult Status# OK ErrorReason# "" StatusFlags# None LogChunkCount# 0{Lsn# 25 Cookie# 0}} Recipient# [8:363:30] 2026-01-08T15:26:04.537199Z 7 00h00m00.000000s :BS_PDISK DEBUG: {PDM11@pdisk_mock.cpp:609} PDiskMock[7:1] received TEvLog Msg# {EvLog ownerId# 1 ownerRound# 2 Signature# 138 DataSize# 579 Lsn# 26 LsnSegmentStart# 26 Cookie# 0{CommitRecord FirstLsnToKeep# 0 IsStartingPoint# 1 DeleteToDecommitted# 0 CommitChunks# [] DeleteChunks# [] DirtyChunks# []}} VDiskId# [0:4294967295:0:6:0] 2026-01-08T15:26:04.537264Z 7 00h00m00.000000s :BS_PDISK DEBUG: {PDM12@pdisk_mock.cpp:671} PDiskMock[7:1] sending TEvLogResult Msg# {EvLogResult Status# OK ErrorReason# "" StatusFlags# None LogChunkCount# 0{Lsn# 26 Cookie# 0}} Recipient# [7:353:30] 2026-01-08T15:26:04.537356Z 8 00h00m00.000000s :BS_PDISK DEBUG: {PDM11@pdisk_mock.cpp:609} PDiskMock[8:1] received TEvLog Msg# {EvLog ownerId# 1 ownerRound# 2 Signature# 138 DataSize# 579 Lsn# 26 LsnSegmentStart# 26 Cookie# 0{CommitRecord FirstLsnToKeep# 0 IsStartingPoint# 1 DeleteToDecommitted# 0 CommitChunks# [] DeleteChunks# [] DirtyChunks# []}} VDiskId# [0:4294967295:0:7:0] 2026-01-08T15:26:04.537396Z 8 00h00m00.000000s :BS_PDISK DEBUG: {PDM12@pdisk_mock.cpp:671} PDiskMock[8:1] sending TEvLogResult Msg# {EvLogResult Status# OK ErrorReason# "" StatusFlags# None LogChunkCount# 0{Lsn# 26 Cookie# 0}} Recipient# [8:363:30] 2026-01-08T15:26:04.537664Z 2 00h00m00.000000s :BS_VDISK_GET DEBUG: PDiskId# 1 VDISK[0:_:0:1:0]: (0) TEvVGet: {ExtrQuery# [1:1:1:0:0:1048576:0] sh# 0 sz# 0} {MsgQoS MsgId# { SequenceId: 1 MsgId: 0 } Cost# 1680000 ExtQueueId# GetFastRead IntQueueId# IntGetFast CostSettings# { SeekTimeUs# 40 ReadSpeedBps# 1048576000 WriteSpeedBps# 1048576000 ReadBlockSize# 65536 WriteBlockSize# 65536 MinHugeBlobInBytes# 65537} SendMeCostSettings# 1} Notify# 0 Internals# 1 TabletId# 0 AcquireBlockedGeneration# 0 ForceBlockedGeneration# 0} Marker# BSVS14 2026-01-08T15:26:04.537963Z 7 00h00m00.000000s :BS_PDISK DEBUG: {PDM11@pdisk_mock.cpp:609} PDiskMock[7:1] received TEvLog Msg# {EvLog ownerId# 1 ownerRound# 2 Signature# 138 DataSize# 579 Lsn# 27 LsnSegmentStart# 27 Cookie# 0{CommitRecord FirstLsnToKeep# 0 IsStartingPoint# 1 DeleteToDecommitted# 0 CommitChunks# [] DeleteChunks# [] DirtyChunks# []}} VDiskId# [0:4294967295:0:6:0] 2026-01-08T15:26:04.538007Z 7 00h00m00.000000s :BS_PDISK DEBUG: {PDM12@pdisk_mock.cpp:671} PDiskMock[7:1] sending TEvLogResult Msg# {EvLogResult Status# OK ErrorReason# "" StatusFlags# None LogChunkCount# 0{Lsn# 27 Cookie# 0}} Recipient# [7:353:30] 2026-01-08T15:26:04.538056Z 2 00h00m00.000000s :BS_VDISK_GET DEBUG: PDiskId# 1 VDISK[0:_:0:1:0]: (0) GLUEREAD(0x7cdd689239c0): {EvChunkRead chunkIdx# 1 Offset# 5 Size# 1048576 ownerId# 1 ownerRound# 2 PriorityClass# 2 Cookie# 137427819634240} 2026-01-08T15:26:04.538118Z 8 00h00m00.000000s :BS_PDISK 
DEBUG: {PDM11@pdisk_mock.cpp:609} PDiskMock[8:1] received TEvLog Msg# {EvLog ownerId# 1 ownerRound# 2 Signature# 138 DataSize# 579 Lsn# 27 LsnSegmentStart# 27 Cookie# 0{CommitRecord FirstLsnToKeep# 0 IsStartingPoint# 1 DeleteToDecommitted# 0 CommitChunks# [] DeleteChunks# [] DirtyChunks# []}} VDiskId# [0:4294967295:0:7:0] 2026-01-08T15:26:04.538155Z 8 00h00m00.000000s :BS_PDISK DEBUG: {PDM12@pdisk_mock.cpp:671} PDiskMock[8:1] sending TEvLogResult Msg# {EvLogResult Status# OK ErrorReason# "" StatusFlags# None LogChunkCount# 0{Lsn# 27 Cookie# 0}} Recipient# [8:363:30] 2026-01-08T15:26:04.538227Z 2 00h00m00.000000s :BS_PDISK DEBUG: {PDM13@pdisk_mock.cpp:761} PDiskMock[2:1] received TEvChunkRead Msg# {EvChunkRead chunkIdx# 1 Offset# 5 Size# 1048576 ownerId# 1 ownerRound# 2 PriorityClass# 2 Cookie# 137427819634240} VDiskId# [0:4294967295:0:1:0] 2026-01-08T15:26:04.539064Z 2 00h00m00.000000s :BS_PDISK DEBUG: {PDM14@pdisk_mock.cpp:814} PDiskMock[2:1] sending TEvChunkReadResult Msg# {EvChunkReadres Status# OK ErrorReason# "" chunkIdx# 1 Offset# 5 DataSize# 1048576 Cookie# 137427819634240 StatusFlags# None} 2026-01-08T15:26:04.539201Z 2 00h00m00.000000s :BS_VDISK_GET DEBUG: PDiskId# 1 VDISK[0:_:0:1:0]: (0) GLUEREAD FINISHED(0x7cdd689239c0): actualReadN# 1 origReadN# 1 2026-01-08T15:26:04.539495Z 2 00h00m00.000000s :BS_VDISK_GET DEBUG: PDiskId# 1 VDISK[0:_:0:1:0]: (0) TEvVGetResult: {EvVGetResult QueryResult Status# OK {[1:1:1:0:0:1048576:2] OK Size# 1048576 FullDataSize# 1048576 PayloadId# 0 Data# 1048576b Ingress# 1369701526376808448} BlockedGeneration# 0} 2026-01-08T15:26:04.569013Z 3 00h00m00.000000s :BS_VDISK_GET DEBUG: PDiskId# 1 VDISK[0:_:0:2:0]: (0) TEvVGet: {ExtrQuery# [1:1:1:0:0:1048576:0] sh# 0 sz# 0} {MsgQoS MsgId# { SequenceId: 1 MsgId: 0 } Cost# 1680000 ExtQueueId# GetFastRead IntQueueId# IntGetFast CostSettings# { SeekTimeUs# 40 ReadSpeedBps# 1048576000 WriteSpeedBps# 1048576000 ReadBlockSize# 65536 WriteBlockSize# 65536 MinHugeBlobInBytes# 65537} SendMeCostSettings# 1} Notify# 0 Internals# 1 TabletId# 0 AcquireBlockedGeneration# 0 ForceBlockedGeneration# 0} Marker# BSVS14 2026-01-08T15:26:04.569830Z 3 00h00m00.000000s :BS_VDISK_GET DEBUG: PDiskId# 1 VDISK[0:_:0:2:0]: (0) GLUEREAD(0x7cdd68980fc0): {EvChunkRead chunkIdx# 1 Offset# 5 Size# 1048576 ownerId# 1 ownerRound# 2 PriorityClass# 2 Cookie# 137427819842112} 2026-01-08T15:26:04.570227Z 3 00h00m00.000000s :BS_PDISK DEBUG: {PDM13@pdisk_mock.cpp:761} PDiskMock[3:1] received TEvChunkRead Msg# {EvChunkRead chunkIdx# 1 Offset# 5 Size# 1048576 ownerId# 1 ownerRound# 2 PriorityClass# 2 Cookie# 137427819842112} VDiskId# [0:4294967295:0:2:0] 2026-01-08T15:26:04.595129Z 3 00h00m00.000000s :BS_PDISK DEBUG: {PDM14@pdisk_mock.cpp:814} PDiskMock[3:1] sending TEvChunkReadResult Msg# {EvChunkReadres Status# OK ErrorReason# "" chunkIdx# 1 Offset# 5 DataSize# 1048576 Cookie# 137427819842112 StatusFlags# None} 2026-01-08T15:26:04.595342Z 3 00h00m00.000000s :BS_VDISK_GET DEBUG: PDiskId# 1 VDISK[0:_:0:2:0]: (0) GLUEREAD FINISHED(0x7cdd68980fc0): actualReadN# 1 origReadN# 1 2026-01-08T15:26:04.595470Z 3 00h00m00.000000s :BS_VDISK_GET DEBUG: PDiskId# 1 VDISK[0:_:0:2:0]: (0) TEvVGetResult: {EvVGetResult QueryResult Status# OK {[1:1:1:0:0:1048576:1] OK Size# 1048576 FullDataSize# 1048576 PayloadId# 0 Data# 1048576b Ingress# 2522623030983655424} BlockedGeneration# 0} 2026-01-08T15:26:04.633804Z 4 00h00m00.000000s :BS_VDISK_GET DEBUG: PDiskId# 1 VDISK[0:_:0:3:0]: (0) TEvVGet: {ExtrQuery# [1:1:1:0:0:1048576:0] sh# 0 sz# 0} {MsgQoS MsgId# { SequenceId: 1 MsgId: 
0 } Cost# 1680000 ExtQueueId# GetFastRead IntQueueId# IntGetFast CostSettings# { SeekTimeUs# 40 ReadSpeedBps# 1048576000 WriteSpeedBps# 1048576000 ReadBlockSize# 65536 WriteBlockSize# 65536 MinHugeBlobInBytes# 65537} SendMeCostSettings# 1} Notify# 0 Internals# 1 TabletId# 0 AcquireBlockedGeneration# 0 ForceBlockedGeneration# 0} Marker# BSVS14 2026-01-08T15:26:04.634083Z 4 00h00m00.000000s :BS_VDISK_GET DEBUG: PDiskId# 1 VDISK[0:_:0:3:0]: (0) TEvVGetResult: {EvVGetResult QueryResult Status# OK {[1:1:1:0:0:1048576:3] OK Size# 0 FullDataSize# 1048576 BufferData# Ingress# 793240774073384960} BlockedGeneration# 0} 2026-01-08T15:26:04.635174Z 5 00h00m00.000000s :BS_VDISK_GET DEBUG: PDiskId# 1 VDISK[0:_:0:4:0]: (0) TEvVGet: {ExtrQuery# [1:1:1:0:0:1048576:0] sh# 0 sz# 0} {MsgQoS MsgId# { SequenceId: 1 MsgId: 0 } Cost# 1680000 ExtQueueId# GetFastRead IntQueueId# IntGetFast CostSettings# { SeekTimeUs# 40 ReadSpeedBps# 1048576000 WriteSpeedBps# 1048576000 ReadBlockSize# 65536 WriteBlockSize# 65536 MinHugeBlobInBytes# 65537} SendMeCostSettings# 1} Notify# 0 Internals# 1 TabletId# 0 AcquireBlockedGeneration# 0 ForceBlockedGeneration# 0} Marker# BSVS14 2026-01-08T15:26:04.635344Z 5 00h00m00.000000s :BS_VDISK_GET DEBUG: PDiskId# 1 VDISK[0:_:0:4:0]: (0) TEvVGetResult: {EvVGetResult QueryResult Status# OK {[1:1:1:0:0:1048576:3] OK Size# 0 FullDataSize# 1048576 BufferData# Ingress# 793240774073384960} BlockedGeneration# 0} 2026-01-08T15:26:04.636307Z 6 00h00m00.000000s :BS_VDISK_GET DEBUG: PDiskId# 1 VDISK[0:_:0:5:0]: (0) TEvVGet: {ExtrQuery# [1:1:1:0:0:1048576:0] sh# 0 sz# 0} {MsgQoS MsgId# { SequenceId: 1 MsgId: 0 } Cost# 1680000 ExtQueueId# GetFastRead IntQueueId# IntGetFast CostSettings# { SeekTimeUs# 40 ReadSpeedBps# 1048576000 WriteSpeedBps# 1048576000 ReadBlockSize# 65536 WriteBlockSize# 65536 MinHugeBlobInBytes# 65537} SendMeCostSettings# 1} Notify# 0 Internals# 1 TabletId# 0 AcquireBlockedGeneration# 0 ForceBlockedGeneration# 0} Marker# BSVS14 2026-01-08T15:26:04.636564Z 6 00h00m00.000000s :BS_VDISK_GET DEBUG: PDiskId# 1 VDISK[0:_:0:5:0]: (0) GLUEREAD(0x7cdd688f6b40): {EvChunkRead chunkIdx# 1 Offset# 5 Size# 1048576 ownerId# 1 ownerRound# 2 PriorityClass# 2 Cookie# 137427819633216} 2026-01-08T15:26:04.636645Z 6 00h00m00.000000s :BS_PDISK DEBUG: {PDM13@pdisk_mock.cpp:761} PDiskMock[6:1] received TEvChunkRead Msg# {EvChunkRead chunkIdx# 1 Offset# 5 Size# 1048576 ownerId# 1 ownerRound# 2 PriorityClass# 2 Cookie# 137427819633216} VDiskId# [0:4294967295:0:5:0] 2026-01-08T15:26:04.664616Z 6 00h00m00.000000s :BS_PDISK DEBUG: {PDM14@pdisk_mock.cpp:814} PDiskMock[6:1] sending TEvChunkReadResult Msg# {EvChunkReadres Status# OK ErrorReason# "" chunkIdx# 1 Offset# 5 DataSize# 1048576 Cookie# 137427819633216 StatusFlags# None} 2026-01-08T15:26:04.664751Z 6 00h00m00.000000s :BS_VDISK_GET DEBUG: PDiskId# 1 VDISK[0:_:0:5:0]: (0) GLUEREAD FINISHED(0x7cdd688f6b40): actualReadN# 1 origReadN# 1 2026-01-08T15:26:04.664904Z 6 00h00m00.000000s :BS_VDISK_GET DEBUG: PDiskId# 1 VDISK[0:_:0:5:0]: (0) TEvVGetResult: {EvVGetResult QueryResult Status# OK {[1:1:1:0:0:1048576:2] OK Size# 1048576 FullDataSize# 1048576 PayloadId# 0 Data# 1048576b Ingress# 1946162278680231936} {[1:1:1:0:0:1048576:3] OK Size# 0 FullDataSize# 1048576 BufferData# Ingress# 1946162278680231936} BlockedGeneration# 0} 2026-01-08T15:26:04.699193Z 7 00h00m00.000000s :BS_VDISK_GET DEBUG: PDiskId# 1 VDISK[0:_:0:6:0]: (0) TEvVGet: {ExtrQuery# [1:1:1:0:0:1048576:0] sh# 0 sz# 0} {MsgQoS MsgId# { SequenceId: 1 MsgId: 0 } Cost# 1680000 ExtQueueId# GetFastRead 
IntQueueId# IntGetFast CostSettings# { SeekTimeUs# 40 ReadSpeedBps# 1048576000 WriteSpeedBps# 1048576000 ReadBlockSize# 65536 WriteBlockSize# 65536 MinHugeBlobInBytes# 65537} SendMeCostSettings# 1} Notify# 0 Internals# 1 TabletId# 0 AcquireBlockedGeneration# 0 ForceBlockedGeneration# 0} Marker# BSVS14 2026-01-08T15:26:04.699450Z 7 00h00m00.000000s :BS_VDISK_GET DEBUG: PDiskId# 1 VDISK[0:_:0:6:0]: (0) TEvVGetResult: {EvVGetResult QueryResult Status# OK {[1:1:1:0:0:1048576:0] NODATA Ingress# 216780021769961472} BlockedGeneration# 0} 2026-01-08T15:26:04.700808Z 8 00h00m00.000000s :BS_VDISK_GET DEBUG: PDiskId# 1 VDISK[0:_:0:7:0]: (0) TEvVGet: {ExtrQuery# [1:1:1:0:0:1048576:0] sh# 0 sz# 0} {MsgQoS MsgId# { SequenceId: 1 MsgId: 0 } Cost# 1680000 ExtQueueId# GetFastRead IntQueueId# IntGetFast CostSettings# { SeekTimeUs# 40 ReadSpeedBps# 1048576000 WriteSpeedBps# 1048576000 ReadBlockSize# 65536 WriteBlockSize# 65536 MinHugeBlobInBytes# 65537} SendMeCostSettings# 1} Notify# 0 Internals# 1 TabletId# 0 AcquireBlockedGeneration# 0 ForceBlockedGeneration# 0} Marker# BSVS14 2026-01-08T15:26:04.700975Z 8 00h00m00.000000s :BS_VDISK_GET DEBUG: PDiskId# 1 VDISK[0:_:0:7:0]: (0) TEvVGetResult: {EvVGetResult QueryResult Status# OK {[1:1:1:0:0:1048576:0] NODATA Ingress# 216780021769961472} BlockedGeneration# 0} >> test_transform.py::TestYamlConfigTransformations::test_domains_config[dump] >> ReadSessionImplTest::UsesOnRetryStateDuringRetries [GOOD] >> RetryPolicy::TWriteSession_TestPolicy >> ApplyClusterEndpointTest::NoPorts [GOOD] >> ApplyClusterEndpointTest::PortFromCds [GOOD] >> ApplyClusterEndpointTest::PortFromDriver [GOOD] >> BasicUsage::MaxByteSizeEqualZero >> ReadSessionImplTest::SuccessfulInit [GOOD] >> ReadSessionImplTest::SuccessfulInitAndThenTimeoutCallback [GOOD] >> ReadSessionImplTest::StopsRetryAfterFailedAttempt [GOOD] >> ReadSessionImplTest::StopsRetryAfterTimeout [GOOD] >> ReadSessionImplTest::UnpackBigBatchWithTwoPartitions [GOOD] >> ReadSessionImplTest::SimpleDataHandlersWithGracefulRelease >> PersQueueSdkReadSessionTest::SettingsValidation >> BasicUsage::WriteAndReadSomeMessagesWithAsyncCompression |85.1%| [TM] {BAZEL_UPLOAD} ydb/core/blobstorage/ut_mirror3of4/unittest >> ReadSessionImplTest::ForcefulDestroyPartitionStream [GOOD] >> ReadSessionImplTest::DestroyPartitionStreamRequest [GOOD] >> ReadSessionImplTest::DecompressZstdEmptyMessage [GOOD] >> ReadSessionImplTest::PacksBatches_BatchABitBiggerThanLimit >> ReadSessionImplTest::PacksBatches_BatchABitBiggerThanLimit [GOOD] >> ReadSessionImplTest::PacksBatches_BatchesEqualToServerBatches [GOOD] >> ReadSessionImplTest::HoleBetweenOffsets [GOOD] >> ReadSessionImplTest::LOGBROKER_7702 [GOOD] >> TDqHashJoinBasicTest::TestBlockSpilling [GOOD] >> ReadSessionImplTest::SimpleDataHandlersWithGracefulRelease [GOOD] >> ReadSessionImplTest::SimpleDataHandlersWithGracefulReleaseWithCommit >> ReadSessionImplTest::SimpleDataHandlersWithGracefulReleaseWithCommit [GOOD] >> BasicUsage::WriteAndReadSomeMessagesWithSyncCompression >> MediatorTest::BasicTimecastUpdates >> ReadSessionImplTest::DecompressRaw >> ReadSessionImplTest::ReconnectOnTmpError [GOOD] >> ReadSessionImplTest::ReconnectOnTmpErrorAndThenTimeout [GOOD] >> ReadSessionImplTest::ReconnectOnTimeout [GOOD] >> ReadSessionImplTest::ReconnectOnTimeoutAndThenCreate [GOOD] >> ReadSessionImplTest::ReconnectsAfterFailure [GOOD] >> ReadSessionImplTest::SimpleDataHandlers >> ReadSessionImplTest::ProperlyOrdersDecompressedData ------- [TM] {asan, default-linux-x86_64, release} 
ydb/public/sdk/cpp/src/client/persqueue_public/ut/with_offset_ranges_mode_ut/unittest >> ReadSessionImplTest::LOGBROKER_7702 [GOOD] Test command err: 2026-01-08T15:26:07.179612Z :DEBUG: [db] [sessionid] [cluster] In Reconnect, ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2026-01-08T15:26:07.179638Z :DEBUG: [db] [sessionid] [cluster] New values: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2026-01-08T15:26:07.179718Z :DEBUG: [db] [sessionid] [cluster] Reconnecting session to cluster cluster in 0.000000s 2026-01-08T15:26:07.180129Z :DEBUG: [db] [sessionid] [cluster] Successfully connected. Initializing session 2026-01-08T15:26:07.180744Z :INFO: [db] [sessionid] [cluster] Server session id: 123-session-id-321 2026-01-08T15:26:07.192473Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2026-01-08T15:26:07.192845Z :INFO: [db] [sessionid] [cluster] Confirm partition stream create. Partition stream id: 1. Cluster: "TestCluster". Topic: "TestTopic". Partition: 1. Read offset: (NULL) 2026-01-08T15:26:07.194192Z :DEBUG: [db] [sessionid] [cluster] In Reconnect, ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2026-01-08T15:26:07.194212Z :DEBUG: [db] [sessionid] [cluster] New values: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2026-01-08T15:26:07.194229Z :DEBUG: [db] [sessionid] [cluster] Reconnecting session to cluster cluster in 0.000000s 2026-01-08T15:26:07.198206Z :DEBUG: [db] [sessionid] [cluster] Successfully connected. Initializing session 2026-01-08T15:26:07.198669Z :INFO: [db] [sessionid] [cluster] Server session id: 123-session-id-321 2026-01-08T15:26:07.198874Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2026-01-08T15:26:07.199056Z :INFO: [db] [sessionid] [cluster] Confirm partition stream create. Partition stream id: 1. Cluster: "TestCluster". Topic: "TestTopic". Partition: 1. Read offset: (NULL) 2026-01-08T15:26:07.199389Z :INFO: [db] [sessionid] [cluster] Confirm partition stream destroy. Partition stream id: 1. Cluster: "TestCluster". Topic: "TestTopic". Partition: 1 2026-01-08T15:26:07.200372Z :DEBUG: [db] [sessionid] [cluster] In Reconnect, ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2026-01-08T15:26:07.200400Z :DEBUG: [db] [sessionid] [cluster] New values: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2026-01-08T15:26:07.200448Z :DEBUG: [db] [sessionid] [cluster] Reconnecting session to cluster cluster in 0.000000s 2026-01-08T15:26:07.200689Z :DEBUG: [db] [sessionid] [cluster] Successfully connected. Initializing session 2026-01-08T15:26:07.201206Z :INFO: [db] [sessionid] [cluster] Server session id: 123-session-id-321 2026-01-08T15:26:07.201300Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2026-01-08T15:26:07.201492Z :INFO: [db] [sessionid] [cluster] Confirm partition stream create. Partition stream id: 1. Cluster: "TestCluster". Topic: "TestTopic". Partition: 1. Read offset: (NULL) 2026-01-08T15:26:07.201898Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2026-01-08T15:26:07.202081Z :DEBUG: Decompression task done. Partition/PartitionSessionId: 1 (1-1) 2026-01-08T15:26:07.202142Z :DEBUG: Take Data. Partition 1. Read: {0, 0} (1-1) 2026-01-08T15:26:07.202165Z :DEBUG: [db] [sessionid] [cluster] The application data is transferred to the client. 
Number of messages 1, size 0 bytes 2026-01-08T15:26:07.203283Z :DEBUG: [db] [sessionid] [cluster] In Reconnect, ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2026-01-08T15:26:07.203308Z :DEBUG: [db] [sessionid] [cluster] New values: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2026-01-08T15:26:07.203421Z :DEBUG: [db] [sessionid] [cluster] Reconnecting session to cluster cluster in 0.000000s 2026-01-08T15:26:07.205341Z :DEBUG: [db] [sessionid] [cluster] Successfully connected. Initializing session 2026-01-08T15:26:07.205771Z :INFO: [db] [sessionid] [cluster] Server session id: 123-session-id-321 2026-01-08T15:26:07.213888Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2026-01-08T15:26:07.214249Z :INFO: [db] [sessionid] [cluster] Confirm partition stream create. Partition stream id: 1. Cluster: "TestCluster". Topic: "TestTopic". Partition: 1. Read offset: (NULL) Message data size: 11 Compressed message data size: 31 2026-01-08T15:26:07.215150Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 Post function 2026-01-08T15:26:07.215365Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 Post function Getting new event 2026-01-08T15:26:07.215631Z :DEBUG: Decompression task done. Partition/PartitionSessionId: 1 (5-8) 2026-01-08T15:26:07.215814Z :DEBUG: Decompression task done. Partition/PartitionSessionId: 1 (1-4) 2026-01-08T15:26:07.215901Z :DEBUG: Take Data. Partition 1. Read: {0, 0} (1-1) 2026-01-08T15:26:07.215932Z :DEBUG: Take Data. Partition 1. Read: {0, 1} (2-2) 2026-01-08T15:26:07.215959Z :DEBUG: [db] [sessionid] [cluster] The application data is transferred to the client. Number of messages 2, size 22 bytes DataReceived { PartitionStreamId: 1 PartitionId: 1 Message { Data: ..11 bytes.. Partition stream id: 1 Cluster: "TestCluster". Topic: "TestTopic" Partition: 1 PartitionKey: "" Information: { Offset: 1 SeqNo: 42 MessageGroupId: "src_id" CreateTime: 1970-01-01T00:00:00.042000Z WriteTime: 1970-01-01T00:02:03.000000Z Ip: "127.0.0.1" UncompressedSize: 0 Meta: { "k1": "v1", "k": "v" } } } Message { Data: ..11 bytes.. Partition stream id: 1 Cluster: "TestCluster". Topic: "TestTopic" Partition: 1 PartitionKey: "" Information: { Offset: 2 SeqNo: 43 MessageGroupId: "src_id" CreateTime: 1970-01-01T00:00:00.042000Z WriteTime: 1970-01-01T00:02:03.000000Z Ip: "127.0.0.1" UncompressedSize: 0 Meta: { "k1": "v1", "k": "v" } } } } 2026-01-08T15:26:07.216088Z :DEBUG: [db] [sessionid] [cluster] Commit offsets [0, 3). Partition stream id: 1 GOT RANGE 0 3 Getting new event 2026-01-08T15:26:07.216162Z :DEBUG: Take Data. Partition 1. Read: {0, 2} (3-3) 2026-01-08T15:26:07.216180Z :DEBUG: Take Data. Partition 1. Read: {1, 0} (4-4) 2026-01-08T15:26:07.216196Z :DEBUG: [db] [sessionid] [cluster] The application data is transferred to the client. Number of messages 2, size 22 bytes DataReceived { PartitionStreamId: 1 PartitionId: 1 Message { Data: ..11 bytes.. Partition stream id: 1 Cluster: "TestCluster". Topic: "TestTopic" Partition: 1 PartitionKey: "" Information: { Offset: 3 SeqNo: 44 MessageGroupId: "src_id" CreateTime: 1970-01-01T00:00:00.042000Z WriteTime: 1970-01-01T00:02:03.000000Z Ip: "127.0.0.1" UncompressedSize: 0 Meta: { "k1": "v1", "k": "v" } } } Message { Data: ..11 bytes.. Partition stream id: 1 Cluster: "TestCluster". 
Topic: "TestTopic" Partition: 1 PartitionKey: "" Information: { Offset: 4 SeqNo: 45 MessageGroupId: "src_id_2" CreateTime: 1970-01-01T00:00:00.042000Z WriteTime: 1970-01-01T00:05:21.000000Z Ip: "1.0.0.127" UncompressedSize: 0 Meta: { "v1": "k1", "v": "k" } } } } 2026-01-08T15:26:07.216313Z :DEBUG: [db] [sessionid] [cluster] Commit offsets [3, 5). Partition stream id: 1 GOT RANGE 3 5 Getting new event 2026-01-08T15:26:07.216364Z :DEBUG: Take Data. Partition 1. Read: {0, 0} (5-5) 2026-01-08T15:26:07.216383Z :DEBUG: Take Data. Partition 1. Read: {0, 1} (6-6) 2026-01-08T15:26:07.216405Z :DEBUG: [db] [sessionid] [cluster] The application data is transferred to the client. Number of messages 2, size 22 bytes DataReceived { PartitionStreamId: 1 PartitionId: 1 Message { Data: ..11 bytes.. Partition stream id: 1 Cluster: "TestCluster". Topic: "TestTopic" Partition: 1 PartitionKey: "" Information: { Offset: 5 SeqNo: 46 MessageGroupId: "src_id" CreateTime: 1970-01-01T00:00:00.042000Z WriteTime: 1970-01-01T00:02:03.000000Z Ip: "127.0.0.1" UncompressedSize: 0 Meta: { "k1": "v1", "k": "v" } } } Message { Data: ..11 bytes.. Partition stream id: 1 Cluster: "TestCluster". Topic: "TestTopic" Partition: 1 PartitionKey: "" Information: { Offset: 6 SeqNo: 47 MessageGroupId: "src_id" CreateTime: 1970-01-01T00:00:00.042000Z WriteTime: 1970-01-01T00:02:03.000000Z Ip: "127.0.0.1" UncompressedSize: 0 Meta: { "k1": "v1", "k": "v" } } } } 2026-01-08T15:26:07.216470Z :DEBUG: [db] [sessionid] [cluster] Commit offsets [5, 7). Partition stream id: 1 GOT RANGE 5 7 Getting new event 2026-01-08T15:26:07.216508Z :DEBUG: Take Data. Partition 1. Read: {0, 2} (7-7) 2026-01-08T15:26:07.216528Z :DEBUG: Take Data. Partition 1. Read: {1, 0} (8-8) 2026-01-08T15:26:07.216554Z :DEBUG: [db] [sessionid] [cluster] The application data is transferred to the client. Number of messages 2, size 22 bytes DataReceived { PartitionStreamId: 1 PartitionId: 1 Message { Data: ..11 bytes.. Partition stream id: 1 Cluster: "TestCluster". Topic: "TestTopic" Partition: 1 PartitionKey: "" Information: { Offset: 7 SeqNo: 48 MessageGroupId: "src_id" CreateTime: 1970-01-01T00:00:00.042000Z WriteTime: 1970-01-01T00:02:03.000000Z Ip: "127.0.0.1" UncompressedSize: 0 Meta: { "k1": "v1", "k": "v" } } } Message { Data: ..11 bytes.. Partition stream id: 1 Cluster: "TestCluster". Topic: "TestTopic" Partition: 1 PartitionKey: "" Information: { Offset: 8 SeqNo: 49 MessageGroupId: "src_id_2" CreateTime: 1970-01-01T00:00:00.042000Z WriteTime: 1970-01-01T00:05:21.000000Z Ip: "1.0.0.127" UncompressedSize: 0 Meta: { "v1": "k1", "v": "k" } } } } 2026-01-08T15:26:07.216635Z :DEBUG: [db] [sessionid] [cluster] Commit offsets [7, 9). Partition stream id: 1 GOT RANGE 7 9 2026-01-08T15:26:07.217816Z :DEBUG: [db] [sessionid] [cluster] In Reconnect, ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2026-01-08T15:26:07.217837Z :DEBUG: [db] [sessionid] [cluster] New values: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2026-01-08T15:26:07.217863Z :DEBUG: [db] [sessionid] [cluster] Reconnecting session to cluster cluster in 0.000000s 2026-01-08T15:26:07.221498Z :DEBUG: [db] [sessionid] [cluster] Successfully connected. Initializing session 2026-01-08T15:26:07.223204Z :INFO: [db] [sessionid] [cluster] Server session id: 123-session-id-321 2026-01-08T15:26:07.223370Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2026-01-08T15:26:07.223550Z :INFO: [db] [sessionid] [cluster] Confirm partition stream create. 
Partition stream id: 1. Cluster: "TestCluster". Topic: "TestTopic". Partition: 1. Read offset: (NULL) Message data size: 10 Compressed message data size: 30 2026-01-08T15:26:07.224543Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 Post function 2026-01-08T15:26:07.224781Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 Post function Getting new event 2026-01-08T15:26:07.225138Z :DEBUG: Decompression task done. Partition/PartitionSessionId: 1 (5-8) 2026-01-08T15:26:07.225401Z :DEBUG: Decompression task done. Partition/PartitionSessionId: 1 (1-4) 2026-01-08T15:26:07.226133Z :DEBUG: Take Data. Partition 1. Read: {0, 0} (1-1) 2026-01-08T15:26:07.226181Z :DEBUG: Take Data. Partition 1. Read: {0, 1} (2-2) 2026-01-08T15:26:07.226204Z :DEBUG: Take Data. Partition 1. Read: {0, 2} (3-3) 2026-01-08T15:26:07.226233Z :DEBUG: Take Data. Partition 1. Read: {1, 0} (4-4) 2026-01-08T15:26:07.226266Z :DEBUG: [db] [sessionid] [cluster] The application data is transferred to the client. Number of messages 4, size 40 bytes DataReceived { PartitionStreamId: 1 PartitionId: 1 Message { Data: ..10 bytes.. Partition stream id: 1 Cluster: "TestCluster". Topic: "TestTopic" Partition: 1 PartitionKey: "" Information: { Offset: 1 SeqNo: 42 MessageGroupId: "src_id" CreateTime: 1970-01-01T00:00:00.042000Z WriteTime: 1970-01-01T00:02:03.000000Z Ip: "127.0.0.1" UncompressedSize: 0 Meta: { "k1": "v1", "k": "v" } } } Message { Data: ..10 bytes.. Partition stream id: 1 Cluster: "TestCluster". Topic: "TestTopic" Partition: 1 PartitionKey: "" Information: { Offset: 2 SeqNo: 43 MessageGroupId: "src_id" CreateTime: 1970-01-01T00:00:00.042000Z WriteTime: 1970-01-01T00:02:03.000000Z Ip: "127.0.0.1" UncompressedSize: 0 Meta: { "k1": "v1", "k": "v" } } } Message { Data: ..10 bytes.. Partition stream id: 1 Cluster: "TestCluster". Topic: "TestTopic" Partition: 1 PartitionKey: "" Information: { Offset: 3 SeqNo: 44 MessageGroupId: "src_id" CreateTime: 1970-01-01T00:00:00.042000Z WriteTime: 1970-01-01T00:02:03.000000Z Ip: "127.0.0.1" UncompressedSize: 0 Meta: { "k1": "v1", "k": "v" } } } Message { Data: ..10 bytes.. Partition stream id: 1 Cluster: "TestCluster". Topic: "TestTopic" Partition: 1 PartitionKey: "" Information: { Offset: 4 SeqNo: 45 MessageGroupId: "src_id_2" CreateTime: 1970-01-01T00:00:00.042000Z WriteTime: 1970-01-01T00:05:21.000000Z Ip: "1.0.0.127" UncompressedSize: 0 Meta: { "v1": "k1", "v": "k" } } } } 2026-01-08T15:26:07.226473Z :DEBUG: [db] [sessionid] [cluster] Commit offsets [0, 5). Partition stream id: 1 GOT RANGE 0 5 Getting new event 2026-01-08T15:26:07.226568Z :DEBUG: Take Data. Partition 1. Read: {0, 0} (5-5) 2026-01-08T15:26:07.226586Z :DEBUG: Take Data. Partition 1. Read: {0, 1} (6-6) 2026-01-08T15:26:07.226615Z :DEBUG: Take Data. Partition 1. Read: {0, 2} (7-7) 2026-01-08T15:26:07.226633Z :DEBUG: Take Data. Partition 1. Read: {1, 0} (8-8) 2026-01-08T15:26:07.226653Z :DEBUG: [db] [sessionid] [cluster] The application data is transferred to the client. Number of messages 4, size 40 bytes DataReceived { PartitionStreamId: 1 PartitionId: 1 Message { Data: ..10 bytes.. Partition stream id: 1 Cluster: "TestCluster". 
Topic: "TestTopic" Partition: 1 PartitionKey: "" Information: { Offset: 5 SeqNo: 46 MessageGroupId: "src_id" CreateTime: 1970-01-01T00:00:00.042000Z WriteTime: 1970-01-01T00:02:03.000000Z Ip: "127.0.0.1" UncompressedSize: 0 Meta: { "k1": "v1", "k": "v" } } } Message { Data: ..10 bytes.. Partition stream id: 1 Cluster: "TestCluster". Topic: "TestTopic" Partition: 1 PartitionKey: "" Information: { Offset: 6 SeqNo: 47 MessageGroupId: "src_id" CreateTime: 1970-01-01T00:00:00.042000Z WriteTime: 1970-01-01T00:02:03.000000Z Ip: "127.0.0.1" UncompressedSize: 0 Meta: { "k1": "v1", "k": "v" } } } Message { Data: ..10 bytes.. Partition stream id: 1 Cluster: "TestCluster". Topic: "TestTopic" Partition: 1 PartitionKey: "" Information: { Offset: 7 SeqNo: 48 MessageGroupId: "src_id" CreateTime: 1970-01-01T00:00:00.042000Z WriteTime: 1970-01-01T00:02:03.000000Z Ip: "127.0.0.1" UncompressedSize: 0 Meta: { "k1": "v1", "k": "v" } } } Message { Data: ..10 bytes.. Partition stream id: 1 Cluster: "TestCluster". Topic: "TestTopic" Partition: 1 PartitionKey: "" Information: { Offset: 8 SeqNo: 49 MessageGroupId: "src_id_2" CreateTime: 1970-01-01T00:00:00.042000Z WriteTime: 1970-01-01T00:05:21.000000Z Ip: "1.0.0.127" UncompressedSize: 0 Meta: { "v1": "k1", "v": "k" } } } } 2026-01-08T15:26:07.226773Z :DEBUG: [db] [sessionid] [cluster] Commit offsets [5, 9). Partition stream id: 1 GOT RANGE 5 9 2026-01-08T15:26:07.228152Z :DEBUG: [db] [sessionid] [cluster] In Reconnect, ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2026-01-08T15:26:07.228174Z :DEBUG: [db] [sessionid] [cluster] New values: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2026-01-08T15:26:07.228198Z :DEBUG: [db] [sessionid] [cluster] Reconnecting session to cluster cluster in 0.000000s 2026-01-08T15:26:07.228497Z :DEBUG: [db] [sessionid] [cluster] Successfully connected. Initializing session 2026-01-08T15:26:07.228898Z :INFO: [db] [sessionid] [cluster] Server session id: 123-session-id-321 2026-01-08T15:26:07.229016Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2026-01-08T15:26:07.229173Z :INFO: [db] [sessionid] [cluster] Confirm partition stream create. Partition stream id: 1. Cluster: "TestCluster". Topic: "TestTopic". Partition: 1. Read offset: (NULL) 2026-01-08T15:26:07.230092Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 Post function 2026-01-08T15:26:07.230788Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 Post function 2026-01-08T15:26:07.231050Z :DEBUG: Decompression task done. Partition/PartitionSessionId: 1 (10-11) 2026-01-08T15:26:07.231158Z :DEBUG: Decompression task done. Partition/PartitionSessionId: 1 (1-2) 2026-01-08T15:26:07.231255Z :DEBUG: Take Data. Partition 1. Read: {0, 0} (1-1) 2026-01-08T15:26:07.231281Z :DEBUG: Take Data. Partition 1. Read: {0, 1} (2-2) 2026-01-08T15:26:07.231301Z :DEBUG: Take Data. Partition 1. Read: {0, 0} (10-10) 2026-01-08T15:26:07.231323Z :DEBUG: Take Data. Partition 1. Read: {0, 1} (11-11) 2026-01-08T15:26:07.231363Z :DEBUG: [db] [sessionid] [cluster] The application data is transferred to the client. Number of messages 2, size 16 bytes 2026-01-08T15:26:07.231383Z :DEBUG: [db] [sessionid] [cluster] The application data is transferred to the client. Number of messages 2, size 16 bytes got data event: DataReceived { PartitionStreamId: 1 PartitionId: 1 Message { Data: ..8 bytes.. 
Partition stream id: 1 Cluster: "TestCluster". Topic: "TestTopic" Partition: 1 PartitionKey: "" Information: { Offset: 1 SeqNo: 1 MessageGroupId: "src_id" CreateTime: 1970-01-01T00:00:00.042000Z WriteTime: 1970-01-01T00:00:00.042000Z Ip: "::1" UncompressedSize: 0 Meta: { } } } Message { Data: ..8 bytes.. Partition stream id: 1 Cluster: "TestCluster". Topic: "TestTopic" Partition: 1 PartitionKey: "" Information: { Offset: 2 SeqNo: 1 MessageGroupId: "src_id" CreateTime: 1970-01-01T00:00:00.042000Z WriteTime: 1970-01-01T00:00:00.042000Z Ip: "::1" UncompressedSize: 0 Meta: { } } } Message { Data: ..8 bytes.. Partition stream id: 1 Cluster: "TestCluster". Topic: "TestTopic" Partition: 1 PartitionKey: "" Information: { Offset: 10 SeqNo: 1 MessageGroupId: "src_id" CreateTime: 1970-01-01T00:00:00.042000Z WriteTime: 1970-01-01T00:00:00.042000Z Ip: "::1" UncompressedSize: 0 Meta: { } } } Message { Data: ..8 bytes.. Partition stream id: 1 Cluster: "TestCluster". Topic: "TestTopic" Partition: 1 PartitionKey: "" Information: { Offset: 11 SeqNo: 1 MessageGroupId: "src_id" CreateTime: 1970-01-01T00:00:00.042000Z WriteTime: 1970-01-01T00:00:00.042000Z Ip: "::1" UncompressedSize: 0 Meta: { } } } } 2026-01-08T15:26:07.231507Z :DEBUG: [db] [sessionid] [cluster] Commit offsets [0, 3). Partition stream id: 1 Got commit req { offset_ranges { assign_id: 1 end_offset: 3 } } RANGE 0 3 2026-01-08T15:26:07.231633Z :DEBUG: [db] [sessionid] [cluster] Commit offsets [3, 12). Partition stream id: 1 Got commit req { offset_ranges { assign_id: 1 start_offset: 3 end_offset: 12 } } RANGE 3 12 |85.1%| [TM] {BAZEL_UPLOAD} ydb/public/sdk/cpp/src/client/persqueue_public/ut/with_offset_ranges_mode_ut/unittest ------- [TM] {asan, default-linux-x86_64, release} ydb/public/sdk/cpp/src/client/persqueue_public/ut/with_offset_ranges_mode_ut/unittest >> ReadSessionImplTest::SimpleDataHandlersWithGracefulReleaseWithCommit [GOOD] Test command err: 2026-01-08T15:26:07.022926Z :DEBUG: [db] [sessionid] [cluster] In Reconnect, ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2026-01-08T15:26:07.022961Z :DEBUG: [db] [sessionid] [cluster] New values: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2026-01-08T15:26:07.022990Z :DEBUG: [db] [sessionid] [cluster] Reconnecting session to cluster cluster in 0.000000s 2026-01-08T15:26:07.024612Z :DEBUG: [db] [sessionid] [cluster] Successfully connected. Initializing session 2026-01-08T15:26:07.025272Z :INFO: [db] [sessionid] [cluster] Server session id: session id 2026-01-08T15:26:07.025332Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2026-01-08T15:26:07.026368Z :DEBUG: [db] [sessionid] [cluster] In Reconnect, ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2026-01-08T15:26:07.026432Z :DEBUG: [db] [sessionid] [cluster] New values: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2026-01-08T15:26:07.026451Z :DEBUG: [db] [sessionid] [cluster] Reconnecting session to cluster cluster in 0.000000s 2026-01-08T15:26:07.027060Z :DEBUG: [db] [sessionid] [cluster] Successfully connected. 
Initializing session 2026-01-08T15:26:07.027351Z :INFO: [db] [sessionid] [cluster] Server session id: session id 2026-01-08T15:26:07.027399Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2026-01-08T15:26:07.031495Z :DEBUG: [db] [sessionid] [cluster] In Reconnect, ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2026-01-08T15:26:07.031518Z :DEBUG: [db] [sessionid] [cluster] New values: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2026-01-08T15:26:07.032343Z :DEBUG: [db] [sessionid] [cluster] Reconnecting session to cluster cluster in 0.000000s 2026-01-08T15:26:07.032704Z :ERROR: [db] [sessionid] [cluster] Got error. Status: INTERNAL_ERROR. Description: 2026-01-08T15:26:07.032770Z :DEBUG: [db] [sessionid] [cluster] In Reconnect, ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2026-01-08T15:26:07.032807Z :DEBUG: [db] [sessionid] [cluster] New values: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2026-01-08T15:26:07.032931Z :INFO: [db] [sessionid] [cluster] Closing session to cluster: SessionClosed { Status: INTERNAL_ERROR Issues: "
: Error: Failed to establish connection to server "" ( cluster cluster). Attempts done: 1 " } 2026-01-08T15:26:07.034302Z :DEBUG: [db] [sessionid] [cluster] In Reconnect, ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2026-01-08T15:26:07.034340Z :DEBUG: [db] [sessionid] [cluster] New values: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2026-01-08T15:26:07.034357Z :DEBUG: [db] [sessionid] [cluster] Reconnecting session to cluster cluster in 0.000000s 2026-01-08T15:26:07.034659Z :ERROR: [db] [sessionid] [cluster] Got error. Status: TIMEOUT. Description:
: Error: Failed to establish connection to server. Attempts done: 1 2026-01-08T15:26:07.034695Z :DEBUG: [db] [sessionid] [cluster] In Reconnect, ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2026-01-08T15:26:07.034714Z :DEBUG: [db] [sessionid] [cluster] New values: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2026-01-08T15:26:07.034775Z :INFO: [db] [sessionid] [cluster] Closing session to cluster: SessionClosed { Status: TIMEOUT Issues: "
: Error: Failed to establish connection to server. Attempts done: 1 " } 2026-01-08T15:26:07.035832Z :DEBUG: [db] [sessionid] [cluster] In Reconnect, ReadSizeBudget = 2500, ReadSizeServerDelta = 0 2026-01-08T15:26:07.035860Z :DEBUG: [db] [sessionid] [cluster] New values: ReadSizeBudget = 2500, ReadSizeServerDelta = 0 2026-01-08T15:26:07.035879Z :DEBUG: [db] [sessionid] [cluster] Reconnecting session to cluster cluster in 0.000000s 2026-01-08T15:26:07.037198Z :DEBUG: [db] [sessionid] [cluster] Successfully connected. Initializing session 2026-01-08T15:26:07.037546Z :INFO: [db] [sessionid] [cluster] Server session id: 123-session-id-321 2026-01-08T15:26:07.073246Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 2500, ReadSizeServerDelta = 0 2026-01-08T15:26:07.077787Z :INFO: [db] [sessionid] [cluster] Confirm partition stream create. Partition stream id: 1. Cluster: "TestCluster". Topic: "TestTopic". Partition: 1. Read offset: (NULL) 2026-01-08T15:26:07.078136Z :INFO: [db] [sessionid] [cluster] Confirm partition stream create. Partition stream id: 2. Cluster: "TestCluster". Topic: "TestTopic". Partition: 2. Read offset: (NULL) 2026-01-08T15:26:07.082141Z :DEBUG: Decompression task done. Partition/PartitionSessionId: 1 (1-50) 2026-01-08T15:26:07.082952Z :DEBUG: Take Data. Partition 1. Read: {0, 0} (1-1) 2026-01-08T15:26:07.083004Z :DEBUG: Take Data. Partition 1. Read: {0, 1} (2-2) 2026-01-08T15:26:07.083036Z :DEBUG: Take Data. Partition 1. Read: {0, 2} (3-3) 2026-01-08T15:26:07.083057Z :DEBUG: Take Data. Partition 1. Read: {0, 3} (4-4) 2026-01-08T15:26:07.083084Z :DEBUG: Take Data. Partition 1. Read: {0, 4} (5-5) 2026-01-08T15:26:07.083107Z :DEBUG: Take Data. Partition 1. Read: {0, 5} (6-6) 2026-01-08T15:26:07.083123Z :DEBUG: Take Data. Partition 1. Read: {0, 6} (7-7) 2026-01-08T15:26:07.083142Z :DEBUG: Take Data. Partition 1. Read: {0, 7} (8-8) 2026-01-08T15:26:07.083175Z :DEBUG: Take Data. Partition 1. Read: {0, 8} (9-9) 2026-01-08T15:26:07.083194Z :DEBUG: Take Data. Partition 1. Read: {0, 9} (10-10) 2026-01-08T15:26:07.083213Z :DEBUG: Take Data. Partition 1. Read: {0, 10} (11-11) 2026-01-08T15:26:07.083255Z :DEBUG: Take Data. Partition 1. Read: {0, 11} (12-12) 2026-01-08T15:26:07.083278Z :DEBUG: Take Data. Partition 1. Read: {0, 12} (13-13) 2026-01-08T15:26:07.083294Z :DEBUG: Take Data. Partition 1. Read: {0, 13} (14-14) 2026-01-08T15:26:07.083310Z :DEBUG: Take Data. Partition 1. Read: {0, 14} (15-15) 2026-01-08T15:26:07.083327Z :DEBUG: Take Data. Partition 1. Read: {0, 15} (16-16) 2026-01-08T15:26:07.083384Z :DEBUG: Take Data. Partition 1. Read: {0, 16} (17-17) 2026-01-08T15:26:07.083402Z :DEBUG: Take Data. Partition 1. Read: {0, 17} (18-18) 2026-01-08T15:26:07.083420Z :DEBUG: Take Data. Partition 1. Read: {0, 18} (19-19) 2026-01-08T15:26:07.083438Z :DEBUG: Take Data. Partition 1. Read: {0, 19} (20-20) 2026-01-08T15:26:07.083455Z :DEBUG: Take Data. Partition 1. Read: {0, 20} (21-21) 2026-01-08T15:26:07.083478Z :DEBUG: Take Data. Partition 1. Read: {0, 21} (22-22) 2026-01-08T15:26:07.083498Z :DEBUG: Take Data. Partition 1. Read: {1, 0} (23-23) 2026-01-08T15:26:07.083517Z :DEBUG: Take Data. Partition 1. Read: {1, 1} (24-24) 2026-01-08T15:26:07.083546Z :DEBUG: Take Data. Partition 1. Read: {1, 2} (25-25) 2026-01-08T15:26:07.083572Z :DEBUG: Take Data. Partition 1. Read: {1, 3} (26-26) 2026-01-08T15:26:07.083591Z :DEBUG: Take Data. Partition 1. Read: {1, 4} (27-27) 2026-01-08T15:26:07.083617Z :DEBUG: Take Data. Partition 1. 
Read: {1, 5} (28-28) 2026-01-08T15:26:07.083643Z :DEBUG: Take Data. Partition 1. Read: {1, 6} (29-29) 2026-01-08T15:26:07.083662Z :DEBUG: Take Data. Partition 1. Read: {1, 7} (30-30) 2026-01-08T15:26:07.083682Z :DEBUG: Take Data. Partition 1. Read: {1, 8} (31-31) 2026-01-08T15:26:07.083705Z :DEBUG: Take Data. Partition 1. Read: {1, 9} (32-32) 2026-01-08T15:26:07.083794Z :DEBUG: Take Data. Partition 1. Read: {1, 10} (33-33) 2026-01-08T15:26:07.083829Z :DEBUG: Take Data. Partition 1. Read: {1, 11} (34-34) 2026-01-08T15:26:07.083851Z :DEBUG: Take Data. Partition 1. Read: {1, 12} (35-35) 2026-01-08T15:26:07.083871Z :DEBUG: Take Data. Partition 1. Read: {1, 13} (36-36) 2026-01-08T15:26:07.083887Z :DEBUG: Take Data. Partition 1. Read: {1, 14} (37-37) 2026-01-08T15:26:07.083908Z :DEBUG: Take Data. Partition 1. Read: {1, 15} (38-38) 2026-01-08T15:26:07.083924Z :DEBUG: Take Data. Partition 1. Read: {1, 16} (39-39) 2026-01-08T15:26:07.083948Z :DEBUG: Take Data. Partition 1. Read: {1, 17} (40-40) 2026-01-08T15:26:07.083966Z :DEBUG: Take Data. Partition 1. Read: {1, 18} (41-41) 2026-01-08T15:26:07.083982Z :DEBUG: Take Data. Partition 1. Read: {1, 19} (42-42) 2026-01-08T15:26:07.084000Z :DEBUG: Take Data. Partition 1. Read: {1, 20} (43-43) 2026-01-08T15:26:07.084025Z :DEBUG: Take Data. Partition 1. Read: {1, 21} (44-44) 2026-01-08T15:26:07.084055Z :DEBUG: Take Data. Partition 1. Read: {1, 22} (45-45) 2026-01-08T15:26:07.084072Z :DEBUG: Take Data. Partition 1. Read: {1, 23} (46-46) 2026-01-08T15:26:07.084090Z :DEBUG: Take Data. Partition 1. Read: {1, 24} (47-47) 2026-01-08T15:26:07.084107Z :DEBUG: Take Data. Partition 1. Read: {1, 25} (48-48) 2026-01-08T15:26:07.084124Z :DEBUG: Take Data. Partition 1. Read: {1, 26} (49-49) 2026-01-08T15:26:07.084155Z :DEBUG: Take Data. Partition 1. Read: {1, 27} (50-50) 2026-01-08T15:26:07.084225Z :DEBUG: [db] [sessionid] [cluster] The application data is transferred to the client. Number of messages 50, size 5000 bytes 2026-01-08T15:26:07.086580Z :DEBUG: Decompression task done. Partition/PartitionSessionId: 2 (51-100) 2026-01-08T15:26:07.086787Z :DEBUG: Take Data. Partition 2. Read: {0, 0} (51-51) 2026-01-08T15:26:07.086824Z :DEBUG: Take Data. Partition 2. Read: {0, 1} (52-52) 2026-01-08T15:26:07.086845Z :DEBUG: Take Data. Partition 2. Read: {0, 2} (53-53) 2026-01-08T15:26:07.086864Z :DEBUG: Take Data. Partition 2. Read: {0, 3} (54-54) 2026-01-08T15:26:07.086890Z :DEBUG: Take Data. Partition 2. Read: {0, 4} (55-55) 2026-01-08T15:26:07.086907Z :DEBUG: Take Data. Partition 2. Read: {0, 5} (56-56) 2026-01-08T15:26:07.086924Z :DEBUG: Take Data. Partition 2. Read: {0, 6} (57-57) 2026-01-08T15:26:07.086940Z :DEBUG: Take Data. Partition 2. Read: {0, 7} (58-58) 2026-01-08T15:26:07.086975Z :DEBUG: Take Data. Partition 2. Read: {0, 8} (59-59) 2026-01-08T15:26:07.086991Z :DEBUG: Take Data. Partition 2. Read: {0, 9} (60-60) 2026-01-08T15:26:07.087010Z :DEBUG: Take Data. Partition 2. Read: {0, 10} (61-61) 2026-01-08T15:26:07.087028Z :DEBUG: Take Data. Partition 2. Read: {0, 11} (62-62) 2026-01-08T15:26:07.087045Z :DEBUG: Take Data. Partition 2. Read: {0, 12} (63-63) 2026-01-08T15:26:07.087064Z :DEBUG: Take Data. Partition 2. Read: {0, 13} (64-64) 2026-01-08T15:26:07.087097Z :DEBUG: Take Data. Partition 2. Read: {0, 14} (65-65) 2026-01-08T15:26:07.087119Z :DEBUG: Take Data. Partition 2. Read: {0, 15} (66-66) 2026-01-08T15:26:07.087166Z :DEBUG: Take Data. Partition 2. Read: {0, 16} (67-67) 2026-01-08T15:26:07.087188Z :DEBUG: Take Data. Partition 2. 
Read: {0, 17} (68-68) 2026-01-08T15:26:07.087223Z :DEBUG: Take Data. Partition 2. Read: {0, 18} (69-69) 2026-01-08T15:26:07.087247Z :DEBUG: Take Data. Partition 2. Read: {0, 19} (70-70) 2026-01-08T15:26:07.087262Z :DEBUG: Take Data. Partition 2. Read: {0, 20} (71-71) 2026-01-08T15:26:07.087278Z :DEBUG: Take Data. Partition 2. Read: {0, 21} (72-72) 2026-01-08T15:26:07.087296Z :DEBUG: Take Data. Partition 2. Read: {1, 0} (73-73) 2026-01-08T15:26:07.087312Z :DEBUG: Take Data. Partition 2. Read: {1, 1} (74-74) 2026-01-08T15:26:07.087349Z :DEBUG: Take Data. Partition 2. Read: {1, 2} (75-75) 2026-01-08T15:26:07.087374Z :DEBUG: Take Data. Partition 2. Read: {1, 3} (76-76) 2026-01-08T15:26:07.087390Z :DEBUG: Take Data. Partition 2. Read: {1, 4} (77-77) 2026-01-08T15:26:07.087420Z :DEBUG: Take Data. Partition 2. Read: {1, 5} (78-78) 2026-01-08T15:26:07.087438Z :DEBUG: Take Data. Partition 2. Read: {1, 6} (79-79) 2026-01-08T15:26:07.087459Z :DEBUG: Take Data. Partition 2. Read: {1, 7} (80-80) 2026-01-08T15:26:07.087487Z :DEBUG: Take Data. Partition 2. Read: {1, 8} (81-81) 2026-01-08T15:26:07.087509Z :DEBUG: Take Data. Partition 2. Read: {1, 9} (82-82) 2026-01-08T15:26:07.087579Z :DEBUG: Take Data. Partition 2. Read: {1, 10} (83-83) 2026-01-08T15:26:07.087601Z :DEBUG: Take Data. Partition 2. Read: {1, 11} (84-84) 2026-01-08T15:26:07.087618Z :DEBUG: Take Data. Partition 2. Read: {1, 12} (85-85) 2026-01-08T15:26:07.087635Z :DEBUG: Take Data. Partition 2. Read: {1, 13} (86-86) 2026-01-08T15:26:07.087653Z :DEBUG: Take Data. Partition 2. Read: {1, 14} (87-87) 2026-01-08T15:26:07.087669Z :DEBUG: Take Data. Partition 2. Read: {1, 15} (88-88) 2026-01-08T15:26:07.087688Z :DEBUG: Take Data. Partition 2. Read: {1, 16} (89-89) 2026-01-08T15:26:07.087704Z :DEBUG: Take Data. Partition 2. Read: {1, 17} (90-90) 2026-01-08T15:26:07.087732Z :DEBUG: Take Data. Partition 2. Read: {1, 18} (91-91) 2026-01-08T15:26:07.087762Z :DEBUG: Take Data. Partition 2. Read: {1, 19} (92-92) 2026-01-08T15:26:07.087780Z :DEBUG: Take Data. Partition 2. Read: {1, 20} (93-93) 2026-01-08T15:26:07.087796Z :DEBUG: Take Data. Partition 2. Read: {1, 21} (94-94) 2026-01-08T15:26:07.087811Z :DEBUG: Take Data. Partition 2. Read: {1, 22} (95-95) 2026-01-08T15:26:07.087829Z :DEBUG: Take Data. Partition 2. Read: {1, 23} (96-96) 2026-01-08T15:26:07.087860Z :DEBUG: Take Data. Partition 2. Read: {1, 24} (97-97) 2026-01-08T15:26:07.087882Z :DEBUG: Take Data. Partition 2. Read: {1, 25} (98-98) 2026-01-08T15:26:07.087897Z :DEBUG: Take Data. Partition 2. Read: {1, 26} (99-99) 2026-01-08T15:26:07.087916Z :DEBUG: Take Data. Partition 2. Read: {1, 27} (100-100) 2026-01-08T15:26:07.087970Z :DEBUG: [db] [sessionid] [cluster] The application data is transferred to the client. Number of messages 50, size 5000 bytes 2026-01-08T15:26:07.093456Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 2500, ReadSizeServerDelta = 0 2026-01-08T15:26:07.095544Z :DEBUG: [db] [sessionid] [cluster] In Reconnect, ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2026-01-08T15:26:07.095579Z :DEBUG: [db] [sessionid] [cluster] New values: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2026-01-08T15:26:07.095602Z :DEBUG: [db] [sessionid] [cluster] Reconnecting session to cluster cluster in 0.000000s 2026-01-08T15:26:07.095956Z :DEBUG: [db] [sessionid] [cluster] Successfully connected. 
Initializing session 2026-01-08T15:26:07.096367Z :INFO: [db] [sessionid] [cluster] Server session id: 123-session-id-321 2026-01-08T15:26:07.096603Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2026-01-08T15:26:07.099266Z :INFO: [db] [sessionid] [cluster] Confirm partition stream create. Partition stream id: 1. Cluster: "TestCluster". Topic: "TestTopic". Partition: 1. Read offset: (NULL) 2026-01-08T15:26:07.200120Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2026-01-08T15:26:07.200405Z :DEBUG: Decompression task done. Partition/PartitionSessionId: 1 (1-2) 2026-01-08T15:26:07.200464Z :DEBUG: Take Data. Partition 1. Read: {0, 0} (1-1) 2026-01-08T15:26:07.200497Z :DEBUG: Take Data. Partition 1. Read: {1, 0} (2-2) 2026-01-08T15:26:07.200544Z :DEBUG: [db] [sessionid] [cluster] The application data is transferred to the client. Number of messages 2, size 6 bytes 2026-01-08T15:26:07.402643Z :DEBUG: [db] [sessionid] [cluster] Commit offsets [0, 3). Partition stream id: 1 2026-01-08T15:26:07.506708Z :DEBUG: [db] [sessionid] [cluster] Committed response: cookies { assign_id: 1 partition_cookie: 1 } 2026-01-08T15:26:07.506838Z :INFO: [db] [sessionid] [cluster] Confirm partition stream destroy. Partition stream id: 1. Cluster: "TestCluster". Topic: "TestTopic". Partition: 1 2026-01-08T15:26:07.506989Z :DEBUG: [db] [sessionid] [cluster] Abort session to cluster 2026-01-08T15:26:07.508118Z :DEBUG: [db] [sessionid] [cluster] In Reconnect, ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2026-01-08T15:26:07.508140Z :DEBUG: [db] [sessionid] [cluster] New values: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2026-01-08T15:26:07.508159Z :DEBUG: [db] [sessionid] [cluster] Reconnecting session to cluster cluster in 0.000000s 2026-01-08T15:26:07.508507Z :DEBUG: [db] [sessionid] [cluster] Successfully connected. Initializing session 2026-01-08T15:26:07.512316Z :INFO: [db] [sessionid] [cluster] Server session id: 123-session-id-321 2026-01-08T15:26:07.512496Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2026-01-08T15:26:07.514742Z :INFO: [db] [sessionid] [cluster] Confirm partition stream create. Partition stream id: 1. Cluster: "TestCluster". Topic: "TestTopic". Partition: 1. Read offset: (NULL) 2026-01-08T15:26:07.619218Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2026-01-08T15:26:07.619425Z :DEBUG: Decompression task done. Partition/PartitionSessionId: 1 (1-2) 2026-01-08T15:26:07.619477Z :DEBUG: Take Data. Partition 1. Read: {0, 0} (1-1) 2026-01-08T15:26:07.619517Z :DEBUG: Take Data. Partition 1. Read: {1, 0} (2-2) 2026-01-08T15:26:07.619578Z :DEBUG: [db] [sessionid] [cluster] Commit offsets [0, 3). Partition stream id: 1 2026-01-08T15:26:07.619664Z :DEBUG: [db] [sessionid] [cluster] The application data is transferred to the client. Number of messages 2, size 6 bytes 2026-01-08T15:26:07.619824Z :DEBUG: [db] [sessionid] [cluster] Committed response: cookies { assign_id: 1 partition_cookie: 1 } 2026-01-08T15:26:07.619880Z :INFO: [db] [sessionid] [cluster] Confirm partition stream destroy. Partition stream id: 1. Cluster: "TestCluster". Topic: "TestTopic". 
Partition: 1 2026-01-08T15:26:07.619984Z :DEBUG: [db] [sessionid] [cluster] Abort session to cluster >> BlobDepot::DecommitVerifiedRandom [GOOD] >> ReadSessionImplTest::SimpleDataHandlers [GOOD] >> BlobDepot::CheckIntegrity >> ReadSessionImplTest::SimpleDataHandlersWithCommit >> ReadSessionImplTest::ProperlyOrdersDecompressedData [GOOD] >> ReadSessionImplTest::PacksBatches_ExactlyTwoMessagesInBatch [GOOD] >> ReadSessionImplTest::PacksBatches_OneMessageInEveryBatch [GOOD] >> ReadSessionImplTest::PacksBatches_BigBatchDecompressWithTwoBatchTasks >> ReadSessionImplTest::SimpleDataHandlersWithCommit [GOOD] |85.1%| [TM] {BAZEL_UPLOAD} ydb/public/sdk/cpp/src/client/persqueue_public/ut/with_offset_ranges_mode_ut/unittest >> ReadSessionImplTest::DecompressRaw [GOOD] >> ReadSessionImplTest::DecompressGzip [GOOD] >> ReadSessionImplTest::DecompressZstd [GOOD] >> ReadSessionImplTest::DecompressRawEmptyMessage [GOOD] >> ReadSessionImplTest::DecompressGzipEmptyMessage [GOOD] >> ReadSessionImplTest::DecompressWithSynchronousExecutor >> ReadSessionImplTest::DecompressWithSynchronousExecutor [GOOD] >> ReadSessionImplTest::DataReceivedCallbackReal |85.1%| [TM] {asan, default-linux-x86_64, release} ydb/library/yql/dq/comp_nodes/ut/unittest >> TDqHashJoinBasicTest::TestBlockSpilling [GOOD] |85.1%| [TM] {BAZEL_UPLOAD} ydb/library/yql/dq/comp_nodes/ut/unittest >> TestFilterSet::WatermarkWhere [GOOD] >> TestFilterSet::WatermarkWhereFalse >> ClosedIntervalSet::EnumInRangeReverse [GOOD] >> GivenIdRange::IssueNewRange [GOOD] >> GivenIdRange::Trim >> BlobDepot::CheckIntegrity [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/public/sdk/cpp/src/client/persqueue_public/ut/with_offset_ranges_mode_ut/unittest >> ReadSessionImplTest::SimpleDataHandlersWithCommit [GOOD] Test command err: 2026-01-08T15:26:07.943314Z :DEBUG: [db] [sessionid] [cluster] In Reconnect, ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2026-01-08T15:26:07.943351Z :DEBUG: [db] [sessionid] [cluster] New values: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2026-01-08T15:26:07.943379Z :DEBUG: [db] [sessionid] [cluster] Reconnecting session to cluster cluster in 0.000000s 2026-01-08T15:26:07.943830Z :ERROR: [db] [sessionid] [cluster] Got error. Status: INTERNAL_ERROR. Description: 2026-01-08T15:26:07.943889Z :DEBUG: [db] [sessionid] [cluster] In Reconnect, ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2026-01-08T15:26:07.943919Z :DEBUG: [db] [sessionid] [cluster] New values: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2026-01-08T15:26:07.944940Z :DEBUG: [db] [sessionid] [cluster] Reconnecting session to cluster cluster in 0.007357s 2026-01-08T15:26:07.945377Z :DEBUG: [db] [sessionid] [cluster] Successfully connected. Initializing session 2026-01-08T15:26:07.949558Z :INFO: [db] [sessionid] [cluster] Server session id: session id 2026-01-08T15:26:07.949619Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2026-01-08T15:26:07.950500Z :DEBUG: [db] [sessionid] [cluster] In Reconnect, ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2026-01-08T15:26:07.950520Z :DEBUG: [db] [sessionid] [cluster] New values: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2026-01-08T15:26:07.950544Z :DEBUG: [db] [sessionid] [cluster] Reconnecting session to cluster cluster in 0.000000s 2026-01-08T15:26:07.951167Z :ERROR: [db] [sessionid] [cluster] Got error. Status: INTERNAL_ERROR. 
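The ReadSessionImpl traces in this block record the client-side lifecycle that the persqueue_public tests drive: reconnect, server-session handshake, partition stream confirmation, decompression, data delivery, and offset commit. For orientation, here is a minimal sketch of that consumption loop. It is written against the public NYdb::NTopic C++ API as an assumption; the tests here exercise the older persqueue_public client, whose event names differ slightly, and the header path varies across SDK layouts.

// Minimal sketch (not from the test sources): a read-session event loop that
// mirrors the flow in the surrounding log entries: confirm partition session
// start, take decompressed data, commit the consumed offset range.
#include <ydb-cpp-sdk/client/topic/client.h> // assumed path; differs in Arcadia-layout SDKs

#include <iostream>
#include <variant>

int main() {
    NYdb::TDriverConfig cfg;
    cfg.SetEndpoint("grpc://localhost:2136"); // placeholder endpoint
    cfg.SetDatabase("/Root");                 // placeholder database
    NYdb::TDriver driver(cfg);

    NYdb::NTopic::TTopicClient client(driver);
    auto session = client.CreateReadSession(
        NYdb::NTopic::TReadSessionSettings()
            .ConsumerName("consumer")         // hypothetical consumer name
            .AppendTopics("TestTopic"));

    using namespace NYdb::NTopic;
    for (;;) {
        auto event = session->GetEvent(/*block=*/true);
        if (!event) {
            continue;
        }
        if (auto* start = std::get_if<TReadSessionEvent::TStartPartitionSessionEvent>(&*event)) {
            start->Confirm();                 // "Confirm partition stream create" in the log
        } else if (auto* data = std::get_if<TReadSessionEvent::TDataReceivedEvent>(&*event)) {
            for (const auto& msg : data->GetMessages()) {
                std::cout << msg.GetData().size() << " bytes at offset " << msg.GetOffset() << '\n';
            }
            data->Commit();                   // "Commit offsets [a, b)" in the log
        } else if (auto* stop = std::get_if<TReadSessionEvent::TStopPartitionSessionEvent>(&*event)) {
            stop->Confirm();                  // "Confirm partition stream destroy"
        } else if (std::get_if<TSessionClosedEvent>(&*event)) {
            break;                            // "Closing session to cluster: SessionClosed { ... }"
        }
    }
    driver.Stop(true);
    return 0;
}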
Description: 2026-01-08T15:26:07.951200Z :DEBUG: [db] [sessionid] [cluster] In Reconnect, ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2026-01-08T15:26:07.951229Z :DEBUG: [db] [sessionid] [cluster] New values: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2026-01-08T15:26:07.951291Z :DEBUG: [db] [sessionid] [cluster] Reconnecting session to cluster cluster in 0.007874s 2026-01-08T15:26:07.951700Z :DEBUG: [db] [sessionid] [cluster] Successfully connected. Initializing session 2026-01-08T15:26:07.952077Z :INFO: [db] [sessionid] [cluster] Server session id: session id 2026-01-08T15:26:07.952123Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2026-01-08T15:26:07.954001Z :DEBUG: [db] [sessionid] [cluster] In Reconnect, ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2026-01-08T15:26:07.954018Z :DEBUG: [db] [sessionid] [cluster] New values: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2026-01-08T15:26:07.954033Z :DEBUG: [db] [sessionid] [cluster] Reconnecting session to cluster cluster in 0.000000s 2026-01-08T15:26:07.954427Z :ERROR: [db] [sessionid] [cluster] Got error. Status: TIMEOUT. Description:
: Error: Failed to establish connection to server. Attempts done: 1 2026-01-08T15:26:07.954476Z :DEBUG: [db] [sessionid] [cluster] In Reconnect, ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2026-01-08T15:26:07.954505Z :DEBUG: [db] [sessionid] [cluster] New values: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2026-01-08T15:26:07.954569Z :DEBUG: [db] [sessionid] [cluster] Reconnecting session to cluster cluster in 0.251418s 2026-01-08T15:26:07.960982Z :DEBUG: [db] [sessionid] [cluster] Successfully connected. Initializing session 2026-01-08T15:26:07.961438Z :INFO: [db] [sessionid] [cluster] Server session id: session id 2026-01-08T15:26:07.961539Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2026-01-08T15:26:07.962507Z :DEBUG: [db] [sessionid] [cluster] In Reconnect, ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2026-01-08T15:26:07.962528Z :DEBUG: [db] [sessionid] [cluster] New values: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2026-01-08T15:26:07.962545Z :DEBUG: [db] [sessionid] [cluster] Reconnecting session to cluster cluster in 0.000000s 2026-01-08T15:26:07.976589Z :ERROR: [db] [sessionid] [cluster] Got error. Status: TIMEOUT. Description:
: Error: Failed to establish connection to server. Attempts done: 1 2026-01-08T15:26:07.976648Z :DEBUG: [db] [sessionid] [cluster] In Reconnect, ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2026-01-08T15:26:07.976673Z :DEBUG: [db] [sessionid] [cluster] New values: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2026-01-08T15:26:07.976760Z :DEBUG: [db] [sessionid] [cluster] Reconnecting session to cluster cluster in 0.253627s 2026-01-08T15:26:07.978658Z :DEBUG: [db] [sessionid] [cluster] Successfully connected. Initializing session 2026-01-08T15:26:07.998593Z :INFO: [db] [sessionid] [cluster] Server session id: session id 2026-01-08T15:26:08.001134Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2026-01-08T15:26:08.002478Z :DEBUG: [db] [sessionid] [cluster] In Reconnect, ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2026-01-08T15:26:08.002504Z :DEBUG: [db] [sessionid] [cluster] New values: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2026-01-08T15:26:08.002523Z :DEBUG: [db] [sessionid] [cluster] Reconnecting session to cluster cluster in 0.000000s 2026-01-08T15:26:08.003105Z :DEBUG: [db] [sessionid] [cluster] Successfully connected. Initializing session 2026-01-08T15:26:08.003697Z :INFO: [db] [sessionid] [cluster] Server session id: 123-session-id-321 2026-01-08T15:26:08.015195Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2026-01-08T15:26:08.015559Z :ERROR: [db] [sessionid] [cluster] Got error. Status: TRANSPORT_UNAVAILABLE. Description:
: Error: GRpc error: (14): 2026-01-08T15:26:08.015599Z :DEBUG: [db] [sessionid] [cluster] In Reconnect, ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2026-01-08T15:26:08.015619Z :DEBUG: [db] [sessionid] [cluster] New values: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2026-01-08T15:26:08.015669Z :DEBUG: [db] [sessionid] [cluster] Reconnecting session to cluster cluster in 0.278066s 2026-01-08T15:26:08.016391Z :DEBUG: [db] [sessionid] [cluster] Abort session to cluster 2026-01-08T15:26:08.017563Z :DEBUG: [db] [sessionid] [cluster] In Reconnect, ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2026-01-08T15:26:08.017624Z :DEBUG: [db] [sessionid] [cluster] New values: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2026-01-08T15:26:08.017678Z :DEBUG: [db] [sessionid] [cluster] Reconnecting session to cluster cluster in 0.000000s 2026-01-08T15:26:08.017982Z :DEBUG: [db] [sessionid] [cluster] Successfully connected. Initializing session 2026-01-08T15:26:08.018397Z :INFO: [db] [sessionid] [cluster] Server session id: 123-session-id-321 2026-01-08T15:26:08.018600Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2026-01-08T15:26:08.018917Z :INFO: [db] [sessionid] [cluster] Confirm partition stream create. Partition stream id: 1. Cluster: "TestCluster". Topic: "TestTopic". Partition: 1. Read offset: (NULL) 2026-01-08T15:26:08.123228Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2026-01-08T15:26:08.124142Z :DEBUG: Decompression task done. Partition/PartitionSessionId: 1 (1-2) 2026-01-08T15:26:08.124209Z :DEBUG: Take Data. Partition 1. Read: {0, 0} (1-1) 2026-01-08T15:26:08.124260Z :DEBUG: Take Data. Partition 1. Read: {1, 0} (2-2) 2026-01-08T15:26:08.124329Z :DEBUG: [db] [sessionid] [cluster] The application data is transferred to the client. Number of messages 2, size 6 bytes 2026-01-08T15:26:08.226504Z :INFO: [db] [sessionid] [cluster] Confirm partition stream destroy. Partition stream id: 1. Cluster: "TestCluster". Topic: "TestTopic". Partition: 1 2026-01-08T15:26:08.226721Z :DEBUG: [db] [sessionid] [cluster] Abort session to cluster 2026-01-08T15:26:08.227820Z :DEBUG: [db] [sessionid] [cluster] In Reconnect, ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2026-01-08T15:26:08.227841Z :DEBUG: [db] [sessionid] [cluster] New values: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2026-01-08T15:26:08.227863Z :DEBUG: [db] [sessionid] [cluster] Reconnecting session to cluster cluster in 0.000000s 2026-01-08T15:26:08.231832Z :DEBUG: [db] [sessionid] [cluster] Successfully connected. Initializing session 2026-01-08T15:26:08.234063Z :INFO: [db] [sessionid] [cluster] Server session id: 123-session-id-321 2026-01-08T15:26:08.234261Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2026-01-08T15:26:08.238534Z :INFO: [db] [sessionid] [cluster] Confirm partition stream create. Partition stream id: 1. Cluster: "TestCluster". Topic: "TestTopic". Partition: 1. Read offset: (NULL) 2026-01-08T15:26:08.341201Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2026-01-08T15:26:08.346515Z :DEBUG: Decompression task done. Partition/PartitionSessionId: 1 (1-2) 2026-01-08T15:26:08.346593Z :DEBUG: Take Data. Partition 1. Read: {0, 0} (1-1) 2026-01-08T15:26:08.346638Z :DEBUG: Take Data. Partition 1. 
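The reconnect delays recorded above (0.007357s and 0.007874s after the first INTERNAL_ERROR results, then 0.251418s, 0.253627s, and 0.278066s after the TIMEOUT and TRANSPORT_UNAVAILABLE ones) are consistent with a jittered exponential backoff between attempts. A self-contained sketch of that generic policy follows; it is an illustration, not the SDK's actual retry scheduler, and the base delay and cap are assumed values.

// Generic jittered exponential backoff of the kind suggested by the
// "Reconnecting session to cluster cluster in N.NNNNNNs" entries above.
#include <algorithm>
#include <chrono>
#include <cmath>
#include <cstdio>
#include <random>

std::chrono::duration<double> NextDelay(int attempt) {
    static std::mt19937 rng{std::random_device{}()};
    const double base = 0.005; // assumed initial delay, seconds
    const double cap = 30.0;   // assumed maximum delay, seconds
    const double exp = std::min(cap, base * std::pow(2.0, attempt));
    std::uniform_real_distribution<double> jitter(0.5 * exp, 1.5 * exp);
    return std::chrono::duration<double>(std::min(cap, jitter(rng)));
}

int main() {
    for (int attempt = 0; attempt < 6; ++attempt) {
        std::printf("attempt %d: retry in %.6fs\n", attempt, NextDelay(attempt).count());
    }
    return 0;
}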
Read: {1, 0} (2-2) 2026-01-08T15:26:08.346716Z :DEBUG: [db] [sessionid] [cluster] Commit offsets [0, 3). Partition stream id: 1 2026-01-08T15:26:08.346829Z :DEBUG: [db] [sessionid] [cluster] The application data is transferred to the client. Number of messages 2, size 6 bytes 2026-01-08T15:26:08.347072Z :DEBUG: [db] [sessionid] [cluster] Committed response: cookies { assign_id: 1 partition_cookie: 1 } 2026-01-08T15:26:08.347133Z :INFO: [db] [sessionid] [cluster] Confirm partition stream destroy. Partition stream id: 1. Cluster: "TestCluster". Topic: "TestTopic". Partition: 1 2026-01-08T15:26:08.347222Z :DEBUG: [db] [sessionid] [cluster] Abort session to cluster |85.1%| [TM] {BAZEL_UPLOAD} ydb/public/sdk/cpp/src/client/persqueue_public/ut/with_offset_ranges_mode_ut/unittest >> ReadSessionImplTest::PacksBatches_BigBatchDecompressWithTwoBatchTasks [GOOD] >> ReadSessionImplTest::PacksBatches_DecompressesOneMessagePerTime >> ReadSessionImplTest::PacksBatches_DecompressesOneMessagePerTime [GOOD] >> ReadSessionImplTest::PartitionStreamStatus [GOOD] >> ReadSessionImplTest::PartitionStreamCallbacks [GOOD] >> TAllocatorSuite32::AllocationWithReclaimTwoThreads [GOOD] >> TAllocatorSuite32::AllocationWithReclaimThreeThreads >> GivenIdRange::Trim [GOOD] >> GivenIdRange::Subtract ------- [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/ut_blobstorage/ut_blob_depot/unittest >> BlobDepot::CheckIntegrity [GOOD] Test command err: Mersenne random seed 2220279346 RandomSeed# 9835853816774599036 Mersenne random seed 1261803875 Mersenne random seed 311329236 Mersenne random seed 2981464356 Mersenne random seed 3845689409 2026-01-08T15:25:22.617365Z 1 00h00m25.012048s :BS_VDISK_PUT ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) TEvVPut: failed to pass the Hull check; id# [15:1:1:0:1:100:1] status# {Status# BLOCKED} Marker# BSVS03 2026-01-08T15:25:22.617523Z 3 00h00m25.012048s :BS_VDISK_PUT ERROR: PDiskId# 1000 VDISK[82000000:_:0:2:0]: (2181038080) TEvVPut: failed to pass the Hull check; id# [15:1:1:0:1:100:3] status# {Status# BLOCKED} Marker# BSVS03 2026-01-08T15:25:22.617577Z 4 00h00m25.012048s :BS_VDISK_PUT ERROR: PDiskId# 1000 VDISK[82000000:_:0:3:0]: (2181038080) TEvVPut: failed to pass the Hull check; id# [15:1:1:0:1:100:3] status# {Status# BLOCKED} Marker# BSVS03 2026-01-08T15:25:22.617629Z 7 00h00m25.012048s :BS_VDISK_PUT ERROR: PDiskId# 1000 VDISK[82000000:_:0:6:0]: (2181038080) TEvVPut: failed to pass the Hull check; id# [15:1:1:0:1:100:3] status# {Status# BLOCKED} Marker# BSVS03 2026-01-08T15:25:22.617680Z 8 00h00m25.012048s :BS_VDISK_PUT ERROR: PDiskId# 1000 VDISK[82000000:_:0:7:0]: (2181038080) TEvVPut: failed to pass the Hull check; id# [15:1:1:0:1:100:3] status# {Status# BLOCKED} Marker# BSVS03 2026-01-08T15:25:22.617732Z 2 00h00m25.012048s :BS_VDISK_PUT ERROR: PDiskId# 1000 VDISK[82000000:_:0:1:0]: (2181038080) TEvVPut: failed to pass the Hull check; id# [15:1:1:0:1:100:2] status# {Status# BLOCKED} Marker# BSVS03 2026-01-08T15:25:22.617796Z 6 00h00m25.012048s :BS_VDISK_PUT ERROR: PDiskId# 1000 VDISK[82000000:_:0:5:0]: (2181038080) TEvVPut: failed to pass the Hull check; id# [15:1:1:0:1:100:1] status# {Status# BLOCKED} Marker# BSVS03 2026-01-08T15:25:22.617845Z 5 00h00m25.012048s :BS_VDISK_PUT ERROR: PDiskId# 1000 VDISK[82000000:_:0:4:0]: (2181038080) TEvVPut: failed to pass the Hull check; id# [15:1:1:0:1:100:1] status# {Status# BLOCKED} Marker# BSVS03 2026-01-08T15:25:22.618120Z 1 00h00m25.012048s :BS_PROXY_PUT ERROR: [1946c10e3eaa5b4b] Result# TEvPutResult {Id# 
[15:1:1:0:1:100:0] Status# BLOCKED StatusFlags# { } ErrorReason# "Got VPutResult status# BLOCKED from VDiskId# [82000000:1:0:0:0]" ApproximateFreeSpaceShare# 0} GroupId# 2181038080 Marker# BPP12 2026-01-08T15:25:22.635580Z 1 00h00m25.012048s :BS_VDISK_PUT ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) TEvVPut: failed to pass the Hull check; id# [15:3:1:0:1:100:2] status# {Status# BLOCKED} Marker# BSVS03 2026-01-08T15:25:22.635762Z 2 00h00m25.012048s :BS_VDISK_PUT ERROR: PDiskId# 1000 VDISK[82000000:_:0:1:0]: (2181038080) TEvVPut: failed to pass the Hull check; id# [15:3:1:0:1:100:3] status# {Status# BLOCKED} Marker# BSVS03 2026-01-08T15:25:22.635823Z 3 00h00m25.012048s :BS_VDISK_PUT ERROR: PDiskId# 1000 VDISK[82000000:_:0:2:0]: (2181038080) TEvVPut: failed to pass the Hull check; id# [15:3:1:0:1:100:3] status# {Status# BLOCKED} Marker# BSVS03 2026-01-08T15:25:22.635878Z 6 00h00m25.012048s :BS_VDISK_PUT ERROR: PDiskId# 1000 VDISK[82000000:_:0:5:0]: (2181038080) TEvVPut: failed to pass the Hull check; id# [15:3:1:0:1:100:3] status# {Status# BLOCKED} Marker# BSVS03 2026-01-08T15:25:22.635930Z 7 00h00m25.012048s :BS_VDISK_PUT ERROR: PDiskId# 1000 VDISK[82000000:_:0:6:0]: (2181038080) TEvVPut: failed to pass the Hull check; id# [15:3:1:0:1:100:3] status# {Status# BLOCKED} Marker# BSVS03 2026-01-08T15:25:22.636002Z 5 00h00m25.012048s :BS_VDISK_PUT ERROR: PDiskId# 1000 VDISK[82000000:_:0:4:0]: (2181038080) TEvVPut: failed to pass the Hull check; id# [15:3:1:0:1:100:1] status# {Status# BLOCKED} Marker# BSVS03 2026-01-08T15:25:22.636060Z 8 00h00m25.012048s :BS_VDISK_PUT ERROR: PDiskId# 1000 VDISK[82000000:_:0:7:0]: (2181038080) TEvVPut: failed to pass the Hull check; id# [15:3:1:0:1:100:1] status# {Status# BLOCKED} Marker# BSVS03 2026-01-08T15:25:22.636116Z 4 00h00m25.012048s :BS_VDISK_PUT ERROR: PDiskId# 1000 VDISK[82000000:_:0:3:0]: (2181038080) TEvVPut: failed to pass the Hull check; id# [15:3:1:0:1:100:1] status# {Status# BLOCKED} Marker# BSVS03 2026-01-08T15:25:22.681904Z 1 00h00m25.012048s :BS_VDISK_PUT ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) TEvVPut: failed to pass the Hull check; id# [16:2:2:0:2:100:3] status# {Status# BLOCKED} Marker# BSVS03 2026-01-08T15:25:22.682124Z 5 00h00m25.012048s :BS_VDISK_PUT ERROR: PDiskId# 1000 VDISK[82000000:_:0:4:0]: (2181038080) TEvVPut: failed to pass the Hull check; id# [16:2:2:0:2:100:3] status# {Status# BLOCKED} Marker# BSVS03 2026-01-08T15:25:22.682181Z 6 00h00m25.012048s :BS_VDISK_PUT ERROR: PDiskId# 1000 VDISK[82000000:_:0:5:0]: (2181038080) TEvVPut: failed to pass the Hull check; id# [16:2:2:0:2:100:3] status# {Status# BLOCKED} Marker# BSVS03 2026-01-08T15:25:22.682245Z 4 00h00m25.012048s :BS_VDISK_PUT ERROR: PDiskId# 1000 VDISK[82000000:_:0:3:0]: (2181038080) TEvVPut: failed to pass the Hull check; id# [16:2:2:0:2:100:2] status# {Status# BLOCKED} Marker# BSVS03 2026-01-08T15:25:22.682301Z 3 00h00m25.012048s :BS_VDISK_PUT ERROR: PDiskId# 1000 VDISK[82000000:_:0:2:0]: (2181038080) TEvVPut: failed to pass the Hull check; id# [16:2:2:0:2:100:1] status# {Status# BLOCKED} Marker# BSVS03 2026-01-08T15:25:22.686378Z 8 00h00m25.012048s :BS_VDISK_PUT ERROR: PDiskId# 1000 VDISK[82000000:_:0:7:0]: (2181038080) TEvVPut: failed to pass the Hull check; id# [16:2:2:0:2:100:1] status# {Status# BLOCKED} Marker# BSVS03 2026-01-08T15:25:22.686437Z 7 00h00m25.012048s :BS_VDISK_PUT ERROR: PDiskId# 1000 VDISK[82000000:_:0:6:0]: (2181038080) TEvVPut: failed to pass the Hull check; id# [16:2:2:0:2:100:1] status# {Status# BLOCKED} Marker# BSVS03 
2026-01-08T15:25:22.686491Z 2 00h00m25.012048s :BS_VDISK_PUT ERROR: PDiskId# 1000 VDISK[82000000:_:0:1:0]: (2181038080) TEvVPut: failed to pass the Hull check; id# [16:2:2:0:2:100:3] status# {Status# BLOCKED} Marker# BSVS03 2026-01-08T15:25:22.686752Z 1 00h00m25.012048s :BS_PROXY_PUT ERROR: [ac7248cc0e411905] Result# TEvPutResult {Id# [16:2:2:0:2:100:0] Status# BLOCKED StatusFlags# { } ErrorReason# "Got VPutResult status# BLOCKED from VDiskId# [82000000:1:0:0:0]" ApproximateFreeSpaceShare# 0} GroupId# 2181038080 Marker# BPP12 Mersenne random seed 2004570547 Read over the barrier, blob id# [15:1:1:0:1:100:0] Read over the barrier, blob id# [15:1:2:0:1:100:0] 2026-01-08T15:25:25.560042Z 1 00h00m25.012048s :BS_HULLRECS CRIT: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) Db# Barriers ValidateGCCmd: decreasing barrier: existing key# [15 0 2 1 soft] barrier# 1:2 new key# [15 0 2 2 soft] barrier# 1:1 2026-01-08T15:25:25.560361Z 2 00h00m25.012048s :BS_HULLRECS CRIT: PDiskId# 1000 VDISK[82000000:_:0:1:0]: (2181038080) Db# Barriers ValidateGCCmd: decreasing barrier: existing key# [15 0 2 1 soft] barrier# 1:2 new key# [15 0 2 2 soft] barrier# 1:1 2026-01-08T15:25:25.560449Z 3 00h00m25.012048s :BS_HULLRECS CRIT: PDiskId# 1000 VDISK[82000000:_:0:2:0]: (2181038080) Db# Barriers ValidateGCCmd: decreasing barrier: existing key# [15 0 2 1 soft] barrier# 1:2 new key# [15 0 2 2 soft] barrier# 1:1 2026-01-08T15:25:25.560531Z 4 00h00m25.012048s :BS_HULLRECS CRIT: PDiskId# 1000 VDISK[82000000:_:0:3:0]: (2181038080) Db# Barriers ValidateGCCmd: decreasing barrier: existing key# [15 0 2 1 soft] barrier# 1:2 new key# [15 0 2 2 soft] barrier# 1:1 2026-01-08T15:25:25.560611Z 5 00h00m25.012048s :BS_HULLRECS CRIT: PDiskId# 1000 VDISK[82000000:_:0:4:0]: (2181038080) Db# Barriers ValidateGCCmd: decreasing barrier: existing key# [15 0 2 1 soft] barrier# 1:2 new key# [15 0 2 2 soft] barrier# 1:1 2026-01-08T15:25:25.560690Z 6 00h00m25.012048s :BS_HULLRECS CRIT: PDiskId# 1000 VDISK[82000000:_:0:5:0]: (2181038080) Db# Barriers ValidateGCCmd: decreasing barrier: existing key# [15 0 2 1 soft] barrier# 1:2 new key# [15 0 2 2 soft] barrier# 1:1 2026-01-08T15:25:25.560771Z 7 00h00m25.012048s :BS_HULLRECS CRIT: PDiskId# 1000 VDISK[82000000:_:0:6:0]: (2181038080) Db# Barriers ValidateGCCmd: decreasing barrier: existing key# [15 0 2 1 soft] barrier# 1:2 new key# [15 0 2 2 soft] barrier# 1:1 2026-01-08T15:25:25.560852Z 8 00h00m25.012048s :BS_HULLRECS CRIT: PDiskId# 1000 VDISK[82000000:_:0:7:0]: (2181038080) Db# Barriers ValidateGCCmd: decreasing barrier: existing key# [15 0 2 1 soft] barrier# 1:2 new key# [15 0 2 2 soft] barrier# 1:1 Put over the barrier, blob id# [15:1:1:0:99:100:0] Put over the barrier, blob id# [15:1:3:0:99:100:0] 2026-01-08T15:25:25.607417Z 1 00h00m25.012048s :BS_HULLRECS CRIT: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) Db# Barriers ValidateGCCmd: decreasing barrier: existing key# [15 0 2 3 hard] barrier# 1:3 new key# [15 0 2 4 hard] barrier# 1:1 2026-01-08T15:25:25.607719Z 2 00h00m25.012048s :BS_HULLRECS CRIT: PDiskId# 1000 VDISK[82000000:_:0:1:0]: (2181038080) Db# Barriers ValidateGCCmd: decreasing barrier: existing key# [15 0 2 3 hard] barrier# 1:3 new key# [15 0 2 4 hard] barrier# 1:1 2026-01-08T15:25:25.607797Z 3 00h00m25.012048s :BS_HULLRECS CRIT: PDiskId# 1000 VDISK[82000000:_:0:2:0]: (2181038080) Db# Barriers ValidateGCCmd: decreasing barrier: existing key# [15 0 2 3 hard] barrier# 1:3 new key# [15 0 2 4 hard] barrier# 1:1 2026-01-08T15:25:25.607869Z 4 00h00m25.012048s :BS_HULLRECS CRIT: 
PDiskId# 1000 VDISK[82000000:_:0:3:0]: (2181038080) Db# Barriers ValidateGCCmd: decreasing barrier: existing key# [15 0 2 3 hard] barrier# 1:3 new key# [15 0 2 4 hard] barrier# 1:1 2026-01-08T15:25:25.607940Z 5 00h00m25.012048s :BS_HULLRECS CRIT: PDiskId# 1000 VDISK[82000000:_:0:4:0]: (2181038080) Db# Barriers ValidateGCCmd: decreasing barrier: existing key# [15 0 2 3 hard] barrier# 1:3 new key# [15 0 2 4 hard] barrier# 1:1 2026-01-08T15:25:25.608012Z 6 00h00m25.012048s :BS_HULLRECS CRIT: PDiskId# 1000 VDISK[82000000:_:0:5:0]: (2181038080) Db# Barriers ValidateGCCmd: decreasing barrier: existing key# [15 0 2 3 hard] barrier# 1:3 new key# [15 0 2 4 hard] barrier# 1:1 2026-01-08T15:25:25.608083Z 7 00h00m25.012048s :BS_HULLRECS CRIT: PDiskId# 1000 VDISK[82000000:_:0:6:0]: (2181038080) Db# Barriers ValidateGCCmd: decreasing barrier: existing key# [15 0 2 3 hard] barrier# 1:3 new key# [15 0 2 4 hard] barrier# 1:1 2026-01-08T15:25:25.608156Z 8 00h00m25.012048s :BS_HULLRECS CRIT: PDiskId# 1000 VDISK[82000000:_:0:7:0]: (2181038080) Db# Barriers ValidateGCCmd: decreasing barrier: existing key# [15 0 2 3 hard] barrier# 1:3 new key# [15 0 2 4 hard] barrier# 1:1 Read over the barrier, blob id# [15:1:5:0:1:100:0] Read over the barrier, blob id# [15:1:6:0:1:100:0] Read over the barrier, blob id# [15:1:19:0:1:100:0] Read over the barrier, blob id# [15:2:1:0:1:100:0] Read over the barrier, blob id# [15:2:2:0:1:100:0] TEvRange returned collected blob with id# [15:1:17:0:1:100:0] TEvRange returned collected blob with id# [15:1:19:0:1:100:0] TEvRange returned collected blob with id# [15:2:1:0:1:100:0] TEvRange returned collected blob with id# [15:2:2:0:1:100:0] TEvRange returned collected blob with id# [15:2:3:0:1:100:0] TEvRange returned collected blob with id# [15:2:4:0:1:100:0] TEvRange returned collected blob with id# [15:2:5:0:1:100:0] TEvRange returned collected blob with id# [15:2:6:0:1:100:0] Read over the barrier, blob id# [100:1:3:0:1:100:0] Read over the barrier, blob id# [100:1:5:0:1:100:0] Read over the barrier, blob id# [100:1:6:0:1:100:0] Read over the barrier, blob id# [100:2:1:0:1:100:0] Read over the barrier, blob id# [100:2:2:0:1:100:0] TEvRange returned collected blob with id# [100:2:2:0:1:100:0] TEvRange returned collected blob with id# [100:2:3:0:1:100:0] TEvRange returned collected blob with id# [100:2:4:0:1:100:0] TEvRange returned collected blob with id# [100:2:5:0:1:100:0] TEvRange returned collected blob with id# [100:2:6:0:1:100:0] Mersenne random seed 186860696 Read over the barrier, blob id# [102:2:1:2:5111717:218:0] Read over the barrier, blob id# [102:2:1:2:5111717:218:0] Read over the barrier, blob id# [102:2:1:2:5111717:218:0] Read over the barrier, blob id# [100:1:1:1:11102562:947:0] Read over the barrier, blob id# [100:1:1:1:111025 ... 
# [17:2:2:2:8252322:501:0] Read over the barrier, blob id# [16:2:1:1:2156865:171:0] Read over the barrier, blob id# [16:4:8:0:8059887:920:0] Read over the barrier, blob id# [16:3:6:1:881185:332:0] Read over the barrier, blob id# [16:2:1:2:15586457:511:0] Read over the barrier, blob id# [16:2:1:0:11549351:553:0] Read over the barrier, blob id# [16:3:5:2:14369243:312:0] Read over the barrier, blob id# [16:2:1:0:1359762:365:0] Read over the barrier, blob id# [16:4:8:1:6759170:80:0] Read over the barrier, blob id# [16:4:8:1:6759170:80:0] Read over the barrier, blob id# [16:3:5:2:14369243:312:0] Read over the barrier, blob id# [16:4:8:1:6759170:80:0] Read over the barrier, blob id# [16:4:8:0:8059887:920:0] Read over the barrier, blob id# [16:2:1:0:6140699:577:0] Read over the barrier, blob id# [16:4:8:1:6759170:80:0] Read over the barrier, blob id# [16:2:1:1:2156865:171:0] TEvRange returned collected blob with id# [17:2:2:2:8252322:501:0] TEvRange returned collected blob with id# [17:2:6:2:5091868:933:0] TEvRange returned collected blob with id# [17:2:9:2:12451474:66:0] TEvRange returned collected blob with id# [17:3:12:2:15787354:163:0] TEvRange returned collected blob with id# [17:3:12:2:16649324:889:0] TEvRange returned collected blob with id# [17:3:15:2:1995045:94:0] TEvRange returned collected blob with id# [17:4:17:2:6069607:840:0] TEvRange returned collected blob with id# [17:4:19:2:14615038:910:0] TEvRange returned collected blob with id# [17:4:20:2:12033255:644:0] Read over the barrier, blob id# [16:4:8:2:9278322:703:0] Read over the barrier, blob id# [16:2:1:0:1359762:365:0] Read over the barrier, blob id# [15:2:1:2:1852326:187:0] Read over the barrier, blob id# [15:3:2:0:15333082:933:0] Read over the barrier, blob id# [15:1:1:0:16187940:165:0] 2026-01-08T15:26:07.329187Z 3 00h00m25.013072s :BS_HULLRECS CRIT: PDiskId# 1000 VDISK[82000000:_:0:2:0]: (2181038080) Db# Barriers ValidateGCCmd: incorrect collect cmd: tabletID# 15 key# [15 0 23 3 soft] existing barrier# 3:1 new barrier# 4:1 2026-01-08T15:26:07.329993Z 1 00h00m25.013072s :BS_HULLRECS CRIT: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) Db# Barriers ValidateGCCmd: incorrect collect cmd: tabletID# 15 key# [15 0 23 3 soft] existing barrier# 3:1 new barrier# 4:1 2026-01-08T15:26:07.330164Z 2 00h00m25.013072s :BS_HULLRECS CRIT: PDiskId# 1000 VDISK[82000000:_:0:1:0]: (2181038080) Db# Barriers ValidateGCCmd: incorrect collect cmd: tabletID# 15 key# [15 0 23 3 soft] existing barrier# 3:1 new barrier# 4:1 2026-01-08T15:26:07.330382Z 4 00h00m25.013072s :BS_HULLRECS CRIT: PDiskId# 1000 VDISK[82000000:_:0:3:0]: (2181038080) Db# Barriers ValidateGCCmd: incorrect collect cmd: tabletID# 15 key# [15 0 23 3 soft] existing barrier# 3:1 new barrier# 4:1 2026-01-08T15:26:07.330531Z 5 00h00m25.013072s :BS_HULLRECS CRIT: PDiskId# 1000 VDISK[82000000:_:0:4:0]: (2181038080) Db# Barriers ValidateGCCmd: incorrect collect cmd: tabletID# 15 key# [15 0 23 3 soft] existing barrier# 3:1 new barrier# 4:1 2026-01-08T15:26:07.330675Z 6 00h00m25.013072s :BS_HULLRECS CRIT: PDiskId# 1000 VDISK[82000000:_:0:5:0]: (2181038080) Db# Barriers ValidateGCCmd: incorrect collect cmd: tabletID# 15 key# [15 0 23 3 soft] existing barrier# 3:1 new barrier# 4:1 2026-01-08T15:26:07.330835Z 7 00h00m25.013072s :BS_HULLRECS CRIT: PDiskId# 1000 VDISK[82000000:_:0:6:0]: (2181038080) Db# Barriers ValidateGCCmd: incorrect collect cmd: tabletID# 15 key# [15 0 23 3 soft] existing barrier# 3:1 new barrier# 4:1 2026-01-08T15:26:07.331012Z 8 00h00m25.013072s :BS_HULLRECS CRIT: 
PDiskId# 1000 VDISK[82000000:_:0:7:0]: (2181038080) Db# Barriers ValidateGCCmd: incorrect collect cmd: tabletID# 15 key# [15 0 23 3 soft] existing barrier# 3:1 new barrier# 4:1 Read over the barrier, blob id# [15:3:2:0:15333082:933:0] Read over the barrier, blob id# [15:2:1:2:15195732:937:0] Read over the barrier, blob id# [15:2:1:1:8114685:605:0] TEvRange returned collected blob with id# [17:1:1:1:11163165:497:0] TEvRange returned collected blob with id# [17:2:7:1:10250210:782:0] 2026-01-08T15:26:07.392659Z 3 00h00m25.013072s :BS_HULLRECS CRIT: PDiskId# 1000 VDISK[82000000:_:0:2:0]: (2181038080) Db# Barriers ValidateGCCmd: decreasing barrier: existing key# [15 0 23 1 hard] barrier# 2:0 new key# [15 0 23 5 hard] barrier# 1:0 2026-01-08T15:26:07.393525Z 1 00h00m25.013072s :BS_HULLRECS CRIT: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) Db# Barriers ValidateGCCmd: decreasing barrier: existing key# [15 0 23 1 hard] barrier# 2:0 new key# [15 0 23 5 hard] barrier# 1:0 2026-01-08T15:26:07.393701Z 2 00h00m25.013072s :BS_HULLRECS CRIT: PDiskId# 1000 VDISK[82000000:_:0:1:0]: (2181038080) Db# Barriers ValidateGCCmd: decreasing barrier: existing key# [15 0 23 1 hard] barrier# 2:0 new key# [15 0 23 5 hard] barrier# 1:0 2026-01-08T15:26:07.393891Z 4 00h00m25.013072s :BS_HULLRECS CRIT: PDiskId# 1000 VDISK[82000000:_:0:3:0]: (2181038080) Db# Barriers ValidateGCCmd: decreasing barrier: existing key# [15 0 23 1 hard] barrier# 2:0 new key# [15 0 23 5 hard] barrier# 1:0 2026-01-08T15:26:07.394073Z 5 00h00m25.013072s :BS_HULLRECS CRIT: PDiskId# 1000 VDISK[82000000:_:0:4:0]: (2181038080) Db# Barriers ValidateGCCmd: decreasing barrier: existing key# [15 0 23 1 hard] barrier# 2:0 new key# [15 0 23 5 hard] barrier# 1:0 2026-01-08T15:26:07.394223Z 6 00h00m25.013072s :BS_HULLRECS CRIT: PDiskId# 1000 VDISK[82000000:_:0:5:0]: (2181038080) Db# Barriers ValidateGCCmd: decreasing barrier: existing key# [15 0 23 1 hard] barrier# 2:0 new key# [15 0 23 5 hard] barrier# 1:0 2026-01-08T15:26:07.394422Z 7 00h00m25.013072s :BS_HULLRECS CRIT: PDiskId# 1000 VDISK[82000000:_:0:6:0]: (2181038080) Db# Barriers ValidateGCCmd: decreasing barrier: existing key# [15 0 23 1 hard] barrier# 2:0 new key# [15 0 23 5 hard] barrier# 1:0 2026-01-08T15:26:07.394594Z 8 00h00m25.013072s :BS_HULLRECS CRIT: PDiskId# 1000 VDISK[82000000:_:0:7:0]: (2181038080) Db# Barriers ValidateGCCmd: decreasing barrier: existing key# [15 0 23 1 hard] barrier# 2:0 new key# [15 0 23 5 hard] barrier# 1:0 Read over the barrier, blob id# [17:3:12:2:16649324:889:0] Read over the barrier, blob id# [17:4:16:1:14420876:681:0] Read over the barrier, blob id# [17:3:12:2:16649324:889:0] Read over the barrier, blob id# [17:4:17:2:6069607:840:0] Read over the barrier, blob id# [17:4:16:1:14420876:681:0] Read over the barrier, blob id# [17:4:20:1:13337815:732:0] TEvRange returned collected blob with id# [16:2:1:1:2156865:171:0] TEvRange returned collected blob with id# [16:2:1:1:15580328:123:0] TEvRange returned collected blob with id# [16:3:6:1:881185:332:0] TEvRange returned collected blob with id# [16:4:8:1:6759170:80:0] TEvRange returned collected blob with id# [16:4:8:2:3925696:921:0] TEvRange returned collected blob with id# [16:4:8:2:9278322:703:0] TEvRange returned collected blob with id# [16:4:9:2:9733785:286:0] Read over the barrier, blob id# [16:3:6:1:881185:332:0] Read over the barrier, blob id# [16:4:8:0:8059887:920:0] Read over the barrier, blob id# [16:2:1:1:15580328:123:0] TEvRange returned collected blob with id# [17:1:1:1:11163165:497:0] TEvRange 
returned collected blob with id# [17:2:7:1:10250210:782:0] TEvRange returned collected blob with id# [17:4:16:1:14420876:681:0] TEvRange returned collected blob with id# [17:4:20:1:6575699:266:0] TEvRange returned collected blob with id# [17:4:20:1:12834731:882:0] TEvRange returned collected blob with id# [17:4:20:1:13337815:732:0] TEvRange returned collected blob with id# [17:3:12:2:15787354:163:0] TEvRange returned collected blob with id# [17:3:12:2:16649324:889:0] TEvRange returned collected blob with id# [17:3:15:2:1995045:94:0] TEvRange returned collected blob with id# [17:4:17:2:6069607:840:0] TEvRange returned collected blob with id# [17:4:19:2:14615038:910:0] TEvRange returned collected blob with id# [17:4:20:2:12033255:644:0] TEvRange returned collected blob with id# [17:6:23:2:9849771:111:0] Read over the barrier, blob id# [15:1:1:1:5142262:482:0] TEvRange returned collected blob with id# [16:2:1:1:2156865:171:0] TEvRange returned collected blob with id# [16:2:1:1:15580328:123:0] TEvRange returned collected blob with id# [16:3:6:1:881185:332:0] TEvRange returned collected blob with id# [16:4:8:1:6759170:80:0] TEvRange returned collected blob with id# [16:4:8:2:3925696:921:0] TEvRange returned collected blob with id# [16:4:8:2:9278322:703:0] TEvRange returned collected blob with id# [16:4:9:2:9733785:286:0] Read over the barrier, blob id# [15:2:1:2:15195732:937:0] Read over the barrier, blob id# [15:2:1:2:15195732:937:0] Read over the barrier, blob id# [15:1:1:0:16187940:165:0] Read over the barrier, blob id# [15:1:1:1:5142262:482:0] Read over the barrier, blob id# [15:3:2:0:15333082:933:0] 2026-01-08T15:26:07.724005Z 4 00h00m25.013072s :BS_HULLRECS CRIT: PDiskId# 1000 VDISK[82000000:_:0:3:0]: (2181038080) Db# Barriers ValidateGCCmd: decreasing barrier: existing key# [15 0 23 8 soft] barrier# 4:0 new key# [15 0 29 0 soft] barrier# 3:1 2026-01-08T15:26:07.724827Z 1 00h00m25.013072s :BS_HULLRECS CRIT: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) Db# Barriers ValidateGCCmd: decreasing barrier: existing key# [15 0 23 8 soft] barrier# 4:0 new key# [15 0 29 0 soft] barrier# 3:1 2026-01-08T15:26:07.725024Z 2 00h00m25.013072s :BS_HULLRECS CRIT: PDiskId# 1000 VDISK[82000000:_:0:1:0]: (2181038080) Db# Barriers ValidateGCCmd: decreasing barrier: existing key# [15 0 23 8 soft] barrier# 4:0 new key# [15 0 29 0 soft] barrier# 3:1 2026-01-08T15:26:07.725200Z 3 00h00m25.013072s :BS_HULLRECS CRIT: PDiskId# 1000 VDISK[82000000:_:0:2:0]: (2181038080) Db# Barriers ValidateGCCmd: decreasing barrier: existing key# [15 0 23 8 soft] barrier# 4:0 new key# [15 0 29 0 soft] barrier# 3:1 2026-01-08T15:26:07.725401Z 5 00h00m25.013072s :BS_HULLRECS CRIT: PDiskId# 1000 VDISK[82000000:_:0:4:0]: (2181038080) Db# Barriers ValidateGCCmd: decreasing barrier: existing key# [15 0 23 8 soft] barrier# 4:0 new key# [15 0 29 0 soft] barrier# 3:1 2026-01-08T15:26:07.725589Z 6 00h00m25.013072s :BS_HULLRECS CRIT: PDiskId# 1000 VDISK[82000000:_:0:5:0]: (2181038080) Db# Barriers ValidateGCCmd: decreasing barrier: existing key# [15 0 23 8 soft] barrier# 4:0 new key# [15 0 29 0 soft] barrier# 3:1 2026-01-08T15:26:07.725780Z 7 00h00m25.013072s :BS_HULLRECS CRIT: PDiskId# 1000 VDISK[82000000:_:0:6:0]: (2181038080) Db# Barriers ValidateGCCmd: decreasing barrier: existing key# [15 0 23 8 soft] barrier# 4:0 new key# [15 0 29 0 soft] barrier# 3:1 2026-01-08T15:26:07.725974Z 8 00h00m25.013072s :BS_HULLRECS CRIT: PDiskId# 1000 VDISK[82000000:_:0:7:0]: (2181038080) Db# Barriers ValidateGCCmd: decreasing barrier: existing key# [15 
0 23 8 soft] barrier# 4:0 new key# [15 0 29 0 soft] barrier# 3:1 Mersenne random seed 597596185 ErrorReason DataInfo Disks: 0: [82000000:1:0:2:0] 1: [82000000:1:0:3:0] 2: [82000000:1:0:4:0] 3: [82000000:1:0:5:0] 4: [82000000:1:0:6:0] 5: [82000000:1:0:7:0] 6: [82000000:1:0:0:0] 7: [82000000:1:0:1:0] Layout info: part 1: ver0 disks [ 0 ] part 2: ver0 disks [ 1 ] part 3: ver0 disks [ 2 ] part 4: ver0 disks [ 3 ] part 5: ver0 disks [ 4 ] part 6: ver0 disks [ 5 ] Erasure info: { part 1 disks [ 0 ]; part 2 disks [ 1 ]; part 3 disks [ 2 ]; part 4 disks [ 3 ]; } CHECK part 5 disks [ 4 ] -> OK { part 1 disks [ 0 ]; part 2 disks [ 1 ]; part 3 disks [ 2 ]; part 4 disks [ 3 ]; } CHECK part 6 disks [ 5 ] -> OK ErrorReason DataInfo [72075186224037888:1:1:2:1:100:0] Disks: 0: [82000000:1:0:5:0] 1: [82000000:1:0:6:0] 2: [82000000:1:0:7:0] 3: [82000000:1:0:0:0] 4: [82000000:1:0:1:0] 5: [82000000:1:0:2:0] 6: [82000000:1:0:3:0] 7: [82000000:1:0:4:0] Layout info: part 1: ver0 disks [ 0 ] part 2: ver0 disks [ 1 ] part 3: ver0 disks [ 2 ] part 4: ver0 disks [ 3 ] part 5: ver0 disks [ 4 ] part 6: ver0 disks [ 5 ] Erasure info: { part 1 disks [ 0 ]; part 2 disks [ 1 ]; part 3 disks [ 2 ]; part 4 disks [ 3 ]; } CHECK part 5 disks [ 4 ] -> OK { part 1 disks [ 0 ]; part 2 disks [ 1 ]; part 3 disks [ 2 ]; part 4 disks [ 3 ]; } CHECK part 6 disks [ 5 ] -> OK |85.1%| [TM] {BAZEL_UPLOAD} ydb/core/blobstorage/ut_blobstorage/ut_blob_depot/unittest >> GivenIdRange::Subtract [GOOD] >> GivenIdRange::Points ------- [TM] {asan, default-linux-x86_64, release} ydb/public/sdk/cpp/src/client/persqueue_public/ut/with_offset_ranges_mode_ut/unittest >> ReadSessionImplTest::PartitionStreamCallbacks [GOOD] Test command err: 2026-01-08T15:26:08.128240Z :DEBUG: [db] [sessionid] [cluster] In Reconnect, ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2026-01-08T15:26:08.128276Z :DEBUG: [db] [sessionid] [cluster] New values: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2026-01-08T15:26:08.128297Z :DEBUG: [db] [sessionid] [cluster] Reconnecting session to cluster cluster in 0.000000s 2026-01-08T15:26:08.128651Z :DEBUG: [db] [sessionid] [cluster] Successfully connected. Initializing session 2026-01-08T15:26:08.129188Z :INFO: [db] [sessionid] [cluster] Server session id: 123-session-id-321 2026-01-08T15:26:08.140552Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2026-01-08T15:26:08.141304Z :INFO: [db] [sessionid] [cluster] Confirm partition stream create. Partition stream id: 1. Cluster: "TestCluster". Topic: "TestTopic". Partition: 1. Read offset: (NULL) 2026-01-08T15:26:08.142065Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 Post function 2026-01-08T15:26:08.142533Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 Post function 2026-01-08T15:26:08.146491Z :DEBUG: Decompression task done. Partition/PartitionSessionId: 1 (2-2) 2026-01-08T15:26:08.146596Z :DEBUG: Decompression task done. Partition/PartitionSessionId: 1 (1-1) 2026-01-08T15:26:08.146710Z :DEBUG: Take Data. Partition 1. Read: {0, 0} (1-1) 2026-01-08T15:26:08.146751Z :DEBUG: Take Data. Partition 1. Read: {0, 0} (2-2) 2026-01-08T15:26:08.146788Z :DEBUG: [db] [sessionid] [cluster] The application data is transferred to the client. Number of messages 1, size 8 bytes 2026-01-08T15:26:08.146830Z :DEBUG: [db] [sessionid] [cluster] The application data is transferred to the client. 
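The commit exchanges in these read-session dumps use half-open offset ranges: a client-side "Commit offsets [3, 12). Partition stream id: 1" turns into "offset_ranges { assign_id: 1 start_offset: 3 end_offset: 12 }" on the wire, with end_offset exclusive and a start_offset of 0 left out of the printed request, as protobuf default-value fields are. A generic illustration of coalescing consumed offsets into such [start, end) pairs follows; it is a sketch of the interval arithmetic only, not the read-session internals.

// Coalesce consumed message offsets into the half-open [start, end) ranges
// used by the commit requests shown in the surrounding log entries.
#include <algorithm>
#include <cstdint>
#include <cstdio>
#include <utility>
#include <vector>

std::vector<std::pair<uint64_t, uint64_t>> Coalesce(std::vector<uint64_t> offsets) {
    std::sort(offsets.begin(), offsets.end());
    std::vector<std::pair<uint64_t, uint64_t>> ranges;
    for (uint64_t off : offsets) {
        if (!ranges.empty() && ranges.back().second == off) {
            ++ranges.back().second; // extend the exclusive end of [start, end)
        } else {
            ranges.emplace_back(off, off + 1);
        }
    }
    return ranges;
}

int main() {
    // Offsets 3..11 consumed out of order, as a batch might deliver them.
    for (auto [s, e] : Coalesce({5, 3, 4, 6, 7, 8, 9, 10, 11})) {
        std::printf("offset_ranges { start_offset: %llu end_offset: %llu }\n",
                    (unsigned long long)s, (unsigned long long)e);
    }
    return 0;
}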
Number of messages 1, size 8 bytes 2026-01-08T15:26:08.154761Z :DEBUG: [db] [sessionid] [cluster] In Reconnect, ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2026-01-08T15:26:08.154787Z :DEBUG: [db] [sessionid] [cluster] New values: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2026-01-08T15:26:08.154822Z :DEBUG: [db] [sessionid] [cluster] Reconnecting session to cluster cluster in 0.000000s 2026-01-08T15:26:08.155233Z :DEBUG: [db] [sessionid] [cluster] Successfully connected. Initializing session 2026-01-08T15:26:08.155676Z :INFO: [db] [sessionid] [cluster] Server session id: 123-session-id-321 2026-01-08T15:26:08.155898Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2026-01-08T15:26:08.159769Z :INFO: [db] [sessionid] [cluster] Confirm partition stream create. Partition stream id: 1. Cluster: "TestCluster". Topic: "TestTopic". Partition: 1. Read offset: (NULL) Message data size: 10 Compressed message data size: 30 2026-01-08T15:26:08.160706Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 Post function 2026-01-08T15:26:08.160901Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 Post function Getting new event 2026-01-08T15:26:08.161198Z :DEBUG: Decompression task done. Partition/PartitionSessionId: 1 (5-8) 2026-01-08T15:26:08.161471Z :DEBUG: Decompression task done. Partition/PartitionSessionId: 1 (1-4) 2026-01-08T15:26:08.163524Z :DEBUG: Take Data. Partition 1. Read: {0, 0} (1-1) 2026-01-08T15:26:08.163564Z :DEBUG: Take Data. Partition 1. Read: {0, 1} (2-2) 2026-01-08T15:26:08.163601Z :DEBUG: [db] [sessionid] [cluster] The application data is transferred to the client. Number of messages 2, size 20 bytes DataReceived { PartitionStreamId: 1 PartitionId: 1 Message { Data: ..10 bytes.. Partition stream id: 1 Cluster: "TestCluster". Topic: "TestTopic" Partition: 1 PartitionKey: "" Information: { Offset: 1 SeqNo: 42 MessageGroupId: "src_id" CreateTime: 1970-01-01T00:00:00.042000Z WriteTime: 1970-01-01T00:02:03.000000Z Ip: "127.0.0.1" UncompressedSize: 0 Meta: { "k1": "v1", "k": "v" } } } Message { Data: ..10 bytes.. Partition stream id: 1 Cluster: "TestCluster". Topic: "TestTopic" Partition: 1 PartitionKey: "" Information: { Offset: 2 SeqNo: 43 MessageGroupId: "src_id" CreateTime: 1970-01-01T00:00:00.042000Z WriteTime: 1970-01-01T00:02:03.000000Z Ip: "127.0.0.1" UncompressedSize: 0 Meta: { "k1": "v1", "k": "v" } } } } 2026-01-08T15:26:08.163745Z :DEBUG: [db] [sessionid] [cluster] Commit offsets [0, 3). Partition stream id: 1 GOT RANGE 0 3 Getting new event 2026-01-08T15:26:08.163862Z :DEBUG: Take Data. Partition 1. Read: {0, 2} (3-3) 2026-01-08T15:26:08.163890Z :DEBUG: Take Data. Partition 1. Read: {1, 0} (4-4) 2026-01-08T15:26:08.163910Z :DEBUG: [db] [sessionid] [cluster] The application data is transferred to the client. Number of messages 2, size 20 bytes DataReceived { PartitionStreamId: 1 PartitionId: 1 Message { Data: ..10 bytes.. Partition stream id: 1 Cluster: "TestCluster". Topic: "TestTopic" Partition: 1 PartitionKey: "" Information: { Offset: 3 SeqNo: 44 MessageGroupId: "src_id" CreateTime: 1970-01-01T00:00:00.042000Z WriteTime: 1970-01-01T00:02:03.000000Z Ip: "127.0.0.1" UncompressedSize: 0 Meta: { "k1": "v1", "k": "v" } } } Message { Data: ..10 bytes.. Partition stream id: 1 Cluster: "TestCluster". 
Topic: "TestTopic" Partition: 1 PartitionKey: "" Information: { Offset: 4 SeqNo: 45 MessageGroupId: "src_id_2" CreateTime: 1970-01-01T00:00:00.042000Z WriteTime: 1970-01-01T00:05:21.000000Z Ip: "1.0.0.127" UncompressedSize: 0 Meta: { "v1": "k1", "v": "k" } } } } 2026-01-08T15:26:08.164038Z :DEBUG: [db] [sessionid] [cluster] Commit offsets [3, 5). Partition stream id: 1 GOT RANGE 3 5 Getting new event 2026-01-08T15:26:08.164101Z :DEBUG: Take Data. Partition 1. Read: {0, 0} (5-5) 2026-01-08T15:26:08.164132Z :DEBUG: Take Data. Partition 1. Read: {0, 1} (6-6) 2026-01-08T15:26:08.164153Z :DEBUG: [db] [sessionid] [cluster] The application data is transferred to the client. Number of messages 2, size 20 bytes DataReceived { PartitionStreamId: 1 PartitionId: 1 Message { Data: ..10 bytes.. Partition stream id: 1 Cluster: "TestCluster". Topic: "TestTopic" Partition: 1 PartitionKey: "" Information: { Offset: 5 SeqNo: 46 MessageGroupId: "src_id" CreateTime: 1970-01-01T00:00:00.042000Z WriteTime: 1970-01-01T00:02:03.000000Z Ip: "127.0.0.1" UncompressedSize: 0 Meta: { "k1": "v1", "k": "v" } } } Message { Data: ..10 bytes.. Partition stream id: 1 Cluster: "TestCluster". Topic: "TestTopic" Partition: 1 PartitionKey: "" Information: { Offset: 6 SeqNo: 47 MessageGroupId: "src_id" CreateTime: 1970-01-01T00:00:00.042000Z WriteTime: 1970-01-01T00:02:03.000000Z Ip: "127.0.0.1" UncompressedSize: 0 Meta: { "k1": "v1", "k": "v" } } } } 2026-01-08T15:26:08.164226Z :DEBUG: [db] [sessionid] [cluster] Commit offsets [5, 7). Partition stream id: 1 GOT RANGE 5 7 Getting new event 2026-01-08T15:26:08.164269Z :DEBUG: Take Data. Partition 1. Read: {0, 2} (7-7) 2026-01-08T15:26:08.164288Z :DEBUG: Take Data. Partition 1. Read: {1, 0} (8-8) 2026-01-08T15:26:08.164308Z :DEBUG: [db] [sessionid] [cluster] The application data is transferred to the client. Number of messages 2, size 20 bytes DataReceived { PartitionStreamId: 1 PartitionId: 1 Message { Data: ..10 bytes.. Partition stream id: 1 Cluster: "TestCluster". Topic: "TestTopic" Partition: 1 PartitionKey: "" Information: { Offset: 7 SeqNo: 48 MessageGroupId: "src_id" CreateTime: 1970-01-01T00:00:00.042000Z WriteTime: 1970-01-01T00:02:03.000000Z Ip: "127.0.0.1" UncompressedSize: 0 Meta: { "k1": "v1", "k": "v" } } } Message { Data: ..10 bytes.. Partition stream id: 1 Cluster: "TestCluster". Topic: "TestTopic" Partition: 1 PartitionKey: "" Information: { Offset: 8 SeqNo: 49 MessageGroupId: "src_id_2" CreateTime: 1970-01-01T00:00:00.042000Z WriteTime: 1970-01-01T00:05:21.000000Z Ip: "1.0.0.127" UncompressedSize: 0 Meta: { "v1": "k1", "v": "k" } } } } 2026-01-08T15:26:08.164390Z :DEBUG: [db] [sessionid] [cluster] Commit offsets [7, 9). Partition stream id: 1 GOT RANGE 7 9 2026-01-08T15:26:08.165857Z :DEBUG: [db] [sessionid] [cluster] In Reconnect, ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2026-01-08T15:26:08.165884Z :DEBUG: [db] [sessionid] [cluster] New values: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2026-01-08T15:26:08.165904Z :DEBUG: [db] [sessionid] [cluster] Reconnecting session to cluster cluster in 0.000000s 2026-01-08T15:26:08.178285Z :DEBUG: [db] [sessionid] [cluster] Successfully connected. Initializing session 2026-01-08T15:26:08.179238Z :INFO: [db] [sessionid] [cluster] Server session id: 123-session-id-321 2026-01-08T15:26:08.179432Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2026-01-08T15:26:08.179714Z :INFO: [db] [sessionid] [cluster] Confirm partition stream create. 
Partition stream id: 1. Cluster: "TestCluster". Topic: "TestTopic". Partition: 1. Read offset: (NULL) Message data size: 100 Compressed message data size: 91 2026-01-08T15:26:08.180806Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 Post function 2026-01-08T15:26:08.181031Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 Post function Getting new event 2026-01-08T15:26:08.182143Z :DEBUG: Decompression task done. Partition/PartitionSessionId: 1 (5-8) 2026-01-08T15:26:08.182351Z :DEBUG: Decompression task done. Partition/PartitionSessionId: 1 (1-4) 2026-01-08T15:26:08.182466Z :DEBUG: Take Data. Partition 1. Read: {0, 0} (1-1) 2026-01-08T15:26:08.182507Z :DEBUG: [db] [sessionid] [cluster] The application data is transferred to the client. Number of messages 1, size 100 bytes DataReceived { PartitionStreamId: 1 PartitionId: 1 Message { Data: ..100 bytes.. Partition stream id: 1 Cluster: "TestCluster". Topic: "TestTopic" Partition: 1 PartitionKey: "" Information: { Offset: 1 SeqNo: 42 MessageGroupId: "src_id" CreateTime: 1970-01-01T00:00:00.042000Z WriteTime: 1970-01-01T00:02:03.000000Z Ip: "127.0.0.1" UncompressedSize: 0 Meta: { "k1": "v1", "k": "v" } } } } 2026-01-08T15:26:08.182597Z :DEBUG: [db] [sessionid] [cluster] Commit offsets [0, 2). Partition stream id: 1 GOT RANGE 0 2 Getting new event 2026-01-08T15:26:08.182670Z :DEBUG: Take Data. Partition 1. Read: {0, 1} (2-2) 2026-01-08T15:26:08.182688Z :DEBUG: [db] [sessionid] [cluster] The application data is transferred to the client. Number of messages 1, size 100 bytes DataReceived { PartitionStreamId: 1 PartitionId: 1 Message { Data: ..100 bytes.. Partition stream id: 1 Cluster: "TestCluster". Topic: "TestTopic" Partition: 1 PartitionKey: "" Information: { Offset: 2 SeqNo: 43 MessageGroupId: "src_id" CreateTime: 1970-01-01T00:00:00.042000Z WriteTime: 1970-01-01T00:02:03.000000Z Ip: "127.0.0.1" UncompressedSize: 0 Meta: { "k1": "v1", "k": "v" } } } } 2026-01-08T15:26:08.182734Z :DEBUG: [db] [sessionid] [cluster] Commit offsets [2, 3). Partition stream id: 1 GOT RANGE 2 3 Getting new event 2026-01-08T15:26:08.182775Z :DEBUG: Take Data. Partition 1. Read: {0, 2} (3-3) 2026-01-08T15:26:08.182794Z :DEBUG: [db] [sessionid] [cluster] The application data is transferred to the client. Number of messages 1, size 100 bytes DataReceived { PartitionStreamId: 1 PartitionId: 1 Message { Data: ..100 bytes.. Partition stream id: 1 Cluster: "TestCluster". Topic: "TestTopic" Partition: 1 PartitionKey: "" Information: { Offset: 3 SeqNo: 44 MessageGroupId: "src_id" CreateTime: 1970-01-01T00:00:00.042000Z WriteTime: 1970-01-01T00:02:03.000000Z Ip: "127.0.0.1" UncompressedSize: 0 Meta: { "k1": "v1", "k": "v" } } } } 2026-01-08T15:26:08.182848Z :DEBUG: [db] [sessionid] [cluster] Commit offsets [3, 4). Partition stream id: 1 GOT RANGE 3 4 Getting new event 2026-01-08T15:26:08.182899Z :DEBUG: Take Data. Partition 1. Read: {1, 0} (4-4) 2026-01-08T15:26:08.182916Z :DEBUG: [db] [sessionid] [cluster] The application data ... er". Topic: "TestTopic" Partition: 1 PartitionKey: "" Information: { Offset: 190 SeqNo: 231 MessageGroupId: "src_id_2" CreateTime: 1970-01-01T00:00:00.042000Z WriteTime: 1970-01-01T00:05:21.000000Z Ip: "1.0.0.127" UncompressedSize: 0 Meta: { "v1": "k1", "v": "k" } } } Message { Data: ..1000000 bytes.. Partition stream id: 1 Cluster: "TestCluster". 
Topic: "TestTopic" Partition: 1 PartitionKey: "" Information: { Offset: 191 SeqNo: 232 MessageGroupId: "src_id_2" CreateTime: 1970-01-01T00:00:00.042000Z WriteTime: 1970-01-01T00:05:21.000000Z Ip: "1.0.0.127" UncompressedSize: 0 Meta: { "v1": "k1", "v": "k" } } } Message { Data: ..1000000 bytes.. Partition stream id: 1 Cluster: "TestCluster". Topic: "TestTopic" Partition: 1 PartitionKey: "" Information: { Offset: 192 SeqNo: 233 MessageGroupId: "src_id_2" CreateTime: 1970-01-01T00:00:00.042000Z WriteTime: 1970-01-01T00:05:21.000000Z Ip: "1.0.0.127" UncompressedSize: 0 Meta: { "v1": "k1", "v": "k" } } } Message { Data: ..1000000 bytes.. Partition stream id: 1 Cluster: "TestCluster". Topic: "TestTopic" Partition: 1 PartitionKey: "" Information: { Offset: 193 SeqNo: 234 MessageGroupId: "src_id_2" CreateTime: 1970-01-01T00:00:00.042000Z WriteTime: 1970-01-01T00:05:21.000000Z Ip: "1.0.0.127" UncompressedSize: 0 Meta: { "v1": "k1", "v": "k" } } } Message { Data: ..1000000 bytes.. Partition stream id: 1 Cluster: "TestCluster". Topic: "TestTopic" Partition: 1 PartitionKey: "" Information: { Offset: 194 SeqNo: 235 MessageGroupId: "src_id_2" CreateTime: 1970-01-01T00:00:00.042000Z WriteTime: 1970-01-01T00:05:21.000000Z Ip: "1.0.0.127" UncompressedSize: 0 Meta: { "v1": "k1", "v": "k" } } } Message { Data: ..1000000 bytes.. Partition stream id: 1 Cluster: "TestCluster". Topic: "TestTopic" Partition: 1 PartitionKey: "" Information: { Offset: 195 SeqNo: 236 MessageGroupId: "src_id_2" CreateTime: 1970-01-01T00:00:00.042000Z WriteTime: 1970-01-01T00:05:21.000000Z Ip: "1.0.0.127" UncompressedSize: 0 Meta: { "v1": "k1", "v": "k" } } } Message { Data: ..1000000 bytes.. Partition stream id: 1 Cluster: "TestCluster". Topic: "TestTopic" Partition: 1 PartitionKey: "" Information: { Offset: 196 SeqNo: 237 MessageGroupId: "src_id_2" CreateTime: 1970-01-01T00:00:00.042000Z WriteTime: 1970-01-01T00:05:21.000000Z Ip: "1.0.0.127" UncompressedSize: 0 Meta: { "v1": "k1", "v": "k" } } } Message { Data: ..1000000 bytes.. Partition stream id: 1 Cluster: "TestCluster". Topic: "TestTopic" Partition: 1 PartitionKey: "" Information: { Offset: 197 SeqNo: 238 MessageGroupId: "src_id_2" CreateTime: 1970-01-01T00:00:00.042000Z WriteTime: 1970-01-01T00:05:21.000000Z Ip: "1.0.0.127" UncompressedSize: 0 Meta: { "v1": "k1", "v": "k" } } } Message { Data: ..1000000 bytes.. Partition stream id: 1 Cluster: "TestCluster". Topic: "TestTopic" Partition: 1 PartitionKey: "" Information: { Offset: 198 SeqNo: 239 MessageGroupId: "src_id_2" CreateTime: 1970-01-01T00:00:00.042000Z WriteTime: 1970-01-01T00:05:21.000000Z Ip: "1.0.0.127" UncompressedSize: 0 Meta: { "v1": "k1", "v": "k" } } } Message { Data: ..1000000 bytes.. Partition stream id: 1 Cluster: "TestCluster". Topic: "TestTopic" Partition: 1 PartitionKey: "" Information: { Offset: 199 SeqNo: 240 MessageGroupId: "src_id_2" CreateTime: 1970-01-01T00:00:00.042000Z WriteTime: 1970-01-01T00:05:21.000000Z Ip: "1.0.0.127" UncompressedSize: 0 Meta: { "v1": "k1", "v": "k" } } } Message { Data: ..1000000 bytes.. Partition stream id: 1 Cluster: "TestCluster". Topic: "TestTopic" Partition: 1 PartitionKey: "" Information: { Offset: 200 SeqNo: 241 MessageGroupId: "src_id_2" CreateTime: 1970-01-01T00:00:00.042000Z WriteTime: 1970-01-01T00:05:21.000000Z Ip: "1.0.0.127" UncompressedSize: 0 Meta: { "v1": "k1", "v": "k" } } } } 2026-01-08T15:26:10.181452Z :DEBUG: [db] [sessionid] [cluster] Commit offsets [0, 201). 
Partition stream id: 1 GOT RANGE 0 201 2026-01-08T15:26:10.290920Z :DEBUG: [db] [sessionid] [cluster] In Reconnect, ReadSizeBudget = 5, ReadSizeServerDelta = 0 2026-01-08T15:26:10.290959Z :DEBUG: [db] [sessionid] [cluster] New values: ReadSizeBudget = 5, ReadSizeServerDelta = 0 2026-01-08T15:26:10.290999Z :DEBUG: [db] [sessionid] [cluster] Reconnecting session to cluster cluster in 0.000000s 2026-01-08T15:26:10.310467Z :DEBUG: [db] [sessionid] [cluster] Successfully connected. Initializing session 2026-01-08T15:26:10.324108Z :INFO: [db] [sessionid] [cluster] Server session id: 123-session-id-321 2026-01-08T15:26:10.324341Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 5, ReadSizeServerDelta = 0 2026-01-08T15:26:10.330573Z :INFO: [db] [sessionid] [cluster] Confirm partition stream create. Partition stream id: 1. Cluster: "TestCluster". Topic: "TestTopic". Partition: 1. Read offset: (NULL) Message data size: 1000000 Compressed message data size: 3028 Post function Getting new event 2026-01-08T15:26:10.507312Z :DEBUG: Decompression task done. Partition/PartitionSessionId: 1 (1-10) 2026-01-08T15:26:10.508254Z :DEBUG: Take Data. Partition 1. Read: {0, 0} (1-1) 2026-01-08T15:26:10.509870Z :DEBUG: Take Data. Partition 1. Read: {0, 1} (2-2) 2026-01-08T15:26:10.512295Z :DEBUG: Take Data. Partition 1. Read: {0, 2} (3-3) 2026-01-08T15:26:10.513074Z :DEBUG: Take Data. Partition 1. Read: {0, 3} (4-4) 2026-01-08T15:26:10.521243Z :DEBUG: Take Data. Partition 1. Read: {0, 4} (5-5) 2026-01-08T15:26:10.522027Z :DEBUG: Take Data. Partition 1. Read: {0, 5} (6-6) 2026-01-08T15:26:10.522844Z :DEBUG: Take Data. Partition 1. Read: {1, 0} (7-7) 2026-01-08T15:26:10.523639Z :DEBUG: Take Data. Partition 1. Read: {1, 1} (8-8) 2026-01-08T15:26:10.531279Z :DEBUG: Take Data. Partition 1. Read: {1, 2} (9-9) 2026-01-08T15:26:10.532035Z :DEBUG: Take Data. Partition 1. Read: {1, 3} (10-10) 2026-01-08T15:26:10.532100Z :DEBUG: [db] [sessionid] [cluster] The application data is transferred to the client. Number of messages 10, size 10000000 bytes 2026-01-08T15:26:10.532239Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 5, ReadSizeServerDelta = 0 DataReceived { PartitionStreamId: 1 PartitionId: 1 Message { Data: ..1000000 bytes.. Partition stream id: 1 Cluster: "TestCluster". Topic: "TestTopic" Partition: 1 PartitionKey: "" Information: { Offset: 1 SeqNo: 42 MessageGroupId: "src_id" CreateTime: 1970-01-01T00:00:00.042000Z WriteTime: 1970-01-01T00:02:03.000000Z Ip: "127.0.0.1" UncompressedSize: 0 Meta: { "k1": "v1", "k": "v" } } } Message { Data: ..1000000 bytes.. Partition stream id: 1 Cluster: "TestCluster". Topic: "TestTopic" Partition: 1 PartitionKey: "" Information: { Offset: 2 SeqNo: 43 MessageGroupId: "src_id" CreateTime: 1970-01-01T00:00:00.042000Z WriteTime: 1970-01-01T00:02:03.000000Z Ip: "127.0.0.1" UncompressedSize: 0 Meta: { "k1": "v1", "k": "v" } } } Message { Data: ..1000000 bytes.. Partition stream id: 1 Cluster: "TestCluster". Topic: "TestTopic" Partition: 1 PartitionKey: "" Information: { Offset: 3 SeqNo: 44 MessageGroupId: "src_id" CreateTime: 1970-01-01T00:00:00.042000Z WriteTime: 1970-01-01T00:02:03.000000Z Ip: "127.0.0.1" UncompressedSize: 0 Meta: { "k1": "v1", "k": "v" } } } Message { Data: ..1000000 bytes.. Partition stream id: 1 Cluster: "TestCluster". 
Topic: "TestTopic" Partition: 1 PartitionKey: "" Information: { Offset: 4 SeqNo: 45 MessageGroupId: "src_id" CreateTime: 1970-01-01T00:00:00.042000Z WriteTime: 1970-01-01T00:02:03.000000Z Ip: "127.0.0.1" UncompressedSize: 0 Meta: { "k1": "v1", "k": "v" } } } Message { Data: ..1000000 bytes.. Partition stream id: 1 Cluster: "TestCluster". Topic: "TestTopic" Partition: 1 PartitionKey: "" Information: { Offset: 5 SeqNo: 46 MessageGroupId: "src_id" CreateTime: 1970-01-01T00:00:00.042000Z WriteTime: 1970-01-01T00:02:03.000000Z Ip: "127.0.0.1" UncompressedSize: 0 Meta: { "k1": "v1", "k": "v" } } } Message { Data: ..1000000 bytes.. Partition stream id: 1 Cluster: "TestCluster". Topic: "TestTopic" Partition: 1 PartitionKey: "" Information: { Offset: 6 SeqNo: 47 MessageGroupId: "src_id" CreateTime: 1970-01-01T00:00:00.042000Z WriteTime: 1970-01-01T00:02:03.000000Z Ip: "127.0.0.1" UncompressedSize: 0 Meta: { "k1": "v1", "k": "v" } } } Message { Data: ..1000000 bytes.. Partition stream id: 1 Cluster: "TestCluster". Topic: "TestTopic" Partition: 1 PartitionKey: "" Information: { Offset: 7 SeqNo: 48 MessageGroupId: "src_id_2" CreateTime: 1970-01-01T00:00:00.042000Z WriteTime: 1970-01-01T00:05:21.000000Z Ip: "1.0.0.127" UncompressedSize: 0 Meta: { "v1": "k1", "v": "k" } } } Message { Data: ..1000000 bytes.. Partition stream id: 1 Cluster: "TestCluster". Topic: "TestTopic" Partition: 1 PartitionKey: "" Information: { Offset: 8 SeqNo: 49 MessageGroupId: "src_id_2" CreateTime: 1970-01-01T00:00:00.042000Z WriteTime: 1970-01-01T00:05:21.000000Z Ip: "1.0.0.127" UncompressedSize: 0 Meta: { "v1": "k1", "v": "k" } } } Message { Data: ..1000000 bytes.. Partition stream id: 1 Cluster: "TestCluster". Topic: "TestTopic" Partition: 1 PartitionKey: "" Information: { Offset: 9 SeqNo: 50 MessageGroupId: "src_id_2" CreateTime: 1970-01-01T00:00:00.042000Z WriteTime: 1970-01-01T00:05:21.000000Z Ip: "1.0.0.127" UncompressedSize: 0 Meta: { "v1": "k1", "v": "k" } } } Message { Data: ..1000000 bytes.. Partition stream id: 1 Cluster: "TestCluster". Topic: "TestTopic" Partition: 1 PartitionKey: "" Information: { Offset: 10 SeqNo: 51 MessageGroupId: "src_id_2" CreateTime: 1970-01-01T00:00:00.042000Z WriteTime: 1970-01-01T00:05:21.000000Z Ip: "1.0.0.127" UncompressedSize: 0 Meta: { "v1": "k1", "v": "k" } } } } 2026-01-08T15:26:10.535391Z :DEBUG: [db] [sessionid] [cluster] Commit offsets [0, 11). Partition stream id: 1 GOT RANGE 0 11 2026-01-08T15:26:10.542458Z :DEBUG: [db] [sessionid] [cluster] In Reconnect, ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2026-01-08T15:26:10.542481Z :DEBUG: [db] [sessionid] [cluster] New values: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2026-01-08T15:26:10.542502Z :DEBUG: [db] [sessionid] [cluster] Reconnecting session to cluster cluster in 0.000000s 2026-01-08T15:26:10.558564Z :DEBUG: [db] [sessionid] [cluster] Successfully connected. Initializing session 2026-01-08T15:26:10.559062Z :INFO: [db] [sessionid] [cluster] Server session id: 123-session-id-321 2026-01-08T15:26:10.559234Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2026-01-08T15:26:10.560128Z :INFO: [db] [sessionid] [cluster] Confirm partition stream create. Partition stream id: 1. Cluster: "TestCluster". Topic: "TestTopic". Partition: 1. 
Read offset: (NULL)
2026-01-08T15:26:10.560533Z :DEBUG: [db] [sessionid] [cluster] Requesting status for partition stream id: 1
2026-01-08T15:26:10.562312Z :DEBUG: [db] [sessionid] [cluster] In Reconnect, ReadSizeBudget = 52428800, ReadSizeServerDelta = 0
2026-01-08T15:26:10.562405Z :DEBUG: [db] [sessionid] [cluster] New values: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0
2026-01-08T15:26:10.562425Z :DEBUG: [db] [sessionid] [cluster] Reconnecting session to cluster cluster in 0.000000s
2026-01-08T15:26:10.562688Z :DEBUG: [db] [sessionid] [cluster] Successfully connected. Initializing session
2026-01-08T15:26:10.562991Z :INFO: [db] [sessionid] [cluster] Server session id: 123-session-id-321
2026-01-08T15:26:10.563097Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0
2026-01-08T15:26:10.564300Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0
2026-01-08T15:26:10.564646Z :DEBUG: Decompression task done. Partition/PartitionSessionId: 1 (1-1)
2026-01-08T15:26:10.564758Z :DEBUG: Take Data. Partition 1. Read: {0, 0} (1-1)
2026-01-08T15:26:10.564806Z :DEBUG: [db] [sessionid] [cluster] The application data is transferred to the client. Number of messages 1, size 8 bytes
2026-01-08T15:26:10.565191Z :INFO: [db] [sessionid] [cluster] Confirm partition stream destroy. Partition stream id: 1. Cluster: "TestCluster". Topic: "TestTopic". Partition: 1
|85.1%| [TM] {BAZEL_UPLOAD} ydb/public/sdk/cpp/src/client/persqueue_public/ut/with_offset_ranges_mode_ut/unittest
>> GivenIdRange::Points [GOOD]
>> GivenIdRange::Runs
>> GivenIdRange::Runs [GOOD]
>> GivenIdRange::Allocate
>> TTxDataShardBuildFulltextDictScan::Build-SkipFirst-SkipLast [GOOD]
>> TTxDataShardBuildFulltextDictScan::Build+SkipFirst-SkipLast
>> GivenIdRange::Allocate [GOOD]
>> test.py::test[solomon-BasicExtractMembers-default.txt] [GOOD]
>> test.py::test[solomon-Downsampling-default.txt]
>> MediatorTest::BasicTimecastUpdates [GOOD]
>> TDqSolomonWriteActorTest::TestShouldReturnAfterCheckpoint [GOOD]
>> TestFilterSet::WatermarkWhereFalse [GOOD]
>> MediatorTest::MultipleTablets
>> TestFormatHandler::ManyJsonClients
>> test_transform.py::TestYamlConfigTransformations::test_domains_config[dump] [GOOD]
>> test_transform.py::TestYamlConfigTransformations::test_domains_config[dump_ds_init]
|85.1%| [TM] {asan, default-linux-x86_64, release} ydb/core/blob_depot/ut/unittest >> GivenIdRange::Allocate [GOOD]
|85.1%| [TM] {BAZEL_UPLOAD} ydb/core/blob_depot/ut/unittest
>> DescribeSchemaSecretsService::GetNewValue [GOOD]
>> DescribeSchemaSecretsService::GetUpdatedValue
>> TAllocatorSuite128::AllocationWithReclaimFourThreads [GOOD]
------- [TM] {asan, default-linux-x86_64, release} ydb/library/yql/providers/solomon/actors/ut/unittest >> TDqSolomonWriteActorTest::TestShouldReturnAfterCheckpoint [GOOD]
Test command err:
2026-01-08T15:25:13.227401Z node 1 :KQP_COMPUTE DEBUG: dq_solomon_write_actor.cpp:117: TxId: TxId-42, Solomon sink. Init
2026-01-08T15:25:13.234835Z node 1 :KQP_COMPUTE TRACE: dq_solomon_write_actor.cpp:135: TxId: TxId-42, Solomon sink. Got 1 items to send. Checkpoint: 0. Send queue: 0. Inflight: 0. Checkpoint in progress: 0
2026-01-08T15:25:13.234992Z node 1 :KQP_COMPUTE TRACE: dq_solomon_write_actor.cpp:292: TxId: TxId-42, Solomon sink. Push 121 bytes of data to buffer
2026-01-08T15:25:13.235119Z node 1 :KQP_COMPUTE TRACE: dq_solomon_write_actor.cpp:363: TxId: TxId-42, Solomon sink.
Sent 1 metrics with size of 121 bytes to solomon 2026-01-08T15:25:13.235134Z node 1 :KQP_COMPUTE TRACE: dq_solomon_write_actor.cpp:345: TxId: TxId-42, Solomon sink. Skip sending to solomon. Reason: Empty buffer 2026-01-08T15:25:13.283466Z node 1 :KQP_COMPUTE TRACE: dq_solomon_write_actor.cpp:386: TxId: TxId-42, Solomon sink. Solomon response[0]: HTTP/1.1 200 OK Content-Type: application/json; charset=utf-8 Content-Length: 26 Date: Thu, 08 Jan 2026 15:25:13 GMT Server: Python/3.12 aiohttp/3.10.6 {"writtenMetricsCount": 1} 2026-01-08T15:25:13.286520Z node 1 :KQP_COMPUTE TRACE: dq_solomon_write_actor.cpp:345: TxId: TxId-42, Solomon sink. Skip sending to solomon. Reason: Empty buffer 2026-01-08T15:25:23.582988Z node 2 :KQP_COMPUTE DEBUG: dq_solomon_write_actor.cpp:117: TxId: TxId-42, Solomon sink. Init 2026-01-08T15:25:23.641271Z node 2 :KQP_COMPUTE TRACE: dq_solomon_write_actor.cpp:135: TxId: TxId-42, Solomon sink. Got 7500 items to send. Checkpoint: 0. Send queue: 0. Inflight: 0. Checkpoint in progress: 0 2026-01-08T15:25:23.680095Z node 2 :KQP_COMPUTE TRACE: dq_solomon_write_actor.cpp:292: TxId: TxId-42, Solomon sink. Push 107903 bytes of data to buffer 2026-01-08T15:25:23.713829Z node 2 :KQP_COMPUTE TRACE: dq_solomon_write_actor.cpp:292: TxId: TxId-42, Solomon sink. Push 109013 bytes of data to buffer 2026-01-08T15:25:23.743040Z node 2 :KQP_COMPUTE TRACE: dq_solomon_write_actor.cpp:292: TxId: TxId-42, Solomon sink. Push 109013 bytes of data to buffer 2026-01-08T15:25:23.775096Z node 2 :KQP_COMPUTE TRACE: dq_solomon_write_actor.cpp:292: TxId: TxId-42, Solomon sink. Push 109013 bytes of data to buffer 2026-01-08T15:25:23.807057Z node 2 :KQP_COMPUTE TRACE: dq_solomon_write_actor.cpp:292: TxId: TxId-42, Solomon sink. Push 109013 bytes of data to buffer 2026-01-08T15:25:23.838843Z node 2 :KQP_COMPUTE TRACE: dq_solomon_write_actor.cpp:292: TxId: TxId-42, Solomon sink. Push 109013 bytes of data to buffer 2026-01-08T15:25:23.870542Z node 2 :KQP_COMPUTE TRACE: dq_solomon_write_actor.cpp:292: TxId: TxId-42, Solomon sink. Push 109013 bytes of data to buffer 2026-01-08T15:25:23.884499Z node 2 :KQP_COMPUTE TRACE: dq_solomon_write_actor.cpp:292: TxId: TxId-42, Solomon sink. Push 54513 bytes of data to buffer 2026-01-08T15:25:23.884797Z node 2 :KQP_COMPUTE TRACE: dq_solomon_write_actor.cpp:363: TxId: TxId-42, Solomon sink. Sent 1000 metrics with size of 107903 bytes to solomon 2026-01-08T15:25:23.885058Z node 2 :KQP_COMPUTE TRACE: dq_solomon_write_actor.cpp:363: TxId: TxId-42, Solomon sink. Sent 1000 metrics with size of 109013 bytes to solomon 2026-01-08T15:25:23.885251Z node 2 :KQP_COMPUTE TRACE: dq_solomon_write_actor.cpp:363: TxId: TxId-42, Solomon sink. Sent 1000 metrics with size of 109013 bytes to solomon 2026-01-08T15:25:23.885264Z node 2 :KQP_COMPUTE TRACE: dq_solomon_write_actor.cpp:345: TxId: TxId-42, Solomon sink. Skip sending to solomon. Reason: MaxRequestsInflight 2026-01-08T15:25:24.058619Z node 2 :KQP_COMPUTE TRACE: dq_solomon_write_actor.cpp:386: TxId: TxId-42, Solomon sink. Solomon response[2]: HTTP/1.1 200 OK Content-Type: application/json; charset=utf-8 Content-Length: 29 Date: Thu, 08 Jan 2026 15:25:24 GMT Server: Python/3.12 aiohttp/3.10.6 {"writtenMetricsCount": 1000} 2026-01-08T15:25:24.059100Z node 2 :KQP_COMPUTE TRACE: dq_solomon_write_actor.cpp:363: TxId: TxId-42, Solomon sink. Sent 1000 metrics with size of 109013 bytes to solomon 2026-01-08T15:25:24.059119Z node 2 :KQP_COMPUTE TRACE: dq_solomon_write_actor.cpp:345: TxId: TxId-42, Solomon sink. Skip sending to solomon. 
Reason: MaxRequestsInflight 2026-01-08T15:25:24.120520Z node 2 :KQP_COMPUTE TRACE: dq_solomon_write_actor.cpp:386: TxId: TxId-42, Solomon sink. Solomon response[1]: HTTP/1.1 200 OK Content-Type: application/json; charset=utf-8 Content-Length: 29 Date: Thu, 08 Jan 2026 15:25:24 GMT Server: Python/3.12 aiohttp/3.10.6 {"writtenMetricsCount": 1000} 2026-01-08T15:25:24.120841Z node 2 :KQP_COMPUTE TRACE: dq_solomon_write_actor.cpp:363: TxId: TxId-42, Solomon sink. Sent 1000 metrics with size of 109013 bytes to solomon 2026-01-08T15:25:24.120855Z node 2 :KQP_COMPUTE TRACE: dq_solomon_write_actor.cpp:345: TxId: TxId-42, Solomon sink. Skip sending to solomon. Reason: MaxRequestsInflight 2026-01-08T15:25:24.186795Z node 2 :KQP_COMPUTE TRACE: dq_solomon_write_actor.cpp:386: TxId: TxId-42, Solomon sink. Solomon response[0]: HTTP/1.1 200 OK Content-Type: application/json; charset=utf-8 Content-Length: 29 Date: Thu, 08 Jan 2026 15:25:24 GMT Server: Python/3.12 aiohttp/3.10.6 {"writtenMetricsCount": 1000} 2026-01-08T15:25:24.187190Z node 2 :KQP_COMPUTE TRACE: dq_solomon_write_actor.cpp:363: TxId: TxId-42, Solomon sink. Sent 1000 metrics with size of 109013 bytes to solomon 2026-01-08T15:25:24.187216Z node 2 :KQP_COMPUTE TRACE: dq_solomon_write_actor.cpp:345: TxId: TxId-42, Solomon sink. Skip sending to solomon. Reason: MaxRequestsInflight 2026-01-08T15:25:24.342741Z node 2 :KQP_COMPUTE TRACE: dq_solomon_write_actor.cpp:386: TxId: TxId-42, Solomon sink. Solomon response[4]: HTTP/1.1 200 OK Content-Type: application/json; charset=utf-8 Content-Length: 29 Date: Thu, 08 Jan 2026 15:25:24 GMT Server: Python/3.12 aiohttp/3.10.6 {"writtenMetricsCount": 1000} 2026-01-08T15:25:24.343136Z node 2 :KQP_COMPUTE TRACE: dq_solomon_write_actor.cpp:363: TxId: TxId-42, Solomon sink. Sent 1000 metrics with size of 109013 bytes to solomon 2026-01-08T15:25:24.343171Z node 2 :KQP_COMPUTE TRACE: dq_solomon_write_actor.cpp:345: TxId: TxId-42, Solomon sink. Skip sending to solomon. Reason: MaxRequestsInflight 2026-01-08T15:25:24.446793Z node 2 :KQP_COMPUTE TRACE: dq_solomon_write_actor.cpp:386: TxId: TxId-42, Solomon sink. Solomon response[3]: HTTP/1.1 200 OK Content-Type: application/json; charset=utf-8 Content-Length: 29 Date: Thu, 08 Jan 2026 15:25:24 GMT Server: Python/3.12 aiohttp/3.10.6 {"writtenMetricsCount": 1000} 2026-01-08T15:25:24.447017Z node 2 :KQP_COMPUTE TRACE: dq_solomon_write_actor.cpp:363: TxId: TxId-42, Solomon sink. Sent 500 metrics with size of 54513 bytes to solomon 2026-01-08T15:25:24.447031Z node 2 :KQP_COMPUTE TRACE: dq_solomon_write_actor.cpp:345: TxId: TxId-42, Solomon sink. Skip sending to solomon. Reason: Empty buffer MaxRequestsInflight 2026-01-08T15:25:24.653675Z node 2 :KQP_COMPUTE TRACE: dq_solomon_write_actor.cpp:386: TxId: TxId-42, Solomon sink. Solomon response[5]: HTTP/1.1 200 OK Content-Type: application/json; charset=utf-8 Content-Length: 29 Date: Thu, 08 Jan 2026 15:25:24 GMT Server: Python/3.12 aiohttp/3.10.6 {"writtenMetricsCount": 1000} 2026-01-08T15:25:24.653789Z node 2 :KQP_COMPUTE TRACE: dq_solomon_write_actor.cpp:345: TxId: TxId-42, Solomon sink. Skip sending to solomon. Reason: Empty buffer 2026-01-08T15:25:24.726742Z node 2 :KQP_COMPUTE TRACE: dq_solomon_write_actor.cpp:386: TxId: TxId-42, Solomon sink. 
Solomon response[7]: HTTP/1.1 200 OK Content-Type: application/json; charset=utf-8 Content-Length: 28 Date: Thu, 08 Jan 2026 15:25:24 GMT Server: Python/3.12 aiohttp/3.10.6 {"writtenMetricsCount": 500} 2026-01-08T15:25:24.726833Z node 2 :KQP_COMPUTE TRACE: dq_solomon_write_actor.cpp:345: TxId: TxId-42, Solomon sink. Skip sending to solomon. Reason: Empty buffer 2026-01-08T15:25:24.869898Z node 2 :KQP_COMPUTE TRACE: dq_solomon_write_actor.cpp:386: TxId: TxId-42, Solomon sink. Solomon response[6]: HTTP/1.1 200 OK Content-Type: application/json; charset=utf-8 Content-Length: 29 Date: Thu, 08 Jan 2026 15:25:24 GMT Server: Python/3.12 aiohttp/3.10.6 {"writtenMetricsCount": 1000} 2026-01-08T15:25:24.870011Z node 2 :KQP_COMPUTE TRACE: dq_solomon_write_actor.cpp:345: TxId: TxId-42, Solomon sink. Skip sending to solomon. Reason: Empty buffer 2026-01-08T15:25:35.517525Z node 3 :KQP_COMPUTE DEBUG: dq_solomon_write_actor.cpp:117: TxId: TxId-42, Solomon sink. Init 2026-01-08T15:25:35.568196Z node 3 :KQP_COMPUTE TRACE: dq_solomon_write_actor.cpp:135: TxId: TxId-42, Solomon sink. Got 7500 items to send. Checkpoint: 0. Send queue: 0. Inflight: 0. Checkpoint in progress: 0 2026-01-08T15:25:35.612545Z node 3 :KQP_COMPUTE TRACE: dq_solomon_write_actor.cpp:292: TxId: TxId-42, Solomon sink. Push 79903 bytes of data to buffer 2026-01-08T15:25:35.631845Z node 3 :KQP_COMPUTE TRACE: dq_solomon_write_actor.cpp:292: TxId: TxId-42, Solomon sink. Push 81013 bytes of data to buffer 2026-01-08T15:25:35.659263Z node 3 :KQP_COMPUTE TRACE: dq_solomon_write_actor.cpp:292: TxId: TxId-42, Solomon sink. Push 81013 bytes of data to buffer 2026-01-08T15:25:35.678254Z node 3 :KQP_COMPUTE TRACE: dq_solomon_write_actor.cpp:292: TxId: TxId-42, Solomon sink. Push 81013 bytes of data to buffer 2026-01-08T15:25:35.701614Z node 3 :KQP_COMPUTE TRACE: dq_solomon_write_actor.cpp:292: TxId: TxId-42, Solomon sink. Push 81013 bytes of data to buffer 2026-01-08T15:25:35.716612Z node 3 :KQP_COMPUTE TRACE: dq_solomon_write_actor.cpp:292: TxId: TxId-42, Solomon sink. Push 81013 bytes of data to buffer 2026-01-08T15:25:35.727393Z node 3 :KQP_COMPUTE TRACE: dq_solomon_write_actor.cpp:292: TxId: TxId-42, Solomon sink. Push 81013 bytes of data to buffer 2026-01-08T15:25:35.732856Z node 3 :KQP_COMPUTE TRACE: dq_solomon_write_actor.cpp:292: TxId: TxId-42, Solomon sink. Push 40513 bytes of data to buffer 2026-01-08T15:25:35.733141Z node 3 :KQP_COMPUTE TRACE: dq_solomon_write_actor.cpp:363: TxId: TxId-42, Solomon sink. Sent 1000 metrics with size of 79903 bytes to solomon 2026-01-08T15:25:35.733368Z node 3 :KQP_COMPUTE TRACE: dq_solomon_write_actor.cpp:363: TxId: TxId-42, Solomon sink. Sent 1000 metrics with size of 81013 bytes to solomon 2026-01-08T15:25:35.733530Z node 3 :KQP_COMPUTE TRACE: dq_solomon_write_actor.cpp:363: TxId: TxId-42, Solomon sink. Sent 1000 metrics with size of 81013 bytes to solomon 2026-01-08T15:25:35.733542Z node 3 :KQP_COMPUTE TRACE: dq_solomon_write_actor.cpp:345: TxId: TxId-42, Solomon sink. Skip sending to solomon. Reason: MaxRequestsInflight 2026-01-08T15:25:36.014768Z node 3 :KQP_COMPUTE TRACE: dq_solomon_write_actor.cpp:386: TxId: TxId-42, Solomon sink. Solomon response[2]: HTTP/1.1 200 OK Content-Type: application/json; charset=utf-8 Content-Length: 26 Date: Thu, 08 Jan 2026 15:25:36 GMT Server: Python/3.12 aiohttp/3.10.6 {"sensorsProcessed": 1000} 2026-01-08T15:25:36.015128Z node 3 :KQP_COMPUTE TRACE: dq_solomon_write_actor.cpp:363: TxId: TxId-42, Solomon sink. 
Sent 1000 metrics with size of 81013 bytes to solomon 2026-01-08T15:25:36.015139Z node 3 :KQP_COMPUTE TRACE: dq_solomon_write_actor.cpp:345: TxId: TxId-42, Solomon sink. Skip sending to solomon. Reason: MaxRequestsInflight 2026-01-08T15:25:36.138722Z node 3 :KQP_COMPUTE TRACE: dq_solomon_write_actor.cpp:386: TxId: TxId-42, Solomon sink. Solomon response[1]: HTTP/1.1 200 OK Content-Type: application/json; charset=utf-8 Content-Length: 26 Date: Thu, 08 Jan 2026 15:25:36 GMT Server: Python/3.12 aiohttp/3.10.6 {"sensorsProcessed": 1000} 2026-01-08T15:25:36.139061Z node 3 :KQP_COMPUTE TRACE: dq_solomon_write_actor.cpp:363: TxId: TxId-42, Solomon sink. Sent 1000 metrics with size of 81013 bytes to solomon 2026-01-08T15:25:36.139075Z node 3 :KQP_COMPUTE TRACE: dq_solomon_write_actor.cpp:345: TxId: TxId-42, Solomon sink. Skip sending to solomon. Reason: MaxRequestsInflight 2026-01-08T15:25:36.274751Z node 3 :KQP_COMPUTE TRACE: dq_solomon_write_actor.cpp:386: TxId: TxId-42, Solomon sink. Solomon response[0]: HTTP/1.1 200 OK Content-Type: application/json; charset=utf-8 Content-Length: 26 Date: Thu, 08 Jan 2026 15:25:36 GMT Server: Python/3.12 aiohttp/3.10.6 {"sensorsProcessed": 1000} 2026-01-08T15:25:36.275117Z node 3 :KQP_COMPUTE TRACE: dq_solomon_write_actor.cpp:363: TxId: TxId-42, Solomon sink. Sent 1000 metrics with size of 81013 bytes to solomon 2026-01-08T15:25:36.275144Z node 3 :KQP_COMPUTE TRACE: dq_solomon_write_actor.cpp:345: TxId: TxId-42, Solomon sink. Skip sending to solomon. Reason: MaxRequestsInflight 2026-01-08T15:25:36.390756Z node 3 :KQP_COMPUTE TRACE: dq_solomon_write_actor.cpp:386: TxId: TxId-42, Solomon sink. Solomon response[3]: HTTP/1.1 200 OK Content-Type: application/json; charset=utf-8 Content-Length: 26 Date: Thu, 08 Jan 2026 15:25:36 GMT Server: Python/3.12 aiohttp/3.10.6 {"sensorsProcessed": 1000} 2026-01-08T15:25:36.391103Z node 3 :KQP_COMPUTE TRACE: dq_solomon_write_actor.cpp:363: TxId: TxId-42, Solomon sink. Sent 1000 metrics with size of 81013 bytes to solomon 2026-01-08T15:25:36.391116Z node 3 :KQP_COMPUTE TRACE: dq_solomon_write_actor.cpp:345: TxId: TxId-42, Solomon sink. Skip sending to solomon. Reason: MaxRequestsInflight 2026-01-08T15:25:36.474770Z node 3 :KQP_COMPUTE TRACE: dq_solomon_write_actor.cpp:386: TxId: TxId-42, Solomon sink. Solomon response[4]: HTTP/1.1 200 OK Content-Type: application/json; charset=utf-8 Content-Length: 26 Date: Thu, 08 Jan 2026 15:25:36 GMT Server: Python/3.12 aiohttp/3.10.6 {"sensorsProcessed": 1000} 2026-01-08T15:25:36.474960Z node 3 :KQP_COMPUTE TRACE: dq_solomon_write_actor.cpp:363: TxId: TxId-42, Solomon sink. Sent 500 metrics with size of 40513 bytes to solomon 2026-01-08T15:25:36.474974Z node 3 :KQP_COMPUTE TRACE: dq_solomon_write_actor.cpp:345: TxId: TxId-42, Solomon sink. Skip sending to solomon. Reason: Empty buffer MaxRequestsInflight 2026-01-08T15:25:36.559626Z node 3 :KQP_COMPUTE TRACE: dq_solomon_write_actor.cpp:386: TxId: TxId-42, Solomon sink. Solomon response[5]: HTTP/1.1 200 OK Content-Type: application/json; charset=utf-8 Content-Length: 26 Date: Thu, 08 Jan 2026 15:25:36 GMT Server: Python/3.12 aiohttp/3.10.6 {"sensorsProcessed": 1000} 2026-01-08T15:25:36.559716Z node 3 :KQP_COMPUTE TRACE: dq_solomon_write_actor.cpp:345: TxId: TxId-42, Solomon sink. Skip sending to solomon. Reason: Empty buffer 2026-01-08T15:25:36.670905Z node 3 :KQP_COMPUTE TRACE: dq_solomon_write_actor.cpp:386: TxId: TxId-42, Solomon sink. 
Solomon response[6]: HTTP/1.1 200 OK Content-Type: application/json; charset=utf-8 Content-Length: 26 Date: Thu, 08 Jan 2026 15:25:36 GMT Server: Python/3.12 aiohttp/3.10.6 {"sensorsProcessed": 1000} 2026-01-08T15:25:36.671017Z node 3 :KQP_COMPUTE TRACE: dq_solomon_write_actor.cpp:345: TxId: TxId-42, Solomon sink. Skip sending to solomon. Reason: Empty buffer 2026-01-08T15:25:36.713946Z node 3 :KQP_COMPUTE TRACE: dq_solomon_write_actor.cpp:386: TxId: TxId-42, Solomon sink. Solomon response[7]: HTTP/1.1 200 OK Content-Type: application/json; charset=utf-8 Content-Length: 25 Date: Thu, 08 Jan 2026 15:25:36 GMT Server: Python/3.12 aiohttp/3.10.6 {"sensorsProcessed": 500} 2026-01-08T15:25:36.714039Z node 3 :KQP_COMPUTE TRACE: dq_solomon_write_actor.cpp:345: TxId: TxId-42, Solomon sink. Skip sending to solomon. Reason: Empty buffer 2026-01-08T15:25:49.662824Z node 4 :KQP_COMPUTE DEBUG: dq_solomon_write_actor.cpp:117: TxId: TxId-42, Solomon sink. Init 2026-01-08T15:25:49.663687Z node 4 :KQP_COMPUTE TRACE: dq_solomon_write_actor.cpp:135: TxId: TxId-42, Solomon sink. Got 10 items to send. Checkpoint: 0. Send queue: 0. Inflight: 0. Checkpoint in progress: 0 2026-01-08T15:25:49.670480Z node 4 :KQP_COMPUTE TRACE: dq_solomon_write_actor.cpp:292: TxId: TxId-42, Solomon sink. Push 579 bytes of data to buffer 2026-01-08T15:25:49.670655Z node 4 :KQP_COMPUTE TRACE: dq_solomon_write_actor.cpp:363: TxId: TxId-42, Solomon sink. Sent 10 metrics with size of 579 bytes to solomon 2026-01-08T15:25:49.670670Z node 4 :KQP_COMPUTE TRACE: dq_solomon_write_actor.cpp:345: TxId: TxId-42, Solomon sink. Skip sending to solomon. Reason: Empty buffer 2026-01-08T15:25:49.698766Z node 4 :KQP_COMPUTE TRACE: dq_solomon_write_actor.cpp:386: TxId: TxId-42, Solomon sink. Solomon response[0]: HTTP/1.1 200 OK Content-Type: application/json; charset=utf-8 Content-Length: 27 Date: Thu, 08 Jan 2026 15:25:49 GMT Server: Python/3.12 aiohttp/3.10.6 {"writtenMetricsCount": 10} 2026-01-08T15:25:49.698862Z node 4 :KQP_COMPUTE TRACE: dq_solomon_write_actor.cpp:345: TxId: TxId-42, Solomon sink. Skip sending to solomon. Reason: Empty buffer 2026-01-08T15:26:00.983029Z node 5 :KQP_COMPUTE DEBUG: dq_solomon_write_actor.cpp:117: TxId: TxId-42, Solomon sink. Init 2026-01-08T15:26:01.076269Z node 5 :KQP_COMPUTE TRACE: dq_solomon_write_actor.cpp:135: TxId: TxId-42, Solomon sink. Got 2400 items to send. Checkpoint: 1. Send queue: 0. Inflight: 0. Checkpoint in progress: 0 2026-01-08T15:26:01.262972Z node 5 :KQP_COMPUTE TRACE: dq_solomon_write_actor.cpp:292: TxId: TxId-42, Solomon sink. Push 107903 bytes of data to buffer 2026-01-08T15:26:01.272843Z node 5 :KQP_COMPUTE TRACE: dq_solomon_write_actor.cpp:292: TxId: TxId-42, Solomon sink. Push 109013 bytes of data to buffer 2026-01-08T15:26:01.277178Z node 5 :KQP_COMPUTE TRACE: dq_solomon_write_actor.cpp:292: TxId: TxId-42, Solomon sink. Push 43613 bytes of data to buffer 2026-01-08T15:26:01.277447Z node 5 :KQP_COMPUTE TRACE: dq_solomon_write_actor.cpp:363: TxId: TxId-42, Solomon sink. Sent 1000 metrics with size of 107903 bytes to solomon 2026-01-08T15:26:01.277829Z node 5 :KQP_COMPUTE TRACE: dq_solomon_write_actor.cpp:363: TxId: TxId-42, Solomon sink. Sent 1000 metrics with size of 109013 bytes to solomon 2026-01-08T15:26:01.277937Z node 5 :KQP_COMPUTE TRACE: dq_solomon_write_actor.cpp:363: TxId: TxId-42, Solomon sink. Sent 400 metrics with size of 43613 bytes to solomon 2026-01-08T15:26:01.277953Z node 5 :KQP_COMPUTE TRACE: dq_solomon_write_actor.cpp:345: TxId: TxId-42, Solomon sink. 
Skip sending to solomon. Reason: MaxRequestsInflight 2026-01-08T15:26:01.308118Z node 5 :KQP_COMPUTE TRACE: dq_solomon_write_actor.cpp:386: TxId: TxId-42, Solomon sink. Solomon response[2]: HTTP/1.1 200 OK Content-Type: application/json; charset=utf-8 Content-Length: 28 Date: Thu, 08 Jan 2026 15:26:01 GMT Server: Python/3.12 aiohttp/3.10.6 {"writtenMetricsCount": 400} 2026-01-08T15:26:01.310286Z node 5 :KQP_COMPUTE DEBUG: dq_solomon_write_actor.cpp:373: TxId: TxId-42, Solomon sink. Process checkpoint. Inflight before checkpoint: 2 2026-01-08T15:26:01.451067Z node 5 :KQP_COMPUTE TRACE: dq_solomon_write_actor.cpp:386: TxId: TxId-42, Solomon sink. Solomon response[0]: HTTP/1.1 200 OK Content-Type: application/json; charset=utf-8 Content-Length: 29 Date: Thu, 08 Jan 2026 15:26:01 GMT Server: Python/3.12 aiohttp/3.10.6 {"writtenMetricsCount": 1000} 2026-01-08T15:26:01.451214Z node 5 :KQP_COMPUTE TRACE: dq_solomon_write_actor.cpp:345: TxId: TxId-42, Solomon sink. Skip sending to solomon. Reason: CheckpointInProgress Empty buffer 2026-01-08T15:26:01.506688Z node 5 :KQP_COMPUTE TRACE: dq_solomon_write_actor.cpp:386: TxId: TxId-42, Solomon sink. Solomon response[1]: HTTP/1.1 200 OK Content-Type: application/json; charset=utf-8 Content-Length: 29 Date: Thu, 08 Jan 2026 15:26:01 GMT Server: Python/3.12 aiohttp/3.10.6 {"writtenMetricsCount": 1000} 2026-01-08T15:26:01.506886Z node 5 :KQP_COMPUTE TRACE: dq_solomon_write_actor.cpp:345: TxId: TxId-42, Solomon sink. Skip sending to solomon. Reason: Empty buffer 2026-01-08T15:26:02.314042Z node 6 :KQP_COMPUTE DEBUG: dq_solomon_write_actor.cpp:117: TxId: TxId-42, Solomon sink. Init 2026-01-08T15:26:02.314345Z node 6 :KQP_COMPUTE TRACE: dq_solomon_write_actor.cpp:135: TxId: TxId-42, Solomon sink. Got 1 items to send. Checkpoint: 1. Send queue: 0. Inflight: 0. Checkpoint in progress: 0 2026-01-08T15:26:02.314479Z node 6 :KQP_COMPUTE TRACE: dq_solomon_write_actor.cpp:292: TxId: TxId-42, Solomon sink. Push 121 bytes of data to buffer 2026-01-08T15:26:02.314603Z node 6 :KQP_COMPUTE TRACE: dq_solomon_write_actor.cpp:363: TxId: TxId-42, Solomon sink. Sent 1 metrics with size of 121 bytes to solomon 2026-01-08T15:26:02.314621Z node 6 :KQP_COMPUTE DEBUG: dq_solomon_write_actor.cpp:373: TxId: TxId-42, Solomon sink. Process checkpoint. Inflight before checkpoint: 1 2026-01-08T15:26:02.322235Z node 6 :KQP_COMPUTE TRACE: dq_solomon_write_actor.cpp:386: TxId: TxId-42, Solomon sink. Solomon response[0]: HTTP/1.1 200 OK Content-Type: application/json; charset=utf-8 Content-Length: 26 Date: Thu, 08 Jan 2026 15:26:02 GMT Server: Python/3.12 aiohttp/3.10.6 {"writtenMetricsCount": 1} 2026-01-08T15:26:02.322380Z node 6 :KQP_COMPUTE TRACE: dq_solomon_write_actor.cpp:345: TxId: TxId-42, Solomon sink. Skip sending to solomon. Reason: Empty buffer 2026-01-08T15:26:02.323096Z node 6 :KQP_COMPUTE TRACE: dq_solomon_write_actor.cpp:135: TxId: TxId-42, Solomon sink. Got 1 items to send. Checkpoint: 0. Send queue: 0. Inflight: 0. Checkpoint in progress: 0 2026-01-08T15:26:02.323247Z node 6 :KQP_COMPUTE TRACE: dq_solomon_write_actor.cpp:292: TxId: TxId-42, Solomon sink. Push 121 bytes of data to buffer 2026-01-08T15:26:02.323351Z node 6 :KQP_COMPUTE TRACE: dq_solomon_write_actor.cpp:363: TxId: TxId-42, Solomon sink. Sent 1 metrics with size of 121 bytes to solomon 2026-01-08T15:26:02.323369Z node 6 :KQP_COMPUTE TRACE: dq_solomon_write_actor.cpp:345: TxId: TxId-42, Solomon sink. Skip sending to solomon. 
Reason: Empty buffer
2026-01-08T15:26:02.328420Z node 6 :KQP_COMPUTE TRACE: dq_solomon_write_actor.cpp:386: TxId: TxId-42, Solomon sink. Solomon response[1]: HTTP/1.1 200 OK Content-Type: application/json; charset=utf-8 Content-Length: 26 Date: Thu, 08 Jan 2026 15:26:02 GMT Server: Python/3.12 aiohttp/3.10.6 {"writtenMetricsCount": 1}
2026-01-08T15:26:02.328557Z node 6 :KQP_COMPUTE TRACE: dq_solomon_write_actor.cpp:345: TxId: TxId-42, Solomon sink. Skip sending to solomon. Reason: Empty buffer
|85.1%| [TM] {BAZEL_UPLOAD} ydb/library/yql/providers/solomon/actors/ut/unittest
>> TTxDataShardBuildFulltextDictScan::Build+SkipFirst-SkipLast [GOOD]
>> TTxDataShardBuildFulltextDictScan::Build-SkipFirst+SkipLast
>> TDataShardLocksTest::UseLocksCache [GOOD]
>> TestFormatHandler::ManyJsonClients [GOOD]
>> DynamicProxy::RaceCheck10
------- [TM] {asan, default-linux-x86_64, release} ydb/library/actors/interconnect/rdma/ut_mem_pool_limit_128/gtest >> TAllocatorSuite128::AllocationWithReclaimFourThreads [GOOD]
Test command err:
ydb/library/actors/interconnect/rdma/ut_mem_pool_limit_128/allocator_ut.cpp:44: Value of: i == 32
  Actual: false
Expected: true
AddressSanitizer:DEADLYSIGNAL
=================================================================
==42593==ERROR: AddressSanitizer: SEGV on unknown address 0x000000000010 (pc 0x0000015e091c bp 0x7ffc240e3530 sp 0x7ffc240e3530 T0)
==42593==The signal is caused by a READ memory access.
==42593==Hint: address points to the zero page.
    #0 0x0000015e091c in Get /-S/util/generic/ptr.h:592:16
    #1 0x0000015e091c in AsT /-S/util/generic/ptr.h:153:50
    #2 0x0000015e091c in operator-> /-S/util/generic/ptr.h:118:18
    #3 0x0000015e091c in NInterconnect::NRdma::TMemRegion::GetAddr() const /-S/ydb/library/actors/interconnect/rdma/mem_pool.cpp:222:20
    #4 0x00000059cd71 in TAllocatorSuite128_SlotPoolHugeAlloc_Test::TestBody() /-S/ydb/library/actors/interconnect/rdma/ut_mem_pool_limit_128/allocator_ut.cpp:75:9
    #5 0x0000009fb3b4 in HandleSehExceptionsInMethodIfSupported /-S/contrib/restricted/googletest/googletest/src/gtest.cc:2664:10
    #6 0x0000009fb3b4 in void testing::internal::HandleExceptionsInMethodIfSupported(testing::Test*, void (testing::Test::*)(), char const*) /-S/contrib/restricted/googletest/googletest/src/gtest.cc:2700:14
    #7 0x0000009fb0e0 in testing::Test::Run() /-S/contrib/restricted/googletest/googletest/src/gtest.cc:2739:5
    #8 0x0000009fd965 in testing::TestInfo::Run() /-S/contrib/restricted/googletest/googletest/src/gtest.cc:2885:11
    #9 0x000000a00811 in testing::TestSuite::Run() /-S/contrib/restricted/googletest/googletest/src/gtest.cc:3063:30
    #10 0x000000a2b85e in testing::internal::UnitTestImpl::RunAllTests() /-S/contrib/restricted/googletest/googletest/src/gtest.cc:6054:44
    #11 0x000000a2a254 in HandleSehExceptionsInMethodIfSupported /-S/contrib/restricted/googletest/googletest/src/gtest.cc:2664:10
    #12 0x000000a2a254 in bool testing::internal::HandleExceptionsInMethodIfSupported(testing::internal::UnitTestImpl*, bool (testing::internal::UnitTestImpl::*)(), char const*) /-S/contrib/restricted/googletest/googletest/src/gtest.cc:2700:14
    #13 0x000000a29dc6 in testing::UnitTest::Run() /-S/contrib/restricted/googletest/googletest/src/gtest.cc:5594:10
    #14 0x000000a8755f in RUN_ALL_TESTS /-S/contrib/restricted/googletest/googletest/include/gtest/gtest.h:2334:73
    #15 0x000000a8755f in NGTest::Main(int, char**) /-S/library/cpp/testing/gtest/main.cpp:274:12
    #16 0x7f3f01322d8f (/lib/x86_64-linux-gnu/libc.so.6+0x29d8f) (BuildId: 4f7b0c955c3d81d7cac1501a2498b69d1d82bfe7)
    #17 0x7f3f01322e3f in __libc_start_main (/lib/x86_64-linux-gnu/libc.so.6+0x29e3f) (BuildId: 4f7b0c955c3d81d7cac1501a2498b69d1d82bfe7)
    #18 0x00000056caa8 in _start (/home/runner/.ya/build/build_root/663n/003ce0/ydb/library/actors/interconnect/rdma/ut_mem_pool_limit_128/interconnect-rdma-ut_mem_pool_limit_128+0x56caa8) (BuildId: fbe7bd43922bb9b19f9ea62cd4d13de8fc84e1b3)
==42593==Register values:
rax = 0x0000000000000010  rbx = 0x00007ffc240e3540  rcx = 0x0000000000000002  rdx = 0x00000f67dfdfbe40
rdi = 0x0000000000000000  rsi = 0x00007b3efefdf1e0  rbp = 0x00007ffc240e3530  rsp = 0x00007ffc240e3530
r8 = 0x0000000000000001  r9 = 0x0000000000000002  r10 = 0x00007d7f002e0000  r11 = 0x0000000000000001
r12 = 0x00000f685fe351c0  r13 = 0x00007b3eff1e8e20  r14 = 0x00007b3eff1e8e00  r15 = 0x00007b3eff1e8e00
AddressSanitizer can not provide additional info.
SUMMARY: AddressSanitizer: SEGV /-S/util/generic/ptr.h:592:16 in Get
==42593==ABORTING
ydb/library/actors/interconnect/rdma/ut_mem_pool_limit_128/allocator_ut.cpp:94: Value of: i == 262144
  Actual: false
Expected: true
ydb/library/actors/interconnect/rdma/ut_mem_pool_limit_128/allocator_ut.cpp:124: Value of: i == 262144
  Actual: false
Expected: true
===
Average time per allocation t0: 349.593 us, t1: 348.879 us, t2: 349.325 us, t3: 348.086 us
Average time per allocation t0: 9.33859 us, t1: 10.0603 us, t2: 9.80021 us, t3: 10.0749 us
Average time per allocation t0: 8.99465 us, t1: 9.61753 us, t2: 10.2488 us, t3: 8.8627 us
Average time per allocation t0: 9.79095 us, t1: 11.0047 us, t2: 10.154 us, t3: 9.87685 us
Average time per allocation t0: 4.84647 us, t1: 6.622 us, t2: 6.08362 us, t3: 6.46117 us
Average time per allocation t0: 2.65791 us, t1: 3.28888 us, t2: 3.40034 us, t3: 3.52082 us
Average time per allocation t0: 4.58828 us, t1: 2.56939 us, t2: 5.50508 us, t3: 6.20336 us
Average time per allocation t0: 3.15283 us, t1: 4.2186 us, t2: 6.90219 us, t3: 3.37694 us
Average time per allocation t0: 6.16077 us, t1: 3.82807 us, t2: 6.9629 us, t3: 7.15158 us
Average time per allocation t0: 2.03225 us, t1: 7.43557 us, t2: 5.53722 us, t3: 6.27863 us
===
Average time per allocation t0: 33.0804 us, t1: 33.7081 us, t2: 17.6456 us, t3: 30.9348 us
Average time per allocation t0: 24.3264 us, t1: 16.0403 us, t2: 11.3815 us, t3: 13.3941 us
Average time per allocation t0: 24.2753 us, t1: 18.4966 us, t2: 13.2348 us, t3: 10.9884 us
Average time per allocation t0: 12.2618 us, t1: 22.735 us, t2: 6.6326 us, t3: 4.4194 us
Average time per allocation t0: 25.1121 us, t1: 3.9122 us, t2: 3.9161 us, t3: 4.3115 us
Average time per allocation t0: 10.2712 us, t1: 4.6486 us, t2: 4.8833 us, t3: 3.5292 us
Average time per allocation t0: 18.3275 us, t1: 8.8662 us, t2: 4.6046 us, t3: 5.0615 us
Average time per allocation t0: 8.9764 us, t1: 3.0382 us, t2: 1.9809 us, t3: 1.8031 us
Average time per allocation t0: 7.2872 us, t1: 3.8877 us, t2: 6.11 us, t3: 2.9014 us
Average time per allocation t0: 15.3014 us, t1: 6.3474 us, t2: 4.669 us, t3: 5.4836 us
===
Average time per allocation t0: 7.8871 us, t1: 8.7318 us, t2: 6.0338 us, t3: 8.7323 us
Average time per allocation t0: 32.542 us, t1: 27.0328 us, t2: 23.6022 us, t3: 18.8796 us
Average time per allocation t0: 54.4187 us, t1: 28.8791 us, t2: 50.2106 us, t3: 38.2015 us
Average time per allocation t0: 38.5584 us, t1: 36.935 us, t2: 37.7183 us, t3: 12.8957 us
Average time per allocation t0: 43.5248 us, t1: 36.3683 us, t2: 106.566 us, t3: 6.0651 us
Average time per allocation t0: 64.4751 us, t1: 15.77 us, t2: 37.8574 us, t3: 35.3858 us
Average time per allocation t0: 28.6066 us, t1: 27.534 us, t2: 28.2634 us, t3: 28.4561 us
Average time per allocation t0: 13.965 us, t1: 10.2564 us, t2: 12.1538 us, t3: 12.1741 us
Average time per allocation t0: 11.3673 us, t1: 9.5956 us, t2: 9.5669 us, t3: 10.109 us
Average time per allocation t0: 10.4644 us, t1: 14.1301 us, t2: 14.3035 us, t3: 14.2689 us
===
Average time per allocation t0: 4.33301 us, t1: 7.88455 us, t2: 13.7382 us, t3: 5.80664 us
Average time per allocation t0: 5.556 us, t1: 7.20367 us, t2: 3.10486 us, t3: 5.81445 us
Average time per allocation t0: 6.17938 us, t1: 5.69119 us, t2: 16.7435 us, t3: 9.58691 us
Average time per allocation t0: 7.00839 us, t1: 7.19769 us, t2: 9.50562 us, t3: 51.8545 us
Average time per allocation t0: 5.98334 us, t1: 7.95715 us, t2: 21.2284 us, t3: 95.7031 us
Average time per allocation t0: 6.55927 us, t1: 4.26068 us, t2: 21.0148 us, t3: 4.13184 us
Average time per allocation t0: 6.60266 us, t1: 4.68854 us, t2: 7.46313 us, t3: 4.0127 us
Average time per allocation t0: 7.55699 us, t1: 6.77707 us, t2: 7.80579 us, t3: 10.084 us
Average time per allocation t0: 4.99332 us, t1: 5.86044 us, t2: 6.70325 us, t3: 3.51758 us
Average time per allocation t0: 5.81613 us, t1: 3.98154 us, t2: 4.44629 us, t3: 3.88965 us
|85.1%| [TM] {BAZEL_UPLOAD} ydb/library/actors/interconnect/rdma/ut_mem_pool_limit_128/gtest
>> TestFormatHandler::ManyRawClients
------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/datashard/ut_locks/unittest >> TDataShardLocksTest::UseLocksCache [GOOD]
Test command err:
... waiting for SysViewsRoster update finished
2026-01-08T15:26:05.608947Z node 1 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:125: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, got undelivered to scheme cache: ActorUnknown
2026-01-08T15:26:05.735850Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:114:2161], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2026-01-08T15:26:05.736273Z node 1 :METADATA_PROVIDER ERROR: log.cpp:841: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2026-01-08T15:26:05.736486Z node 1 :METADATA_PROVIDER ERROR: log.cpp:841: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2026-01-08T15:26:06.025201Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2026-01-08T15:26:06.025333Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2026-01-08T15:26:06.114840Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1374: Notification cookie mismatch for subscription [1:34:2081] 1767885962745809 != 1767885962745813 2026-01-08T15:26:06.129463Z node 1 :IMPORT WARN: schemeshard_import.cpp:440: Table profiles were not loaded 2026-01-08T15:26:06.178138Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2026-01-08T15:26:06.267955Z node 1 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: PathErrorUnknown, path: Root/.metadata/script_executions ... waiting for SysViewsRoster update finished (done) 2026-01-08T15:26:06.582373Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2026-01-08T15:26:06.598905Z node 1 :TX_CONVEYOR ERROR: log.cpp:841: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2026-01-08T15:26:06.708176Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:26:06.751851Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3119: StateInit, received event# 268828672, Sender [1:877:2759], Recipient [1:892:2768]: NKikimr::TEvTablet::TEvBoot 2026-01-08T15:26:06.753027Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3119: StateInit, received event# 268828673, Sender [1:877:2759], Recipient [1:892:2768]: NKikimr::TEvTablet::TEvRestored 2026-01-08T15:26:06.754545Z node 1 :TX_DATASHARD INFO: datashard.cpp:374: TDataShard::OnActivateExecutor: tablet 72075186224037888 actor [1:892:2768] 2026-01-08T15:26:06.754852Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:661: TxInitSchema.Execute 2026-01-08T15:26:06.765095Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateInactive, received event# 268828684, Sender [1:877:2759], Recipient [1:892:2768]: NKikimr::TEvTablet::TEvFollowerSyncComplete 2026-01-08T15:26:06.816825Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3119: StateInit, received event# 268828672, Sender [1:878:2760], Recipient [1:896:2771]: 
NKikimr::TEvTablet::TEvBoot 2026-01-08T15:26:06.818049Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:727: TxInitSchema.Complete 2026-01-08T15:26:06.818299Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:50: TDataShard::TTxInit::Execute 2026-01-08T15:26:06.820082Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1324: LoadChangeRecords: QueueSize: 0, at tablet: 72075186224037888 2026-01-08T15:26:06.820156Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1380: LoadLockChangeRecords at tablet: 72075186224037888 2026-01-08T15:26:06.820207Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1429: LoadChangeRecordCommits at tablet: 72075186224037888 2026-01-08T15:26:06.820559Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:94: TDataShard::TTxInit::Complete 2026-01-08T15:26:06.820666Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3119: StateInit, received event# 268828673, Sender [1:878:2760], Recipient [1:896:2771]: NKikimr::TEvTablet::TEvRestored 2026-01-08T15:26:06.820971Z node 1 :TX_DATASHARD INFO: datashard.cpp:374: TDataShard::OnActivateExecutor: tablet 72075186224037889 actor [1:896:2771] 2026-01-08T15:26:06.821159Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:661: TxInitSchema.Execute 2026-01-08T15:26:06.829257Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:102: TDataShard::TTxInitRestored::Execute 2026-01-08T15:26:06.829391Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:142: DataShard 72075186224037888 persisting started state actor id [1:917:2768] in generation 1 2026-01-08T15:26:06.829609Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateInactive, received event# 268828684, Sender [1:878:2760], Recipient [1:896:2771]: NKikimr::TEvTablet::TEvFollowerSyncComplete 2026-01-08T15:26:06.830403Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:727: TxInitSchema.Complete 2026-01-08T15:26:06.830489Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:50: TDataShard::TTxInit::Execute 2026-01-08T15:26:06.831894Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1324: LoadChangeRecords: QueueSize: 0, at tablet: 72075186224037889 2026-01-08T15:26:06.831989Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1380: LoadLockChangeRecords at tablet: 72075186224037889 2026-01-08T15:26:06.832039Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1429: LoadChangeRecordCommits at tablet: 72075186224037889 2026-01-08T15:26:06.832336Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:94: TDataShard::TTxInit::Complete 2026-01-08T15:26:06.832438Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:102: TDataShard::TTxInitRestored::Execute 2026-01-08T15:26:06.832505Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:142: DataShard 72075186224037889 persisting started state actor id [1:923:2771] in generation 1 2026-01-08T15:26:06.843373Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:151: TDataShard::TTxInitRestored::Complete 2026-01-08T15:26:06.884734Z node 1 :TX_DATASHARD INFO: datashard.cpp:418: Switched to work state WaitScheme tabletId 72075186224037888 2026-01-08T15:26:06.884955Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:458: 72075186224037888 not sending time cast registration request in state WaitScheme: missing processing params 2026-01-08T15:26:06.885071Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1259: Change sender created: at tablet: 72075186224037888, actorId: [1:926:2788] 2026-01-08T15:26:06.885108Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1264: Trying to activate change sender: at tablet: 72075186224037888 2026-01-08T15:26:06.885143Z node 1 :TX_DATASHARD INFO: datashard.cpp:1281: Cannot activate change sender: at tablet: 
72075186224037888, state: WaitScheme 2026-01-08T15:26:06.885188Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2026-01-08T15:26:06.885470Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3153: StateWork, received event# 2146435072, Sender [1:892:2768], Recipient [1:892:2768]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvProgressTransaction 2026-01-08T15:26:06.885523Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3178: StateWork, processing event TEvPrivate::TEvProgressTransaction 2026-01-08T15:26:06.885929Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:151: TDataShard::TTxInitRestored::Complete 2026-01-08T15:26:06.885971Z node 1 :TX_DATASHARD INFO: datashard.cpp:418: Switched to work state WaitScheme tabletId 72075186224037889 2026-01-08T15:26:06.886031Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:458: 72075186224037889 not sending time cast registration request in state WaitScheme: missing processing params 2026-01-08T15:26:06.886096Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1259: Change sender created: at tablet: 72075186224037889, actorId: [1:927:2789] 2026-01-08T15:26:06.886121Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1264: Trying to activate change sender: at tablet: 72075186224037889 2026-01-08T15:26:06.886145Z node 1 :TX_DATASHARD INFO: datashard.cpp:1281: Cannot activate change sender: at tablet: 72075186224037889, state: WaitScheme 2026-01-08T15:26:06.886168Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037889 2026-01-08T15:26:06.886352Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 72075186224037888 2026-01-08T15:26:06.886463Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 72075186224037888 2026-01-08T15:26:06.886565Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3153: StateWork, received event# 2146435072, Sender [1:896:2771], Recipient [1:896:2771]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvProgressTransaction 2026-01-08T15:26:06.886604Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3178: StateWork, processing event TEvPrivate::TEvProgressTransaction 2026-01-08T15:26:06.886734Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2026-01-08T15:26:06.886779Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 0 2026-01-08T15:26:06.886827Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:385: Check unit PlanQueue at 72075186224037888 2026-01-08T15:26:06.886862Z node 1 :TX_DATASHARD TRACE: plan_queue_unit.cpp:59: TPlanQueueUnit at 72075186224037888 has no attached operations 2026-01-08T15:26:06.886895Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:403: Unit PlanQueue has no ready operations at 72075186224037888 2026-01-08T15:26:06.886928Z node 1 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186224037888 TxInFly 0 2026-01-08T15:26:06.886971Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2026-01-08T15:26:06.887144Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3153: StateWork, received event# 269877761, Sender [1:893:2769], Recipient [1:892:2768]: NKikimr::TEvTabletPipe::TEvServerConnected 2026-01-08T15:26:06.887187Z node 1 :TX_DATASHARD TRACE: 
datashard_impl.h:3189: StateWork, processing event TEvTabletPipe::TEvServerConnected 2026-01-08T15:26:06.887229Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3740: Server connected at leader tablet# 72075186224037888, clientId# [1:885:2764], serverId# [1:893:2769], sessionId# [0:0:0] 2026-01-08T15:26:06.887277Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 72075186224037889 2026-01-08T15:26:06.887356Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 72075186224037889 2026-01-08T15:26:06.887757Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3153: StateWork, received event# 269549568, Sender [1:400:2399], Recipient [1:893:2769] 2026-01-08T15:26:06.887804Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3159: StateWork, processing event TEvDataShard::TEvProposeTransaction 2026-01-08T15:26:06.887914Z nod ... 24037888 is Executed 2026-01-08T15:26:15.860303Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1931: Advance execution plan for [0:1] at 72075186224037888 executing on unit CompletedOperations 2026-01-08T15:26:15.860339Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1943: Execution plan for [0:1] at 72075186224037888 has finished 2026-01-08T15:26:15.871406Z node 2 :TX_DATASHARD DEBUG: datashard__readset.cpp:91: TTxReadSet::Complete at 72075186224037888 2026-01-08T15:26:15.871462Z node 2 :TX_DATASHARD TRACE: datashard__write.cpp:150: TTxWrite complete: at tablet# 72075186224037888 2026-01-08T15:26:15.871963Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1954: Complete execution for [0:1] at 72075186224037888 on unit FinishProposeWrite 2026-01-08T15:26:15.872000Z node 2 :TX_DATASHARD TRACE: finish_propose_write_unit.cpp:163: Propose transaction complete txid 1 at tablet 72075186224037888 send to client, propose latency: 0 ms, status: STATUS_COMPLETED 2026-01-08T15:26:15.872062Z node 2 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2026-01-08T15:26:15.872448Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3153: StateWork, received event# 269287938, Sender [2:1151:2949], Recipient [2:894:2770]: {TEvReadSet step# 3500 txid# 281474976715661 TabletSource# 72075186224037889 TabletDest# 72075186224037888 SetTabletConsumer# 72075186224037888 Flags# 0 Seqno# 1} 2026-01-08T15:26:15.872472Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3173: StateWork, processing event TEvTxProcessing::TEvReadSetAck 2026-01-08T15:26:15.872495Z node 2 :TX_DATASHARD DEBUG: datashard_outreadset.cpp:150: Receive RS Ack at 72075186224037889 source 72075186224037889 dest 72075186224037888 consumer 72075186224037888 txId 281474976715661 2026-01-08T15:26:16.281061Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3153: StateWork, received event# 269553215, Sender [2:1193:2975], Recipient [2:1151:2949]: NKikimrTxDataShard.TEvRead ReadId: 0 TableId { OwnerId: 72057594046644480 TableId: 38 SchemaVersion: 1 } Columns: 1 Columns: 2 Snapshot { Step: 3500 TxId: 18446744073709551615 } ResultFormat: FORMAT_CELLVEC MaxRows: 1001 MaxBytes: 5242880 Reverse: false TotalRowsLimit: 1001 RangesSize: 1 2026-01-08T15:26:16.286730Z node 2 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2697: TTxReadViaPipeline execute: at tablet# 72075186224037888, FollowerId 0 2026-01-08T15:26:16.287838Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1848: Trying to execute [0:2] at 72075186224037888 on unit CheckRead 2026-01-08T15:26:16.288973Z node 2 :TX_DATASHARD 
TRACE: datashard_pipeline.cpp:1883: Execution status for [0:2] at 72075186224037888 is Executed 2026-01-08T15:26:16.289682Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1931: Advance execution plan for [0:2] at 72075186224037888 executing on unit CheckRead 2026-01-08T15:26:16.294890Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1937: Add [0:2] at 72075186224037888 to execution unit BuildAndWaitDependencies 2026-01-08T15:26:16.294929Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1848: Trying to execute [0:2] at 72075186224037888 on unit BuildAndWaitDependencies 2026-01-08T15:26:16.295754Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:483: Activated operation [0:2] at 72075186224037888 2026-01-08T15:26:16.296176Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1883: Execution status for [0:2] at 72075186224037888 is Executed 2026-01-08T15:26:16.296201Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1931: Advance execution plan for [0:2] at 72075186224037888 executing on unit BuildAndWaitDependencies 2026-01-08T15:26:16.296219Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1937: Add [0:2] at 72075186224037888 to execution unit ExecuteRead 2026-01-08T15:26:16.296239Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1848: Trying to execute [0:2] at 72075186224037888 on unit ExecuteRead 2026-01-08T15:26:16.296717Z node 2 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:1748: 72075186224037888 Execute read# 1, request: { ReadId: 0 TableId { OwnerId: 72057594046644480 TableId: 38 SchemaVersion: 1 } Columns: 1 Columns: 2 Snapshot { Step: 3500 TxId: 18446744073709551615 } ResultFormat: FORMAT_CELLVEC MaxRows: 1001 MaxBytes: 5242880 Reverse: false TotalRowsLimit: 1001 } 2026-01-08T15:26:16.305260Z node 2 :TX_DATASHARD TRACE: datashard.cpp:2488: PromoteImmediatePostExecuteEdges at 72075186224037888 promoting UnprotectedReadEdge to v3500/18446744073709551615 2026-01-08T15:26:16.305635Z node 2 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2396: 72075186224037888 Complete read# {[2:1193:2975], 0} after executionsCount# 1 2026-01-08T15:26:16.306017Z node 2 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2370: 72075186224037888 read iterator# {[2:1193:2975], 0} sends rowCount# 2, bytes# 64, quota rows left# 999, quota bytes left# 5242816, hasUnreadQueries# 0, total queries# 1, firstUnprocessed# 0 2026-01-08T15:26:16.311089Z node 2 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2421: 72075186224037888 read iterator# {[2:1193:2975], 0} finished in read 2026-01-08T15:26:16.311808Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1883: Execution status for [0:2] at 72075186224037888 is Executed 2026-01-08T15:26:16.312153Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1931: Advance execution plan for [0:2] at 72075186224037888 executing on unit ExecuteRead 2026-01-08T15:26:16.312502Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1937: Add [0:2] at 72075186224037888 to execution unit CompletedOperations 2026-01-08T15:26:16.312532Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1848: Trying to execute [0:2] at 72075186224037888 on unit CompletedOperations 2026-01-08T15:26:16.312582Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1883: Execution status for [0:2] at 72075186224037888 is Executed 2026-01-08T15:26:16.312605Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1931: Advance execution plan for [0:2] at 72075186224037888 executing on unit CompletedOperations 2026-01-08T15:26:16.314065Z node 2 :TX_DATASHARD TRACE: 
datashard_pipeline.cpp:1943: Execution plan for [0:2] at 72075186224037888 has finished 2026-01-08T15:26:16.314429Z node 2 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2932: TTxReadViaPipeline(69) Execute with status# Executed at tablet# 72075186224037888 2026-01-08T15:26:16.315169Z node 2 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2981: TTxReadViaPipeline(69) Complete: at tablet# 72075186224037888 2026-01-08T15:26:16.319717Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3153: StateWork, received event# 269553219, Sender [2:1193:2975], Recipient [2:1151:2949]: NKikimrTxDataShard.TEvReadCancel ReadId: 0 2026-01-08T15:26:16.320157Z node 2 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:3668: 72075186224037888 ReadCancel: { ReadId: 0 } 2026-01-08T15:26:16.320634Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3153: StateWork, received event# 269553215, Sender [2:1193:2975], Recipient [2:894:2770]: NKikimrTxDataShard.TEvRead ReadId: 1 TableId { OwnerId: 72057594046644480 TableId: 38 SchemaVersion: 1 } Columns: 1 Columns: 2 Snapshot { Step: 3500 TxId: 18446744073709551615 } ResultFormat: FORMAT_CELLVEC MaxRows: 999 MaxBytes: 5242880 Reverse: false TotalRowsLimit: 999 RangesSize: 1 2026-01-08T15:26:16.320736Z node 2 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2697: TTxReadViaPipeline execute: at tablet# 72075186224037889, FollowerId 0 2026-01-08T15:26:16.320785Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1848: Trying to execute [0:6] at 72075186224037889 on unit CheckRead 2026-01-08T15:26:16.321213Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1883: Execution status for [0:6] at 72075186224037889 is Executed 2026-01-08T15:26:16.321239Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1931: Advance execution plan for [0:6] at 72075186224037889 executing on unit CheckRead 2026-01-08T15:26:16.321264Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1937: Add [0:6] at 72075186224037889 to execution unit BuildAndWaitDependencies 2026-01-08T15:26:16.321613Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1848: Trying to execute [0:6] at 72075186224037889 on unit BuildAndWaitDependencies 2026-01-08T15:26:16.321651Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:483: Activated operation [0:6] at 72075186224037889 2026-01-08T15:26:16.321679Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1883: Execution status for [0:6] at 72075186224037889 is Executed 2026-01-08T15:26:16.321697Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1931: Advance execution plan for [0:6] at 72075186224037889 executing on unit BuildAndWaitDependencies 2026-01-08T15:26:16.321716Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1937: Add [0:6] at 72075186224037889 to execution unit ExecuteRead 2026-01-08T15:26:16.322139Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1848: Trying to execute [0:6] at 72075186224037889 on unit ExecuteRead 2026-01-08T15:26:16.322222Z node 2 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:1748: 72075186224037889 Execute read# 1, request: { ReadId: 1 TableId { OwnerId: 72057594046644480 TableId: 38 SchemaVersion: 1 } Columns: 1 Columns: 2 Snapshot { Step: 3500 TxId: 18446744073709551615 } ResultFormat: FORMAT_CELLVEC MaxRows: 999 MaxBytes: 5242880 Reverse: false TotalRowsLimit: 999 } 2026-01-08T15:26:16.325738Z node 2 :TX_DATASHARD TRACE: datashard.cpp:2488: PromoteImmediatePostExecuteEdges at 72075186224037889 promoting UnprotectedReadEdge to v3500/18446744073709551615 2026-01-08T15:26:16.325777Z node 2 :TX_DATASHARD TRACE: 
datashard__read_iterator.cpp:2396: 72075186224037889 Complete read# {[2:1193:2975], 1} after executionsCount# 1 2026-01-08T15:26:16.325810Z node 2 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2370: 72075186224037889 read iterator# {[2:1193:2975], 1} sends rowCount# 2, bytes# 64, quota rows left# 997, quota bytes left# 5242816, hasUnreadQueries# 0, total queries# 1, firstUnprocessed# 0 2026-01-08T15:26:16.325869Z node 2 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2421: 72075186224037889 read iterator# {[2:1193:2975], 1} finished in read 2026-01-08T15:26:16.325913Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1883: Execution status for [0:6] at 72075186224037889 is Executed 2026-01-08T15:26:16.325935Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1931: Advance execution plan for [0:6] at 72075186224037889 executing on unit ExecuteRead 2026-01-08T15:26:16.326672Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1937: Add [0:6] at 72075186224037889 to execution unit CompletedOperations 2026-01-08T15:26:16.326702Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1848: Trying to execute [0:6] at 72075186224037889 on unit CompletedOperations 2026-01-08T15:26:16.326745Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1883: Execution status for [0:6] at 72075186224037889 is Executed 2026-01-08T15:26:16.326765Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1931: Advance execution plan for [0:6] at 72075186224037889 executing on unit CompletedOperations 2026-01-08T15:26:16.326788Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1943: Execution plan for [0:6] at 72075186224037889 has finished 2026-01-08T15:26:16.326813Z node 2 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2932: TTxReadViaPipeline(69) Execute with status# Executed at tablet# 72075186224037889 2026-01-08T15:26:16.326889Z node 2 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2981: TTxReadViaPipeline(69) Complete: at tablet# 72075186224037889 2026-01-08T15:26:16.334979Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3153: StateWork, received event# 269553219, Sender [2:1193:2975], Recipient [2:894:2770]: NKikimrTxDataShard.TEvReadCancel ReadId: 1 2026-01-08T15:26:16.335036Z node 2 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:3668: 72075186224037889 ReadCancel: { ReadId: 1 } |85.1%| [TM] {BAZEL_UPLOAD} ydb/core/tx/datashard/ut_locks/unittest >> TSentinelUnstableTests::BSControllerCantChangeStatus [GOOD] >> MediatorTest::MultipleTablets [GOOD] >> MediatorTest::TabletAckBeforePlanComplete |85.1%| [TA] $(B)/ydb/core/tx/datashard/ut_locks/test-results/unittest/{meta.json ... results_accumulator.log} |85.1%| [TA] {BAZEL_UPLOAD} $(B)/ydb/core/tx/datashard/ut_locks/test-results/unittest/{meta.json ... 
results_accumulator.log} ------- [TM] {asan, default-linux-x86_64, release} ydb/core/cms/ut_sentinel_unstable/unittest >> TSentinelUnstableTests::BSControllerCantChangeStatus [GOOD] Test command err: 2026-01-08T15:25:49.738385Z node 1 :CMS DEBUG: sentinel.cpp:1047: [Sentinel] [Main] UpdateConfig 2026-01-08T15:25:49.738449Z node 1 :CMS DEBUG: sentinel.cpp:992: [Sentinel] [Main] Start ConfigUpdater 2026-01-08T15:25:49.738523Z node 1 :CMS DEBUG: sentinel.cpp:1063: [Sentinel] [Main] UpdateState 2026-01-08T15:25:49.738552Z node 1 :CMS INFO: sentinel.cpp:987: [Sentinel] [Main] StateUpdater was delayed 2026-01-08T15:25:49.738593Z node 1 :CMS DEBUG: sentinel.cpp:549: [Sentinel] [ConfigUpdater] Request blobstorage config: attempt# 0 2026-01-08T15:25:49.738681Z node 1 :CMS DEBUG: sentinel.cpp:562: [Sentinel] [ConfigUpdater] Request CMS cluster state: attempt# 0 2026-01-08T15:25:49.741870Z node 1 :CMS DEBUG: sentinel.cpp:571: [Sentinel] [ConfigUpdater] Handle TEvCms::TEvClusterStateResponse: response# Status { Code: OK } State { Hosts { Name: "node-1" State: UNKNOWN Devices { Name: "pdisk-1-4" State: DOWN Timestamp: 0 } Devices { Name: "pdisk-1-5" State: DOWN Timestamp: 0 } Devices { Name: "pdisk-1-6" State: DOWN Timestamp: 0 } Devices { Name: "pdisk-1-7" State: DOWN Timestamp: 0 } Timestamp: 0 NodeId: 1 InterconnectPort: 10000 Location { Rack: "rack-1" } StartTimeSeconds: 0 } Hosts { Name: "node-2" State: UNKNOWN Devices { Name: "pdisk-2-8" State: DOWN Timestamp: 0 } Devices { Name: "pdisk-2-9" State: DOWN Timestamp: 0 } Devices { Name: "pdisk-2-10" State: DOWN Timestamp: 0 } Devices { Name: "pdisk-2-11" State: DOWN Timestamp: 0 } Timestamp: 0 NodeId: 2 InterconnectPort: 10000 Location { Rack: "rack-2" } StartTimeSeconds: 0 } Hosts { Name: "node-3" State: UNKNOWN Devices { Name: "pdisk-3-12" State: DOWN Timestamp: 0 } Devices { Name: "pdisk-3-13" State: DOWN Timestamp: 0 } Devices { Name: "pdisk-3-14" State: DOWN Timestamp: 0 } Devices { Name: "pdisk-3-15" State: DOWN Timestamp: 0 } Timestamp: 0 NodeId: 3 InterconnectPort: 10000 Location { Rack: "rack-3" } StartTimeSeconds: 0 } Hosts { Name: "node-4" State: UNKNOWN Devices { Name: "pdisk-4-16" State: DOWN Timestamp: 0 } Devices { Name: "pdisk-4-17" State: DOWN Timestamp: 0 } Devices { Name: "pdisk-4-18" State: DOWN Timestamp: 0 } Devices { Name: "pdisk-4-19" State: DOWN Timestamp: 0 } Timestamp: 0 NodeId: 4 InterconnectPort: 10000 Location { Rack: "rack-4" } StartTimeSeconds: 0 } Hosts { Name: "node-5" State: UNKNOWN Devices { Name: "pdisk-5-20" State: DOWN Timestamp: 0 } Devices { Name: "pdisk-5-21" State: DOWN Timestamp: 0 } Devices { Name: "pdisk-5-22" State: DOWN Timestamp: 0 } Devices { Name: "pdisk-5-23" State: DOWN Timestamp: 0 } Timestamp: 0 NodeId: 5 InterconnectPort: 10000 Location { Rack: "rack-5" } StartTimeSeconds: 0 } Hosts { Name: "node-6" State: UNKNOWN Devices { Name: "pdisk-6-24" State: DOWN Timestamp: 0 } Devices { Name: "pdisk-6-25" State: DOWN Timestamp: 0 } Devices { Name: "pdisk-6-26" State: DOWN Timestamp: 0 } Devices { Name: "pdisk-6-27" State: DOWN Timestamp: 0 } Timestamp: 0 NodeId: 6 InterconnectPort: 10000 Location { Rack: "rack-6" } StartTimeSeconds: 0 } Hosts { Name: "node-7" State: UNKNOWN Devices { Name: "pdisk-7-28" State: DOWN Timestamp: 0 } Devices { Name: "pdisk-7-29" State: DOWN Timestamp: 0 } Devices { Name: "pdisk-7-30" State: DOWN Timestamp: 0 } Devices { Name: "pdisk-7-31" State: DOWN Timestamp: 0 } Timestamp: 0 NodeId: 7 InterconnectPort: 10000 Location { Rack: "rack-7" } StartTimeSeconds: 0 } Hosts { Name: 
"node-8" State: UNKNOWN Devices { Name: "pdisk-8-32" State: DOWN Timestamp: 0 } Devices { Name: "pdisk-8-33" State: DOWN Timestamp: 0 } Devices { Name: "pdisk-8-34" State: DOWN Timestamp: 0 } Devices { Name: "pdisk-8-35" State: DOWN Timestamp: 0 } Timestamp: 0 NodeId: 8 InterconnectPort: 10000 Location { Rack: "rack-8" } StartTimeSeconds: 0 } } 2026-01-08T15:25:49.750865Z node 1 :CMS DEBUG: sentinel.cpp:624: [Sentinel] [ConfigUpdater] Handle TEvBlobStorage::TEvControllerConfigResponse: response# Status { Success: true BaseConfig { PDisk { NodeId: 1 PDiskId: 4 Path: "/1/pdisk-4.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 1 PDiskId: 5 Path: "/1/pdisk-5.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 1 PDiskId: 6 Path: "/1/pdisk-6.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 1 PDiskId: 7 Path: "/1/pdisk-7.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 2 PDiskId: 8 Path: "/2/pdisk-8.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 2 PDiskId: 9 Path: "/2/pdisk-9.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 2 PDiskId: 10 Path: "/2/pdisk-10.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 2 PDiskId: 11 Path: "/2/pdisk-11.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 3 PDiskId: 12 Path: "/3/pdisk-12.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 3 PDiskId: 13 Path: "/3/pdisk-13.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 3 PDiskId: 14 Path: "/3/pdisk-14.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 3 PDiskId: 15 Path: "/3/pdisk-15.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 4 PDiskId: 16 Path: "/4/pdisk-16.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 4 PDiskId: 17 Path: "/4/pdisk-17.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 4 PDiskId: 18 Path: "/4/pdisk-18.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 4 PDiskId: 19 Path: "/4/pdisk-19.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 5 PDiskId: 20 Path: "/5/pdisk-20.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 5 PDiskId: 21 Path: "/5/pdisk-21.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 5 PDiskId: 22 Path: "/5/pdisk-22.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 5 PDiskId: 23 Path: "/5/pdisk-23.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 6 PDiskId: 24 Path: "/6/pdisk-24.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 6 PDiskId: 25 Path: "/6/pdisk-25.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 6 PDiskId: 26 Path: "/6/pdisk-26.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 6 PDiskId: 27 Path: "/6/pdisk-27.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 7 PDiskId: 28 Path: "/7/pdisk-28.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 7 PDiskId: 29 Path: "/7/pdisk-29.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 7 PDiskId: 30 Path: "/7/pdisk-30.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 7 PDiskId: 31 Path: "/7/pdisk-31.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 8 PDiskId: 32 Path: "/8/pdisk-32.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 8 PDiskId: 33 Path: "/8/pdisk-33.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 8 PDiskId: 34 Path: "/8/pdisk-34.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 8 PDiskId: 35 Path: "/8/pdisk-35.data" Guid: 1 DriveStatus: ACTIVE } VSlot { VSlotId { NodeId: 1 PDiskId: 4 VSlotId: 1000 } GroupGeneration: 1 } VSlot { VSlotId { NodeId: 1 PDiskId: 4 VSlotId: 1001 } GroupId: 1 GroupGeneration: 1 } VSlot { VSlotId { NodeId: 1 PDiskId: 4 VSlotId: 1002 } GroupId: 2 GroupGeneration: 1 } VSlot { VSlotId { NodeId: 1 PDiskId: 4 VSlotId: 1003 } 
GroupId: 3 GroupGeneration: 1 } VSlot { VSlotId { NodeId: 1 PDiskId: 5 VSlotId: 1000 } GroupId: 4 GroupGeneration: 1 } VSlot { VSlotId { NodeId: 1 PDiskId: 5 VSlotId: 1001 } GroupId: 5 GroupGeneration: 1 } VSlot { VSlotId { NodeId: 1 PDiskId: 5 VSlotId: 1002 } GroupId: 6 GroupGeneration: 1 } VSlot { VSlotId { NodeId: 1 PDiskId: 5 VSlotId: 1003 } GroupId: 7 GroupGeneration: 1 } VSlot { VSlotId { NodeId: 1 PDiskId: 6 VSlotId: 1000 } GroupId: 8 GroupGeneration: 1 } VSlot { VSlotId { NodeId: 1 PDiskId: 6 VSlotId: 1001 } GroupId: 9 GroupGeneration: 1 } VSlot { VSlotId { NodeId: 1 PDiskId: 6 VSlotId: 1002 } GroupId: 10 GroupGeneration: 1 } VSlot { VSlotId { NodeId: 1 PDiskId: 6 VSlotId: 1003 } GroupId: 11 GroupGeneration: 1 } VSlot { VSlotId { NodeId: 1 PDiskId: 7 VSlotId: 1000 } GroupId: 12 GroupGeneration: 1 } VSlot { VSlotId { NodeId: 1 PDiskId: 7 VSlotId: 1001 } GroupId: 13 GroupGeneration: 1 } VSlot { VSlotId { NodeId: 1 PDiskId: 7 VSlotId: 1002 } GroupId: 14 GroupGeneration: 1 } VSlot { VSlotId { NodeId: 1 PDiskId: 7 VSlotId: 1003 } GroupId: 15 GroupGeneration: 1 } VSlot { VSlotId { NodeId: 2 PDiskId: 8 VSlotId: 1000 } GroupGeneration: 1 FailDomainIdx: 1 } VSlot { VSlotId { NodeId: 2 PDiskId: 8 VSlotId: 1001 } GroupId: 1 GroupGeneration: 1 FailDomainIdx: 1 } VSlot { VSlotId { NodeId: 2 PDiskId: 8 VSlotId: 1002 } GroupId: 2 GroupGeneration: 1 FailDomainIdx: 1 } VSlot { VSlotId { NodeId: 2 PDiskId: 8 VSlotId: 1003 } GroupId: 3 GroupGeneration: 1 FailDomainIdx: 1 } VSlot { VSlotId { NodeId: 2 PDiskId: 9 VSlotId: 1000 } GroupId: 4 GroupGeneration: 1 FailDomainIdx: 1 } VSlot { VSlotId { NodeId: 2 PDiskId: 9 VSlotId: 1001 } GroupId: 5 GroupGeneration: 1 FailDomainIdx: 1 } VSlot { VSlotId { NodeId: 2 PDiskId: 9 VSlotId: 1002 } GroupId: 6 GroupGeneration: 1 FailDomainIdx: 1 } VSlot { VSlotId { NodeId: 2 PDiskId: 9 VSlotId: 1003 } GroupId: 7 GroupGeneration: 1 FailDomainIdx: 1 } VSlot { VSlotId { NodeId: 2 PDiskId: 10 VSlotId: 1000 } GroupId: 8 GroupGeneration: 1 FailDomainIdx: 1 } VSlot { VSlotId { NodeId: 2 PDiskId: 10 VSlotId: 1001 } GroupId: 9 GroupGeneration: 1 FailDomainIdx: 1 } VSlot { VSlotId { NodeId: 2 PDiskId: 10 VSlotId: 1002 } GroupId: 10 GroupGeneration: 1 FailDomainIdx: 1 } VSlot { VSlotId { NodeId: 2 PDiskId: 10 VSlotId: 1003 } GroupId: 11 GroupGeneration: 1 FailDomainIdx: 1 } VSlot { VSlotId { NodeId: 2 PDiskId: 11 VSlotId: 1000 } GroupId: 12 GroupGeneration: 1 FailDomainIdx: 1 } VSlot { VSlotId { NodeId: 2 PDiskId: 11 VSlotId: 1001 } GroupId: 13 GroupGeneration: 1 FailDomainIdx: 1 } VSlot { VSlotId { NodeId: 2 PDiskId: 11 VSlotId: 1002 } GroupId: 14 GroupGeneration: 1 FailDomainIdx: 1 } VSlot { VSlotId { NodeId: 2 PDiskId: 11 VSlotId: 1003 } GroupId: 15 GroupGeneration: 1 FailDomainIdx: 1 } VSlot { VSlotId { NodeId: 3 PDiskId: 12 VSlotId: 1000 } GroupGeneration: 1 FailDomainIdx: 2 } VSlot { VSlotId { NodeId: 3 PDiskId: 12 VSlotId: 1001 } GroupId: 1 GroupGeneration: 1 FailDomainIdx: 2 } VSlot { VSlotId { NodeId: 3 PDiskId: 12 VSlotId: 1002 } GroupId: 2 GroupGeneration: 1 FailDomainIdx: 2 } VSlot { VSlotId { NodeId: 3 PDiskId: 12 VSlotId: 1003 } GroupId: 3 GroupGeneration: 1 FailDomainIdx: 2 } VSlot { VSlotId { NodeId: 3 PDiskId: 13 VSlotId: 1000 } GroupId: 4 GroupGeneration: 1 FailDomainIdx: 2 } VSlot { VSlotId { NodeId: 3 PDiskId: 13 VSlotId: 1001 } GroupId: 5 GroupGeneration: 1 FailDomainIdx: 2 } VSlot { VSlotId { NodeId: 3 PDiskId: 13 VSlotId: 1002 } GroupId: 6 GroupGeneration: 1 FailDomainIdx: 2 } VSlot { VSlotId { NodeId: 3 PDiskId: 13 VSlotId: 1003 } GroupId: 7 
GroupGeneration: 1 FailDomainIdx: 2 } VSlot { VSlotId { NodeId: 3 PDiskId: 14 VSlotId: 1000 } GroupId: 8 GroupGeneration: 1 FailDomainIdx: 2 } VSlot { VSlotId { NodeId: 3 PDiskId: 14 VSlotId: 1001 } GroupId: 9 GroupGeneration: 1 FailDomainIdx: 2 } VSlot { VSlotId { NodeId: 3 PDiskId: 14 VSlotId: 1002 } GroupId: 10 GroupGeneration: 1 FailDomainIdx: 2 } VSlot { VSlotId { NodeId: 3 PDiskId: 14 VSlotId: 1003 } GroupId: 11 GroupGeneration: 1 FailDomainIdx: 2 } VSlot { VSlotId { NodeId: 3 PDiskId: 15 VSlotId: 1000 } GroupId: 12 GroupGeneration: 1 FailDomainIdx: 2 } VSlot { VSlotId { NodeId: 3 PDiskId: 15 VSlotId: 1001 } GroupId: 13 GroupGeneration: 1 FailDomainIdx: 2 } VSlot { VSlotId { NodeId: 3 PDiskId: 15 VSlotId: 1002 } GroupId: 14 GroupGeneration: 1 FailDomainIdx: 2 } VSlot { VSlotId { NodeId: 3 PDiskId: 15 VSlotId: 1003 } GroupId: 15 GroupGeneration: 1 FailDomainIdx: 2 } VSlot { VSlotId { NodeId: 4 PDiskId: 16 VSlotId: 1000 } GroupGeneration: 1 FailDomainIdx: 3 } VSlot { VSlotId { NodeId: 4 PDiskId: 16 VSlotId: 1001 } GroupId: 1 GroupGeneration: 1 FailDomainIdx: 3 } VSlot { VSlotId { NodeId: 4 PDiskId: 16 VSlotId: 1002 } GroupId: 2 GroupGeneration: 1 FailDomainIdx: 3 } VSlot { VSlotId { NodeId: 4 PDiskId: 16 VSlotId: 1003 } GroupId: 3 GroupGeneration: 1 FailDomainIdx: 3 } VSlot { VSlotId { NodeId: 4 PDiskId: 17 VSlotId: 1000 } GroupId: 4 GroupGeneration: 1 FailDomainIdx: 3 } VSlot { VSlotId { NodeId: 4 PDiskId: 17 VSlotId: 1001 } GroupId: 5 GroupGeneration: 1 FailDomainIdx: 3 } VSlot { VSlotId { NodeId: 4 PDiskId: 17 VSlotId: 100 ... 374182400 TotalSize: 214748364800 State: Normal } ResponseTime: 37820110 2026-01-08T15:26:18.413938Z node 1 :CMS DEBUG: sentinel.cpp:796: [Sentinel] [StateUpdater] Handle TEvWhiteboard::TEvPDiskStateResponse: nodeId# 2, response# PDiskStateInfo { PDiskId: 8 CreateTime: 0 ChangeTime: 0 Path: "/2/pdisk-8.data" Guid: 1 AvailableSize: 107374182400 TotalSize: 214748364800 State: Normal } PDiskStateInfo { PDiskId: 9 CreateTime: 0 ChangeTime: 0 Path: "/2/pdisk-9.data" Guid: 1 AvailableSize: 107374182400 TotalSize: 214748364800 State: Normal } PDiskStateInfo { PDiskId: 10 CreateTime: 0 ChangeTime: 0 Path: "/2/pdisk-10.data" Guid: 1 AvailableSize: 107374182400 TotalSize: 214748364800 State: Normal } PDiskStateInfo { PDiskId: 11 CreateTime: 0 ChangeTime: 0 Path: "/2/pdisk-11.data" Guid: 1 AvailableSize: 107374182400 TotalSize: 214748364800 State: Normal } ResponseTime: 37820110 2026-01-08T15:26:18.415508Z node 1 :CMS DEBUG: sentinel.cpp:796: [Sentinel] [StateUpdater] Handle TEvWhiteboard::TEvPDiskStateResponse: nodeId# 3, response# PDiskStateInfo { PDiskId: 12 CreateTime: 0 ChangeTime: 0 Path: "/3/pdisk-12.data" Guid: 1 AvailableSize: 107374182400 TotalSize: 214748364800 State: Normal } PDiskStateInfo { PDiskId: 13 CreateTime: 0 ChangeTime: 0 Path: "/3/pdisk-13.data" Guid: 1 AvailableSize: 107374182400 TotalSize: 214748364800 State: Normal } PDiskStateInfo { PDiskId: 14 CreateTime: 0 ChangeTime: 0 Path: "/3/pdisk-14.data" Guid: 1 AvailableSize: 107374182400 TotalSize: 214748364800 State: Normal } PDiskStateInfo { PDiskId: 15 CreateTime: 0 ChangeTime: 0 Path: "/3/pdisk-15.data" Guid: 1 AvailableSize: 107374182400 TotalSize: 214748364800 State: Normal } ResponseTime: 37820110 2026-01-08T15:26:18.428449Z node 1 :CMS DEBUG: sentinel.cpp:1068: [Sentinel] [Main] State was updated in 0.000000s 2026-01-08T15:26:18.442648Z node 1 :CMS DEBUG: sentinel.cpp:1063: [Sentinel] [Main] UpdateState 2026-01-08T15:26:18.442702Z node 1 :CMS DEBUG: sentinel.cpp:992: [Sentinel] [Main] Start 
StateUpdater 2026-01-08T15:26:18.443171Z node 1 :CMS DEBUG: sentinel.cpp:786: [Sentinel] [StateUpdater] Request pdisks state: nodeId# 1, wbId# [1:8388350642965737326:1634689637] 2026-01-08T15:26:18.443211Z node 1 :CMS DEBUG: sentinel.cpp:786: [Sentinel] [StateUpdater] Request pdisks state: nodeId# 2, wbId# [2:8388350642965737326:1634689637] 2026-01-08T15:26:18.443237Z node 1 :CMS DEBUG: sentinel.cpp:786: [Sentinel] [StateUpdater] Request pdisks state: nodeId# 3, wbId# [3:8388350642965737326:1634689637] 2026-01-08T15:26:18.443260Z node 1 :CMS DEBUG: sentinel.cpp:786: [Sentinel] [StateUpdater] Request pdisks state: nodeId# 4, wbId# [4:8388350642965737326:1634689637] 2026-01-08T15:26:18.443956Z node 1 :CMS DEBUG: sentinel.cpp:786: [Sentinel] [StateUpdater] Request pdisks state: nodeId# 5, wbId# [5:8388350642965737326:1634689637] 2026-01-08T15:26:18.443986Z node 1 :CMS DEBUG: sentinel.cpp:786: [Sentinel] [StateUpdater] Request pdisks state: nodeId# 6, wbId# [6:8388350642965737326:1634689637] 2026-01-08T15:26:18.444686Z node 1 :CMS DEBUG: sentinel.cpp:786: [Sentinel] [StateUpdater] Request pdisks state: nodeId# 7, wbId# [7:8388350642965737326:1634689637] 2026-01-08T15:26:18.444714Z node 1 :CMS DEBUG: sentinel.cpp:786: [Sentinel] [StateUpdater] Request pdisks state: nodeId# 8, wbId# [8:8388350642965737326:1634689637] 2026-01-08T15:26:18.449824Z node 1 :CMS DEBUG: sentinel.cpp:796: [Sentinel] [StateUpdater] Handle TEvWhiteboard::TEvPDiskStateResponse: nodeId# 1, response# PDiskStateInfo { PDiskId: 4 CreateTime: 0 ChangeTime: 0 Path: "/1/pdisk-4.data" Guid: 1 AvailableSize: 107374182400 TotalSize: 214748364800 State: Normal } PDiskStateInfo { PDiskId: 5 CreateTime: 0 ChangeTime: 0 Path: "/1/pdisk-5.data" Guid: 1 AvailableSize: 107374182400 TotalSize: 214748364800 State: Normal } PDiskStateInfo { PDiskId: 6 CreateTime: 0 ChangeTime: 0 Path: "/1/pdisk-6.data" Guid: 1 AvailableSize: 107374182400 TotalSize: 214748364800 State: Normal } PDiskStateInfo { PDiskId: 7 CreateTime: 0 ChangeTime: 0 Path: "/1/pdisk-7.data" Guid: 1 AvailableSize: 107374182400 TotalSize: 214748364800 State: Normal } ResponseTime: 37860110 2026-01-08T15:26:18.455500Z node 1 :CMS DEBUG: sentinel.cpp:796: [Sentinel] [StateUpdater] Handle TEvWhiteboard::TEvPDiskStateResponse: nodeId# 6, response# PDiskStateInfo { PDiskId: 24 CreateTime: 0 ChangeTime: 0 Path: "/6/pdisk-24.data" Guid: 1 AvailableSize: 107374182400 TotalSize: 214748364800 State: Normal } PDiskStateInfo { PDiskId: 25 CreateTime: 0 ChangeTime: 0 Path: "/6/pdisk-25.data" Guid: 1 AvailableSize: 107374182400 TotalSize: 214748364800 State: Normal } PDiskStateInfo { PDiskId: 26 CreateTime: 0 ChangeTime: 0 Path: "/6/pdisk-26.data" Guid: 1 AvailableSize: 107374182400 TotalSize: 214748364800 State: Normal } PDiskStateInfo { PDiskId: 27 CreateTime: 0 ChangeTime: 0 Path: "/6/pdisk-27.data" Guid: 1 AvailableSize: 107374182400 TotalSize: 214748364800 State: Normal } ResponseTime: 37860110 2026-01-08T15:26:18.456774Z node 1 :CMS DEBUG: sentinel.cpp:796: [Sentinel] [StateUpdater] Handle TEvWhiteboard::TEvPDiskStateResponse: nodeId# 7, response# PDiskStateInfo { PDiskId: 28 CreateTime: 0 ChangeTime: 0 Path: "/7/pdisk-28.data" Guid: 1 AvailableSize: 107374182400 TotalSize: 214748364800 State: Normal } PDiskStateInfo { PDiskId: 29 CreateTime: 0 ChangeTime: 0 Path: "/7/pdisk-29.data" Guid: 1 AvailableSize: 107374182400 TotalSize: 214748364800 State: Normal } PDiskStateInfo { PDiskId: 30 CreateTime: 0 ChangeTime: 0 Path: "/7/pdisk-30.data" Guid: 1 AvailableSize: 107374182400 TotalSize: 
214748364800 State: Normal } PDiskStateInfo { PDiskId: 31 CreateTime: 0 ChangeTime: 0 Path: "/7/pdisk-31.data" Guid: 1 AvailableSize: 107374182400 TotalSize: 214748364800 State: Normal } ResponseTime: 37860110 2026-01-08T15:26:18.460538Z node 1 :CMS DEBUG: sentinel.cpp:796: [Sentinel] [StateUpdater] Handle TEvWhiteboard::TEvPDiskStateResponse: nodeId# 8, response# PDiskStateInfo { PDiskId: 32 CreateTime: 0 ChangeTime: 0 Path: "/8/pdisk-32.data" Guid: 1 AvailableSize: 107374182400 TotalSize: 214748364800 State: Normal } PDiskStateInfo { PDiskId: 33 CreateTime: 0 ChangeTime: 0 Path: "/8/pdisk-33.data" Guid: 1 AvailableSize: 107374182400 TotalSize: 214748364800 State: Normal } PDiskStateInfo { PDiskId: 34 CreateTime: 0 ChangeTime: 0 Path: "/8/pdisk-34.data" Guid: 1 AvailableSize: 107374182400 TotalSize: 214748364800 State: Normal } PDiskStateInfo { PDiskId: 35 CreateTime: 0 ChangeTime: 0 Path: "/8/pdisk-35.data" Guid: 1 AvailableSize: 107374182400 TotalSize: 214748364800 State: Normal } ResponseTime: 37860110 2026-01-08T15:26:18.461916Z node 1 :CMS DEBUG: sentinel.cpp:796: [Sentinel] [StateUpdater] Handle TEvWhiteboard::TEvPDiskStateResponse: nodeId# 2, response# PDiskStateInfo { PDiskId: 8 CreateTime: 0 ChangeTime: 0 Path: "/2/pdisk-8.data" Guid: 1 AvailableSize: 107374182400 TotalSize: 214748364800 State: Normal } PDiskStateInfo { PDiskId: 9 CreateTime: 0 ChangeTime: 0 Path: "/2/pdisk-9.data" Guid: 1 AvailableSize: 107374182400 TotalSize: 214748364800 State: Normal } PDiskStateInfo { PDiskId: 10 CreateTime: 0 ChangeTime: 0 Path: "/2/pdisk-10.data" Guid: 1 AvailableSize: 107374182400 TotalSize: 214748364800 State: Normal } PDiskStateInfo { PDiskId: 11 CreateTime: 0 ChangeTime: 0 Path: "/2/pdisk-11.data" Guid: 1 AvailableSize: 107374182400 TotalSize: 214748364800 State: Normal } ResponseTime: 37860110 2026-01-08T15:26:18.462499Z node 1 :CMS DEBUG: sentinel.cpp:796: [Sentinel] [StateUpdater] Handle TEvWhiteboard::TEvPDiskStateResponse: nodeId# 3, response# PDiskStateInfo { PDiskId: 12 CreateTime: 0 ChangeTime: 0 Path: "/3/pdisk-12.data" Guid: 1 AvailableSize: 107374182400 TotalSize: 214748364800 State: Normal } PDiskStateInfo { PDiskId: 13 CreateTime: 0 ChangeTime: 0 Path: "/3/pdisk-13.data" Guid: 1 AvailableSize: 107374182400 TotalSize: 214748364800 State: Normal } PDiskStateInfo { PDiskId: 14 CreateTime: 0 ChangeTime: 0 Path: "/3/pdisk-14.data" Guid: 1 AvailableSize: 107374182400 TotalSize: 214748364800 State: Normal } PDiskStateInfo { PDiskId: 15 CreateTime: 0 ChangeTime: 0 Path: "/3/pdisk-15.data" Guid: 1 AvailableSize: 107374182400 TotalSize: 214748364800 State: Normal } ResponseTime: 37860110 2026-01-08T15:26:18.463391Z node 1 :CMS DEBUG: sentinel.cpp:796: [Sentinel] [StateUpdater] Handle TEvWhiteboard::TEvPDiskStateResponse: nodeId# 4, response# PDiskStateInfo { PDiskId: 16 CreateTime: 0 ChangeTime: 0 Path: "/4/pdisk-16.data" Guid: 1 AvailableSize: 107374182400 TotalSize: 214748364800 State: Normal } PDiskStateInfo { PDiskId: 17 CreateTime: 0 ChangeTime: 0 Path: "/4/pdisk-17.data" Guid: 1 AvailableSize: 107374182400 TotalSize: 214748364800 State: Normal } PDiskStateInfo { PDiskId: 18 CreateTime: 0 ChangeTime: 0 Path: "/4/pdisk-18.data" Guid: 1 AvailableSize: 107374182400 TotalSize: 214748364800 State: Normal } PDiskStateInfo { PDiskId: 19 CreateTime: 0 ChangeTime: 0 Path: "/4/pdisk-19.data" Guid: 1 AvailableSize: 107374182400 TotalSize: 214748364800 State: Normal } ResponseTime: 37860110 2026-01-08T15:26:18.465071Z node 1 :CMS DEBUG: sentinel.cpp:796: [Sentinel] [StateUpdater] Handle 
TEvWhiteboard::TEvPDiskStateResponse: nodeId# 5, response# PDiskStateInfo { PDiskId: 20 CreateTime: 0 ChangeTime: 0 Path: "/5/pdisk-20.data" Guid: 1 AvailableSize: 107374182400 TotalSize: 214748364800 State: Normal } PDiskStateInfo { PDiskId: 21 CreateTime: 0 ChangeTime: 0 Path: "/5/pdisk-21.data" Guid: 1 AvailableSize: 107374182400 TotalSize: 214748364800 State: Normal } PDiskStateInfo { PDiskId: 22 CreateTime: 0 ChangeTime: 0 Path: "/5/pdisk-22.data" Guid: 1 AvailableSize: 107374182400 TotalSize: 214748364800 State: Normal } PDiskStateInfo { PDiskId: 23 CreateTime: 0 ChangeTime: 0 Path: "/5/pdisk-23.data" Guid: 1 AvailableSize: 107374182400 TotalSize: 214748364800 State: Normal } ResponseTime: 37860110 2026-01-08T15:26:18.465127Z node 1 :CMS DEBUG: sentinel.cpp:1068: [Sentinel] [Main] State was updated in 0.000000s 2026-01-08T15:26:18.469592Z node 1 :CMS NOTICE: sentinel.cpp:1147: [Sentinel] [Main] PDisk status changed: pdiskId# 5:20, status# INACTIVE, required status# ACTIVE, reason# PrevState# Normal State# Normal StateCounter# 60 StateLimit# 60, dry run# 0 2026-01-08T15:26:18.469652Z node 1 :CMS NOTICE: sentinel.cpp:1147: [Sentinel] [Main] PDisk status changed: pdiskId# 3:14, status# INACTIVE, required status# ACTIVE, reason# PrevState# Normal State# Normal StateCounter# 60 StateLimit# 60, dry run# 0 2026-01-08T15:26:18.469683Z node 1 :CMS NOTICE: sentinel.cpp:1147: [Sentinel] [Main] PDisk status changed: pdiskId# 4:19, status# INACTIVE, required status# ACTIVE, reason# PrevState# Normal State# Normal StateCounter# 60 StateLimit# 60, dry run# 0 2026-01-08T15:26:18.470090Z node 1 :CMS DEBUG: sentinel.cpp:1212: [Sentinel] [Main] Change pdisk status: requestsSize# 3 2026-01-08T15:26:18.471065Z node 1 :CMS DEBUG: sentinel.cpp:1343: [Sentinel] [Main] Handle TEvBlobStorage::TEvControllerConfigResponse: response# Status { Success: true } Status { Success: true } Status { }, cookie# 140 2026-01-08T15:26:18.471092Z node 1 :CMS ERROR: sentinel.cpp:1385: [Sentinel] [Main] Unsuccesful response from BSC: error# 2026-01-08T15:26:18.482426Z node 1 :CMS DEBUG: sentinel.cpp:1403: [Sentinel] [Main] Retrying: attempt# 1 2026-01-08T15:26:18.482483Z node 1 :CMS DEBUG: sentinel.cpp:1212: [Sentinel] [Main] Change pdisk status: requestsSize# 3 2026-01-08T15:26:18.484866Z node 1 :CMS DEBUG: sentinel.cpp:1343: [Sentinel] [Main] Handle TEvBlobStorage::TEvControllerConfigResponse: response# Status { Success: true } Status { Success: true } Status { Success: true } Success: true, cookie# 141 2026-01-08T15:26:18.486074Z node 1 :CMS NOTICE: sentinel.cpp:1367: [Sentinel] [Main] PDisk status has been changed: pdiskId# 3:14 2026-01-08T15:26:18.496560Z node 1 :CMS NOTICE: sentinel.cpp:1367: [Sentinel] [Main] PDisk status has been changed: pdiskId# 4:19 2026-01-08T15:26:18.496617Z node 1 :CMS NOTICE: sentinel.cpp:1367: [Sentinel] [Main] PDisk status has been changed: pdiskId# 5:20 |85.1%| [TM] {BAZEL_UPLOAD} ydb/core/cms/ut_sentinel_unstable/unittest >> TTxDataShardBuildFulltextDictScan::Build-SkipFirst+SkipLast [GOOD] >> TTxDataShardBuildFulltextDictScan::Build+SkipFirst+SkipLast >> TestFormatHandler::ManyRawClients [GOOD] >> TestFormatHandler::ClientValidation >> DataShardCompaction::CompactBorrowed [GOOD] >> DataShardCompaction::CompactBorrowedTxStatus >> test_transform.py::TestYamlConfigTransformations::test_domains_config[dump_ds_init] [GOOD] >> EventHolderPool::Overflow >> EventHolderPool::Overflow [GOOD] >> EventHolderPool::MemConsumptionSmall [GOOD] >> EventHolderPool::MemConsumptionLarge [GOOD] >> 
Interconnect::SessionContinuation >> MediatorTest::TabletAckBeforePlanComplete [GOOD] |85.1%| [TM] {asan, default-linux-x86_64, release} ydb/library/yaml_config/ut_transform/py3test >> test_transform.py::TestYamlConfigTransformations::test_domains_config[dump_ds_init] [GOOD] |85.1%| [TM] {BAZEL_UPLOAD} ydb/library/yaml_config/ut_transform/py3test >> MediatorTest::TabletAckWhenDead >> TestFormatHandler::ClientValidation [GOOD] >> DescribeSchemaSecretsService::GetUpdatedValue [GOOD] >> DescribeSchemaSecretsService::GetUnexistingValue >> TestFormatHandler::ClientError |85.1%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/tests/olap/high_load/ydb-tests-olap-high_load |85.1%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/tests/olap/high_load/ydb-tests-olap-high_load >> test.py::test[solomon-Downsampling-default.txt] [GOOD] >> test.py::test[solomon-DownsamplingValidSettings-default.txt] |85.1%| [TM] {RESULT} ydb/core/blobstorage/ut_mirror3of4/unittest |85.1%| [TM] {RESULT} ydb/library/yql/dq/comp_nodes/ut/unittest |85.1%| [TM] {RESULT} ydb/core/blobstorage/ut_blobstorage/ut_blob_depot/unittest |85.1%| [TM] {RESULT} ydb/core/blob_depot/ut/unittest |85.1%| [TM] {RESULT} ydb/library/yql/providers/solomon/actors/ut/unittest |85.1%| [TM] {RESULT} ydb/core/cms/ut_sentinel_unstable/unittest |85.1%| [LD] {RESULT} $(B)/ydb/tests/olap/high_load/ydb-tests-olap-high_load |85.1%| [TM] {RESULT} ydb/library/yaml_config/ut_transform/py3test |85.1%| [TM] {RESULT} ydb/library/actors/interconnect/rdma/ut_mem_pool_limit_128/gtest |85.1%| [TA] {RESULT} $(B)/ydb/core/tx/datashard/ut_locks/test-results/unittest/{meta.json ... results_accumulator.log} >> PersQueueSdkReadSessionTest::SettingsValidation [GOOD] >> PersQueueSdkReadSessionTest::SpecifyClustersExplicitly >> TTxDataShardBuildFulltextDictScan::Build+SkipFirst+SkipLast [GOOD] >> TTxDataShardBuildFulltextIndexScan::BadRequest >> TestFormatHandler::ClientError [GOOD] |85.1%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/datashard/ut_replication/ydb-core-tx-datashard-ut_replication |85.1%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/datashard/ut_replication/ydb-core-tx-datashard-ut_replication |85.1%| [LD] {RESULT} $(B)/ydb/core/tx/datashard/ut_replication/ydb-core-tx-datashard-ut_replication >> TestFormatHandler::ClientErrorWithEmptyFilter >> BasicUsage::MaxByteSizeEqualZero [GOOD] >> BasicUsage::BrokenCredentialsProvider >> BasicUsage::WriteAndReadSomeMessagesWithSyncCompression [GOOD] >> CompressExecutor::TestReorderedExecutor >> Donor::ConsistentWritesWhenSwitchingToDonorMode [GOOD] >> BasicUsage::WriteAndReadSomeMessagesWithAsyncCompression [GOOD] >> BasicUsage::WriteAndReadSomeMessagesWithNoCompression >> MediatorTest::TabletAckWhenDead [GOOD] >> MediatorTest::PlanStepAckToReconnectedMediator ------- [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/ut_blobstorage/ut_donor/unittest >> Donor::ConsistentWritesWhenSwitchingToDonorMode [GOOD] Test command err: RandomSeed# 6417426663612116539 Reassign# 7 -- VSlotId { NodeId: 8 PDiskId: 1000 VSlotId: 1000 } GroupId: 2181038080 GroupGeneration: 1 VDiskKind: "Default" FailDomainIdx: 7 VDiskMetrics { SatisfactionRank: 0 VSlotId { NodeId: 8 PDiskId: 1000 VSlotId: 1000 } State: OK Replicated: true DiskSpace: Green IsThrottling: false ThrottlingRate: 1000 } Status: "READY" Ready: true Put# [1:1:1:0:0:20:0] Put# [1:1:2:0:0:77:0] Put# [1:1:3:0:0:61:0] Put# [1:1:4:0:0:65:0] Put# [1:1:5:0:0:74:0] 2026-01-08T15:23:54.625883Z 9 00h00m20.011024s :BS_SYNCER ERROR: PDiskId# 1000 
VDISK[82000000:_:0:7:0]: (2181038080) TVDiskGuidRecoveryActor: DECISION: [Decision# LostData SubsequentFailure# 0] 2026-01-08T15:23:54.627606Z 9 00h00m20.011024s :BS_SYNCER ERROR: PDiskId# 1000 VDISK[82000000:_:0:7:0]: (2181038080) TVDiskGuidRecoveryActor: FINISH: [Decision# LostData Guid# 12326520411422772582] 2026-01-08T15:23:54.637278Z 9 00h00m20.011024s :BS_SYNCER ERROR: PDiskId# 1000 VDISK[82000000:_:0:7:0]: (2181038080) THullOsirisActor: RESURRECT: id# [1:1:1:0:0:20:3] 2026-01-08T15:23:54.637360Z 9 00h00m20.011024s :BS_SYNCER ERROR: PDiskId# 1000 VDISK[82000000:_:0:7:0]: (2181038080) THullOsirisActor: RESURRECT: id# [1:1:2:0:0:77:4] 2026-01-08T15:23:54.637393Z 9 00h00m20.011024s :BS_SYNCER ERROR: PDiskId# 1000 VDISK[82000000:_:0:7:0]: (2181038080) THullOsirisActor: RESURRECT: id# [1:1:3:0:0:61:5] 2026-01-08T15:23:54.637452Z 9 00h00m20.011024s :BS_SYNCER ERROR: PDiskId# 1000 VDISK[82000000:_:0:7:0]: (2181038080) THullOsirisActor: RESURRECT: id# [1:1:4:0:0:65:5] 2026-01-08T15:23:54.637489Z 9 00h00m20.011024s :BS_SYNCER ERROR: PDiskId# 1000 VDISK[82000000:_:0:7:0]: (2181038080) THullOsirisActor: RESURRECT: id# [1:1:5:0:0:74:6] 2026-01-08T15:23:54.637991Z 9 00h00m20.011024s :BS_SYNCER ERROR: PDiskId# 1000 VDISK[82000000:_:0:7:0]: (2181038080) THullOsirisActor: FINISH: BlobsResurrected# 5 PartsResurrected# 5 Put# [1:1:6:0:0:29:0] Put# [1:1:7:0:0:83:0] Put# [1:1:8:0:0:51:0] Put# [1:1:9:0:0:83:0] Put# [1:1:10:0:0:8:0] Put# [1:1:11:0:0:14:0] Put# [1:1:12:0:0:5:0] Put# [1:1:13:0:0:70:0] Put# [1:1:14:0:0:63:0] Put# [1:1:15:0:0:22:0] Put# [1:1:16:0:0:34:0] Put# [1:1:17:0:0:58:0] Put# [1:1:18:0:0:56:0] Put# [1:1:19:0:0:1:0] Put# [1:1:20:0:0:52:0] Put# [1:1:21:0:0:87:0] Put# [1:1:22:0:0:95:0] Put# [1:1:23:0:0:61:0] Put# [1:1:24:0:0:76:0] Put# [1:1:25:0:0:63:0] Put# [1:1:26:0:0:28:0] Put# [1:1:27:0:0:40:0] Put# [1:1:28:0:0:46:0] Put# [1:1:29:0:0:55:0] Put# [1:1:30:0:0:90:0] Put# [1:1:31:0:0:93:0] Put# [1:1:32:0:0:29:0] Put# [1:1:33:0:0:79:0] Put# [1:1:34:0:0:9:0] Put# [1:1:35:0:0:70:0] Put# [1:1:36:0:0:8:0] Put# [1:1:37:0:0:55:0] Put# [1:1:38:0:0:22:0] Put# [1:1:39:0:0:42:0] Put# [1:1:40:0:0:57:0] Put# [1:1:41:0:0:38:0] Put# [1:1:42:0:0:36:0] Put# [1:1:43:0:0:96:0] Put# [1:1:44:0:0:73:0] Put# [1:1:45:0:0:64:0] Put# [1:1:46:0:0:77:0] Put# [1:1:47:0:0:51:0] Put# [1:1:48:0:0:46:0] Put# [1:1:49:0:0:26:0] Put# [1:1:50:0:0:78:0] Put# [1:1:51:0:0:92:0] Put# [1:1:52:0:0:7:0] Put# [1:1:53:0:0:57:0] Put# [1:1:54:0:0:34:0] Put# [1:1:55:0:0:20:0] Put# [1:1:56:0:0:39:0] Put# [1:1:57:0:0:22:0] Put# [1:1:58:0:0:25:0] Put# [1:1:59:0:0:13:0] Put# [1:1:60:0:0:87:0] Put# [1:1:61:0:0:65:0] Put# [1:1:62:0:0:52:0] Put# [1:1:63:0:0:38:0] Put# [1:1:64:0:0:40:0] Put# [1:1:65:0:0:74:0] Put# [1:1:66:0:0:26:0] Put# [1:1:67:0:0:10:0] Put# [1:1:68:0:0:54:0] Put# [1:1:69:0:0:9:0] Put# [1:1:70:0:0:75:0] Put# [1:1:71:0:0:37:0] Put# [1:1:72:0:0:19:0] Put# [1:1:73:0:0:47:0] Put# [1:1:74:0:0:90:0] Put# [1:1:75:0:0:100:0] Put# [1:1:76:0:0:60:0] Put# [1:1:77:0:0:96:0] Put# [1:1:78:0:0:12:0] Put# [1:1:79:0:0:70:0] Put# [1:1:80:0:0:87:0] Put# [1:1:81:0:0:82:0] Put# [1:1:82:0:0:86:0] Put# [1:1:83:0:0:10:0] Put# [1:1:84:0:0:48:0] Put# [1:1:85:0:0:92:0] Put# [1:1:86:0:0:41:0] Put# [1:1:87:0:0:25:0] Put# [1:1:88:0:0:52:0] Put# [1:1:89:0:0:54:0] Put# [1:1:90:0:0:74:0] Put# [1:1:91:0:0:66:0] Put# [1:1:92:0:0:17:0] Put# [1:1:93:0:0:90:0] Put# [1:1:94:0:0:42:0] Put# [1:1:95:0:0:8:0] Put# [1:1:96:0:0:43:0] Put# [1:1:97:0:0:23:0] Put# [1:1:98:0:0:80:0] Put# [1:1:99:0:0:63:0] Put# [1:1:100:0:0:48:0] Put# [1:1:101:0:0:31:0] Put# [1:1:102:0:0:55:0] 
Put# [1:1:103:0:0:17:0] Put# [1:1:104:0:0:30:0] Put# [1:1:105:0:0:38:0] Put# [1:1:106:0:0:41:0] Put# [1:1:107:0:0:96:0] Put# [1:1:108:0:0:79:0] Put# [1:1:109:0:0:94:0] Put# [1:1:110:0:0:67:0] Put# [1:1:111:0:0:93:0] Put# [1:1:112:0:0:70:0] Put# [1:1:113:0:0:37:0] Put# [1:1:114:0:0:36:0] Put# [1:1:115:0:0:26:0] Put# [1:1:116:0:0:44:0] Put# [1:1:117:0:0:76:0] Put# [1:1:118:0:0:47:0] Put# [1:1:119:0:0:52:0] Put# [1:1:120:0:0:1:0] Put# [1:1:121:0:0:91:0] Put# [1:1:122:0:0:47:0] Put# [1:1:123:0:0:63:0] Put# [1:1:124:0:0:57:0] Put# [1:1:125:0:0:34:0] Put# [1:1:126:0:0:15:0] Put# [1:1:127:0:0:41:0] Put# [1:1:128:0:0:16:0] Put# [1:1:129:0:0:93:0] Put# [1:1:130:0:0:26:0] Put# [1:1:131:0:0:100:0] Put# [1:1:132:0:0:83:0] Put# [1:1:133:0:0:32:0] Put# [1:1:134:0:0:60:0] Put# [1:1:135:0:0:29:0] Put# [1:1:136:0:0:49:0] Put# [1:1:137:0:0:98:0] Put# [1:1:138:0:0:99:0] Put# [1:1:139:0:0:63:0] Put# [1:1:140:0:0:2:0] Put# [1:1:141:0:0:55:0] Put# [1:1:142:0:0:48:0] Put# [1:1:143:0:0:12:0] Put# [1:1:144:0:0:8:0] Put# [1:1:145:0:0:98:0] Put# [1:1:146:0:0:10:0] Put# [1:1:147:0:0:5:0] Put# [1:1:148:0:0:68:0] Put# [1:1:149:0:0:92:0] Put# [1:1:150:0:0:33:0] Put# [1:1:151:0:0:17:0] Put# [1:1:152:0:0:3:0] Put# [1:1:153:0:0:100:0] Put# [1:1:154:0:0:27:0] Put# [1:1:155:0:0:39:0] Put# [1:1:156:0:0:12:0] Put# [1:1:157:0:0:91:0] Put# [1:1:158:0:0:100:0] Put# [1:1:159:0:0:36:0] Put# [1:1:160:0:0:84:0] Put# [1:1:161:0:0:77:0] Put# [1:1:162:0:0:52:0] Put# [1:1:163:0:0:86:0] Put# [1:1:164:0:0:92:0] Put# [1:1:165:0:0:98:0] Put# [1:1:166:0:0:79:0] Put# [1:1:167:0:0:19:0] Put# [1:1:168:0:0:65:0] Put# [1:1:169:0:0:28:0] Put# [1:1:170:0:0:33:0] Put# [1:1:171:0:0:27:0] Put# [1:1:172:0:0:34:0] Put# [1:1:173:0:0:16:0] Put# [1:1:174:0:0:93:0] Put# [1:1:175:0:0:40:0] Put# [1:1:176:0:0:73:0] Put# [1:1:177:0:0:45:0] Put# [1:1:178:0:0:65:0] Put# [1:1:179:0:0:35:0] Put# [1:1:180:0:0:2:0] Put# [1:1:181:0:0:59:0] Put# [1:1:182:0:0:98:0] Put# [1:1:183:0:0:15:0] Put# [1:1:184:0:0:43:0] Put# [1:1:185:0:0:93:0] Put# [1:1:186:0:0:93:0] Put# [1:1:187:0:0:17:0] Put# [1:1:188:0:0:20:0] Put# [1:1:189:0:0:18:0] Put# [1:1:190:0:0:79:0] Put# [1:1:191:0:0:91:0] Put# [1:1:192:0:0:69:0] Put# [1:1:193:0:0:22:0] Put# [1:1:194:0:0:2:0] Put# [1:1:195:0:0:17:0] Put# [1:1:196:0:0:5:0] Put# [1:1:197:0:0:95:0] Put# [1:1:198:0:0:95:0] Put# [1:1:199:0:0:80:0] Put# [1:1:200:0:0:58:0] Put# [1:1:201:0:0:88:0] Put# [1:1:202:0:0:1:0] Put# [1:1:203:0:0:93:0] Put# [1:1:204:0:0:65:0] Put# [1:1:205:0:0:43:0] Put# [1:1:206:0:0:77:0] Put# [1:1:207:0:0:86:0] Put# [1:1:208:0:0:27:0] Put# [1:1:209:0:0:12:0] Put# [1:1:210:0:0:33:0] Put# [1:1:211:0:0:89:0] Put# [1:1:212:0:0:53:0] Put# [1:1:213:0:0:58:0] Put# [1:1:214:0:0:87:0] Put# [1:1:215:0:0:11:0] Put# [1:1:216:0:0:86:0] Put# [1:1:217:0:0:88:0] Put# [1:1:218:0:0:43:0] Put# [1:1:219:0:0:4:0] Put# [1:1:220:0:0:87:0] Put# [1:1:221:0:0:88:0] Put# [1:1:222:0:0:46:0] Put# [1:1:223:0:0:85:0] Put# [1:1:224:0:0:96:0] Put# [1:1:225:0:0:65:0] Put# [1:1:226:0:0:61:0] Put# [1:1:227:0:0:81:0] Put# [1:1:228:0:0:72:0] Put# [1:1:229:0:0:76:0] Put# [1:1:230:0:0:96:0] Put# [1:1:231:0:0:53:0] Put# [1:1:232:0:0:1:0] Put# [1:1:233:0:0:23:0] Put# [1:1:234:0:0:100:0] Put# [1:1:235:0:0:62:0] Put# [1:1:236:0:0:78:0] Put# [1:1:237:0:0:74:0] Put# [1:1:238:0:0:19:0] Put# [1:1:239:0:0:100:0] Put# [1:1:240:0:0:67:0] Put# [1:1:241:0:0:18:0] Put# [1:1:242:0:0:53:0] Put# [1:1:243:0:0:32:0] Put# [1:1:244:0:0:55:0] Put# [1:1:245:0:0:67:0] Put# [1:1:246:0:0:8:0] Put# [1:1:247:0:0:94:0] Put# [1:1:248:0:0:84:0] Put# [1:1:249:0:0:29:0] Put# [1:1:250:0:0:94:0] Put# 
[1:1:251:0:0:67:0] Put# [1:1:252:0:0:46:0] Put# [1:1:253:0:0:25:0] Put# [1:1:254:0:0:6:0] Put# [1:1:255:0:0:64:0] Put# [1:1:256:0:0:27:0] Put# [1:1:257:0:0:84:0] Put# [1:1:258:0:0:41:0] Put# [1:1:259:0:0:83:0] Put# [1:1:260:0:0:61:0] Put# [1:1:261:0:0:7:0] Put# [1:1:262:0:0:55:0] Put# [1:1:263:0:0:56:0] Put# [1:1:264:0:0:35:0] Put# [1:1:265:0:0:1:0] Put# [1:1:266:0:0:2:0] Put# [1:1:267:0:0:70:0] Put# [1:1:268:0:0:20:0] Put# [1:1:269:0:0:21:0] Put# [1:1:270:0:0:72:0] Put# [1:1:271:0:0:13:0] Put# [1:1:272:0:0:82:0] Put# [1:1:273:0:0:93:0] Put# [1:1:274:0:0:20:0] Put# [1:1:275:0:0:98:0] Put# [1:1:276:0:0:49:0] Put# [1:1:277:0:0:80:0] Put# [1:1:278:0:0:70:0] Put# [1:1:279:0:0:88:0] Put# [1:1:280:0:0:7:0] Put# [1:1:281:0:0:92:0] Put# [1:1:282:0:0:76:0] Put# [1:1:283:0:0:21:0] Put# [1:1:284:0:0:40:0] Put# [1:1:285:0:0:46:0] Put# [1:1:286:0:0:49:0] Put# [1:1:287:0:0:74:0] Put# [1:1:288:0:0:27:0] Put# [1:1:289:0:0:85:0] Put# [1:1:290:0:0:90:0] Put# [1:1:291:0:0:77:0] Put# [1:1:292:0:0:84:0] Put# [1:1:293:0:0:80:0] Put# [1:1:294:0:0:26:0] Put# [1:1:295:0:0:80:0] Put# [1:1:296:0:0:45:0] Put# [1:1:297:0:0:53:0] Put# [1:1:298:0:0:21:0] Put# [1:1:299:0:0:33:0] Put# [1:1:300:0:0:77:0] Put# [1:1:301:0:0:55:0] Put# [1:1:302:0:0:71:0] Put# [1:1:303:0:0:63:0] Put# [1:1:304:0:0:21:0] Put# [1:1:305:0:0:89:0] Put# [1:1:306:0:0:4:0] Put# [1:1:307:0:0:82:0] Put# [1:1:308:0:0:4:0] Put# [1:1:309:0:0:9:0] Put# [1:1:310:0:0:4:0] Put# [1:1:311:0:0:58:0] Put# [1:1:312:0:0:50:0] Put# [1:1:313:0:0:32:0] Put# [1:1:314:0:0:9:0] Put# [1:1:315:0:0:90:0] Put# [1:1:316:0:0:53:0] Put# [1:1:317:0:0:47:0] Put# [1:1:318:0:0:99:0] Put# [1:1:319:0:0:25:0] Put# [1:1:320:0:0:55:0] Put# [1:1:321:0:0:69:0] Put# [1:1:322:0:0:1:0] Put# [1:1:323:0:0:9:0] Put# [1:1:324:0:0:28:0] Put# [1:1:325:0:0:50:0] Put# [1:1:326:0:0:97:0] Put# [1:1:327:0:0:23:0] Put# [1:1:328:0:0:64:0] Put# [1:1:329:0:0:16:0] Put# [1:1:330:0:0:68:0] Put# [1:1:331:0:0:14:0] Put# [1:1:332:0:0:2:0] Put# [1:1:333:0:0:46:0] Put# [1:1:334:0:0:12:0] Put# [1:1:335:0:0:98:0] Put# [1:1:336:0:0:61:0] Put# [1:1:337:0:0:13:0] Put# [1:1:338:0:0:26:0] Put# [1:1:339:0:0:46:0] Put# [1:1:340:0:0:66:0] Put# [1:1:341:0:0:39:0] Put# [1:1:342:0:0:48:0] Put# [1:1:343:0:0:71:0] Put# [1:1:344:0:0:90:0] Put# [1:1:345:0:0:69:0] Put# [1:1:346:0:0:70:0] Put# [1:1:347:0:0:13:0] Put# [1:1:348:0:0:41:0] Put# [1:1:349:0:0:89:0] Put# [1:1:350:0:0:62:0] Put# [1:1:351:0:0:18:0] Put# [1:1:352:0:0:69:0] Put# [1:1:353:0:0:22:0] Put# [1:1:354:0:0:37:0] Put# [1:1:355:0:0:97:0] Put# [1:1:356:0:0:58:0] Put# [1:1:357:0:0:53:0] Put# [1:1:358:0:0:62:0] Put# [1:1:359:0:0:8:0] Put# [1:1:360:0:0:56:0] Put# [1:1:361:0:0:23:0] Put# [1:1:362:0:0:41:0] Put# [1:1:363:0:0:62:0] Put# [1:1:364:0:0:98:0] Put# [1:1:365:0:0:63:0] Put# [1:1:366:0:0:21:0] Put# [1:1:367:0:0:5:0] Put# [1:1:368:0:0:84:0] Put# [1:1:369:0:0:77:0] Put# [1:1:370:0:0:6:0] Put# [1:1:371:0:0:21:0] Put# [1:1:372:0:0:35:0] Put# [1:1:373:0:0:75:0] Put# [1:1:374:0:0:28:0] Put# [1:1:375:0:0:92:0] Put# [1:1:376:0:0:93:0] Put# [1:1:377:0:0:83:0] Put# [1:1:378:0:0:51:0] Put# [1:1:379:0:0:64:0] Put# [1:1:380:0:0:91:0] Put# [1:1:381:0:0:49:0] Put# [1:1:382:0:0:96:0] Put# [1:1:383:0:0:99:0] Put# [1:1:384:0:0:86:0] Put# [1:1:385:0:0:3:0] Put# [1:1:386:0:0:30:0] Put# [1:1:387:0:0:12:0] Put# [1:1:388:0:0:65:0] Put# [1:1:389:0:0:27:0] Put# [1:1:390:0:0:44:0] Put# [1:1:391:0:0:29:0] Put# [1:1:392:0:0:16:0] Put# [1:1:393:0:0:8:0] Put# [1:1:394:0:0:71:0] Put# [1:1:395:0:0:2:0] Put# [1:1:396:0:0:27:0] Put# [1:1:397:0:0:78:0] Put# [1:1:398:0:0:99:0] Put# [1:1:399:0:0:42:0] 
Put# [1:1:400:0:0:30:0] Put# [1:1:401:0:0:43:0] Put# [1:1:402:0:0:37:0] Put# [1:1:403:0:0:23:0] Put# [1:1:404:0:0:73:0] Put# [1:1:405:0:0:96:0] Put# [1:1:406:0:0:87:0] Put# [1:1:407:0:0:9:0] Put# [1:1:408:0:0:87:0] Put# [1:1:409:0:0:75:0] Put# [1:1:410:0:0:95:0] Put# [1:1:411:0:0:85:0] Put# [1:1:412:0:0:54:0] Put# [1:1:413:0:0:43:0] Put# [1:1:414:0:0:4:0] Put# [1:1:415:0:0:64:0] Put# [1:1:416:0:0:32:0] Put# [1:1:417:0:0:95:0] Put# [1:1:418:0:0:59:0] Put# [1:1:419:0:0:66:0] Put# [1:1:420:0:0:96:0] Put# [1:1:421:0:0:96:0] Put# [1:1:422:0:0:40:0] Put# [1:1:423:0:0:9:0] Put# [1:1:424:0:0:93:0] Put# [1:1:425:0:0:30:0] Put# [1:1:426:0:0:76:0] Put# [1:1:427:0:0:84:0] Put# [1:1:428:0:0:95:0] Put# [1:1:429:0:0:21:0] Put# [1:1:430:0:0:39:0] Put# [1:1:431:0:0:40:0] Put# [1:1:432:0 ... 0:0:49:0] Put# [1:3:2122:0:0:13:0] Put# [1:3:2123:0:0:40:0] Put# [1:3:2124:0:0:60:0] Put# [1:3:2125:0:0:90:0] Put# [1:3:2126:0:0:12:0] Put# [1:3:2127:0:0:59:0] Put# [1:3:2128:0:0:66:0] Put# [1:3:2129:0:0:80:0] Put# [1:3:2130:0:0:3:0] Put# [1:3:2131:0:0:46:0] Put# [1:3:2132:0:0:41:0] Put# [1:3:2133:0:0:67:0] Put# [1:3:2134:0:0:6:0] Put# [1:3:2135:0:0:72:0] Put# [1:3:2136:0:0:20:0] Put# [1:3:2137:0:0:44:0] Put# [1:3:2138:0:0:86:0] Put# [1:3:2139:0:0:36:0] Put# [1:3:2140:0:0:57:0] Put# [1:3:2141:0:0:94:0] Put# [1:3:2142:0:0:74:0] Put# [1:3:2143:0:0:87:0] Put# [1:3:2144:0:0:14:0] Put# [1:3:2145:0:0:19:0] Put# [1:3:2146:0:0:81:0] Put# [1:3:2147:0:0:88:0] Put# [1:3:2148:0:0:32:0] Put# [1:3:2149:0:0:29:0] Put# [1:3:2150:0:0:99:0] Put# [1:3:2151:0:0:65:0] Put# [1:3:2152:0:0:54:0] Put# [1:3:2153:0:0:79:0] Put# [1:3:2154:0:0:11:0] Put# [1:3:2155:0:0:57:0] Put# [1:3:2156:0:0:13:0] Put# [1:3:2157:0:0:25:0] Put# [1:3:2158:0:0:92:0] Put# [1:3:2159:0:0:65:0] Put# [1:3:2160:0:0:73:0] Put# [1:3:2161:0:0:80:0] Put# [1:3:2162:0:0:53:0] Put# [1:3:2163:0:0:87:0] Put# [1:3:2164:0:0:98:0] Put# [1:3:2165:0:0:54:0] Put# [1:3:2166:0:0:93:0] Put# [1:3:2167:0:0:87:0] Put# [1:3:2168:0:0:37:0] Put# [1:3:2169:0:0:28:0] Put# [1:3:2170:0:0:91:0] Put# [1:3:2171:0:0:42:0] Put# [1:3:2172:0:0:82:0] Put# [1:3:2173:0:0:47:0] Put# [1:3:2174:0:0:70:0] Put# [1:3:2175:0:0:65:0] Put# [1:3:2176:0:0:30:0] Put# [1:3:2177:0:0:55:0] Put# [1:3:2178:0:0:76:0] Put# [1:3:2179:0:0:50:0] Put# [1:3:2180:0:0:24:0] Put# [1:3:2181:0:0:44:0] Put# [1:3:2182:0:0:49:0] Put# [1:3:2183:0:0:97:0] Put# [1:3:2184:0:0:79:0] Put# [1:3:2185:0:0:26:0] Put# [1:3:2186:0:0:94:0] Put# [1:3:2187:0:0:9:0] Put# [1:3:2188:0:0:71:0] Put# [1:3:2189:0:0:18:0] Put# [1:3:2190:0:0:52:0] Put# [1:3:2191:0:0:32:0] Put# [1:3:2192:0:0:54:0] Put# [1:3:2193:0:0:32:0] Put# [1:3:2194:0:0:23:0] Put# [1:3:2195:0:0:100:0] Put# [1:3:2196:0:0:26:0] Put# [1:3:2197:0:0:61:0] Put# [1:3:2198:0:0:2:0] Put# [1:3:2199:0:0:91:0] Put# [1:3:2200:0:0:66:0] Put# [1:3:2201:0:0:6:0] Put# [1:3:2202:0:0:40:0] Put# [1:3:2203:0:0:59:0] Put# [1:3:2204:0:0:12:0] Put# [1:3:2205:0:0:71:0] Put# [1:3:2206:0:0:44:0] Put# [1:3:2207:0:0:64:0] Put# [1:3:2208:0:0:46:0] Put# [1:3:2209:0:0:71:0] Put# [1:3:2210:0:0:23:0] Put# [1:3:2211:0:0:53:0] Put# [1:3:2212:0:0:57:0] Put# [1:3:2213:0:0:40:0] Put# [1:3:2214:0:0:100:0] Put# [1:3:2215:0:0:45:0] Put# [1:3:2216:0:0:34:0] Put# [1:3:2217:0:0:67:0] Put# [1:3:2218:0:0:60:0] Put# [1:3:2219:0:0:82:0] Put# [1:3:2220:0:0:44:0] Put# [1:3:2221:0:0:95:0] Put# [1:3:2222:0:0:94:0] Put# [1:3:2223:0:0:27:0] Put# [1:3:2224:0:0:18:0] Put# [1:3:2225:0:0:78:0] Put# [1:3:2226:0:0:93:0] Put# [1:3:2227:0:0:29:0] Put# [1:3:2228:0:0:10:0] Put# [1:3:2229:0:0:89:0] Put# [1:3:2230:0:0:52:0] Put# [1:3:2231:0:0:70:0] Put# 
[1:3:2232:0:0:51:0] Put# [1:3:2233:0:0:29:0] Put# [1:3:2234:0:0:28:0] Put# [1:3:2235:0:0:42:0] Put# [1:3:2236:0:0:87:0] Put# [1:3:2237:0:0:20:0] Put# [1:3:2238:0:0:33:0] Put# [1:3:2239:0:0:21:0] Put# [1:3:2240:0:0:30:0] Put# [1:3:2241:0:0:62:0] Put# [1:3:2242:0:0:23:0] Put# [1:3:2243:0:0:9:0] Put# [1:3:2244:0:0:22:0] Put# [1:3:2245:0:0:58:0] Put# [1:3:2246:0:0:44:0] Put# [1:3:2247:0:0:55:0] Put# [1:3:2248:0:0:11:0] Put# [1:3:2249:0:0:58:0] Put# [1:3:2250:0:0:44:0] Put# [1:3:2251:0:0:52:0] Put# [1:3:2252:0:0:40:0] Put# [1:3:2253:0:0:83:0] Put# [1:3:2254:0:0:92:0] Put# [1:3:2255:0:0:67:0] Put# [1:3:2256:0:0:16:0] Put# [1:3:2257:0:0:60:0] Put# [1:3:2258:0:0:79:0] Put# [1:3:2259:0:0:100:0] Put# [1:3:2260:0:0:60:0] Put# [1:3:2261:0:0:54:0] Put# [1:3:2262:0:0:45:0] Put# [1:3:2263:0:0:96:0] Put# [1:3:2264:0:0:63:0] Put# [1:3:2265:0:0:65:0] Put# [1:3:2266:0:0:54:0] Put# [1:3:2267:0:0:19:0] Put# [1:3:2268:0:0:92:0] Put# [1:3:2269:0:0:17:0] Put# [1:3:2270:0:0:36:0] Put# [1:3:2271:0:0:90:0] Put# [1:3:2272:0:0:44:0] Put# [1:3:2273:0:0:32:0] Put# [1:3:2274:0:0:53:0] Put# [1:3:2275:0:0:22:0] Put# [1:3:2276:0:0:55:0] Put# [1:3:2277:0:0:3:0] Put# [1:3:2278:0:0:21:0] Put# [1:3:2279:0:0:43:0] Put# [1:3:2280:0:0:78:0] Put# [1:3:2281:0:0:3:0] Put# [1:3:2282:0:0:81:0] Put# [1:3:2283:0:0:33:0] Put# [1:3:2284:0:0:96:0] Put# [1:3:2285:0:0:35:0] Put# [1:3:2286:0:0:12:0] Put# [1:3:2287:0:0:64:0] Put# [1:3:2288:0:0:7:0] Put# [1:3:2289:0:0:16:0] Put# [1:3:2290:0:0:63:0] Put# [1:3:2291:0:0:32:0] Put# [1:3:2292:0:0:94:0] Put# [1:3:2293:0:0:97:0] Put# [1:3:2294:0:0:79:0] Put# [1:3:2295:0:0:75:0] Put# [1:3:2296:0:0:8:0] Put# [1:3:2297:0:0:3:0] Put# [1:3:2298:0:0:34:0] Put# [1:3:2299:0:0:57:0] Put# [1:3:2300:0:0:55:0] Put# [1:3:2301:0:0:91:0] Put# [1:3:2302:0:0:42:0] Put# [1:3:2303:0:0:23:0] Put# [1:3:2304:0:0:12:0] Put# [1:3:2305:0:0:29:0] Put# [1:3:2306:0:0:79:0] Put# [1:3:2307:0:0:54:0] Put# [1:3:2308:0:0:81:0] Put# [1:3:2309:0:0:89:0] Put# [1:3:2310:0:0:33:0] Put# [1:3:2311:0:0:6:0] Put# [1:3:2312:0:0:16:0] Put# [1:3:2313:0:0:89:0] Put# [1:3:2314:0:0:39:0] Put# [1:3:2315:0:0:57:0] Put# [1:3:2316:0:0:62:0] Put# [1:3:2317:0:0:53:0] Put# [1:3:2318:0:0:68:0] Put# [1:3:2319:0:0:97:0] Put# [1:3:2320:0:0:40:0] Put# [1:3:2321:0:0:78:0] Put# [1:3:2322:0:0:39:0] Put# [1:3:2323:0:0:37:0] Put# [1:3:2324:0:0:76:0] Put# [1:3:2325:0:0:1:0] Put# [1:3:2326:0:0:6:0] Put# [1:3:2327:0:0:38:0] Put# [1:3:2328:0:0:18:0] Put# [1:3:2329:0:0:99:0] Put# [1:3:2330:0:0:69:0] Put# [1:3:2331:0:0:60:0] Put# [1:3:2332:0:0:85:0] Put# [1:3:2333:0:0:76:0] Put# [1:3:2334:0:0:10:0] Put# [1:3:2335:0:0:77:0] Put# [1:3:2336:0:0:79:0] Put# [1:3:2337:0:0:49:0] Put# [1:3:2338:0:0:50:0] Put# [1:3:2339:0:0:49:0] Put# [1:3:2340:0:0:44:0] Put# [1:3:2341:0:0:32:0] Put# [1:3:2342:0:0:8:0] Put# [1:3:2343:0:0:3:0] Put# [1:3:2344:0:0:15:0] Put# [1:3:2345:0:0:48:0] Put# [1:3:2346:0:0:84:0] Put# [1:3:2347:0:0:18:0] Put# [1:3:2348:0:0:9:0] Put# [1:3:2349:0:0:19:0] Put# [1:3:2350:0:0:55:0] Put# [1:3:2351:0:0:89:0] Put# [1:3:2352:0:0:75:0] Put# [1:3:2353:0:0:57:0] Put# [1:3:2354:0:0:33:0] Put# [1:3:2355:0:0:31:0] Put# [1:3:2356:0:0:16:0] Put# [1:3:2357:0:0:86:0] Put# [1:3:2358:0:0:47:0] Put# [1:3:2359:0:0:50:0] Put# [1:3:2360:0:0:70:0] Put# [1:3:2361:0:0:41:0] Put# [1:3:2362:0:0:62:0] Put# [1:3:2363:0:0:45:0] Put# [1:3:2364:0:0:6:0] Put# [1:3:2365:0:0:1:0] Put# [1:3:2366:0:0:57:0] Put# [1:3:2367:0:0:7:0] Put# [1:3:2368:0:0:38:0] Put# [1:3:2369:0:0:32:0] Put# [1:3:2370:0:0:16:0] Put# [1:3:2371:0:0:2:0] Put# [1:3:2372:0:0:40:0] Put# [1:3:2373:0:0:72:0] Put# [1:3:2374:0:0:50:0] 
Put# [1:3:2375:0:0:89:0] Put# [1:3:2376:0:0:50:0] Put# [1:3:2377:0:0:24:0] Put# [1:3:2378:0:0:75:0] Put# [1:3:2379:0:0:97:0] Put# [1:3:2380:0:0:42:0] Put# [1:3:2381:0:0:24:0] Put# [1:3:2382:0:0:37:0] Put# [1:3:2383:0:0:26:0] Put# [1:3:2384:0:0:61:0] Put# [1:3:2385:0:0:41:0] Put# [1:3:2386:0:0:86:0] Put# [1:3:2387:0:0:12:0] Put# [1:3:2388:0:0:32:0] Put# [1:3:2389:0:0:9:0] Put# [1:3:2390:0:0:47:0] Put# [1:3:2391:0:0:5:0] Put# [1:3:2392:0:0:95:0] Put# [1:3:2393:0:0:32:0] Put# [1:3:2394:0:0:8:0] Put# [1:3:2395:0:0:59:0] Put# [1:3:2396:0:0:72:0] Put# [1:3:2397:0:0:58:0] Put# [1:3:2398:0:0:75:0] Put# [1:3:2399:0:0:20:0] Put# [1:3:2400:0:0:100:0] Put# [1:3:2401:0:0:33:0] Put# [1:3:2402:0:0:28:0] Put# [1:3:2403:0:0:73:0] Put# [1:3:2404:0:0:29:0] Put# [1:3:2405:0:0:60:0] Put# [1:3:2406:0:0:71:0] Put# [1:3:2407:0:0:99:0] Put# [1:3:2408:0:0:36:0] Put# [1:3:2409:0:0:86:0] Put# [1:3:2410:0:0:78:0] Put# [1:3:2411:0:0:96:0] Put# [1:3:2412:0:0:66:0] Put# [1:3:2413:0:0:55:0] Put# [1:3:2414:0:0:5:0] Put# [1:3:2415:0:0:48:0] Put# [1:3:2416:0:0:37:0] Put# [1:3:2417:0:0:36:0] Put# [1:3:2418:0:0:87:0] Put# [1:3:2419:0:0:12:0] Put# [1:3:2420:0:0:62:0] Put# [1:3:2421:0:0:94:0] Put# [1:3:2422:0:0:40:0] Put# [1:3:2423:0:0:48:0] Put# [1:3:2424:0:0:15:0] Put# [1:3:2425:0:0:66:0] Put# [1:3:2426:0:0:20:0] Put# [1:3:2427:0:0:79:0] Put# [1:3:2428:0:0:91:0] Put# [1:3:2429:0:0:5:0] Put# [1:3:2430:0:0:27:0] Put# [1:3:2431:0:0:15:0] Put# [1:3:2432:0:0:57:0] Put# [1:3:2433:0:0:75:0] Put# [1:3:2434:0:0:54:0] Put# [1:3:2435:0:0:56:0] Put# [1:3:2436:0:0:81:0] Put# [1:3:2437:0:0:2:0] Put# [1:3:2438:0:0:72:0] Put# [1:3:2439:0:0:31:0] Put# [1:3:2440:0:0:32:0] Put# [1:3:2441:0:0:56:0] Put# [1:3:2442:0:0:100:0] Put# [1:3:2443:0:0:34:0] Put# [1:3:2444:0:0:83:0] Put# [1:3:2445:0:0:84:0] Put# [1:3:2446:0:0:83:0] Put# [1:3:2447:0:0:24:0] Put# [1:3:2448:0:0:8:0] Put# [1:3:2449:0:0:19:0] Put# [1:3:2450:0:0:49:0] Put# [1:3:2451:0:0:60:0] Put# [1:3:2452:0:0:49:0] Put# [1:3:2453:0:0:28:0] Put# [1:3:2454:0:0:24:0] Put# [1:3:2455:0:0:57:0] Put# [1:3:2456:0:0:40:0] Put# [1:3:2457:0:0:93:0] Put# [1:3:2458:0:0:43:0] Put# [1:3:2459:0:0:62:0] Put# [1:3:2460:0:0:42:0] Put# [1:3:2461:0:0:43:0] Put# [1:3:2462:0:0:84:0] Put# [1:3:2463:0:0:83:0] Put# [1:3:2464:0:0:19:0] Put# [1:3:2465:0:0:19:0] Put# [1:3:2466:0:0:97:0] Put# [1:3:2467:0:0:13:0] Put# [1:3:2468:0:0:51:0] Put# [1:3:2469:0:0:48:0] Put# [1:3:2470:0:0:51:0] Put# [1:3:2471:0:0:39:0] Put# [1:3:2472:0:0:74:0] Put# [1:3:2473:0:0:2:0] Put# [1:3:2474:0:0:1:0] Put# [1:3:2475:0:0:70:0] Put# [1:3:2476:0:0:81:0] Put# [1:3:2477:0:0:81:0] Put# [1:3:2478:0:0:16:0] Put# [1:3:2479:0:0:65:0] Put# [1:3:2480:0:0:73:0] Put# [1:3:2481:0:0:39:0] Put# [1:3:2482:0:0:38:0] Put# [1:3:2483:0:0:86:0] Put# [1:3:2484:0:0:42:0] Put# [1:3:2485:0:0:100:0] Put# [1:3:2486:0:0:61:0] Put# [1:3:2487:0:0:46:0] Put# [1:3:2488:0:0:55:0] Put# [1:3:2489:0:0:73:0] Put# [1:3:2490:0:0:74:0] Put# [1:3:2491:0:0:96:0] Put# [1:3:2492:0:0:70:0] Put# [1:3:2493:0:0:16:0] Put# [1:3:2494:0:0:57:0] Put# [1:3:2495:0:0:17:0] Put# [1:3:2496:0:0:53:0] Put# [1:3:2497:0:0:19:0] Put# [1:3:2498:0:0:66:0] Put# [1:3:2499:0:0:28:0] Put# [1:3:2500:0:0:98:0] Put# [1:3:2501:0:0:92:0] Put# [1:3:2502:0:0:82:0] Put# [1:3:2503:0:0:38:0] Put# [1:3:2504:0:0:28:0] Put# [1:3:2505:0:0:86:0] Put# [1:3:2506:0:0:55:0] Put# [1:3:2507:0:0:59:0] Put# [1:3:2508:0:0:41:0] Put# [1:3:2509:0:0:52:0] Put# [1:3:2510:0:0:17:0] Put# [1:3:2511:0:0:71:0] Put# [1:3:2512:0:0:18:0] Put# [1:3:2513:0:0:38:0] Put# [1:3:2514:0:0:50:0] Put# [1:3:2515:0:0:10:0] Put# [1:3:2516:0:0:33:0] Put# 
[1:3:2517:0:0:71:0] Put# [1:3:2518:0:0:23:0] Put# [1:3:2519:0:0:32:0] Put# [1:3:2520:0:0:2:0] Put# [1:3:2521:0:0:64:0] Put# [1:3:2522:0:0:31:0] Put# [1:3:2523:0:0:18:0] Put# [1:3:2524:0:0:16:0] Put# [1:3:2525:0:0:55:0] Put# [1:3:2526:0:0:53:0] Put# [1:3:2527:0:0:56:0] Put# [1:3:2528:0:0:5:0] Put# [1:3:2529:0:0:67:0] Put# [1:3:2530:0:0:28:0] Put# [1:3:2531:0:0:62:0] Put# [1:3:2532:0:0:72:0] Put# [1:3:2533:0:0:2:0] Put# [1:3:2534:0:0:71:0] Put# [1:3:2535:0:0:4:0] Put# [1:3:2536:0:0:29:0] Put# [1:3:2537:0:0:87:0] Put# [1:3:2538:0:0:10:0] Put# [1:3:2539:0:0:13:0] Put# [1:3:2540:0:0:14:0] Put# [1:3:2541:0:0:86:0] Put# [1:3:2542:0:0:14:0] Put# [1:3:2543:0:0:11:0] Put# [1:3:2544:0:0:67:0] Put# [1:3:2545:0:0:69:0] Put# [1:3:2546:0:0:15:0] Put# [1:3:2547:0:0:62:0] Put# [1:3:2548:0:0:39:0] Put# [1:3:2549:0:0:86:0] Put# [1:3:2550:0:0:63:0] Put# [1:3:2551:0:0:16:0] Put# [1:3:2552:0:0:64:0] Put# [1:3:2553:0:0:98:0] Put# [1:3:2554:0:0:32:0] Put# [1:3:2555:0:0:48:0] Put# [1:3:2556:0:0:23:0] Put# [1:3:2557:0:0:78:0] Put# [1:3:2558:0:0:73:0] Put# [1:3:2559:0:0:60:0] Put# [1:3:2560:0:0:57:0] Put# [1:3:2561:0:0:44:0] Put# [1:3:2562:0:0:49:0] Put# [1:3:2563:0:0:90:0] Put# [1:3:2564:0:0:81:0] Put# [1:3:2565:0:0:4:0] Put# [1:3:2566:0:0:23:0] Put# [1:3:2567:0:0:42:0] Put# [1:3:2568:0:0:23:0] Put# [1:3:2569:0:0:61:0] Put# [1:3:2570:0:0:90:0] Put# [1:3:2571:0:0:94:0] Put# [1:3:2572:0:0:82:0] Put# [1:3:2573:0:0:43:0] Put# [1:3:2574:0:0:87:0] Put# [1:3:2575:0:0:92:0] Put# [1:3:2576:0:0:70:0] Put# [1:3:2577:0:0:35:0] Put# [1:3:2578:0:0:62:0] Put# [1:3:2579:0:0:53:0] Put# [1:3:2580:0:0:35:0] Put# [1:3:2581:0:0:20:0] Put# [1:3:2582:0:0:56:0] Put# [1:3:2583:0:0:20:0] Put# [1:3:2584:0:0:64:0] Put# [1:3:2585:0:0:67:0] Reassign# 4 -- VSlotId { NodeId: 5 PDiskId: 1000 VSlotId: 1000 } GroupId: 2181038080 GroupGeneration: 3 VDiskKind: "Default" FailDomainIdx: 4 VDiskMetrics { SatisfactionRank: 12 VSlotId { NodeId: 5 PDiskId: 1000 VSlotId: 1000 } State: OK Replicated: true DiskSpace: Green IsThrottling: false ThrottlingRate: 1000 } Status: "READY" Ready: true Put# [1:3:2586:0:0:62:0] Put# [1:3:2587:0:0:40:0] Put# [1:3:2588:0:0:26:0] |85.1%| [TM] {BAZEL_UPLOAD} ydb/core/blobstorage/ut_blobstorage/ut_donor/unittest |85.1%| [TA] $(B)/ydb/core/blobstorage/ut_blobstorage/ut_donor/test-results/unittest/{meta.json ... results_accumulator.log} |85.1%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/persqueue/pqtablet/partition/mlp/ut/ydb-core-persqueue-pqtablet-partition-mlp-ut >> DataShardReplication::SimpleApplyChanges |85.1%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/persqueue/pqtablet/partition/mlp/ut/ydb-core-persqueue-pqtablet-partition-mlp-ut |85.1%| [TA] {BAZEL_UPLOAD} $(B)/ydb/core/blobstorage/ut_blobstorage/ut_donor/test-results/unittest/{meta.json ... results_accumulator.log} |85.1%| [TA] {RESULT} $(B)/ydb/core/blobstorage/ut_blobstorage/ut_donor/test-results/unittest/{meta.json ... 
results_accumulator.log}
|85.1%| [LD] {RESULT} $(B)/ydb/core/persqueue/pqtablet/partition/mlp/ut/ydb-core-persqueue-pqtablet-partition-mlp-ut
>> TestFormatHandler::ClientErrorWithEmptyFilter [GOOD]
>> DescribeSchemaSecretsService::GetUnexistingValue [GOOD]
>> DescribeSchemaSecretsService::GetDroppedValue
|85.2%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/datashard/ut_snapshot_isolation/ydb-core-tx-datashard-ut_snapshot_isolation
>> test.py::test[solomon-DownsamplingValidSettings-default.txt] [GOOD]
>> test.py::test[solomon-HistResponse-default.txt]
|85.2%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/datashard/ut_snapshot_isolation/ydb-core-tx-datashard-ut_snapshot_isolation
|85.2%| [LD] {RESULT} $(B)/ydb/core/tx/datashard/ut_snapshot_isolation/ydb-core-tx-datashard-ut_snapshot_isolation
>> TestFormatHandler::Watermark
|85.2%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/services/keyvalue/ut/ydb-services-keyvalue-ut
|85.2%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/services/keyvalue/ut/ydb-services-keyvalue-ut
|85.2%| [LD] {RESULT} $(B)/ydb/services/keyvalue/ut/ydb-services-keyvalue-ut
|85.2%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/grpc_services/tablet/ut/ydb-core-grpc_services-tablet-ut
|85.2%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/grpc_services/tablet/ut/ydb-core-grpc_services-tablet-ut
|85.2%| [LD] {RESULT} $(B)/ydb/core/grpc_services/tablet/ut/ydb-core-grpc_services-tablet-ut
>> TAllocatorSuite32::AllocationWithReclaimThreeThreads [GOOD]
>> TTxDataShardBuildFulltextIndexScan::BadRequest [GOOD]
>> TTxDataShardBuildFulltextIndexScan::Build
|85.2%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/library/yql/providers/generic/actors/ut/ydb-library-yql-providers-generic-actors-ut
|85.2%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/library/yql/providers/generic/actors/ut/ydb-library-yql-providers-generic-actors-ut
|85.2%| [LD] {RESULT} $(B)/ydb/library/yql/providers/generic/actors/ut/ydb-library-yql-providers-generic-actors-ut
>> MediatorTest::PlanStepAckToReconnectedMediator [GOOD]
>> MediatorTest::WatcherReconnect
>> TMLPConsumerTests::ReloadPQTablet
------- [TM] {asan, default-linux-x86_64, release} ydb/library/actors/interconnect/rdma/ut_mem_pool_limit_32/gtest >> TAllocatorSuite32::AllocationWithReclaimThreeThreads [GOOD]
Test command err: ydb/library/actors/interconnect/rdma/ut_mem_pool_limit_32/allocator_ut.cpp:44: Value of: i == 8
  Actual: false
Expected: true
AddressSanitizer:DEADLYSIGNAL
=================================================================
==44406==ERROR: AddressSanitizer: SEGV on unknown address 0x000000000010 (pc 0x0000015e575c bp 0x7ffd9d065290 sp 0x7ffd9d065290 T0)
==44406==The signal is caused by a READ memory access.
==44406==Hint: address points to the zero page.
 #0 0x0000015e575c in Get /-S/util/generic/ptr.h:592:16
 #1 0x0000015e575c in AsT /-S/util/generic/ptr.h:153:50
 #2 0x0000015e575c in operator-> /-S/util/generic/ptr.h:118:18
 #3 0x0000015e575c in NInterconnect::NRdma::TMemRegion::GetAddr() const /-S/ydb/library/actors/interconnect/rdma/mem_pool.cpp:222:20
 #4 0x00000059e9f1 in TAllocatorSuite32_SlotPoolHugeAlloc_Test::TestBody() /-S/ydb/library/actors/interconnect/rdma/ut_mem_pool_limit_32/allocator_ut.cpp:75:9
 #5 0x000000a001e4 in HandleSehExceptionsInMethodIfSupported /-S/contrib/restricted/googletest/googletest/src/gtest.cc:2664:10
 #6 0x000000a001e4 in void testing::internal::HandleExceptionsInMethodIfSupported(testing::Test*, void (testing::Test::*)(), char const*) /-S/contrib/restricted/googletest/googletest/src/gtest.cc:2700:14
 #7 0x0000009fff10 in testing::Test::Run() /-S/contrib/restricted/googletest/googletest/src/gtest.cc:2739:5
 #8 0x000000a02795 in testing::TestInfo::Run() /-S/contrib/restricted/googletest/googletest/src/gtest.cc:2885:11
 #9 0x000000a05641 in testing::TestSuite::Run() /-S/contrib/restricted/googletest/googletest/src/gtest.cc:3063:30
 #10 0x000000a3068e in testing::internal::UnitTestImpl::RunAllTests() /-S/contrib/restricted/googletest/googletest/src/gtest.cc:6054:44
 #11 0x000000a2f084 in HandleSehExceptionsInMethodIfSupported /-S/contrib/restricted/googletest/googletest/src/gtest.cc:2664:10
 #12 0x000000a2f084 in bool testing::internal::HandleExceptionsInMethodIfSupported(testing::internal::UnitTestImpl*, bool (testing::internal::UnitTestImpl::*)(), char const*) /-S/contrib/restricted/googletest/googletest/src/gtest.cc:2700:14
 #13 0x000000a2ebf6 in testing::UnitTest::Run() /-S/contrib/restricted/googletest/googletest/src/gtest.cc:5594:10
 #14 0x000000a8c38f in RUN_ALL_TESTS /-S/contrib/restricted/googletest/googletest/include/gtest/gtest.h:2334:73
 #15 0x000000a8c38f in NGTest::Main(int, char**) /-S/library/cpp/testing/gtest/main.cpp:274:12
 #16 0x7f2e6503cd8f (/lib/x86_64-linux-gnu/libc.so.6+0x29d8f) (BuildId: 4f7b0c955c3d81d7cac1501a2498b69d1d82bfe7)
 #17 0x7f2e6503ce3f in __libc_start_main (/lib/x86_64-linux-gnu/libc.so.6+0x29e3f) (BuildId: 4f7b0c955c3d81d7cac1501a2498b69d1d82bfe7)
 #18 0x00000056e728 in _start (/home/runner/.ya/build/build_root/663n/001b2c/ydb/library/actors/interconnect/rdma/ut_mem_pool_limit_32/interconnect-rdma-ut_mem_pool_limit_32+0x56e728) (BuildId: 79bd0b28b84cc2c76fefcd15693cb73338b91a97)
==44406==Register values:
rax = 0x0000000000000010  rbx = 0x00007ffd9d0652a0  rcx = 0x0000000000000002  rdx = 0x00000f65cc59be60
rdi = 0x0000000000000000  rsi = 0x00007b2e62cdf2e0  rbp = 0x00007ffd9d065290  rsp = 0x00007ffd9d065290
r8 = 0x0000000000000001  r9 = 0x0000000000000002  r10 = 0x00007d6e63fe0000  r11 = 0x0000000000000001
r12 = 0x00000f664c5d5240  r13 = 0x00007b2e62ee9220  r14 = 0x00007b2e62ee9200  r15 = 0x00007b2e62ee9200
AddressSanitizer can not provide additional info.
SUMMARY: AddressSanitizer: SEGV /-S/util/generic/ptr.h:592:16 in Get
==44406==ABORTING
AddressSanitizer:DEADLYSIGNAL
=================================================================
==44608==ERROR: AddressSanitizer: SEGV on unknown address 0x000000000010 (pc 0x0000015e575c bp 0x7fff3591aef0 sp 0x7fff3591aef0 T0)
==44608==The signal is caused by a READ memory access.
==44608==Hint: address points to the zero page.
 #0 0x0000015e575c in Get /-S/util/generic/ptr.h:592:16
 #1 0x0000015e575c in AsT /-S/util/generic/ptr.h:153:50
 #2 0x0000015e575c in operator-> /-S/util/generic/ptr.h:118:18
 #3 0x0000015e575c in NInterconnect::NRdma::TMemRegion::GetAddr() const /-S/ydb/library/actors/interconnect/rdma/mem_pool.cpp:222:20
 #4 0x00000059f978 in TAllocatorSuite32_SlotPoolHugeAllocAfterSmall_Test::TestBody() /-S/ydb/library/actors/interconnect/rdma/ut_mem_pool_limit_32/allocator_ut.cpp:95:9
 #5 0x000000a001e4 in HandleSehExceptionsInMethodIfSupported /-S/contrib/restricted/googletest/googletest/src/gtest.cc:2664:10
 #6 0x000000a001e4 in void testing::internal::HandleExceptionsInMethodIfSupported(testing::Test*, void (testing::Test::*)(), char const*) /-S/contrib/restricted/googletest/googletest/src/gtest.cc:2700:14
 #7 0x0000009fff10 in testing::Test::Run() /-S/contrib/restricted/googletest/googletest/src/gtest.cc:2739:5
 #8 0x000000a02795 in testing::TestInfo::Run() /-S/contrib/restricted/googletest/googletest/src/gtest.cc:2885:11
 #9 0x000000a05641 in testing::TestSuite::Run() /-S/contrib/restricted/googletest/googletest/src/gtest.cc:3063:30
 #10 0x000000a3068e in testing::internal::UnitTestImpl::RunAllTests() /-S/contrib/restricted/googletest/googletest/src/gtest.cc:6054:44
 #11 0x000000a2f084 in HandleSehExceptionsInMethodIfSupported /-S/contrib/restricted/googletest/googletest/src/gtest.cc:2664:10
 #12 0x000000a2f084 in bool testing::internal::HandleExceptionsInMethodIfSupported(testing::internal::UnitTestImpl*, bool (testing::internal::UnitTestImpl::*)(), char const*) /-S/contrib/restricted/googletest/googletest/src/gtest.cc:2700:14
 #13 0x000000a2ebf6 in testing::UnitTest::Run() /-S/contrib/restricted/googletest/googletest/src/gtest.cc:5594:10
 #14 0x000000a8c38f in RUN_ALL_TESTS /-S/contrib/restricted/googletest/googletest/include/gtest/gtest.h:2334:73
 #15 0x000000a8c38f in NGTest::Main(int, char**) /-S/library/cpp/testing/gtest/main.cpp:274:12
 #16 0x7fd48aa56d8f (/lib/x86_64-linux-gnu/libc.so.6+0x29d8f) (BuildId: 4f7b0c955c3d81d7cac1501a2498b69d1d82bfe7)
 #17 0x7fd48aa56e3f in __libc_start_main (/lib/x86_64-linux-gnu/libc.so.6+0x29e3f) (BuildId: 4f7b0c955c3d81d7cac1501a2498b69d1d82bfe7)
 #18 0x00000056e728 in _start (/home/runner/.ya/build/build_root/663n/001b2c/ydb/library/actors/interconnect/rdma/ut_mem_pool_limit_32/interconnect-rdma-ut_mem_pool_limit_32+0x56e728) (BuildId: 79bd0b28b84cc2c76fefcd15693cb73338b91a97)
==44608==Register values:
rax = 0x0000000000000010  rbx = 0x00007fff3591af00  rcx = 0x0000000000000002  rdx = 0x00000f7a910dbdf0
rdi = 0x0000000000000000  rsi = 0x00007bd4886def60  rbp = 0x00007fff3591aef0  rsp = 0x00007fff3591aef0
r8 = 0x00007e14899e0000  r9 = 0x0000000000000000  r10 = 0x00007fffffffff01  r11 = 0x0000000000000001
r12 = 0x00007bd488ae6020  r13 = 0x00007bd488ae6040  r14 = 0x0000000000000020  r15 = 0x00000f7b11154c00
AddressSanitizer can not provide additional info.
SUMMARY: AddressSanitizer: SEGV /-S/util/generic/ptr.h:592:16 in Get
==44608==ABORTING
AddressSanitizer:DEADLYSIGNAL
=================================================================
==44748==ERROR: AddressSanitizer: SEGV on unknown address 0x000000000010 (pc 0x0000015e575c bp 0x7ffdb218f270 sp 0x7ffdb218f270 T0)
==44748==The signal is caused by a READ memory access.
==44748==Hint: address points to the zero page.
 #0 0x0000015e575c in Get /-S/util/generic/ptr.h:592:16
 #1 0x0000015e575c in AsT /-S/util/generic/ptr.h:153:50
 #2 0x0000015e575c in operator-> /-S/util/generic/ptr.h:118:18
 #3 0x0000015e575c in NInterconnect::NRdma::TMemRegion::GetAddr() const /-S/ydb/library/actors/interconnect/rdma/mem_pool.cpp:222:20
 #4 0x0000005a0bd8 in TAllocatorSuite32_SlotPoolHugeAllocOtherThreadAfterSmall_Test::TestBody() /-S/ydb/library/actors/interconnect/rdma/ut_mem_pool_limit_32/allocator_ut.cpp:119:9
 #5 0x000000a001e4 in HandleSehExceptionsInMethodIfSupported /-S/contrib/restricted/googletest/googletest/src/gtest.cc:2664:10
 #6 0x000000a001e4 in void testing::internal::HandleExceptionsInMethodIfSupported(testing::Test*, void (testing::Test::*)(), char const*) /-S/contrib/restricted/googletest/googletest/src/gtest.cc:2700:14
 #7 0x0000009fff10 in testing::Test::Run() /-S/contrib/restricted/googletest/googletest/src/gtest.cc:2739:5
 #8 0x000000a02795 in testing::TestInfo::Run() /-S/contrib/restricted/googletest/googletest/src/gtest.cc:2885:11
 #9 0x000000a05641 in testing::TestSuite::Run() /-S/contrib/restricted/googletest/googletest/src/gtest.cc:3063:30
 #10 0x000000a3068e in testing::internal::UnitTestImpl::RunAllTests() /-S/contrib/restricted/googletest/googletest/src/gtest.cc:6054:44
 #11 0x000000a2f084 in HandleSehExceptionsInMethodIfSupported /-S/contrib/restricted/googletest/googletest/src/gtest.cc:2664:10
 #12 0x000000a2f084 in bool testing::internal::HandleExceptionsInMethodIfSupported(testing::internal::UnitTestImpl*, bool (testing::internal::UnitTestImpl::*)(), char const*) /-S/contrib/restricted/googletest/googletest/src/gtest.cc:2700:14
 #13 0x000000a2ebf6 in testing::UnitTest::Run() /-S/contrib/restricted/googletest/googletest/src/gtest.cc:5594:10
 #14 0x000000a8c38f in RUN_ALL_TESTS /-S/contrib/restricted/googletest/googletest/include/gtest/gtest.h:2334:73
 #15 0x000000a8c38f in NGTest::Main(int, char**) /-S/library/cpp/testing/gtest/main.cpp:274:12
 #16 0x7f45fb5ead8f (/lib/x86_64-linux-gnu/libc.so.6+0x29d8f) (BuildId: 4f7b0c955c3d81d7cac1501a2498b69d1d82bfe7)
 #17 0x7f45fb5eae3f in __libc_start_main (/lib/x86_64-linux-gnu/libc.so.6+0x29e3f) (BuildId: 4f7b0c955c3d81d7cac1501a2498b69d1d82bfe7)
 #18 0x00000056e728 in _start (/home/runner/.ya/build/build_root/663n/001b2c/ydb/library/actors/interconnect/rdma/ut_mem_pool_limit_32/interconnect-rdma-ut_mem_pool_limit_32+0x56e728) (BuildId: 79bd0b28b84cc2c76fefcd15693cb73338b91a97)
==44748==Register values:
rax = 0x0000000000000010  rbx = 0x00007ffdb218f280  rcx = 0x0000000000000002  rdx = 0x00000f68bf23bdf0
rdi = 0x0000000000000000  rsi = 0x00007b45f91def60  rbp = 0x00007ffdb218f270  rsp = 0x00007ffdb218f270
r8 = 0x00007d85fa4e0000  r9 = 0x0000000000000000  r10 = 0x00007fffffffff01  r11 = 0x0000000000000001
r12 = 0x00007b45f94ed620  r13 = 0x0000000000000020  r14 = 0x00007b45f94ed640  r15 = 0x00000f693f295ac0
AddressSanitizer can not provide additional info.
SUMMARY: AddressSanitizer: SEGV /-S/util/generic/ptr.h:592:16 in Get
==44748==ABORTING
ydb/library/actors/interconnect/rdma/ut_mem_pool_limit_32/allocator_ut.cpp:157: Value of: memRegion
  Actual: false
Expected: true
allocation failed
===
Average time per allocation t0: 25493 us, t1: 25351.6 us
Average time per allocation t0: 152.851 us, t1: 208.769 us
Average time per allocation t0: 179.161 us, t1: 132.023 us
Average time per allocation t0: 110.397 us, t1: 171.04 us
Average time per allocation t0: 166.994 us, t1: 129.978 us
Average time per allocation t0: 137.214 us, t1: 222.791 us
Average time per allocation t0: 129.33 us, t1: 134.006 us
Average time per allocation t0: 104.243 us, t1: 172.975 us
Average time per allocation t0: 105.816 us, t1: 80.367 us
Average time per allocation t0: 62.467 us, t1: 109.298 us
===
Average time per allocation t0: 15.8625 us, t1: 7.63163 us
Average time per allocation t0: 11.6832 us, t1: 7.02256 us
Average time per allocation t0: 8.89245 us, t1: 5.35415 us
Average time per allocation t0: 9.36722 us, t1: 6.02042 us
Average time per allocation t0: 12.3316 us, t1: 9.51314 us
Average time per allocation t0: 14.1404 us, t1: 6.80574 us
Average time per allocation t0: 9.95874 us, t1: 6.29743 us
Average time per allocation t0: 6.80527 us, t1: 3.80512 us
Average time per allocation t0: 4.87047 us, t1: 2.34465 us
Average time per allocation t0: 6.06184 us, t1: 3.36058 us
===
Average time per allocation t0: 17.2741 us, t1: 11.3293 us
Average time per allocation t0: 10.3055 us, t1: 10.102 us
Average time per allocation t0: 8.5295 us, t1: 10.6336 us
Average time per allocation t0: 16.6739 us, t1: 16.56 us
Average time per allocation t0: 12.8317 us, t1: 5.3972 us
Average time per allocation t0: 30.4968 us, t1: 24.1308 us
Average time per allocation t0: 2.9267 us, t1: 10.85 us
Average time per allocation t0: 35.6602 us, t1: 34.1679 us
Average time per allocation t0: 38.9674 us, t1: 63.881 us
Average time per allocation t0: 6.8522 us, t1: 35.0323 us
===
Average time per allocation t0: 98.1955 us, t1: 32.102 us
Average time per allocation t0: 37.65 us, t1: 44.4526 us
Average time per allocation t0: 25.9047 us, t1: 11.8268 us
Average time per allocation t0: 6.9866 us, t1: 12.6771 us
Average time per allocation t0: 19.348 us, t1: 5.3667 us
Average time per allocation t0: 5.8866 us, t1: 7.4911 us
Average time per allocation t0: 15.0043 us, t1: 4.962 us
Average time per allocation t0: 4.7827 us, t1: 12.713 us
Average time per allocation t0: 21.0645 us, t1: 5.7196 us
Average time per allocation t0: 7.8815 us, t1: 12.1061 us
===
Average time per allocation t0: 7.4944 us, t1: 12.5587 us
Average time per allocation t0: 16.3773 us, t1: 6.2794 us
Average time per allocation t0: 11.1321 us, t1: 18.6016 us
Average time per allocation t0: 40.1872 us, t1: 46.5289 us
Average time per allocation t0: 24.4029 us, t1: 12.2515 us
Average time per allocation t0: 5.1619 us, t1: 19.0363 us
Average time per allocation t0: 7.7283 us, t1: 4.3345 us
Average time per allocation t0: 7.0246 us, t1: 4.7218 us
Average time per allocation t0: 3.6122 us, t1: 7.213 us
Average time per allocation t0: 6.5817 us, t1: 2.8588 us
===
Average time per allocation t0: 6.3635 us, t1: 12.3508 us
Average time per allocation t0: 17.6031 us, t1: 4.3443 us
Average time per allocation t0: 6.5963 us, t1: 10.9462 us
Average time per allocation t0: 16.9375 us, t1: 22.2676 us
Average time per allocation t0: 17.6785 us, t1: 5.5756 us
Average time per allocation t0: 7.4372 us, t1: 13.3574 us
Average time per allocation t0: 21.0935 us, t1: 5.4189 us
Average time per allocation t0: 8.6964 us, t1: 12.2316 us
Average time per allocation t0: 14.5983 us, t1: 7.668 us
Average time per allocation t0: 29.1963 us, t1: 12.8052 us
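The groups above are the two-thread allocation runs (columns t0 and t1); the groups that follow add a third thread (t2). As a rough illustration only, here is a minimal sketch of how per-thread averages in this format can be measured; `AllocOnce`, the iteration count, and the thread layout are hypothetical stand-ins, not the actual code from allocator_ut.cpp, whose real benchmark allocates from the RDMA memory pool.

```cpp
#include <chrono>
#include <cstdio>
#include <thread>
#include <vector>

static volatile char Sink;

// Trivial stand-in for the allocation under test.
static void AllocOnce() {
    char* p = new char[4096];
    Sink = p[0];  // touch the allocation so it is not optimized away
    delete[] p;
}

// Mean wall-clock time of `iters` allocations on the calling thread, in us.
static double AverageAllocMicros(int iters) {
    const auto start = std::chrono::steady_clock::now();
    for (int i = 0; i < iters; ++i) {
        AllocOnce();
    }
    const auto end = std::chrono::steady_clock::now();
    return std::chrono::duration<double, std::micro>(end - start).count() / iters;
}

// Runs the measurement on `nThreads` threads concurrently and prints one
// "Average time per allocation" line in the same shape as the log above.
static void RunRound(int nThreads, int iters) {
    std::vector<double> avg(nThreads);
    std::vector<std::thread> threads;
    for (int t = 0; t < nThreads; ++t) {
        threads.emplace_back([&avg, t, iters] { avg[t] = AverageAllocMicros(iters); });
    }
    for (auto& th : threads) {
        th.join();
    }
    std::printf("Average time per allocation");
    for (int t = 0; t < nThreads; ++t) {
        std::printf(" t%d: %g us%s", t, avg[t], t + 1 < nThreads ? "," : "\n");
    }
}

int main() {
    RunRound(2, 1000);  // two-thread round, as in the groups above
    RunRound(3, 1000);  // three-thread round, as in the groups below
    return 0;
}
```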
===
Average time per allocation t0: 645.02 us, t1: 1786.95 us, t2: 742.71 us
Average time per allocation t0: 24716.6 us, t1: 25401 us, t2: 23697.6 us
Average time per allocation t0: 751.11 us, t1: 467.28 us, t2: 763.14 us
Average time per allocation t0: 863.19 us, t1: 272.67 us, t2: 804.25 us
Average time per allocation t0: 721.72 us, t1: 857.51 us, t2: 284.74 us
Average time per allocation t0: 894.74 us, t1: 849.53 us, t2: 470.19 us
Average time per allocation t0: 157.95 us, t1: 550.34 us, t2: 690.03 us
Average time per allocation t0: 845.93 us, t1: 604.63 us, t2: 336.23 us
Average time per allocation t0: 1299.33 us, t1: 739.95 us, t2: 2101.57 us
Average time per allocation t0: 3807.84 us, t1: 5167.99 us, t2: 1189.81 us
===
Average time per allocation t0: 87.9232 us, t1: 63.3234 us, t2: 38.8789 us
Average time per allocation t0: 91.2719 us, t1: 75.637 us, t2: 39.8466 us
Average time per allocation t0: 41.5072 us, t1: 6.4663 us, t2: 3.7943 us
Average time per allocation t0: 233.536 us, t1: 139.57 us, t2: 82.1735 us
Average time per allocation t0: 332.036 us, t1: 209.071 us, t2: 181.82 us
Average time per allocation t0: 301.721 us, t1: 271.445 us, t2: 222.791 us
Average time per allocation t0: 188.742 us, t1: 129.434 us, t2: 119.66 us
Average time per allocation t0: 143.878 us, t1: 111.615 us, t2: 101.639 us
Average time per allocation t0: 122.4 us, t1: 85.5107 us, t2: 41.4177 us
Average time per allocation t0: 158.659 us, t1: 108.013 us, t2: 42.7136 us
===
Average time per allocation t0: 57.9491 us, t1: 27.0442 us, t2: 42.664 us
Average time per allocation t0: 49.1966 us, t1: 19.402 us, t2: 19.9269 us
Average time per allocation t0: 139.544 us, t1: 133.086 us, t2: 103.793 us
Average time per allocation t0: 40.244 us, t1: 83.8114 us, t2: 25.7291 us
Average time per allocation t0: 9.9662 us, t1: 59.571 us, t2: 66.6723 us
Average time per allocation t0: 34.5189 us, t1: 42.2059 us, t2: 39.2658 us
Average time per allocation t0: 27.4716 us, t1: 19.5446 us, t2: 11.6869 us
Average time per allocation t0: 27.2569 us, t1: 17.6882 us, t2: 14.7563 us
Average time per allocation t0: 59.1228 us, t1: 70.3533 us, t2: 39.7907 us
Average time per allocation t0: 19.9096 us, t1: 57.669 us, t2: 45.7514 us
===
Average time per allocation t0: 40.5502 us, t1: 13.3359 us, t2: 8.21021 us
Average time per allocation t0: 30.93 us, t1: 11.8504 us, t2: 5.47766 us
Average time per allocation t0: 18.2368 us, t1: 10.7245 us, t2: 5.80042 us
Average time per allocation t0: 14.7996 us, t1: 18.8594 us, t2: 4.20886 us
Average time per allocation t0: 18.1414 us, t1: 13.2065 us, t2: 5.2373 us
Average time per allocation t0: 25.7207 us, t1: 17.327 us, t2: 6.6189 us
Average time per allocation t0: 24.8878 us, t1: 30.0349 us, t2: 10.0334 us
Average time per allocation t0: 65.785 us, t1: 4.4876 us, t2: 12.287 us
Average time per allocation t0: 24.9396 us, t1: 26.926 us, t2: 15.1248 us
Average time per allocation t0: 54.4201 us, t1: 69.2711 us, t2: 12.1125 us
|85.2%| [TM] {BAZEL_UPLOAD} ydb/library/actors/interconnect/rdma/ut_mem_pool_limit_32/gtest
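The three AddressSanitizer reports in the ut_mem_pool_limit_32 output above share one signature: a READ at address 0x000000000010 inside NInterconnect::NRdma::TMemRegion::GetAddr(), reached through smart-pointer Get/operator-> in util/generic/ptr.h, right after a gtest check on the allocation result failed. That pattern is consistent with calling a method through a null smart pointer whose target field sits at a small offset, so the "unknown address" is simply nullptr + 0x10. A minimal standalone sketch of that failure mode, using std::unique_ptr in place of the util smart pointers; the struct layout here is an assumption chosen so the faulting offset matches the report, not the real TMemRegion:

```cpp
#include <cstdint>
#include <memory>

// Illustrative layout only: a field of the pointee ends up at byte offset
// 0x10, so reading it through a null pointer faults at address 0x10,
// matching "SEGV on unknown address 0x000000000010".
struct TRegionImpl {
    uint64_t Header[2];  // bytes 0..15
    void* Addr;          // byte offset 0x10
};

struct TMemRegionLike {
    std::unique_ptr<TRegionImpl> Impl;  // stays null if allocation failed
    void* GetAddr() const {
        return Impl->Addr;  // null Impl => read at nullptr + 0x10 => SEGV
    }
};

int main() {
    TMemRegionLike region;               // allocation failed upstream
    return region.GetAddr() != nullptr;  // crashes here under ASan
}
```

The gtest failures printed just before each crash ("Value of: i == 8" and "Value of: memRegion ... Expected: true", followed by "allocation failed") suggest the pool refused the allocation under the limit this test target configures and the test then used the empty result anyway; guarding the dereference with ASSERT_TRUE(memRegion), which returns from the test on failure, would turn the SEGV into a clean test failure.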
>> TestFormatHandler::Watermark [GOOD]
>> TestFormatHandler::WatermarkWhere
>> BSCReadOnlyPDisk::ReadOnlyOneByOne [GOOD]
>> DataShardSnapshotIsolation::ReadWriteNoLocksNoConflict
>> ReadSessionImplTest::DataReceivedCallbackReal [GOOD]
>> TabletService_ChangeSchema::Basics
------- [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/ut_blobstorage/ut_read_only_pdisk/unittest >> BSCReadOnlyPDisk::ReadOnlyOneByOne [GOOD]
Test command err: RandomSeed# 4900057801735407197
|85.2%| [TM] {BAZEL_UPLOAD} ydb/core/blobstorage/ut_blobstorage/ut_read_only_pdisk/unittest
>> BridgeGet::PartRestorationAcrossBridgeOnRange [GOOD]
>> KeyValueGRPCService::TestSimpleExecuteTransactionV1
>> GenericProviderLookupActor::Lookup [GOOD]
>> GenericProviderLookupActor::LookupMultiMatches [GOOD]
>> GenericProviderLookupActor::LookupWithErrors
>> GenericProviderLookupActor::LookupWithErrors [GOOD]
|85.2%| [TA] $(B)/ydb/core/blobstorage/ut_blobstorage/ut_read_only_pdisk/test-results/unittest/{meta.json ... results_accumulator.log}
|85.2%| [TA] {BAZEL_UPLOAD} $(B)/ydb/core/blobstorage/ut_blobstorage/ut_read_only_pdisk/test-results/unittest/{meta.json ... results_accumulator.log}
>> DataShardCompaction::CompactBorrowedTxStatus [GOOD]
|85.2%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/kqp/ut/view/ydb-core-kqp-ut-view
|85.2%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/kqp/ut/view/ydb-core-kqp-ut-view
|85.2%| [TM] {RESULT} ydb/library/actors/interconnect/rdma/ut_mem_pool_limit_32/gtest
|85.2%| [TA] {RESULT} $(B)/ydb/core/blobstorage/ut_blobstorage/ut_read_only_pdisk/test-results/unittest/{meta.json ... results_accumulator.log}
|85.2%| [LD] {RESULT} $(B)/ydb/core/kqp/ut/view/ydb-core-kqp-ut-view
------- [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/ut_blobstorage/ut_bridge/unittest >> BridgeGet::PartRestorationAcrossBridgeOnRange [GOOD]
Test command err: RandomSeed# 14689360777137906371
indexOnly# 1 reverse# 1 mask1# 0 mask2# 0 mask3# 0 *** performing bridge range *** reading from i# 0 *** reading from i# 1 *** reading from i# 2 indexOnly# 1 reverse# 1 mask1# 0 mask2# 0 mask3# 1 *** performing bridge range *** reading from i# 0 *** reading from i# 1 *** reading from i# 2 indexOnly# 1 reverse# 1 mask1# 0 mask2# 0 mask3# 2 *** performing bridge range *** reading from i# 0 *** reading from i# 1 *** reading from i# 2 indexOnly# 1 reverse# 1 mask1# 0 mask2# 0 mask3# 3 *** performing bridge range *** reading from i# 0 *** reading from i# 1 *** reading from i# 2 indexOnly# 1 reverse# 1 mask1# 0 mask2# 0 mask3# 4 *** performing bridge range *** reading from i# 0 *** reading from i# 1 *** reading from i# 2 indexOnly# 1 reverse# 1 mask1# 0 mask2# 0 mask3# 5 *** performing bridge range *** reading from i# 0 *** reading from i# 1 *** reading from i# 2 indexOnly# 1 reverse# 1 mask1# 0 mask2# 0 mask3# 6 *** performing bridge range *** reading from i# 0 *** reading from i# 1 *** reading from i# 2 indexOnly# 1 reverse# 1 mask1# 0 mask2# 0 mask3# 7 *** performing bridge range *** reading from i# 0 *** reading from i# 1 *** reading from i# 2 indexOnly# 1 reverse# 1 mask1# 0 mask2# 1 mask3# 0 *** performing bridge range *** reading from i# 0 *** reading from i# 1 *** reading from i# 2 indexOnly# 1 reverse# 1 mask1# 0 mask2# 1 mask3# 1 *** performing bridge range *** reading from i# 0 *** reading from i# 1 *** reading from i# 2 indexOnly# 1 reverse# 1 mask1# 0 mask2# 1 mask3# 2 *** performing bridge range *** reading from i# 0 *** reading from i# 1 *** reading from i# 2 indexOnly# 1 reverse# 1 mask1# 0 mask2# 1 mask3# 3 *** performing bridge range *** reading from i# 0 *** reading from i# 1 *** reading from i# 2 indexOnly# 1 reverse# 1 mask1# 0 mask2# 1 mask3# 4 *** performing bridge range *** reading from i# 0 *** reading from i# 1 *** reading from i# 2 indexOnly# 1 reverse# 1 mask1# 0
mask2# 1 mask3# 5 *** performing bridge range *** reading from i# 0 *** reading from i# 1 *** reading from i# 2 indexOnly# 1 reverse# 1 mask1# 0 mask2# 1 mask3# 6 *** performing bridge range *** reading from i# 0 *** reading from i# 1 *** reading from i# 2 indexOnly# 1 reverse# 1 mask1# 0 mask2# 1 mask3# 7 *** performing bridge range *** reading from i# 0 *** reading from i# 1 *** reading from i# 2 indexOnly# 1 reverse# 1 mask1# 0 mask2# 2 mask3# 0 *** performing bridge range *** reading from i# 0 *** reading from i# 1 *** reading from i# 2 indexOnly# 1 reverse# 1 mask1# 0 mask2# 2 mask3# 1 *** performing bridge range *** reading from i# 0 *** reading from i# 1 *** reading from i# 2 indexOnly# 1 reverse# 1 mask1# 0 mask2# 2 mask3# 2 *** performing bridge range *** reading from i# 0 *** reading from i# 1 *** reading from i# 2 indexOnly# 1 reverse# 1 mask1# 0 mask2# 2 mask3# 3 *** performing bridge range *** reading from i# 0 *** reading from i# 1 *** reading from i# 2 indexOnly# 1 reverse# 1 mask1# 0 mask2# 2 mask3# 4 *** performing bridge range *** reading from i# 0 *** reading from i# 1 *** reading from i# 2 indexOnly# 1 reverse# 1 mask1# 0 mask2# 2 mask3# 5 *** performing bridge range *** reading from i# 0 *** reading from i# 1 *** reading from i# 2 indexOnly# 1 reverse# 1 mask1# 0 mask2# 2 mask3# 6 *** performing bridge range *** reading from i# 0 *** reading from i# 1 *** reading from i# 2 indexOnly# 1 reverse# 1 mask1# 0 mask2# 2 mask3# 7 *** performing bridge range *** reading from i# 0 *** reading from i# 1 *** reading from i# 2 indexOnly# 1 reverse# 1 mask1# 0 mask2# 3 mask3# 0 *** performing bridge range *** reading from i# 0 *** reading from i# 1 *** reading from i# 2 indexOnly# 1 reverse# 1 mask1# 0 mask2# 3 mask3# 1 *** performing bridge range *** reading from i# 0 *** reading from i# 1 *** reading from i# 2 indexOnly# 1 reverse# 1 mask1# 0 mask2# 3 mask3# 2 *** performing bridge range *** reading from i# 0 *** reading from i# 1 *** reading from i# 2 indexOnly# 1 reverse# 1 mask1# 0 mask2# 3 mask3# 3 *** performing bridge range *** reading from i# 0 *** reading from i# 1 *** reading from i# 2 indexOnly# 1 reverse# 1 mask1# 0 mask2# 3 mask3# 4 *** performing bridge range *** reading from i# 0 *** reading from i# 1 *** reading from i# 2 indexOnly# 1 reverse# 1 mask1# 0 mask2# 3 mask3# 5 *** performing bridge range *** reading from i# 0 *** reading from i# 1 *** reading from i# 2 indexOnly# 1 reverse# 1 mask1# 0 mask2# 3 mask3# 6 *** performing bridge range *** reading from i# 0 *** reading from i# 1 *** reading from i# 2 indexOnly# 1 reverse# 1 mask1# 0 mask2# 3 mask3# 7 *** performing bridge range *** reading from i# 0 *** reading from i# 1 *** reading from i# 2 indexOnly# 1 reverse# 1 mask1# 0 mask2# 4 mask3# 0 *** performing bridge range *** reading from i# 0 *** reading from i# 1 *** reading from i# 2 indexOnly# 1 reverse# 1 mask1# 0 mask2# 4 mask3# 1 *** performing bridge range *** reading from i# 0 *** reading from i# 1 *** reading from i# 2 indexOnly# 1 reverse# 1 mask1# 0 mask2# 4 mask3# 2 *** performing bridge range *** reading from i# 0 *** reading from i# 1 *** reading from i# 2 indexOnly# 1 reverse# 1 mask1# 0 mask2# 4 mask3# 3 *** performing bridge range *** reading from i# 0 *** reading from i# 1 *** reading from i# 2 indexOnly# 1 reverse# 1 mask1# 0 mask2# 4 mask3# 4 *** performing bridge range *** reading from i# 0 *** reading from i# 1 *** reading from i# 2 indexOnly# 1 reverse# 1 mask1# 0 mask2# 4 mask3# 5 *** performing bridge range *** reading from i# 0 *** 
reading from i# 1 *** reading from i# 2 indexOnly# 1 reverse# 1 mask1# 0 mask2# 4 mask3# 6 *** performing bridge range *** reading from i# 0 *** reading from i# 1 *** reading from i# 2 indexOnly# 1 reverse# 1 mask1# 0 mask2# 4 mask3# 7 *** performing bridge range *** reading from i# 0 *** reading from i# 1 *** reading from i# 2 indexOnly# 1 reverse# 1 mask1# 0 mask2# 5 mask3# 0 *** performing bridge range *** reading from i# 0 *** reading from i# 1 *** reading from i# 2 indexOnly# 1 reverse# 1 mask1# 0 mask2# 5 mask3# 1 *** performing bridge range *** reading from i# 0 *** reading from i# 1 *** reading from i# 2 indexOnly# 1 reverse# 1 mask1# 0 mask2# 5 mask3# 2 *** performing bridge range *** reading from i# 0 *** reading from i# 1 *** reading from i# 2 indexOnly# 1 reverse# 1 mask1# 0 mask2# 5 mask3# 3 *** performing bridge range *** reading from i# 0 *** reading from i# 1 *** reading from i# 2 indexOnly# 1 reverse# 1 mask1# 0 mask2# 5 mask3# 4 *** performing bridge range *** reading from i# 0 *** reading from i# 1 *** reading from i# 2 indexOnly# 1 reverse# 1 mask1# 0 mask2# 5 mask3# 5 *** performing bridge range *** reading from i# 0 *** reading from i# 1 *** reading from i# 2 indexOnly# 1 reverse# 1 mask1# 0 mask2# 5 mask3# 6 *** performing bridge range *** reading from i# 0 *** reading from i# 1 *** reading from i# 2 indexOnly# 1 reverse# 1 mask1# 0 mask2# 5 mask3# 7 *** performing bridge range *** reading from i# 0 *** reading from i# 1 *** reading from i# 2 indexOnly# 1 reverse# 1 mask1# 0 mask2# 6 mask3# 0 *** performing bridge range *** reading from i# 0 *** reading from i# 1 *** reading from i# 2 indexOnly# 1 reverse# 1 mask1# 0 mask2# 6 mask3# 1 *** performing bridge range *** reading from i# 0 *** reading from i# 1 *** reading from i# 2 indexOnly# 1 reverse# 1 mask1# 0 mask2# 6 mask3# 2 *** performing bridge range *** reading from i# 0 *** reading from i# 1 *** reading from i# 2 indexOnly# 1 reverse# 1 mask1# 0 mask2# 6 mask3# 3 *** performing bridge range *** reading from i# 0 *** reading from i# 1 *** reading from i# 2 indexOnly# 1 reverse# 1 mask1# 0 mask2# 6 mask3# 4 *** performing bridge range *** reading from i# 0 *** reading from i# 1 *** reading from i# 2 indexOnly# 1 reverse# 1 mask1# 0 mask2# 6 mask3# 5 *** performing bridge range *** reading from i# 0 *** reading from i# 1 *** reading from i# 2 indexOnly# 1 reverse# 1 mask1# 0 mask2# 6 mask3# 6 *** performing bridge range *** reading from i# 0 *** reading from i# 1 *** reading from i# 2 indexOnly# 1 reverse# 1 mask1# 0 mask2# 6 mask3# 7 *** performing bridge range *** reading from i# 0 *** reading from i# 1 *** reading from i# 2 indexOnly# 1 reverse# 1 mask1# 0 mask2# 7 mask3# 0 *** performing bridge range *** reading from i# 0 *** reading from i# 1 *** reading from i# 2 indexOnly# 1 reverse# 1 mask1# 0 mask2# 7 mask3# 1 *** performing bridge range *** reading from i# 0 *** reading from i# 1 *** reading from i# 2 indexOnly# 1 reverse# 1 mask1# 0 mask2# 7 mask3# 2 *** performing bridge range *** reading from i# 0 *** reading from i# 1 *** reading from i# 2 indexOnly# 1 reverse# 1 mask1# 0 mask2# 7 mask3# 3 *** performing bridge range *** reading from i# 0 *** reading from i# 1 *** reading from i# 2 indexOnly# 1 reverse# 1 mask1# 0 mask2# 7 mask3# 4 *** performing bridge range *** reading from i# 0 *** reading from i# 1 *** reading from i# 2 indexOnly# 1 reverse# 1 mask1# 0 mask2# 7 mask3# 5 *** performing bridge range *** reading from i# 0 *** reading from i# 1 *** reading from i# 2 indexOnly# 1 reverse# 1 mask1# 0 
mask2# 7 mask3# 6 *** performing bridge range *** reading from i# 0 *** reading from i# 1 *** reading from i# 2 indexOnly# 1 reverse# 1 mask1# 0 mask2# 7 mask3# 7 *** performing bridge range *** reading from i# 0 *** reading from i# 1 *** reading from i# 2 indexOnly# 1 reverse# 1 mask1# 1 mask2# 0 mask3# 0 *** performing bridge range *** reading from i# 0 *** reading from i# 1 *** reading from i# 2 indexOnly# 1 reverse# 1 mask1# 1 mask2# 0 mask3# 1 *** performing bridge range *** reading from i# 0 *** reading from i# 1 *** reading from i# 2 indexOnly# 1 reverse# 1 mask1# 1 mask2# 0 mask3# 2 *** performing bridge range *** reading from i# 0 *** reading from i# 1 *** reading from i# 2 indexOnly# 1 reverse# 1 mask1# 1 mask2# 0 mask3# 3 *** performing bridge range *** reading from i# 0 *** reading from i# 1 *** reading from i# 2 indexOnly# 1 reverse# 1 mask1# 1 mask2# 0 mask3# 4 *** performing bridge range *** reading from i# 0 *** reading from i# 1 *** reading from i# 2 indexOnly# 1 reverse# 1 mask1# 1 mask2# 0 mask3# 5 *** performing bridge range *** reading from i# 0 *** reading from i# 1 *** reading from i# 2 indexOnly# 1 reverse# 1 mask1# 1 mask2# 0 mask3# 6 *** performing bridge range *** reading from i# 0 *** reading from i# 1 *** reading from i# 2 indexOnly# 1 reverse# 1 mask1# 1 mask2# 0 mask3# 7 *** performing bridge range *** reading from i# 0 *** reading from i# 1 *** reading from i# 2 indexOnly# 1 reverse# 1 mask1# 1 mask2# 1 mask3# 0 *** performing bridge range *** reading from i# 0 *** reading from i# 1 *** reading from i# 2 indexOnly# 1 reverse# 1 mask1# 1 mask2# 1 mask3# 1 *** performing bridge range *** reading from i# 0 *** reading from i# 1 *** reading from i# 2 indexOnly# 1 reverse# 1 mask1# 1 mask2# 1 mask3# 2 *** performing bridge range *** reading from i# 0 *** reading from i# 1 *** reading from i# 2 indexOnly# 1 reverse# 1 mask1# 1 mask2# 1 mask3# 3 *** performing bridge range *** reading from i# 0 *** reading from i# 1 *** reading from i# 2 indexOnly# 1 reverse# 1 mask1# 1 mask2# 1 mask3# 4 *** performing bridge range *** reading from i# 0 *** reading from i# 1 *** reading from i# 2 indexOnly# 1 reverse# 1 mask1# 1 mask2# 1 mask3# 5 *** performing bridge range *** reading from i# 0 *** reading from i# 1 *** reading from i# 2 indexOnly# 1 reverse# 1 mask1# 1 mask2# 1 mask3# 6 *** performing bridge range *** reading from i# 0 *** reading from i# 1 *** reading from i# 2 indexOnly# 1 reverse# 1 mask1# 1 mask2# 1 mask3# 7 *** performing bridge range *** reading from i# 0 *** reading from i# 1 *** reading from i# 2 indexOnly# 1 reverse# 1 mask1# 1 mask2# 2 mask3# 0 *** performing bridge range *** reading from i# 0 *** reading from i# 1 *** reading from i# 2 indexOnly# 1 reverse# 1 mask1# 1 mask2# 2 mask3# 1 *** performing bridge range *** reading from i# 0 *** reading from i# 1 *** reading from i# 2 indexOnly# 1 reverse# 1 mask1# 1 mask2# 2 mask3# 2 *** performing bridge rang ... 
sk2# 5 mask3# 5 *** performing bridge range *** reading from i# 0 *** reading from i# 1 *** reading from i# 2 indexOnly# 0 reverse# 0 mask1# 6 mask2# 5 mask3# 6 *** performing bridge range *** reading from i# 0 *** reading from i# 1 *** reading from i# 2 indexOnly# 0 reverse# 0 mask1# 6 mask2# 5 mask3# 7 *** performing bridge range *** reading from i# 0 *** reading from i# 1 *** reading from i# 2 indexOnly# 0 reverse# 0 mask1# 6 mask2# 6 mask3# 0 *** performing bridge range *** reading from i# 0 *** reading from i# 1 *** reading from i# 2 indexOnly# 0 reverse# 0 mask1# 6 mask2# 6 mask3# 1 *** performing bridge range *** reading from i# 0 *** reading from i# 1 *** reading from i# 2 indexOnly# 0 reverse# 0 mask1# 6 mask2# 6 mask3# 2 *** performing bridge range *** reading from i# 0 *** reading from i# 1 *** reading from i# 2 indexOnly# 0 reverse# 0 mask1# 6 mask2# 6 mask3# 3 *** performing bridge range *** reading from i# 0 *** reading from i# 1 *** reading from i# 2 indexOnly# 0 reverse# 0 mask1# 6 mask2# 6 mask3# 4 *** performing bridge range *** reading from i# 0 *** reading from i# 1 *** reading from i# 2 indexOnly# 0 reverse# 0 mask1# 6 mask2# 6 mask3# 5 *** performing bridge range *** reading from i# 0 *** reading from i# 1 *** reading from i# 2 indexOnly# 0 reverse# 0 mask1# 6 mask2# 6 mask3# 6 *** performing bridge range *** reading from i# 0 *** reading from i# 1 *** reading from i# 2 indexOnly# 0 reverse# 0 mask1# 6 mask2# 6 mask3# 7 *** performing bridge range *** reading from i# 0 *** reading from i# 1 *** reading from i# 2 indexOnly# 0 reverse# 0 mask1# 6 mask2# 7 mask3# 0 *** performing bridge range *** reading from i# 0 *** reading from i# 1 *** reading from i# 2 indexOnly# 0 reverse# 0 mask1# 6 mask2# 7 mask3# 1 *** performing bridge range *** reading from i# 0 *** reading from i# 1 *** reading from i# 2 indexOnly# 0 reverse# 0 mask1# 6 mask2# 7 mask3# 2 *** performing bridge range *** reading from i# 0 *** reading from i# 1 *** reading from i# 2 indexOnly# 0 reverse# 0 mask1# 6 mask2# 7 mask3# 3 *** performing bridge range *** reading from i# 0 *** reading from i# 1 *** reading from i# 2 indexOnly# 0 reverse# 0 mask1# 6 mask2# 7 mask3# 4 *** performing bridge range *** reading from i# 0 *** reading from i# 1 *** reading from i# 2 indexOnly# 0 reverse# 0 mask1# 6 mask2# 7 mask3# 5 *** performing bridge range *** reading from i# 0 *** reading from i# 1 *** reading from i# 2 indexOnly# 0 reverse# 0 mask1# 6 mask2# 7 mask3# 6 *** performing bridge range *** reading from i# 0 *** reading from i# 1 *** reading from i# 2 indexOnly# 0 reverse# 0 mask1# 6 mask2# 7 mask3# 7 *** performing bridge range *** reading from i# 0 *** reading from i# 1 *** reading from i# 2 indexOnly# 0 reverse# 0 mask1# 7 mask2# 0 mask3# 0 *** performing bridge range *** reading from i# 0 *** reading from i# 1 *** reading from i# 2 indexOnly# 0 reverse# 0 mask1# 7 mask2# 0 mask3# 1 *** performing bridge range *** reading from i# 0 *** reading from i# 1 *** reading from i# 2 indexOnly# 0 reverse# 0 mask1# 7 mask2# 0 mask3# 2 *** performing bridge range *** reading from i# 0 *** reading from i# 1 *** reading from i# 2 indexOnly# 0 reverse# 0 mask1# 7 mask2# 0 mask3# 3 *** performing bridge range *** reading from i# 0 *** reading from i# 1 *** reading from i# 2 indexOnly# 0 reverse# 0 mask1# 7 mask2# 0 mask3# 4 *** performing bridge range *** reading from i# 0 *** reading from i# 1 *** reading from i# 2 indexOnly# 0 reverse# 0 mask1# 7 mask2# 0 mask3# 5 *** performing bridge range *** reading from i# 0 *** 
reading from i# 1 *** reading from i# 2 indexOnly# 0 reverse# 0 mask1# 7 mask2# 0 mask3# 6 *** performing bridge range *** reading from i# 0 *** reading from i# 1 *** reading from i# 2 indexOnly# 0 reverse# 0 mask1# 7 mask2# 0 mask3# 7 *** performing bridge range *** reading from i# 0 *** reading from i# 1 *** reading from i# 2 indexOnly# 0 reverse# 0 mask1# 7 mask2# 1 mask3# 0 *** performing bridge range *** reading from i# 0 *** reading from i# 1 *** reading from i# 2 indexOnly# 0 reverse# 0 mask1# 7 mask2# 1 mask3# 1 *** performing bridge range *** reading from i# 0 *** reading from i# 1 *** reading from i# 2 indexOnly# 0 reverse# 0 mask1# 7 mask2# 1 mask3# 2 *** performing bridge range *** reading from i# 0 *** reading from i# 1 *** reading from i# 2 indexOnly# 0 reverse# 0 mask1# 7 mask2# 1 mask3# 3 *** performing bridge range *** reading from i# 0 *** reading from i# 1 *** reading from i# 2 indexOnly# 0 reverse# 0 mask1# 7 mask2# 1 mask3# 4 *** performing bridge range *** reading from i# 0 *** reading from i# 1 *** reading from i# 2 indexOnly# 0 reverse# 0 mask1# 7 mask2# 1 mask3# 5 *** performing bridge range *** reading from i# 0 *** reading from i# 1 *** reading from i# 2 indexOnly# 0 reverse# 0 mask1# 7 mask2# 1 mask3# 6 *** performing bridge range *** reading from i# 0 *** reading from i# 1 *** reading from i# 2 indexOnly# 0 reverse# 0 mask1# 7 mask2# 1 mask3# 7 *** performing bridge range *** reading from i# 0 *** reading from i# 1 *** reading from i# 2 indexOnly# 0 reverse# 0 mask1# 7 mask2# 2 mask3# 0 *** performing bridge range *** reading from i# 0 *** reading from i# 1 *** reading from i# 2 indexOnly# 0 reverse# 0 mask1# 7 mask2# 2 mask3# 1 *** performing bridge range *** reading from i# 0 *** reading from i# 1 *** reading from i# 2 indexOnly# 0 reverse# 0 mask1# 7 mask2# 2 mask3# 2 *** performing bridge range *** reading from i# 0 *** reading from i# 1 *** reading from i# 2 indexOnly# 0 reverse# 0 mask1# 7 mask2# 2 mask3# 3 *** performing bridge range *** reading from i# 0 *** reading from i# 1 *** reading from i# 2 indexOnly# 0 reverse# 0 mask1# 7 mask2# 2 mask3# 4 *** performing bridge range *** reading from i# 0 *** reading from i# 1 *** reading from i# 2 indexOnly# 0 reverse# 0 mask1# 7 mask2# 2 mask3# 5 *** performing bridge range *** reading from i# 0 *** reading from i# 1 *** reading from i# 2 indexOnly# 0 reverse# 0 mask1# 7 mask2# 2 mask3# 6 *** performing bridge range *** reading from i# 0 *** reading from i# 1 *** reading from i# 2 indexOnly# 0 reverse# 0 mask1# 7 mask2# 2 mask3# 7 *** performing bridge range *** reading from i# 0 *** reading from i# 1 *** reading from i# 2 indexOnly# 0 reverse# 0 mask1# 7 mask2# 3 mask3# 0 *** performing bridge range *** reading from i# 0 *** reading from i# 1 *** reading from i# 2 indexOnly# 0 reverse# 0 mask1# 7 mask2# 3 mask3# 1 *** performing bridge range *** reading from i# 0 *** reading from i# 1 *** reading from i# 2 indexOnly# 0 reverse# 0 mask1# 7 mask2# 3 mask3# 2 *** performing bridge range *** reading from i# 0 *** reading from i# 1 *** reading from i# 2 indexOnly# 0 reverse# 0 mask1# 7 mask2# 3 mask3# 3 *** performing bridge range *** reading from i# 0 *** reading from i# 1 *** reading from i# 2 indexOnly# 0 reverse# 0 mask1# 7 mask2# 3 mask3# 4 *** performing bridge range *** reading from i# 0 *** reading from i# 1 *** reading from i# 2 indexOnly# 0 reverse# 0 mask1# 7 mask2# 3 mask3# 5 *** performing bridge range *** reading from i# 0 *** reading from i# 1 *** reading from i# 2 indexOnly# 0 reverse# 0 mask1# 7 
mask2# 3 mask3# 6 *** performing bridge range *** reading from i# 0 *** reading from i# 1 *** reading from i# 2 indexOnly# 0 reverse# 0 mask1# 7 mask2# 3 mask3# 7 *** performing bridge range *** reading from i# 0 *** reading from i# 1 *** reading from i# 2 indexOnly# 0 reverse# 0 mask1# 7 mask2# 4 mask3# 0 *** performing bridge range *** reading from i# 0 *** reading from i# 1 *** reading from i# 2 indexOnly# 0 reverse# 0 mask1# 7 mask2# 4 mask3# 1 *** performing bridge range *** reading from i# 0 *** reading from i# 1 *** reading from i# 2 indexOnly# 0 reverse# 0 mask1# 7 mask2# 4 mask3# 2 *** performing bridge range *** reading from i# 0 *** reading from i# 1 *** reading from i# 2 indexOnly# 0 reverse# 0 mask1# 7 mask2# 4 mask3# 3 *** performing bridge range *** reading from i# 0 *** reading from i# 1 *** reading from i# 2 indexOnly# 0 reverse# 0 mask1# 7 mask2# 4 mask3# 4 *** performing bridge range *** reading from i# 0 *** reading from i# 1 *** reading from i# 2 indexOnly# 0 reverse# 0 mask1# 7 mask2# 4 mask3# 5 *** performing bridge range *** reading from i# 0 *** reading from i# 1 *** reading from i# 2 indexOnly# 0 reverse# 0 mask1# 7 mask2# 4 mask3# 6 *** performing bridge range *** reading from i# 0 *** reading from i# 1 *** reading from i# 2 indexOnly# 0 reverse# 0 mask1# 7 mask2# 4 mask3# 7 *** performing bridge range *** reading from i# 0 *** reading from i# 1 *** reading from i# 2 indexOnly# 0 reverse# 0 mask1# 7 mask2# 5 mask3# 0 *** performing bridge range *** reading from i# 0 *** reading from i# 1 *** reading from i# 2 indexOnly# 0 reverse# 0 mask1# 7 mask2# 5 mask3# 1 *** performing bridge range *** reading from i# 0 *** reading from i# 1 *** reading from i# 2 indexOnly# 0 reverse# 0 mask1# 7 mask2# 5 mask3# 2 *** performing bridge range *** reading from i# 0 *** reading from i# 1 *** reading from i# 2 indexOnly# 0 reverse# 0 mask1# 7 mask2# 5 mask3# 3 *** performing bridge range *** reading from i# 0 *** reading from i# 1 *** reading from i# 2 indexOnly# 0 reverse# 0 mask1# 7 mask2# 5 mask3# 4 *** performing bridge range *** reading from i# 0 *** reading from i# 1 *** reading from i# 2 indexOnly# 0 reverse# 0 mask1# 7 mask2# 5 mask3# 5 *** performing bridge range *** reading from i# 0 *** reading from i# 1 *** reading from i# 2 indexOnly# 0 reverse# 0 mask1# 7 mask2# 5 mask3# 6 *** performing bridge range *** reading from i# 0 *** reading from i# 1 *** reading from i# 2 indexOnly# 0 reverse# 0 mask1# 7 mask2# 5 mask3# 7 *** performing bridge range *** reading from i# 0 *** reading from i# 1 *** reading from i# 2 indexOnly# 0 reverse# 0 mask1# 7 mask2# 6 mask3# 0 *** performing bridge range *** reading from i# 0 *** reading from i# 1 *** reading from i# 2 indexOnly# 0 reverse# 0 mask1# 7 mask2# 6 mask3# 1 *** performing bridge range *** reading from i# 0 *** reading from i# 1 *** reading from i# 2 indexOnly# 0 reverse# 0 mask1# 7 mask2# 6 mask3# 2 *** performing bridge range *** reading from i# 0 *** reading from i# 1 *** reading from i# 2 indexOnly# 0 reverse# 0 mask1# 7 mask2# 6 mask3# 3 *** performing bridge range *** reading from i# 0 *** reading from i# 1 *** reading from i# 2 indexOnly# 0 reverse# 0 mask1# 7 mask2# 6 mask3# 4 *** performing bridge range *** reading from i# 0 *** reading from i# 1 *** reading from i# 2 indexOnly# 0 reverse# 0 mask1# 7 mask2# 6 mask3# 5 *** performing bridge range *** reading from i# 0 *** reading from i# 1 *** reading from i# 2 indexOnly# 0 reverse# 0 mask1# 7 mask2# 6 mask3# 6 *** performing bridge range *** reading from i# 0 *** 
reading from i# 1 *** reading from i# 2 indexOnly# 0 reverse# 0 mask1# 7 mask2# 6 mask3# 7 *** performing bridge range *** reading from i# 0 *** reading from i# 1 *** reading from i# 2 indexOnly# 0 reverse# 0 mask1# 7 mask2# 7 mask3# 0 *** performing bridge range *** reading from i# 0 *** reading from i# 1 *** reading from i# 2 indexOnly# 0 reverse# 0 mask1# 7 mask2# 7 mask3# 1 *** performing bridge range *** reading from i# 0 *** reading from i# 1 *** reading from i# 2 indexOnly# 0 reverse# 0 mask1# 7 mask2# 7 mask3# 2 *** performing bridge range *** reading from i# 0 *** reading from i# 1 *** reading from i# 2 indexOnly# 0 reverse# 0 mask1# 7 mask2# 7 mask3# 3 *** performing bridge range *** reading from i# 0 *** reading from i# 1 *** reading from i# 2 indexOnly# 0 reverse# 0 mask1# 7 mask2# 7 mask3# 4 *** performing bridge range *** reading from i# 0 *** reading from i# 1 *** reading from i# 2 indexOnly# 0 reverse# 0 mask1# 7 mask2# 7 mask3# 5 *** performing bridge range *** reading from i# 0 *** reading from i# 1 *** reading from i# 2 indexOnly# 0 reverse# 0 mask1# 7 mask2# 7 mask3# 6 *** performing bridge range *** reading from i# 0 *** reading from i# 1 *** reading from i# 2 indexOnly# 0 reverse# 0 mask1# 7 mask2# 7 mask3# 7 *** performing bridge range *** reading from i# 0 *** reading from i# 1 *** reading from i# 2
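The BridgeGet output above is an exhaustive sweep: for each combination of the indexOnly and reverse flags (counting down from 1 to 0, as the first and last entries show), the test walks mask1, mask2 and mask3 through 0..7 and, after each "performing bridge range", reads back from each of the three bridge piles (i# 0..2), for 2 x 2 x 8 x 8 x 8 = 2048 combinations in total. A sketch of a driver loop consistent with the printed order; PerformBridgeRange and ReadFromPile are hypothetical stubs, not the harness's real entry points:

```cpp
#include <cstdio>

// Hypothetical stubs standing in for the test harness; the real test talks
// to a simulated bridged blobstorage group.
static void PerformBridgeRange(bool, bool, int, int, int) {}
static void ReadFromPile(int) {}

int main() {
    // indexOnly and reverse count down from 1; each mask sweeps 0..7.
    for (int indexOnly = 1; indexOnly >= 0; --indexOnly)
        for (int reverse = 1; reverse >= 0; --reverse)
            for (int mask1 = 0; mask1 < 8; ++mask1)
                for (int mask2 = 0; mask2 < 8; ++mask2)
                    for (int mask3 = 0; mask3 < 8; ++mask3) {
                        std::printf("indexOnly# %d reverse# %d mask1# %d mask2# %d mask3# %d ",
                                    indexOnly, reverse, mask1, mask2, mask3);
                        std::printf("*** performing bridge range *** ");
                        PerformBridgeRange(indexOnly, reverse, mask1, mask2, mask3);
                        for (int i = 0; i < 3; ++i) {
                            std::printf("*** reading from i# %d ", i);
                            ReadFromPile(i);
                        }
                    }
    return 0;
}
```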
Initializing session 2026-01-08T15:26:08.344812Z :INFO: [db] [sessionid] [cluster] Server session id: 123-session-id-321 2026-01-08T15:26:08.345187Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2026-01-08T15:26:08.349154Z :INFO: [db] [sessionid] [cluster] Confirm partition stream create. Partition stream id: 1. Cluster: "TestCluster". Topic: "TestTopic". Partition: 1. Read offset: (NULL) 2026-01-08T15:26:08.349661Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2026-01-08T15:26:08.349828Z :DEBUG: Decompression task done. Partition/PartitionSessionId: 1 (1-1) 2026-01-08T15:26:08.350206Z :DEBUG: Take Data. Partition 1. Read: {0, 0} (1-1) 2026-01-08T15:26:08.350243Z :DEBUG: [db] [sessionid] [cluster] The application data is transferred to the client. Number of messages 1, size 3 bytes 2026-01-08T15:26:08.352602Z :DEBUG: [db] [sessionid] [cluster] In Reconnect, ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2026-01-08T15:26:08.352634Z :DEBUG: [db] [sessionid] [cluster] New values: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2026-01-08T15:26:08.352662Z :DEBUG: [db] [sessionid] [cluster] Reconnecting session to cluster cluster in 0.000000s 2026-01-08T15:26:08.352938Z :DEBUG: [db] [sessionid] [cluster] Successfully connected. Initializing session 2026-01-08T15:26:08.353612Z :INFO: [db] [sessionid] [cluster] Server session id: 123-session-id-321 2026-01-08T15:26:08.353765Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2026-01-08T15:26:08.354483Z :INFO: [db] [sessionid] [cluster] Confirm partition stream create. Partition stream id: 1. Cluster: "TestCluster". Topic: "TestTopic". Partition: 1. Read offset: (NULL) 2026-01-08T15:26:08.355386Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2026-01-08T15:26:08.355553Z :DEBUG: Decompression task done. Partition/PartitionSessionId: 1 (1-1) 2026-01-08T15:26:08.355849Z :DEBUG: Take Data. Partition 1. Read: {0, 0} (1-1) 2026-01-08T15:26:08.355892Z :DEBUG: [db] [sessionid] [cluster] The application data is transferred to the client. Number of messages 1, size 3 bytes 2026-01-08T15:26:08.356908Z :DEBUG: [db] [sessionid] [cluster] In Reconnect, ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2026-01-08T15:26:08.356947Z :DEBUG: [db] [sessionid] [cluster] New values: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2026-01-08T15:26:08.356976Z :DEBUG: [db] [sessionid] [cluster] Reconnecting session to cluster cluster in 0.000000s 2026-01-08T15:26:08.357256Z :DEBUG: [db] [sessionid] [cluster] Successfully connected. Initializing session 2026-01-08T15:26:08.357732Z :INFO: [db] [sessionid] [cluster] Server session id: 123-session-id-321 2026-01-08T15:26:08.357878Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2026-01-08T15:26:08.358136Z :INFO: [db] [sessionid] [cluster] Confirm partition stream create. Partition stream id: 1. Cluster: "TestCluster". Topic: "TestTopic". Partition: 1. Read offset: (NULL) 2026-01-08T15:26:08.359962Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2026-01-08T15:26:08.360848Z :DEBUG: Decompression task done. Partition/PartitionSessionId: 1 (1-1) 2026-01-08T15:26:08.361499Z :DEBUG: Take Data. Partition 1. 
Read: {0, 0} (1-1) 2026-01-08T15:26:08.361551Z :DEBUG: [db] [sessionid] [cluster] The application data is transferred to the client. Number of messages 1, size 3 bytes 2026-01-08T15:26:08.362492Z :DEBUG: [db] [sessionid] [cluster] In Reconnect, ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2026-01-08T15:26:08.362518Z :DEBUG: [db] [sessionid] [cluster] New values: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2026-01-08T15:26:08.364550Z :DEBUG: [db] [sessionid] [cluster] Reconnecting session to cluster cluster in 0.000000s 2026-01-08T15:26:08.364956Z :DEBUG: [db] [sessionid] [cluster] Successfully connected. Initializing session 2026-01-08T15:26:08.365565Z :INFO: [db] [sessionid] [cluster] Server session id: 123-session-id-321 2026-01-08T15:26:08.365696Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2026-01-08T15:26:08.366192Z :INFO: [db] [sessionid] [cluster] Confirm partition stream create. Partition stream id: 1. Cluster: "TestCluster". Topic: "TestTopic". Partition: 1. Read offset: (NULL) 2026-01-08T15:26:08.366620Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2026-01-08T15:26:08.366740Z :DEBUG: Decompression task done. Partition/PartitionSessionId: 1 (1-1) 2026-01-08T15:26:08.366825Z :DEBUG: Take Data. Partition 1. Read: {0, 0} (1-1) 2026-01-08T15:26:08.366867Z :DEBUG: [db] [sessionid] [cluster] The application data is transferred to the client. Number of messages 1, size 0 bytes 2026-01-08T15:26:08.367526Z :DEBUG: [db] [sessionid] [cluster] In Reconnect, ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2026-01-08T15:26:08.367548Z :DEBUG: [db] [sessionid] [cluster] New values: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2026-01-08T15:26:08.367585Z :DEBUG: [db] [sessionid] [cluster] Reconnecting session to cluster cluster in 0.000000s 2026-01-08T15:26:08.367896Z :DEBUG: [db] [sessionid] [cluster] Successfully connected. Initializing session 2026-01-08T15:26:08.378272Z :INFO: [db] [sessionid] [cluster] Server session id: 123-session-id-321 2026-01-08T15:26:08.378475Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2026-01-08T15:26:08.381323Z :INFO: [db] [sessionid] [cluster] Confirm partition stream create. Partition stream id: 1. Cluster: "TestCluster". Topic: "TestTopic". Partition: 1. Read offset: (NULL) 2026-01-08T15:26:08.383089Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2026-01-08T15:26:08.383382Z :DEBUG: Decompression task done. Partition/PartitionSessionId: 1 (1-1) 2026-01-08T15:26:08.383555Z :DEBUG: Take Data. Partition 1. Read: {0, 0} (1-1) 2026-01-08T15:26:08.383798Z :DEBUG: [db] [sessionid] [cluster] The application data is transferred to the client. Number of messages 1, size 0 bytes 2026-01-08T15:26:08.386169Z :DEBUG: [db] [sessionid] [cluster] In Reconnect, ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2026-01-08T15:26:08.386252Z :DEBUG: [db] [sessionid] [cluster] New values: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2026-01-08T15:26:08.386274Z :DEBUG: [db] [sessionid] [cluster] Reconnecting session to cluster cluster in 0.000000s 2026-01-08T15:26:08.389004Z :DEBUG: [db] [sessionid] [cluster] Successfully connected. 
Initializing session 2026-01-08T15:26:08.391441Z :INFO: [db] [sessionid] [cluster] Server session id: 123-session-id-321 2026-01-08T15:26:08.392471Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2026-01-08T15:26:08.393343Z :INFO: [db] [sessionid] [cluster] Confirm partition stream create. Partition stream id: 1. Cluster: "TestCluster". Topic: "TestTopic". Partition: 1. Read offset: (NULL) 2026-01-08T15:26:08.394373Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2026-01-08T15:26:08.395192Z :DEBUG: Decompression task done. Partition/PartitionSessionId: 1 (1-1) 2026-01-08T15:26:08.395303Z :DEBUG: Take Data. Partition 1. Read: {0, 0} (1-1) 2026-01-08T15:26:08.395345Z :DEBUG: [db] [sessionid] [cluster] The application data is transferred to the client. Number of messages 1, size 0 bytes 2026-01-08T15:26:08.396477Z :DEBUG: [db] [sessionid] [cluster] In Reconnect, ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2026-01-08T15:26:08.396495Z :DEBUG: [db] [sessionid] [cluster] New values: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2026-01-08T15:26:08.396525Z :DEBUG: [db] [sessionid] [cluster] Reconnecting session to cluster cluster in 0.000000s 2026-01-08T15:26:08.398504Z :DEBUG: [db] [sessionid] [cluster] Successfully connected. Initializing session 2026-01-08T15:26:08.403598Z :INFO: [db] [sessionid] [cluster] Server session id: 123-session-id-321 2026-01-08T15:26:08.404438Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2026-01-08T15:26:08.404804Z :INFO: [db] [sessionid] [cluster] Confirm partition stream create. Partition stream id: 1. Cluster: "TestCluster". Topic: "TestTopic". Partition: 1. Read offset: (NULL) 2026-01-08T15:26:08.412557Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2026-01-08T15:26:08.413002Z :DEBUG: Decompression task done. Partition/PartitionSessionId: 1 (1-1) 2026-01-08T15:26:08.413095Z :DEBUG: Take Data. Partition 1. Read: {0, 0} (1-1) 2026-01-08T15:26:08.413139Z :DEBUG: [db] [sessionid] [cluster] The application data is transferred to the client. Number of messages 1, size 3 bytes 2026-01-08T15:26:08.477043Z :ReadSession INFO: Random seed for debugging is 1767885968477010 2026-01-08T15:26:09.391346Z node 1 :METADATA_PROVIDER WARN: log.cpp:841: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7593012422556004694:2090];send_to=[0:7307199536658146131:7762515]; 2026-01-08T15:26:09.392261Z node 1 :METADATA_PROVIDER ERROR: log.cpp:841: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2026-01-08T15:26:09.621438Z node 1 :PQ_READ_PROXY DEBUG: caching_service.cpp:44: Direct read cache: : Created 2026-01-08T15:26:09.639023Z node 2 :METADATA_PROVIDER WARN: log.cpp:841: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7593012422225068492:2217];send_to=[0:7307199536658146131:7762515]; 2026-01-08T15:26:09.639167Z node 2 :M ... ompressed: 0 BytesInflightTotal: 0 MessagesInflight: 0 } 2026-01-08T15:27:07.504283Z :NOTICE: [/Root] [/Root] [43ac4dc5-d63539e3-1af875d1-468f5b7d] Aborting read session. Description: SessionClosed { Status: ABORTED Issues: "
: Error: Close with zero timeout " } 2026-01-08T15:27:07.504325Z :DEBUG: [/Root] [/Root] [43ac4dc5-d63539e3-1af875d1-468f5b7d] [dc1] Abort session to cluster 2026-01-08T15:27:07.513920Z :NOTICE: [/Root] [/Root] [43ac4dc5-d63539e3-1af875d1-468f5b7d] Aborting read session. Description: SessionClosed { Status: ABORTED Issues: "
: Error: Aborted " } 2026-01-08T15:27:07.518522Z node 2 :PERSQUEUE DEBUG: partition.cpp:2329: [72075186224037892][Partition][0][StateIdle] Process user action and tx events 2026-01-08T15:27:07.518549Z node 2 :PERSQUEUE DEBUG: partition.cpp:2388: [72075186224037892][Partition][0][StateIdle] Events: 0, PendingCommits: 0, PendingWrites: 0 2026-01-08T15:27:07.518559Z node 2 :PERSQUEUE DEBUG: partition.cpp:2337: [72075186224037892][Partition][0][StateIdle] Process user action and tx pending commits 2026-01-08T15:27:07.518572Z node 2 :PERSQUEUE DEBUG: partition.cpp:2388: [72075186224037892][Partition][0][StateIdle] Events: 0, PendingCommits: 0, PendingWrites: 0 2026-01-08T15:27:07.518581Z node 2 :PERSQUEUE DEBUG: partition.cpp:2347: [72075186224037892][Partition][0][StateIdle] Try persist 2026-01-08T15:27:07.590607Z node 1 :PQ_READ_PROXY DEBUG: read_session_actor.cpp:122: session cookie 1 consumer shared/user session shared/user_1_1_9172375733264502320_v1 grpc read done: success# 0, data# { } 2026-01-08T15:27:07.590637Z node 1 :PQ_READ_PROXY INFO: read_session_actor.cpp:125: session cookie 1 consumer shared/user session shared/user_1_1_9172375733264502320_v1 grpc read failed 2026-01-08T15:27:07.622554Z node 2 :PERSQUEUE DEBUG: partition.cpp:2329: [72075186224037892][Partition][0][StateIdle] Process user action and tx events 2026-01-08T15:27:07.622586Z node 2 :PERSQUEUE DEBUG: partition.cpp:2388: [72075186224037892][Partition][0][StateIdle] Events: 0, PendingCommits: 0, PendingWrites: 0 2026-01-08T15:27:07.622597Z node 2 :PERSQUEUE DEBUG: partition.cpp:2337: [72075186224037892][Partition][0][StateIdle] Process user action and tx pending commits 2026-01-08T15:27:07.622617Z node 2 :PERSQUEUE DEBUG: partition.cpp:2388: [72075186224037892][Partition][0][StateIdle] Events: 0, PendingCommits: 0, PendingWrites: 0 2026-01-08T15:27:07.622627Z node 2 :PERSQUEUE DEBUG: partition.cpp:2347: [72075186224037892][Partition][0][StateIdle] Try persist 2026-01-08T15:27:07.590665Z node 1 :PQ_READ_PROXY INFO: read_session_actor.cpp:92: session cookie 1 consumer shared/user session shared/user_1_1_9172375733264502320_v1 grpc closed 2026-01-08T15:27:07.590700Z node 1 :PQ_READ_PROXY INFO: read_session_actor.cpp:383: session cookie 1 consumer shared/user session shared/user_1_1_9172375733264502320_v1 is DEAD 2026-01-08T15:27:07.662591Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:2321: [PQ: 72075186224037892] Destroy direct read session shared/user_1_1_9172375733264502320_v1 2026-01-08T15:27:07.662636Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:2809: [PQ: 72075186224037892] server disconnected, pipe [1:7593012594354699478:2550] destroyed 2026-01-08T15:27:07.662701Z node 2 :PQ_READ_PROXY DEBUG: caching_service.cpp:139: Direct read cache: server session deregistered: shared/user_1_1_9172375733264502320_v1 2026-01-08T15:27:07.690430Z node 1 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer__balancing.cpp:1661: [72075186224037893][rt3.dc1--test-topic] pipe [1:7593012594354699475:2547] disconnected. 
2026-01-08T15:27:07.690463Z node 1 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1672: [72075186224037893][rt3.dc1--test-topic] pipe [1:7593012594354699475:2547] disconnected; active server actors: 1 2026-01-08T15:27:07.690886Z node 1 :PERSQUEUE_READ_BALANCER NOTICE: read_balancer__balancing.cpp:1681: [72075186224037893][rt3.dc1--test-topic] pipe [1:7593012594354699475:2547] client user disconnected session shared/user_1_1_9172375733264502320_v1 2026-01-08T15:27:07.726532Z node 2 :PERSQUEUE DEBUG: partition.cpp:2329: [72075186224037892][Partition][0][StateIdle] Process user action and tx events 2026-01-08T15:27:07.726565Z node 2 :PERSQUEUE DEBUG: partition.cpp:2388: [72075186224037892][Partition][0][StateIdle] Events: 0, PendingCommits: 0, PendingWrites: 0 2026-01-08T15:27:07.726577Z node 2 :PERSQUEUE DEBUG: partition.cpp:2337: [72075186224037892][Partition][0][StateIdle] Process user action and tx pending commits 2026-01-08T15:27:07.726594Z node 2 :PERSQUEUE DEBUG: partition.cpp:2388: [72075186224037892][Partition][0][StateIdle] Events: 0, PendingCommits: 0, PendingWrites: 0 2026-01-08T15:27:07.726605Z node 2 :PERSQUEUE DEBUG: partition.cpp:2347: [72075186224037892][Partition][0][StateIdle] Try persist 2026-01-08T15:27:07.826799Z node 2 :PERSQUEUE DEBUG: partition.cpp:2329: [72075186224037892][Partition][0][StateIdle] Process user action and tx events 2026-01-08T15:27:07.826828Z node 2 :PERSQUEUE DEBUG: partition.cpp:2388: [72075186224037892][Partition][0][StateIdle] Events: 0, PendingCommits: 0, PendingWrites: 0 2026-01-08T15:27:07.826839Z node 2 :PERSQUEUE DEBUG: partition.cpp:2337: [72075186224037892][Partition][0][StateIdle] Process user action and tx pending commits 2026-01-08T15:27:07.826854Z node 2 :PERSQUEUE DEBUG: partition.cpp:2388: [72075186224037892][Partition][0][StateIdle] Events: 0, PendingCommits: 0, PendingWrites: 0 2026-01-08T15:27:07.826867Z node 2 :PERSQUEUE DEBUG: partition.cpp:2347: [72075186224037892][Partition][0][StateIdle] Try persist 2026-01-08T15:27:07.927483Z node 2 :PERSQUEUE DEBUG: partition.cpp:2329: [72075186224037892][Partition][0][StateIdle] Process user action and tx events 2026-01-08T15:27:07.927507Z node 2 :PERSQUEUE DEBUG: partition.cpp:2388: [72075186224037892][Partition][0][StateIdle] Events: 0, PendingCommits: 0, PendingWrites: 0 2026-01-08T15:27:07.927516Z node 2 :PERSQUEUE DEBUG: partition.cpp:2337: [72075186224037892][Partition][0][StateIdle] Process user action and tx pending commits 2026-01-08T15:27:07.927528Z node 2 :PERSQUEUE DEBUG: partition.cpp:2388: [72075186224037892][Partition][0][StateIdle] Events: 0, PendingCommits: 0, PendingWrites: 0 2026-01-08T15:27:07.927536Z node 2 :PERSQUEUE DEBUG: partition.cpp:2347: [72075186224037892][Partition][0][StateIdle] Try persist 2026-01-08T15:27:08.030427Z node 2 :PERSQUEUE DEBUG: partition.cpp:2329: [72075186224037892][Partition][0][StateIdle] Process user action and tx events 2026-01-08T15:27:08.030457Z node 2 :PERSQUEUE DEBUG: partition.cpp:2388: [72075186224037892][Partition][0][StateIdle] Events: 0, PendingCommits: 0, PendingWrites: 0 2026-01-08T15:27:08.030467Z node 2 :PERSQUEUE DEBUG: partition.cpp:2337: [72075186224037892][Partition][0][StateIdle] Process user action and tx pending commits 2026-01-08T15:27:08.030482Z node 2 :PERSQUEUE DEBUG: partition.cpp:2388: [72075186224037892][Partition][0][StateIdle] Events: 0, PendingCommits: 0, PendingWrites: 0 2026-01-08T15:27:08.030492Z node 2 :PERSQUEUE DEBUG: partition.cpp:2347: [72075186224037892][Partition][0][StateIdle] Try 
persist 2026-01-08T15:27:08.138638Z node 2 :PERSQUEUE DEBUG: partition.cpp:2329: [72075186224037892][Partition][0][StateIdle] Process user action and tx events 2026-01-08T15:27:08.138662Z node 2 :PERSQUEUE DEBUG: partition.cpp:2388: [72075186224037892][Partition][0][StateIdle] Events: 0, PendingCommits: 0, PendingWrites: 0 2026-01-08T15:27:08.138671Z node 2 :PERSQUEUE DEBUG: partition.cpp:2337: [72075186224037892][Partition][0][StateIdle] Process user action and tx pending commits 2026-01-08T15:27:08.138684Z node 2 :PERSQUEUE DEBUG: partition.cpp:2388: [72075186224037892][Partition][0][StateIdle] Events: 0, PendingCommits: 0, PendingWrites: 0 2026-01-08T15:27:08.138691Z node 2 :PERSQUEUE DEBUG: partition.cpp:2347: [72075186224037892][Partition][0][StateIdle] Try persist 2026-01-08T15:27:08.238777Z node 2 :PERSQUEUE DEBUG: partition.cpp:2329: [72075186224037892][Partition][0][StateIdle] Process user action and tx events 2026-01-08T15:27:08.238800Z node 2 :PERSQUEUE DEBUG: partition.cpp:2388: [72075186224037892][Partition][0][StateIdle] Events: 0, PendingCommits: 0, PendingWrites: 0 2026-01-08T15:27:08.238809Z node 2 :PERSQUEUE DEBUG: partition.cpp:2337: [72075186224037892][Partition][0][StateIdle] Process user action and tx pending commits 2026-01-08T15:27:08.238823Z node 2 :PERSQUEUE DEBUG: partition.cpp:2388: [72075186224037892][Partition][0][StateIdle] Events: 0, PendingCommits: 0, PendingWrites: 0 2026-01-08T15:27:08.238833Z node 2 :PERSQUEUE DEBUG: partition.cpp:2347: [72075186224037892][Partition][0][StateIdle] Try persist 2026-01-08T15:27:08.338481Z node 2 :PERSQUEUE DEBUG: partition.cpp:2329: [72075186224037892][Partition][0][StateIdle] Process user action and tx events 2026-01-08T15:27:08.338505Z node 2 :PERSQUEUE DEBUG: partition.cpp:2388: [72075186224037892][Partition][0][StateIdle] Events: 0, PendingCommits: 0, PendingWrites: 0 2026-01-08T15:27:08.338516Z node 2 :PERSQUEUE DEBUG: partition.cpp:2337: [72075186224037892][Partition][0][StateIdle] Process user action and tx pending commits 2026-01-08T15:27:08.338531Z node 2 :PERSQUEUE DEBUG: partition.cpp:2388: [72075186224037892][Partition][0][StateIdle] Events: 0, PendingCommits: 0, PendingWrites: 0 2026-01-08T15:27:08.338539Z node 2 :PERSQUEUE DEBUG: partition.cpp:2347: [72075186224037892][Partition][0][StateIdle] Try persist 2026-01-08T15:27:08.442666Z node 2 :PERSQUEUE DEBUG: partition.cpp:2329: [72075186224037892][Partition][0][StateIdle] Process user action and tx events 2026-01-08T15:27:08.442689Z node 2 :PERSQUEUE DEBUG: partition.cpp:2388: [72075186224037892][Partition][0][StateIdle] Events: 0, PendingCommits: 0, PendingWrites: 0 2026-01-08T15:27:08.442700Z node 2 :PERSQUEUE DEBUG: partition.cpp:2337: [72075186224037892][Partition][0][StateIdle] Process user action and tx pending commits 2026-01-08T15:27:08.442715Z node 2 :PERSQUEUE DEBUG: partition.cpp:2388: [72075186224037892][Partition][0][StateIdle] Events: 0, PendingCommits: 0, PendingWrites: 0 2026-01-08T15:27:08.442725Z node 2 :PERSQUEUE DEBUG: partition.cpp:2347: [72075186224037892][Partition][0][StateIdle] Try persist 2026-01-08T15:27:08.550393Z node 2 :PERSQUEUE DEBUG: partition.cpp:2329: [72075186224037892][Partition][0][StateIdle] Process user action and tx events 2026-01-08T15:27:08.550420Z node 2 :PERSQUEUE DEBUG: partition.cpp:2388: [72075186224037892][Partition][0][StateIdle] Events: 0, PendingCommits: 0, PendingWrites: 0 2026-01-08T15:27:08.550431Z node 2 :PERSQUEUE DEBUG: partition.cpp:2337: [72075186224037892][Partition][0][StateIdle] Process user action 
and tx pending commits 2026-01-08T15:27:08.550447Z node 2 :PERSQUEUE DEBUG: partition.cpp:2388: [72075186224037892][Partition][0][StateIdle] Events: 0, PendingCommits: 0, PendingWrites: 0 2026-01-08T15:27:08.550458Z node 2 :PERSQUEUE DEBUG: partition.cpp:2347: [72075186224037892][Partition][0][StateIdle] Try persist 2026-01-08T15:27:09.457960Z node 1 :KQP_EXECUTER ERROR: {KQPEX@kqp_executer_impl.h:1259} ActorId: [1:7593012680254046120:2664] TxId: 281474976715690. Ctx: { TraceId: 01kef3g09e25rw371x89vj1cy6, Database: /Root, SessionId: ydb://session/3?node_id=1&id=OWQ3MmQwMmUtNWE1YTkwZjktNTEzN2Y0ZjItNTNiN2Y4MDQ=, PoolId: default, IsStreamingQuery: 0}. UNAVAILABLE: Failed to send EvStartKqpTasksRequest because node is unavailable: 2 trace_id# 2026-01-08T15:27:09.458549Z node 1 :KQP_COMPUTE ERROR: dq_compute_actor_impl.h:1264: SelfId: [1:7593012680254046124:2664], TxId: 281474976715690, task: 3. Ctx: { CheckpointId : . TraceId : 01kef3g09e25rw371x89vj1cy6. CustomerSuppliedId : . RunScriptActorId : [0:0:0]. PoolId : default. SessionId : ydb://session/3?node_id=1&id=OWQ3MmQwMmUtNWE1YTkwZjktNTEzN2Y0ZjItNTNiN2Y4MDQ=. CurrentExecutionId : . Database : /Root. DatabaseId : /Root. }. Handle abort execution event from: [1:7593012680254046120:2664], status: UNAVAILABLE, reason: {
: Error: Terminate execution } |85.2%| [TM] {BAZEL_UPLOAD} ydb/core/blobstorage/ut_blobstorage/ut_bridge/unittest |85.2%| [TM] {BAZEL_UPLOAD} ydb/public/sdk/cpp/src/client/persqueue_public/ut/with_offset_ranges_mode_ut/unittest ------- [TS] {asan, default-linux-x86_64, release} ydb/library/yql/providers/generic/actors/ut/unittest >> GenericProviderLookupActor::LookupWithErrors [GOOD] Test command err: 2026-01-08 15:27:11.113 INFO ydb-library-yql-providers-generic-actors-ut(pid=58246, tid=0x00007F145DC34000) [generic] yql_generic_lookup_actor.cpp:153: New generic provider lookup source actor(ActorId=[1:4:2051]) for kind=YDB, endpoint=host: "some_host" port: 2135, database=some_db, use_tls=1, protocol=NATIVE, table=lookup_test 2026-01-08 15:27:11.126 DEBUG ydb-library-yql-providers-generic-actors-ut(pid=58246, tid=0x00007F145DC34000) [generic] yql_generic_lookup_actor.cpp:301: ActorId=[1:4:2051] Got LookupRequest for 3 keys Call ListSplits. selects { data_source_instance { kind: YDB endpoint { host: "some_host" port: 2135 } database: "some_db" credentials { token { type: "IAM" value: "token_value" } } use_tls: true protocol: NATIVE } what { items { column { name: "id" type { type_id: UINT64 } } } items { column { name: "optional_id" type { optional_type { item { type_id: UINT64 } } } } } items { column { name: "string_value" type { optional_type { item { type_id: STRING } } } } } } from { table: "lookup_test" } where { filter_typed { disjunction { operands { conjunction { operands { comparison { operation: EQ left_value { column: "id" } right_value { typed_value { type { type_id: UINT64 } value { uint64_value: 2 } } } } } operands { comparison { operation: EQ left_value { column: "optional_id" } right_value { typed_value { type { optional_type { item { type_id: UINT64 } } } value { uint64_value: 102 } } } } } } } operands { conjunction { operands { comparison { operation: EQ left_value { column: "id" } right_value { typed_value { type { type_id: UINT64 } value { uint64_value: 1 } } } } } operands { comparison { operation: EQ left_value { column: "optional_id" } right_value { typed_value { type { optional_type { item { type_id: UINT64 } } } value { uint64_value: 101 } } } } } } } operands { conjunction { operands { comparison { operation: EQ left_value { column: "id" } right_value { typed_value { type { type_id: UINT64 } value { uint64_value: 0 } } } } } operands { comparison { operation: EQ left_value { column: "optional_id" } right_value { typed_value { type { optional_type { item { type_id: UINT64 } } } value { uint64_value: 100 } } } } } } } operands { conjunction { operands { comparison { operation: EQ left_value { column: "id" } right_value { typed_value { type { type_id: UINT64 } value { uint64_value: 2 } } } } } operands { comparison { operation: EQ left_value { column: "optional_id" } right_value { typed_value { type { optional_type { item { type_id: UINT64 } } } value { uint64_value: 102 } } } } } } } } } } } max_split_count: 1 GENERIC-CONNECTOR-MOCK Expected: selects { data_source_instance { kind: YDB endpoint { host: "some_host" port: 2135 } database: "some_db" credentials { token { type: "IAM" value: "token_value" } } use_tls: true protocol: NATIVE } what { items { column { name: "id" type { type_id: UINT64 } } } items { column { name: "optional_id" type { optional_type { item { type_id: UINT64 } } } } } items { column { name: "string_value" type { optional_type { item { type_id: STRING } } } } } } from { table: "lookup_test" } where { filter_typed { disjunction { operands {
conjunction { operands { comparison { operation: EQ left_value { column: "id" } right_value { typed_value { type { type_id: UINT64 } value { uint64_value: 2 } } } } } operands { comparison { operation: EQ left_value { column: "optional_id" } right_value { typed_value { type { optional_type { item { type_id: UINT64 } } } value { uint64_value: 102 } } } } } } } operands { conjunction { operands { comparison { operation: EQ left_value { column: "id" } right_value { typed_value { type { type_id: UINT64 } value { uint64_value: 1 } } } } } operands { comparison { operation: EQ left_value { column: "optional_id" } right_value { typed_value { type { optional_type { item { type_id: UINT64 } } } value { uint64_value: 101 } } } } } } } operands { conjunction { operands { comparison { operation: EQ left_value { column: "id" } right_value { typed_value { type { type_id: UINT64 } value { uint64_value: 0 } } } } } operands { comparison { operation: EQ left_value { column: "optional_id" } right_value { typed_value { type { optional_type { item { type_id: UINT64 } } } value { uint64_value: 100 } } } } } } } operands { conjunction { operands { comparison { operation: EQ left_value { column: "id" } right_value { typed_value { type { type_id: UINT64 } value { uint64_value: 2 } } } } } operands { comparison { operation: EQ left_value { column: "optional_id" } right_value { typed_value { type { optional_type { item { type_id: UINT64 } } } value { uint64_value: 102 } } } } } } } } } } } max_split_count: 1 GENERIC-CONNECTOR-MOCK Actual: selects { data_source_instance { kind: YDB endpoint { host: "some_host" port: 2135 } database: "some_db" credentials { token { type: "IAM" value: "token_value" } } use_tls: true protocol: NATIVE } what { items { column { name: "id" type { type_id: UINT64 } } } items { column { name: "optional_id" type { optional_type { item { type_id: UINT64 } ... 
"optional_id" } right_value { typed_value { type { optional_type { item { type_id: UINT64 } } } value { uint64_value: 102 } } } } } } } operands { conjunction { operands { comparison { operation: EQ left_value { column: "id" } right_value { typed_value { type { type_id: UINT64 } value { uint64_value: 1 } } } } } operands { comparison { operation: EQ left_value { column: "optional_id" } right_value { typed_value { type { optional_type { item { type_id: UINT64 } } } value { uint64_value: 101 } } } } } } } operands { conjunction { operands { comparison { operation: EQ left_value { column: "id" } right_value { typed_value { type { type_id: UINT64 } value { uint64_value: 0 } } } } } operands { comparison { operation: EQ left_value { column: "optional_id" } right_value { typed_value { type { optional_type { item { type_id: UINT64 } } } value { uint64_value: 100 } } } } } } } operands { conjunction { operands { comparison { operation: EQ left_value { column: "id" } right_value { typed_value { type { type_id: UINT64 } value { uint64_value: 2 } } } } } operands { comparison { operation: EQ left_value { column: "optional_id" } right_value { typed_value { type { optional_type { item { type_id: UINT64 } } } value { uint64_value: 102 } } } } } } } } } } } max_split_count: 1 GENERIC-CONNECTOR-MOCK Actual: selects { data_source_instance { kind: YDB endpoint { host: "some_host" port: 2135 } database: "some_db" credentials { token { type: "IAM" value: "token_value" } } use_tls: true protocol: NATIVE } what { items { column { name: "id" type { type_id: UINT64 } } } items { column { name: "optional_id" type { optional_type { item { type_id: UINT64 } } } } } items { column { name: "string_value" type { optional_type { item { type_id: STRING } } } } } } from { table: "lookup_test" } where { filter_typed { disjunction { operands { conjunction { operands { comparison { operation: EQ left_value { column: "id" } right_value { typed_value { type { type_id: UINT64 } value { uint64_value: 2 } } } } } operands { comparison { operation: EQ left_value { column: "optional_id" } right_value { typed_value { type { optional_type { item { type_id: UINT64 } } } value { uint64_value: 102 } } } } } } } operands { conjunction { operands { comparison { operation: EQ left_value { column: "id" } right_value { typed_value { type { type_id: UINT64 } value { uint64_value: 1 } } } } } operands { comparison { operation: EQ left_value { column: "optional_id" } right_value { typed_value { type { optional_type { item { type_id: UINT64 } } } value { uint64_value: 101 } } } } } } } operands { conjunction { operands { comparison { operation: EQ left_value { column: "id" } right_value { typed_value { type { type_id: UINT64 } value { uint64_value: 0 } } } } } operands { comparison { operation: EQ left_value { column: "optional_id" } right_value { typed_value { type { optional_type { item { type_id: UINT64 } } } value { uint64_value: 100 } } } } } } } operands { conjunction { operands { comparison { operation: EQ left_value { column: "id" } right_value { typed_value { type { type_id: UINT64 } value { uint64_value: 2 } } } } } operands { comparison { operation: EQ left_value { column: "optional_id" } right_value { typed_value { type { optional_type { item { type_id: UINT64 } } } value { uint64_value: 102 } } } } } } } } } } } max_split_count: 1 ListSplits result. 
GRpcStatusCode: 0 2026-01-08 15:27:11.216 DEBUG ydb-library-yql-providers-generic-actors-ut(pid=58246, tid=0x00007B1458D56640) [generic] yql_generic_lookup_actor.cpp:332: ActorId=[3:7593012688127658034:2051] Got TListSplitsStreamIterator 2026-01-08 15:27:11.216 DEBUG ydb-library-yql-providers-generic-actors-ut(pid=58246, tid=0x00007B1458D56640) [generic] yql_generic_lookup_actor.cpp:200: ActorId=[3:7593012688127658034:2051] Got TListSplitsResponse from Connector Call ReadSplits. data_source_instance { kind: YDB endpoint { host: "some_host" port: 2135 } database: "some_db" credentials { token { type: "IAM" value: "token_value" } } use_tls: true protocol: NATIVE } splits { select { from { table: "example_1" } } description: "Actual split info is not important" } format: ARROW_IPC_STREAMING filtering: FILTERING_MANDATORY GENERIC-CONNECTOR-MOCK Expected: data_source_instance { kind: YDB endpoint { host: "some_host" port: 2135 } database: "some_db" credentials { token { type: "IAM" value: "token_value" } } use_tls: true protocol: NATIVE } splits { select { from { table: "example_1" } } description: "Actual split info is not important" } format: ARROW_IPC_STREAMING filtering: FILTERING_MANDATORY GENERIC-CONNECTOR-MOCK Actual: data_source_instance { kind: YDB endpoint { host: "some_host" port: 2135 } database: "some_db" credentials { token { type: "IAM" value: "token_value" } } use_tls: true protocol: NATIVE } splits { select { from { table: "example_1" } } description: "Actual split info is not important" } format: ARROW_IPC_STREAMING filtering: FILTERING_MANDATORY ReadSplits result. GRpcStatusCode: 0 2026-01-08 15:27:11.216 DEBUG ydb-library-yql-providers-generic-actors-ut(pid=58246, tid=0x00007B1458D56640) [generic] yql_generic_lookup_actor.cpp:233: ActorId=[3:7593012688127658034:2051] Got ReadSplitsStreamIterator from Connector 2026-01-08 15:27:11.217 DEBUG ydb-library-yql-providers-generic-actors-ut(pid=58246, tid=0x00007B1458D56640) [generic] yql_generic_lookup_actor.cpp:354: ActorId=[3:7593012688127658034:2051] Got DataChunk 2026-01-08 15:27:11.217 DEBUG ydb-library-yql-providers-generic-actors-ut(pid=58246, tid=0x00007B1458D56640) [generic] yql_generic_lookup_actor.cpp:365: ActorId=[3:7593012688127658034:2051] Got EOF 2026-01-08 15:27:11.218 DEBUG ydb-library-yql-providers-generic-actors-ut(pid=58246, tid=0x00007B1458D56640) [generic] yql_generic_lookup_actor.cpp:419: Sending lookup results for 3 keys |85.2%| [TS] {BAZEL_UPLOAD} ydb/library/yql/providers/generic/actors/ut/unittest >> TTxDataShardBuildFulltextIndexScan::Build [GOOD] >> TTxDataShardBuildFulltextIndexScan::BuildWithData >> MediatorTest::WatcherReconnect [GOOD] >> TestFormatHandler::WatermarkWhere [GOOD] >> DescribeSchemaSecretsService::GetDroppedValue [GOOD] >> DescribeSchemaSecretsService::GetInParallel >> TestFormatHandler::WatermarkWhereFalse |85.2%| [TA] $(B)/ydb/core/blobstorage/ut_blobstorage/ut_bridge/test-results/unittest/{meta.json ... results_accumulator.log} |85.2%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/datashard/ut_stats/ydb-core-tx-datashard-ut_stats |85.2%| [TA] {BAZEL_UPLOAD} $(B)/ydb/core/blobstorage/ut_blobstorage/ut_bridge/test-results/unittest/{meta.json ... results_accumulator.log} |85.2%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/datashard/ut_stats/ydb-core-tx-datashard-ut_stats |85.2%| [TS] {RESULT} ydb/library/yql/providers/generic/actors/ut/unittest |85.2%| [TA] {RESULT} $(B)/ydb/core/blobstorage/ut_blobstorage/ut_bridge/test-results/unittest/{meta.json ... 
results_accumulator.log} |85.2%| [LD] {RESULT} $(B)/ydb/core/tx/datashard/ut_stats/ydb-core-tx-datashard-ut_stats >> MediatorTest::MultipleSteps >> DataShardReplication::SimpleApplyChanges [GOOD] >> DataShardReplication::SplitMergeChanges >> test.py::test[solomon-HistResponse-default.txt] [GOOD] >> test.py::test[solomon-InvalidProject-] |85.3%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/services/config/ut/ydb-services-config-ut |85.3%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/services/config/ut/ydb-services-config-ut |85.3%| [LD] {RESULT} $(B)/ydb/services/config/ut/ydb-services-config-ut ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/datashard/ut_compaction/unittest >> DataShardCompaction::CompactBorrowedTxStatus [GOOD] Test command err: ... waiting for SysViewsRoster update finished 2026-01-08T15:26:10.586657Z node 1 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:125: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, got undelivered to scheme cache: ActorUnknown 2026-01-08T15:26:10.727770Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:114:2161], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2026-01-08T15:26:10.728076Z node 1 :METADATA_PROVIDER ERROR: log.cpp:841: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2026-01-08T15:26:10.728224Z node 1 :METADATA_PROVIDER ERROR: log.cpp:841: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2026-01-08T15:26:10.997043Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2026-01-08T15:26:10.997161Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2026-01-08T15:26:11.076432Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1374: Notification cookie mismatch for subscription [1:34:2081] 1767885966927985 != 1767885966927989 2026-01-08T15:26:11.090660Z node 1 :IMPORT WARN: schemeshard_import.cpp:440: Table profiles were not loaded 2026-01-08T15:26:11.137435Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2026-01-08T15:26:11.236955Z node 1 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: PathErrorUnknown, path: Root/.metadata/script_executions ... waiting for SysViewsRoster update finished (done) 2026-01-08T15:26:11.556463Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2026-01-08T15:26:11.570149Z node 1 :TX_CONVEYOR ERROR: log.cpp:841: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2026-01-08T15:26:11.671086Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:314: actor# [1:68:2115] Handle TEvProposeTransaction 2026-01-08T15:26:11.671184Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:237: actor# [1:68:2115] TxId# 281474976715657 ProcessProposeTransaction 2026-01-08T15:26:11.671292Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:256: actor# [1:68:2115] Cookie# 0 userReqId# "" txid# 281474976715657 SEND to# [1:861:2744] 2026-01-08T15:26:11.839248Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1822: Actor# [1:861:2744] txid# 281474976715657 Bootstrap EvSchemeRequest record: Transaction { ModifyScheme { WorkingDir: "/Root" OperationType: ESchemeOpCreateTable CreateTable { Name: "table-1" Columns { Name: "key" Type: "Uint32" FamilyName: "" NotNull: false } Columns { Name: "value" Type: "Uint32" FamilyName: "" NotNull: false } KeyColumnNames: "key" UniformPartitionsCount: 1 } } } ExecTimeoutPeriod: 18446744073709551615 2026-01-08T15:26:11.839349Z node 1 :TX_PROXY DEBUG: schemereq.cpp:647: Actor# [1:861:2744] txid# 281474976715657 Bootstrap, UserSID: CheckAdministrator: 0 CheckDatabaseAdministrator: 0 2026-01-08T15:26:11.840013Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1887: Actor# [1:861:2744] txid# 281474976715657 HANDLE EvNavigateKeySetResult TFlatSchemeReq marker# P6 2026-01-08T15:26:11.840108Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1877: Actor# [1:861:2744] txid# 281474976715657 
TEvNavigateKeySet requested from SchemeCache 2026-01-08T15:26:11.840417Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1631: Actor# [1:861:2744] txid# 281474976715657 HANDLE EvNavigateKeySetResult TFlatSchemeReq marker# P5 ErrorCount# 0 2026-01-08T15:26:11.840545Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1666: Actor# [1:861:2744] HANDLE EvNavigateKeySetResult, txid# 281474976715657 shardToRequest# 72057594046644480 DomainKey# [OwnerId: 72057594046644480, LocalPathId: 1] DomainInfo.Params# Version: 1 PlanResolution: 500 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 RedirectRequired# true 2026-01-08T15:26:11.840692Z node 1 :TX_PROXY DEBUG: schemereq.cpp:117: Actor# [1:861:2744] txid# 281474976715657 SEND to# 72057594046644480 shardToRequest {TEvModifySchemeTransaction txid# 281474976715657 TabletId# 72057594046644480} 2026-01-08T15:26:11.842670Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:26:11.843178Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1533: Actor# [1:861:2744] txid# 281474976715657 HANDLE EvClientConnected 2026-01-08T15:26:11.843898Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1555: Actor# [1:861:2744] txid# 281474976715657 Status StatusAccepted HANDLE {TEvModifySchemeTransactionResult Status# StatusAccepted txid# 281474976715657} 2026-01-08T15:26:11.843961Z node 1 :TX_PROXY DEBUG: schemereq.cpp:627: Actor# [1:861:2744] txid# 281474976715657 SEND to# [1:829:2724] Source {TEvProposeTransactionStatus txid# 281474976715657 Status# 53} 2026-01-08T15:26:11.883194Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3119: StateInit, received event# 268828672, Sender [1:877:2759], Recipient [1:886:2765]: NKikimr::TEvTablet::TEvBoot 2026-01-08T15:26:11.884171Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3119: StateInit, received event# 268828673, Sender [1:877:2759], Recipient [1:886:2765]: NKikimr::TEvTablet::TEvRestored 2026-01-08T15:26:11.884441Z node 1 :TX_DATASHARD INFO: datashard.cpp:374: TDataShard::OnActivateExecutor: tablet 72075186224037888 actor [1:886:2765] 2026-01-08T15:26:11.884741Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:661: TxInitSchema.Execute 2026-01-08T15:26:11.937753Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateInactive, received event# 268828684, Sender [1:877:2759], Recipient [1:886:2765]: NKikimr::TEvTablet::TEvFollowerSyncComplete 2026-01-08T15:26:11.938772Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:727: TxInitSchema.Complete 2026-01-08T15:26:11.939014Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:50: TDataShard::TTxInit::Execute 2026-01-08T15:26:11.940670Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1324: LoadChangeRecords: QueueSize: 0, at tablet: 72075186224037888 2026-01-08T15:26:11.940761Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1380: LoadLockChangeRecords at tablet: 72075186224037888 2026-01-08T15:26:11.940825Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1429: LoadChangeRecordCommits at tablet: 72075186224037888 2026-01-08T15:26:11.941174Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:94: TDataShard::TTxInit::Complete 2026-01-08T15:26:11.941314Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:102: TDataShard::TTxInitRestored::Execute 2026-01-08T15:26:11.941396Z node 1 :TX_DATASHARD DEBUG: 
datashard__init.cpp:142: DataShard 72075186224037888 persisting started state actor id [1:901:2765] in generation 1 2026-01-08T15:26:11.958923Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:151: TDataShard::TTxInitRestored::Complete 2026-01-08T15:26:12.012974Z node 1 :TX_DATASHARD INFO: datashard.cpp:418: Switched to work state WaitScheme tabletId 72075186224037888 2026-01-08T15:26:12.013171Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:458: 72075186224037888 not sending time cast registration request in state WaitScheme: missing processing params 2026-01-08T15:26:12.013283Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1259: Change sender created: at tablet: 72075186224037888, actorId: [1:903:2775] 2026-01-08T15:26:12.013325Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1264: Trying to activate change sender: at tablet: 72075186224037888 2026-01-08T15:26:12.013359Z node 1 :TX_DATASHARD INFO: datashard.cpp:1281: Cannot activate change sender: at tablet: 72075186224037888, state: WaitScheme 2026-01-08T15:26:12.013398Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2026-01-08T15:26:12.013632Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3153: StateWork, received event# 2146435072, Sender [1:886:2765], Recipient [1:886:2765]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvProgressTransaction 2026-01-08T15:26:12.013687Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3178: StateWork, processing event TEvPrivate::TEvProgressTransaction 2026-01-08T15:26:12.014035Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 72075186224037888 2026-01-08T15:26:12.014119Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 72075186224037888 2026-01-08T15:26:12.014193Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2026-01-08T15:26:12.014247Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 0 2026-01-08T15:26:12.014290Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:385: Check unit PlanQueue at 72075186224037888 2026-01-08T15:26:12.014343Z node 1 :TX_DATASHARD TRACE: plan_queue_unit.cpp:59: TPlanQueueUnit at 72075186224037888 has no attached operations 2026-01-08T15:26:12.014378Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:403: Unit PlanQueue has no ready operations at 72075186224037888 2026-01-08T15:26:12.014414Z node 1 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186224037888 TxInFly 0 2026-01-08T15:26:12.014463Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2026-01-08T15:26:12.014897Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3153: StateWork, received event# 269877761, Sender [1:887:2766], Recipient [1:886:2765]: NKikimr::TEvTabletPipe::TEvServerConnected 2026-01-08T15:26:12.014944Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3189: StateWork, processing event TEvTabletPipe::TEvServerConnected 2026-01-08T15:26:12.014991Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3740: Server connected at leader tablet# 72075186224037888, clientId# [1:882:2762], serverId# [1:887:2766], sessionId# [0:0:0] 2026-01-08T15:26:12.015079Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3153: StateWork, received event# 269549568, Sender [1:400:2399], Recipient [1:887:2766] 
2026-01-08T15:26:12.015117Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3159: StateWork, processing event TEvDataShard::TEvProposeTransaction 2026-01-08T15:26:12.015206Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037888 2026-01-08T15:26:12.015406Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1848: Trying to execute [0:281474976715657] at 72075186224037888 on unit CheckSchemeTx 2026-01-08T15:26:12.015448Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:133: Propose scheme transaction at tablet 72075186224037888 txId 281474976715657 ssId 72057594046644480 seqNo 2:1 2026-01-08T15:26:12.015530Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:221: Prepared scheme transaction txId 281474976715657 at tablet 72075186224037888 2026-01-08T15:26:12.015573Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1883: Execution status ... 1474976715661] at 72075186224037892 is DelayComplete 2026-01-08T15:27:10.972932Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1931: Advance execution plan for [72500:281474976715661] at 72075186224037892 executing on unit CompleteOperation 2026-01-08T15:27:10.972960Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1937: Add [72500:281474976715661] at 72075186224037892 to execution unit CompletedOperations 2026-01-08T15:27:10.972986Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1848: Trying to execute [72500:281474976715661] at 72075186224037892 on unit CompletedOperations 2026-01-08T15:27:10.973018Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1883: Execution status for [72500:281474976715661] at 72075186224037892 is Executed 2026-01-08T15:27:10.973069Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1931: Advance execution plan for [72500:281474976715661] at 72075186224037892 executing on unit CompletedOperations 2026-01-08T15:27:10.973098Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1943: Execution plan for [72500:281474976715661] at 72075186224037892 has finished 2026-01-08T15:27:10.973128Z node 2 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037892 (dry run) active 0 active planned 0 immediate 0 planned 0 2026-01-08T15:27:10.973154Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:327: Check candidate unit PlanQueue at 72075186224037892 2026-01-08T15:27:10.973186Z node 2 :TX_DATASHARD TRACE: plan_queue_unit.cpp:59: TPlanQueueUnit at 72075186224037892 has no attached operations 2026-01-08T15:27:10.973213Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:341: Unit PlanQueue has no ready operations at 72075186224037892 2026-01-08T15:27:10.983964Z node 2 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037892 2026-01-08T15:27:10.984045Z node 2 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037892 2026-01-08T15:27:10.984081Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1954: Complete execution for [72500:281474976715661] at 72075186224037892 on unit CompleteOperation 2026-01-08T15:27:10.984130Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:810: Complete [72500 : 281474976715661] from 72075186224037892 at tablet 72075186224037892 send result to client [2:1647:3440], exec latency: 0 ms, propose latency: 1 ms 2026-01-08T15:27:10.984180Z node 2 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037892 2026-01-08T15:27:10.984333Z node 2 :TX_PROXY DEBUG: 
datareq.cpp:2286: Actor# [2:1647:3440] txid# 281474976715661 HANDLE Plan TEvProposeTransactionResult TDataReq GetStatus# COMPLETE shard id 72075186224037892 marker# P12 2026-01-08T15:27:10.984379Z node 2 :TX_PROXY DEBUG: datareq.cpp:2968: Send stream clearance, shard: 72075186224037890, txid: 281474976715661, cleared: 1 2026-01-08T15:27:10.984519Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3153: StateWork, received event# 269287940, Sender [2:1647:3440], Recipient [2:983:2833]: NKikimrTx.TEvStreamClearanceResponse TxId: 281474976715661 Cleared: true 2026-01-08T15:27:10.984557Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3174: StateWork, processing event TEvTxProcessing::TEvStreamClearanceResponse 2026-01-08T15:27:10.984636Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3153: StateWork, received event# 2146435072, Sender [2:983:2833], Recipient [2:983:2833]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvProgressTransaction 2026-01-08T15:27:10.984666Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3178: StateWork, processing event TEvPrivate::TEvProgressTransaction 2026-01-08T15:27:10.984721Z node 2 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037890 2026-01-08T15:27:10.984755Z node 2 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037890 active 1 active planned 1 immediate 0 planned 1 2026-01-08T15:27:10.984792Z node 2 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:307: Found ready candidate operation [72500:281474976715661] at 72075186224037890 for WaitForStreamClearance 2026-01-08T15:27:10.984821Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1848: Trying to execute [72500:281474976715661] at 72075186224037890 on unit WaitForStreamClearance 2026-01-08T15:27:10.984852Z node 2 :TX_DATASHARD TRACE: wait_for_stream_clearance_unit.cpp:156: Got stream clearance for [72500:281474976715661] at 72075186224037890 2026-01-08T15:27:10.984907Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1883: Execution status for [72500:281474976715661] at 72075186224037890 is Executed 2026-01-08T15:27:10.984941Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1931: Advance execution plan for [72500:281474976715661] at 72075186224037890 executing on unit WaitForStreamClearance 2026-01-08T15:27:10.984970Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1937: Add [72500:281474976715661] at 72075186224037890 to execution unit ReadTableScan 2026-01-08T15:27:10.984999Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1848: Trying to execute [72500:281474976715661] at 72075186224037890 on unit ReadTableScan 2026-01-08T15:27:10.985190Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1883: Execution status for [72500:281474976715661] at 72075186224037890 is Continue 2026-01-08T15:27:10.985218Z node 2 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037890 (dry run) active 1 active planned 1 immediate 0 planned 1 2026-01-08T15:27:10.985245Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:385: Check unit PlanQueue at 72075186224037890 2026-01-08T15:27:10.985272Z node 2 :TX_DATASHARD TRACE: plan_queue_unit.cpp:59: TPlanQueueUnit at 72075186224037890 has no attached operations 2026-01-08T15:27:10.985301Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:403: Unit PlanQueue has no ready operations at 72075186224037890 2026-01-08T15:27:10.985691Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3153: StateWork, received event# 2146435082, Sender [2:1679:3469], Recipient [2:983:2833]: 
NKikimr::NDataShard::TDataShard::TEvPrivate::TEvRegisterScanActor 2026-01-08T15:27:10.985729Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3183: StateWork, processing event TEvPrivate::TEvRegisterScanActor 2026-01-08T15:27:10.985992Z node 2 :TX_DATASHARD DEBUG: read_table_scan.cpp:514: Got quota for read table scan ShardId: 72075186224037890, TxId: 281474976715661, MessageQuota: 1 2026-01-08T15:27:10.986080Z node 2 :TX_DATASHARD DEBUG: read_table_scan.cpp:718: Finish scan ShardId: 72075186224037890, TxId: 281474976715661, MessageQuota: 1 2026-01-08T15:27:11.045874Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:4486: FullScan complete at 72075186224037890 2026-01-08T15:27:11.045936Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:4492: Found op: cookie: 281474976715661, at: 72075186224037890 2026-01-08T15:27:11.046089Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3153: StateWork, received event# 2146435072, Sender [2:983:2833], Recipient [2:983:2833]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvProgressTransaction 2026-01-08T15:27:11.046126Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3178: StateWork, processing event TEvPrivate::TEvProgressTransaction 2026-01-08T15:27:11.046184Z node 2 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037890 2026-01-08T15:27:11.046220Z node 2 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037890 active 1 active planned 1 immediate 0 planned 1 2026-01-08T15:27:11.046255Z node 2 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:307: Found ready candidate operation [72500:281474976715661] at 72075186224037890 for ReadTableScan 2026-01-08T15:27:11.046283Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1848: Trying to execute [72500:281474976715661] at 72075186224037890 on unit ReadTableScan 2026-01-08T15:27:11.046337Z node 2 :TX_DATASHARD TRACE: read_table_scan_unit.cpp:158: ReadTable scan complete for [72500:281474976715661] at 72075186224037890 error: , IsFatalError: 0 2026-01-08T15:27:11.046378Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1883: Execution status for [72500:281474976715661] at 72075186224037890 is Executed 2026-01-08T15:27:11.046409Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1931: Advance execution plan for [72500:281474976715661] at 72075186224037890 executing on unit ReadTableScan 2026-01-08T15:27:11.046439Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1937: Add [72500:281474976715661] at 72075186224037890 to execution unit CompleteOperation 2026-01-08T15:27:11.046466Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1848: Trying to execute [72500:281474976715661] at 72075186224037890 on unit CompleteOperation 2026-01-08T15:27:11.046642Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1883: Execution status for [72500:281474976715661] at 72075186224037890 is DelayComplete 2026-01-08T15:27:11.046674Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1931: Advance execution plan for [72500:281474976715661] at 72075186224037890 executing on unit CompleteOperation 2026-01-08T15:27:11.046702Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1937: Add [72500:281474976715661] at 72075186224037890 to execution unit CompletedOperations 2026-01-08T15:27:11.046729Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1848: Trying to execute [72500:281474976715661] at 72075186224037890 on unit CompletedOperations 2026-01-08T15:27:11.046761Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1883: Execution status for [72500:281474976715661] at 
72075186224037890 is Executed 2026-01-08T15:27:11.046785Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1931: Advance execution plan for [72500:281474976715661] at 72075186224037890 executing on unit CompletedOperations 2026-01-08T15:27:11.046821Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1943: Execution plan for [72500:281474976715661] at 72075186224037890 has finished 2026-01-08T15:27:11.046848Z node 2 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037890 (dry run) active 0 active planned 0 immediate 0 planned 0 2026-01-08T15:27:11.046874Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:327: Check candidate unit PlanQueue at 72075186224037890 2026-01-08T15:27:11.046900Z node 2 :TX_DATASHARD TRACE: plan_queue_unit.cpp:59: TPlanQueueUnit at 72075186224037890 has no attached operations 2026-01-08T15:27:11.046924Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:341: Unit PlanQueue has no ready operations at 72075186224037890 2026-01-08T15:27:11.058704Z node 2 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037890 2026-01-08T15:27:11.058766Z node 2 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037890 2026-01-08T15:27:11.058802Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1954: Complete execution for [72500:281474976715661] at 72075186224037890 on unit CompleteOperation 2026-01-08T15:27:11.058853Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:810: Complete [72500 : 281474976715661] from 72075186224037890 at tablet 72075186224037890 send result to client [2:1647:3440], exec latency: 1 ms, propose latency: 1 ms 2026-01-08T15:27:11.058897Z node 2 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037890 2026-01-08T15:27:11.059087Z node 2 :TX_PROXY DEBUG: datareq.cpp:2286: Actor# [2:1647:3440] txid# 281474976715661 HANDLE Plan TEvProposeTransactionResult TDataReq GetStatus# COMPLETE shard id 72075186224037890 marker# P12 2026-01-08T15:27:11.059172Z node 2 :TX_PROXY INFO: datareq.cpp:834: Actor# [2:1647:3440] txid# 281474976715661 RESPONSE Status# ExecComplete prepare time: 0.000500s execute time: 0.001500s total time: 0.002000s marker# P13 |85.3%| [TM] {BAZEL_UPLOAD} ydb/core/tx/datashard/ut_compaction/unittest >> TestFormatHandler::WatermarkWhereFalse [GOOD] >> Interconnect::SessionContinuation [GOOD] >> Interconnect::SetupRdmaSession >> TestJsonParser::Simple1 >> Interconnect::SetupRdmaSession [GOOD] >> LargeMessage::Test >> TestJsonParser::Simple1 [GOOD] >> TestJsonParser::Simple2 >> MediatorTest::MultipleSteps [GOOD] >> TestJsonParser::Simple2 [GOOD] >> TestJsonParser::Simple3 >> MediatorTest::WatchesBeforeFirstStep >> TestJsonParser::Simple3 [GOOD] >> TestJsonParser::Simple4 >> TTxDataShardBuildFulltextIndexScan::BuildWithData [GOOD] >> TTxDataShardBuildFulltextIndexScan::BuildWithTextData >> TestJsonParser::Simple4 [GOOD] >> TCreateAndDropViewTest::CheckCreatedView >> TestJsonParser::LargeStrings >> TestJsonParser::LargeStrings [GOOD] >> TestJsonParser::ManyValues >> TestJsonParser::ManyValues [GOOD] >> TestJsonParser::MissingFields >> TestJsonParser::MissingFields [GOOD] >> KeyValueGRPCService::TestSimpleExecuteTransactionV1 [GOOD] >> KeyValueGRPCService::TestSimpleExecuteTransactionV2 >> CompressExecutor::TestReorderedExecutor [GOOD] >> CompressExecutor::TestExecutorMemUsage >> PersQueueSdkReadSessionTest::SpecifyClustersExplicitly [GOOD] >> 
PersQueueSdkReadSessionTest::StopResumeReadingData
>> TestJsonParser::NestedTypes
>> TestJsonParser::NestedTypes [GOOD]
>> TestJsonParser::SimpleBooleans
>> TestJsonParser::SimpleBooleans [GOOD]
>> TestJsonParser::ChangeParserSchema
>> TestJsonParser::ChangeParserSchema [GOOD]
>> TestJsonParser::ManyBatches
>> TestJsonParser::ManyBatches [GOOD]
>> TestJsonParser::LittleBatches
>> TestJsonParser::LittleBatches [GOOD]
>> TestJsonParser::MissingFieldsValidation
>> TestJsonParser::MissingFieldsValidation [GOOD]
>> TestJsonParser::TypeKindsValidation
>> BasicUsage::BrokenCredentialsProvider [GOOD]
>> TestJsonParser::TypeKindsValidation [GOOD]
>> BasicUsage::CreateTopicWithCustomName
>> TabletService_ChangeSchema::Basics [GOOD]
>> TabletService_ChangeSchema::OnlyAdminsAllowed
>> TestJsonParser::NumbersValidation
>> MediatorTest::WatchesBeforeFirstStep [GOOD]
>> DataShardSnapshotIsolation::ReadWriteNoLocksNoConflict [GOOD]
>> DataShardSnapshotIsolation::ReadWriteConflictOnUncommittedWrite
>> DescribeSchemaSecretsService::GetInParallel [GOOD]
>> DescribeSchemaSecretsService::GetSameValueMultipleTimes
>> MediatorTest::RebootTargetTablets
>> TestJsonParser::NumbersValidation [GOOD]
>> TestJsonParser::StringsValidation
>> TestJsonParser::StringsValidation [GOOD]
>> TestJsonParser::NestedJsonValidation
>> TTxDataShardBuildFulltextIndexScan::BuildWithTextData [GOOD]
>> TTxDataShardBuildFulltextIndexScan::BuildWithTextFromKey
>> DataShardStats::OneChannelStatsCorrect
>> TestJsonParser::NestedJsonValidation [GOOD]
>> TestJsonParser::BoolsValidation
>> TestJsonParser::BoolsValidation [GOOD]
>> ConfigGRPCService::ReplaceConfig
>> TestJsonParser::JsonStructureValidation
>> DataShardReplication::SplitMergeChanges [GOOD]
>> DataShardReplication::SplitMergeChangesReboots
>> TestJsonParser::JsonStructureValidation [GOOD]
>> TestJsonParser::SkipErrors_Simple1
>> TestJsonParser::SkipErrors_Simple1 [GOOD]
>> TestJsonParser::SkipErrors_StringValidation
>> BasicUsage::WriteAndReadSomeMessagesWithNoCompression [GOOD]
>> BasicUsage::TWriteSession_WriteAndReadAndCommitRandomMessages
>> TCheckpointCoordinatorTests::ShouldDoNothingIfNoIngressTasks [GOOD]
>> TestJsonParser::SkipErrors_StringValidation [GOOD]
>> TestJsonParser::SkipErrors_NoField
>> TestJsonParser::SkipErrors_NoField [GOOD]
>> TestJsonParser::SkipErrors_NoJson
>> TestJsonParser::SkipErrors_NoJson [GOOD]
>> TestJsonParser::SkipErrors_Optional
>> TestJsonParser::SkipErrors_Optional [GOOD]
|85.3%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/datashard/ut_keys/ydb-core-tx-datashard-ut_keys
>> TestJsonParser::SkipErrors1JsonIn2Messages
|85.3%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/datashard/ut_keys/ydb-core-tx-datashard-ut_keys
>> TestJsonParser::SkipErrors1JsonIn2Messages [GOOD]
|85.3%| [TM] {RESULT} ydb/core/tx/datashard/ut_compaction/unittest
|85.3%| [LD] {RESULT} $(B)/ydb/core/tx/datashard/ut_keys/ydb-core-tx-datashard-ut_keys
>> TestPurecalcFilter::Simple1
>> LargeMessage::Test [GOOD]
>> OutgoingStream::Basic
------- [TM] {asan, default-linux-x86_64, release} ydb/core/fq/libs/checkpointing/ut/unittest >> TCheckpointCoordinatorTests::ShouldDoNothingIfNoIngressTasks [GOOD]
Test command err:
2026-01-08T15:25:04.347136Z node 1 :STREAMS_CHECKPOINT_COORDINATOR DEBUG: checkpoint_coordinator.cpp:70: [my-graph-id.42] TEvReadyState, streaming disposition { }, state load mode FROM_LAST_CHECKPOINT, checkpointing period 3600.000000s
2026-01-08T15:25:04.347262Z node 1 :STREAMS_CHECKPOINT_COORDINATOR DEBUG: checkpoint_coordinator.cpp:104:
[my-graph-id.42] AllActors count: 3, ActorsToTrigger count: 1, ActorsToNotify count: 2, ActorsToWaitFor count: 3 2026-01-08T15:25:04.347310Z node 1 :STREAMS_CHECKPOINT_COORDINATOR DEBUG: checkpoint_coordinator.cpp:114: [my-graph-id.42] Send TEvRegisterCoordinatorRequest Waiting for TEvRegisterCoordinatorRequest (storage) 2026-01-08T15:25:04.347591Z node 1 :STREAMS_CHECKPOINT_COORDINATOR DEBUG: checkpoint_coordinator.cpp:131: [my-graph-id.42] Got TEvRegisterCoordinatorResponse; issues: 2026-01-08T15:25:04.347618Z node 1 :STREAMS_CHECKPOINT_COORDINATOR DEBUG: checkpoint_coordinator.cpp:140: [my-graph-id.42] Successfully registered in storage 2026-01-08T15:25:04.347654Z node 1 :STREAMS_CHECKPOINT_COORDINATOR INFO: checkpoint_coordinator.cpp:141: [my-graph-id.42] Send TEvNewCheckpointCoordinator to 3 actor(s) 2026-01-08T15:25:04.347714Z node 1 :STREAMS_CHECKPOINT_COORDINATOR INFO: checkpoint_coordinator.cpp:149: [my-graph-id.42] Send TEvGetCheckpointsMetadataRequest; state load mode: FROM_LAST_CHECKPOINT; load graph: 0 Waiting for TEvGetCheckpointsMetadataRequest (storage) 2026-01-08T15:25:04.356291Z node 1 :STREAMS_CHECKPOINT_COORDINATOR DEBUG: checkpoint_coordinator.cpp:186: [my-graph-id.42] Got TEvGetCheckpointsMetadataResponse 2026-01-08T15:25:04.356338Z node 1 :STREAMS_CHECKPOINT_COORDINATOR INFO: checkpoint_coordinator.cpp:212: [my-graph-id.42] Found no checkpoints to restore from, creating a 'zero' checkpoint 2026-01-08T15:25:04.356368Z node 1 :STREAMS_CHECKPOINT_COORDINATOR INFO: checkpoint_coordinator.cpp:349: [my-graph-id.42] [42:1] Registering new checkpoint in storage Waiting for TEvCreateCheckpointRequest (storage) 2026-01-08T15:25:04.366037Z node 1 :STREAMS_CHECKPOINT_COORDINATOR DEBUG: checkpoint_coordinator.cpp:389: [my-graph-id.42] [42:1] Got TEvCreateCheckpointResponse 2026-01-08T15:25:04.366108Z node 1 :STREAMS_CHECKPOINT_COORDINATOR INFO: checkpoint_coordinator.cpp:435: [my-graph-id.42] [42:1] Checkpoint successfully created, going to inject barriers to 1 actor(s) 2026-01-08T15:25:04.366164Z node 1 :STREAMS_CHECKPOINT_COORDINATOR INFO: checkpoint_coordinator.cpp:445: [my-graph-id.42] Send TEvRun to all actors Waiting for TEvInjectCheckpointBarrier (ingress) 2026-01-08T15:25:04.366334Z node 1 :STREAMS_CHECKPOINT_COORDINATOR DEBUG: checkpoint_coordinator.cpp:466: [my-graph-id.42] [42:1] Got TEvSaveTaskStateResult; task 0, status: OK, size: 100 2026-01-08T15:25:04.366372Z node 1 :STREAMS_CHECKPOINT_COORDINATOR DEBUG: checkpoint_coordinator.cpp:476: [my-graph-id.42] [42:1] Task state saved, need 2 more acks 2026-01-08T15:25:04.366412Z node 1 :STREAMS_CHECKPOINT_COORDINATOR DEBUG: checkpoint_coordinator.cpp:466: [my-graph-id.42] [42:1] Got TEvSaveTaskStateResult; task 0, status: OK, size: 100 2026-01-08T15:25:04.366441Z node 1 :STREAMS_CHECKPOINT_COORDINATOR DEBUG: checkpoint_coordinator.cpp:476: [my-graph-id.42] [42:1] Task state saved, need 1 more acks 2026-01-08T15:25:04.366494Z node 1 :STREAMS_CHECKPOINT_COORDINATOR DEBUG: checkpoint_coordinator.cpp:466: [my-graph-id.42] [42:1] Got TEvSaveTaskStateResult; task 0, status: OK, size: 100 2026-01-08T15:25:04.366527Z node 1 :STREAMS_CHECKPOINT_COORDINATOR DEBUG: checkpoint_coordinator.cpp:476: [my-graph-id.42] [42:1] Task state saved, need 0 more acks 2026-01-08T15:25:04.366554Z node 1 :STREAMS_CHECKPOINT_COORDINATOR INFO: checkpoint_coordinator.cpp:489: [my-graph-id.42] [42:1] Got all acks, changing checkpoint status to 'PendingCommit' Waiting for TEvSetCheckpointPendingCommitStatusRequest (storage) 2026-01-08T15:25:04.366636Z 
node 1 :STREAMS_CHECKPOINT_COORDINATOR DEBUG: checkpoint_coordinator.cpp:501: [my-graph-id.42] [42:1] Got TEvSetCheckpointPendingCommitStatusResponse 2026-01-08T15:25:04.366669Z node 1 :STREAMS_CHECKPOINT_COORDINATOR INFO: checkpoint_coordinator.cpp:516: [my-graph-id.42] [42:1] Checkpoint status changed to 'PendingCommit', committing states to 2 actor(s) Waiting for TEvCommitChanges (ingress) Waiting for TEvCommitChanges (egress) 2026-01-08T15:25:04.366769Z node 1 :STREAMS_CHECKPOINT_COORDINATOR DEBUG: checkpoint_coordinator.cpp:532: [my-graph-id.42] [42:1] Got TEvStateCommitted; task: 1 2026-01-08T15:25:04.366804Z node 1 :STREAMS_CHECKPOINT_COORDINATOR DEBUG: checkpoint_coordinator.cpp:541: [my-graph-id.42] [42:1] State committed [1:6:2053], need 1 more acks 2026-01-08T15:25:04.366858Z node 1 :STREAMS_CHECKPOINT_COORDINATOR DEBUG: checkpoint_coordinator.cpp:532: [my-graph-id.42] [42:1] Got TEvStateCommitted; task: 3 2026-01-08T15:25:04.366890Z node 1 :STREAMS_CHECKPOINT_COORDINATOR DEBUG: checkpoint_coordinator.cpp:541: [my-graph-id.42] [42:1] State committed [1:8:2055], need 0 more acks 2026-01-08T15:25:04.366918Z node 1 :STREAMS_CHECKPOINT_COORDINATOR INFO: checkpoint_coordinator.cpp:543: [my-graph-id.42] [42:1] Got all acks, changing checkpoint status to 'Completed' Waiting for TEvCompleteCheckpointRequest (storage) 2026-01-08T15:25:04.366981Z node 1 :STREAMS_CHECKPOINT_COORDINATOR DEBUG: checkpoint_coordinator.cpp:564: [my-graph-id.42] [42:1] Got TEvCompleteCheckpointResponse 2026-01-08T15:25:04.367021Z node 1 :STREAMS_CHECKPOINT_COORDINATOR INFO: checkpoint_coordinator.cpp:579: [my-graph-id.42] [42:1] Checkpoint completed 2026-01-08T15:25:04.367063Z node 1 :STREAMS_CHECKPOINT_COORDINATOR DEBUG: checkpoint_coordinator.cpp:649: [my-graph-id.42] Got TEvRunGraph 2026-01-08T15:25:04.507054Z node 2 :STREAMS_CHECKPOINT_COORDINATOR DEBUG: checkpoint_coordinator.cpp:70: [my-graph-id.42] TEvReadyState, streaming disposition { }, state load mode FROM_LAST_CHECKPOINT, checkpointing period 3600.000000s 2026-01-08T15:25:04.507163Z node 2 :STREAMS_CHECKPOINT_COORDINATOR DEBUG: checkpoint_coordinator.cpp:104: [my-graph-id.42] AllActors count: 3, ActorsToTrigger count: 1, ActorsToNotify count: 2, ActorsToWaitFor count: 3 2026-01-08T15:25:04.507217Z node 2 :STREAMS_CHECKPOINT_COORDINATOR DEBUG: checkpoint_coordinator.cpp:114: [my-graph-id.42] Send TEvRegisterCoordinatorRequest Waiting for TEvRegisterCoordinatorRequest (storage) 2026-01-08T15:25:04.507487Z node 2 :STREAMS_CHECKPOINT_COORDINATOR DEBUG: checkpoint_coordinator.cpp:131: [my-graph-id.42] Got TEvRegisterCoordinatorResponse; issues: 2026-01-08T15:25:04.507523Z node 2 :STREAMS_CHECKPOINT_COORDINATOR DEBUG: checkpoint_coordinator.cpp:140: [my-graph-id.42] Successfully registered in storage 2026-01-08T15:25:04.507554Z node 2 :STREAMS_CHECKPOINT_COORDINATOR INFO: checkpoint_coordinator.cpp:141: [my-graph-id.42] Send TEvNewCheckpointCoordinator to 3 actor(s) 2026-01-08T15:25:04.507615Z node 2 :STREAMS_CHECKPOINT_COORDINATOR INFO: checkpoint_coordinator.cpp:149: [my-graph-id.42] Send TEvGetCheckpointsMetadataRequest; state load mode: FROM_LAST_CHECKPOINT; load graph: 0 Waiting for TEvGetCheckpointsMetadataRequest (storage) 2026-01-08T15:25:04.507766Z node 2 :STREAMS_CHECKPOINT_COORDINATOR DEBUG: checkpoint_coordinator.cpp:186: [my-graph-id.42] Got TEvGetCheckpointsMetadataResponse 2026-01-08T15:25:04.507797Z node 2 :STREAMS_CHECKPOINT_COORDINATOR INFO: checkpoint_coordinator.cpp:212: [my-graph-id.42] Found no checkpoints to restore from, creating a 
'zero' checkpoint 2026-01-08T15:25:04.507834Z node 2 :STREAMS_CHECKPOINT_COORDINATOR INFO: checkpoint_coordinator.cpp:349: [my-graph-id.42] [42:1] Registering new checkpoint in storage Waiting for TEvCreateCheckpointRequest (storage) 2026-01-08T15:25:04.507955Z node 2 :STREAMS_CHECKPOINT_COORDINATOR DEBUG: checkpoint_coordinator.cpp:389: [my-graph-id.42] [42:1] Got TEvCreateCheckpointResponse 2026-01-08T15:25:04.508015Z node 2 :STREAMS_CHECKPOINT_COORDINATOR INFO: checkpoint_coordinator.cpp:435: [my-graph-id.42] [42:1] Checkpoint successfully created, going to inject barriers to 1 actor(s) 2026-01-08T15:25:04.508071Z node 2 :STREAMS_CHECKPOINT_COORDINATOR INFO: checkpoint_coordinator.cpp:445: [my-graph-id.42] Send TEvRun to all actors Waiting for TEvInjectCheckpointBarrier (ingress) 2026-01-08T15:25:04.508206Z node 2 :STREAMS_CHECKPOINT_COORDINATOR DEBUG: checkpoint_coordinator.cpp:466: [my-graph-id.42] [42:1] Got TEvSaveTaskStateResult; task 0, status: OK, size: 100 2026-01-08T15:25:04.508243Z node 2 :STREAMS_CHECKPOINT_COORDINATOR DEBUG: checkpoint_coordinator.cpp:476: [my-graph-id.42] [42:1] Task state saved, need 2 more acks 2026-01-08T15:25:04.508284Z node 2 :STREAMS_CHECKPOINT_COORDINATOR DEBUG: checkpoint_coordinator.cpp:466: [my-graph-id.42] [42:1] Got TEvSaveTaskStateResult; task 0, status: OK, size: 100 2026-01-08T15:25:04.508370Z node 2 :STREAMS_CHECKPOINT_COORDINATOR DEBUG: checkpoint_coordinator.cpp:476: [my-graph-id.42] [42:1] Task state saved, need 1 more acks 2026-01-08T15:25:04.508412Z node 2 :STREAMS_CHECKPOINT_COORDINATOR DEBUG: checkpoint_coordinator.cpp:466: [my-graph-id.42] [42:1] Got TEvSaveTaskStateResult; task 0, status: OK, size: 100 2026-01-08T15:25:04.508444Z node 2 :STREAMS_CHECKPOINT_COORDINATOR DEBUG: checkpoint_coordinator.cpp:476: [my-graph-id.42] [42:1] Task state saved, need 0 more acks 2026-01-08T15:25:04.508472Z node 2 :STREAMS_CHECKPOINT_COORDINATOR INFO: checkpoint_coordinator.cpp:489: [my-graph-id.42] [42:1] Got all acks, changing checkpoint status to 'PendingCommit' Waiting for TEvSetCheckpointPendingCommitStatusRequest (storage) 2026-01-08T15:25:04.508552Z node 2 :STREAMS_CHECKPOINT_COORDINATOR DEBUG: checkpoint_coordinator.cpp:501: [my-graph-id.42] [42:1] Got TEvSetCheckpointPendingCommitStatusResponse 2026-01-08T15:25:04.508588Z node 2 :STREAMS_CHECKPOINT_COORDINATOR INFO: checkpoint_coordinator.cpp:516: [my-graph-id.42] [42:1] Checkpoint status changed to 'PendingCommit', committing states to 2 actor(s) Waiting for TEvCommitChanges (ingress) Waiting for TEvCommitChanges (egress) 2026-01-08T15:25:04.508721Z node 2 :STREAMS_CHECKPOINT_COORDINATOR DEBUG: checkpoint_coordinator.cpp:532: [my-graph-id.42] [42:1] Got TEvStateCommitted; task: 1 2026-01-08T15:25:04.508763Z node 2 :STREAMS_CHECKPOINT_COORDINATOR DEBUG: checkpoint_coordinator.cpp:541: [my-graph-id.42] [42:1] State committed [2:6:2053], need 1 more acks 2026-01-08T15:25:04.508798Z node 2 :STREAMS_CHECKPOINT_COORDINATOR DEBUG: checkpoint_coordinator.cpp:532: [my-graph-id.42] [42:1] Got TEvStateCommitted; task: 3 2026-01-08T15:25:04.508831Z node 2 :STREAMS_CHECKPOINT_COORDINATOR DEBUG: checkpoint_coordinator.cpp:541: [my-graph-id.42] [42:1] State committed [2:8:2055], need 0 more acks 2026-01-08T15:25:04.508872Z node 2 :STREAMS_CHECKPOINT_COORDINATOR INFO: checkpoint_coordinator.cpp:543: [my-graph-id.42] [42:1] Got all acks, changing checkpoint status to 'Completed' Waiting for TEvCompleteCheckpointRequest (storage) 2026-01-08T15:25:04.508929Z node 2 :STREAMS_CHECKPOINT_COORDINATOR DEBUG: 
checkpoint_coordinator.cpp:564: [my-graph-id.42] [42:1] Got TEvCompleteCheckpointResponse 2026-01-08T15:25:04.508958Z node 2 :STREAMS_CHECKPOINT_COORDINATOR INFO: checkpoint_coordinator.cpp:579: [my-graph-id.42] [42:1] Checkpoint completed 2026-01-08T15:25:04.508997Z node 2 :STREAMS_CHECKPOINT_COORDINATOR DEBUG: checkpoint_coordinator.cpp:649: [my-graph-id.42] Got TEvRunGraph 2026-01-08T15:25:04.694883Z node 3 :STREAMS_CHECKPOINT_COORDINATOR DEBUG: checkpoint_coordinator.cpp:70: [my-graph-id.42] TEvReadyState, streaming disposition { }, state load mode FROM_LAST_CHECKPOINT, checkpointing period 3600.000000s 2026-01-08T15:25:04.694987Z node 3 :STREAMS_CHECKPOINT_COORDINATOR DEBUG: checkpoint_coordinator.cpp:104: [my-graph-id.42] AllActors count: 3, ActorsToTrigger count: 1, ActorsToNotify count: 2, ActorsToWaitFor count: 3 2026-01-08T15:25:04.695025Z node 3 :STREAMS_CHECKPOINT_COORDINATOR DEBUG: checkpoint_coordinator.cpp:114: [my-graph-id.42] Send TEvRegisterCoordinatorRequest Wa ... -01-08T15:25:04.913218Z node 4 :STREAMS_CHECKPOINT_COORDINATOR DEBUG: checkpoint_coordinator.cpp:476: [my-graph-id.42] [42:3] Task state saved, need 1 more acks 2026-01-08T15:25:04.913254Z node 4 :STREAMS_CHECKPOINT_COORDINATOR DEBUG: checkpoint_coordinator.cpp:466: [my-graph-id.42] [42:3] Got TEvSaveTaskStateResult; task 0, status: OK, size: 100 2026-01-08T15:25:04.913284Z node 4 :STREAMS_CHECKPOINT_COORDINATOR DEBUG: checkpoint_coordinator.cpp:476: [my-graph-id.42] [42:3] Task state saved, need 0 more acks 2026-01-08T15:25:04.913321Z node 4 :STREAMS_CHECKPOINT_COORDINATOR INFO: checkpoint_coordinator.cpp:489: [my-graph-id.42] [42:3] Got all acks, changing checkpoint status to 'PendingCommit' Waiting for TEvSetCheckpointPendingCommitStatusRequest (storage) 2026-01-08T15:25:04.913371Z node 4 :STREAMS_CHECKPOINT_COORDINATOR DEBUG: checkpoint_coordinator.cpp:501: [my-graph-id.42] [42:3] Got TEvSetCheckpointPendingCommitStatusResponse 2026-01-08T15:25:04.913399Z node 4 :STREAMS_CHECKPOINT_COORDINATOR INFO: checkpoint_coordinator.cpp:516: [my-graph-id.42] [42:3] Checkpoint status changed to 'PendingCommit', committing states to 2 actor(s) Waiting for TEvCommitChanges (ingress) Waiting for TEvCommitChanges (egress) 2026-01-08T15:25:04.913485Z node 4 :STREAMS_CHECKPOINT_COORDINATOR DEBUG: checkpoint_coordinator.cpp:532: [my-graph-id.42] [42:3] Got TEvStateCommitted; task: 1 2026-01-08T15:25:04.913515Z node 4 :STREAMS_CHECKPOINT_COORDINATOR DEBUG: checkpoint_coordinator.cpp:541: [my-graph-id.42] [42:3] State committed [4:6:2053], need 1 more acks 2026-01-08T15:25:04.913548Z node 4 :STREAMS_CHECKPOINT_COORDINATOR DEBUG: checkpoint_coordinator.cpp:532: [my-graph-id.42] [42:3] Got TEvStateCommitted; task: 3 2026-01-08T15:25:04.913592Z node 4 :STREAMS_CHECKPOINT_COORDINATOR DEBUG: checkpoint_coordinator.cpp:541: [my-graph-id.42] [42:3] State committed [4:8:2055], need 0 more acks 2026-01-08T15:25:04.913618Z node 4 :STREAMS_CHECKPOINT_COORDINATOR INFO: checkpoint_coordinator.cpp:543: [my-graph-id.42] [42:3] Got all acks, changing checkpoint status to 'Completed' Waiting for TEvCompleteCheckpointRequest (storage) 2026-01-08T15:25:04.913697Z node 4 :STREAMS_CHECKPOINT_COORDINATOR DEBUG: checkpoint_coordinator.cpp:564: [my-graph-id.42] [42:3] Got TEvCompleteCheckpointResponse 2026-01-08T15:25:04.913729Z node 4 :STREAMS_CHECKPOINT_COORDINATOR INFO: checkpoint_coordinator.cpp:579: [my-graph-id.42] [42:3] Checkpoint completed 2026-01-08T15:25:04.913760Z node 4 :STREAMS_CHECKPOINT_COORDINATOR DEBUG: 
checkpoint_coordinator.cpp:373: [my-graph-id.42] Got TEvScheduleCheckpointing 2026-01-08T15:25:04.913803Z node 4 :STREAMS_CHECKPOINT_COORDINATOR INFO: checkpoint_coordinator.cpp:349: [my-graph-id.42] [42:4] Registering new checkpoint in storage Waiting for TEvCreateCheckpointRequest (storage) 2026-01-08T15:25:04.913850Z node 4 :STREAMS_CHECKPOINT_COORDINATOR DEBUG: checkpoint_coordinator.cpp:389: [my-graph-id.42] [42:4] Got TEvCreateCheckpointResponse 2026-01-08T15:25:04.913879Z node 4 :STREAMS_CHECKPOINT_COORDINATOR INFO: checkpoint_coordinator.cpp:435: [my-graph-id.42] [42:4] Checkpoint successfully created, going to inject barriers to 1 actor(s) Waiting for TEvInjectCheckpointBarrier (ingress) 2026-01-08T15:25:04.913939Z node 4 :STREAMS_CHECKPOINT_COORDINATOR DEBUG: checkpoint_coordinator.cpp:466: [my-graph-id.42] [42:4] Got TEvSaveTaskStateResult; task 0, status: OK, size: 100 2026-01-08T15:25:04.913967Z node 4 :STREAMS_CHECKPOINT_COORDINATOR DEBUG: checkpoint_coordinator.cpp:476: [my-graph-id.42] [42:4] Task state saved, need 2 more acks 2026-01-08T15:25:04.914004Z node 4 :STREAMS_CHECKPOINT_COORDINATOR DEBUG: checkpoint_coordinator.cpp:466: [my-graph-id.42] [42:4] Got TEvSaveTaskStateResult; task 0, status: OK, size: 100 2026-01-08T15:25:04.914032Z node 4 :STREAMS_CHECKPOINT_COORDINATOR DEBUG: checkpoint_coordinator.cpp:476: [my-graph-id.42] [42:4] Task state saved, need 1 more acks 2026-01-08T15:25:04.914068Z node 4 :STREAMS_CHECKPOINT_COORDINATOR DEBUG: checkpoint_coordinator.cpp:466: [my-graph-id.42] [42:4] Got TEvSaveTaskStateResult; task 0, status: OK, size: 100 2026-01-08T15:25:04.914094Z node 4 :STREAMS_CHECKPOINT_COORDINATOR DEBUG: checkpoint_coordinator.cpp:476: [my-graph-id.42] [42:4] Task state saved, need 0 more acks 2026-01-08T15:25:04.914120Z node 4 :STREAMS_CHECKPOINT_COORDINATOR INFO: checkpoint_coordinator.cpp:489: [my-graph-id.42] [42:4] Got all acks, changing checkpoint status to 'PendingCommit' Waiting for TEvSetCheckpointPendingCommitStatusRequest (storage) 2026-01-08T15:25:04.914167Z node 4 :STREAMS_CHECKPOINT_COORDINATOR DEBUG: checkpoint_coordinator.cpp:501: [my-graph-id.42] [42:4] Got TEvSetCheckpointPendingCommitStatusResponse 2026-01-08T15:25:04.914214Z node 4 :STREAMS_CHECKPOINT_COORDINATOR INFO: checkpoint_coordinator.cpp:516: [my-graph-id.42] [42:4] Checkpoint status changed to 'PendingCommit', committing states to 2 actor(s) Waiting for TEvCommitChanges (ingress) Waiting for TEvCommitChanges (egress) 2026-01-08T15:25:04.914313Z node 4 :STREAMS_CHECKPOINT_COORDINATOR DEBUG: checkpoint_coordinator.cpp:532: [my-graph-id.42] [42:4] Got TEvStateCommitted; task: 1 2026-01-08T15:25:04.914387Z node 4 :STREAMS_CHECKPOINT_COORDINATOR DEBUG: checkpoint_coordinator.cpp:541: [my-graph-id.42] [42:4] State committed [4:6:2053], need 1 more acks 2026-01-08T15:25:04.914422Z node 4 :STREAMS_CHECKPOINT_COORDINATOR DEBUG: checkpoint_coordinator.cpp:532: [my-graph-id.42] [42:4] Got TEvStateCommitted; task: 3 2026-01-08T15:25:04.914455Z node 4 :STREAMS_CHECKPOINT_COORDINATOR DEBUG: checkpoint_coordinator.cpp:541: [my-graph-id.42] [42:4] State committed [4:8:2055], need 0 more acks 2026-01-08T15:25:04.914483Z node 4 :STREAMS_CHECKPOINT_COORDINATOR INFO: checkpoint_coordinator.cpp:543: [my-graph-id.42] [42:4] Got all acks, changing checkpoint status to 'Completed' Waiting for TEvCompleteCheckpointRequest (storage) 2026-01-08T15:25:04.914537Z node 4 :STREAMS_CHECKPOINT_COORDINATOR DEBUG: checkpoint_coordinator.cpp:564: [my-graph-id.42] [42:4] Got TEvCompleteCheckpointResponse 
2026-01-08T15:25:04.914562Z node 4 :STREAMS_CHECKPOINT_COORDINATOR INFO: checkpoint_coordinator.cpp:579: [my-graph-id.42] [42:4] Checkpoint completed 2026-01-08T15:25:05.099585Z node 5 :STREAMS_CHECKPOINT_COORDINATOR DEBUG: checkpoint_coordinator.cpp:70: [my-graph-id.42] TEvReadyState, streaming disposition { }, state load mode FROM_LAST_CHECKPOINT, checkpointing period 3600.000000s 2026-01-08T15:25:05.099688Z node 5 :STREAMS_CHECKPOINT_COORDINATOR DEBUG: checkpoint_coordinator.cpp:104: [my-graph-id.42] AllActors count: 3, ActorsToTrigger count: 1, ActorsToNotify count: 2, ActorsToWaitFor count: 3 2026-01-08T15:25:05.099727Z node 5 :STREAMS_CHECKPOINT_COORDINATOR DEBUG: checkpoint_coordinator.cpp:114: [my-graph-id.42] Send TEvRegisterCoordinatorRequest Waiting for TEvRegisterCoordinatorRequest (storage) 2026-01-08T15:25:05.099966Z node 5 :STREAMS_CHECKPOINT_COORDINATOR DEBUG: checkpoint_coordinator.cpp:131: [my-graph-id.42] Got TEvRegisterCoordinatorResponse; issues: 2026-01-08T15:25:05.099990Z node 5 :STREAMS_CHECKPOINT_COORDINATOR DEBUG: checkpoint_coordinator.cpp:140: [my-graph-id.42] Successfully registered in storage 2026-01-08T15:25:05.100021Z node 5 :STREAMS_CHECKPOINT_COORDINATOR INFO: checkpoint_coordinator.cpp:141: [my-graph-id.42] Send TEvNewCheckpointCoordinator to 3 actor(s) 2026-01-08T15:25:05.100085Z node 5 :STREAMS_CHECKPOINT_COORDINATOR INFO: checkpoint_coordinator.cpp:149: [my-graph-id.42] Send TEvGetCheckpointsMetadataRequest; state load mode: FROM_LAST_CHECKPOINT; load graph: 0 Waiting for TEvGetCheckpointsMetadataRequest (storage) 2026-01-08T15:25:05.100219Z node 5 :STREAMS_CHECKPOINT_COORDINATOR DEBUG: checkpoint_coordinator.cpp:186: [my-graph-id.42] Got TEvGetCheckpointsMetadataResponse 2026-01-08T15:25:05.100247Z node 5 :STREAMS_CHECKPOINT_COORDINATOR INFO: checkpoint_coordinator.cpp:212: [my-graph-id.42] Found no checkpoints to restore from, creating a 'zero' checkpoint 2026-01-08T15:25:05.100278Z node 5 :STREAMS_CHECKPOINT_COORDINATOR INFO: checkpoint_coordinator.cpp:349: [my-graph-id.42] [42:1] Registering new checkpoint in storage Waiting for TEvCreateCheckpointRequest (storage) 2026-01-08T15:25:05.100387Z node 5 :STREAMS_CHECKPOINT_COORDINATOR DEBUG: checkpoint_coordinator.cpp:389: [my-graph-id.42] [42:1] Got TEvCreateCheckpointResponse 2026-01-08T15:25:05.100417Z node 5 :STREAMS_CHECKPOINT_COORDINATOR INFO: checkpoint_coordinator.cpp:435: [my-graph-id.42] [42:1] Checkpoint successfully created, going to inject barriers to 1 actor(s) 2026-01-08T15:25:05.100464Z node 5 :STREAMS_CHECKPOINT_COORDINATOR INFO: checkpoint_coordinator.cpp:445: [my-graph-id.42] Send TEvRun to all actors Waiting for TEvInjectCheckpointBarrier (ingress) 2026-01-08T15:25:05.100633Z node 5 :STREAMS_CHECKPOINT_COORDINATOR DEBUG: checkpoint_coordinator.cpp:466: [my-graph-id.42] [42:1] Got TEvSaveTaskStateResult; task 0, status: OK, size: 100 2026-01-08T15:25:05.100667Z node 5 :STREAMS_CHECKPOINT_COORDINATOR DEBUG: checkpoint_coordinator.cpp:476: [my-graph-id.42] [42:1] Task state saved, need 2 more acks 2026-01-08T15:25:05.100717Z node 5 :STREAMS_CHECKPOINT_COORDINATOR DEBUG: checkpoint_coordinator.cpp:466: [my-graph-id.42] [42:1] Got TEvSaveTaskStateResult; task 0, status: STORAGE_ERROR, size: 0 2026-01-08T15:25:05.100748Z node 5 :STREAMS_CHECKPOINT_COORDINATOR ERROR: checkpoint_coordinator.cpp:479: [my-graph-id.42] [42:1] StorageError: can't save node state, aborting checkpoint 2026-01-08T15:25:05.100789Z node 5 :STREAMS_CHECKPOINT_COORDINATOR DEBUG: checkpoint_coordinator.cpp:466: 
[my-graph-id.42] [42:1] Got TEvSaveTaskStateResult; task 0, status: STORAGE_ERROR, size: 0 2026-01-08T15:25:05.100822Z node 5 :STREAMS_CHECKPOINT_COORDINATOR ERROR: checkpoint_coordinator.cpp:479: [my-graph-id.42] [42:1] StorageError: can't save node state, aborting checkpoint 2026-01-08T15:25:05.100849Z node 5 :STREAMS_CHECKPOINT_COORDINATOR ERROR: checkpoint_coordinator.cpp:484: [my-graph-id.42] [42:1] Got all acks for aborted checkpoint, aborting in storage Waiting for TEvAbortCheckpointRequest (storage) 2026-01-08T15:25:05.100903Z node 5 :STREAMS_CHECKPOINT_COORDINATOR DEBUG: checkpoint_coordinator.cpp:590: [my-graph-id.42] [42:1] Got TEvAbortCheckpointResponse 2026-01-08T15:25:05.100929Z node 5 :STREAMS_CHECKPOINT_COORDINATOR WARN: checkpoint_coordinator.cpp:596: [my-graph-id.42] [42:1] Checkpoint aborted 2026-01-08T15:25:05.100969Z node 5 :STREAMS_CHECKPOINT_COORDINATOR DEBUG: checkpoint_coordinator.cpp:373: [my-graph-id.42] Got TEvScheduleCheckpointing 2026-01-08T15:25:05.101001Z node 5 :STREAMS_CHECKPOINT_COORDINATOR INFO: checkpoint_coordinator.cpp:349: [my-graph-id.42] [42:2] Registering new checkpoint in storage Waiting for TEvCreateCheckpointRequest (storage) 2026-01-08T15:25:05.101062Z node 5 :STREAMS_CHECKPOINT_COORDINATOR DEBUG: checkpoint_coordinator.cpp:389: [my-graph-id.42] [42:2] Got TEvCreateCheckpointResponse 2026-01-08T15:25:05.101090Z node 5 :STREAMS_CHECKPOINT_COORDINATOR INFO: checkpoint_coordinator.cpp:435: [my-graph-id.42] [42:2] Checkpoint successfully created, going to inject barriers to 1 actor(s) Waiting for TEvInjectCheckpointBarrier (ingress) 2026-01-08T15:25:05.233567Z node 6 :STREAMS_CHECKPOINT_COORDINATOR DEBUG: checkpoint_coordinator.cpp:70: [my-graph-id.42] TEvReadyState, streaming disposition { }, state load mode FROM_LAST_CHECKPOINT, checkpointing period 3600.000000s 2026-01-08T15:25:05.233666Z node 6 :STREAMS_CHECKPOINT_COORDINATOR DEBUG: checkpoint_coordinator.cpp:104: [my-graph-id.42] AllActors count: 3, ActorsToTrigger count: 0, ActorsToNotify count: 1, ActorsToWaitFor count: 2 2026-01-08T15:25:05.233699Z node 6 :STREAMS_CHECKPOINT_COORDINATOR DEBUG: checkpoint_coordinator.cpp:107: [my-graph-id.42] No ingress tasks, coordinator was disabled 2026-01-08T15:25:05.233726Z node 6 :STREAMS_CHECKPOINT_COORDINATOR INFO: checkpoint_coordinator.cpp:445: [my-graph-id.42] Send TEvRun to all actors |85.3%| [TM] {BAZEL_UPLOAD} ydb/core/fq/libs/checkpointing/ut/unittest >> MediatorTest::RebootTargetTablets [GOOD] >> MediatorTest::ResendSubset >> TTxDataShardBuildFulltextIndexScan::BuildWithTextFromKey [GOOD] >> TTxDataShardBuildFulltextIndexScan::BuildWithRelevance |85.3%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/kqp/executer_actor/ut/ydb-core-kqp-executer_actor-ut |85.3%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/kqp/executer_actor/ut/ydb-core-kqp-executer_actor-ut |85.3%| [TM] {RESULT} ydb/core/fq/libs/checkpointing/ut/unittest |85.3%| [LD] {RESULT} $(B)/ydb/core/kqp/executer_actor/ut/ydb-core-kqp-executer_actor-ut >> TabletService_ChangeSchema::OnlyAdminsAllowed [GOOD] >> TabletService_ExecuteMiniKQL::BasicMiniKQLRead >> TMLPConsumerTests::ReloadPQTablet [GOOD] >> TMLPConsumerTests::AlterConsumer >> MediatorTest::ResendSubset [GOOD] >> MediatorTest::ResendNotSubset >> DataShardSnapshotIsolation::ReadWriteConflictOnUncommittedWrite [GOOD] >> DataShardSnapshotIsolation::ReadWriteConflictOnCommitWithEffects >> TStateStorageConfig::TestReplicaSelectionUniqueCombinations [GOOD] >> TStateStorageConfig::UniformityTest >> TestPurecalcFilter::Simple1 
[GOOD]
>> DescribeSchemaSecretsService::GetSameValueMultipleTimes [GOOD]
>> DescribeSchemaSecretsService::FailWithoutGrants
>> KeyValueGRPCService::TestSimpleExecuteTransactionV2 [GOOD]
>> KeyValueGRPCService::SimpleExecuteTransactionWithWrongGenerationV1
>> TestPurecalcFilter::Simple2
>> TCreateAndDropViewTest::CheckCreatedView [GOOD]
>> TCreateAndDropViewTest::CreateViewDisabledFeatureFlag
>> TxKeys::ComparePointKeys
>> TTxDataShardBuildFulltextIndexScan::BuildWithRelevance [GOOD]
>> TTxDataShardBuildIndexScan::BadRequest
>> DataShardReplication::SplitMergeChangesReboots [GOOD]
>> DataShardReplication::ReplicatedTable+UseSink
>> MediatorTest::ResendNotSubset [GOOD]
|85.3%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/raw_socket/ut/ydb-core-raw_socket-ut
|85.3%| [LD] {RESULT} $(B)/ydb/core/raw_socket/ut/ydb-core-raw_socket-ut
>> test.py::test[solomon-InvalidProject-] [GOOD]
>> test.py::test[solomon-LabelColumnAliases-default.txt]
>> MediatorTest::OneCoordinatorResendTxNotLost
|85.3%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/raw_socket/ut/ydb-core-raw_socket-ut
>> TxKeys::ComparePointKeys [GOOD]
>> TxKeys::ComparePointKeysWithNull
>> TabletService_ExecuteMiniKQL::BasicMiniKQLRead [GOOD]
>> TabletService_ExecuteMiniKQL::ParamsMiniKQLRead
>> TestPurecalcFilter::Simple2 [GOOD]
>> KqpExecuter::TestSuddenAbortAfterReady
>> TestPurecalcFilter::ManyValues
>> ConfigGRPCService::ReplaceConfig [GOOD]
>> ConfigGRPCService::ReplaceConfigWithInvalidHostConfig
>> TxKeys::ComparePointKeysWithNull [GOOD]
>> TxKeys::ComparePointAndRange
>> TBufferedWriter::Flush_AfterEAGAIN_ShouldRestartFromSavedPositionInBuffer_1 [GOOD]
>> TBufferedWriter::Flush_AfterEAGAIN_ShouldRestartFromSavedPositionInBuffer_2 [GOOD]
>> MediatorTest::OneCoordinatorResendTxNotLost [GOOD]
|85.3%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/tests/stability/tool/libpy3tests-stability-tool.global.a
|85.3%| [AR] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/tests/stability/tool/libpy3tests-stability-tool.global.a
|85.3%| [AR] {RESULT} $(B)/ydb/tests/stability/tool/libpy3tests-stability-tool.global.a
>> OutgoingStream::Basic [GOOD]
>> OutgoingStream::WithExternalLife
>> TxKeys::ComparePointAndRange [GOOD]
>> TxKeys::ComparePointAndRangeWithNull
|85.3%| [TS] {asan, default-linux-x86_64, release} ydb/core/raw_socket/ut/unittest >> TBufferedWriter::Flush_AfterEAGAIN_ShouldRestartFromSavedPositionInBuffer_2 [GOOD]
|85.3%| [TS] {BAZEL_UPLOAD} ydb/core/raw_socket/ut/unittest
>> DataShardSnapshotIsolation::ReadWriteConflictOnCommitWithEffects [GOOD]
>> DataShardSnapshotIsolation::ReadWriteConflictOnCommitAfterAnotherCommit
|85.3%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/persqueue/public/list_topics/ut/ydb-core-persqueue-public-list_topics-ut
|85.3%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/persqueue/public/list_topics/ut/ydb-core-persqueue-public-list_topics-ut
|85.4%| [TS] {RESULT} ydb/core/raw_socket/ut/unittest
|85.4%| [LD] {RESULT} $(B)/ydb/core/persqueue/public/list_topics/ut/ydb-core-persqueue-public-list_topics-ut
>> TCreateAndDropViewTest::CreateViewDisabledFeatureFlag [GOOD]
>> TCreateAndDropViewTest::InvalidQuery
|85.3%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/time_cast/ut/ydb-core-tx-time_cast-ut
|85.4%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/time_cast/ut/ydb-core-tx-time_cast-ut
|85.4%| [LD] {RESULT} $(B)/ydb/core/tx/time_cast/ut/ydb-core-tx-time_cast-ut
>> KeyValueGRPCService::SimpleExecuteTransactionWithWrongGenerationV1 [GOOD] >>
KeyValueGRPCService::SimpleExecuteTransactionWithWrongGenerationV2 >> TxKeys::ComparePointAndRangeWithNull [GOOD] >> TxKeys::ComparePointAndRangeWithInf >> PersQueueSdkReadSessionTest::StopResumeReadingData [GOOD] >> ReadSessionImplTest::CreatePartitionStream ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/mediator/ut/unittest >> MediatorTest::OneCoordinatorResendTxNotLost [GOOD] >> BasicUsage::CreateTopicWithCustomName [GOOD] Test command err: ... waiting for SysViewsRoster update finished 2026-01-08T15:26:11.026172Z node 1 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:125: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, got undelivered to scheme cache: ActorUnknown 2026-01-08T15:26:11.152924Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:114:2161], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2026-01-08T15:26:11.153284Z node 1 :METADATA_PROVIDER ERROR: log.cpp:841: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2026-01-08T15:26:11.153448Z node 1 :METADATA_PROVIDER ERROR: log.cpp:841: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2026-01-08T15:26:11.439159Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2026-01-08T15:26:11.439278Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2026-01-08T15:26:11.608213Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1374: Notification cookie mismatch for subscription [1:34:2081] 1767885968175364 != 1767885968175368 2026-01-08T15:26:11.633239Z node 1 :IMPORT WARN: schemeshard_import.cpp:440: Table profiles were not loaded 2026-01-08T15:26:11.677208Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2026-01-08T15:26:11.828633Z node 1 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: PathErrorUnknown, path: Root/.metadata/script_executions ... waiting for SysViewsRoster update finished (done) 2026-01-08T15:26:12.456437Z node 1 :TX_MEDIATOR INFO: mediator__schema.cpp:23: tablet# 72057594047365120 TTxSchema Complete 2026-01-08T15:26:12.457384Z node 1 :TX_MEDIATOR INFO: mediator__init.cpp:88: tablet# 72057594047365120 CreateTxInit wait TEvMediatorConfiguration for switching to StateWork from external 2026-01-08T15:26:12.458092Z node 1 :TX_MEDIATOR DEBUG: mediator_impl.cpp:316: tablet# 72057594047365120 server# [1:853:2738] connected 2026-01-08T15:26:12.458238Z node 1 :TX_MEDIATOR NOTICE: mediator_impl.cpp:133: tablet# 72057594047365120 actor# [1:836:2728] HANDLE TEvMediatorConfiguration Version# 1 2026-01-08T15:26:12.458744Z node 1 :TX_MEDIATOR DEBUG: mediator__configure.cpp:77: tablet# 72057594047365120 version# 1 TTxConfigure Complete 2026-01-08T15:26:12.458892Z node 1 :TX_MEDIATOR INFO: mediator__init.cpp:64: tablet# 72057594047365120 CreateTxInit Complete ... waiting for watcher to connect 2026-01-08T15:26:12.459459Z node 1 :TX_MEDIATOR DEBUG: mediator_impl.cpp:316: tablet# 72057594047365120 server# [1:859:2743] connected 2026-01-08T15:26:12.459540Z node 1 :TX_MEDIATOR DEBUG: mediator_impl.cpp:308: tablet# 72057594047365120 FORWARD Watch from# [1:857:2742] to# [1:855:2740] ExecQueue 2026-01-08T15:26:12.459594Z node 1 :TX_MEDIATOR_EXEC_QUEUE DEBUG: execute_queue.cpp:175: Actor# [1:855:2740] MediatorId# 72057594047365120 HANDLE TEvGranularWatch from# [1:857:2742] bucket# 0 ... 
waiting for watcher to connect (done) 2026-01-08T15:26:12.459818Z node 1 :TX_MEDIATOR DEBUG: mediator_impl.cpp:308: tablet# 72057594047365120 FORWARD Watch from# [1:857:2742] to# [1:855:2740] ExecQueue 2026-01-08T15:26:12.459860Z node 1 :TX_MEDIATOR_EXEC_QUEUE DEBUG: execute_queue.cpp:159: Actor# [1:855:2740] MediatorId# 72057594047365120 HANDLE TEvWatch 2026-01-08T15:26:12.459911Z node 1 :TX_MEDIATOR_EXEC_QUEUE DEBUG: execute_queue.cpp:164: Actor# [1:855:2740] MediatorId# 72057594047365120 SEND TEvWatchBucket to# [1:856:2741] bucket.ActiveActor 2026-01-08T15:26:12.459992Z node 1 :TX_MEDIATOR_TABLETQUEUE DEBUG: tablet_queue.cpp:380: Actor# [1:856:2741] Mediator# 72057594047365120 HANDLE {TEvWatchBucket Source# [1:857:2742]} 2026-01-08T15:26:12.460068Z node 1 :TX_MEDIATOR_TABLETQUEUE DEBUG: tablet_queue.cpp:391: Actor# [1:856:2741] Mediator# 72057594047365120 SEND to# [1:857:2742] {TEvUpdate Mediator# 72057594047365120 Bucket# 0 TimeBarrier# 0} 2026-01-08T15:26:12.470812Z node 1 :TX_MEDIATOR DEBUG: mediator_impl.cpp:316: tablet# 72057594047365120 server# [1:863:2747] connected 2026-01-08T15:26:12.470940Z node 1 :TX_MEDIATOR DEBUG: mediator_impl.cpp:139: tablet# 72057594047365120 HANDLE EvCoordinatorSync 2026-01-08T15:26:12.470994Z node 1 :TX_MEDIATOR DEBUG: mediator_impl.cpp:83: tablet# 72057594047365120 SEND EvCoordinatorSyncResult to# [1:861:2745] Cookie# 1 CompleteStep# 0 LatestKnownStep# 0 SubjectiveTime# 951 Coordinator# 72057594046316545 2026-01-08T15:26:12.471295Z node 1 :TX_MEDIATOR_EXEC_QUEUE DEBUG: execute_queue.cpp:72: Actor# [1:855:2740] MediatorId# 72057594047365120 HANDLE TEvCommitStep {TMediateStep From 0 To# 1000Steps: {{TCoordinatorStep step# 1000 PrevStep# 0}}} marker# M1 2026-01-08T15:26:12.471339Z node 1 :TX_MEDIATOR_EXEC_QUEUE DEBUG: execute_queue.cpp:119: Actor# [1:855:2740] MediatorId# 72057594047365120 SEND TEvStepPlanComplete to# [1:856:2741] bucket.ActiveActor step# 1000 2026-01-08T15:26:12.471405Z node 1 :TX_MEDIATOR_TABLETQUEUE DEBUG: tablet_queue.cpp:319: Actor# [1:856:2741] Mediator# 72057594047365120 HANDLE {TEvStepPlanComplete step# 1000} 2026-01-08T15:26:12.471601Z node 1 :TX_MEDIATOR_TABLETQUEUE DEBUG: tablet_queue.cpp:171: Actor# [1:856:2741] Mediator# 72057594047365120 SEND to# [1:857:2742] {TEvUpdate Mediator# 72057594047365120 Bucket# 0 TimeBarrier# 1000} ... 
waiting for blocked plan step 2026-01-08T15:26:12.489789Z node 1 :TX_MEDIATOR DEBUG: mediator_impl.cpp:280: tablet# 72057594047365120 HANDLE EvCoordinatorStep coordinator# 72057594046316545 step# 1010 2026-01-08T15:26:12.489846Z node 1 :TX_MEDIATOR INFO: mediator_impl.cpp:287: Coordinator step: Mediator [72057594047365120], Coordinator [72057594046316545], step# [1010] transactions [1] 2026-01-08T15:26:12.489992Z node 1 :TX_MEDIATOR DEBUG: mediator_impl.cpp:205: tablet# 72057594047365120 SEND EvCommitStep to# [1:855:2740] ExecQueue {TMediateStep From 1000 To# 1010Steps: {{TCoordinatorStep step# 1010 PrevStep# 0Transactions: {{TTx Moderator# 0 txid# 1 AckTo# [1:861:2745]}}TabletsToTransaction: {{tablet# 72057594047365121 txid# 1}}}}} marker# M0 2026-01-08T15:26:12.490096Z node 1 :TX_MEDIATOR_EXEC_QUEUE DEBUG: execute_queue.cpp:72: Actor# [1:855:2740] MediatorId# 72057594047365120 HANDLE TEvCommitStep {TMediateStep From 1000 To# 1010Steps: {{TCoordinatorStep step# 1010 PrevStep# 0Transactions: {{TTx Moderator# 0 txid# 1 AckTo# [1:861:2745]}}TabletsToTransaction: {{tablet# 72057594047365121 txid# 1}}}}} marker# M1 2026-01-08T15:26:12.490139Z node 1 :TX_MEDIATOR_PRIVATE DEBUG: execute_queue.cpp:44: Mediator exec queue [72057594047365120], step# 1010 for tablet [72057594047365121]. TxIds: txid# 1 marker# M2 2026-01-08T15:26:12.490186Z node 1 :TX_MEDIATOR_EXEC_QUEUE DEBUG: execute_queue.cpp:54: Actor# [1:855:2740] MediatorId# 72057594047365120 SEND Ev to# [1:856:2741] step# 1010 forTablet# 72057594047365121 txid# 1 marker# M3 2026-01-08T15:26:12.490247Z node 1 :TX_MEDIATOR_EXEC_QUEUE DEBUG: execute_queue.cpp:119: Actor# [1:855:2740] MediatorId# 72057594047365120 SEND TEvStepPlanComplete to# [1:856:2741] bucket.ActiveActor step# 1010 2026-01-08T15:26:12.490351Z node 1 :TX_MEDIATOR_TABLETQUEUE DEBUG: tablet_queue.cpp:183: Actor# [1:856:2741] Mediator# 72057594047365120 HANDLE {TEvCommitTabletStep step# 1010 TabletId# 72057594047365121 Transactions {{TTx Moderator# 0 txid# 1 AckTo# [1:861:2745]}}} marker# M4 2026-01-08T15:26:12.490518Z node 1 :TX_MEDIATOR_TABLETQUEUE DEBUG: tablet_queue.cpp:319: Actor# [1:856:2741] Mediator# 72057594047365120 HANDLE {TEvStepPlanComplete step# 1010} 2026-01-08T15:26:12.491600Z node 1 :TX_MEDIATOR_TABLETQUEUE DEBUG: tablet_queue.cpp:248: Actor# [1:856:2741] Mediator# 72057594047365120 HANDLE NKikimr::TEvTabletPipe::TEvClientConnected { TabletId: 72057594047365121 Status: OK ServerId: [1:885:2759] Leader: 1 Dead: 0 Generation: 2 VersionInfo: } 2026-01-08T15:26:12.491693Z node 1 :TX_MEDIATOR_PRIVATE DEBUG: tablet_queue.cpp:117: Send from 72057594047365120 to tablet 72057594047365121, step# 1010, txid# 1, marker M5lu 2026-01-08T15:26:12.491742Z node 1 :TX_MEDIATOR_TABLETQUEUE DEBUG: tablet_queue.cpp:120: Actor# [1:856:2741] Mediator# 72057594047365120 SEND to# 72057594047365121 {TEvPlanStep step# 1010 MediatorId# 72057594047365120 TabletID 72057594047365121} ... blocking NKikimr::TEvTxProcessing::TEvPlanStep from TX_MEDIATOR_TABLET_QUEUE_ACTOR to NKikimr::NTxMediator::NTestSuiteMediatorTest::TTargetTablet cookie 0 ... waiting for blocked plan step (done) ... waiting for no pending commands 2026-01-08T15:26:12.492063Z node 1 :TX_MEDIATOR DEBUG: mediator_impl.cpp:308: tablet# 72057594047365120 FORWARD Watch from# [1:857:2742] to# [1:855:2740] ExecQueue 2026-01-08T15:26:12.492112Z node 1 :TX_MEDIATOR_EXEC_QUEUE DEBUG: execute_queue.cpp:189: Actor# [1:855:2740] MediatorId# 72057594047365120 HANDLE TEvGranularWatchModify from# [1:857:2742] bucket# 0 ... 
waiting for no pending commands (done) ... unblocking NKikimr::TEvTxProcessing::TEvPlanStep from TX_MEDIATOR_TABLET_QUEUE_ACTOR to NKikimr::NTxMediator::NTestSuiteMediatorTest::TTargetTablet ... waiting for watch updates 2026-01-08T15:26:12.492369Z node 1 :TX_MEDIATOR_TABLETQUEUE DEBUG: tablet_queue.cpp:342: Actor# [1:856:2741] Mediator# 72057594047365120 HANDLE {TEvPlanStepAccepted TabletId# 72057594047365121 step# 1010} 2026-01-08T15:26:12.492429Z node 1 :TX_MEDIATOR_TABLETQUEUE DEBUG: tablet_queue.cpp:415: Actor# [1:856:2741] Mediator# 72057594047365120 SEND to# [1:861:2745] {TEvPlanStepAck TabletId# 72057594047365121 step# 1010 txid# 1} 2026-01-08T15:26:12.492540Z node 1 :TX_MEDIATOR_TABLETQUEUE DEBUG: tablet_queue.cpp:171: Actor# [1:856:2741] Mediator# 72057594047365120 SEND to# [1:857:2742] {TEvUpdate Mediator# 72057594047365120 Bucket# 0 TimeBarrier# 1010} ... waiting for watch updates (done) ... waiting for SysViewsRoster update finished 2026-01-08T15:26:21.085826Z node 2 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:125: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, got undelivered to scheme cache: ActorUnknown 2026-01-08T15:26:21.091258Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:125: [WorkloadService] [Service] Failed to discover tenant nodes 2026-01-08T15:26:21.092843Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [2:300:2344], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2026-01-08T15:26:21.093077Z node 2 :METADATA_PROVIDER ERROR: log.cpp:841: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2026-01-08T15:26:21.093169Z node 2 :METADATA_PROVIDER ERROR: log.cpp:841: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2026-01-08T15:26:22.020994Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2026-01-08T15:26:22.021095Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2026-01-08T15:26:22.040844Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1374: Notification cookie mismatch for subscription [2:34:2081] 1767885972999330 != 1767885972999333 2026-01-08T15:26:22.085037Z node 2 :IMPOR ... EDIATOR DEBUG: mediator_impl.cpp:83: tablet# 72057594047365120 SEND EvCoordinatorSyncResult to# [12:901:2767] Cookie# 1 CompleteStep# 0 LatestKnownStep# 0 SubjectiveTime# 950 Coordinator# 72057594046316546 2026-01-08T15:28:07.162260Z node 12 :TX_MEDIATOR DEBUG: mediator_impl.cpp:280: tablet# 72057594047365120 HANDLE EvCoordinatorStep coordinator# 72057594046316545 step# 1010 2026-01-08T15:28:07.162340Z node 12 :TX_MEDIATOR INFO: mediator_impl.cpp:287: Coordinator step: Mediator [72057594047365120], Coordinator [72057594046316545], step# [1010] transactions [1] 2026-01-08T15:28:07.162433Z node 12 :TX_MEDIATOR DEBUG: mediator_impl.cpp:280: tablet# 72057594047365120 HANDLE EvCoordinatorStep coordinator# 72057594046316546 step# 1010 2026-01-08T15:28:07.162472Z node 12 :TX_MEDIATOR INFO: mediator_impl.cpp:287: Coordinator step: Mediator [72057594047365120], Coordinator [72057594046316546], step# [1010] transactions [1] 2026-01-08T15:28:07.162596Z node 12 :TX_MEDIATOR DEBUG: mediator_impl.cpp:205: tablet# 72057594047365120 SEND EvCommitStep to# [12:855:2740] ExecQueue {TMediateStep From 0 To# 1010Steps: {{TCoordinatorStep step# 1010 PrevStep# 0Transactions: {{TTx Moderator# 0 txid# 1 AckTo# [12:898:2764]}}TabletsToTransaction: {{tablet# 72057594047365121 txid# 1}{tablet# 72057594047365122 txid# 1}}}{TCoordinatorStep step# 1010 PrevStep# 0Transactions: {{TTx Moderator# 0 txid# 2 AckTo# [12:901:2767]}}TabletsToTransaction: {{tablet# 72057594047365121 txid# 2}{tablet# 72057594047365122 txid# 2}}}}} marker# M0 2026-01-08T15:28:07.162742Z node 12 :TX_MEDIATOR_EXEC_QUEUE DEBUG: execute_queue.cpp:72: Actor# [12:855:2740] MediatorId# 72057594047365120 HANDLE TEvCommitStep {TMediateStep From 0 To# 1010Steps: {{TCoordinatorStep step# 1010 PrevStep# 0Transactions: {{TTx Moderator# 0 txid# 1 AckTo# [12:898:2764]}}TabletsToTransaction: {{tablet# 72057594047365121 txid# 1}{tablet# 72057594047365122 txid# 1}}}{TCoordinatorStep step# 1010 PrevStep# 0Transactions: {{TTx Moderator# 0 txid# 2 AckTo# [12:901:2767]}}TabletsToTransaction: {{tablet# 72057594047365121 txid# 2}{tablet# 72057594047365122 txid# 2}}}}} marker# M1 2026-01-08T15:28:07.162816Z node 12 :TX_MEDIATOR_PRIVATE DEBUG: execute_queue.cpp:44: Mediator exec queue [72057594047365120], step# 1010 for tablet [72057594047365121]. 
TxIds: txid# 1 txid# 2 marker# M2 2026-01-08T15:28:07.162888Z node 12 :TX_MEDIATOR_EXEC_QUEUE DEBUG: execute_queue.cpp:54: Actor# [12:855:2740] MediatorId# 72057594047365120 SEND Ev to# [12:856:2741] step# 1010 forTablet# 72057594047365121 txid# 1 txid# 2 marker# M3 2026-01-08T15:28:07.162962Z node 12 :TX_MEDIATOR_PRIVATE DEBUG: execute_queue.cpp:44: Mediator exec queue [72057594047365120], step# 1010 for tablet [72057594047365122]. TxIds: txid# 1 txid# 2 marker# M2 2026-01-08T15:28:07.163002Z node 12 :TX_MEDIATOR_EXEC_QUEUE DEBUG: execute_queue.cpp:54: Actor# [12:855:2740] MediatorId# 72057594047365120 SEND Ev to# [12:856:2741] step# 1010 forTablet# 72057594047365122 txid# 1 txid# 2 marker# M3 2026-01-08T15:28:07.163053Z node 12 :TX_MEDIATOR_EXEC_QUEUE DEBUG: execute_queue.cpp:119: Actor# [12:855:2740] MediatorId# 72057594047365120 SEND TEvStepPlanComplete to# [12:856:2741] bucket.ActiveActor step# 1010 2026-01-08T15:28:07.163165Z node 12 :TX_MEDIATOR_TABLETQUEUE DEBUG: tablet_queue.cpp:183: Actor# [12:856:2741] Mediator# 72057594047365120 HANDLE {TEvCommitTabletStep step# 1010 TabletId# 72057594047365121 Transactions {{TTx Moderator# 0 txid# 1 AckTo# [12:898:2764]}{TTx Moderator# 0 txid# 2 AckTo# [12:901:2767]}}} marker# M4 2026-01-08T15:28:07.163403Z node 12 :TX_MEDIATOR_TABLETQUEUE DEBUG: tablet_queue.cpp:183: Actor# [12:856:2741] Mediator# 72057594047365120 HANDLE {TEvCommitTabletStep step# 1010 TabletId# 72057594047365122 Transactions {{TTx Moderator# 0 txid# 1 AckTo# [12:898:2764]}{TTx Moderator# 0 txid# 2 AckTo# [12:901:2767]}}} marker# M4 2026-01-08T15:28:07.163591Z node 12 :TX_MEDIATOR_TABLETQUEUE DEBUG: tablet_queue.cpp:319: Actor# [12:856:2741] Mediator# 72057594047365120 HANDLE {TEvStepPlanComplete step# 1010} 2026-01-08T15:28:07.164565Z node 12 :TX_MEDIATOR_TABLETQUEUE DEBUG: tablet_queue.cpp:248: Actor# [12:856:2741] Mediator# 72057594047365120 HANDLE NKikimr::TEvTabletPipe::TEvClientConnected { TabletId: 72057594047365121 Status: OK ServerId: [12:909:2773] Leader: 1 Dead: 0 Generation: 2 VersionInfo: } 2026-01-08T15:28:07.164646Z node 12 :TX_MEDIATOR_PRIVATE DEBUG: tablet_queue.cpp:117: Send from 72057594047365120 to tablet 72057594047365121, step# 1010, txid# 1, marker M5lu 2026-01-08T15:28:07.164695Z node 12 :TX_MEDIATOR_PRIVATE DEBUG: tablet_queue.cpp:117: Send from 72057594047365120 to tablet 72057594047365121, step# 1010, txid# 2, marker M5lu 2026-01-08T15:28:07.164744Z node 12 :TX_MEDIATOR_TABLETQUEUE DEBUG: tablet_queue.cpp:120: Actor# [12:856:2741] Mediator# 72057594047365120 SEND to# 72057594047365121 {TEvPlanStep step# 1010 MediatorId# 72057594047365120 TabletID 72057594047365121} ... 
blocking NKikimr::TEvTxProcessing::TEvPlanStep from TX_MEDIATOR_TABLET_QUEUE_ACTOR to NKikimr::NTxMediator::NTestSuiteMediatorTest::TTargetTablet cookie 0 2026-01-08T15:28:07.165325Z node 12 :TX_MEDIATOR_TABLETQUEUE DEBUG: tablet_queue.cpp:248: Actor# [12:856:2741] Mediator# 72057594047365120 HANDLE NKikimr::TEvTabletPipe::TEvClientConnected { TabletId: 72057594047365122 Status: OK ServerId: [12:910:2774] Leader: 1 Dead: 0 Generation: 2 VersionInfo: } 2026-01-08T15:28:07.165376Z node 12 :TX_MEDIATOR_PRIVATE DEBUG: tablet_queue.cpp:117: Send from 72057594047365120 to tablet 72057594047365122, step# 1010, txid# 1, marker M5lu 2026-01-08T15:28:07.165411Z node 12 :TX_MEDIATOR_PRIVATE DEBUG: tablet_queue.cpp:117: Send from 72057594047365120 to tablet 72057594047365122, step# 1010, txid# 2, marker M5lu 2026-01-08T15:28:07.165456Z node 12 :TX_MEDIATOR_TABLETQUEUE DEBUG: tablet_queue.cpp:120: Actor# [12:856:2741] Mediator# 72057594047365120 SEND to# 72057594047365122 {TEvPlanStep step# 1010 MediatorId# 72057594047365120 TabletID 72057594047365122} ... blocking NKikimr::TEvTxProcessing::TEvPlanStep from TX_MEDIATOR_TABLET_QUEUE_ACTOR to NKikimr::NTxMediator::NTestSuiteMediatorTest::TTargetTablet cookie 0 2026-01-08T15:28:07.176607Z node 12 :TX_MEDIATOR DEBUG: mediator_impl.cpp:316: tablet# 72057594047365120 server# [12:913:2777] connected 2026-01-08T15:28:07.176746Z node 12 :TX_MEDIATOR DEBUG: mediator_impl.cpp:139: tablet# 72057594047365120 HANDLE EvCoordinatorSync 2026-01-08T15:28:07.176806Z node 12 :TX_MEDIATOR DEBUG: mediator_impl.cpp:83: tablet# 72057594047365120 SEND EvCoordinatorSyncResult to# [12:911:2775] Cookie# 2 CompleteStep# 1010 LatestKnownStep# 1010 SubjectiveTime# 951 Coordinator# 72057594046316546 2026-01-08T15:28:07.177083Z node 12 :TX_MEDIATOR DEBUG: mediator_impl.cpp:280: tablet# 72057594047365120 HANDLE EvCoordinatorStep coordinator# 72057594046316546 step# 1010 2026-01-08T15:28:07.177136Z node 12 :TX_MEDIATOR INFO: mediator_impl.cpp:287: Coordinator step: Mediator [72057594047365120], Coordinator [72057594046316546], step# [1010] transactions [1] 2026-01-08T15:28:07.177222Z node 12 :TX_MEDIATOR DEBUG: mediator_impl.cpp:223: tablet# 72057594047365120 SEND EvRequestLostAcks to# [12:855:2740] ExecQueue step {TCoordinatorStep step# 1010 PrevStep# 0Transactions: {{TTx Moderator# 0 txid# 2 AckTo# [0:0:0]}}TabletsToTransaction: {{tablet# 72057594047365121 txid# 2}{tablet# 72057594047365122 txid# 2}}} 2026-01-08T15:28:07.177385Z node 12 :TX_MEDIATOR_EXEC_QUEUE DEBUG: execute_queue.cpp:130: Actor# [12:855:2740] MediatorId# 72057594047365120 HANDLE TEvRequestLostAcks {TCoordinatorStep step# 1010 PrevStep# 0Transactions: {{TTx Moderator# 0 txid# 2 AckTo# [0:0:0]}}TabletsToTransaction: {{tablet# 72057594047365121 txid# 2}{tablet# 72057594047365122 txid# 2}}} AckTo# [12:911:2775] 2026-01-08T15:28:07.177439Z node 12 :TX_MEDIATOR_PRIVATE DEBUG: execute_queue.cpp:44: Mediator exec queue [72057594047365120], step# 1010 for tablet [72057594047365121]. TxIds: txid# 2 marker# M2 2026-01-08T15:28:07.177497Z node 12 :TX_MEDIATOR_EXEC_QUEUE DEBUG: execute_queue.cpp:54: Actor# [12:855:2740] MediatorId# 72057594047365120 SEND Ev to# [12:856:2741] step# 1010 forTablet# 72057594047365121 txid# 2 marker# M3 2026-01-08T15:28:07.177587Z node 12 :TX_MEDIATOR_PRIVATE DEBUG: execute_queue.cpp:44: Mediator exec queue [72057594047365120], step# 1010 for tablet [72057594047365122]. 
TxIds: txid# 2 marker# M2 2026-01-08T15:28:07.177646Z node 12 :TX_MEDIATOR_EXEC_QUEUE DEBUG: execute_queue.cpp:54: Actor# [12:855:2740] MediatorId# 72057594047365120 SEND Ev to# [12:856:2741] step# 1010 forTablet# 72057594047365122 txid# 2 marker# M3 2026-01-08T15:28:07.177744Z node 12 :TX_MEDIATOR_TABLETQUEUE DEBUG: tablet_queue.cpp:222: Actor# [12:856:2741] Mediator# 72057594047365120 HANDLE {TEvOoOTabletStep step# 1010 TabletId# 72057594047365121 Transactions {{TTx Moderator# 0 txid# 2 AckTo# [12:911:2775]}}} 2026-01-08T15:28:07.177817Z node 12 :TX_MEDIATOR_TABLETQUEUE DEBUG: tablet_queue.cpp:222: Actor# [12:856:2741] Mediator# 72057594047365120 HANDLE {TEvOoOTabletStep step# 1010 TabletId# 72057594047365122 Transactions {{TTx Moderator# 0 txid# 2 AckTo# [12:911:2775]}}} 2026-01-08T15:28:07.189836Z node 12 :TX_MEDIATOR_TABLETQUEUE DEBUG: tablet_queue.cpp:294: Actor# [12:856:2741] Mediator# 72057594047365120 HANDLE NKikimr::TEvTabletPipe::TEvClientDestroyed { TabletId: 72057594047365121 ClientId: [12:905:2771] ServerId: [12:909:2773] } 2026-01-08T15:28:07.206986Z node 12 :TX_MEDIATOR_TABLETQUEUE DEBUG: tablet_queue.cpp:248: Actor# [12:856:2741] Mediator# 72057594047365120 HANDLE NKikimr::TEvTabletPipe::TEvClientConnected { TabletId: 72057594047365121 Status: OK ServerId: [12:934:2787] Leader: 1 Dead: 0 Generation: 3 VersionInfo: } 2026-01-08T15:28:07.207124Z node 12 :TX_MEDIATOR_PRIVATE DEBUG: tablet_queue.cpp:117: Send from 72057594047365120 to tablet 72057594047365121, step# 1010, txid# 1, marker M5lu 2026-01-08T15:28:07.207177Z node 12 :TX_MEDIATOR_PRIVATE DEBUG: tablet_queue.cpp:117: Send from 72057594047365120 to tablet 72057594047365121, step# 1010, txid# 2, marker M5lu 2026-01-08T15:28:07.207246Z node 12 :TX_MEDIATOR_TABLETQUEUE DEBUG: tablet_queue.cpp:120: Actor# [12:856:2741] Mediator# 72057594047365120 SEND to# 72057594047365121 {TEvPlanStep step# 1010 MediatorId# 72057594047365120 TabletID 72057594047365121} ... blocking NKikimr::TEvTxProcessing::TEvPlanStep from TX_MEDIATOR_TABLET_QUEUE_ACTOR to NKikimr::NTxMediator::NTestSuiteMediatorTest::TTargetTablet cookie 0 2026-01-08T15:28:07.218870Z node 12 :TX_CONVEYOR ERROR: log.cpp:841: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2026-01-08T15:28:07.256512Z node 12 :TX_MEDIATOR_TABLETQUEUE DEBUG: tablet_queue.cpp:294: Actor# [12:856:2741] Mediator# 72057594047365120 HANDLE NKikimr::TEvTabletPipe::TEvClientDestroyed { TabletId: 72057594047365122 ClientId: [12:906:2772] ServerId: [12:910:2774] } 2026-01-08T15:28:07.273755Z node 12 :TX_MEDIATOR_TABLETQUEUE DEBUG: tablet_queue.cpp:248: Actor# [12:856:2741] Mediator# 72057594047365120 HANDLE NKikimr::TEvTabletPipe::TEvClientConnected { TabletId: 72057594047365122 Status: OK ServerId: [12:973:2803] Leader: 1 Dead: 0 Generation: 3 VersionInfo: } 2026-01-08T15:28:07.273863Z node 12 :TX_MEDIATOR_PRIVATE DEBUG: tablet_queue.cpp:117: Send from 72057594047365120 to tablet 72057594047365122, step# 1010, txid# 1, marker M5lu 2026-01-08T15:28:07.273913Z node 12 :TX_MEDIATOR_PRIVATE DEBUG: tablet_queue.cpp:117: Send from 72057594047365120 to tablet 72057594047365122, step# 1010, txid# 2, marker M5lu 2026-01-08T15:28:07.273965Z node 12 :TX_MEDIATOR_TABLETQUEUE DEBUG: tablet_queue.cpp:120: Actor# [12:856:2741] Mediator# 72057594047365120 SEND to# 72057594047365122 {TEvPlanStep step# 1010 MediatorId# 72057594047365120 TabletID 72057594047365122} ... 
blocking NKikimr::TEvTxProcessing::TEvPlanStep from TX_MEDIATOR_TABLET_QUEUE_ACTOR to NKikimr::NTxMediator::NTestSuiteMediatorTest::TTargetTablet cookie 0 |85.4%| [TM] {BAZEL_UPLOAD} ydb/core/tx/mediator/ut/unittest >> BasicUsage::CreateTopicWithAvailabilityPeriod >> ReadSessionImplTest::CreatePartitionStream [GOOD] >> ReadSessionImplTest::BrokenCompressedData [GOOD] >> ReadSessionImplTest::CommitOffsetTwiceIsError [GOOD] >> ReadSessionImplTest::DataReceivedCallback >> TestPurecalcFilter::ManyValues [GOOD] >> DataShardReplication::ReplicatedTable+UseSink [GOOD] >> DataShardReplication::ReplicatedTable-UseSink >> TTxDataShardBuildIndexScan::BadRequest [GOOD] >> TTxDataShardBuildIndexScan::RunScan >> TabletService_ExecuteMiniKQL::ParamsMiniKQLRead [GOOD] >> TabletService_ExecuteMiniKQL::MalformedParams >> BasicUsage::TWriteSession_WriteAndReadAndCommitRandomMessages [GOOD] >> BasicUsage::TWriteSession_WriteAndReadAndCommitRandomMessagesNoClusterDiscovery >> TxKeys::ComparePointAndRangeWithInf [GOOD] >> TestPurecalcFilter::NullValues >> test.py::test[solomon-LabelColumnAliases-default.txt] [GOOD] >> ConfigGRPCService::ReplaceConfigWithInvalidHostConfig [GOOD] >> ConfigGRPCService::FetchConfig >> test.py::test[solomon-LabelColumns-default.txt] >> TAsyncComputeActorTest::Basic [GOOD] >> TAsyncComputeActorTest::StatsMode >> TListAllTopicsTests::PlainList >> TAsyncComputeActorTest::StatsMode [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/datashard/ut_keys/unittest >> TxKeys::ComparePointAndRangeWithInf [GOOD] Test command err: 2026-01-08T15:27:57.040737Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7755: Cannot subscribe to console configs 2026-01-08T15:27:57.040801Z node 1 :IMPORT WARN: schemeshard_import.cpp:440: Table profiles were not loaded 2026-01-08T15:27:57.042703Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3119: StateInit, received event# 268828672, Sender [1:113:2143], Recipient [1:136:2157]: NKikimr::TEvTablet::TEvBoot 2026-01-08T15:27:57.055729Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3119: StateInit, received event# 268828673, Sender [1:113:2143], Recipient [1:136:2157]: NKikimr::TEvTablet::TEvRestored 2026-01-08T15:27:57.056040Z node 1 :TX_DATASHARD INFO: datashard.cpp:374: TDataShard::OnActivateExecutor: tablet 9437184 actor [1:136:2157] 2026-01-08T15:27:57.056351Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:661: TxInitSchema.Execute 2026-01-08T15:27:57.100708Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateInactive, received event# 268828684, Sender [1:113:2143], Recipient [1:136:2157]: NKikimr::TEvTablet::TEvFollowerSyncComplete 2026-01-08T15:27:57.109961Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:727: TxInitSchema.Complete 2026-01-08T15:27:57.110052Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:50: TDataShard::TTxInit::Execute 2026-01-08T15:27:57.111728Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1324: LoadChangeRecords: QueueSize: 0, at tablet: 9437184 2026-01-08T15:27:57.111818Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1380: LoadLockChangeRecords at tablet: 9437184 2026-01-08T15:27:57.111895Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1429: LoadChangeRecordCommits at tablet: 9437184 2026-01-08T15:27:57.112283Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:94: TDataShard::TTxInit::Complete 2026-01-08T15:27:57.112580Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:102: TDataShard::TTxInitRestored::Execute 2026-01-08T15:27:57.112648Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:142: DataShard 9437184 persisting 
started state actor id [1:209:2157] in generation 2 2026-01-08T15:27:57.178725Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:151: TDataShard::TTxInitRestored::Complete 2026-01-08T15:27:57.232539Z node 1 :TX_DATASHARD INFO: datashard.cpp:418: Switched to work state WaitScheme tabletId 9437184 2026-01-08T15:27:57.232729Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:458: 9437184 not sending time cast registration request in state WaitScheme: missing processing params 2026-01-08T15:27:57.232828Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1259: Change sender created: at tablet: 9437184, actorId: [1:223:2219] 2026-01-08T15:27:57.232859Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1264: Trying to activate change sender: at tablet: 9437184 2026-01-08T15:27:57.232898Z node 1 :TX_DATASHARD INFO: datashard.cpp:1281: Cannot activate change sender: at tablet: 9437184, state: WaitScheme 2026-01-08T15:27:57.232943Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 9437184 2026-01-08T15:27:57.233085Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3153: StateWork, received event# 2146435072, Sender [1:136:2157], Recipient [1:136:2157]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvProgressTransaction 2026-01-08T15:27:57.233136Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3178: StateWork, processing event TEvPrivate::TEvProgressTransaction 2026-01-08T15:27:57.233382Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 9437184 2026-01-08T15:27:57.233465Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 9437184 2026-01-08T15:27:57.233569Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 9437184 2026-01-08T15:27:57.233614Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 9437184 active 0 active planned 0 immediate 0 planned 0 2026-01-08T15:27:57.233655Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:385: Check unit PlanQueue at 9437184 2026-01-08T15:27:57.233687Z node 1 :TX_DATASHARD TRACE: plan_queue_unit.cpp:59: TPlanQueueUnit at 9437184 has no attached operations 2026-01-08T15:27:57.233720Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:403: Unit PlanQueue has no ready operations at 9437184 2026-01-08T15:27:57.233760Z node 1 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 9437184 TxInFly 0 2026-01-08T15:27:57.233802Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 9437184 2026-01-08T15:27:57.233876Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3153: StateWork, received event# 269877761, Sender [1:219:2216], Recipient [1:136:2157]: NKikimr::TEvTabletPipe::TEvServerConnected 2026-01-08T15:27:57.233913Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3189: StateWork, processing event TEvTabletPipe::TEvServerConnected 2026-01-08T15:27:57.233951Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3740: Server connected at leader tablet# 9437184, clientId# [1:217:2215], serverId# [1:219:2216], sessionId# [0:0:0] 2026-01-08T15:27:57.272961Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3153: StateWork, received event# 269549568, Sender [1:104:2137], Recipient [1:136:2157]: NKikimrTxDataShard.TEvProposeTransaction TxKind: TX_KIND_SCHEME SourceDeprecated { RawX1: 104 RawX2: 4294969433 } TxBody: "\nY\n\006table2\032\n\n\004key1\030\002 \"\032\013\n\004key2\030\200$ #\032\014\n\005value\030\200$ 
8(\"(#:\010Z\006\010\000\030\000(\000J\014/Root/table2\222\002\013\th\020\000\000\000\000\000\000\020\016" TxId: 1 ExecLevel: 0 Flags: 0 SchemeShardId: 4200 ProcessingParams { } 2026-01-08T15:27:57.273038Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3159: StateWork, processing event TEvDataShard::TEvProposeTransaction 2026-01-08T15:27:57.274759Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 9437184 2026-01-08T15:27:57.276478Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1848: Trying to execute [0:1] at 9437184 on unit CheckSchemeTx 2026-01-08T15:27:57.276994Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:133: Propose scheme transaction at tablet 9437184 txId 1 ssId 4200 seqNo 0:0 2026-01-08T15:27:57.278278Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:221: Prepared scheme transaction txId 1 at tablet 9437184 2026-01-08T15:27:57.278734Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1883: Execution status for [0:1] at 9437184 is ExecutedNoMoreRestarts 2026-01-08T15:27:57.279141Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1931: Advance execution plan for [0:1] at 9437184 executing on unit CheckSchemeTx 2026-01-08T15:27:57.279984Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1937: Add [0:1] at 9437184 to execution unit StoreSchemeTx 2026-01-08T15:27:57.280815Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1848: Trying to execute [0:1] at 9437184 on unit StoreSchemeTx 2026-01-08T15:27:57.289039Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1883: Execution status for [0:1] at 9437184 is DelayCompleteNoMoreRestarts 2026-01-08T15:27:57.289091Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1931: Advance execution plan for [0:1] at 9437184 executing on unit StoreSchemeTx 2026-01-08T15:27:57.289153Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1937: Add [0:1] at 9437184 to execution unit FinishPropose 2026-01-08T15:27:57.289969Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1848: Trying to execute [0:1] at 9437184 on unit FinishPropose 2026-01-08T15:27:57.290839Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1883: Execution status for [0:1] at 9437184 is DelayComplete 2026-01-08T15:27:57.290868Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1931: Advance execution plan for [0:1] at 9437184 executing on unit FinishPropose 2026-01-08T15:27:57.291370Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1937: Add [0:1] at 9437184 to execution unit WaitForPlan 2026-01-08T15:27:57.291821Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1848: Trying to execute [0:1] at 9437184 on unit WaitForPlan 2026-01-08T15:27:57.292335Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1853: Operation [0:1] at 9437184 is not ready to execute on unit WaitForPlan 2026-01-08T15:27:57.332472Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 9437184 2026-01-08T15:27:57.333011Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1954: Complete execution for [0:1] at 9437184 on unit StoreSchemeTx 2026-01-08T15:27:57.333470Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1954: Complete execution for [0:1] at 9437184 on unit FinishPropose 2026-01-08T15:27:57.333507Z node 1 :TX_DATASHARD TRACE: finish_propose_unit.cpp:167: Propose transaction complete txid 1 at tablet 9437184 send to client, exec latency: 0 ms, propose latency: 1 ms, status: PREPARED 2026-01-08T15:27:57.338459Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:468: 9437184 not 
sending time cast registration request in state WaitScheme 2026-01-08T15:27:57.359236Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3153: StateWork, received event# 269877761, Sender [1:229:2225], Recipient [1:136:2157]: NKikimr::TEvTabletPipe::TEvServerConnected 2026-01-08T15:27:57.360600Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3189: StateWork, processing event TEvTabletPipe::TEvServerConnected 2026-01-08T15:27:57.369349Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3740: Server connected at leader tablet# 9437184, clientId# [1:228:2224], serverId# [1:229:2225], sessionId# [0:0:0] 2026-01-08T15:27:57.376290Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3153: StateWork, received event# 269287424, Sender [1:104:2137], Recipient [1:136:2157]: {TEvPlanStep step# 1000001 MediatorId# 0 TabletID 9437184} 2026-01-08T15:27:57.376346Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3171: StateWork, processing event TEvTxProcessing::TEvPlanStep 2026-01-08T15:27:57.379709Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1811: Trying to execute [1000001:1] at 9437184 on unit WaitForPlan 2026-01-08T15:27:57.381006Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1826: Execution status for [1000001:1] at 9437184 is Executed 2026-01-08T15:27:57.381802Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1931: Advance execution plan for [1000001:1] at 9437184 executing on unit WaitForPlan 2026-01-08T15:27:57.382243Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1937: Add [1000001:1] at 9437184 to execution unit PlanQueue 2026-01-08T15:27:57.430843Z node 1 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:69: Planned transaction txId 1 at step 1000001 at tablet 9437184 { Transactions { TxId: 1 AckTo { RawX1: 104 RawX2: 4294969433 } } Step: 1000001 MediatorID: 0 TabletID: 9437184 } 2026-01-08T15:27:57.431371Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 9437184 2026-01-08T15:27:57.434197Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3153: StateWork, received event# 2146435072, Sender [1:136:2157], Recipient [1:136:2157]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvProgressTransaction 2026-01-08T15:27:57.434708Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3178: StateWork, processing event TEvPrivate::TEvProgressTransaction 2026-01-08T15:27:57.435155Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 9437184 2026-01-08T15:27:57.435976Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 9437184 active 0 active planned 0 immediate 0 planned 1 2026-01-08T15:27:57.436402Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:327: Check candidate unit PlanQueue at 9437184 2026-01-08T15:27:57.436838Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:333: Found ready operation [1000001:1] in PlanQueue unit at 9437184 2026-01-08T15:27:57.437688Z node 1 :TX_DATASHARD TRACE: dat ... 
ode 5 :TX_DATASHARD TRACE: datashard_pipeline.cpp:341: Unit PlanQueue has no ready operations at 9437184 2026-01-08T15:28:10.269168Z node 5 :TX_DATASHARD TRACE: datashard_impl.h:3153: StateWork, received event# 270270976, Sender [5:26:2073], Recipient [5:134:2156]: {TEvRegisterTabletResult TabletId# 9437184 Entry# 0} 2026-01-08T15:28:10.269236Z node 5 :TX_DATASHARD TRACE: datashard_impl.h:3191: StateWork, processing event TEvMediatorTimecast::TEvRegisterTabletResult 2026-01-08T15:28:10.269288Z node 5 :TX_DATASHARD DEBUG: datashard.cpp:3772: Got TEvMediatorTimecast::TEvRegisterTabletResult at 9437184 time 0 2026-01-08T15:28:10.269346Z node 5 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 9437184 2026-01-08T15:28:10.272160Z node 5 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:91: Sending '{TEvPlanStepAck TabletId# 9437184 step# 1000001 txid# 1} 2026-01-08T15:28:10.272250Z node 5 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:98: Sending '{TEvPlanStepAccepted TabletId# 9437184 step# 1000001} 2026-01-08T15:28:10.272358Z node 5 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 9437184 2026-01-08T15:28:10.274168Z node 5 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 9437184 2026-01-08T15:28:10.274237Z node 5 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1954: Complete execution for [1000001:1] at 9437184 on unit CreateTable 2026-01-08T15:28:10.274291Z node 5 :TX_DATASHARD DEBUG: datashard.cpp:1264: Trying to activate change sender: at tablet: 9437184 2026-01-08T15:28:10.274371Z node 5 :TX_DATASHARD INFO: datashard.cpp:1302: Change sender activated: at tablet: 9437184 2026-01-08T15:28:10.274408Z node 5 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1954: Complete execution for [1000001:1] at 9437184 on unit CompleteOperation 2026-01-08T15:28:10.274488Z node 5 :TX_DATASHARD DEBUG: datashard.cpp:810: Complete [1000001 : 1] from 9437184 at tablet 9437184 send result to client [5:103:2137], exec latency: 0 ms, propose latency: 2 ms 2026-01-08T15:28:10.274558Z node 5 :TX_DATASHARD INFO: datashard.cpp:1599: 9437184 Sending notify to schemeshard 4200 txId 1 state Ready TxInFly 0 2026-01-08T15:28:10.274655Z node 5 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 9437184 2026-01-08T15:28:10.275558Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:5937: Got TEvDataShard::TEvSchemaChanged for unknown txId 1 message# Source { RawX1: 134 RawX2: 21474838636 } Origin: 9437184 State: 2 TxId: 1 Step: 0 Generation: 2 2026-01-08T15:28:10.275694Z node 5 :TX_DATASHARD TRACE: datashard_impl.h:3153: StateWork, received event# 269877760, Sender [5:232:2228], Recipient [5:134:2156]: NKikimr::TEvTabletPipe::TEvClientConnected { TabletId: 4200 Status: OK ServerId: [5:234:2229] Leader: 1 Dead: 0 Generation: 2 VersionInfo: } 2026-01-08T15:28:10.275744Z node 5 :TX_DATASHARD TRACE: datashard_impl.h:3186: StateWork, processing event TEvTabletPipe::TEvClientConnected 2026-01-08T15:28:10.275834Z node 5 :TX_DATASHARD TRACE: datashard_impl.h:3153: StateWork, received event# 269552132, Sender [5:127:2152], Recipient [5:134:2156]: NKikimrTxDataShard.TEvSchemaChangedResult TxId: 1 2026-01-08T15:28:10.275876Z node 5 :TX_DATASHARD TRACE: datashard_impl.h:3157: StateWork, processing event TEvDataShard::TEvSchemaChangedResult 2026-01-08T15:28:10.275926Z node 5 :TX_DATASHARD DEBUG: datashard.cpp:2965: Handle TEvSchemaChangedResult 1 datashard 9437184 state 
Ready 2026-01-08T15:28:10.276018Z node 5 :TX_DATASHARD DEBUG: datashard__schema_changed.cpp:22: 9437184 Got TEvSchemaChangedResult from SS at 9437184 2026-01-08T15:28:10.276528Z node 5 :TX_DATASHARD TRACE: datashard_impl.h:3153: StateWork, received event# 65543, Sender [5:103:2137], Recipient [5:134:2156]: NActors::TEvents::TEvPoison 2026-01-08T15:28:10.277131Z node 5 :TX_DATASHARD INFO: datashard.cpp:189: OnDetach: 9437184 2026-01-08T15:28:10.277267Z node 5 :TX_DATASHARD INFO: datashard.cpp:1310: Change sender killed: at tablet: 9437184 2026-01-08T15:28:10.307607Z node 5 :TX_DATASHARD TRACE: datashard_impl.h:3119: StateInit, received event# 268828672, Sender [5:237:2230], Recipient [5:239:2231]: NKikimr::TEvTablet::TEvBoot 2026-01-08T15:28:10.316063Z node 5 :TX_DATASHARD TRACE: datashard_impl.h:3119: StateInit, received event# 268828673, Sender [5:237:2230], Recipient [5:239:2231]: NKikimr::TEvTablet::TEvRestored 2026-01-08T15:28:10.316257Z node 5 :TX_DATASHARD TRACE: datashard_impl.h:3119: StateInit, received event# 268828684, Sender [5:237:2230], Recipient [5:239:2231]: NKikimr::TEvTablet::TEvFollowerSyncComplete 2026-01-08T15:28:10.334187Z node 5 :TX_DATASHARD INFO: datashard.cpp:374: TDataShard::OnActivateExecutor: tablet 9437184 actor [5:239:2231] 2026-01-08T15:28:10.338598Z node 5 :TX_DATASHARD DEBUG: datashard__init.cpp:661: TxInitSchema.Execute 2026-01-08T15:28:10.371467Z node 5 :TX_DATASHARD DEBUG: datashard__init.cpp:704: TxInitSchema.Execute Persist Sys_SubDomainInfo 2026-01-08T15:28:10.413421Z node 5 :TX_DATASHARD DEBUG: datashard__init.cpp:727: TxInitSchema.Complete 2026-01-08T15:28:10.413601Z node 5 :TX_DATASHARD DEBUG: datashard__init.cpp:50: TDataShard::TTxInit::Execute 2026-01-08T15:28:10.415956Z node 5 :TX_DATASHARD DEBUG: datashard.cpp:1324: LoadChangeRecords: QueueSize: 0, at tablet: 9437184 2026-01-08T15:28:10.416070Z node 5 :TX_DATASHARD DEBUG: datashard.cpp:1380: LoadLockChangeRecords at tablet: 9437184 2026-01-08T15:28:10.416148Z node 5 :TX_DATASHARD DEBUG: datashard.cpp:1429: LoadChangeRecordCommits at tablet: 9437184 2026-01-08T15:28:10.416697Z node 5 :TX_DATASHARD DEBUG: datashard__init.cpp:94: TDataShard::TTxInit::Complete 2026-01-08T15:28:10.416965Z node 5 :TX_DATASHARD DEBUG: datashard__init.cpp:102: TDataShard::TTxInitRestored::Execute 2026-01-08T15:28:10.417034Z node 5 :TX_DATASHARD DEBUG: datashard__init.cpp:142: DataShard 9437184 persisting started state actor id [5:282:2231] in generation 3 2026-01-08T15:28:10.475141Z node 5 :TX_DATASHARD DEBUG: datashard__init.cpp:151: TDataShard::TTxInitRestored::Complete 2026-01-08T15:28:10.475294Z node 5 :TX_DATASHARD INFO: datashard.cpp:418: Switched to work state Ready tabletId 9437184 2026-01-08T15:28:10.475412Z node 5 :TX_DATASHARD INFO: datashard.cpp:1599: 9437184 Sending notify to schemeshard 4200 txId 1 state Ready TxInFly 0 2026-01-08T15:28:10.475561Z node 5 :TX_DATASHARD INFO: datashard.cpp:476: Send registration request to time cast Ready tabletId 9437184 mediators count is 0 coordinators count is 1 buckets per mediator 2 2026-01-08T15:28:10.475814Z node 5 :TX_DATASHARD DEBUG: datashard.cpp:1259: Change sender created: at tablet: 9437184, actorId: [5:287:2270] 2026-01-08T15:28:10.475896Z node 5 :TX_DATASHARD DEBUG: datashard.cpp:1264: Trying to activate change sender: at tablet: 9437184 2026-01-08T15:28:10.475952Z node 5 :TX_DATASHARD INFO: datashard.cpp:1302: Change sender activated: at tablet: 9437184 2026-01-08T15:28:10.475997Z node 5 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] 
Emit heartbeats: at tablet# 9437184 2026-01-08T15:28:10.476214Z node 5 :TX_DATASHARD DEBUG: datashard__init.cpp:742: TxInitSchemaDefaults.Execute 2026-01-08T15:28:10.476386Z node 5 :TX_DATASHARD DEBUG: datashard__init.cpp:754: TxInitSchemaDefaults.Complete 2026-01-08T15:28:10.476696Z node 5 :TX_DATASHARD TRACE: datashard_impl.h:3153: StateWork, received event# 2146435072, Sender [5:239:2231], Recipient [5:239:2231]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvProgressTransaction 2026-01-08T15:28:10.476762Z node 5 :TX_DATASHARD TRACE: datashard_impl.h:3178: StateWork, processing event TEvPrivate::TEvProgressTransaction 2026-01-08T15:28:10.477185Z node 5 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 9437184 2026-01-08T15:28:10.477298Z node 5 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 9437184 2026-01-08T15:28:10.477457Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:5937: Got TEvDataShard::TEvSchemaChanged for unknown txId 1 message# Source { RawX1: 239 RawX2: 21474838711 } Origin: 9437184 State: 2 TxId: 1 Step: 0 Generation: 3 2026-01-08T15:28:10.477558Z node 5 :TX_DATASHARD TRACE: datashard_impl.h:3153: StateWork, received event# 270270976, Sender [5:26:2073], Recipient [5:239:2231]: {TEvRegisterTabletResult TabletId# 9437184 Entry# 0} 2026-01-08T15:28:10.477599Z node 5 :TX_DATASHARD TRACE: datashard_impl.h:3191: StateWork, processing event TEvMediatorTimecast::TEvRegisterTabletResult 2026-01-08T15:28:10.477651Z node 5 :TX_DATASHARD DEBUG: datashard.cpp:3772: Got TEvMediatorTimecast::TEvRegisterTabletResult at 9437184 time 0 2026-01-08T15:28:10.477703Z node 5 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 9437184 2026-01-08T15:28:10.477764Z node 5 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 9437184 2026-01-08T15:28:10.477829Z node 5 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 9437184 active 0 active planned 0 immediate 0 planned 0 2026-01-08T15:28:10.477899Z node 5 :TX_DATASHARD TRACE: datashard_pipeline.cpp:385: Check unit PlanQueue at 9437184 2026-01-08T15:28:10.477952Z node 5 :TX_DATASHARD TRACE: plan_queue_unit.cpp:59: TPlanQueueUnit at 9437184 has no attached operations 2026-01-08T15:28:10.477995Z node 5 :TX_DATASHARD TRACE: datashard_pipeline.cpp:403: Unit PlanQueue has no ready operations at 9437184 2026-01-08T15:28:10.478040Z node 5 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 9437184 TxInFly 0 2026-01-08T15:28:10.478089Z node 5 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 9437184 2026-01-08T15:28:10.478204Z node 5 :TX_DATASHARD TRACE: datashard_impl.h:3153: StateWork, received event# 270270978, Sender [5:26:2073], Recipient [5:239:2231]: NKikimr::TEvMediatorTimecast::TEvSubscribeReadStepResult{ CoordinatorId# 72057594046316545 LastReadStep# 0 NextReadStep# 0 ReadStep# 0 } 2026-01-08T15:28:10.478248Z node 5 :TX_DATASHARD TRACE: datashard_impl.h:3192: StateWork, processing event TEvMediatorTimecast::TEvSubscribeReadStepResult 2026-01-08T15:28:10.478297Z node 5 :TX_DATASHARD DEBUG: datashard.cpp:3790: Got TEvMediatorTimecast::TEvSubscribeReadStepResult at 9437184 coordinator 72057594046316545 last step 0 next step 0 2026-01-08T15:28:10.478544Z node 5 :TX_DATASHARD TRACE: datashard_impl.h:3153: StateWork, received event# 269877760, Sender [5:285:2268], Recipient [5:239:2231]: 
NKikimr::TEvTabletPipe::TEvClientConnected { TabletId: 4200 Status: OK ServerId: [5:289:2272] Leader: 1 Dead: 0 Generation: 2 VersionInfo: } 2026-01-08T15:28:10.478586Z node 5 :TX_DATASHARD TRACE: datashard_impl.h:3186: StateWork, processing event TEvTabletPipe::TEvClientConnected 2026-01-08T15:28:10.478688Z node 5 :TX_DATASHARD TRACE: datashard_impl.h:3153: StateWork, received event# 269552132, Sender [5:127:2152], Recipient [5:239:2231]: NKikimrTxDataShard.TEvSchemaChangedResult TxId: 1 2026-01-08T15:28:10.478731Z node 5 :TX_DATASHARD TRACE: datashard_impl.h:3157: StateWork, processing event TEvDataShard::TEvSchemaChangedResult 2026-01-08T15:28:10.478781Z node 5 :TX_DATASHARD DEBUG: datashard.cpp:2965: Handle TEvSchemaChangedResult 1 datashard 9437184 state Ready 2026-01-08T15:28:10.478858Z node 5 :TX_DATASHARD DEBUG: datashard__schema_changed.cpp:22: 9437184 Got TEvSchemaChangedResult from SS at 9437184 2026-01-08T15:28:10.495469Z node 5 :TX_DATASHARD TRACE: datashard_impl.h:3153: StateWork, received event# 269877763, Sender [5:285:2268], Recipient [5:239:2231]: NKikimr::TEvTabletPipe::TEvClientDestroyed { TabletId: 4200 ClientId: [5:285:2268] ServerId: [5:289:2272] } 2026-01-08T15:28:10.495554Z node 5 :TX_DATASHARD TRACE: datashard_impl.h:3187: StateWork, processing event TEvTabletPipe::TEvClientDestroyed |85.4%| [TM] {BAZEL_UPLOAD} ydb/core/tx/datashard/ut_keys/unittest |85.4%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/graph/ut/ydb-core-graph-ut >> ReadSessionImplTest::DataReceivedCallback [GOOD] >> ReadSessionImplTest::CommonHandler [GOOD] >> TAsyncComputeActorTest::InputTransformMultichannel |85.4%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/graph/ut/ydb-core-graph-ut |85.4%| [TM] {RESULT} ydb/core/tx/mediator/ut/unittest |85.4%| [TM] {RESULT} ydb/core/tx/datashard/ut_keys/unittest |85.4%| [LD] {RESULT} $(B)/ydb/core/graph/ut/ydb-core-graph-ut >> MediatorTimeCast::ReadStepSubscribe >> TMLPConsumerTests::AlterConsumer [GOOD] >> TMLPConsumerTests::RecreateConsumer >> DescribeSchemaSecretsService::FailWithoutGrants [GOOD] >> DescribeSchemaSecretsService::GroupGrants ------- [TM] {asan, default-linux-x86_64, release} ydb/public/sdk/cpp/src/client/persqueue_public/ut/with_offset_ranges_mode_ut/unittest >> ReadSessionImplTest::CommonHandler [GOOD] Test command err: 2026-01-08T15:26:07.073880Z :SettingsValidation INFO: Random seed for debugging is 1767885967073849 2026-01-08T15:26:08.322239Z node 1 :METADATA_PROVIDER WARN: log.cpp:841: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7593012418718617454:2259];send_to=[0:7307199536658146131:7762515]; 2026-01-08T15:26:08.322460Z node 1 :METADATA_PROVIDER ERROR: log.cpp:841: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2026-01-08T15:26:08.438389Z node 1 :PQ_READ_PROXY DEBUG: caching_service.cpp:44: Direct read cache: : Created 2026-01-08T15:26:08.612299Z node 2 :PQ_READ_PROXY DEBUG: caching_service.cpp:44: Direct read cache: : Created 2026-01-08T15:26:09.235285Z node 2 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: LookupError, path: Root/.metadata/script_executions 2026-01-08T15:26:09.235428Z node 2 :METADATA_PROVIDER ERROR: log.cpp:841: fline=accessor_snapshot_base.cpp:71;action=cannot detect path 
existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2026-01-08T15:26:09.245701Z node 1 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: LookupError, path: Root/.metadata/script_executions 2026-01-08T15:26:09.331518Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2026-01-08T15:26:09.331628Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2026-01-08T15:26:09.333425Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2026-01-08T15:26:09.333489Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2026-01-08T15:26:09.348613Z node 1 :TX_CONVEYOR ERROR: log.cpp:841: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2026-01-08T15:26:09.384216Z node 1 :METADATA_PROVIDER ERROR: log.cpp:841: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2026-01-08T15:26:09.384901Z node 1 :IMPORT WARN: schemeshard_import.cpp:440: Table profiles were not loaded 2026-01-08T15:26:09.389987Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2026-01-08T15:26:09.395328Z node 1 :HIVE WARN: hive_impl.cpp:814: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 2 Cookie 2 2026-01-08T15:26:09.396615Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2026-01-08T15:26:09.477796Z node 2 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: PathErrorUnknown, path: Root/.metadata/script_executions 2026-01-08T15:26:09.493657Z node 1 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: PathErrorUnknown, path: Root/.metadata/script_executions 2026-01-08T15:26:09.598526Z node 2 :TX_CONVEYOR ERROR: log.cpp:841: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2026-01-08T15:26:09.921995Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: /home/runner/.ya/build/build_root/663n/003ea6/r3tmp/yandexr7ua12.tmp 2026-01-08T15:26:09.922020Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: /home/runner/.ya/build/build_root/663n/003ea6/r3tmp/yandexr7ua12.tmp 2026-01-08T15:26:09.922159Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:202: successfully initialized from file: /home/runner/.ya/build/build_root/663n/003ea6/r3tmp/yandexr7ua12.tmp 2026-01-08T15:26:09.922226Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2026-01-08T15:26:10.150862Z INFO: TTestServer started on Port 64385 GrpcPort 29656 PQClient connected to localhost:29656 2026-01-08T15:26:10.601718Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo 
unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2026-01-08T15:26:13.330378Z node 1 :METADATA_PROVIDER ERROR: log.cpp:841: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7593012418718617454:2259];send_to=[0:7307199536658146131:7762515]; 2026-01-08T15:26:13.330448Z node 1 :METADATA_PROVIDER ERROR: log.cpp:841: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2026-01-08T15:26:17.380026Z node 1 :KQP_PROXY ERROR: kqp_proxy_service.cpp:1359: TraceId: "01kef3e7fh8pe44d9k4ef3b3hz", Request deadline has expired for 1.488682s seconds 2026-01-08T15:26:17.437542Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7593012457373324318:2347], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
<main>: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:26:17.437661Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7593012457373324305:2343], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
<main>: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:26:17.437837Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:263: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
<main>: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:26:17.441501Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7593012457373324334:2349], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
<main>: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:26:17.441927Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7593012456481020364:2310], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
<main>: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:26:17.442035Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7593012456481020350:2307], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
<main>: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:26:17.442080Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:263: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
<main>: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:26:17.441567Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:263: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
<main>: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:26:17.442552Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7593012456481020388:2312], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
<main>: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:26:17.442609Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:263: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
<main>: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:26:17.447362Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715661:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2026-01-08T15:26:17.450431Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7593012457373324358:2351], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
<main>: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:26:17.450509Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:263: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
<main>: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:26:17.454448Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7593012457373324361:2352], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
<main>: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:26:17.454534Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:263: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
<main>: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:26:17.557050Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7593012457373324323:2348], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
<main>: Error: Transaction 281474976715661 completed, doublechecking } 2026-01-08T15:26:17.882440Z node 1 :TX_PROXY ERROR: schemereq.cpp:624: Actor# [1:7593012457373324411:3000] txid# 281474976715662, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 44], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2026-01-08T15:26:17.895383Z node 2 :TX_PROXY ERROR: schemereq.cpp:624: Actor# [2:7593012456481020390:2140] txid# 281474976720657, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exists but creating right now (id: [OwnerId: 72057594046644480, LocalPathId: 44], type: EPathTypeResourcePool, state: EPathStateCreate)" severity: 1 } 2026-01-08T15:26:17.902989Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:123: NotifyTxCompletion, unknown transaction, txId: 281474976715661, at schemeshard: 72057594046644480 2026-01-08T15:26:17.907786Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7593012456481020387:2311], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
<main>: Error: Transaction 281474976715661 completed, doublechecking } 2026-01-08T15:26:18.059538Z node 2 :TX_PROXY ERROR: schemereq.cpp:624: Actor# [2:7593012456481020417:2146] txid# 281474976720658, issues: { message: "Ch ... htTotal: 0 MessagesInflight: 0 } 2026-01-08T15:28:07.829520Z :NOTICE: [/Root] [/Root] [c5c24028-86183ab8-4e784e4f-adbda1fc] Aborting read session. Description: SessionClosed { Status: ABORTED Issues: "
<main>: Error: Aborted " } 2026-01-08T15:28:07.833573Z node 5 :PQ_READ_PROXY DEBUG: read_session_actor.cpp:122: session cookie 1 consumer shared/user session shared/user_5_1_1283558104710942948_v1 grpc read done: success# 1, data# { read { } } 2026-01-08T15:28:07.833675Z node 5 :PQ_READ_PROXY DEBUG: read_session_actor.cpp:1850: session cookie 1 consumer shared/user session shared/user_5_1_1283558104710942948_v1 got read request: guid# 4ebb08f3-bf461420-1060e2f4-9e9481e 2026-01-08T15:28:07.833908Z node 5 :PQ_READ_PROXY INFO: read_session_actor.cpp:92: session cookie 1 consumer shared/user session shared/user_5_1_1283558104710942948_v1 grpc closed 2026-01-08T15:28:07.833958Z node 5 :PQ_READ_PROXY INFO: read_session_actor.cpp:383: session cookie 1 consumer shared/user session shared/user_5_1_1283558104710942948_v1 is DEAD 2026-01-08T15:28:07.834901Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:2321: [PQ: 72075186224037892] Destroy direct read session shared/user_5_1_1283558104710942948_v1 2026-01-08T15:28:07.834956Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:2809: [PQ: 72075186224037892] server disconnected, pipe [5:7593012923317087687:2585] destroyed 2026-01-08T15:28:07.835006Z node 6 :PQ_READ_PROXY DEBUG: caching_service.cpp:139: Direct read cache: server session deregistered: shared/user_5_1_1283558104710942948_v1 2026-01-08T15:28:07.834920Z node 5 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer__balancing.cpp:1661: [72075186224037893][rt3.dc1--test-topic] pipe [5:7593012923317087684:2582] disconnected. 2026-01-08T15:28:07.834954Z node 5 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1672: [72075186224037893][rt3.dc1--test-topic] pipe [5:7593012923317087684:2582] disconnected; active server actors: 1 2026-01-08T15:28:07.834974Z node 5 :PERSQUEUE_READ_BALANCER NOTICE: read_balancer__balancing.cpp:1681: [72075186224037893][rt3.dc1--test-topic] pipe [5:7593012923317087684:2582] client user disconnected session shared/user_5_1_1283558104710942948_v1 2026-01-08T15:28:07.912333Z node 6 :PERSQUEUE DEBUG: partition.cpp:2329: [72075186224037892][Partition][0][StateIdle] Process user action and tx events 2026-01-08T15:28:07.912379Z node 6 :PERSQUEUE DEBUG: partition.cpp:2388: [72075186224037892][Partition][0][StateIdle] Events: 0, PendingCommits: 0, PendingWrites: 0 2026-01-08T15:28:07.912396Z node 6 :PERSQUEUE DEBUG: partition.cpp:2337: [72075186224037892][Partition][0][StateIdle] Process user action and tx pending commits 2026-01-08T15:28:07.912418Z node 6 :PERSQUEUE DEBUG: partition.cpp:2388: [72075186224037892][Partition][0][StateIdle] Events: 0, PendingCommits: 0, PendingWrites: 0 2026-01-08T15:28:07.912434Z node 6 :PERSQUEUE DEBUG: partition.cpp:2347: [72075186224037892][Partition][0][StateIdle] Try persist 2026-01-08T15:28:08.022526Z node 6 :PERSQUEUE DEBUG: partition.cpp:2329: [72075186224037892][Partition][0][StateIdle] Process user action and tx events 2026-01-08T15:28:08.022575Z node 6 :PERSQUEUE DEBUG: partition.cpp:2388: [72075186224037892][Partition][0][StateIdle] Events: 0, PendingCommits: 0, PendingWrites: 0 2026-01-08T15:28:08.022594Z node 6 :PERSQUEUE DEBUG: partition.cpp:2337: [72075186224037892][Partition][0][StateIdle] Process user action and tx pending commits 2026-01-08T15:28:08.022618Z node 6 :PERSQUEUE DEBUG: partition.cpp:2388: [72075186224037892][Partition][0][StateIdle] Events: 0, PendingCommits: 0, PendingWrites: 0 2026-01-08T15:28:08.022635Z node 6 :PERSQUEUE DEBUG: partition.cpp:2347: [72075186224037892][Partition][0][StateIdle] Try persist 2026-01-08T15:28:09.908439Z :DEBUG: 
[db] [sessionid] [cluster] In Reconnect, ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2026-01-08T15:28:09.908489Z :DEBUG: [db] [sessionid] [cluster] New values: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2026-01-08T15:28:09.908525Z :DEBUG: [db] [sessionid] [cluster] Reconnecting session to cluster cluster in 0.000000s 2026-01-08T15:28:09.920559Z :DEBUG: [db] [sessionid] [cluster] Successfully connected. Initializing session 2026-01-08T15:28:09.928415Z :INFO: [db] [sessionid] [cluster] Server session id: 123-session-id-321 2026-01-08T15:28:09.928689Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2026-01-08T15:28:09.930775Z :INFO: [db] [sessionid] [cluster] Confirm partition stream create. Partition stream id: 1. Cluster: "TestCluster". Topic: "TestTopic". Partition: 1. Read offset: 13. Commit offset: 31 2026-01-08T15:28:09.937295Z :DEBUG: [db] [sessionid] [cluster] In Reconnect, ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2026-01-08T15:28:09.937341Z :DEBUG: [db] [sessionid] [cluster] New values: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2026-01-08T15:28:09.937384Z :DEBUG: [db] [sessionid] [cluster] Reconnecting session to cluster cluster in 0.000000s 2026-01-08T15:28:09.943151Z :DEBUG: [db] [sessionid] [cluster] Successfully connected. Initializing session 2026-01-08T15:28:09.954690Z :INFO: [db] [sessionid] [cluster] Server session id: 123-session-id-321 2026-01-08T15:28:09.954893Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2026-01-08T15:28:09.958620Z :INFO: [db] [sessionid] [cluster] Confirm partition stream create. Partition stream id: 1. Cluster: "TestCluster". Topic: "TestTopic". Partition: 1. Read offset: (NULL) 2026-01-08T15:28:09.960032Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 Post function 2026-01-08T15:28:09.962832Z :INFO: Error decompressing data: (TZLibDecompressorError) util/stream/zlib.cpp:143: inflate error(incorrect header check) 2026-01-08T15:28:09.962993Z :DEBUG: Decompression task done. Partition/PartitionSessionId: 1 (1-3) 2026-01-08T15:28:09.966463Z :DEBUG: Take Data. Partition 1. Read: {0, 0} (1-1) 2026-01-08T15:28:09.966541Z :DEBUG: Take Data. Partition 1. Read: {0, 1} (2-2) 2026-01-08T15:28:09.966575Z :DEBUG: Take Data. Partition 1. Read: {0, 2} (3-3) 2026-01-08T15:28:09.966636Z :DEBUG: [db] [sessionid] [cluster] The application data is transferred to the client. Number of messages 3, size 57 bytes DataReceived { PartitionStreamId: 1 PartitionId: 1 Message { DataDecompressionError: "(TZLibDecompressorError) util/stream/zlib.cpp:143: inflate error(incorrect header check)" Partition stream id: 1 Cluster: "TestCluster". Topic: "TestTopic" Partition: 1 PartitionKey: "" Information: { Offset: 1 SeqNo: 1 MessageGroupId: "src_id" CreateTime: 1970-01-01T00:00:00.042000Z WriteTime: 1970-01-01T00:00:00.042000Z Ip: "::1" UncompressedSize: 0 Meta: { } } } Message { Data: ..8 bytes.. Partition stream id: 1 Cluster: "TestCluster". Topic: "TestTopic" Partition: 1 PartitionKey: "" Information: { Offset: 2 SeqNo: 1 MessageGroupId: "src_id" CreateTime: 1970-01-01T00:00:00.042000Z WriteTime: 1970-01-01T00:00:00.042000Z Ip: "::1" UncompressedSize: 0 Meta: { } } } Message { Data: ..8 bytes.. Partition stream id: 1 Cluster: "TestCluster". 
Topic: "TestTopic" Partition: 1 PartitionKey: "" Information: { Offset: 3 SeqNo: 1 MessageGroupId: "src_id" CreateTime: 1970-01-01T00:00:00.042000Z WriteTime: 1970-01-01T00:00:00.042000Z Ip: "::1" UncompressedSize: 0 Meta: { } } } } 2026-01-08T15:28:10.050178Z :DEBUG: [db] [sessionid] [cluster] In Reconnect, ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2026-01-08T15:28:10.050216Z :DEBUG: [db] [sessionid] [cluster] New values: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2026-01-08T15:28:10.050248Z :DEBUG: [db] [sessionid] [cluster] Reconnecting session to cluster cluster in 0.000000s 2026-01-08T15:28:10.050633Z :DEBUG: [db] [sessionid] [cluster] Successfully connected. Initializing session 2026-01-08T15:28:10.051171Z :INFO: [db] [sessionid] [cluster] Server session id: 123-session-id-321 2026-01-08T15:28:10.051650Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2026-01-08T15:28:10.051955Z :INFO: [db] [sessionid] [cluster] Confirm partition stream create. Partition stream id: 1. Cluster: "TestCluster". Topic: "TestTopic". Partition: 1. Read offset: (NULL) 2026-01-08T15:28:10.052785Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2026-01-08T15:28:10.053015Z :DEBUG: Decompression task done. Partition/PartitionSessionId: 1 (1-1) 2026-01-08T15:28:10.053173Z :DEBUG: Take Data. Partition 1. Read: {0, 0} (1-1) 2026-01-08T15:28:10.053249Z :DEBUG: [db] [sessionid] [cluster] The application data is transferred to the client. Number of messages 1, size 8 bytes 2026-01-08T15:28:10.053341Z :DEBUG: [db] [sessionid] [cluster] Commit offsets [0, 2). Partition stream id: 1 2026-01-08T15:28:10.070054Z :DEBUG: [db] [sessionid] [cluster] In Reconnect, ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2026-01-08T15:28:10.070262Z :DEBUG: [db] [sessionid] [cluster] New values: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2026-01-08T15:28:10.070330Z :DEBUG: [db] [sessionid] [cluster] Reconnecting session to cluster cluster in 0.000000s 2026-01-08T15:28:10.082705Z :DEBUG: [db] [sessionid] [cluster] Successfully connected. Initializing session 2026-01-08T15:28:10.090626Z :INFO: [db] [sessionid] [cluster] Server session id: 123-session-id-321 2026-01-08T15:28:10.090844Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2026-01-08T15:28:10.094633Z :INFO: [db] [sessionid] [cluster] Confirm partition stream create. Partition stream id: 1. Cluster: "TestCluster". Topic: "TestTopic". Partition: 1. Read offset: (NULL) 2026-01-08T15:28:10.095639Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 Post function 2026-01-08T15:28:10.096199Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 Post function 2026-01-08T15:28:10.098472Z :DEBUG: Decompression task done. Partition/PartitionSessionId: 1 (2-2) 2026-01-08T15:28:10.098572Z :DEBUG: Decompression task done. Partition/PartitionSessionId: 1 (1-1) 2026-01-08T15:28:10.098656Z :DEBUG: Take Data. Partition 1. Read: {0, 0} (1-1) 2026-01-08T15:28:10.098714Z :DEBUG: Take Data. Partition 1. Read: {0, 0} (2-2) 2026-01-08T15:28:10.102507Z :DEBUG: [db] [sessionid] [cluster] The application data is transferred to the client. 
Number of messages 1, size 8 bytes 2026-01-08T15:28:10.102602Z :DEBUG: [db] [sessionid] [cluster] The application data is transferred to the client. Number of messages 1, size 8 bytes 2026-01-08T15:28:12.100158Z :DEBUG: [db] [sessionid] [cluster] In Reconnect, ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2026-01-08T15:28:12.100202Z :DEBUG: [db] [sessionid] [cluster] New values: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2026-01-08T15:28:12.100285Z :DEBUG: [db] [sessionid] [cluster] Reconnecting session to cluster cluster in 0.000000s 2026-01-08T15:28:12.101234Z :DEBUG: [db] [sessionid] [cluster] Successfully connected. Initializing session 2026-01-08T15:28:12.102405Z :INFO: [db] [sessionid] [cluster] Server session id: 123-session-id-321 2026-01-08T15:28:12.102664Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2026-01-08T15:28:12.103482Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2026-01-08T15:28:12.103664Z :DEBUG: Decompression task done. Partition/PartitionSessionId: 1 (1-1) 2026-01-08T15:28:12.103754Z :DEBUG: Take Data. Partition 1. Read: {0, 0} (1-1) 2026-01-08T15:28:12.103870Z :DEBUG: [db] [sessionid] [cluster] The application data is transferred to the client. Number of messages 1, size 8 bytes |85.4%| [TM] {BAZEL_UPLOAD} ydb/public/sdk/cpp/src/client/persqueue_public/ut/with_offset_ranges_mode_ut/unittest >> DataShardSnapshotIsolation::ReadWriteConflictOnCommitAfterAnotherCommit [GOOD] >> DataShardSnapshotIsolation::ReadWriteUpsertTwiceThenCommit >> TCreateAndDropViewTest::InvalidQuery [GOOD] >> TCreateAndDropViewTest::ParsingSecurityInvoker |85.4%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/datashard/ut_vacuum/ydb-core-tx-datashard-ut_vacuum |85.4%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/datashard/ut_vacuum/ydb-core-tx-datashard-ut_vacuum |85.4%| [LD] {RESULT} $(B)/ydb/core/tx/datashard/ut_vacuum/ydb-core-tx-datashard-ut_vacuum >> KeyValueGRPCService::SimpleExecuteTransactionWithWrongGenerationV2 [GOOD] >> KeyValueGRPCService::SimpleAcquireLockV1 >> Graph::CreateGraphShard |85.4%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/datashard/ut_sequence/ydb-core-tx-datashard-ut_sequence |85.4%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/datashard/ut_sequence/ydb-core-tx-datashard-ut_sequence |85.4%| [LD] {RESULT} $(B)/ydb/core/tx/datashard/ut_sequence/ydb-core-tx-datashard-ut_sequence >> DataShardStats::OneChannelStatsCorrect [GOOD] >> DataShardStats::MultipleChannelsStatsCorrect |85.4%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/kqp/ut/discovery/ydb-core-kqp-ut-discovery |85.4%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/kqp/ut/discovery/ydb-core-kqp-ut-discovery |85.4%| [LD] {RESULT} $(B)/ydb/core/kqp/ut/discovery/ydb-core-kqp-ut-discovery >> TestPurecalcFilter::NullValues [GOOD] |85.4%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/graph/shard/ut/ydb-core-graph-shard-ut |85.4%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/graph/shard/ut/ydb-core-graph-shard-ut |85.4%| [LD] {RESULT} $(B)/ydb/core/graph/shard/ut/ydb-core-graph-shard-ut >> CompressExecutor::TestExecutorMemUsage [GOOD] >> PersQueueSdkReadSessionTest::ReadSessionWithAbort >> TestPurecalcFilter::PartialPush >> ConfigGRPCService::FetchConfig [GOOD] >> ConfigGRPCService::CheckV1IsBlocked |85.4%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/fq/libs/row_dispatcher/ut/ydb-core-fq-libs-row_dispatcher-ut 
|85.4%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/fq/libs/row_dispatcher/ut/ydb-core-fq-libs-row_dispatcher-ut |85.4%| [LD] {RESULT} $(B)/ydb/core/fq/libs/row_dispatcher/ut/ydb-core-fq-libs-row_dispatcher-ut >> TabletService_ExecuteMiniKQL::MalformedParams [GOOD] >> TabletService_ExecuteMiniKQL::MalformedProgram >> Vacuum::Vacuum >> Graph::CreateGraphShard [GOOD] >> Graph::UseGraphShard >> DataShardReplication::ReplicatedTable-UseSink [GOOD] >> DataShardReplication::ApplyChangesToReplicatedTable >> Discovery::DelayedNameserviceResponse >> TSequence::CreateTableWithDefaultFromSequence >> GraphShard::NormalizeAndDownsample1 [GOOD] >> GraphShard::NormalizeAndDownsample2 [GOOD] >> GraphShard::NormalizeAndDownsample3 [GOOD] >> GraphShard::NormalizeAndDownsample4 [GOOD] >> GraphShard::NormalizeAndDownsample5 [GOOD] >> GraphShard::NormalizeAndDownsample6 [GOOD] >> GraphShard::CheckHistogramToPercentileConversions [GOOD] >> GraphShard::CreateGraphShard >> test.py::test[solomon-LabelColumns-default.txt] [GOOD] >> test.py::test[solomon-Subquery-default.txt] >> TTxDataShardBuildIndexScan::RunScan [GOOD] >> TTxDataShardBuildIndexScan::ShadowBorrowCompaction >> TCreateAndDropViewTest::ParsingSecurityInvoker [GOOD] >> TCreateAndDropViewTest::ListCreatedView >> Graph::UseGraphShard [GOOD] >> Graph::MemoryBackendFullCycle >> KeyValueGRPCService::SimpleAcquireLockV1 [GOOD] >> KeyValueGRPCService::SimpleAcquireLockV2 >> MediatorTimeCast::ReadStepSubscribe [GOOD] >> MediatorTimeCast::GranularTimecast >> KqpExecuter::TestSuddenAbortAfterReady [GOOD] >> TestPurecalcFilter::PartialPush [GOOD] >> DataShardSnapshotIsolation::ReadWriteUpsertTwiceThenCommit [GOOD] >> DataShardSnapshotIsolation::ReadWriteUpsertAgainOnCommitNoConflict >> TestPurecalcFilter::CompilationValidation |85.4%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/fq/libs/control_plane_proxy/ut/ydb-core-fq-libs-control_plane_proxy-ut |85.4%| [LD] {RESULT} $(B)/ydb/core/fq/libs/control_plane_proxy/ut/ydb-core-fq-libs-control_plane_proxy-ut >> GraphShard::CreateGraphShard [GOOD] |85.4%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/fq/libs/control_plane_proxy/ut/ydb-core-fq-libs-control_plane_proxy-ut |85.4%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/memory_controller/ut/ydb-core-memory_controller-ut |85.4%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/memory_controller/ut/ydb-core-memory_controller-ut |85.4%| [LD] {RESULT} $(B)/ydb/core/memory_controller/ut/ydb-core-memory_controller-ut >> ConfigGRPCService::CheckV1IsBlocked [GOOD] ------- [TS] {asan, default-linux-x86_64, release} ydb/core/graph/shard/ut/unittest >> GraphShard::CreateGraphShard [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2143] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2143] Leader for TabletID 72057594046678944 is [1:129:2154] sender: [1:130:2058] recipient: [1:112:2143] 2026-01-08T15:28:18.282965Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7938: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2026-01-08T15:28:18.283063Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7966: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2026-01-08T15:28:18.283105Z node 1 :FLAT_TX_SCHEMESHARD 
NOTICE: schemeshard_impl.cpp:7852: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2026-01-08T15:28:18.283143Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7868: OperationsProcessing config: using default configuration 2026-01-08T15:28:18.283185Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7874: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2026-01-08T15:28:18.283216Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7874: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2026-01-08T15:28:18.283275Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7998: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2026-01-08T15:28:18.283367Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_shred_manager.cpp:39: [RootShredManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, ShredInterval# 604800.000000s, ShredBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2026-01-08T15:28:18.284315Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:8069: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2026-01-08T15:28:18.284622Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2026-01-08T15:28:18.374027Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7755: Cannot subscribe to console configs 2026-01-08T15:28:18.374102Z node 1 :IMPORT WARN: schemeshard_import.cpp:440: Table profiles were not loaded 2026-01-08T15:28:18.385098Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2026-01-08T15:28:18.385448Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2026-01-08T15:28:18.385613Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2026-01-08T15:28:18.398038Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2026-01-08T15:28:18.398485Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:464: Clear TempDirsState with owners number: 0 2026-01-08T15:28:18.399218Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1383: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2026-01-08T15:28:18.399975Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2026-01-08T15:28:18.403347Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:160: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2026-01-08T15:28:18.403556Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_shred_manager.cpp:84: [RootShredManager] Stop 2026-01-08T15:28:18.404650Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2026-01-08T15:28:18.404717Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2026-01-08T15:28:18.404854Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2026-01-08T15:28:18.404901Z node 1 :FLAT_TX_SCHEMESHARD INFO: 
schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2026-01-08T15:28:18.404949Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:213: TTxServerlessStorageBilling.Complete 2026-01-08T15:28:18.405087Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:7111: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2026-01-08T15:28:18.518337Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot" OperationType: ESchemeOpMkDir MkDir { Name: ".sys" } Internal: true FailOnExist: false } TxId: 281474976710657 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:28:18.519071Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "auth_permissions" Type: EAuthPermissions } } TxId: 281474976710658 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:28:18.519183Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "auth_effective_permissions" Type: EAuthEffectivePermissions } } TxId: 281474976710659 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:28:18.519237Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "auth_users" Type: EAuthUsers } } TxId: 281474976710660 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:28:18.519280Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "pg_tables" Type: EPgTables } } TxId: 281474976710661 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:28:18.519325Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "ds_pdisks" Type: EPDisks } } TxId: 281474976710662 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:28:18.519359Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: 
ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "auth_group_members" Type: EAuthGroupMembers } } TxId: 281474976710663 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:28:18.519421Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "top_partitions_one_minute" Type: ETopPartitionsByCpuOneMinute } } TxId: 281474976710664 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:28:18.519469Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "top_queries_by_request_units_one_hour" Type: ETopQueriesByRequestUnitsOneHour } } TxId: 281474976710665 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:28:18.519533Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "ds_vslots" Type: EVSlots } } TxId: 281474976710666 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:28:18.519572Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "top_partitions_by_tli_one_hour" Type: ETopPartitionsByTliOneHour } } TxId: 281474976710667 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:28:18.519616Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "nodes" Type: ENodes } } TxId: 281474976710668 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:28:18.519670Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "top_queries_by_cpu_time_one_hour" Type: ETopQueriesByCpuTimeOneHour } } TxId: 281474976710669 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:28:18.519723Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "top_queries_by_read_bytes_one_hour" Type: ETopQueriesByReadBytesOneHour } } TxId: 
281474976710670 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:28:18.519769Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "top_queries_by_read_bytes_one_minute" Type: ETopQueriesByR ... 4379Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72075186234409546, cookie: 281474976720657 2026-01-08T15:28:19.624555Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72075186234409546, cookie: 281474976720657 2026-01-08T15:28:19.624710Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:665: Send tablet strongly msg operationId: 102:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:102 msg type: 269090816 2026-01-08T15:28:19.624829Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1824: TOperation RegisterRelationByTabletId, TxId: 102, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 102 at step: 5000039 FAKE_COORDINATOR: advance: minStep5000039 State->FrontStep: 5000038 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 102 at step: 5000039 2026-01-08T15:28:19.625166Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72075186234409546, cookie: 281474976720658 2026-01-08T15:28:19.625364Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:685: TTxOperationPlanStep Execute, stepId: 5000039, transactions count in step: 1, at schemeshard: 72057594046678944 2026-01-08T15:28:19.625497Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:689: TTxOperationPlanStep Execute, message: Transactions { TxId: 102 Coordinator: 72057594046316545 AckTo { RawX1: 136 RawX2: 4294969454 } } Step: 5000039 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2026-01-08T15:28:19.625539Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:715: TTxOperationPlanStep Execute operation part is already done, operationId: 102:0 2026-01-08T15:28:19.625590Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 102:1, at tablet# 72057594046678944 2026-01-08T15:28:19.625993Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2700: Change state for txid 102:1 128 -> 240 2026-01-08T15:28:19.626083Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 102:1, at tablet# 72057594046678944 2026-01-08T15:28:19.626247Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:600: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 38] was 8 2026-01-08T15:28:19.626407Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:580: DoUpdateTenant no hasChanges, pathId: [OwnerId: 72057594046678944, LocalPathId: 38], tenantLink: TSubDomainsLinks::TLink { DomainKey: [OwnerId: 72057594046678944, LocalPathId: 38], Generation: 2, ActorId:[1:806:2772], EffectiveACLVersion: 0, 
SubdomainVersion: 3, UserAttributesVersion: 1, TenantHive: 72075186233409546, TenantSysViewProcessor: 18446744073709551615, TenantStatisticsAggregator: 18446744073709551615, TenantGraphShard: 72075186234409549, TenantRootACL: }, subDomain->GetVersion(): 3, actualEffectiveACLVersion: 0, actualUserAttrsVersion: 1, tenantHive: 72075186233409546, tenantSysViewProcessor: 18446744073709551615, at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 102 2026-01-08T15:28:19.629115Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2026-01-08T15:28:19.629160Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 102, path id: [OwnerId: 72057594046678944, LocalPathId: 38] 2026-01-08T15:28:19.629415Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2026-01-08T15:28:19.629456Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:210:2211], at schemeshard: 72057594046678944, txId: 102, path id: 38 2026-01-08T15:28:19.629903Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:494: TTxOperationProgress Execute, operationId: 102:1, at schemeshard: 72057594046678944 2026-01-08T15:28:19.629963Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_alter_extsubdomain.cpp:760: [72057594046678944] TSyncHive, operationId 102:1, ProgressState, NeedSyncHive: 0 2026-01-08T15:28:19.630014Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2700: Change state for txid 102:1 240 -> 240 2026-01-08T15:28:19.630752Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6274: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 38 Version: 5 PathOwnerId: 72057594046678944, cookie: 102 2026-01-08T15:28:19.630865Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 38 Version: 5 PathOwnerId: 72057594046678944, cookie: 102 2026-01-08T15:28:19.630906Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:108: Operation in-flight, at schemeshard: 72057594046678944, txId: 102 2026-01-08T15:28:19.630945Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 102, pathId: [OwnerId: 72057594046678944, LocalPathId: 38], version: 5 2026-01-08T15:28:19.631000Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:611: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 38] was 9 2026-01-08T15:28:19.631099Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1679: TOperation IsReadyToNotify, TxId: 102, ready parts: 1/2, is published: true 2026-01-08T15:28:19.634884Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:494: TTxOperationProgress Execute, operationId: 102:1, at schemeshard: 72057594046678944 2026-01-08T15:28:19.634954Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:497: [72057594046678944] TDone opId# 102:1 ProgressState 2026-01-08T15:28:19.635040Z node 1 :FLAT_TX_SCHEMESHARD INFO: 
schemeshard__operation_side_effects.cpp:928: Part operation is done id#102:1 progress is 2/2 2026-01-08T15:28:19.635071Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1714: TOperation IsReadyToDone TxId: 102 ready parts: 2/2 2026-01-08T15:28:19.635118Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:928: Part operation is done id#102:1 progress is 2/2 2026-01-08T15:28:19.635146Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1714: TOperation IsReadyToDone TxId: 102 ready parts: 2/2 2026-01-08T15:28:19.635179Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1679: TOperation IsReadyToNotify, TxId: 102, ready parts: 2/2, is published: true 2026-01-08T15:28:19.635218Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1714: TOperation IsReadyToDone TxId: 102 ready parts: 2/2 2026-01-08T15:28:19.635277Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:995: Operation and all the parts is done, operation id: 102:0 2026-01-08T15:28:19.635347Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5577: RemoveTx for txid 102:0 2026-01-08T15:28:19.635562Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:611: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 38] was 8 2026-01-08T15:28:19.635614Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:995: Operation and all the parts is done, operation id: 102:1 2026-01-08T15:28:19.635638Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5577: RemoveTx for txid 102:1 2026-01-08T15:28:19.635729Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:611: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 38] was 7 2026-01-08T15:28:19.636509Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 102 TestModificationResult got TxId: 102, wait until txId: 102 TestWaitNotification wait txId: 102 2026-01-08T15:28:19.638625Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:213: tests -- TTxNotificationSubscriber for txId 102: send EvNotifyTxCompletion 2026-01-08T15:28:19.638680Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:259: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 102 2026-01-08T15:28:19.639164Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:123: NotifyTxCompletion, unknown transaction, txId: 102, at schemeshard: 72057594046678944 2026-01-08T15:28:19.639295Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:230: tests -- TTxNotificationSubscriber for txId 102: got EvNotifyTxCompletionResult 2026-01-08T15:28:19.639363Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:239: tests -- TTxNotificationSubscriber for txId 102: satisfy waiter [1:1007:2939] TestWaitNotification: OK eventTxId 102 2026-01-08T15:28:19.639908Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/db1" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2026-01-08T15:28:19.640144Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/db1" took 266us result status StatusSuccess 2026-01-08T15:28:19.640711Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: 
schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/db1" PathDescription { Self { Name: "db1" PathId: 38 SchemeshardId: 72057594046678944 PathType: EPathTypeExtSubDomain CreateFinished: true CreateTxId: 101 CreateStep: 5000038 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 5 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 5 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 3 SecurityStateVersion: 0 } } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 3 PlanResolution: 50 Coordinators: 72075186234409547 TimeCastBucketsPerMediator: 2 Mediators: 72075186234409548 SchemeShard: 72075186234409546 Hive: 72075186233409546 GraphShard: 72075186234409549 } DomainKey { SchemeShard: 72057594046678944 PathId: 38 } StoragePools { Name: "pool-1" Kind: "hdd" } PathsInside: 0 PathsLimit: 10000 ShardsInside: 5 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 38 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SecurityState { } SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 38 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/executer_actor/ut/unittest >> KqpExecuter::TestSuddenAbortAfterReady [GOOD] Test command err: Trying to start YDB, gRPC: 7168, MsgBus: 23185 ... waiting for SysViewsRoster update finished 2026-01-08T15:28:08.993223Z node 1 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:125: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, got undelivered to scheme cache: ActorUnknown 2026-01-08T15:28:09.198400Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:114:2161], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2026-01-08T15:28:09.198748Z node 1 :METADATA_PROVIDER ERROR: log.cpp:841: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2026-01-08T15:28:09.198918Z node 1 :METADATA_PROVIDER ERROR: log.cpp:841: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2026-01-08T15:28:09.502179Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2026-01-08T15:28:09.502309Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2026-01-08T15:28:09.695881Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1374: Notification cookie mismatch for subscription [1:34:2081] 1767886086008125 != 1767886086008129 2026-01-08T15:28:09.717255Z node 1 :IMPORT WARN: schemeshard_import.cpp:440: Table profiles were not loaded 2026-01-08T15:28:09.772524Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2026-01-08T15:28:09.952216Z node 1 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: PathErrorUnknown, path: Root/.metadata/script_executions ... waiting for SysViewsRoster update finished (done) 2026-01-08T15:28:10.699013Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2026-01-08T15:28:10.699086Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2026-01-08T15:28:10.699124Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2026-01-08T15:28:10.699833Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2026-01-08T15:28:10.711276Z node 1 :TX_CONVEYOR ERROR: log.cpp:841: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2026-01-08T15:28:11.285398Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2026-01-08T15:28:11.317759Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:123: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2026-01-08T15:28:11.415010Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:28:11.741907Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called 
at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:28:12.114657Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:28:12.439985Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:28:13.336338Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:1911:3517], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:28:13.336625Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:263: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:28:13.337697Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:1985:3536], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:28:13.337779Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:263: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:28:13.372890Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:28:13.621261Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:28:13.872246Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:28:14.110757Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:28:14.375386Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:28:14.680799Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:28:14.946966Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:28:15.257984Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715669:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:28:15.655794Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:2802:4182], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:28:15.655966Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:263: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:28:15.656401Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:2806:4186], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:28:15.656593Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:263: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:28:15.656782Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:2809:4189], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:28:15.667332Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715670:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2026-01-08T15:28:15.840588Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:2811:4191], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715670 completed, doublechecking } 2026-01-08T15:28:15.892640Z node 1 :TX_PROXY ERROR: schemereq.cpp:624: Actor# [1:2866:4227] txid# 281474976715671, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 53], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } Got NActors::IEventHandle [1:3127:4437] [1:2603:4035] Got NActors::IEventHandle [1:3128:4438] [1:2604:4036] Got NActors::IEventHandle [1:3129:4439] [1:2607:4038] Got NActors::IEventHandle [1:3130:4440] [1:2202:3719] Got NActors::IEventHandle [1:3131:4441] [1:2263:3769] Got NKikimr::TEvTablet::TEvCommit [1:1052:2885] [1:1143:2912 ... :2610:4040] [1:3152:4462] Got NKikimr::TEvTablet::TEvCommitResult [1:2693:4061] [1:2610:4040] Got NKikimr::TEvTxProcessing::TEvReadSetAck [1:1114:2921] [1:1123:2927] Got NKikimr::TEvTxProcessing::TEvReadSetAck [1:1114:2921] [1:2083:3620] Got NKikimr::TEvTxProcessing::TEvReadSetAck [1:1114:2921] [1:2646:4061] Got NKikimr::NGRpcService::TEvGrpcMon::TEvReportPeer [0:6011864930757669447:2188150] [1:8320808721877066593:7169396] Got NActors::TEvents::TEvWakeup [1:122:2168] [0:0:0] Got NActors::TEvents::TEvWakeup [1:20:2067] [0:0:0] Got NKikimr::NGRpcService::TGrpcRequestCall> [1:831:2726] [1:8320808721877066593:7169396] Got NActors::IEventHandle [1:3153:4463] [1:831:2726] Got NKikimr::NKqp::NPrivateEvents::TEvQueryRequest [1:8678280833929343339:121] [1:3153:4463] Got NKikimr::NKqp::NWorkload::TEvSubscribeOnPoolChanges [1:7742373267896299883:25708] [1:66:2113] Got NKikimr::NKqp::NScheduler::TEvAddDatabase [1:7235142148544295275:29292] [1:66:2113] Got NKikimr::NKqp::NPrivateEvents::TEvQueryRequest [1:3126:4436] [1:66:2113] Got NActors::IEventHandle [1:3155:4465] [1:101:2148] Got NKikimr::NKqp::NWorkload::TEvPlaceRequestIntoPool [1:7742373267896299883:25708] [1:3126:4436] Got NKikimr::NKqp::NScheduler::TEvAddPool [1:7235142148544295275:29292] [1:66:2113] Got NKikimr::TEvTxProxySchemeCache::TEvNavigateKeySet [0:7307199536658146131:7762515] [1:3155:4465] Got NActors::IEventHandle [1:3156:4466] [1:101:2148] Got NActors::IEventHandle [1:3157:4467] [1:105:2152] Got NActors::IEventHandle [1:3158:4468] [1:3156:4466] Got NKikimr::NKqp::NScheduler::TEvAddPool [1:7235142148544295275:29292] [1:66:2113] Got NKikimr::TEvTxProxySchemeCache::TEvNavigateKeySet [0:7307199536658146131:7762515] [1:3158:4468] Got NKikimr::TEvTxProxySchemeCache::TEvNavigateKeySetResult [1:3155:4465] [1:3157:4467] Got NActors::IEventHandle [1:3159:4469] [1:105:2152] Got NKikimr::NKqp::NWorkload::TEvPrivate::TEvFetchPoolResponse [1:101:2148] [1:3155:4465] Got NKikimr::NKqp::NWorkload::TEvPrivate::TEvUpdatePoolSubscription [1:2874:4234] [1:8320808721877066593:7169396] Got NKikimr::NKqp::NWorkload::TEvUpdatePoolInfo [1:66:2113] [1:2874:4234] Got NKikimr::TEvTxProxySchemeCache::TEvNavigateKeySetResult [1:3158:4468] [1:3159:4469] Got NKikimr::NKqp::NWorkload::TEvPrivate::TEvFetchPoolResponse [1:3156:4466] [1:3158:4468] Got NKikimr::NKqp::NWorkload::TEvPrivate::TEvResolvePoolResponse [1:7742373267896299883:25708] [1:3156:4466] Got NKikimr::NKqp::NWorkload::TEvPrivate::TEvResolvePoolResponse [1:2874:4234] [1:3156:4466] Got NKikimr::NKqp::NWorkload::TEvPrivate::TEvPlaceRequestIntoPoolResponse [1:7742373267896299883:25708] [1:2874:4234] Got NKikimr::NKqp::NWorkload::TEvContinueRequest [1:3126:4436] [1:2874:4234] Got NKikimr::NKqp::NPrivateEvents::TEvCompileRequest 
[1:8101253777303040363:6646889] [1:3126:4436] Got NActors::IEventHandle [1:3160:4470] [1:97:2144] Got NActors::IEventHandle [1:3162:4472] [1:3160:4470] Got NKikimr::TEvTxProxySchemeCache::TEvNavigateKeySet [0:7307199536658146131:7762515] [1:3162:4472] Got NKikimr::NSchemeBoard::NInternalEvents::TEvSyncRequest [1:2892:4249] [1:105:2152] Got NKikimr::NSchemeBoard::NInternalEvents::TEvSyncRequest [1:2892:4249] [1:105:2152] Got NKikimr::NSchemeBoard::NInternalEvents::TEvSyncVersionRequest [1:2896:4249] [1:2892:4249] Got NKikimr::NSchemeBoard::NInternalEvents::TEvSyncVersionRequest [1:2897:4249] [1:2892:4249] Got NKikimr::NSchemeBoard::NInternalEvents::TEvSyncVersionRequest [1:2898:4249] [1:2892:4249] Got NKikimr::NSchemeBoard::NInternalEvents::TEvSyncVersionRequest [1:2899:4249] [1:2896:4249] Got NKikimr::NSchemeBoard::NInternalEvents::TEvSyncVersionRequest [1:2901:4249] [1:2897:4249] Got NKikimr::NSchemeBoard::NInternalEvents::TEvSyncVersionRequest [1:24339059:0] [1:2899:4249] Got NKikimr::NSchemeBoard::NInternalEvents::TEvSyncVersionRequest [1:1099535966835:0] [1:2901:4249] Got NKikimr::NSchemeBoard::NInternalEvents::TEvSyncVersionRequest [1:2903:4249] [1:2898:4249] Got NKikimr::NSchemeBoard::NInternalEvents::TEvSyncVersionRequest [1:2199047594611:0] [1:2903:4249] Got NKikimr::NSchemeBoard::NInternalEvents::TEvSyncVersionResponse [1:2899:4249] [1:2:2049] Got NKikimr::NSchemeBoard::NInternalEvents::TEvSyncVersionResponse [1:2901:4249] [1:5:2052] Got NKikimr::NSchemeBoard::NInternalEvents::TEvSyncVersionResponse [1:2903:4249] [1:8:2055] Got NKikimr::NSchemeBoard::NInternalEvents::TEvSyncVersionResponse [1:2896:4249] [1:2899:4249] Got NKikimr::NSchemeBoard::NInternalEvents::TEvSyncVersionResponse [1:2897:4249] [1:2901:4249] Got NKikimr::NSchemeBoard::NInternalEvents::TEvSyncVersionResponse [1:2898:4249] [1:2903:4249] Got NKikimr::NSchemeBoard::NInternalEvents::TEvSyncVersionResponse [1:2892:4249] [1:2896:4249] Got NKikimr::NSchemeBoard::NInternalEvents::TEvSyncVersionResponse [1:2892:4249] [1:2897:4249] Got NKikimr::NSchemeBoard::NInternalEvents::TEvSyncResponse [1:105:2152] [1:2892:4249] Got NKikimr::NSchemeBoard::NInternalEvents::TEvSyncVersionResponse [1:2892:4249] [1:2898:4249] Got NKikimr::NSchemeBoard::NInternalEvents::TEvSyncVersionRequest [1:2896:4249] [1:2892:4249] Got NKikimr::NSchemeBoard::NInternalEvents::TEvSyncVersionRequest [1:2897:4249] [1:2892:4249] Got NKikimr::NSchemeBoard::NInternalEvents::TEvSyncVersionRequest [1:2898:4249] [1:2892:4249] Got NKikimr::NSchemeBoard::NInternalEvents::TEvSyncVersionRequest [1:2899:4249] [1:2896:4249] Got NKikimr::NSchemeBoard::NInternalEvents::TEvSyncVersionRequest [1:2901:4249] [1:2897:4249] Got NKikimr::NSchemeBoard::NInternalEvents::TEvSyncVersionRequest [1:24339059:0] [1:2899:4249] Got NKikimr::NSchemeBoard::NInternalEvents::TEvSyncVersionRequest [1:1099535966835:0] [1:2901:4249] Got NKikimr::NSchemeBoard::NInternalEvents::TEvSyncVersionRequest [1:2903:4249] [1:2898:4249] Got NKikimr::NSchemeBoard::NInternalEvents::TEvSyncVersionResponse [1:2899:4249] [1:2:2049] Got NKikimr::NSchemeBoard::NInternalEvents::TEvSyncVersionRequest [1:2199047594611:0] [1:2903:4249] Got NKikimr::NSchemeBoard::NInternalEvents::TEvSyncVersionResponse [1:2901:4249] [1:5:2052] Got NKikimr::NSchemeBoard::NInternalEvents::TEvSyncVersionResponse [1:2896:4249] [1:2899:4249] Got NKikimr::NSchemeBoard::NInternalEvents::TEvSyncVersionResponse [1:2903:4249] [1:8:2055] Got NKikimr::NSchemeBoard::NInternalEvents::TEvSyncVersionResponse [1:2897:4249] [1:2901:4249] Got 
NKikimr::NSchemeBoard::NInternalEvents::TEvSyncVersionResponse [1:2892:4249] [1:2896:4249] Got NKikimr::NSchemeBoard::NInternalEvents::TEvSyncVersionResponse [1:2898:4249] [1:2903:4249] Got NKikimr::NSchemeBoard::NInternalEvents::TEvSyncVersionResponse [1:2892:4249] [1:2897:4249] Got NKikimr::NSchemeBoard::NInternalEvents::TEvSyncResponse [1:105:2152] [1:2892:4249] Got NKikimr::NSchemeBoard::NInternalEvents::TEvSyncVersionResponse [1:2892:4249] [1:2898:4249] Got NActors::IEventHandle [1:3163:4473] [1:105:2152] Got NKikimr::TEvTxProxySchemeCache::TEvNavigateKeySetResult [1:3162:4472] [1:3163:4473] Got NActors::IEventHandle [1:3164:4474] [1:3162:4472] Got NKikimr::NStat::TEvStatistics::TEvGetStatistics [1:8534995652929746003:6644585] [1:3164:4474] Got NKikimr::TEvTxProxySchemeCache::TEvNavigateKeySet [0:7307199536658146131:7762515] [1:77:2124] Got NActors::IEventHandle [1:3165:4475] [1:105:2152] Got NKikimr::TEvTxProxySchemeCache::TEvNavigateKeySetResult [1:77:2124] [1:3165:4475] Got NKikimr::TEvTxProxySchemeCache::TEvNavigateKeySet [0:7307199536658146131:7762515] [1:77:2124] Got NActors::IEventHandle [1:3166:4476] [1:105:2152] Got NKikimr::TEvTxProxySchemeCache::TEvNavigateKeySetResult [1:77:2124] [1:3166:4476] Got NKikimr::NStat::TEvStatistics::TEvGetStatisticsResult [1:3164:4474] [1:77:2124] Got NKikimr::NKqp::TEvKqp::TEvContinueProcess [1:3160:4470] [1:8320808721877066593:7169396] Got NKikimr::NKqp::TEvKqp::TEvContinueProcess [1:3160:4470] [1:8320808721877066593:7169396] Got NKikimr::NKqp::TEvKqp::TEvContinueProcess [1:3160:4470] [1:8320808721877066593:7169396] Got NKikimr::NKqp::NPrivateEvents::TEvCompileResponse [1:97:2144] [1:3160:4470] Got NKikimr::NKqp::NPrivateEvents::TEvCompileResponse [1:3126:4436] [1:97:2144] Got NKikimr::TEvTxUserProxy::TEvProposeKqpTransaction [0:6014971197384587348:7762533] [1:3126:4436] Got NKikimr::NKqp::TEvKqpExecuter::TEvTxRequest [1:3167:4436] [1:68:2115] Got NKikimr::NKqp::NScheduler::TEvAddDatabase [1:7235142148544295275:29292] [1:3167:4436] Got NKikimr::NKqp::NScheduler::TEvAddPool [1:7235142148544295275:29292] [1:3167:4436] Got NKikimr::NKqp::NScheduler::TEvAddQuery [1:7235142148544295275:29292] [1:3167:4436] 2026-01-08T15:28:18.457632Z node 1 :KQP_EXECUTER ERROR: {KQPEX@kqp_executer_impl.h:1269} ActorId: [1:3167:4436] TxId: 281474976715673. Ctx: { TraceId: 01kef3j3m254qm2ga4d49xryz2, Database: /Root, SessionId: ydb://session/3?node_id=1&id=ZDE1YWMwYzItYTAwMzkwMy02ZjliMzA0ZS1lNzFiYTE2NQ==, PoolId: default, IsStreamingQuery: 0}. 
Runtime error Status# STATUS_CODE_UNSPECIFIED Issues# trace_id# Got NKikimr::TEvPipeCache::TEvUnlink [0:7521962744731429200:16741] [1:3167:4436] Got NKikimr::NKqp::TEvKqpExecuter::TEvTxResponse [1:3126:4436] [1:3167:4436] 2026-01-08T15:28:18.457964Z node 1 :KQP_SESSION WARN: {KQPSA@kqp_session_actor.cpp:3171} SessionId: ydb://session/3?node_id=1&id=ZDE1YWMwYzItYTAwMzkwMy02ZjliMzA0ZS1lNzFiYTE2NQ==, ActorId: [1:3126:4436], ActorState: ExecuteState, LegacyTraceId: 01kef3j3m254qm2ga4d49xryz2, Create QueryResponse for error on request, msg: status# STATUS_CODE_UNSPECIFIED issues# trace_id# Got NKikimr::NSysView::TEvSysView::TEvCollectQueryStats [1:6014387330472966483:2188150] [1:3126:4436] Got NKikimr::TEvTxUserProxy::TEvProposeKqpTransaction [0:6014971197384587348:7762533] [1:3126:4436] Got NKikimr::NKqp::NScheduler::TEvRemoveQuery [1:7235142148544295275:29292] [1:3167:4436] Got NKikimr::NKqp::NWorkload::TEvCleanupRequest [1:2874:4234] [1:3126:4436] Got NActors::TEvents::TEvPoison [1:3167:4436] [1:3167:4436] Got NKikimr::NKqp::NWorkload::TEvPrivate::TEvFinishRequestInPool [1:7742373267896299883:25708] [1:2874:4234] Got NKikimr::NKqp::NScheduler::TEvQueryResponse [1:3167:4436] [1:102:2149] Got NKikimr::NKqp::TEvKqpExecuter::TEvTxRequest [1:3168:4436] [1:68:2115] Got NKikimr::NKqp::NScheduler::TEvAddDatabase [1:7235142148544295275:29292] [1:3168:4436] Got NKikimr::NKqp::NWorkload::TEvCleanupResponse [1:3126:4436] [1:2874:4234] Got NKikimr::NKqp::NScheduler::TEvAddPool [1:7235142148544295275:29292] [1:3168:4436] Got NKikimr::NKqp::NScheduler::TEvAddQuery [1:7235142148544295275:29292] [1:3168:4436] 2026-01-08T15:28:18.458714Z node 1 :KQP_EXECUTER ERROR: {KQPEX@kqp_executer_impl.h:1269} ActorId: [1:3168:4436] TxId: 281474976715674. Ctx: { TraceId: 01kef3j3m254qm2ga4d49xryz2, Database: /Root, SessionId: ydb://session/3?node_id=1&id=ZDE1YWMwYzItYTAwMzkwMy02ZjliMzA0ZS1lNzFiYTE2NQ==, PoolId: default, IsStreamingQuery: 0}. 
Runtime error Status# STATUS_CODE_UNSPECIFIED Issues# trace_id# Got NKikimr::NKqp::NScheduler::TEvRemoveQuery [1:7235142148544295275:29292] [1:3168:4436] Got NKikimr::TEvPipeCache::TEvUnlink [0:7521962744731429200:16741] [1:3168:4436] Got NKikimr::NKqp::TEvKqpExecuter::TEvTxResponse [1:3126:4436] [1:3168:4436] 2026-01-08T15:28:18.458943Z node 1 :KQP_SESSION ERROR: {KQPSA@kqp_session_actor.cpp:3081} SessionId: ydb://session/3?node_id=1&id=ZDE1YWMwYzItYTAwMzkwMy02ZjliMzA0ZS1lNzFiYTE2NQ==, ActorId: [1:3126:4436], ActorState: CleanupState, LegacyTraceId: 01kef3j3m254qm2ga4d49xryz2, Failed to cleanup issues# trace_id# Got NKikimr::NKqp::NPrivateEvents::TEvQueryResponse [1:66:2113] [1:3126:4436] Got NKikimr::NKqp::NPrivateEvents::TEvQueryResponse [1:3153:4463] [1:66:2113] Got NActors::TEvents::TEvPoison [1:3154:4464] [1:66:2113] Got NActors::TEvents::TEvPoison [1:3168:4436] [1:3168:4436] Got NKikimr::NKqp::NScheduler::TEvQueryResponse [1:3168:4436] [1:102:2149] |85.4%| [TS] {BAZEL_UPLOAD} ydb/core/graph/shard/ut/unittest |85.4%| [TM] {BAZEL_UPLOAD} ydb/core/kqp/executer_actor/ut/unittest >> TabletService_ExecuteMiniKQL::MalformedProgram [GOOD] >> TabletService_ExecuteMiniKQL::DryRunEraseRow >> OutgoingStream::WithExternalLife [GOOD] >> TPollerActorTest::Registration [GOOD] >> TPollerActorTest::ReadNotification >> TPollerActorTest::ReadNotification [GOOD] >> TPollerActorTest::WriteNotification ------- [TM] {asan, default-linux-x86_64, release} ydb/services/config/ut/unittest >> ConfigGRPCService::CheckV1IsBlocked [GOOD] Test command err: 2026-01-08T15:27:32.865784Z node 1 :METADATA_PROVIDER WARN: log.cpp:841: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7593012778816388251:2258];send_to=[0:7307199536658146131:7762515]; 2026-01-08T15:27:32.912225Z node 1 :METADATA_PROVIDER ERROR: log.cpp:841: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2026-01-08T15:27:34.591424Z node 2 :BS_NODE WARN: {NWDC01@distconf.cpp:423} StateFunc too long Type# 268639238 Duration# 0.008495s 2026-01-08T15:27:35.256494Z node 1 :TX_CONVEYOR ERROR: log.cpp:841: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2026-01-08T15:27:35.301502Z node 1 :BS_NODE WARN: {NWDC01@distconf.cpp:423} StateFunc too long Type# 268639238 Duration# 0.006259s 2026-01-08T15:27:35.463452Z node 2 :BS_NODE WARN: {NWDC01@distconf.cpp:423} StateFunc too long Type# 268639238 Duration# 0.019095s 2026-01-08T15:27:35.689035Z node 1 :METADATA_PROVIDER ERROR: log.cpp:841: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2026-01-08T15:27:35.689677Z node 1 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: LookupError, path: Root/.metadata/script_executions 2026-01-08T15:27:35.715855Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2026-01-08T15:27:35.715942Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2026-01-08T15:27:35.757735Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2026-01-08T15:27:35.817190Z node 1 :IMPORT WARN: 
schemeshard_import.cpp:440: Table profiles were not loaded 2026-01-08T15:27:35.854828Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046644480, cookie: 281474976715657 2026-01-08T15:27:35.854878Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046644480, cookie: 281474976715657 2026-01-08T15:27:35.854967Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:494: TTxOperationProgress Execute, operationId: 281474976715692:0, at schemeshard: 72057594046644480 2026-01-08T15:27:35.855022Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_create_sysview.cpp:30: [72057594046644480] TCreateSysView::TPropose, opId: 281474976715692:0 ProgressState 2026-01-08T15:27:35.855054Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1723: TOperation IsReadyToPropose , TxId: 281474976715692 ready parts: 1/1 2026-01-08T15:27:35.864385Z node 1 :GRPC_SERVER NOTICE: grpc_request_proxy.cpp:372: Grpc request proxy started, nodeid# 1, serve as static node 2026-01-08T15:27:35.865418Z node 1 :GRPC_SERVER DEBUG: grpc_request_proxy.cpp:603: Subscribe to /Root 2026-01-08T15:27:35.865503Z node 1 :GRPC_SERVER NOTICE: grpc_request_proxy.cpp:372: Grpc request proxy started, nodeid# 1, serve as static node 2026-01-08T15:27:35.867670Z node 1 :GRPC_SERVER DEBUG: grpc_request_proxy.cpp:603: Subscribe to /Root 2026-01-08T15:27:35.902747Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:213: SysViewsRosterUpdate# [1:7593012791701290600:2388] at schemeshard: 72057594046644480 Handle TEvNotifyTxCompletionResult, mkdir '/Root/.sys' 2026-01-08T15:27:35.912597Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1792: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046644480 Flags: 2 } ExecLevel: 0 TxId: 281474976715692 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2026-01-08T15:27:35.913528Z node 1 :BS_CONTROLLER DEBUG: {BSCTXUDM01@disk_metrics.cpp:68} Updating disk status Record# {VDiskStatus { VDiskId { GroupID: 2181038080 GroupGeneration: 1 Ring: 0 Domain: 0 VDisk: 0 } NodeId: 1 PDiskId: 1 VSlotId: 1000 PDiskGuid: 123 Status: INIT_PENDING OnlyPhantomsRemain: false } VDiskStatus { VDiskId { GroupID: 2181038081 GroupGeneration: 1 Ring: 0 Domain: 0 VDisk: 0 } NodeId: 1 PDiskId: 1 VSlotId: 1001 PDiskGuid: 123 Status: INIT_PENDING OnlyPhantomsRemain: false } VDiskStatus { VDiskId { GroupID: 2181038082 GroupGeneration: 1 Ring: 0 Domain: 0 VDisk: 0 } NodeId: 1 PDiskId: 1 VSlotId: 1002 PDiskGuid: 123 Status: INIT_PENDING OnlyPhantomsRemain: false } VDiskStatus { VDiskId { GroupID: 2181038083 GroupGeneration: 1 Ring: 0 Domain: 0 VDisk: 0 } NodeId: 1 PDiskId: 1 VSlotId: 1003 PDiskGuid: 123 Status: INIT_PENDING OnlyPhantomsRemain: false } VDiskStatus { VDiskId { GroupID: 2181038084 GroupGeneration: 1 Ring: 0 Domain: 0 VDisk: 0 } NodeId: 1 PDiskId: 1 VSlotId: 1004 PDiskGuid: 123 Status: INIT_PENDING OnlyPhantomsRemain: false } } 2026-01-08T15:27:35.914265Z node 1 :GRPC_SERVER DEBUG: grpc_request_proxy.cpp:425: Got proxy service configuration 2026-01-08T15:27:35.914295Z node 1 :GRPC_SERVER INFO: grpc_request_proxy.cpp:408: Subscribed for config changes 2026-01-08T15:27:35.914309Z node 1 :GRPC_SERVER INFO: grpc_request_proxy.cpp:416: Updated app config 
2026-01-08T15:27:35.914350Z node 1 :GRPC_SERVER DEBUG: grpc_request_proxy.cpp:425: Got proxy service configuration 2026-01-08T15:27:35.914358Z node 1 :GRPC_SERVER INFO: grpc_request_proxy.cpp:408: Subscribed for config changes 2026-01-08T15:27:35.914364Z node 1 :GRPC_SERVER INFO: grpc_request_proxy.cpp:416: Updated app config 2026-01-08T15:27:35.917755Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:494: TTxOperationProgress Execute, operationId: 281474976715691:0, at schemeshard: 72057594046644480 2026-01-08T15:27:35.917962Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_create_sysview.cpp:30: [72057594046644480] TCreateSysView::TPropose, opId: 281474976715691:0 ProgressState 2026-01-08T15:27:35.918002Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1723: TOperation IsReadyToPropose , TxId: 281474976715691 ready parts: 1/1 2026-01-08T15:27:35.918128Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1792: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046644480 Flags: 2 } ExecLevel: 0 TxId: 281474976715691 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2026-01-08T15:27:35.918403Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:494: TTxOperationProgress Execute, operationId: 281474976715686:0, at schemeshard: 72057594046644480 2026-01-08T15:27:35.918420Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_create_sysview.cpp:30: [72057594046644480] TCreateSysView::TPropose, opId: 281474976715686:0 ProgressState 2026-01-08T15:27:35.918439Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1723: TOperation IsReadyToPropose , TxId: 281474976715686 ready parts: 1/1 2026-01-08T15:27:35.918522Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1792: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046644480 Flags: 2 } ExecLevel: 0 TxId: 281474976715686 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2026-01-08T15:27:35.918662Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:494: TTxOperationProgress Execute, operationId: 281474976715690:0, at schemeshard: 72057594046644480 2026-01-08T15:27:35.918673Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_create_sysview.cpp:30: [72057594046644480] TCreateSysView::TPropose, opId: 281474976715690:0 ProgressState 2026-01-08T15:27:35.918686Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1723: TOperation IsReadyToPropose , TxId: 281474976715690 ready parts: 1/1 2026-01-08T15:27:35.919418Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1792: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046644480 Flags: 2 } ExecLevel: 0 TxId: 281474976715690 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2026-01-08T15:27:35.920424Z node 1 :BS_CONTROLLER DEBUG: {BSCTXUDM01@disk_metrics.cpp:68} Updating disk status Record# {VDisksMetrics { VDiskId { GroupID: 2181038080 GroupGeneration: 1 Ring: 0 Domain: 0 VDisk: 0 } VSlotId { NodeId: 1 PDiskId: 1 VSlotId: 1000 } State: Initial Replicated: false DiskSpace: Green } } 2026-01-08T15:27:35.920688Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:494: TTxOperationProgress Execute, operationId: 
281474976715685:0, at schemeshard: 72057594046644480 2026-01-08T15:27:35.920725Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_create_sysview.cpp:30: [72057594046644480] TCreateSysView::TPropose, opId: 281474976715685:0 ProgressState 2026-01-08T15:27:35.920756Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1723: TOperation IsReadyToPropose , TxId: 281474976715685 ready parts: 1/1 2026-01-08T15:27:35.920886Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1792: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046644480 Flags: 2 } ExecLevel: 0 TxId: 281474976715685 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2026-01-08T15:27:35.921529Z node 1 :BS_CONTROLLER DEBUG: {BSCTXUDM01@disk_metrics.cpp:68} Updating disk status Record# {VDisksMetrics { VDiskId { GroupID: 2181038081 GroupGeneration: 1 Ring: 0 Domain: 0 VDisk: 0 } VSlotId { NodeId: 1 PDiskId: 1 VSlotId: 1001 } State: Initial Replicated: false DiskSpace: Green } } 2026-01-08T15:27:35.921682Z node 1 :BS_CONTROLLER DEBUG: {BSCTXUDM01@disk_metrics.cpp:68} Updating disk status Record# {VDisksMetrics { VDiskId { GroupID: 2181038082 GroupGeneration: 1 Ring: 0 Domain: 0 VDisk: 0 } VSlotId { NodeId: 1 PDiskId: 1 VSlotId: 1002 } State: Initial Replicated: false DiskSpace: Green } } 2026-01-08T15:27:35.921767Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:494: TTxOperationProgress Execute, operationId: 281474976715689:0, at schemeshard: 72057594046644480 2026-01-08T15:27:35.921788Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_create_sysview.cpp:30: [72057594046644480] TCreateSysView::TPropose, opId: 281474976715689:0 ProgressState 2026-01-08T15:27:35.921824Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1723: TOperation IsReadyToPropose , TxId: 281474976715689 ready parts: 1/1 2026-01-08T15:27:35.921920Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1792: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046644480 Flags: 2 } ExecLevel: 0 TxId: 281474976715689 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2026-01-08T15:27:35.922102Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:494: TTxOperationProgress Execute, operationId: 281474976715684:0, at schemeshard: 72057594046644480 2026-01-08T15:27:35.922118Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_create_sysview.cpp:30: [72057594046644480] TCreateSysView::TPropose, opId: 281474976715684:0 ProgressState 2026-01-08T15:27:35.922139Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1723: TOperation IsReadyToPropose , TxId: 281474976715684 ready parts: 1/1 2026-01-08T15:27:35.922189Z node 1 :FLAT_T ... 
rsion: 6 PathOwnerId: 72057594046644480, cookie: 281474976715657 2026-01-08T15:28:17.129683Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046644480, txId: 281474976715657 2026-01-08T15:28:17.129709Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046644480, txId: 281474976715657, pathId: [OwnerId: 72057594046644480, LocalPathId: 1], version: 6 2026-01-08T15:28:17.129732Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:611: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046644480, LocalPathId: 1] was 2 2026-01-08T15:28:17.129824Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046644480, txId: 281474976715657, subscribers: 1 2026-01-08T15:28:17.129842Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:212: TTxAckPublishToSchemeBoard Notify send TEvNotifyTxCompletionResult, at schemeshard: 72057594046644480, to actorId: [1:7593012971125618311:2305] 2026-01-08T15:28:17.131031Z node 1 :GRPC_SERVER DEBUG: grpc_request_proxy.cpp:535: SchemeBoardUpdate /Root 2026-01-08T15:28:17.131179Z node 1 :GRPC_SERVER DEBUG: grpc_request_proxy.cpp:564: Can't update SecurityState for /Root - no PublicKeys 2026-01-08T15:28:17.131191Z node 1 :GRPC_SERVER DEBUG: grpc_request_proxy.cpp:535: SchemeBoardUpdate /Root 2026-01-08T15:28:17.131233Z node 1 :GRPC_SERVER DEBUG: grpc_request_proxy.cpp:564: Can't update SecurityState for /Root - no PublicKeys 2026-01-08T15:28:17.133736Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046644480, cookie: 281474976715657 2026-01-08T15:28:17.172269Z node 1 :GRPC_SERVER DEBUG: grpc_request_proxy.cpp:641: Got grpc request# ReplaceConfigRequest, traceId# 01kef3j2rkdbs7psm06zkptk85, sdkBuildInfo# undef, state# AS_NOT_PERFORMED, database# undef, peer# ipv6:[::1]:45500, grpcInfo# grpc-c++/1.54.3 grpc-c/31.0.0 (linux; chttp2), timeout# undef 2026-01-08T15:28:17.194119Z node 1 :BS_CONTROLLER DEBUG: {BSCTXUDM01@disk_metrics.cpp:68} Updating disk status Record# {PDisksMetrics { PDiskId: 1 AvailableSize: 34225520640 TotalSize: 34359738368 MaxReadThroughput: 127000000 MaxWriteThroughput: 127000000 NonRealTimeMs: 0 SlowDeviceMs: 0 MaxIOPS: 125 EnforcedDynamicSlotSize: 5670699008 State: Normal SlotSizeInUnits: 0 PDiskUsage: 0.098231827111984277 } } 2026-01-08T15:28:17.203599Z node 1 :BS_CONTROLLER DEBUG: {BSCTXCC01@config_cmd.cpp:433} Execute TEvControllerConfigRequest Request# {Command { GetInterfaceVersion { } } } 2026-01-08T15:28:17.204205Z node 1 :BS_CONTROLLER DEBUG: {BSC24@console_interaction.cpp:224} Console replace config request Request# {ClusterYaml: "\nmetadata:\n kind: MainConfig\n cluster: \"\"\n version: 0\n\nconfig:\n host_configs:\n - host_config_id: 1\n drive:\n - path: SectorMap:1:64\n type: SSD\n - path: SectorMap:2:64\n type: SSD\n - host_config_id: 2\n drive:\n - path: SectorMap:3:64\n type: SSD\n hosts:\n - host: ::1\n port: 12001\n host_config_id: 2\n feature_flags:\n switch_to_config_v2: true\n" DedicatedConfigMode: false AllowUnknownFields: false BypassMetadataChecks: false PeerName: "ipv6:[::1]:45500" UserToken: "\n\014root@builtin\022\030\022\026\n\024all-users@well-known\032\014root@builtin\"\007Builtin*\017**** (B6C6F477)0\000" } 2026-01-08T15:28:17.205636Z 
node 1 :BS_CONTROLLER DEBUG: {BSC36@console_interaction.cpp:407} Sending TEvControllerValidateConfigRequest to console ConsolePipe# [1:7593012966830650439:2454] 2026-01-08T15:28:17.221071Z node 1 :BS_CONTROLLER DEBUG: {BSC27@console_interaction.cpp:478} Console validate config response Response# {Status: ConfigIsValid YAML: "\nmetadata:\n kind: MainConfig\n cluster: \"\"\n version: 1\n\nconfig:\n host_configs:\n - host_config_id: 1\n drive:\n - path: SectorMap:1:64\n type: SSD\n - path: SectorMap:2:64\n type: SSD\n - host_config_id: 2\n drive:\n - path: SectorMap:3:64\n type: SSD\n hosts:\n - host: ::1\n port: 12001\n host_config_id: 2\n feature_flags:\n switch_to_config_v2: true\n" } 2026-01-08T15:28:17.228958Z node 1 :BS_CONTROLLER DEBUG: {BSCTXCC01@config_cmd.cpp:433} Execute TEvControllerConfigRequest Request# {Command { DefineHostConfig { HostConfigId: 1 Drive { Path: "SectorMap:1:64" Type: SSD } Drive { Path: "SectorMap:2:64" Type: SSD } ItemConfigGeneration: 1 } } Command { DefineHostConfig { HostConfigId: 2 Drive { Path: "SectorMap:3:64" Type: SSD } } } Command { DefineBox { BoxId: 999 Host { Key { Fqdn: "::1" IcPort: 12001 } HostConfigId: 2 } ItemConfigGeneration: 1 } } Rollback: true StorageConfig { type_url: "type.googleapis.com/NKikimrBlobStorage.TStorageConfig" value: "\032\234\001\nL\n\030\010\001\020\001\032\016SectorMap:1:64(\0010\001\022\030\n\n\010\000\020\001\030\000 \000(\000\022\010\010\001\020\001\030\000 \001\030\000\032\024\010\000\020\001\030\000\"\014\n\n\n\010\010\001\020\001\030\000 \001 \001R*\010\001\032\022\n\016SectorMap:1:64\020\001\032\022\n\016SectorMap:2:64\020\001R\026\010\002\032\022\n\016SectorMap:3:64\020\001Z\010\010\001\"\004\020\002\030\001*,\n\003::1\020\341]\030\001\" R\007default\362\001\020generated-rack-1\240\2111\001J\010\010\001\022\004\010\001\030\001R\010\010\001\022\004\010\001\030\001Z\010\010\001\022\004\010\001\030\001" } } 2026-01-08T15:28:17.229374Z node 1 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:356} Create new pdisk PDiskId# 1:1000 Path# SectorMap:1:64 2026-01-08T15:28:17.229389Z node 1 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:356} Create new pdisk PDiskId# 1:1001 Path# SectorMap:2:64 2026-01-08T15:28:17.229405Z node 1 :BS_CONTROLLER NOTICE: {BSCFP03@config_fit_pdisks.cpp:363} PDisk to remove: PDiskId# 1:1 2026-01-08T15:28:17.229502Z node 1 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:356} Create new pdisk PDiskId# 1:1002 Path# SectorMap:3:64 2026-01-08T15:28:17.229514Z node 1 :BS_CONTROLLER NOTICE: {BSCFP03@config_fit_pdisks.cpp:363} PDisk to remove: PDiskId# 1:1001 2026-01-08T15:28:17.229529Z node 1 :BS_CONTROLLER NOTICE: {BSCFP03@config_fit_pdisks.cpp:363} PDisk to remove: PDiskId# 1:1 2026-01-08T15:28:17.229544Z node 1 :BS_CONTROLLER NOTICE: {BSCFP03@config_fit_pdisks.cpp:363} PDisk to remove: PDiskId# 1:1000 2026-01-08T15:28:17.234608Z node 1 :TX_CONVEYOR ERROR: log.cpp:841: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2026-01-08T15:28:17.236861Z node 1 :BS_CONTROLLER DEBUG: {BSC14@bsc.cpp:624} ApplyStorageConfig Request# {Request { Command { DefineHostConfig { HostConfigId: 1 Drive { Path: "SectorMap:1:64" Type: SSD } Drive { Path: "SectorMap:2:64" Type: SSD } ItemConfigGeneration: 1 } } Command { DefineHostConfig { HostConfigId: 2 Drive { Path: "SectorMap:3:64" Type: SSD } } } Command { DefineBox { BoxId: 999 Host { Key { Fqdn: "::1" IcPort: 12001 } HostConfigId: 2 } ItemConfigGeneration: 1 } } StorageConfig { type_url: 
"type.googleapis.com/NKikimrBlobStorage.TStorageConfig" value: "\032\234\001\nL\n\030\010\001\020\001\032\016SectorMap:1:64(\0010\001\022\030\n\n\010\000\020\001\030\000 \000(\000\022\010\010\001\020\001\030\000 \001\030\000\032\024\010\000\020\001\030\000\"\014\n\n\n\010\010\001\020\001\030\000 \001 \001R*\010\001\032\022\n\016SectorMap:1:64\020\001\032\022\n\016SectorMap:2:64\020\001R\026\010\002\032\022\n\016SectorMap:3:64\020\001Z\010\010\001\"\004\020\002\030\001*,\n\003::1\020\341]\030\001\" R\007default\362\001\020generated-rack-1\240\2111\001J\010\010\001\022\004\010\001\030\001R\010\010\001\022\004\010\001\030\001Z\010\010\001\022\004\010\001\030\001" } } } 2026-01-08T15:28:17.238522Z node 1 :BS_CONTROLLER DEBUG: {BSCTXCC01@config_cmd.cpp:433} Execute TEvControllerConfigRequest Request# {Command { DefineHostConfig { HostConfigId: 1 Drive { Path: "SectorMap:1:64" Type: SSD } Drive { Path: "SectorMap:2:64" Type: SSD } ItemConfigGeneration: 1 } } Command { DefineHostConfig { HostConfigId: 2 Drive { Path: "SectorMap:3:64" Type: SSD } } } Command { DefineBox { BoxId: 999 Host { Key { Fqdn: "::1" IcPort: 12001 } HostConfigId: 2 } ItemConfigGeneration: 1 } } StorageConfig { type_url: "type.googleapis.com/NKikimrBlobStorage.TStorageConfig" value: "\032\234\001\nL\n\030\010\001\020\001\032\016SectorMap:1:64(\0010\001\022\030\n\n\010\000\020\001\030\000 \000(\000\022\010\010\001\020\001\030\000 \001\030\000\032\024\010\000\020\001\030\000\"\014\n\n\n\010\010\001\020\001\030\000 \001 \001R*\010\001\032\022\n\016SectorMap:1:64\020\001\032\022\n\016SectorMap:2:64\020\001R\026\010\002\032\022\n\016SectorMap:3:64\020\001Z\010\010\001\"\004\020\002\030\001*,\n\003::1\020\341]\030\001\" R\007default\362\001\020generated-rack-1\240\2111\001J\010\010\001\022\004\010\001\030\001R\010\010\001\022\004\010\001\030\001Z\010\010\001\022\004\010\001\030\001" } } 2026-01-08T15:28:17.238927Z node 1 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:356} Create new pdisk PDiskId# 1:1000 Path# SectorMap:1:64 2026-01-08T15:28:17.238950Z node 1 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:356} Create new pdisk PDiskId# 1:1001 Path# SectorMap:2:64 2026-01-08T15:28:17.238971Z node 1 :BS_CONTROLLER NOTICE: {BSCFP03@config_fit_pdisks.cpp:363} PDisk to remove: PDiskId# 1:1 2026-01-08T15:28:17.239088Z node 1 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:356} Create new pdisk PDiskId# 1:1002 Path# SectorMap:3:64 2026-01-08T15:28:17.239100Z node 1 :BS_CONTROLLER NOTICE: {BSCFP03@config_fit_pdisks.cpp:363} PDisk to remove: PDiskId# 1:1001 2026-01-08T15:28:17.239108Z node 1 :BS_CONTROLLER NOTICE: {BSCFP03@config_fit_pdisks.cpp:363} PDisk to remove: PDiskId# 1:1 2026-01-08T15:28:17.239115Z node 1 :BS_CONTROLLER NOTICE: {BSCFP03@config_fit_pdisks.cpp:363} PDisk to remove: PDiskId# 1:1000 2026-01-08T15:28:17.242701Z node 1 :BS_CONTROLLER ERROR: {BSC15@bsc.cpp:687} TEvControllerConfigResponse Response# {Status { Success: true } Status { Success: true } Status { Success: true } ErrorDescription: "failed to remove PDisk# 1:1 as it has active VSlots" ConfigTxSeqNo: 4 } 2026-01-08T15:28:17.247195Z node 1 :BS_CONTROLLER DEBUG: {BSC20@console_interaction.cpp:199} Console commit config response Response# {Status: Committed } response: ready: true status: SUCCESS result { [type.googleapis.com/Ydb.Config.ReplaceConfigResult] { } } 2026-01-08T15:28:17.266419Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:8101: Got new config: FeatureFlags { SwitchToConfigV2: true } QueryServiceConfig { 
AvailableExternalDataSources: "ObjectStorage" }
2026-01-08T15:28:17.266457Z node 1 :IMPORT WARN: schemeshard_import.cpp:440: Table profiles were not loaded
2026-01-08T15:28:17.267552Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:8069: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# ObjectStorage
2026-01-08T15:28:17.267590Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_shred_manager.cpp:84: [RootShredManager] Stop
2026-01-08T15:28:17.279167Z node 1 :GRPC_SERVER DEBUG: grpc_request_proxy.cpp:641: Got grpc request# FetchConfigRequest, traceId# 01kef3j2vabgg57y3xrm960e9g, sdkBuildInfo# undef, state# AS_NOT_PERFORMED, database# undef, peer# ipv6:[::1]:45500, grpcInfo# grpc-c++/1.54.3 grpc-c/31.0.0 (linux; chttp2), timeout# undef
2026-01-08T15:28:17.280221Z node 1 :BS_CONTROLLER DEBUG: {BSCTXCC01@config_cmd.cpp:433} Execute TEvControllerConfigRequest Request# {Command { GetInterfaceVersion { } } }
2026-01-08T15:28:17.311532Z node 1 :GRPC_SERVER DEBUG: grpc_request_proxy.cpp:641: Got grpc request# ReplaceConfigRequest, traceId# 01kef3j2we8gr9bjzvvqeq4e6d, sdkBuildInfo# undef, state# AS_NOT_PERFORMED, database# undef, peer# ipv6:[::1]:45500, grpcInfo# grpc-c++/1.54.3 grpc-c/31.0.0 (linux; chttp2), timeout# undef
|85.4%| [TM] {BAZEL_UPLOAD} ydb/services/config/ut/unittest
>> VDiskBalancing::TestRandom_Block42 [GOOD]
|85.4%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/sequenceproxy/ut/ydb-core-tx-sequenceproxy-ut
|85.4%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/sequenceproxy/ut/ydb-core-tx-sequenceproxy-ut
|85.4%| [TS] {RESULT} ydb/core/graph/shard/ut/unittest
|85.4%| [TM] {RESULT} ydb/core/kqp/executer_actor/ut/unittest
|85.4%| [TM] {RESULT} ydb/services/config/ut/unittest
|85.5%| [LD] {RESULT} $(B)/ydb/core/tx/sequenceproxy/ut/ydb-core-tx-sequenceproxy-ut
>> TControlPlaneProxyCheckNegativePermissionsFailed::ShouldSendCreateQuery
>> Vacuum::Vacuum [GOOD]
>> TMemoryController::Counters
>> Vacuum::VacuumWithoutCompaction
>> DataShardReplication::ApplyChangesToReplicatedTable [GOOD]
>> DataShardReplication::ApplyChangesToCommonTable
|85.4%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/datashard/ut_export/ydb-core-tx-datashard-ut_export
|85.5%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/datashard/ut_export/ydb-core-tx-datashard-ut_export
|85.5%| [LD] {RESULT} $(B)/ydb/core/tx/datashard/ut_export/ydb-core-tx-datashard-ut_export
>> TControlPlaneProxyCheckNegativePermissionsFailed::ShouldSendCreateQuery [GOOD]
>> TControlPlaneProxyCheckNegativePermissionsFailed::ShouldSendListQueries
>> TListAllTopicsTests::PlainList [GOOD]
>> TListAllTopicsTests::RecursiveList
>> TControlPlaneProxyCheckNegativePermissionsFailed::ShouldSendListQueries [GOOD]
>> TControlPlaneProxyCheckNegativePermissionsFailed::ShouldSendDescribeQuery
>> TestPurecalcFilter::CompilationValidation [GOOD]
>> TCreateAndDropViewTest::ListCreatedView [GOOD]
>> TCreateAndDropViewTest::CreateSameViewTwice
>> test.py::test[solomon-Subquery-default.txt] [GOOD]
>> test.py::test[solomon-UnknownSetting-]
>> MediatorTimeCast::GranularTimecast [GOOD]
>> TSequence::CreateTableWithDefaultFromSequence [GOOD]
>> TSequence::SequencesIndex
>> TControlPlaneProxyCheckNegativePermissionsFailed::ShouldSendDescribeQuery [GOOD]
>> TControlPlaneProxyCheckNegativePermissionsFailed::ShouldSendGetQueryStatus
>> DescribeSchemaSecretsService::GroupGrants [GOOD]
>> DescribeSchemaSecretsService::BatchRequest
>> TestPurecalcFilter::Emtpy
>> TestPurecalcFilter::Emtpy [GOOD]
>> TestPurecalcFilter::Watermark
>> SequenceProxy::Basics
>> TControlPlaneProxyCheckNegativePermissionsFailed::ShouldSendGetQueryStatus [GOOD]
>> TControlPlaneProxyCheckNegativePermissionsFailed::ShouldSendModifyQuery
>> Discovery::DelayedNameserviceResponse [GOOD]
>> Discovery::SecondDiscovererRequestBeforeNameserviceResponse
>> KeyValueGRPCService::SimpleAcquireLockV2 [GOOD]
>> KeyValueGRPCService::SimpleRenameUnexistedKeyV1
>> TPollerActorTest::WriteNotification [GOOD]
>> TPollerActorTest::HangupNotification
>> ExportS3BufferTest::MinBufferSize [GOOD]
>> ExportS3BufferTest::MinBufferSizeWithCompression [GOOD]
>> ExportS3BufferTest::MinBufferSizeWithCompressionAndEncryption [GOOD]
>> TPollerActorTest::HangupNotification [GOOD]
>> Sticking::Check
------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/time_cast/ut/unittest >> MediatorTimeCast::GranularTimecast [GOOD]
Test command err:
... waiting for SysViewsRoster update finished
2026-01-08T15:28:15.579298Z node 1 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:125: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, got undelivered to scheme cache: ActorUnknown
2026-01-08T15:28:15.700961Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:114:2161], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2026-01-08T15:28:15.701336Z node 1 :METADATA_PROVIDER ERROR: log.cpp:841: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2026-01-08T15:28:15.701498Z node 1 :METADATA_PROVIDER ERROR: log.cpp:841: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2026-01-08T15:28:16.027094Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2026-01-08T15:28:16.027219Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2026-01-08T15:28:16.157916Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1374: Notification cookie mismatch for subscription [1:34:2081] 1767886092694422 != 1767886092694426 2026-01-08T15:28:16.168954Z node 1 :IMPORT WARN: schemeshard_import.cpp:440: Table profiles were not loaded 2026-01-08T15:28:16.217652Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2026-01-08T15:28:16.335301Z node 1 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: PathErrorUnknown, path: Root/.metadata/script_executions ... waiting for SysViewsRoster update finished (done) 2026-01-08T15:28:16.655944Z node 1 :TX_MEDIATOR_TIMECAST DEBUG: time_cast.cpp:922: Actor# [1:26:2073] HANDLE NKikimr::TEvMediatorTimecast::TEvSubscribeReadStep{ CoordinatorId# 72057594046316545 } 2026-01-08T15:28:16.656340Z node 1 :TX_MEDIATOR_TIMECAST DEBUG: time_cast.cpp:584: Actor# [1:26:2073] HANDLE EvClientConnected 2026-01-08T15:28:16.660350Z node 1 :TX_MEDIATOR_TIMECAST DEBUG: time_cast.cpp:999: Actor# [1:26:2073] HANDLE TEvSubscribeReadStepResult CoordinatorID: 72057594046316545 SeqNo: 1 LastAcquireStep: 0 NextAcquireStep: 1000 2026-01-08T15:28:16.675587Z node 1 :TX_CONVEYOR ERROR: log.cpp:841: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2026-01-08T15:28:16.791299Z node 1 :TX_MEDIATOR_TIMECAST DEBUG: time_cast.cpp:1049: Actor# [1:26:2073] HANDLE TEvSubscribeReadStepUpdate CoordinatorID: 72057594046316545 SeqNo: 1 NextAcquireStep: 1500 2026-01-08T15:28:16.879354Z node 1 :TX_MEDIATOR_TIMECAST DEBUG: time_cast.cpp:1049: Actor# [1:26:2073] HANDLE TEvSubscribeReadStepUpdate CoordinatorID: 72057594046316545 SeqNo: 1 NextAcquireStep: 2000 2026-01-08T15:28:17.064690Z node 1 :TX_MEDIATOR_TIMECAST DEBUG: time_cast.cpp:1049: Actor# [1:26:2073] HANDLE TEvSubscribeReadStepUpdate CoordinatorID: 72057594046316545 SeqNo: 1 NextAcquireStep: 3000 2026-01-08T15:28:17.202762Z node 1 :TX_MEDIATOR_TIMECAST DEBUG: time_cast.cpp:1049: Actor# [1:26:2073] HANDLE TEvSubscribeReadStepUpdate CoordinatorID: 72057594046316545 SeqNo: 1 NextAcquireStep: 4000 2026-01-08T15:28:17.359476Z node 1 :TX_MEDIATOR_TIMECAST DEBUG: time_cast.cpp:1049: Actor# [1:26:2073] HANDLE TEvSubscribeReadStepUpdate CoordinatorID: 72057594046316545 SeqNo: 1 NextAcquireStep: 5000 2026-01-08T15:28:17.546678Z node 1 :TX_MEDIATOR_TIMECAST DEBUG: time_cast.cpp:1049: Actor# [1:26:2073] HANDLE TEvSubscribeReadStepUpdate CoordinatorID: 72057594046316545 SeqNo: 1 NextAcquireStep: 6000 
2026-01-08T15:28:17.557353Z node 1 :TX_MEDIATOR_TIMECAST DEBUG: time_cast.cpp:970: Actor# [1:26:2073] HANDLE NKikimr::TEvMediatorTimecast::TEvWaitReadStep{ CoordinatorId# 72057594046316545 ReadStep# 8000 } 2026-01-08T15:28:17.704448Z node 1 :TX_MEDIATOR_TIMECAST DEBUG: time_cast.cpp:1049: Actor# [1:26:2073] HANDLE TEvSubscribeReadStepUpdate CoordinatorID: 72057594046316545 SeqNo: 1 NextAcquireStep: 7000 2026-01-08T15:28:17.851607Z node 1 :TX_MEDIATOR_TIMECAST DEBUG: time_cast.cpp:1049: Actor# [1:26:2073] HANDLE TEvSubscribeReadStepUpdate CoordinatorID: 72057594046316545 SeqNo: 1 NextAcquireStep: 8000 2026-01-08T15:28:17.854000Z node 1 :TX_MEDIATOR_TIMECAST DEBUG: time_cast.cpp:593: Actor# [1:26:2073] HANDLE EvClientDestroyed 2026-01-08T15:28:17.882852Z node 1 :TX_MEDIATOR_TIMECAST DEBUG: time_cast.cpp:584: Actor# [1:26:2073] HANDLE EvClientConnected 2026-01-08T15:28:17.883628Z node 1 :TX_MEDIATOR_TIMECAST DEBUG: time_cast.cpp:999: Actor# [1:26:2073] HANDLE TEvSubscribeReadStepResult CoordinatorID: 72057594046316545 SeqNo: 2 LastAcquireStep: 0 NextAcquireStep: 8000 2026-01-08T15:28:17.899561Z node 1 :TX_MEDIATOR_TIMECAST DEBUG: time_cast.cpp:970: Actor# [1:26:2073] HANDLE NKikimr::TEvMediatorTimecast::TEvWaitReadStep{ CoordinatorId# 72057594046316545 ReadStep# 13000 } 2026-01-08T15:28:18.002276Z node 1 :TX_MEDIATOR_TIMECAST DEBUG: time_cast.cpp:1049: Actor# [1:26:2073] HANDLE TEvSubscribeReadStepUpdate CoordinatorID: 72057594046316545 SeqNo: 2 NextAcquireStep: 8500 2026-01-08T15:28:18.097908Z node 1 :TX_MEDIATOR_TIMECAST DEBUG: time_cast.cpp:1049: Actor# [1:26:2073] HANDLE TEvSubscribeReadStepUpdate CoordinatorID: 72057594046316545 SeqNo: 2 NextAcquireStep: 9000 2026-01-08T15:28:18.244591Z node 1 :TX_MEDIATOR_TIMECAST DEBUG: time_cast.cpp:1049: Actor# [1:26:2073] HANDLE TEvSubscribeReadStepUpdate CoordinatorID: 72057594046316545 SeqNo: 2 NextAcquireStep: 10000 2026-01-08T15:28:18.391184Z node 1 :TX_MEDIATOR_TIMECAST DEBUG: time_cast.cpp:1049: Actor# [1:26:2073] HANDLE TEvSubscribeReadStepUpdate CoordinatorID: 72057594046316545 SeqNo: 2 NextAcquireStep: 11000 2026-01-08T15:28:18.538423Z node 1 :TX_MEDIATOR_TIMECAST DEBUG: time_cast.cpp:1049: Actor# [1:26:2073] HANDLE TEvSubscribeReadStepUpdate CoordinatorID: 72057594046316545 SeqNo: 2 NextAcquireStep: 12000 2026-01-08T15:28:18.664461Z node 1 :TX_MEDIATOR_TIMECAST DEBUG: time_cast.cpp:1049: Actor# [1:26:2073] HANDLE TEvSubscribeReadStepUpdate CoordinatorID: 72057594046316545 SeqNo: 2 NextAcquireStep: 13000 ... waiting for SysViewsRoster update finished 2026-01-08T15:28:21.957526Z node 2 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:125: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, got undelivered to scheme cache: ActorUnknown 2026-01-08T15:28:21.964190Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:125: [WorkloadService] [Service] Failed to discover tenant nodes 2026-01-08T15:28:21.966389Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [2:300:2344], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2026-01-08T15:28:21.966802Z node 2 :METADATA_PROVIDER ERROR: log.cpp:841: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2026-01-08T15:28:21.966946Z node 2 :METADATA_PROVIDER ERROR: log.cpp:841: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2026-01-08T15:28:22.229117Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2026-01-08T15:28:22.229270Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2026-01-08T15:28:22.252634Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1374: Notification cookie mismatch for subscription [2:34:2081] 1767886099091865 != 1767886099091868 2026-01-08T15:28:22.261408Z node 2 :IMPORT WARN: schemeshard_import.cpp:440: Table profiles were not loaded 2026-01-08T15:28:22.307773Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2026-01-08T15:28:22.379698Z node 2 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: PathErrorUnknown, path: Root/.metadata/script_executions ... waiting for SysViewsRoster update finished (done) 2026-01-08T15:28:22.813222Z node 2 :TX_MEDIATOR_TIMECAST DEBUG: time_cast.cpp:460: Actor# [2:26:2073] HANDLE {TEvRegisterTablet TabletId# 72057594047365120 ProcessingParams { Version: 0 PlanResolution: 500 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 1 Mediators: 72057594046382081 }} 2026-01-08T15:28:22.814140Z node 2 :TX_MEDIATOR_TIMECAST DEBUG: time_cast.cpp:276: Actor# [2:26:2073] SEND to Mediator# 72057594046382081 NKikimrTxMediatorTimecast.TEvGranularWatch Bucket: 0 SubscriptionId: 1 Tablets: 72057594047365120 MinStep: 0 2026-01-08T15:28:22.814212Z node 2 :TX_MEDIATOR_TIMECAST DEBUG: time_cast.cpp:378: Actor# [2:26:2073] SEND to Mediator# 72057594046382081 {TEvWatch Bucket# 0} 2026-01-08T15:28:22.814279Z node 2 :TX_MEDIATOR_TIMECAST DEBUG: time_cast.cpp:508: Actor# [2:26:2073] SEND to Sender# [2:886:2754] {TEvRegisterTabletResult TabletId# 72057594047365120 Entry# 0} 2026-01-08T15:28:22.814676Z node 2 :TX_MEDIATOR_TIMECAST DEBUG: time_cast.cpp:584: Actor# [2:26:2073] HANDLE EvClientConnected 2026-01-08T15:28:22.814828Z node 2 :TX_MEDIATOR_TIMECAST DEBUG: time_cast.cpp:698: Actor# [2:26:2073] HANDLE NKikimrTxMediatorTimecast.TEvGranularUpdate Mediator: 72057594046382081 Bucket: 0 SubscriptionId: 1 LatestStep: 1000 2026-01-08T15:28:22.815034Z node 2 :TX_MEDIATOR_TIMECAST DEBUG: time_cast.cpp:631: Actor# [2:26:2073] HANDLE {TEvUpdate Mediator# 72057594046382081 Bucket# 0 TimeBarrier# 1000} 2026-01-08T15:28:22.815277Z node 2 :TX_MEDIATOR_TIMECAST DEBUG: time_cast.cpp:460: Actor# [2:26:2073] HANDLE {TEvRegisterTablet TabletId# 72057594047365121 ProcessingParams { Version: 0 PlanResolution: 500 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 1 Mediators: 72057594046382081 }} 2026-01-08T15:28:22.815368Z node 2 :TX_MEDIATOR_TIMECAST DEBUG: time_cast.cpp:304: Actor# [2:26:2073] SEND to Mediator# 72057594046382081 NKikimrTxMediatorTimecast.TEvGranularWatchModify 
Bucket: 0 SubscriptionId: 2 AddTablets: 72057594047365121 2026-01-08T15:28:22.815430Z node 2 :TX_MEDIATOR_TIMECAST DEBUG: time_cast.cpp:508: Actor# [2:26:2073] SEND to Sender# [2:889:2756] {TEvRegisterTabletResult TabletId# 72057594047365121 Entry# 1000} 2026-01-08T15:28:22.815681Z node 2 :TX_MEDIATOR_TIMECAST DEBUG: time_cast.cpp:698: Actor# [2:26:2073] HANDLE NKikimrTxMediatorTimecast.TEvGranularUpdate Mediator: 72057594046382081 Bucket: 0 SubscriptionId: 2 LatestStep: 1000 2026-01-08T15:28:22.815895Z node 2 :TX_MEDIATOR_TIMECAST DEBUG: time_cast.cpp:460: Actor# [2:26:2073] HANDLE {TEvRegisterTablet TabletId# 72057594047365123 ProcessingParams { Version: 0 PlanResolution: 500 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 1 Mediators: 72057594046382081 }} 2026-01-08T15:28:22.815985Z node 2 :TX_MEDIATOR_TIMECAST DEBUG: time_cast.cpp:304: Actor# [2:26:2073] SEND to Mediator# 72057594046382081 NKikimrTxMediatorTimecast.TEvGranularWatchModify Bucket: 0 SubscriptionId: 3 AddTablets: 72057594047365123 2026-01-08T15:28:22.816041Z node 2 :TX_MEDIATOR_TIMECAST DEBUG: time_cast.cpp:508: Actor# [2:26:2073] SEND to Sender# [2:890:2757] {TEvRegisterTabletResult TabletId# 72057594047365123 Entry# 1000} 2026-01-08T15:28:22.816282Z node 2 :TX_MEDIATOR_TIMECAST DEBUG: time_cast.cpp:698: Actor# [2:26:2073] HANDLE NKikimrTxM ... GranularUpdate from TX_MEDIATOR_TABLET_QUEUE_ACTOR to TX_MEDIATOR_TIMECAST_ACTOR cookie 0 ... blocking NKikimr::TEvMediatorTimecast::TEvGranularUpdate from TX_MEDIATOR_TABLET_QUEUE_ACTOR to TX_MEDIATOR_TIMECAST_ACTOR cookie 0 ... blocking NKikimr::TEvTxProcessing::TEvPlanStep from TX_MEDIATOR_TABLET_QUEUE_ACTOR to NKikimr::NMediatorTimeCastTest::NTestSuiteMediatorTimeCast::TTargetTablet cookie 0 ... blocking NKikimr::TEvTxProcessing::TEvPlanStep from TX_MEDIATOR_TABLET_QUEUE_ACTOR to NKikimr::NMediatorTimeCastTest::NTestSuiteMediatorTimeCast::TTargetTablet cookie 0 ... blocking NKikimr::TEvTxProcessing::TEvPlanStep from TX_MEDIATOR_TABLET_QUEUE_ACTOR to NKikimr::NMediatorTimeCastTest::NTestSuiteMediatorTimeCast::TTargetTablet cookie 0 ... blocking NKikimr::TEvTxProcessing::TEvPlanStep from TX_MEDIATOR_TABLET_QUEUE_ACTOR to NKikimr::NMediatorTimeCastTest::NTestSuiteMediatorTimeCast::TTargetTablet cookie 0 ... blocking NKikimr::TEvTxProcessing::TEvPlanStep from TX_MEDIATOR_TABLET_QUEUE_ACTOR to NKikimr::NMediatorTimeCastTest::NTestSuiteMediatorTimeCast::TTargetTablet cookie 0 ... unblocking update: Mediator: 72057594046382081 Bucket: 0 SubscriptionId: 5 LatestStep: 0 ... unblocking NKikimr::TEvMediatorTimecast::TEvGranularUpdate from TX_MEDIATOR_TABLET_QUEUE_ACTOR to TX_MEDIATOR_TIMECAST_ACTOR 2026-01-08T15:28:23.516525Z node 2 :TX_MEDIATOR_TIMECAST DEBUG: time_cast.cpp:698: Actor# [2:26:2073] HANDLE NKikimrTxMediatorTimecast.TEvGranularUpdate Mediator: 72057594046382081 Bucket: 0 SubscriptionId: 5 LatestStep: 0 ... unblocking update: Mediator: 72057594046382081 Bucket: 0 SubscriptionId: 5 LatestStep: 3500 FrozenTablets: 72057594047365120 FrozenTablets: 72057594047365121 FrozenSteps: 3499 FrozenSteps: 3499 ... unblocking NKikimr::TEvMediatorTimecast::TEvGranularUpdate from TX_MEDIATOR_TABLET_QUEUE_ACTOR to TX_MEDIATOR_TIMECAST_ACTOR 2026-01-08T15:28:23.529744Z node 2 :TX_MEDIATOR_TIMECAST DEBUG: time_cast.cpp:698: Actor# [2:26:2073] HANDLE NKikimrTxMediatorTimecast.TEvGranularUpdate Mediator: 72057594046382081 Bucket: 0 SubscriptionId: 5 LatestStep: 3500 FrozenTablets: 72057594047365120 FrozenTablets: 72057594047365121 FrozenSteps: 3499 FrozenSteps: 3499 ... 
unblocking update: Mediator: 72057594046382081 Bucket: 0 SubscriptionId: 5 LatestStep: 4000 ... unblocking NKikimr::TEvMediatorTimecast::TEvGranularUpdate from TX_MEDIATOR_TABLET_QUEUE_ACTOR to TX_MEDIATOR_TIMECAST_ACTOR 2026-01-08T15:28:23.546840Z node 2 :TX_MEDIATOR_TIMECAST DEBUG: time_cast.cpp:698: Actor# [2:26:2073] HANDLE NKikimrTxMediatorTimecast.TEvGranularUpdate Mediator: 72057594046382081 Bucket: 0 SubscriptionId: 5 LatestStep: 4000 ... unblocking update: Mediator: 72057594046382081 Bucket: 0 SubscriptionId: 5 LatestStep: 4500 ... unblocking NKikimr::TEvMediatorTimecast::TEvGranularUpdate from TX_MEDIATOR_TABLET_QUEUE_ACTOR to TX_MEDIATOR_TIMECAST_ACTOR 2026-01-08T15:28:23.557867Z node 2 :TX_MEDIATOR_TIMECAST DEBUG: time_cast.cpp:698: Actor# [2:26:2073] HANDLE NKikimrTxMediatorTimecast.TEvGranularUpdate Mediator: 72057594046382081 Bucket: 0 SubscriptionId: 5 LatestStep: 4500 ... unblocking plan for tablet2 ... unblocking NKikimr::TEvTxProcessing::TEvPlanStep from TX_MEDIATOR_TABLET_QUEUE_ACTOR to NKikimr::NMediatorTimeCastTest::NTestSuiteMediatorTimeCast::TTargetTablet ... blocking NKikimr::TEvMediatorTimecast::TEvGranularUpdate from TX_MEDIATOR_TABLET_QUEUE_ACTOR to TX_MEDIATOR_TIMECAST_ACTOR cookie 0 ... blocking NKikimr::TEvTxProcessing::TEvPlanStepAck from TX_MEDIATOR_TABLET_QUEUE_ACTOR to TX_COORDINATOR_MEDIATORQ_ACTOR cookie 0 ... unblocking update: Mediator: 72057594046382081 Bucket: 0 SubscriptionId: 5 LatestStep: 4500 FrozenTablets: 72057594047365121 FrozenSteps: 3999 ... unblocking NKikimr::TEvMediatorTimecast::TEvGranularUpdate from TX_MEDIATOR_TABLET_QUEUE_ACTOR to TX_MEDIATOR_TIMECAST_ACTOR 2026-01-08T15:28:23.582931Z node 2 :TX_MEDIATOR_TIMECAST DEBUG: time_cast.cpp:698: Actor# [2:26:2073] HANDLE NKikimrTxMediatorTimecast.TEvGranularUpdate Mediator: 72057594046382081 Bucket: 0 SubscriptionId: 5 LatestStep: 4500 FrozenTablets: 72057594047365121 FrozenSteps: 3999 ... unblocking plan for tablet2 ... unblocking NKikimr::TEvTxProcessing::TEvPlanStep from TX_MEDIATOR_TABLET_QUEUE_ACTOR to NKikimr::NMediatorTimeCastTest::NTestSuiteMediatorTimeCast::TTargetTablet ... blocking NKikimr::TEvMediatorTimecast::TEvGranularUpdate from TX_MEDIATOR_TABLET_QUEUE_ACTOR to TX_MEDIATOR_TIMECAST_ACTOR cookie 0 ... blocking NKikimr::TEvTxProcessing::TEvPlanStepAck from TX_MEDIATOR_TABLET_QUEUE_ACTOR to TX_COORDINATOR_MEDIATORQ_ACTOR cookie 0 ... unblocking update: Mediator: 72057594046382081 Bucket: 0 SubscriptionId: 5 LatestStep: 4500 FrozenTablets: 72057594047365121 FrozenSteps: 4499 ... unblocking NKikimr::TEvMediatorTimecast::TEvGranularUpdate from TX_MEDIATOR_TABLET_QUEUE_ACTOR to TX_MEDIATOR_TIMECAST_ACTOR 2026-01-08T15:28:23.606888Z node 2 :TX_MEDIATOR_TIMECAST DEBUG: time_cast.cpp:698: Actor# [2:26:2073] HANDLE NKikimrTxMediatorTimecast.TEvGranularUpdate Mediator: 72057594046382081 Bucket: 0 SubscriptionId: 5 LatestStep: 4500 FrozenTablets: 72057594047365121 FrozenSteps: 4499 ... unblocking plan for tablet2 ... unblocking NKikimr::TEvTxProcessing::TEvPlanStep from TX_MEDIATOR_TABLET_QUEUE_ACTOR to NKikimr::NMediatorTimeCastTest::NTestSuiteMediatorTimeCast::TTargetTablet ... blocking NKikimr::TEvMediatorTimecast::TEvGranularUpdate from TX_MEDIATOR_TABLET_QUEUE_ACTOR to TX_MEDIATOR_TIMECAST_ACTOR cookie 0 ... blocking NKikimr::TEvTxProcessing::TEvPlanStepAck from TX_MEDIATOR_TABLET_QUEUE_ACTOR to TX_COORDINATOR_MEDIATORQ_ACTOR cookie 0 ... unblocking update: Mediator: 72057594046382081 Bucket: 0 SubscriptionId: 5 LatestStep: 4500 UnfrozenTablets: 72057594047365121 ... 
unblocking NKikimr::TEvMediatorTimecast::TEvGranularUpdate from TX_MEDIATOR_TABLET_QUEUE_ACTOR to TX_MEDIATOR_TIMECAST_ACTOR 2026-01-08T15:28:23.630289Z node 2 :TX_MEDIATOR_TIMECAST DEBUG: time_cast.cpp:698: Actor# [2:26:2073] HANDLE NKikimrTxMediatorTimecast.TEvGranularUpdate Mediator: 72057594046382081 Bucket: 0 SubscriptionId: 5 LatestStep: 4500 UnfrozenTablets: 72057594047365121 ... restarting mediator 2026-01-08T15:28:23.648353Z node 2 :TX_MEDIATOR_TIMECAST DEBUG: time_cast.cpp:593: Actor# [2:26:2073] HANDLE EvClientDestroyed 2026-01-08T15:28:23.648539Z node 2 :TX_MEDIATOR_TIMECAST DEBUG: time_cast.cpp:276: Actor# [2:26:2073] SEND to Mediator# 72057594046382081 NKikimrTxMediatorTimecast.TEvGranularWatch Bucket: 0 SubscriptionId: 6 Tablets: 72057594047365123 Tablets: 72057594047365120 Tablets: 72057594047365121 MinStep: 4500 2026-01-08T15:28:23.648592Z node 2 :TX_MEDIATOR_TIMECAST DEBUG: time_cast.cpp:361: Actor# [2:26:2073] SEND to Mediator# 72057594046382081 {TEvWatch Bucket# 0} 2026-01-08T15:28:23.649607Z node 2 :TX_MEDIATOR_TIMECAST DEBUG: time_cast.cpp:584: Actor# [2:26:2073] HANDLE EvClientConnected 2026-01-08T15:28:23.649775Z node 2 :TX_MEDIATOR_TIMECAST DEBUG: time_cast.cpp:276: Actor# [2:26:2073] SEND to Mediator# 72057594046382081 NKikimrTxMediatorTimecast.TEvGranularWatch Bucket: 0 SubscriptionId: 7 Tablets: 72057594047365123 Tablets: 72057594047365120 Tablets: 72057594047365121 MinStep: 4500 2026-01-08T15:28:23.649825Z node 2 :TX_MEDIATOR_TIMECAST DEBUG: time_cast.cpp:361: Actor# [2:26:2073] SEND to Mediator# 72057594046382081 {TEvWatch Bucket# 0} 2026-01-08T15:28:23.667355Z node 2 :TX_MEDIATOR_TIMECAST DEBUG: time_cast.cpp:584: Actor# [2:26:2073] HANDLE EvClientConnected ... blocking NKikimr::TEvMediatorTimecast::TEvGranularUpdate from TX_MEDIATOR_TABLET_QUEUE_ACTOR to TX_MEDIATOR_TIMECAST_ACTOR cookie 0 2026-01-08T15:28:23.667757Z node 2 :TX_MEDIATOR_TIMECAST DEBUG: time_cast.cpp:631: Actor# [2:26:2073] HANDLE {TEvUpdate Mediator# 72057594046382081 Bucket# 0 TimeBarrier# 0} ... blocking NKikimr::TEvMediatorTimecast::TEvGranularUpdate from TX_MEDIATOR_TABLET_QUEUE_ACTOR to TX_MEDIATOR_TIMECAST_ACTOR cookie 0 ... blocking NKikimr::TEvMediatorTimecast::TEvGranularUpdate from TX_MEDIATOR_TABLET_QUEUE_ACTOR to TX_MEDIATOR_TIMECAST_ACTOR cookie 0 ... blocking NKikimr::TEvMediatorTimecast::TEvGranularUpdate from TX_MEDIATOR_TABLET_QUEUE_ACTOR to TX_MEDIATOR_TIMECAST_ACTOR cookie 0 ... blocking NKikimr::TEvTxProcessing::TEvPlanStep from TX_MEDIATOR_TABLET_QUEUE_ACTOR to NKikimr::NMediatorTimeCastTest::NTestSuiteMediatorTimeCast::TTargetTablet cookie 0 ... blocking NKikimr::TEvTxProcessing::TEvPlanStep from TX_MEDIATOR_TABLET_QUEUE_ACTOR to NKikimr::NMediatorTimeCastTest::NTestSuiteMediatorTimeCast::TTargetTablet cookie 0 ... blocking NKikimr::TEvTxProcessing::TEvPlanStep from TX_MEDIATOR_TABLET_QUEUE_ACTOR to NKikimr::NMediatorTimeCastTest::NTestSuiteMediatorTimeCast::TTargetTablet cookie 0 ... blocking NKikimr::TEvTxProcessing::TEvPlanStep from TX_MEDIATOR_TABLET_QUEUE_ACTOR to NKikimr::NMediatorTimeCastTest::NTestSuiteMediatorTimeCast::TTargetTablet cookie 0 ... blocking NKikimr::TEvTxProcessing::TEvPlanStep from TX_MEDIATOR_TABLET_QUEUE_ACTOR to NKikimr::NMediatorTimeCastTest::NTestSuiteMediatorTimeCast::TTargetTablet cookie 0 ... fully unblocking tx1 ... unblocking NKikimr::TEvTxProcessing::TEvPlanStep from TX_MEDIATOR_TABLET_QUEUE_ACTOR to NKikimr::NMediatorTimeCastTest::NTestSuiteMediatorTimeCast::TTargetTablet ... 
unblocking NKikimr::TEvTxProcessing::TEvPlanStep from TX_MEDIATOR_TABLET_QUEUE_ACTOR to NKikimr::NMediatorTimeCastTest::NTestSuiteMediatorTimeCast::TTargetTablet ... blocking NKikimr::TEvMediatorTimecast::TEvGranularUpdate from TX_MEDIATOR_TABLET_QUEUE_ACTOR to TX_MEDIATOR_TIMECAST_ACTOR cookie 0 ... blocking NKikimr::TEvTxProcessing::TEvPlanStepAck from TX_MEDIATOR_TABLET_QUEUE_ACTOR to TX_COORDINATOR_MEDIATORQ_ACTOR cookie 0 ... blocking NKikimr::TEvMediatorTimecast::TEvGranularUpdate from TX_MEDIATOR_TABLET_QUEUE_ACTOR to TX_MEDIATOR_TIMECAST_ACTOR cookie 0 2026-01-08T15:28:23.691453Z node 2 :TX_MEDIATOR_TIMECAST DEBUG: time_cast.cpp:631: Actor# [2:26:2073] HANDLE {TEvUpdate Mediator# 72057594046382081 Bucket# 0 TimeBarrier# 3500} ... blocking NKikimr::TEvTxProcessing::TEvPlanStepAck from TX_MEDIATOR_TABLET_QUEUE_ACTOR to TX_COORDINATOR_MEDIATORQ_ACTOR cookie 0 ... tablet1 at 3500 ... tablet2 at 4500 ... tablet3 at 4500 ... fully unblocking tx2 ... unblocking NKikimr::TEvTxProcessing::TEvPlanStep from TX_MEDIATOR_TABLET_QUEUE_ACTOR to NKikimr::NMediatorTimeCastTest::NTestSuiteMediatorTimeCast::TTargetTablet ... unblocking NKikimr::TEvTxProcessing::TEvPlanStep from TX_MEDIATOR_TABLET_QUEUE_ACTOR to NKikimr::NMediatorTimeCastTest::NTestSuiteMediatorTimeCast::TTargetTablet ... blocking NKikimr::TEvMediatorTimecast::TEvGranularUpdate from TX_MEDIATOR_TABLET_QUEUE_ACTOR to TX_MEDIATOR_TIMECAST_ACTOR cookie 0 ... blocking NKikimr::TEvTxProcessing::TEvPlanStepAck from TX_MEDIATOR_TABLET_QUEUE_ACTOR to TX_COORDINATOR_MEDIATORQ_ACTOR cookie 0 ... blocking NKikimr::TEvMediatorTimecast::TEvGranularUpdate from TX_MEDIATOR_TABLET_QUEUE_ACTOR to TX_MEDIATOR_TIMECAST_ACTOR cookie 0 ... blocking NKikimr::TEvTxProcessing::TEvPlanStepAck from TX_MEDIATOR_TABLET_QUEUE_ACTOR to TX_COORDINATOR_MEDIATORQ_ACTOR cookie 0 2026-01-08T15:28:23.702551Z node 2 :TX_MEDIATOR_TIMECAST DEBUG: time_cast.cpp:631: Actor# [2:26:2073] HANDLE {TEvUpdate Mediator# 72057594046382081 Bucket# 0 TimeBarrier# 4000} ... tablet1 at 4000 ... tablet2 at 4500 ... tablet3 at 4500 ... fully unblocking tx3 ... unblocking NKikimr::TEvTxProcessing::TEvPlanStep from TX_MEDIATOR_TABLET_QUEUE_ACTOR to NKikimr::NMediatorTimeCastTest::NTestSuiteMediatorTimeCast::TTargetTablet ... blocking NKikimr::TEvMediatorTimecast::TEvGranularUpdate from TX_MEDIATOR_TABLET_QUEUE_ACTOR to TX_MEDIATOR_TIMECAST_ACTOR cookie 0 ... blocking NKikimr::TEvTxProcessing::TEvPlanStepAck from TX_MEDIATOR_TABLET_QUEUE_ACTOR to TX_COORDINATOR_MEDIATORQ_ACTOR cookie 0 2026-01-08T15:28:23.713471Z node 2 :TX_MEDIATOR_TIMECAST DEBUG: time_cast.cpp:631: Actor# [2:26:2073] HANDLE {TEvUpdate Mediator# 72057594046382081 Bucket# 0 TimeBarrier# 4500} ... tablet1 at 4500 ... tablet2 at 4500 ... 
tablet3 at 4500 |85.5%| [TM] {BAZEL_UPLOAD} ydb/core/tx/time_cast/ut/unittest >> TControlPlaneProxyCheckNegativePermissionsFailed::ShouldSendModifyQuery [GOOD] >> TControlPlaneProxyCheckNegativePermissionsFailed::ShouldSendDeleteQuery ------- [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/ut_blobstorage/ut_balancing/unittest >> VDiskBalancing::TestRandom_Block42 [GOOD] Test command err: RandomSeed# 879915825480804699 Step = 0 SEND TEvPut with key [1:1:0:0:0:585447:0] TEvPutResult: TEvPutResult {Id# [1:1:0:0:0:585447:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999988} Step = 1 SEND TEvPut with key [1:1:1:0:0:37868:0] TEvPutResult: TEvPutResult {Id# [1:1:1:0:0:37868:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999988} Step = 2 SEND TEvPut with key [1:1:2:0:0:619381:0] TEvPutResult: TEvPutResult {Id# [1:1:2:0:0:619381:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999988} Step = 3 SEND TEvPut with key [1:1:3:0:0:725585:0] TEvPutResult: TEvPutResult {Id# [1:1:3:0:0:725585:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999988} Step = 4 SEND TEvPut with key [1:1:4:0:0:2934723:0] TEvPutResult: TEvPutResult {Id# [1:1:4:0:0:2934723:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999988} Stop node 4 2026-01-08T15:22:55.517812Z 1 00h01m00.010512s :PIPE_SERVER ERROR: [72057594037932033] NodeDisconnected NodeId# 5 Step = 5 SEND TEvPut with key [1:1:5:0:0:502135:0] TEvPutResult: TEvPutResult {Id# [1:1:5:0:0:502135:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999976} Step = 6 SEND TEvPut with key [1:1:6:0:0:3044947:0] TEvPutResult: TEvPutResult {Id# [1:1:6:0:0:3044947:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999976} Stop node 7 2026-01-08T15:22:55.900847Z 1 00h01m10.060512s :PIPE_SERVER ERROR: [72057594037932033] NodeDisconnected NodeId# 8 Step = 7 SEND TEvPut with key [1:1:7:0:0:582354:0] TEvPutResult: TEvPutResult {Id# [1:1:7:0:0:582354:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999963} Step = 8 SEND TEvPut with key [1:1:8:0:0:1478820:0] TEvPutResult: TEvPutResult {Id# [1:1:8:0:0:1478820:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999963} Step = 9 SEND TEvPut with key [1:1:9:0:0:1360774:0] TEvPutResult: TEvPutResult {Id# [1:1:9:0:0:1360774:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999963} Start node 4 Step = 10 SEND TEvPut with key [1:1:10:0:0:1727870:0] TEvPutResult: TEvPutResult {Id# [1:1:10:0:0:1727870:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999939} Step = 11 SEND TEvPut with key [1:1:11:0:0:1883457:0] TEvPutResult: TEvPutResult {Id# [1:1:11:0:0:1883457:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999939} Step = 12 SEND TEvPut with key [1:1:12:0:0:568368:0] TEvPutResult: TEvPutResult {Id# [1:1:12:0:0:568368:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999939} Step = 13 SEND TEvPut with key [1:1:13:0:0:896600:0] TEvPutResult: TEvPutResult {Id# [1:1:13:0:0:896600:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999939} Step = 14 SEND TEvPut with key [1:1:14:0:0:179270:0] TEvPutResult: TEvPutResult {Id# [1:1:14:0:0:179270:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999939} Step = 15 SEND TEvPut with key [1:1:15:0:0:3026131:0] TEvPutResult: TEvPutResult {Id# [1:1:15:0:0:3026131:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999939} Step = 16 SEND TEvPut with key [1:1:16:0:0:670396:0] TEvPutResult: TEvPutResult {Id# [1:1:16:0:0:670396:0] Status# OK 
StatusFlags# { } ApproximateFreeSpaceShare# 0.999939} Step = 17 SEND TEvPut with key [1:1:17:0:0:1584741:0] TEvPutResult: TEvPutResult {Id# [1:1:17:0:0:1584741:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999939} Step = 18 SEND TEvPut with key [1:1:18:0:0:2384818:0] TEvPutResult: TEvPutResult {Id# [1:1:18:0:0:2384818:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999939} Step = 19 SEND TEvPut with key [1:1:19:0:0:2867010:0] TEvPutResult: TEvPutResult {Id# [1:1:19:0:0:2867010:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999939} Step = 20 SEND TEvPut with key [1:1:20:0:0:2911789:0] TEvPutResult: TEvPutResult {Id# [1:1:20:0:0:2911789:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999939} Step = 21 SEND TEvPut with key [1:1:21:0:0:2463622:0] TEvPutResult: TEvPutResult {Id# [1:1:21:0:0:2463622:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999939} Step = 22 SEND TEvPut with key [1:1:22:0:0:322338:0] TEvPutResult: TEvPutResult {Id# [1:1:22:0:0:322338:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999939} Step = 23 SEND TEvPut with key [1:1:23:0:0:2119770:0] TEvPutResult: TEvPutResult {Id# [1:1:23:0:0:2119770:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999939} Step = 24 SEND TEvPut with key [1:1:24:0:0:56036:0] TEvPutResult: TEvPutResult {Id# [1:1:24:0:0:56036:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999939} Step = 25 SEND TEvPut with key [1:1:25:0:0:2648607:0] TEvPutResult: TEvPutResult {Id# [1:1:25:0:0:2648607:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999939} Stop node 0 2026-01-08T15:22:57.943059Z 3 00h01m30.100512s :BS_NODE ERROR: {NW42@node_warden_pipe.cpp:60} Handle(TEvTabletPipe::TEvClientDestroyed) ClientId# [3:194:17] ServerId# [1:296:63] TabletId# 72057594037932033 PipeClientId# [3:194:17] 2026-01-08T15:22:57.943230Z 6 00h01m30.100512s :BS_NODE ERROR: {NW42@node_warden_pipe.cpp:60} Handle(TEvTabletPipe::TEvClientDestroyed) ClientId# [6:215:17] ServerId# [1:299:66] TabletId# 72057594037932033 PipeClientId# [6:215:17] 2026-01-08T15:22:57.943364Z 5 00h01m30.100512s :BS_NODE ERROR: {NW42@node_warden_pipe.cpp:60} Handle(TEvTabletPipe::TEvClientDestroyed) ClientId# [5:7680:16] ServerId# [1:7688:1098] TabletId# 72057594037932033 PipeClientId# [5:7680:16] 2026-01-08T15:22:57.943450Z 4 00h01m30.100512s :BS_NODE ERROR: {NW42@node_warden_pipe.cpp:60} Handle(TEvTabletPipe::TEvClientDestroyed) ClientId# [4:201:17] ServerId# [1:297:64] TabletId# 72057594037932033 PipeClientId# [4:201:17] 2026-01-08T15:22:57.943535Z 2 00h01m30.100512s :BS_NODE ERROR: {NW42@node_warden_pipe.cpp:60} Handle(TEvTabletPipe::TEvClientDestroyed) ClientId# [2:187:17] ServerId# [1:295:62] TabletId# 72057594037932033 PipeClientId# [2:187:17] 2026-01-08T15:22:57.943677Z 7 00h01m30.100512s :BS_NODE ERROR: {NW42@node_warden_pipe.cpp:60} Handle(TEvTabletPipe::TEvClientDestroyed) ClientId# [7:222:17] ServerId# [1:300:67] TabletId# 72057594037932033 PipeClientId# [7:222:17] Step = 26 SEND TEvPut with key [1:1:26:0:0:539431:0] TEvPutResult: TEvPutResult {Id# [1:1:26:0:0:539431:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.99989} Step = 27 SEND TEvPut with key [1:1:27:0:0:148482:0] TEvPutResult: TEvPutResult {Id# [1:1:27:0:0:148482:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.99989} Step = 28 SEND TEvPut with key [1:1:28:0:0:2673563:0] TEvPutResult: TEvPutResult {Id# [1:1:28:0:0:2673563:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.99989} Step = 29 SEND TEvPut 
with key [1:1:29:0:0:265170:0] TEvPutResult: TEvPutResult {Id# [1:1:29:0:0:265170:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.99989} Step = 30 SEND TEvPut with key [1:1:30:0:0:2398732:0] TEvPutResult: TEvPutResult {Id# [1:1:30:0:0:2398732:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.99989} Compact vdisk 2 Step = 31 SEND TEvPut with key [1:1:31:0:0:2302132:0] TEvPutResult: TEvPutResult {Id# [1:1:31:0:0:2302132:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.99989} Step = 32 SEND TEvPut with key [1:1:32:0:0:3112269:0] TEvPutResult: TEvPutResult {Id# [1:1:32:0:0:3112269:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.99989} Step = 33 SEND TEvPut with key [1:1:33:0:0:883758:0] TEvPutResult: TEvPutResult {Id# [1:1:33:0:0:883758:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.99989} Step = 34 SEND TEvPut with key [1:1:34:0:0:1212958:0] TEvPutResult: TEvPutResult {Id# [1:1:34:0:0:1212958:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.99989} Step = 35 SEND TEvPut with key [1:1:35:0:0:3026131:0] TEvPutResult: TEvPutResult {Id# [1:1:35:0:0:3026131:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.99989} Step = 36 SEND TEvPut with key [1:1:36:0:0:139148:0] TEvPutResult: TEvPutResult {Id# [1:1:36:0:0:139148:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.99989} Step = 37 SEND TEvPut with key [1:1:37:0:0:200198:0] TEvPutResult: TEvPutResult {Id# [1:1:37:0:0:200198:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.99989} Step = 38 SEND TEvPut with key [1:1:38:0:0:1252178:0] TEvPutResult: TEvPutResult {Id# [1:1:38:0:0:1252178:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.99989} Step = 39 SEND TEvPut with key [1:1:39:0:0:1897783:0] TEvPutResult: TEvPutResult {Id# [1:1:39:0:0:1897783:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.99989} Step = 40 SEND TEvPut with key [1:1:40:0:0:1486678:0] TEvPutResult: TEvPutResult {Id# [1:1:40:0:0:1486678:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.99989} Step = 41 SEND TEvPut with key [1:1:41:0:0:1285964:0] TEvPutResult: TEvPutResult {Id# [1:1:41:0:0:1285964:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.99989} Step = 42 SEND TEvPut with key [1:1:42:0:0:1221731:0] TEvPutResult: TEvPutResult {Id# [1:1:42:0:0:1221731:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.99989} Step = 43 SEND TEvPut with key [1:1:43:0:0:1613844:0] TEvPutResult: TEvPutResult {Id# [1:1:43:0:0:1613844:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.99989} Step = 44 SEND TEvPut with key [1:1:44:0:0:2582908:0] TEvPutResult: TEvPutResult {Id# [1:1:44:0:0:2582908:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.99989} Step = 45 SEND TEvPut with key [1:1:45:0:0:1703743:0] TEvPutResult: TEvPutResult {Id# [1:1:45:0:0:1703743:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.99989} Step = 46 SEND TEvPut with key [1:1:46:0:0:1362981:0] TEvPutResult: TEvPutResult {Id# [1:1:46:0:0:1362981:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.99989} Step = 47 SEND TEvPut with key [1:1:47:0:0:1469807:0] TEvPutResult: TEvPutResult {Id# [1:1:47:0:0:1469807:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.99989} Step = 48 SEND TEvPut with key [1:1:48:0:0:2832565:0] TEvPutResult: TEvPutResult {Id# [1:1:48:0:0:2832565:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.99989} Step = 49 SEND TEvPut with key [1:1:49:0:0:1960611:0] TEvPutResult: TEvPutResult {Id# [1:1:49:0:0:1960611:0] 
Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.99989} Step = 50 SEND TEvPut with key [1:1:50:0:0:1164230:0] TEvPutResult: TEvPutResult {Id# [1:1:50:0:0:1164230:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.99989} Step = 51 SEND TEvPut with key [1:1:51:0:0:836900:0] TEvPutResult: TEvPutResult {Id# [1:1:51:0:0:836900:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.99989} Step = 52 SEND TEvPut with key [1:1:52:0:0:838380:0] TEvPutResult: TEvPutResult {Id# [1:1:52:0:0:838380:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.99989} Step = 53 SEND TEvPut with key [1:1:53:0:0:1975575:0] TEvPutResult: TEvPutResult {Id# [1:1:53:0:0:1975575:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.99989} Start node 0 Step = 54 SEND TEvPut with key [1:1:54:0:0:1888556:0] TEvPutResult: TEvPutResult {Id# [1:1:54:0:0:1888556:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999817} Step = 55 SEND TEvPut with key [1:1:55:0:0:715063:0] TEvPutResult: TEvPutResult {Id# [1:1:55:0:0:715063:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999817} Step = 56 SEND TEvPut with key [1:1:56:0:0:42993:0] TEvPutResult: TEvPutResult {Id# [1:1:56:0:0:42993:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999817} Step = 57 SEND TEvPut with key [1:1:57:0:0:1491407:0] TEvPutResult: TEvPutResult {Id# [1:1:57:0:0:1491407:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999817} Step = 58 SEND TEvPut with key [1:1:58:0:0:702845:0] TEvPutResult: TEvPutResult {Id# [1:1:58:0:0:702845:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999817} Step = 59 SEND TEvPut with key [1:1:59:0:0:2539948:0] TEvPutResult: TEvPutResult {Id# [1:1:59:0:0:2539948:0] Status# ... END TEvPut with key [1:1:936:0:0:2748248:0] TEvPutResult: TEvPutResult {Id# [1:1:936:0:0:2748248:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.99967} Step = 937 SEND TEvPut with key [1:1:937:0:0:112302:0] TEvPutResult: TEvPutResult {Id# [1:1:937:0:0:112302:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999658} Step = 938 SEND TEvPut with key [1:1:938:0:0:800417:0] TEvPutResult: TEvPutResult {Id# [1:1:938:0:0:800417:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999658} Step = 939 SEND TEvPut with key [1:1:939:0:0:2336442:0] TEvPutResult: TEvPutResult {Id# [1:1:939:0:0:2336442:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999658} Step = 940 SEND TEvPut with key [1:1:940:0:0:982070:0] TEvPutResult: TEvPutResult {Id# [1:1:940:0:0:982070:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999658} Start node 4 Step = 941 SEND TEvPut with key [1:1:941:0:0:713632:0] TEvPutResult: TEvPutResult {Id# [1:1:941:0:0:713632:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999646} Step = 942 SEND TEvPut with key [1:1:942:0:0:1644191:0] TEvPutResult: TEvPutResult {Id# [1:1:942:0:0:1644191:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999646} Step = 943 SEND TEvPut with key [1:1:943:0:0:254634:0] TEvPutResult: TEvPutResult {Id# [1:1:943:0:0:254634:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999646} Step = 944 SEND TEvPut with key [1:1:944:0:0:1141270:0] TEvPutResult: TEvPutResult {Id# [1:1:944:0:0:1141270:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999646} Step = 945 SEND TEvPut with key [1:1:945:0:0:610103:0] TEvPutResult: TEvPutResult {Id# [1:1:945:0:0:610103:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999756} Step = 946 SEND TEvPut with key 
[1:1:946:0:0:24822:0] TEvPutResult: TEvPutResult {Id# [1:1:946:0:0:24822:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999756} Compact vdisk 6 Step = 947 SEND TEvPut with key [1:1:947:0:0:100167:0] TEvPutResult: TEvPutResult {Id# [1:1:947:0:0:100167:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999561} Step = 948 SEND TEvPut with key [1:1:948:0:0:645630:0] TEvPutResult: TEvPutResult {Id# [1:1:948:0:0:645630:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999561} Step = 949 SEND TEvPut with key [1:1:949:0:0:2125890:0] TEvPutResult: TEvPutResult {Id# [1:1:949:0:0:2125890:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999561} Step = 950 SEND TEvPut with key [1:1:950:0:0:2544891:0] TEvPutResult: TEvPutResult {Id# [1:1:950:0:0:2544891:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999561} Step = 951 SEND TEvPut with key [1:1:951:0:0:647007:0] TEvPutResult: TEvPutResult {Id# [1:1:951:0:0:647007:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999561} Step = 952 SEND TEvPut with key [1:1:952:0:0:2031652:0] TEvPutResult: TEvPutResult {Id# [1:1:952:0:0:2031652:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999561} Step = 953 SEND TEvPut with key [1:1:953:0:0:2109805:0] TEvPutResult: TEvPutResult {Id# [1:1:953:0:0:2109805:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999561} Stop node 3 2026-01-08T15:25:51.188481Z 1 00h25m30.797719s :PIPE_SERVER ERROR: [72057594037932033] NodeDisconnected NodeId# 4 Step = 954 SEND TEvPut with key [1:1:954:0:0:1353403:0] TEvPutResult: TEvPutResult {Id# [1:1:954:0:0:1353403:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999707} Stop node 4 2026-01-08T15:25:52.770055Z 1 00h25m40.802270s :PIPE_SERVER ERROR: [72057594037932033] NodeDisconnected NodeId# 5 Step = 955 SEND TEvPut with key [1:1:955:0:0:1286278:0] TEvPutResult: TEvPutResult {Id# [1:1:955:0:0:1286278:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999634} Start node 3 Step = 956 SEND TEvPut with key [1:1:956:0:0:1875483:0] TEvPutResult: TEvPutResult {Id# [1:1:956:0:0:1875483:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999719} Step = 957 SEND TEvPut with key [1:1:957:0:0:1021388:0] TEvPutResult: TEvPutResult {Id# [1:1:957:0:0:1021388:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999646} Start node 4 Step = 958 SEND TEvPut with key [1:1:958:0:0:860806:0] TEvPutResult: TEvPutResult {Id# [1:1:958:0:0:860806:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999646} Step = 959 SEND TEvPut with key [1:1:959:0:0:385917:0] TEvPutResult: TEvPutResult {Id# [1:1:959:0:0:385917:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999646} Step = 960 SEND TEvPut with key [1:1:960:0:0:200998:0] TEvPutResult: TEvPutResult {Id# [1:1:960:0:0:200998:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999646} Step = 961 SEND TEvPut with key [1:1:961:0:0:1661659:0] TEvPutResult: TEvPutResult {Id# [1:1:961:0:0:1661659:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999646} Step = 962 SEND TEvPut with key [1:1:962:0:0:771410:0] TEvPutResult: TEvPutResult {Id# [1:1:962:0:0:771410:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999646} Step = 963 SEND TEvPut with key [1:1:963:0:0:1414281:0] TEvPutResult: TEvPutResult {Id# [1:1:963:0:0:1414281:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999646} Step = 964 SEND TEvPut with key [1:1:964:0:0:2848837:0] TEvPutResult: TEvPutResult {Id# [1:1:964:0:0:2848837:0] Status# OK 
StatusFlags# { } ApproximateFreeSpaceShare# 0.999731}
Step = 965 SEND TEvPut with key [1:1:965:0:0:989600:0] TEvPutResult: TEvPutResult {Id# [1:1:965:0:0:989600:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999719}
Step = 966 SEND TEvPut with key [1:1:966:0:0:2761296:0] TEvPutResult: TEvPutResult {Id# [1:1:966:0:0:2761296:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999646}
Step = 967 SEND TEvPut with key [1:1:967:0:0:981163:0] TEvPutResult: TEvPutResult {Id# [1:1:967:0:0:981163:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999646}
Step = 968 SEND TEvPut with key [1:1:968:0:0:14298:0] TEvPutResult: TEvPutResult {Id# [1:1:968:0:0:14298:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999646}
Step = 969 SEND TEvPut with key [1:1:969:0:0:626285:0] TEvPutResult: TEvPutResult {Id# [1:1:969:0:0:626285:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999646}
Step = 970 SEND TEvPut with key [1:1:970:0:0:334566:0] TEvPutResult: TEvPutResult {Id# [1:1:970:0:0:334566:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999646}
Stop node 7
2026-01-08T15:26:02.736017Z 1 00h26m10.805632s :PIPE_SERVER ERROR: [72057594037932033] NodeDisconnected NodeId# 8
Step = 971 SEND TEvPut with key [1:1:971:0:0:972888:0] TEvPutResult: TEvPutResult {Id# [1:1:971:0:0:972888:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999597}
Step = 972 SEND TEvPut with key [1:1:972:0:0:786055:0] TEvPutResult: TEvPutResult {Id# [1:1:972:0:0:786055:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999597}
Step = 973 SEND TEvPut with key [1:1:973:0:0:2707502:0] TEvPutResult: TEvPutResult {Id# [1:1:973:0:0:2707502:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999597}
Stop node 1
2026-01-08T15:26:04.102704Z 1 00h26m20.806144s :PIPE_SERVER ERROR: [72057594037932033] NodeDisconnected NodeId# 2
Step = 974 SEND TEvPut with key [1:1:974:0:0:2660812:0] TEvPutResult: TEvPutResult {Id# [1:1:974:0:0:2660812:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999683}
Start node 1
Step = 975 SEND TEvPut with key [1:1:975:0:0:3005283:0] TEvPutResult: TEvPutResult {Id# [1:1:975:0:0:3005283:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999683}
Stop node 1
2026-01-08T15:26:04.823220Z 1 00h26m40.807168s :PIPE_SERVER ERROR: [72057594037932033] NodeDisconnected NodeId# 2
Step = 976 SEND TEvPut with key [1:1:976:0:0:1542748:0] TEvPutResult: TEvPutResult {Id# [1:1:976:0:0:1542748:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999683}
Step = 977 SEND TEvPut with key [1:1:977:0:0:2837300:0] TEvPutResult: TEvPutResult {Id# [1:1:977:0:0:2837300:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999683}
Step = 978 SEND TEvPut with key [1:1:978:0:0:481535:0] TEvPutResult: TEvPutResult {Id# [1:1:978:0:0:481535:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999683}
Step = 979 SEND TEvPut with key [1:1:979:0:0:24668:0] TEvPutResult: TEvPutResult {Id# [1:1:979:0:0:24668:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999683}
Step = 980 SEND TEvPut with key [1:1:980:0:0:1760402:0] TEvPutResult: TEvPutResult {Id# [1:1:980:0:0:1760402:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999683}
Step = 981 SEND TEvPut with key [1:1:981:0:0:1711812:0] TEvPutResult: TEvPutResult {Id# [1:1:981:0:0:1711812:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999683}
Step = 982 SEND TEvPut with key [1:1:982:0:0:1422922:0] TEvPutResult: TEvPutResult {Id# [1:1:982:0:0:1422922:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999683}
Step = 983 SEND TEvPut with key [1:1:983:0:0:2533122:0] TEvPutResult: TEvPutResult {Id# [1:1:983:0:0:2533122:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999683}
Step = 984 SEND TEvPut with key [1:1:984:0:0:347759:0] TEvPutResult: TEvPutResult {Id# [1:1:984:0:0:347759:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999683}
Step = 985 SEND TEvPut with key [1:1:985:0:0:1862506:0] TEvPutResult: TEvPutResult {Id# [1:1:985:0:0:1862506:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999683}
Step = 986 SEND TEvPut with key [1:1:986:0:0:101043:0] TEvPutResult: TEvPutResult {Id# [1:1:986:0:0:101043:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999683}
Step = 987 SEND TEvPut with key [1:1:987:0:0:672278:0] TEvPutResult: TEvPutResult {Id# [1:1:987:0:0:672278:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999683}
Step = 988 SEND TEvPut with key [1:1:988:0:0:2042425:0] TEvPutResult: TEvPutResult {Id# [1:1:988:0:0:2042425:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999683}
Step = 989 SEND TEvPut with key [1:1:989:0:0:1201477:0] TEvPutResult: TEvPutResult {Id# [1:1:989:0:0:1201477:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999683}
Step = 990 SEND TEvPut with key [1:1:990:0:0:1724337:0] TEvPutResult: TEvPutResult {Id# [1:1:990:0:0:1724337:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999683}
Step = 991 SEND TEvPut with key [1:1:991:0:0:2174403:0] TEvPutResult: TEvPutResult {Id# [1:1:991:0:0:2174403:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999683}
Step = 992 SEND TEvPut with key [1:1:992:0:0:193000:0] TEvPutResult: TEvPutResult {Id# [1:1:992:0:0:193000:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999683}
Step = 993 SEND TEvPut with key [1:1:993:0:0:618508:0] TEvPutResult: TEvPutResult {Id# [1:1:993:0:0:618508:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999683}
Step = 994 SEND TEvPut with key [1:1:994:0:0:2278246:0] TEvPutResult: TEvPutResult {Id# [1:1:994:0:0:2278246:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999683}
Step = 995 SEND TEvPut with key [1:1:995:0:0:2001881:0] TEvPutResult: TEvPutResult {Id# [1:1:995:0:0:2001881:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999683}
Step = 996 SEND TEvPut with key [1:1:996:0:0:1759634:0] TEvPutResult: TEvPutResult {Id# [1:1:996:0:0:1759634:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999683}
Step = 997 SEND TEvPut with key [1:1:997:0:0:2469234:0] TEvPutResult: TEvPutResult {Id# [1:1:997:0:0:2469234:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999683}
Step = 998 SEND TEvPut with key [1:1:998:0:0:1329395:0] TEvPutResult: TEvPutResult {Id# [1:1:998:0:0:1329395:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999683}
Step = 999 SEND TEvPut with key [1:1:999:0:0:1243807:0] TEvPutResult: TEvPutResult {Id# [1:1:999:0:0:1243807:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999683}
Starting nodes
Start compaction 1
Start checking
|85.5%| [TM] {BAZEL_UPLOAD} ydb/core/blobstorage/ut_blobstorage/ut_balancing/unittest
>> DataShardSnapshotIsolation::ReadWriteUpsertAgainOnCommitNoConflict [GOOD]
>> DataShardSnapshotIsolation::ReadWriteUpsertAgainTwiceOnCommitNoConflict
|85.5%| [TS] {asan, default-linux-x86_64, release} ydb/core/tx/datashard/ut_export/unittest >> ExportS3BufferTest::MinBufferSizeWithCompressionAndEncryption [GOOD]
|85.5%| [TS] {BAZEL_UPLOAD} ydb/core/tx/datashard/ut_export/unittest
>> TControlPlaneProxyCheckNegativePermissionsFailed::ShouldSendDeleteQuery [GOOD]
>> TControlPlaneProxyCheckNegativePermissionsFailed::ShouldSendControlQuery
>> DataShardStats::MultipleChannelsStatsCorrect [GOOD]
>> DataShardStats::HistogramStatsCorrect
>> BasicUsage::CreateTopicWithAvailabilityPeriod [GOOD]
>> BasicUsage::CreateTopicWithCustomMetricsLevel
|85.5%| [TA] $(B)/ydb/core/blobstorage/ut_blobstorage/ut_balancing/test-results/unittest/{meta.json ... results_accumulator.log}
|85.5%| [TA] {BAZEL_UPLOAD} $(B)/ydb/core/blobstorage/ut_blobstorage/ut_balancing/test-results/unittest/{meta.json ... results_accumulator.log}
>> TControlPlaneProxyCheckNegativePermissionsFailed::ShouldSendControlQuery [GOOD]
>> TControlPlaneProxyCheckNegativePermissionsFailed::ShouldSendGetResultData
|85.5%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/blobstorage/nodewarden/ut_sequence/ydb-core-blobstorage-nodewarden-ut_sequence
|85.5%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/blobstorage/nodewarden/ut_sequence/ydb-core-blobstorage-nodewarden-ut_sequence
|85.5%| [TM] {RESULT} ydb/core/tx/time_cast/ut/unittest
>> BasicUsage::TWriteSession_WriteAndReadAndCommitRandomMessagesNoClusterDiscovery [GOOD]
>> BasicUsage::TSimpleWriteSession_AutoSeqNo_BasicUsage
>> Graph::MemoryBackendFullCycle [GOOD]
>> Graph::LocalBackendFullCycle
|85.5%| [TS] {RESULT} ydb/core/tx/datashard/ut_export/unittest
|85.5%| [TA] {RESULT} $(B)/ydb/core/blobstorage/ut_blobstorage/ut_balancing/test-results/unittest/{meta.json ... results_accumulator.log}
|85.5%| [LD] {RESULT} $(B)/ydb/core/blobstorage/nodewarden/ut_sequence/ydb-core-blobstorage-nodewarden-ut_sequence
>> TControlPlaneProxyCheckNegativePermissionsFailed::ShouldSendGetResultData [GOOD]
>> TControlPlaneProxyCheckNegativePermissionsFailed::ShouldSendListJobs
>> TabletService_ExecuteMiniKQL::DryRunEraseRow [GOOD]
>> TabletService_ExecuteMiniKQL::OnlyAdminsAllowed
>> SequenceProxy::Basics [GOOD]
>> SequenceProxy::DropRecreate
>> TMLPConsumerTests::RecreateConsumer [GOOD]
>> TMLPConsumerTests::ReloadPQTabletAfterAlterConsumer
>> TControlPlaneProxyCheckNegativePermissionsFailed::ShouldSendListJobs [GOOD]
>> TControlPlaneProxyCheckNegativePermissionsFailed::ShouldSendDescribeJob
>> TControlPlaneProxyCheckNegativePermissionsFailed::ShouldSendDescribeJob [GOOD]
>> TControlPlaneProxyCheckNegativePermissionsFailed::ShouldSendCreateConnection
>> TControlPlaneProxyCheckNegativePermissionsFailed::ShouldSendCreateConnection [GOOD]
>> TControlPlaneProxyCheckNegativePermissionsFailed::ShouldSendCreateConnectionWithServiceAccount
|85.5%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/datashard/ut_init/ydb-core-tx-datashard-ut_init
|85.5%| [LD] {RESULT} $(B)/ydb/core/tx/datashard/ut_init/ydb-core-tx-datashard-ut_init
|85.5%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/datashard/ut_init/ydb-core-tx-datashard-ut_init
>> DataShardReplication::ApplyChangesToCommonTable [GOOD]
>> DataShardReplication::ApplyChangesWithConcurrentTx
>> Vacuum::VacuumWithoutCompaction [GOOD]
>> Vacuum::MultipleVacuums
>> TControlPlaneProxyCheckNegativePermissionsFailed::ShouldSendCreateConnectionWithServiceAccount [GOOD]
>> TControlPlaneProxyCheckNegativePermissionsFailed::ShouldSendListConnections
>> TestPurecalcFilter::Watermark [GOOD]
>> NodeWardenDsProxyConfigRetrieval::Disconnect
>> test.py::test[solomon-UnknownSetting-] [GOOD]
>> TControlPlaneProxyCheckNegativePermissionsFailed::ShouldSendListConnections [GOOD]
>> TControlPlaneProxyCheckNegativePermissionsFailed::ShouldSendDescribeConnection
>> TestPurecalcFilter::WatermarkWhere
>> TMemoryController::Counters [GOOD]
>> TMemoryController::Counters_HardLimit
>> TCreateAndDropViewTest::CreateSameViewTwice [GOOD]
>> TCreateAndDropViewTest::CreateViewOccupiedName
>> SequenceProxy::DropRecreate [GOOD]
>> TControlPlaneProxyCheckNegativePermissionsFailed::ShouldSendDescribeConnection [GOOD]
>> TControlPlaneProxyCheckNegativePermissionsFailed::ShouldSendModifyConnection
>> NodeWardenDsProxyConfigRetrieval::Disconnect [GOOD]
>> TControlPlaneProxyCheckNegativePermissionsFailed::ShouldSendModifyConnection [GOOD]
>> TControlPlaneProxyCheckNegativePermissionsFailed::ShouldSendModifyConnectionWithServiceAccount
------- [TS] {asan, default-linux-x86_64, release} ydb/core/tx/sequenceproxy/ut/unittest >> SequenceProxy::DropRecreate [GOOD]
Test command err:
2026-01-08T15:28:25.414609Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7755: Cannot subscribe to console configs
2026-01-08T15:28:25.414703Z node 1 :IMPORT WARN: schemeshard_import.cpp:440: Table profiles were not loaded
2026-01-08T15:28:25.632629Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046578944, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311)
2026-01-08T15:28:26.276121Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateSequence, opId: 281474976715657:0, at schemeshard: 72057594046578944, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_sequence.cpp:555)
2026-01-08T15:28:26.532556Z node 2 :BS_PDISK WARN: {BPD01@blobstorage_pdisk_blockdevice_async.cpp:924} Warning# got EIoResult::FileOpenError from IoContext->Setup PDiskId# 1000
2026-01-08T15:28:26.533097Z node 2 :BS_PDISK CRIT: {BPD39@blobstorage_pdisk_impl.cpp:2980} BlockDevice initialization error Details# Can't open file "/home/runner/.ya/build/build_root/663n/003253/r3tmp/tmp35VHVi/pdisk_1.dat": unknown reason, errno# 0. PDiskId# 1000
2026-01-08T15:28:26.533723Z node 2 :BS_PDISK CRIT: {BPD01@blobstorage_pdisk_actor.cpp:267} PDiskId# 1000 bootstrapped to the StateError, reason# Can't open file "/home/runner/.ya/build/build_root/663n/003253/r3tmp/tmp35VHVi/pdisk_1.dat": unknown reason, errno# 0. Can not be initialized Config: {TPDiskConfig Path# "/home/runner/.ya/build/build_root/663n/003253/r3tmp/tmp35VHVi/pdisk_1.dat" ExpectedPath# "" ExpectedSerial# "" PDiskGuid# 15426061845329373671 PDiskId# 1000 PDiskCategory# {Type# DEVICE_TYPE_ROT Kind# 0} MetadataOnly# 0 StartOwnerRound# 2 SectorMap# true EnableSectorEncryption # 0 ChunkSize# 134217728 SectorSize# 4096 StatisticsUpdateIntervalMs# 1000 SchedulerCfg# {TPDiskSchedulerConfig BytesSchedulerWeight# 1 LogWeight# 1 FreshWeight# 2 CompWeight# 7 SyncLogWeight# 15 HugeWeight# 2 FastReadWeight# 1 OtherReadWeight# 1 LoadWeight# 2 LowReadWeight# 1 MaxChunkReadsPerCycle# 16 MaxChunkReadsDurationPerCycleMs# 0.25 MaxChunkWritesPerCycle# 8 MaxChunkWritesDurationPerCycleMs# 1} MinLogChunksTotal# 4 MaxLogChunksPerOwnerMultiplier# 5 MaxLogChunksPerOwnerDivisor# 4 SortFreeChunksPerItems# 100 GetDriveDataSwitch# DoNotTouch WriteCacheSwitch# DoNotTouch DriveModelSeekTimeNs# 8000000 DriveModelSpeedBps# 127000000 DriveModelSpeedBpsMin# 135000000 DriveModelSpeedBpsMax# 200000000 DriveModelBulkWrieBlockSize# 2097152 DriveModelTrimSpeedBps# 0 ReorderingMs# 50 DeviceInFlight# 4 CostLimitNs# 50000000 BufferPoolBufferSizeBytes# 524288 BufferPoolBufferCount# 256 MaxQueuedCompletionActions# 128 ExpectedSlotCount# 0 SlotSizeInUnits# 0 ReserveLogChunksMultiplier# 56 InsaneLogChunksMultiplier# 40 RedLogChunksMultiplier# 30 OrangeLogChunksMultiplier# 20 WarningLogChunksMultiplier# 4 YellowLogChunksMultiplier# 4 MaxMetadataMegabytes# 32 SpaceColorBorder# GREEN CompletionThreadsCount# 1 UseNoopScheduler# false PlainDataChunks# 0 SeparateHugePriorities# 0} PDiskId# 1000
2026-01-08T15:28:27.643650Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7755: Cannot subscribe to console configs
2026-01-08T15:28:27.643732Z node 3 :IMPORT WARN: schemeshard_import.cpp:440: Table profiles were not loaded
2026-01-08T15:28:27.775969Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046578944, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311)
2026-01-08T15:28:28.424453Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateSequence, opId: 281474976715657:0, at schemeshard: 72057594046578944, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_sequence.cpp:555)
2026-01-08T15:28:28.734291Z node 4 :BS_PDISK WARN: {BPD01@blobstorage_pdisk_blockdevice_async.cpp:924} Warning# got EIoResult::FileOpenError from IoContext->Setup PDiskId# 1000
2026-01-08T15:28:28.734812Z node 4 :BS_PDISK CRIT: {BPD39@blobstorage_pdisk_impl.cpp:2980} BlockDevice initialization error Details# Can't open file "/home/runner/.ya/build/build_root/663n/003253/r3tmp/tmpLMcHO9/pdisk_1.dat": unknown reason, errno# 0. PDiskId# 1000
2026-01-08T15:28:28.735225Z node 4 :BS_PDISK CRIT: {BPD01@blobstorage_pdisk_actor.cpp:267} PDiskId# 1000 bootstrapped to the StateError, reason# Can't open file "/home/runner/.ya/build/build_root/663n/003253/r3tmp/tmpLMcHO9/pdisk_1.dat": unknown reason, errno# 0. Can not be initialized Config: {TPDiskConfig Path# "/home/runner/.ya/build/build_root/663n/003253/r3tmp/tmpLMcHO9/pdisk_1.dat" ExpectedPath# "" ExpectedSerial# "" PDiskGuid# 17415207755024787353 PDiskId# 1000 PDiskCategory# {Type# DEVICE_TYPE_ROT Kind# 0} MetadataOnly# 0 StartOwnerRound# 2 SectorMap# true EnableSectorEncryption # 0 ChunkSize# 134217728 SectorSize# 4096 StatisticsUpdateIntervalMs# 1000 SchedulerCfg# {TPDiskSchedulerConfig BytesSchedulerWeight# 1 LogWeight# 1 FreshWeight# 2 CompWeight# 7 SyncLogWeight# 15 HugeWeight# 2 FastReadWeight# 1 OtherReadWeight# 1 LoadWeight# 2 LowReadWeight# 1 MaxChunkReadsPerCycle# 16 MaxChunkReadsDurationPerCycleMs# 0.25 MaxChunkWritesPerCycle# 8 MaxChunkWritesDurationPerCycleMs# 1} MinLogChunksTotal# 4 MaxLogChunksPerOwnerMultiplier# 5 MaxLogChunksPerOwnerDivisor# 4 SortFreeChunksPerItems# 100 GetDriveDataSwitch# DoNotTouch WriteCacheSwitch# DoNotTouch DriveModelSeekTimeNs# 8000000 DriveModelSpeedBps# 127000000 DriveModelSpeedBpsMin# 135000000 DriveModelSpeedBpsMax# 200000000 DriveModelBulkWrieBlockSize# 2097152 DriveModelTrimSpeedBps# 0 ReorderingMs# 50 DeviceInFlight# 4 CostLimitNs# 50000000 BufferPoolBufferSizeBytes# 524288 BufferPoolBufferCount# 256 MaxQueuedCompletionActions# 128 ExpectedSlotCount# 0 SlotSizeInUnits# 0 ReserveLogChunksMultiplier# 56 InsaneLogChunksMultiplier# 40 RedLogChunksMultiplier# 30 OrangeLogChunksMultiplier# 20 WarningLogChunksMultiplier# 4 YellowLogChunksMultiplier# 4 MaxMetadataMegabytes# 32 SpaceColorBorder# GREEN CompletionThreadsCount# 1 UseNoopScheduler# false PlainDataChunks# 0 SeparateHugePriorities# 0} PDiskId# 1000
2026-01-08T15:28:28.991300Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpDropSequence, opId: 281474976715658:0, at schemeshard: 72057594046578944, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_drop_sequence.cpp:353)
2026-01-08T15:28:29.343947Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateSequence, opId: 281474976715659:0, at schemeshard: 72057594046578944, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_sequence.cpp:555)
|85.5%| [TS] {BAZEL_UPLOAD} ydb/core/tx/sequenceproxy/ut/unittest
|85.5%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/sequenceshard/ut/ydb-core-tx-sequenceshard-ut
|85.5%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/sequenceshard/ut/ydb-core-tx-sequenceshard-ut
|85.5%| [TS] {RESULT} ydb/core/tx/sequenceproxy/ut/unittest
|85.5%| [LD] {RESULT} $(B)/ydb/core/tx/sequenceshard/ut/ydb-core-tx-sequenceshard-ut
|85.5%| [TM] {asan, default-linux-x86_64, pic, release} ydb/library/yql/tests/sql/solomon/pytest >> test.py::test[solomon-UnknownSetting-] [GOOD]
|85.5%| [TM] {BAZEL_UPLOAD} ydb/library/yql/tests/sql/solomon/pytest
>> TControlPlaneProxyCheckNegativePermissionsFailed::ShouldSendModifyConnectionWithServiceAccount [GOOD]
>> TControlPlaneProxyCheckNegativePermissionsFailed::ShouldSendDeleteConnection
>> KeyValueGRPCService::SimpleRenameUnexistedKeyV1 [GOOD]
>> KeyValueGRPCService::SimpleRenameUnexistedKeyV2
------- [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/nodewarden/ut_sequence/unittest >> NodeWardenDsProxyConfigRetrieval::Disconnect [GOOD]
Test command err:
Caught NodeWarden registration actorId# [1:11:2058]
2026-01-08T15:28:29.809408Z node 1 :BS_NODE DEBUG: {NW26@node_warden_impl.cpp:348} Bootstrap
2026-01-08T15:28:29.844496Z node 1 :BS_NODE DEBUG: {NW18@node_warden_resource.cpp:51} ApplyServiceSet IsStatic# true Comprehensive# false Origin# initial ServiceSet# {PDisks { NodeID: 1 PDiskID: 1 Path: "SectorMap:/home/runner/.ya/build/build_root/663n/003509/r3tmp/tmpvefJP7/static.dat" PDiskGuid: 15235435468852645097 PDiskCategory: 0 PDiskConfig { ExpectedSlotCount: 2 } } VDisks { VDiskID { GroupID: 0 GroupGeneration: 1 Ring: 0 Domain: 0 VDisk: 0 } VDiskLocation { NodeID: 1 PDiskID: 1 VDiskSlotID: 1 PDiskGuid: 15235435468852645097 } VDiskKind: Default } Groups { GroupID: 0 GroupGeneration: 1 ErasureSpecies: 0 Rings { FailDomains { VDiskLocations { NodeID: 1 PDiskID: 1 VDiskSlotID: 1 PDiskGuid: 15235435468852645097 } } } } AvailabilityDomains: 0 }
2026-01-08T15:28:29.844801Z node 1 :BS_NODE DEBUG: {NW04@node_warden_pdisk.cpp:235} StartLocalPDisk NodeId# 1 PDiskId# 1 Path# "SectorMap:/home/runner/.ya/build/build_root/663n/003509/r3tmp/tmpvefJP7/static.dat" PDiskCategory# {Type# DEVICE_TYPE_ROT Kind# 0} Temporary# false
2026-01-08T15:28:29.845728Z node 1 :BS_NODE WARN: {NW89@node_warden_pdisk.cpp:124} Can't write new MockDevicesConfig to file Path# /Berkanavt/kikimr/testing/mock_devices.txt
2026-01-08T15:28:29.846132Z node 1 :BS_NODE DEBUG: {NW23@node_warden_vdisk.cpp:69} StartLocalVDiskActor SlayInFlight# false VDiskId# [0:1:0:0:0] VSlotId# 1:1:1 PDiskGuid# 15235435468852645097 DonorMode# false PDiskRestartInFlight# false PDisksWaitingToStart# false
2026-01-08T15:28:29.847249Z node 1 :BS_NODE DEBUG: {NW24@node_warden_vdisk.cpp:287} StartLocalVDiskActor done VDiskId# [0:1:0:0:0] VSlotId# 1:1:1 PDiskGuid# 15235435468852645097
2026-01-08T15:28:29.847318Z node 1 :BS_NODE DEBUG: {NW12@node_warden_proxy.cpp:36} StartLocalProxy GroupId# 0 HasGroupInfo# true GroupInfoGeneration# 1
2026-01-08T15:28:29.848249Z node 1 :BS_NODE DEBUG: {NW21@node_warden_pipe.cpp:23} EstablishPipe AvailDomainId# 0 PipeClientId# [1:29:2076] ControllerId# 72057594037932033
2026-01-08T15:28:29.848287Z node 1 :BS_NODE DEBUG: {NW20@node_warden_pipe.cpp:73} SendRegisterNode
2026-01-08T15:28:29.848396Z node 1 :BS_NODE DEBUG: {NW11@node_warden_impl.cpp:323} StartInvalidGroupProxy GroupId# 4294967295
2026-01-08T15:28:29.848565Z node 1 :BS_NODE DEBUG: {NW62@node_warden_impl.cpp:335} StartRequestReportingThrottler
2026-01-08T15:28:29.888577Z node 1 :BS_NODE DEBUG: {NWDC00@distconf.cpp:27} Bootstrap
2026-01-08T15:28:29.889391Z node 1 :BS_NODE DEBUG: {NWDC40@distconf_persistent_storage.cpp:25} TReaderActor bootstrap Paths# []
2026-01-08T15:28:29.950913Z node 1 :BS_NODE DEBUG: {NWDC53@distconf.cpp:358} StateWaitForInit event Type# 131082 StorageConfigLoaded# false NodeListObtained# false PendingEvents.size# 0
2026-01-08T15:28:29.953081Z node 1 :BS_NODE DEBUG: {NWDC53@distconf.cpp:358} StateWaitForInit event Type# 2146435074 StorageConfigLoaded# false NodeListObtained# false PendingEvents.size# 0
2026-01-08T15:28:29.953135Z node 1 :BS_NODE DEBUG: {NWDC32@distconf_persistent_storage.cpp:219} TEvStorageConfigLoaded Cookie# 0 NumItemsRead# 0
2026-01-08T15:28:29.962943Z node 1 :BS_NODE DEBUG: {NWDC53@distconf.cpp:358} StateWaitForInit event Type# 268639258 StorageConfigLoaded# true NodeListObtained# false PendingEvents.size# 0
2026-01-08T15:28:29.964983Z node 1 :BS_NODE DEBUG: {NWDC53@distconf.cpp:358} StateWaitForInit event Type# 131082 StorageConfigLoaded# true NodeListObtained# false PendingEvents.size# 1
2026-01-08T15:28:29.965047Z node 1 :BS_NODE DEBUG: {NWDC11@distconf_binding.cpp:8} TEvNodesInfo
2026-01-08T15:28:29.965110Z node 1 :BS_NODE DEBUG: {NWDC13@distconf_binding.cpp:26} ApplyNewNodeList NewNodeList# [[::1:12001/1:DC=1/M=1/R=1/U=1/]]
2026-01-08T15:28:29.971350Z node 1 :BS_NODE DEBUG: {NWDC18@distconf_binding.cpp:509} UpdateBound RefererNodeId# 1 NodeId# ::1:12001/1 Meta# {Fingerprint: "2x\240\215\221@\334\033\026m\336@\336P\376\013\243\203M\346" }
2026-01-08T15:28:29.973576Z node 1 :BS_NODE DEBUG: {NWDC53@distconf.cpp:358} StateWaitForInit event Type# 2146435072 StorageConfigLoaded# true NodeListObtained# true PendingEvents.size# 1
2026-01-08T15:28:29.973641Z node 1 :BS_NODE DEBUG: {NWDC15@distconf.cpp:427} StateFunc Type# 268639258 Sender# [1:11:2058] SessionId# [0:0:0] Cookie# 0
2026-01-08T15:28:29.983375Z node 1 :BS_NODE DEBUG: {NW18@node_warden_resource.cpp:51} ApplyServiceSet IsStatic# true Comprehensive# true Origin# distconf ServiceSet# {PDisks { NodeID: 1 PDiskID: 1 Path: "SectorMap:/home/runner/.ya/build/build_root/663n/003509/r3tmp/tmpvefJP7/static.dat" PDiskGuid: 15235435468852645097 PDiskCategory: 0 PDiskConfig { ExpectedSlotCount: 2 } } VDisks { VDiskID { GroupID: 0 GroupGeneration: 1 Ring: 0 Domain: 0 VDisk: 0 } VDiskLocation { NodeID: 1 PDiskID: 1 VDiskSlotID: 1 PDiskGuid: 15235435468852645097 } VDiskKind: Default } Groups { GroupID: 0 GroupGeneration: 1 ErasureSpecies: 0 Rings { FailDomains { VDiskLocations { NodeID: 1 PDiskID: 1 VDiskSlotID: 1 PDiskGuid: 15235435468852645097 } } } } AvailabilityDomains: 0 }
2026-01-08T15:28:29.983629Z node 1 :BS_NODE DEBUG: {NW110@node_warden_pdisk.cpp:540} ApplyServiceSetPDisks PDiskId# 1 NewExpectedSlotCount# 2 OldExpectedSlotCount# 2 NewSlotSizeInUnits# 0 OldSlotSizeInUnits# 0
2026-01-08T15:28:29.984097Z node 1 :BS_NODE DEBUG: {NWDC15@distconf.cpp:427} StateFunc Type# 268639248 Sender# [1:11:2058] SessionId# [0:0:0] Cookie# 0
2026-01-08T15:28:30.003485Z node 1 :BS_NODE DEBUG: {NW47@node_warden_impl.cpp:1230} Handle(TEvStatusUpdate)
2026-01-08T15:28:30.005226Z node 1 :BS_NODE DEBUG: {NW47@node_warden_impl.cpp:1230} Handle(TEvStatusUpdate)
2026-01-08T15:28:30.023765Z node 1 :BS_CONTROLLER DEBUG: {BSC05@impl.h:2080} StateInit event Type# 268828672 Event# NKikimr::TEvTablet::TEvBoot
2026-01-08T15:28:30.033498Z node 1 :BS_CONTROLLER DEBUG: {BSC05@impl.h:2080} StateInit event Type# 268828673 Event# NKikimr::TEvTablet::TEvRestored
2026-01-08T15:28:30.034056Z node 1 :BS_CONTROLLER DEBUG: {BSC22@console_interaction.cpp:14} Console interaction started
2026-01-08T15:28:30.039072Z node 1 :BS_CONTROLLER DEBUG: {BSC05@impl.h:2080} StateInit event Type# 268828684 Event# NKikimr::TEvTablet::TEvFollowerSyncComplete
2026-01-08T15:28:30.040480Z node 1 :BS_CONTROLLER DEBUG: {BSC05@impl.h:2080} StateInit event Type# 268639244 Event# NKikimr::TEvNodeWardenStorageConfig
2026-01-08T15:28:30.041426Z node 1 :BS_CONTROLLER DEBUG: {BSC05@impl.h:2080} StateInit event Type# 131082 Event# NActors::TEvInterconnect::TEvNodesInfo
2026-01-08T15:28:30.041475Z node 1 :BS_CONTROLLER DEBUG: {BSC01@bsc.cpp:829} Handle TEvInterconnect::TEvNodesInfo
2026-01-08T15:28:30.041705Z node 1 :BS_CONTROLLER DEBUG: {BSCTXIS01@init_scheme.cpp:17} TTxInitScheme Execute
2026-01-08T15:28:30.072885Z node 1 :BS_CONTROLLER DEBUG: {BSCTXIS03@init_scheme.cpp:44} TTxInitScheme Complete
2026-01-08T15:28:30.073043Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM01@migrate.cpp:190} Execute tx
2026-01-08T15:28:30.073275Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM02@migrate.cpp:251} Complete tx IncompatibleData# false
2026-01-08T15:28:30.073634Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxTrimUnusedSlots
2026-01-08T15:28:30.073750Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxTrimUnusedSlots
2026-01-08T15:28:30.074525Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateSchemaVersion
2026-01-08T15:28:30.125583Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateSchemaVersion
2026-01-08T15:28:30.125774Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxGenerateInstanceId
2026-01-08T15:28:30.138203Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxGenerateInstanceId
2026-01-08T15:28:30.138364Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateStaticPDiskInfo
2026-01-08T15:28:30.138460Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateStaticPDiskInfo
2026-01-08T15:28:30.138567Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxFillInNonNullConfigForPDisk
2026-01-08T15:28:30.138704Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxFillInNonNullConfigForPDisk
2026-01-08T15:28:30.138788Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxDropDriveStatus
2026-01-08T15:28:30.138836Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxDropDriveStatus
2026-01-08T15:28:30.138920Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateCompatibilityInfo
2026-01-08T15:28:30.151653Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateCompatibilityInfo
2026-01-08T15:28:30.151816Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateEnableConfigV2
2026-01-08T15:28:30.163992Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateEnableConfigV2
2026-01-08T15:28:30.164128Z node 1 :BS_CONTROLLER DEBUG: {BSCTXLE01@load_everything.cpp:21} TTxLoadEverything Execute
2026-01-08T15:28:30.165598Z node 1 :BS_CONTROLLER DEBUG: {BSCTXLE03@load_everything.cpp:705} TTxLoadEverything Complete
2026-01-08T15:28:30.165645Z node 1 :BS_CONTROLLER DEBUG: {BSC09@impl.h:2211} LoadFinished
2026-01-08T15:28:30.183110Z node 1 :BS_CONTROLLER DEBUG: {BSC18@console_interaction.cpp:31} Console connection service started
2026-01-08T15:28:30.183189Z node 1 :BS_CONTROLLER DEBUG: {BSCTXLE04@load_everything.cpp:710} TTxLoadEverything InitQueue processed
2026-01-08T15:28:30.183448Z node 1 :BS_NODE DEBUG: {NWDC15@distconf.cpp:427} StateFunc Type# 268639257 Sender# [1:92:2123] SessionId# [0:0:0] Cookie# 0
Pipe connected clientId# [1:29:2076]
2026-01-08T15:28:30.183949Z node 1 :BS_NODE DEBUG: {NW05@node_warden_pipe.cpp:53} TEvTabletPipe::TEvClientConnected OK ClientId# [1:29:2076] ServerId# [1:125:2147] TabletId# 72057594037932033 PipeClientId# [1:29:2076]
2026-01-08T15:28:30.184408Z node 1 :BS_CONTROLLER DEBUG: {BSCTXRN01@register_node.cpp:216} Handle TEvControllerRegisterNode Request# {NodeID: 1 VDiskStatus { VDiskId { GroupID: 0 GroupGeneration: 1 Ring: 0 Domain: 0 VDisk: 0 } NodeId: 1 PDiskId: 1 VSlotId: 1 PDiskGuid: 15235435468852645097 Status: INIT_PENDING OnlyPhantomsRemain: false } DeclarativePDiskManagement: true }
2026-01-08T15:28:30.184493Z node 1 :BS_CONTROLLER DEBUG: {BSCBR00@bridge.cpp:269} ApplySyncerState NodeId# 1 Update# {} Comprehensive# true
2026-01-08T15:28:30.185361Z node 1 :BS_CONTROLLER DEBUG: {BSCTXCC01@config_cmd.cpp:433} Execute TEvControllerConfigRequest Request# {Command { DefineHostConfig { HostConfigId: 1 Drive { Path: "SectorMap:/home/runner/.ya/build/build_root/6 ... g for pipe to establish ===
=== Breaking pipe ===
=== Sending put ===
Pipe disconnected clientId# [1:29:2076]
2026-01-08T15:28:30.208886Z node 1 :BS_NODE ERROR: {NW42@node_warden_pipe.cpp:60} Handle(TEvTabletPipe::TEvClientDestroyed) ClientId# [1:29:2076] ServerId# [1:125:2147] TabletId# 72057594037932033 PipeClientId# [1:29:2076]
2026-01-08T15:28:30.208972Z node 1 :BS_NODE DEBUG: {NW21@node_warden_pipe.cpp:23} EstablishPipe AvailDomainId# 0 PipeClientId# [1:139:2160] ControllerId# 72057594037932033
2026-01-08T15:28:30.209003Z node 1 :BS_NODE DEBUG: {NW20@node_warden_pipe.cpp:73} SendRegisterNode
2026-01-08T15:28:30.209422Z node 1 :BS_NODE DEBUG: {NW46@node_warden_proxy.cpp:143} HandleForwarded GroupId# 2147483648 EnableProxyMock# false NoGroup# false
2026-01-08T15:28:30.209471Z node 1 :BS_NODE DEBUG: {NW12@node_warden_proxy.cpp:36} StartLocalProxy GroupId# 2147483648 HasGroupInfo# false GroupInfoGeneration#
2026-01-08T15:28:30.209508Z node 1 :BS_NODE DEBUG: {NW98@node_warden_group.cpp:285} RequestGroupConfig GroupId# 2147483648
2026-01-08T15:28:30.209739Z node 1 :BS_NODE INFO: {NW79@node_warden_group_resolver.cpp:74} TGroupResolverActor::Bootstrap GroupId# 2147483648
2026-01-08T15:28:30.210007Z node 1 :BS_NODE DEBUG: {NWDC15@distconf.cpp:427} StateFunc Type# 268639258 Sender# [1:11:2058] SessionId# [0:0:0] Cookie# 0
2026-01-08T15:28:30.210286Z node 1 :BS_NODE DEBUG: {NWDC15@distconf.cpp:427} StateFunc Type# 268639258 Sender# [1:11:2058] SessionId# [0:0:0] Cookie# 0
2026-01-08T15:28:30.213451Z node 1 :BS_NODE INFO: {NW81@node_warden_group_resolver.cpp:270} TGroupResolverActor::PassAway GroupId# 2147483648
Pipe connected clientId# [1:139:2160]
2026-01-08T15:28:30.214074Z node 1 :BS_NODE DEBUG: {NW05@node_warden_pipe.cpp:53} TEvTabletPipe::TEvClientConnected OK ClientId# [1:139:2160] ServerId# [1:150:2169] TabletId# 72057594037932033 PipeClientId# [1:139:2160]
2026-01-08T15:28:30.214279Z node 1 :BS_CONTROLLER DEBUG: {BSCTXRN01@register_node.cpp:216} Handle TEvControllerRegisterNode Request# {NodeID: 1 VDiskStatus { VDiskId { GroupID: 0 GroupGeneration: 1 Ring: 0 Domain: 0 VDisk: 0 } NodeId: 1 PDiskId: 1 VSlotId: 1 PDiskGuid: 15235435468852645097 Status: READY OnlyPhantomsRemain: false } DeclarativePDiskManagement: true }
2026-01-08T15:28:30.214382Z node 1 :BS_CONTROLLER DEBUG: {BSCBR00@bridge.cpp:269} ApplySyncerState NodeId# 1 Update# {} Comprehensive# true
2026-01-08T15:28:30.214683Z node 1 :BS_CONTROLLER DEBUG: {BSC11@scrub.cpp:214} Handle(TEvControllerScrubQuantumFinished) Msg# {VSlotId { NodeId: 1 PDiskId: 1 VSlotId: 1 } Success: true }
2026-01-08T15:28:30.214893Z node 1 :BS_CONTROLLER DEBUG: {BSCTXGG02@get_group.cpp:58} TEvControllerGetGroup Sender# [1:11:2058] Cookie# 0 Recipient# [1:150:2169] RecipientRewrite# [1:92:2123] Request# {NodeID: 1 GroupIDs: 2147483648 } StopGivingGroups# false
2026-01-08T15:28:30.214965Z node 1 :BS_CONTROLLER DEBUG: {BSCTXGG01@get_group.cpp:22} Handle TEvControllerGetGroup Request# {NodeID: 1 GroupIDs: 2147483648 }
2026-01-08T15:28:30.215090Z node 1 :BS_CONTROLLER DEBUG: {BSC10@scrub.cpp:187} Handle(TEvControllerScrubQueryStartQuantum) Msg# {VSlotId { NodeId: 1 PDiskId: 1 VSlotId: 1 } }
2026-01-08T15:28:30.215215Z node 1 :BS_CONTROLLER DEBUG: {BSCTXUDM01@disk_metrics.cpp:68} Updating disk status Record# {VDisksMetrics { VDiskId { GroupID: 0 GroupGeneration: 1 Ring: 0 Domain: 0 VDisk: 0 } SatisfactionRank: 0 VSlotId { NodeId: 1 PDiskId: 1 VSlotId: 1 } State: OK Replicated: true DiskSpace: Green IsThrottling: false ThrottlingRate: 0 } }
2026-01-08T15:28:30.244324Z node 1 :BS_NODE DEBUG: {NW52@node_warden_impl.cpp:845} TEvControllerNodeServiceSetUpdate Record# {Status: OK NodeID: 1 ServiceSet { PDisks { NodeID: 1 PDiskID: 1 Path: "SectorMap:/home/runner/.ya/build/build_root/663n/003509/r3tmp/tmpvefJP7/static.dat" PDiskGuid: 15235435468852645097 PDiskCategory: 0 PDiskConfig { ExpectedSlotCount: 2 } EntityStatus: INITIAL ExpectedSerial: "" ManagementStage: DISCOVER_SERIAL SpaceColorBorder: GREEN } VDisks { VDiskID { GroupID: 2147483648 GroupGeneration: 1 Ring: 0 Domain: 0 VDisk: 0 } VDiskLocation { NodeID: 1 PDiskID: 1 VDiskSlotID: 1000 PDiskGuid: 15235435468852645097 } VDiskKind: Default StoragePoolName: "" GroupSizeInUnits: 0 } Groups { GroupID: 2147483648 GroupGeneration: 1 ErasureSpecies: 0 Rings { FailDomains { VDiskLocations { NodeID: 1 PDiskID: 1 VDiskSlotID: 1000 PDiskGuid: 15235435468852645097 } } } EncryptionMode: 0 LifeCyclePhase: 0 MainKeyId: "" EncryptedGroupKey: "" GroupKeyNonce: 2147483648 MainKeyVersion: 0 StoragePoolName: "" DeviceType: ROT } } InstanceId: "a4571b25-ac8e641a-d74b0901-f7e8566b" Comprehensive: true AvailDomain: 0 UpdateSyncers: true }
2026-01-08T15:28:30.244562Z node 1 :BS_NODE DEBUG: {NW17@node_warden_impl.cpp:863} Handle(TEvBlobStorage::TEvControllerNodeServiceSetUpdate) Msg# {Status: OK NodeID: 1 ServiceSet { PDisks { NodeID: 1 PDiskID: 1 Path: "SectorMap:/home/runner/.ya/build/build_root/663n/003509/r3tmp/tmpvefJP7/static.dat" PDiskGuid: 15235435468852645097 PDiskCategory: 0 PDiskConfig { ExpectedSlotCount: 2 } EntityStatus: INITIAL ExpectedSerial: "" ManagementStage: DISCOVER_SERIAL SpaceColorBorder: GREEN } VDisks { VDiskID { GroupID: 2147483648 GroupGeneration: 1 Ring: 0 Domain: 0 VDisk: 0 } VDiskLocation { NodeID: 1 PDiskID: 1 VDiskSlotID: 1000 PDiskGuid: 15235435468852645097 } VDiskKind: Default StoragePoolName: "" GroupSizeInUnits: 0 } Groups { GroupID: 2147483648 GroupGeneration: 1 ErasureSpecies: 0 Rings { FailDomains { VDiskLocations { NodeID: 1 PDiskID: 1 VDiskSlotID: 1000 PDiskGuid: 15235435468852645097 } } } EncryptionMode: 0 LifeCyclePhase: 0 MainKeyId: "" EncryptedGroupKey: "" GroupKeyNonce: 2147483648 MainKeyVersion: 0 StoragePoolName: "" DeviceType: ROT } } InstanceId: "a4571b25-ac8e641a-d74b0901-f7e8566b" Comprehensive: true AvailDomain: 0 UpdateSyncers: true }
2026-01-08T15:28:30.244814Z node 1 :BS_NODE DEBUG: {NW18@node_warden_resource.cpp:51} ApplyServiceSet IsStatic# false Comprehensive# true Origin# controller ServiceSet# {PDisks { NodeID: 1 PDiskID: 1 Path: "SectorMap:/home/runner/.ya/build/build_root/663n/003509/r3tmp/tmpvefJP7/static.dat" PDiskGuid: 15235435468852645097 PDiskCategory: 0 PDiskConfig { ExpectedSlotCount: 2 } EntityStatus: INITIAL ExpectedSerial: "" ManagementStage: DISCOVER_SERIAL SpaceColorBorder: GREEN } VDisks { VDiskID { GroupID: 2147483648 GroupGeneration: 1 Ring: 0 Domain: 0 VDisk: 0 } VDiskLocation { NodeID: 1 PDiskID: 1 VDiskSlotID: 1000 PDiskGuid: 15235435468852645097 } VDiskKind: Default StoragePoolName: "" GroupSizeInUnits: 0 } Groups { GroupID: 2147483648 GroupGeneration: 1 ErasureSpecies: 0 Rings { FailDomains { VDiskLocations { NodeID: 1 PDiskID: 1 VDiskSlotID: 1000 PDiskGuid: 15235435468852645097 } } } EncryptionMode: 0 LifeCyclePhase: 0 MainKeyId: "" EncryptedGroupKey: "" GroupKeyNonce: 2147483648 MainKeyVersion: 0 StoragePoolName: "" DeviceType: ROT } }
2026-01-08T15:28:30.245010Z node 1 :BS_NODE DEBUG: {NW110@node_warden_pdisk.cpp:540} ApplyServiceSetPDisks PDiskId# 1 NewExpectedSlotCount# 2 OldExpectedSlotCount# 2 NewSlotSizeInUnits# 0 OldSlotSizeInUnits# 0
2026-01-08T15:28:30.245089Z node 1 :BS_NODE DEBUG: {NW110@node_warden_pdisk.cpp:540} ApplyServiceSetPDisks PDiskId# 1 NewExpectedSlotCount# 2 OldExpectedSlotCount# 2 NewSlotSizeInUnits# 0 OldSlotSizeInUnits# 0
2026-01-08T15:28:30.245192Z node 1 :BS_NODE DEBUG: {NW23@node_warden_vdisk.cpp:69} StartLocalVDiskActor SlayInFlight# false VDiskId# [80000000:1:0:0:0] VSlotId# 1:1:1000 PDiskGuid# 15235435468852645097 DonorMode# false PDiskRestartInFlight# false PDisksWaitingToStart# false
2026-01-08T15:28:30.246136Z node 1 :BS_NODE DEBUG: {NW24@node_warden_vdisk.cpp:287} StartLocalVDiskActor done VDiskId# [80000000:1:0:0:0] VSlotId# 1:1:1000 PDiskGuid# 15235435468852645097
2026-01-08T15:28:30.246895Z node 1 :BS_NODE DEBUG: {NW52@node_warden_impl.cpp:845} TEvControllerNodeServiceSetUpdate Record# {Status: OK NodeID: 1 ServiceSet { Groups { GroupID: 2147483648 GroupGeneration: 1 ErasureSpecies: 0 Rings { FailDomains { VDiskLocations { NodeID: 1 PDiskID: 1 VDiskSlotID: 1000 PDiskGuid: 15235435468852645097 } } } EncryptionMode: 0 LifeCyclePhase: 0 MainKeyId: "" EncryptedGroupKey: "" GroupKeyNonce: 2147483648 MainKeyVersion: 0 StoragePoolName: "" DeviceType: ROT } } }
2026-01-08T15:28:30.247035Z node 1 :BS_NODE DEBUG: {NW17@node_warden_impl.cpp:863} Handle(TEvBlobStorage::TEvControllerNodeServiceSetUpdate) Msg# {Status: OK NodeID: 1 ServiceSet { Groups { GroupID: 2147483648 GroupGeneration: 1 ErasureSpecies: 0 Rings { FailDomains { VDiskLocations { NodeID: 1 PDiskID: 1 VDiskSlotID: 1000 PDiskGuid: 15235435468852645097 } } } EncryptionMode: 0 LifeCyclePhase: 0 MainKeyId: "" EncryptedGroupKey: "" GroupKeyNonce: 2147483648 MainKeyVersion: 0 StoragePoolName: "" DeviceType: ROT } } }
2026-01-08T15:28:30.247375Z node 1 :BS_NODE DEBUG: {NW18@node_warden_resource.cpp:51} ApplyServiceSet IsStatic# false Comprehensive# false Origin# controller ServiceSet# {Groups { GroupID: 2147483648 GroupGeneration: 1 ErasureSpecies: 0 Rings { FailDomains { VDiskLocations { NodeID: 1 PDiskID: 1 VDiskSlotID: 1000 PDiskGuid: 15235435468852645097 } } } EncryptionMode: 0 LifeCyclePhase: 0 MainKeyId: "" EncryptedGroupKey: "" GroupKeyNonce: 2147483648 MainKeyVersion: 0 StoragePoolName: "" DeviceType: ROT } }
2026-01-08T15:28:30.247540Z node 1 :BS_NODE DEBUG: {NW110@node_warden_pdisk.cpp:540} ApplyServiceSetPDisks PDiskId# 1 NewExpectedSlotCount# 2 OldExpectedSlotCount# 2 NewSlotSizeInUnits# 0 OldSlotSizeInUnits# 0
2026-01-08T15:28:30.247598Z node 1 :BS_NODE DEBUG: {NW110@node_warden_pdisk.cpp:540} ApplyServiceSetPDisks PDiskId# 1 NewExpectedSlotCount# 2 OldExpectedSlotCount# 2 NewSlotSizeInUnits# 0 OldSlotSizeInUnits# 0
2026-01-08T15:28:30.250293Z node 1 :BS_CONTROLLER DEBUG: {BSCTXRN05@register_node.cpp:34} Add devicesData from NodeWarden NodeId# 1 Devices# []
2026-01-08T15:28:30.257619Z node 1 :BS_CONTROLLER DEBUG: {BSCTXUDM01@disk_metrics.cpp:68} Updating disk status Record# {VDiskStatus { VDiskId { GroupID: 2147483648 GroupGeneration: 1 Ring: 0 Domain: 0 VDisk: 0 } NodeId: 1 PDiskId: 1 VSlotId: 1000 PDiskGuid: 15235435468852645097 Status: INIT_PENDING OnlyPhantomsRemain: false } }
2026-01-08T15:28:30.258221Z node 1 :BS_CONTROLLER DEBUG: {BSCTXUDM01@disk_metrics.cpp:68} Updating disk status Record# {VDisksMetrics { VDiskId { GroupID: 2147483648 GroupGeneration: 1 Ring: 0 Domain: 0 VDisk: 0 } VSlotId { NodeId: 1 PDiskId: 1 VSlotId: 1000 } State: Initial Replicated: false DiskSpace: Green } }
2026-01-08T15:28:30.265226Z node 1 :BS_CONTROLLER DEBUG: {BSC10@scrub.cpp:187} Handle(TEvControllerScrubQueryStartQuantum) Msg# {VSlotId { NodeId: 1 PDiskId: 1 VSlotId: 1000 } }
2026-01-08T15:28:30.284303Z node 1 :BS_NODE DEBUG: {NW47@node_warden_impl.cpp:1230} Handle(TEvStatusUpdate)
2026-01-08T15:28:30.284600Z node 1 :BS_CONTROLLER DEBUG: {BSCTXUDM01@disk_metrics.cpp:68} Updating disk status Record# {VDiskStatus { VDiskId { GroupID: 2147483648 GroupGeneration: 1 Ring: 0 Domain: 0 VDisk: 0 } NodeId: 1 PDiskId: 1 VSlotId: 1000 PDiskGuid: 15235435468852645097 Status: REPLICATING OnlyPhantomsRemain: false } }
2026-01-08T15:28:30.285764Z node 1 :BS_NODE DEBUG: {NW47@node_warden_impl.cpp:1230} Handle(TEvStatusUpdate)
2026-01-08T15:28:30.286063Z node 1 :BS_CONTROLLER DEBUG: {BSCTXUDM01@disk_metrics.cpp:68} Updating disk status Record# {VDiskStatus { VDiskId { GroupID: 2147483648 GroupGeneration: 1 Ring: 0 Domain: 0 VDisk: 0 } NodeId: 1 PDiskId: 1 VSlotId: 1000 PDiskGuid: 15235435468852645097 Status: READY OnlyPhantomsRemain: false } }
2026-01-08T15:28:30.979962Z node 1 :BS_CONTROLLER DEBUG: {BSCTXUDM01@disk_metrics.cpp:68} Updating disk status Record# {PDisksMetrics { PDiskId: 1 AvailableSize: 34189869056 TotalSize: 34359738368 MaxReadThroughput: 127000000 MaxWriteThroughput: 127000000 NonRealTimeMs: 0 SlowDeviceMs: 0 MaxIOPS: 125 EnforcedDynamicSlotSize: 17041457152 State: Normal SlotCount: 2 SlotSizeInUnits: 0 PDiskUsage: 0.10449320794148381 } }
|85.5%| [TM] {BAZEL_UPLOAD} ydb/core/blobstorage/nodewarden/ut_sequence/unittest
>> TTxDataShardTestInit::TestGetShardStateAfterInitialization
|85.5%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/long_tx_service/ut/ydb-core-tx-long_tx_service-ut
|85.5%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/long_tx_service/ut/ydb-core-tx-long_tx_service-ut
|85.5%| [TM] {RESULT} ydb/library/yql/tests/sql/solomon/pytest
|85.5%| [TM] {RESULT} ydb/core/blobstorage/nodewarden/ut_sequence/unittest
|85.5%| [LD] {RESULT} $(B)/ydb/core/tx/long_tx_service/ut/ydb-core-tx-long_tx_service-ut
>> PersQueueSdkReadSessionTest::ReadSessionWithAbort [GOOD]
>> PersQueueSdkReadSessionTest::ReadSessionWithClose
>> TControlPlaneProxyCheckNegativePermissionsFailed::ShouldSendDeleteConnection [GOOD]
>> TControlPlaneProxyCheckNegativePermissionsFailed::ShouldSendTestConnection
>> TSequence::SequencesIndex [GOOD]
>> TSequence::CreateTableWithDefaultFromSequenceFromSelect
>> DataShardSnapshotIsolation::ReadWriteUpsertAgainTwiceOnCommitNoConflict [GOOD]
>> DataShardSnapshotIsolation::ReadWriteUncommittedUpsertBlockedByVolatileConflict
>> TControlPlaneProxyCheckNegativePermissionsFailed::ShouldSendTestConnection [GOOD]
>> TControlPlaneProxyCheckNegativePermissionsFailed::ShouldSendTestConnectionWithServiceAccount
>> TTxDataShardTestInit::TestGetShardStateAfterInitialization [GOOD]
>> TTxDataShardTestInit::TestTableHasPath
>> TabletService_ExecuteMiniKQL::OnlyAdminsAllowed [GOOD]
>> TabletService_Restart::Basics
>> TControlPlaneProxyCheckNegativePermissionsFailed::ShouldSendTestConnectionWithServiceAccount [GOOD]
>> TControlPlaneProxyCheckNegativePermissionsFailed::ShouldSendCreateBinding
>> SequenceShardTests::Basics
>> TListAllTopicsTests::RecursiveList [GOOD]
>> TListAllTopicsTests::ListLimitAndPaging
>> TControlPlaneProxyCheckNegativePermissionsFailed::ShouldSendCreateBinding [GOOD]
>> TControlPlaneProxyCheckNegativePermissionsFailed::ShouldSendListBindings
>> TestPurecalcFilter::WatermarkWhere [GOOD]
>> LongTxService::BasicTransactions
>> SequenceShardTests::Basics [GOOD]
>> SequenceShardTests::MarkedPipeRetries
>> TControlPlaneProxyCheckNegativePermissionsFailed::ShouldSendListBindings [GOOD]
>> TControlPlaneProxyCheckNegativePermissionsFailed::ShouldSendDescribeBinding
|85.5%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/persqueue/pqtablet/quota/ut/ydb-core-persqueue-pqtablet-quota-ut
|85.5%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/persqueue/pqtablet/quota/ut/ydb-core-persqueue-pqtablet-quota-ut
|85.5%| [LD] {RESULT} $(B)/ydb/core/persqueue/pqtablet/quota/ut/ydb-core-persqueue-pqtablet-quota-ut
|85.5%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/services/rate_limiter/ut/ydb-services-rate_limiter-ut
|85.5%| [LD] {RESULT} $(B)/ydb/services/rate_limiter/ut/ydb-services-rate_limiter-ut
|85.5%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/services/rate_limiter/ut/ydb-services-rate_limiter-ut
>> Discovery::SecondDiscovererRequestBeforeNameserviceResponse [GOOD]
>> TestPurecalcFilter::WatermarkWhereFalse
|85.5%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/client/minikql_compile/ut/ydb-core-client-minikql_compile-ut
|85.5%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/client/minikql_compile/ut/ydb-core-client-minikql_compile-ut
|85.5%| [LD] {RESULT} $(B)/ydb/core/client/minikql_compile/ut/ydb-core-client-minikql_compile-ut
>> SequenceShardTests::MarkedPipeRetries [GOOD]
>> SequenceShardTests::FreezeRestoreRedirect
>> TControlPlaneProxyCheckNegativePermissionsFailed::ShouldSendDescribeBinding [GOOD]
>> TControlPlaneProxyCheckNegativePermissionsFailed::ShouldSendModifyBinding
>> Sticking::Check [GOOD]
>> DescribeSchemaSecretsService::BatchRequest [GOOD]
>> DescribeSchemaSecretsService::BigBatchRequest
>> LongTxService::BasicTransactions [GOOD]
>> LongTxService::AcquireSnapshot
>> TMemoryController::Counters [GOOD]
>> TMemoryController::Counters_HardLimit
>> TMemoryController::Counters_HardLimit [GOOD]
>> TMemoryController::Counters_NoHardLimit
>> DataShardReplication::ApplyChangesToCommonTable [GOOD]
>> DataShardReplication::ApplyChangesWithConcurrentTx
>> DataShardReplication::ApplyChangesWithConcurrentTx [GOOD]
>> SequenceShardTests::FreezeRestoreRedirect [GOOD]
>> SequenceShardTests::NegativeIncrement
>> TControlPlaneProxyCheckNegativePermissionsFailed::ShouldSendModifyBinding [GOOD]
>> TControlPlaneProxyCheckNegativePermissionsFailed::ShouldSendDeleteBinding
>> Vacuum::MultipleVacuums [GOOD]
>> Vacuum::MultipleVacuumsWithOldGenerations
>> Graph::LocalBackendFullCycle [GOOD]
>> Graph::MemoryBordersOnGet
------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/discovery/unittest >> Discovery::SecondDiscovererRequestBeforeNameserviceResponse [GOOD]
Test command err:
Trying to start YDB, gRPC: 3234, MsgBus: 18744
2026-01-08T15:28:17.609152Z node 1 :METADATA_PROVIDER WARN: log.cpp:841: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7593012971458812391:2129];send_to=[0:7307199536658146131:7762515];
2026-01-08T15:28:17.609202Z node 1 :METADATA_PROVIDER ERROR: log.cpp:841: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message;
2026-01-08T15:28:17.880848Z node 1 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: LookupError, path: Root/.metadata/script_executions
2026-01-08T15:28:17.910706Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected
2026-01-08T15:28:17.910816Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting
2026-01-08T15:28:17.966763Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected
2026-01-08T15:28:17.978201Z node 1 :IMPORT WARN: schemeshard_import.cpp:440: Table profiles were not loaded
2026-01-08T15:28:18.014489Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1374: Notification cookie mismatch for subscription [1:7593012971458812303:2081] 1767886097582979 != 1767886097582982
2026-01-08T15:28:18.075188Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe)
2026-01-08T15:28:18.075213Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe)
2026-01-08T15:28:18.075221Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe)
2026-01-08T15:28:18.075291Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration
2026-01-08T15:28:18.078205Z node 1 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: PathErrorUnknown, path: Root/.metadata/script_executions
2026-01-08T15:28:18.444325Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311)
2026-01-08T15:28:18.500810Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689)
2026-01-08T15:28:18.618501Z node 1 :TX_CONVEYOR ERROR: log.cpp:841: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup;
2026-01-08T15:28:18.621075Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689)
2026-01-08T15:28:18.755197Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689)
2026-01-08T15:28:18.829954Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689)
2026-01-08T15:28:21.134399Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7593012988638683361:2399], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions }
2026-01-08T15:28:21.134541Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:263: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions }
2026-01-08T15:28:21.136234Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7593012988638683371:2400], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions }
2026-01-08T15:28:21.136333Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:263: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions }
2026-01-08T15:28:21.441543Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689)
2026-01-08T15:28:21.479966Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689)
2026-01-08T15:28:21.510880Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689)
2026-01-08T15:28:21.539075Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689)
2026-01-08T15:28:21.572567Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689)
2026-01-08T15:28:21.621125Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689)
2026-01-08T15:28:21.657934Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689)
2026-01-08T15:28:21.705378Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715669:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689)
2026-01-08T15:28:21.789291Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7593012988638684244:2483], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions }
2026-01-08T15:28:21.789386Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:263: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions }
2026-01-08T15:28:21.789485Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7593012988638684249:2486], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions }
2026-01-08T15:28:21.789633Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7593012988638684251:2487], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions }
2026-01-08T15:28:21.789679Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:263: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions }
2026-01-08T15:28:21.792672Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715670:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179)
2026-01-08T15:28:21.802270Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7593012988638684253:2488], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715670 completed, doublechecking }
2026-01-08T15:28:21.858754Z node 1 :TX_PROXY ERROR: schemereq.cpp:624: Actor# [1:7593012988638684304:3772] txid# 281474976715671, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 53], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 }
2026-01-08T15:28:22.609547Z node 1 :METADATA_PROVIDER ERROR: log.cpp:841: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7593012971458812391:2129];send_to=[0:7307199536658146131:7762515];
2026-01-08T15:28:22.609617Z node 1 :METADATA_PROVIDER ERROR: log.cpp:841: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout;
Trying to start YDB, gRPC: 24409, MsgBus: 19989
2026-01-08T15:28:25.566140Z node 2 :METADATA_PROVIDER ERROR: log.cpp:841: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError;
2026-01-08T15:28:25.578451Z node 2 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: LookupError, path: Root/.metadata/script_executions
2026-01-08T15:28:25.649405Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected
2026-01-08T15:28:25.651871Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting
2026-01-08T15:28:25.670533Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1374: Notification cookie mismatch for subscription [2:7593013006695031708:2081] 1767886105354747 != 1767886105354750
2026-01-08T15:28:25.693592Z node 2 :IMPORT WARN: schemeshard_import.cpp:440: Table profiles were not loaded
2026-01-08T15:28:25.702903Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected
2026-01-08T15:28:25.798243Z node 2 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: PathErrorUnknown, path: Root/.metadata/script_executions
2026-01-08T15:28:25.862931Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe)
2026-01-08T15:28:25.862956Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe)
2026-01-08T15:28:25.862964Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe)
2026-01-08T15:28:25.863034Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration
2026-01-08T15:28:26.222424Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311)
2026-01-08T15:28:26.237706Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689)
2026-01-08T15:28:26.296620Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689)
2026-01-08T15:28:26.446353Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689)
2026-01-08T15:28:26.494294Z node 2 :TX_CONVEYOR ERROR: log.cpp:841: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup;
2026-01-08T15:28:26.530559Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689)
2026-01-08T15:28:29.211908Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7593013023874902769:2399], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:28:29.212018Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:263: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:28:29.212516Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7593013023874902779:2400], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:28:29.212563Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:263: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:28:29.324945Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:28:29.397235Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:28:29.468531Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:28:29.545631Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:28:29.644466Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:28:29.740482Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:28:29.812200Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:28:29.895190Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710669:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:28:30.046767Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7593013028169870947:2483], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:28:30.046888Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:263: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:28:30.047188Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7593013028169870952:2486], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:28:30.047234Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7593013028169870953:2487], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:28:30.047509Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:263: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:28:30.052662Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710670:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2026-01-08T15:28:30.076445Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7593013028169870956:2488], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710670 completed, doublechecking } 2026-01-08T15:28:30.169679Z node 2 :TX_PROXY ERROR: schemereq.cpp:624: Actor# [2:7593013028169871007:3777] txid# 281474976710671, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 53], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } |85.5%| [TM] {BAZEL_UPLOAD} ydb/core/kqp/ut/discovery/unittest >> TControlPlaneProxyCheckNegativePermissionsFailed::ShouldSendDeleteBinding [GOOD] >> TControlPlaneProxyCheckNegativePermissionsSuccess::ShouldSendCreateQuery >> TCreateAndDropViewTest::CreateViewOccupiedName [GOOD] >> SequenceShardTests::NegativeIncrement [GOOD] >> TCreateAndDropViewTest::CreateViewIfNotExists >> TTxDataShardBuildIndexScan::ShadowBorrowCompaction [GOOD] >> TTxDataShardFilterKMeansScan::BadRequest ------- [TM] {asan, default-linux-x86_64, release} ydb/library/actors/interconnect/ut/unittest >> Sticking::Check [GOOD] Test command err: 0: ch1=8074 mean# 8074 dev# 0 part# 0 1: ch1=8074 ch2=8074 mean# 8074 dev# 0 part# 0 2: ch1=13338 ch2=10884 mean# 12111 dev# 1227 part# 0.1013128561 3: ch1=18148 ch2=14148 mean# 16148 dev# 2000 part# 0.1238543473 4: ch1=20188 ch2=20182 mean# 20185 dev# 3 part# 0.0001486252167 5: ch1=27764 ch2=20680 mean# 24222 dev# 3542 part# 0.1462306994 6: ch1=28222 ch2=28296 mean# 28259 dev# 37 part# 0.001309317386 7: ch1=34120 ch2=30472 mean# 32296 dev# 1824 part# 0.05647758236 8: ch1=37842 ch2=34824 mean# 36333 dev# 1509 part# 0.04153249112 9: ch1=40380 ch2=40360 mean# 40370 dev# 10 part# 0.0002477086946 10: ch1=47458 ch2=41356 mean# 44407 dev# 3051 part# 0.06870538429 11: ch1=49004 ch2=47884 mean# 48444 dev# 560 part# 0.01155973908 12: ch1=53814 ch2=51148 mean# 52481 dev# 1333 part# 0.02539966845 13: ch1=58624 ch2=54412 mean# 56518 dev# 2106 part# 0.03726246506 14: ch1=60572 ch2=60538 mean# 60555 dev# 17 part# 0.0002807365205 15: ch1=68240 ch2=60944 mean# 64592 dev# 3648 part# 0.05647758236 16: ch1=68698 ch2=68560 mean# 68629 dev# 69 part# 0.001005405878 17: ch1=74596 ch2=70736 mean# 72666 dev# 1930 part# 0.0265598767 18: ch1=78318 ch2=75088 mean# 76703 dev# 1615 part# 0.02105523904 19: ch1=80764 ch2=80716 mean# 80740 dev# 24 part# 0.0002972504335 20: ch1=87934 ch2=81620 mean# 84777 dev# 3157 part# 0.03723887375 21: ch1=89480 ch2=88148 mean# 88814 dev# 666 part# 0.007498817754 22: ch1=94290 ch2=91412 mean# 92851 dev# 1439 part# 0.01549794833 23: ch1=99100 ch2=94676 mean# 96888 dev# 2212 part# 0.02283048468 24: ch1=100956 ch2=100894 mean# 100925 dev# 31 part# 0.0003071587813 25: ch1=108716 ch2=101208 mean# 104962 dev# 3754 part# 0.03576532459 26: ch1=109174 ch2=108824 mean# 108999 dev# 175 part# 0.001605519317 27: ch1=113984 ch2=112088 mean# 113036 dev# 948 part# 0.008386708659 28: ch1=119882 ch2=114264 mean# 117073 dev# 2809 part# 0.02399357666 29: ch1=121148 ch2=121072 mean# 121110 dev# 38 part# 0.0003137643465 30: ch1=128410 ch2=121884 mean# 125147 dev# 3263 part# 0.02607333775 31: ch1=121882 ch2=128412 mean# 125147 dev# 3265 part# 0.02608931896 32: ch1=126692 ch2=123602 mean# 125147 dev# 1545 part# 0.01234548171 33: ch1=126238 ch2=124056 mean# 125147 dev# 1091 part# 0.008717747928 34: ch1=123192 ch2=127102 mean# 125147 dev# 1955 part# 0.01562162896 35: ch1=129004 ch2=121290 mean# 125147 dev# 3857 part# 0.03081975597 36: ch1=121428 ch2=128866 mean# 125147 dev# 3719 part# 0.02971705275 37: ch1=126234 ch2=124060 mean# 125147 dev# 1087 part# 
0.008685785516 38: ch1=126234 ch2=124060 mean# 125147 dev# 1087 part# 0.008685785516 39: ch1=123686 ch2=126608 mean# 125147 dev# 1461 part# 0.01167427106 40: ch1=128502 ch2=121792 mean# 125147 dev# 3355 part# 0.02680847324 41: ch1=121882 ch2=128412 mean# 125147 dev# 3265 part# 0.02608931896 42: ch1=126234 ch2=124060 mean# 125147 dev# 1087 part# 0.008685785516 43: ch1=125146 ch2=125148 mean# 125147 dev# 1 part# 7.990603051e-06 44: ch1=123096 ch2=127198 mean# 125147 dev# 2051 part# 0.01638872686 45: ch1=129092 ch2=121202 mean# 125147 dev# 3945 part# 0.03152292904 46: ch1=121424 ch2=128870 mean# 125147 dev# 3723 part# 0.02974901516 47: ch1=126230 ch2=124064 mean# 125147 dev# 1083 part# 0.008653823104 48: ch1=125142 ch2=125152 mean# 125147 dev# 5 part# 3.995301525e-05 49: ch1=123590 ch2=126704 mean# 125147 dev# 1557 part# 0.01244136895 50: ch1=128590 ch2=121704 mean# 125147 dev# 3443 part# 0.0275116463 51: ch1=121878 ch2=128416 mean# 125147 dev# 3269 part# 0.02612128137 52: ch1=126230 ch2=124064 mean# 125147 dev# 1083 part# 0.008653823104 53: ch1=125142 ch2=125152 mean# 125147 dev# 5 part# 3.995301525e-05 54: ch1=123000 ch2=127244 mean# 125122 dev# 2122 part# 0.01695944758 55: ch1=128046 ch2=122198 mean# 125122 dev# 2924 part# 0.02336919167 56: ch1=121832 ch2=128412 mean# 125122 dev# 3290 part# 0.02629433673 57: ch1=126184 ch2=124060 mean# 125122 dev# 1062 part# 0.008487715989 58: ch1=126184 ch2=124060 mean# 125122 dev# 1062 part# 0.008487715989 59: ch1=122410 ch2=127834 mean# 125122 dev# 2712 part# 0.02167484535 60: ch1=128636 ch2=121608 mean# 125122 dev# 3514 part# 0.02808458944 61: ch1=121832 ch2=128412 mean# 125122 dev# 3290 part# 0.02629433673 62: ch1=126184 ch2=124060 mean# 125122 dev# 1062 part# 0.008487715989 63: ch1=125096 ch2=125148 mean# 125122 dev# 26 part# 0.0002077971899 64: ch1=122908 ch2=127336 mean# 125122 dev# 2214 part# 0.01769472994 65: ch1=128138 ch2=122106 mean# 125122 dev# 3016 part# 0.02410447403 66: ch1=121832 ch2=128412 mean# 125122 dev# 3290 part# 0.02629433673 67: ch1=126642 ch2=123602 mean# 125122 dev# 1520 part# 0.01214814341 68: ch1=126188 ch2=124056 mean# 125122 dev# 1066 part# 0.008519684788 69: ch1=122322 ch2=127922 mean# 125122 dev# 2800 part# 0.02237815892 70: ch1=128732 ch2=121512 mean# 125122 dev# 3610 part# 0.0288518406 71: ch1=121836 ch2=128408 mean# 125122 dev# 3286 part# 0.02626236793 72: ch1=126188 ch2=124056 mean# 125122 dev# 1066 part# 0.008519684788 73: ch1=126188 ch2=124056 mean# 125122 dev# 1066 part# 0.008519684788 74: ch1=123908 ch2=126336 mean# 125122 dev# 1214 part# 0.00970253033 75: ch1=128234 ch2=122010 mean# 125122 dev# 3112 part# 0.0248717252 76: ch1=121836 ch2=128408 mean# 125122 dev# 3286 part# 0.02626236793 77: ch1=126646 ch2=123598 mean# 125122 dev# 1524 part# 0.01218011221 78: ch1=126192 ch2=124052 mean# 125122 dev# 1070 part# 0.008551653586 79: ch1=123322 ch2=126922 mean# 125122 dev# 1800 part# 0.0143859593 80: ch1=128828 ch2=121416 mean# 125122 dev# 3706 part# 0.02961909177 81: ch1=121840 ch2=128404 mean# 125122 dev# 3282 part# 0.02623039913 82: ch1=126192 ch2=124052 mean# 125122 dev# 1070 part# 0.008551653586 83: ch1=126192 ch2=124052 mean# 125122 dev# 1070 part# 0.008551653586 84: ch1=123820 ch2=126424 mean# 125122 dev# 1302 part# 0.0104058439 85: ch1=128330 ch2=121964 mean# 125147 dev# 3183 part# 0.02543408951 86: ch1=121886 ch2=128408 mean# 125147 dev# 3261 part# 0.02605735655 87: ch1=126238 ch2=124056 mean# 125147 dev# 1091 part# 0.008717747928 88: ch1=126238 ch2=124056 mean# 125147 dev# 1091 part# 0.008717747928 89: 
ch1=123276 ch2=127018 mean# 125147 dev# 1871 part# 0.01495041831 90: ch1=128920 ch2=121374 mean# 125147 dev# 3773 part# 0.03014854531 91: ch1=121428 ch2=128866 mean# 125147 dev# 3719 part# 0.02971705275 92: ch1=126234 ch2=124060 mean# 125147 dev# 1087 part# 0.008685785516 93: ch1=126234 ch2=124060 mean# 125147 dev# 1087 part# 0.008685785516 94: ch1=123770 ch2=126524 mean# 125147 dev# 1377 part# 0.0110030604 95: ch1=128418 ch2=121876 mean# 125147 dev# 3271 part# 0.02613726258 96: ch1=121882 ch2=128412 mean# 125147 dev# 3265 part# 0.02608931896 97: ch1=126234 ch2=124060 mean# 125147 dev# 1087 part# 0.008685785516 98: ch1=125146 ch2=125148 mean# 125147 dev# 1 part# 7.990603051e-06 99: ch1=123180 ch2=127114 mean# 125147 dev# 1967 part# 0.0157175162 100: ch1=126832 ch2=122374 ch3=1088 mean# 83431.33333 dev# 58253.9661 part# 0.6982264788 101: ch1=119248 ch2=125606 ch3=5440 mean# 83431.33333 dev# 55209.25102 part# 0.6617328145 102: ch1=123198 ch2=119712 ch3=7384 mean# 83431.33333 dev# 53792.41415 part# 0.6447507429 103: ch1=118610 ch2=120800 ch3=10884 mean# 83431.33333 dev# 51306.5019 part# 0.6149548359 104: ch1=120786 ch2=118624 ch3=10884 mean# 83431.33333 dev# 51306.30397 part# 0.6149524636 105: ch1=119344 ch2=115714 ch3=15236 mean# 83431.33333 dev# 48244.14881 part# 0.578249764 106: ch1=112258 ch2=118624 ch3=19412 mean# 83431.33333 dev# 45343.04628 part# 0.5434774259 107: ch1=115342 ch2=114272 ch3=20680 mean# 83431.33333 dev# 44374.04347 part# 0.5318630507 108: ch1=112078 ch2=114272 ch3=23944 mean# 83431.33333 dev# 42073.43207 part# 0.504288142 109: ch1=112988 ch2=112096 ch3=25210 mean# 83431.33333 dev# 41170.31015 part# 0.4934634089 110: ch1=111048 ch2=109858 ch3=29388 mean# 83431.33333 dev# 38217.49541 part# 0.4580712531 111: ch1=107722 ch2=111008 ch3=31564 mean# 83431.33333 dev# 36700.26929 part# 0.4398859257 112: ch1=107264 ch2=107744 ch3=35286 mean# 83431.33333 dev# 34044.45566 part# 0.4080535968 113: ch1=108196 ch2=105568 ch3=36530 mean# 83431.33333 dev# 33181.6003 part# 0.3977114948 114: ch1=102298 ch2=107660 ch3=40336 mean# 83431.33333 dev# 30551.52563 part# 0.3661876708 115: ch1=106818 ch2=102108 ch3=41368 mean# 83431.33333 dev# 29805.35783 part# 0.3572441748 116: ch1=99640 ch2=104934 ch3=45720 mean# 83431.33333 dev# 26753.38116 part# 0.3206634737 117: ch1=103404 ch2=99078 ch3=47812 mean# 83431.33333 dev# 25248.5148 part# 0.3026262891 118: ch1=97914 ch2=101220 ch3=51160 mean# 83431.33333 dev# 22859.15754 part# 0.273987681 119: ch1=100090 ch2=97956 ch3=52248 mean# 83431.33333 dev# 22067.15051 part# 0.2644947603 120: ch1=98242 ch2=96082 ch3=55970 mean# 83431.33333 dev# 19438.10727 part# 0.2329832989 121: ch1=92474 ch2=99040 ch3=58780 mean# 83431.33333 dev# 17636.02875 part# 0.2113837578 122: ch1=96196 ch2=93142 ch3=60956 mean# 83431.33333 dev# 15941.29199 part# 0.1910708046 123: ch1=92478 ch2=93596 ch3=64220 mean# 83431.33333 dev# 13592.12952 part# 0.1629139674 124: ch1=91390 ch2=92508 ch3=66396 mean# 83431.33333 dev# 12054.44364 part# 0.1444834112 125: ch1=90132 ch2=90044 ch3=70118 mean# 83431.33333 dev# 9414.016831 part# 0.1128355074 126: ch1=87038 ch2=91416 ch3=71840 mean# 83431.33333 dev# 8388.920365 part# 0.100548799 127: ch1=88126 ch2=87064 ch3=75104 mean# 83431.33333 dev# 5904.253909 part# 0.07076782395 128: ch1=87330 ch2=85976 ch3=76988 mean# 83431.33333 dev# 4589.534278 part# 0.05500971991 129: ch1=83608 ch2=85976 ch3=80710 mean# 83431.33333 dev# 2153.461916 part# 0.02581118903 130: ch1=86820 ch2=81834 ch3=81640 mean# 83431.33333 dev# 2397.457728 part# 0.02873569955 131: 
ch1=81136 ch2=84254 ch3=84904 mean# 83431.33333 dev# 1644.595459 part# 0.01971196423 132: ch1=85172 ch2=82394 ch3=82728 mean# 83431.33333 dev# 1238.367025 part# 0.01484294899 133: ch1=81358 ch2=84888 ch3=84048 mean# 83431.33333 dev# 1505.641244 part# 0.01804647228 134: ch1=87034 ch2=81624 ch3=81636 mean# 83431.33333 dev# 2547.474741 part# 0.03053378915 135: ch1=78960 ch2=85346 ch3=85988 mean# 83431.33333 dev# 3172.554947 part# 0.03802594085 136: ch1=82564 ch2=83918 ch3=83812 mean# 83431.33333 dev# 614.8221062 part# 0.007369199096 137: ch1=84678 ch2=82716 ch3=82900 mean# 83431.33333 dev# 884.7211739 part# 0.01060418357 138: ch1=84858 ch2=82716 ch3=82720 mean# 83431.33333 dev# 1008.806996 part# 0.01209146439 139: ch1=83312 ch2=83174 ch3=83808 mean# 83431.33333 dev# 272.236825 part# 0.003263004607 140: ch1=80764 ch2=84812 ch3=84718 mean# 83431.33333 dev# 1886.479849 part# 0.02261116746 141: ch1=84402 ch2=83176 ch3=82716 mean# 83431.33333 dev# 711.5922678 part# 0.008529077019 142: ch1=81598 ch2=84892 ch3=83804 mean# 83431.33333 dev# 1370.345293 part# 0.01642482792 143: ch1=86408 ch2=81628 ch3=82258 mean# 83431.33333 dev# 2120.476886 part# 0.02541583361 144: ch1=80666 ch2=84892 ch3=84736 mean# 83431.33333 dev# 1956.422813 part# 0.02344949714 145: ch1=87018 ch2=81712 ch3=81564 mean# 83431.33333 dev# 2536.875944 part# 0.03040675299 146: ch1=81148 ch2=84262 ch3=84884 mean# 83431.33333 dev# 1634.406994 part# 0.01958984627 147: ch1=84236 ch2=83350 ch3=82708 mean# 83431.33333 dev# 626.4488983 part# 0.007508556717 148: ch1=81560 ch2=84854 ch3=83880 mean# 83431.33333 dev# 1381.686248 part# 0.01656075953 149: ch1=87050 ch2=81624 ch3=81620 mean# 83431.33333 dev# 2558.78426 part# 0.03066934397 150: ch1=80064 ch2=85346 ch3=84884 mean# 83431.33333 dev# 2388.522742 part# 0.02862860566 151: ch1=83478 ch2=83478 ch3=83338 mean# 83431.33333 dev# 65.99663291 part# 0.0007910293444 152: ch1=83782 ch2=82720 ch3=83792 mean# 83431.33333 dev# 503.005191 part# 0.006028972221 153: ch1=82694 ch2=83808 ch3=83792 mean# 83431.33333 dev# 521.414316 part# 0.006249622236 154: ch1=84870 ch2=82720 ch3=82704 mean# 83431.33333 dev# 1017.311926 part# 0.01219340367 155: ch1=80518 ch2=84896 ch3=84880 mean# 83431.33333 dev# 2060.048112 part# 0.02469154009 156: ch1=85328 ch2=82720 ch3=82246 mean# 83431.33333 dev# 1355.034399 part# 0.016241313 157: ch1=82698 ch2=83808 ch3=83788 mean# 83431.33333 dev# 518.6092513 part# 0.006216000999 158: ... 
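The mean#/dev#/part# triples in the Sticking::Check output can be reproduced from the per-channel byte counters. A minimal sketch, assuming dev# is the population standard deviation of the channel counts and part# is the relative deviation dev/mean (both assumptions match the logged values, e.g. step 2 above: ch1=13338, ch2=10884 gives mean 12111, dev 1227, part 0.1013128561):

#include <cmath>
#include <cstdio>
#include <vector>

// Sketch (not the test's actual code): mean of per-channel byte counts,
// population standard deviation, and "part" as dev / mean.
static void PrintStickingStats(const std::vector<double>& channels) {
    double sum = 0;
    for (double c : channels) sum += c;
    const double mean = sum / channels.size();
    double sq = 0;
    for (double c : channels) sq += (c - mean) * (c - mean);
    const double dev = std::sqrt(sq / channels.size());
    std::printf("mean# %.10g dev# %.10g part# %.10g\n",
                mean, dev, mean != 0 ? dev / mean : 0.0);
}

int main() {
    PrintStickingStats({13338, 10884});         // step 2: part# 0.1013128561
    PrintStickingStats({126832, 122374, 1088}); // step 100: part# 0.6982264788
}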
4195061 bytes written ... 2026-01-08T15:28:25.530162Z :INTERCONNECT INFO: interconnect_tcp_proxy.cpp:43: Proxy [1:7593013007490318886:2048] [node 2] ICP01 ready to work 2026-01-08T15:28:25.538500Z :INTERCONNECT INFO: interconnect_tcp_proxy.cpp:43: Proxy [2:7593013004094848059:2048] [node 1] ICP01 ready to work No RDMA context found for GID: ::ffff:127.1.0.0 2026-01-08T15:28:25.550875Z :INTERCONNECT WARN: interconnect_handshake.cpp:1390: Handshake [2:7593013004094848068:2056] [node 0] ICRDMA Unable to find verbs context using address 127.1.0.0:31732 2026-01-08T15:28:25.566680Z :INTERCONNECT WARN: interconnect_handshake.cpp:542: Handshake [2:7593013004094848068:2056] [node 1] ICH09 Neither CompatibilityInfo nor VersionTag of the peer can be validated, accepting by default 2026-01-08T15:28:25.568463Z :INTERCONNECT INFO: interconnect_tcp_proxy.cpp:220: Proxy [2:7593013004094848059:2048] [node 1] ICP08 No active sessions, becoming PendingConnection 2026-01-08T15:28:25.568918Z :INTERCONNECT WARN: interconnect_handshake.cpp:542: Handshake [1:7593013007490318896:2056] [node 2] ICH09 Neither CompatibilityInfo nor VersionTag of the peer can be validated, accepting by default 2026-01-08T15:28:25.568962Z :INTERCONNECT ERROR: interconnect_handshake.cpp:955: Handshake [1:7593013007490318896:2056] [node 2] ICRDMA Non success qp response from remote side 2026-01-08T15:28:25.570162Z :INTERCONNECT INFO: interconnect_handshake.cpp:414: Handshake [1:7593013007490318896:2056] [node 2] ICH04 handshake succeeded 2026-01-08T15:28:25.570499Z :INTERCONNECT INFO: interconnect_tcp_proxy.cpp:339: Proxy [1:7593013007490318886:2048] [node 2] ICP20 outgoing handshake succeeded 2026-01-08T15:28:25.570558Z :INTERCONNECT_STATUS INFO: interconnect_tcp_session.cpp:66: [2] session created 2026-01-08T15:28:25.570606Z :INTERCONNECT INFO: interconnect_tcp_proxy.cpp:378: Proxy [1:7593013007490318886:2048] [node 2] ICP22 created new session: [1:7593013007490318899:2048] 2026-01-08T15:28:25.570648Z :INTERCONNECT_SESSION INFO: interconnect_tcp_session.cpp:278: Session [1:7593013007490318899:2048] [node 2] ICS09 handshake done sender: [1:7593013007490318896:2056] self: [1:7593013007490318895:0] peer: [2:7593013004094848069:0] socket: 24 qp: -1 2026-01-08T15:28:25.570694Z :INTERCONNECT_SESSION INFO: interconnect_tcp_session.cpp:303: Session [1:7593013007490318899:2048] [node 2] ICS10 traffic start 2026-01-08T15:28:25.570731Z :INTERCONNECT_STATUS INFO: interconnect_tcp_session.cpp:329: [2] connected No RDMA context found for GID: ::ffff:127.1.0.0 2026-01-08T15:28:25.572536Z :INTERCONNECT WARN: interconnect_handshake.cpp:1390: Handshake [2:7593013004094848070:2057] [node 0] ICRDMA Unable to find verbs context using address 127.1.0.0:31732 2026-01-08T15:28:25.573087Z :INTERCONNECT INFO: interconnect_handshake.cpp:414: Handshake [2:7593013004094848068:2056] [node 1] ICH04 handshake succeeded 2026-01-08T15:28:25.573251Z :INTERCONNECT INFO: interconnect_tcp_proxy.cpp:335: Proxy [2:7593013004094848059:2048] [node 1] ICP19 incoming handshake succeeded 2026-01-08T15:28:25.573321Z :INTERCONNECT_STATUS INFO: interconnect_tcp_session.cpp:66: [1] session created 2026-01-08T15:28:25.573376Z :INTERCONNECT INFO: interconnect_tcp_proxy.cpp:378: Proxy [2:7593013004094848059:2048] [node 1] ICP22 created new session: [2:7593013004094848071:2048] 
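The ICRDMA lines above show each handshake first probing for an RDMA verbs context and, failing that, completing over plain TCP (hence qp: -1 in the resulting ICS09 session records). A minimal sketch of such a device probe, assuming libibverbs; this is illustrative, not the interconnect's actual code:

#include <infiniband/verbs.h>
#include <cstdio>

// Probe for RDMA-capable devices. If none is visible, a transport would
// fall back to TCP, as the "Unable to find verbs context" path does above.
static bool HasRdmaDevice() {
    int num = 0;
    ibv_device** list = ibv_get_device_list(&num);
    if (!list) {
        return false; // verbs stack unavailable on this host
    }
    for (int i = 0; i < num; ++i) {
        std::printf("verbs device: %s\n", ibv_get_device_name(list[i]));
    }
    ibv_free_device_list(list);
    return num > 0;
}

int main() {
    std::printf("%s\n", HasRdmaDevice() ? "RDMA available"
                                        : "no RDMA context, falling back to TCP");
}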
2026-01-08T15:28:25.573435Z :INTERCONNECT_SESSION INFO: interconnect_tcp_session.cpp:278: Session [2:7593013004094848071:2048] [node 1] ICS09 handshake done sender: [2:7593013004094848068:2056] self: [2:7593013004094848069:0] peer: [1:7593013007490318895:0] socket: 25 qp: -1 2026-01-08T15:28:25.573472Z :INTERCONNECT_SESSION INFO: interconnect_tcp_session.cpp:303: Session [2:7593013004094848071:2048] [node 1] ICS10 traffic start 2026-01-08T15:28:25.573526Z :INTERCONNECT_STATUS INFO: interconnect_tcp_session.cpp:329: [1] connected Updated MaxRTT# 0.035291s Updated MaxRTT# 0.072168s 2026-01-08T15:28:35.602517Z :INTERCONNECT_NETWORK NOTICE: interconnect_tcp_input_session.cpp:975: [2 <-> 1] connection closed by peer 2026-01-08T15:28:35.602798Z :INTERCONNECT_SESSION INFO: interconnect_tcp_session.cpp:552: Session [2:7593013004094848071:2048] [node 1] ICS07 socket disconnect 25 reason# EndOfStream 2026-01-08T15:28:35.602828Z :INTERCONNECT_SESSION INFO: interconnect_tcp_session.cpp:571: Session [2:7593013004094848071:2048] [node 1] ICS25 shutdown socket, reason# EndOfStream 2026-01-08T15:28:35.602915Z :INTERCONNECT_STATUS INFO: interconnect_tcp_session.cpp:581: [1] disconnected 2026-01-08T15:28:35.602955Z :INTERCONNECT_SESSION INFO: interconnect_tcp_session.cpp:524: Session [2:7593013004094848071:2048] [node 1] ICS15 start handshake |85.5%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/coordinator/ut/ydb-core-tx-coordinator-ut |85.5%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/coordinator/ut/ydb-core-tx-coordinator-ut |85.5%| [TM] {RESULT} ydb/core/kqp/ut/discovery/unittest |85.5%| [TM] {RESULT} ydb/library/actors/interconnect/ut/unittest |85.5%| [LD] {RESULT} $(B)/ydb/core/tx/coordinator/ut/ydb-core-tx-coordinator-ut |85.5%| [TM] {BAZEL_UPLOAD} ydb/library/actors/interconnect/ut/unittest >> TControlPlaneProxyCheckNegativePermissionsSuccess::ShouldSendCreateQuery [GOOD] >> TControlPlaneProxyCheckNegativePermissionsSuccess::ShouldSendListQueries >> TQuotaTracker::TestSmallMessages [GOOD] >> TQuotaTracker::TestBigMessages [GOOD] >> TWriteQuoterTests::WaitDeduplicationIdQuota >> TTestYqlToMiniKQLCompile::CheckResolve [GOOD] >> TTestYqlToMiniKQLCompile::OnlyResult >> TTxDataShardTestInit::TestTableHasPath [GOOD] >> TTxDataShardTestInit::TestResolvePathAfterRestart >> TTestYqlToMiniKQLCompile::OnlyResult [GOOD] >> TTestYqlToMiniKQLCompile::EraseRow >> KeyValueGRPCService::SimpleRenameUnexistedKeyV2 [GOOD] >> KeyValueGRPCService::SimpleConcatUnexistedKeyV1 ------- [TS] {asan, default-linux-x86_64, release} ydb/core/tx/sequenceshard/ut/unittest >> SequenceShardTests::NegativeIncrement [GOOD] Test command err: 2026-01-08T15:28:34.282590Z node 1 :SEQUENCESHARD TRACE: sequenceshard_impl.cpp:38: [sequenceshard 72057594037927937] OnActivateExecutor 2026-01-08T15:28:34.282744Z node 1 :SEQUENCESHARD TRACE: tx_init_schema.cpp:14: [sequenceshard 72057594037927937] TTxInitSchema.Execute 2026-01-08T15:28:34.295204Z node 1 :SEQUENCESHARD TRACE: tx_init.cpp:14: [sequenceshard 72057594037927937] TTxInit.Execute 2026-01-08T15:28:34.298968Z node 1 :SEQUENCESHARD TRACE: tx_init_schema.cpp:22: [sequenceshard 72057594037927937] TTxInitSchema.Complete 2026-01-08T15:28:34.299053Z node 1 :SEQUENCESHARD TRACE: tx_init.cpp:112: [sequenceshard 72057594037927937] TTxInit.Complete 2026-01-08T15:28:34.304816Z node 1 :SEQUENCESHARD TRACE: tx_create_sequence.cpp:21: [sequenceshard 72057594037927937] TTxCreateSequence.Execute PathId# [OwnerId: 123, LocalPathId: 42] Record# PathId { OwnerId: 123 LocalId: 42 } 
2026-01-08T15:28:34.304999Z node 1 :SEQUENCESHARD NOTICE: tx_create_sequence.cpp:113: [sequenceshard 72057594037927937] TTxCreateSequence.Execute SUCCESS PathId# [OwnerId: 123, LocalPathId: 42] MinValue# 1 MaxValue# 9223372036854775807 StartValue# 1 Cache# 1 Increment# 1 Cycle# false State# Active 2026-01-08T15:28:34.338815Z node 1 :SEQUENCESHARD TRACE: tx_create_sequence.cpp:118: [sequenceshard 72057594037927937] TTxCreateSequence.Complete 2026-01-08T15:28:34.339281Z node 1 :SEQUENCESHARD TRACE: tx_create_sequence.cpp:21: [sequenceshard 72057594037927937] TTxCreateSequence.Execute PathId# [OwnerId: 123, LocalPathId: 42] Record# PathId { OwnerId: 123 LocalId: 42 } 2026-01-08T15:28:34.339335Z node 1 :SEQUENCESHARD TRACE: tx_create_sequence.cpp:33: [sequenceshard 72057594037927937] TTxCreateSequence.Execute SEQUENCE_ALREADY_EXISTS PathId# [OwnerId: 123, LocalPathId: 42] 2026-01-08T15:28:34.339402Z node 1 :SEQUENCESHARD TRACE: tx_create_sequence.cpp:118: [sequenceshard 72057594037927937] TTxCreateSequence.Complete 2026-01-08T15:28:34.339668Z node 1 :SEQUENCESHARD TRACE: tx_create_sequence.cpp:21: [sequenceshard 72057594037927937] TTxCreateSequence.Execute PathId# [OwnerId: 123, LocalPathId: 51] Record# PathId { OwnerId: 123 LocalId: 51 } StartValue: 100001 Cache: 10 2026-01-08T15:28:34.339797Z node 1 :SEQUENCESHARD NOTICE: tx_create_sequence.cpp:113: [sequenceshard 72057594037927937] TTxCreateSequence.Execute SUCCESS PathId# [OwnerId: 123, LocalPathId: 51] MinValue# 1 MaxValue# 9223372036854775807 StartValue# 100001 Cache# 10 Increment# 1 Cycle# false State# Active 2026-01-08T15:28:34.352122Z node 1 :SEQUENCESHARD TRACE: tx_create_sequence.cpp:118: [sequenceshard 72057594037927937] TTxCreateSequence.Complete 2026-01-08T15:28:34.352542Z node 1 :SEQUENCESHARD TRACE: tx_allocate_sequence.cpp:22: [sequenceshard 72057594037927937] TTxAllocateSequence.Execute PathId# [OwnerId: 123, LocalPathId: 42] Cache# 0 2026-01-08T15:28:34.352647Z node 1 :SEQUENCESHARD TRACE: tx_allocate_sequence.cpp:89: [sequenceshard 72057594037927937] TTxAllocateSequence.Execute SUCCESS PathId# [OwnerId: 123, LocalPathId: 42] AllocationStart# 1 AllocationCount# 1 AllocationIncrement# 1 2026-01-08T15:28:34.365555Z node 1 :SEQUENCESHARD TRACE: tx_allocate_sequence.cpp:174: [sequenceshard 72057594037927937] TTxAllocateSequence.Complete 2026-01-08T15:28:34.365977Z node 1 :SEQUENCESHARD TRACE: tx_allocate_sequence.cpp:22: [sequenceshard 72057594037927937] TTxAllocateSequence.Execute PathId# [OwnerId: 123, LocalPathId: 42] Cache# 10 2026-01-08T15:28:34.366099Z node 1 :SEQUENCESHARD TRACE: tx_allocate_sequence.cpp:89: [sequenceshard 72057594037927937] TTxAllocateSequence.Execute SUCCESS PathId# [OwnerId: 123, LocalPathId: 42] AllocationStart# 2 AllocationCount# 10 AllocationIncrement# 1 2026-01-08T15:28:34.378164Z node 1 :SEQUENCESHARD TRACE: tx_allocate_sequence.cpp:174: [sequenceshard 72057594037927937] TTxAllocateSequence.Complete 2026-01-08T15:28:34.378515Z node 1 :SEQUENCESHARD TRACE: tx_allocate_sequence.cpp:22: [sequenceshard 72057594037927937] TTxAllocateSequence.Execute PathId# [OwnerId: 123, LocalPathId: 51] Cache# 0 2026-01-08T15:28:34.378607Z node 1 :SEQUENCESHARD TRACE: tx_allocate_sequence.cpp:89: [sequenceshard 72057594037927937] TTxAllocateSequence.Execute SUCCESS PathId# [OwnerId: 123, LocalPathId: 51] AllocationStart# 100001 AllocationCount# 10 AllocationIncrement# 1 2026-01-08T15:28:34.395358Z node 1 :SEQUENCESHARD TRACE: tx_allocate_sequence.cpp:174: [sequenceshard 72057594037927937] TTxAllocateSequence.Complete 
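The allocation sizes in this trace follow a simple rule: a request with Cache# 0 falls back to the sequence's configured cache, and the shard serves min(cache, values remaining in range) starting at the current next value, reporting SEQUENCE_OVERFLOW once the range is exhausted (unless the sequence cycles, in which case it restarts from StartValue, as seen further down). A minimal sketch of that arithmetic for a forward (Increment# 1) sequence, reconstructed from the log rather than taken from the shard's code:

#include <cstdint>
#include <cstdio>

// Sketch of TTxAllocateSequence sizing for Increment# 1, reconstructed
// from the trace above; field names are illustrative.
struct Sequence {
    int64_t NextValue;
    int64_t MaxValue;
    uint64_t Cache;   // sequence-level cache, used when the request passes 0
    bool Exhausted;
};

static bool Allocate(Sequence& seq, uint64_t requestCache,
                     int64_t& start, uint64_t& count) {
    if (seq.Exhausted) {
        return false; // SEQUENCE_OVERFLOW (a cycling sequence would restart instead)
    }
    const uint64_t cache = requestCache ? requestCache : seq.Cache;
    const uint64_t remaining =
        uint64_t(seq.MaxValue) - uint64_t(seq.NextValue) + 1;
    start = seq.NextValue;
    count = cache < remaining ? cache : remaining;
    if (count == remaining) {
        seq.Exhausted = true;          // range fully handed out
    } else {
        seq.NextValue += int64_t(count);
    }
    return true;
}

int main() {
    Sequence seq{1, INT64_MAX, 1, false}; // LocalPathId 42 above: StartValue# 1, Cache# 1
    int64_t start; uint64_t count;
    Allocate(seq, 0, start, count);   // Cache# 0  -> AllocationStart# 1 AllocationCount# 1
    Allocate(seq, 10, start, count);  // Cache# 10 -> AllocationStart# 2 AllocationCount# 10
    std::printf("AllocationStart# %lld AllocationCount# %llu\n",
                (long long)start, (unsigned long long)count);
}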
2026-01-08T15:28:34.395812Z node 1 :SEQUENCESHARD TRACE: tx_allocate_sequence.cpp:22: [sequenceshard 72057594037927937] TTxAllocateSequence.Execute PathId# [OwnerId: 123, LocalPathId: 51] Cache# 50 2026-01-08T15:28:34.395917Z node 1 :SEQUENCESHARD TRACE: tx_allocate_sequence.cpp:89: [sequenceshard 72057594037927937] TTxAllocateSequence.Execute SUCCESS PathId# [OwnerId: 123, LocalPathId: 51] AllocationStart# 100011 AllocationCount# 50 AllocationIncrement# 1 2026-01-08T15:28:34.411313Z node 1 :SEQUENCESHARD TRACE: tx_allocate_sequence.cpp:174: [sequenceshard 72057594037927937] TTxAllocateSequence.Complete 2026-01-08T15:28:34.411831Z node 1 :SEQUENCESHARD TRACE: tx_allocate_sequence.cpp:22: [sequenceshard 72057594037927937] TTxAllocateSequence.Execute PathId# [OwnerId: 123, LocalPathId: 99] Cache# 0 2026-01-08T15:28:34.411880Z node 1 :SEQUENCESHARD TRACE: tx_allocate_sequence.cpp:35: [sequenceshard 72057594037927937] TTxAllocateSequence.Execute SEQUENCE_NOT_FOUND PathId# [OwnerId: 123, LocalPathId: 99] 2026-01-08T15:28:34.411946Z node 1 :SEQUENCESHARD TRACE: tx_allocate_sequence.cpp:174: [sequenceshard 72057594037927937] TTxAllocateSequence.Complete 2026-01-08T15:28:34.412178Z node 1 :SEQUENCESHARD TRACE: tx_allocate_sequence.cpp:22: [sequenceshard 72057594037927937] TTxAllocateSequence.Execute PathId# [OwnerId: 123, LocalPathId: 42] Cache# 18446744073709551615 2026-01-08T15:28:34.412288Z node 1 :SEQUENCESHARD TRACE: tx_allocate_sequence.cpp:89: [sequenceshard 72057594037927937] TTxAllocateSequence.Execute SUCCESS PathId# [OwnerId: 123, LocalPathId: 42] AllocationStart# 12 AllocationCount# 9223372036854775796 AllocationIncrement# 1 2026-01-08T15:28:34.427307Z node 1 :SEQUENCESHARD TRACE: tx_allocate_sequence.cpp:174: [sequenceshard 72057594037927937] TTxAllocateSequence.Complete 2026-01-08T15:28:34.427736Z node 1 :SEQUENCESHARD TRACE: tx_allocate_sequence.cpp:22: [sequenceshard 72057594037927937] TTxAllocateSequence.Execute PathId# [OwnerId: 123, LocalPathId: 42] Cache# 1 2026-01-08T15:28:34.427792Z node 1 :SEQUENCESHARD TRACE: tx_allocate_sequence.cpp:72: [sequenceshard 72057594037927937] TTxAllocateSequence.Execute SEQUENCE_OVERFLOW PathId# [OwnerId: 123, LocalPathId: 42] 2026-01-08T15:28:34.427850Z node 1 :SEQUENCESHARD TRACE: tx_allocate_sequence.cpp:174: [sequenceshard 72057594037927937] TTxAllocateSequence.Complete 2026-01-08T15:28:34.428105Z node 1 :SEQUENCESHARD TRACE: tx_drop_sequence.cpp:20: [sequenceshard 72057594037927937] TTxDropSequence.Execute PathId# [OwnerId: 123, LocalPathId: 42] 2026-01-08T15:28:34.428182Z node 1 :SEQUENCESHARD NOTICE: tx_drop_sequence.cpp:43: [sequenceshard 72057594037927937] TTxDropSequence.Execute SUCCESS PathId# [OwnerId: 123, LocalPathId: 42] 2026-01-08T15:28:34.441309Z node 1 :SEQUENCESHARD TRACE: tx_drop_sequence.cpp:48: [sequenceshard 72057594037927937] TTxDropSequence.Complete 2026-01-08T15:28:34.441698Z node 1 :SEQUENCESHARD TRACE: tx_drop_sequence.cpp:20: [sequenceshard 72057594037927937] TTxDropSequence.Execute PathId# [OwnerId: 123, LocalPathId: 42] 2026-01-08T15:28:34.441753Z node 1 :SEQUENCESHARD TRACE: tx_drop_sequence.cpp:33: [sequenceshard 72057594037927937] TTxDropSequence.Execute SEQUENCE_NOT_FOUND PathId# [OwnerId: 123, LocalPathId: 42] 2026-01-08T15:28:34.441821Z node 1 :SEQUENCESHARD TRACE: tx_drop_sequence.cpp:48: [sequenceshard 72057594037927937] TTxDropSequence.Complete 2026-01-08T15:28:34.457238Z node 1 :SEQUENCESHARD TRACE: sequenceshard_impl.cpp:38: [sequenceshard 72057594037927937] OnActivateExecutor 2026-01-08T15:28:34.457365Z 
node 1 :SEQUENCESHARD TRACE: tx_init_schema.cpp:14: [sequenceshard 72057594037927937] TTxInitSchema.Execute 2026-01-08T15:28:34.457863Z node 1 :SEQUENCESHARD TRACE: tx_init_schema.cpp:22: [sequenceshard 72057594037927937] TTxInitSchema.Complete 2026-01-08T15:28:34.458608Z node 1 :SEQUENCESHARD TRACE: tx_init.cpp:14: [sequenceshard 72057594037927937] TTxInit.Execute 2026-01-08T15:28:34.458812Z node 1 :SEQUENCESHARD TRACE: tx_init.cpp:112: [sequenceshard 72057594037927937] TTxInit.Complete 2026-01-08T15:28:34.464541Z node 1 :SEQUENCESHARD TRACE: tx_allocate_sequence.cpp:22: [sequenceshard 72057594037927937] TTxAllocateSequence.Execute PathId# [OwnerId: 123, LocalPathId: 42] Cache# 0 2026-01-08T15:28:34.464617Z node 1 :SEQUENCESHARD TRACE: tx_allocate_sequence.cpp:35: [sequenceshard 72057594037927937] TTxAllocateSequence.Execute SEQUENCE_NOT_FOUND PathId# [OwnerId: 123, LocalPathId: 42] 2026-01-08T15:28:34.464675Z node 1 :SEQUENCESHARD TRACE: tx_allocate_sequence.cpp:174: [sequenceshard 72057594037927937] TTxAllocateSequence.Complete 2026-01-08T15:28:34.464946Z node 1 :SEQUENCESHARD TRACE: tx_allocate_sequence.cpp:22: [sequenceshard 72057594037927937] TTxAllocateSequence.Execute PathId# [OwnerId: 123, LocalPathId: 51] Cache# 0 2026-01-08T15:28:34.465073Z node 1 :SEQUENCESHARD TRACE: tx_allocate_sequence.cpp:89: [sequenceshard 72057594037927937] TTxAllocateSequence.Execute SUCCESS PathId# [OwnerId: 123, LocalPathId: 51] AllocationStart# 100061 AllocationCount# 10 AllocationIncrement# 1 2026-01-08T15:28:34.499865Z node 1 :SEQUENCESHARD TRACE: tx_allocate_sequence.cpp:174: [sequenceshard 72057594037927937] TTxAllocateSequence.Complete 2026-01-08T15:28:34.500558Z node 1 :SEQUENCESHARD TRACE: tx_update_sequence.cpp:21: [sequenceshard 72057594037927937] TTxUpdateSequence.Execute PathId# [OwnerId: 123, LocalPathId: 51] Record# PathId { OwnerId: 123 LocalId: 51 } NextValue: 200000 NextUsed: true 2026-01-08T15:28:34.500678Z node 1 :SEQUENCESHARD TRACE: tx_update_sequence.cpp:103: [sequenceshard 72057594037927937] TTxUpdateSequence.Execute SUCCESS PathId# [OwnerId: 123, LocalPathId: 51] 2026-01-08T15:28:34.513154Z node 1 :SEQUENCESHARD TRACE: tx_update_sequence.cpp:108: [sequenceshard 72057594037927937] TTxUpdateSequence.Complete 2026-01-08T15:28:34.513630Z node 1 :SEQUENCESHARD TRACE: tx_allocate_sequence.cpp:22: [sequenceshard 72057594037927937] TTxAllocateSequence.Execute PathId# [OwnerId: 123, LocalPathId: 51] Cache# 0 2026-01-08T15:28:34.513788Z node 1 :SEQUENCESHARD TRACE: tx_allocate_sequence.cpp:89: [sequenceshard 72057594037927937] TTxAllocateSequence.Execute SUCCESS PathId# [OwnerId: 123, LocalPathId: 51] AllocationStart# 200001 AllocationCount# 10 AllocationIncrement# 1 2026-01-08T15:28:34.526621Z node 1 :SEQUENCESHARD TRACE: tx_allocate_sequence.cpp:174: [sequenceshard 72057594037927937] TTxAllocateSequence.Complete 2026-01-08T15:28:34.527190Z node 1 :SEQUENCESHARD TRACE: tx_update_sequence.cpp:21: [sequenceshard 72057594037927937] TTxUpdateSequence.Execute PathId# [OwnerId: 123, LocalPathId: 51] Record# PathId { OwnerId: 123 LocalId: 51 } Cache: 5 2026-01-08T15:28:34.527306Z node 1 :SEQUENCESHARD TRACE: tx_update_sequence.cpp:103: [sequenceshard 72057594037927937] TTxUpdateSequence.Execute SUCCESS PathId# [OwnerId: 123, LocalPathId: 51] 2026-01-08T15:28:34.550560Z node 1 :SEQUENCESHARD TRACE: tx_update_sequence.cpp:108: [sequenceshard 72057594037927937] TTxUpdateSequence.Complete 2026-01-08T15:28:34.551072Z node 1 :SEQUENCESHARD TRACE: tx_allocate_sequence.cpp:22: [sequenceshard 
72057594037927937] TTxAllocateSequence.Execute PathId# [OwnerId: 123, LocalPathId: 51] Cache# 0 2026-01-08T15:28:34.551186Z node 1 :SEQUENCESHARD TRACE: tx_allocate_sequence.cpp:89: [sequenceshard 72057594037927937] TTxAllocateSequence.Execute SUCCESS PathId# [OwnerId: 123, LocalPathId: 51] AllocationStart# 200011 AllocationCount# 5 AllocationIncrement# 1 2026-01-08T15:28:34.567680Z node 1 :SEQUENCESHARD TRACE: tx_allocate_sequence.cpp:174: [sequenceshard ... Id: 43] Cache# 0 2026-01-08T15:28:35.892495Z node 3 :SEQUENCESHARD TRACE: tx_allocate_sequence.cpp:89: [sequenceshard 72057594037927937] TTxAllocateSequence.Execute SUCCESS PathId# [OwnerId: 123, LocalPathId: 43] AllocationStart# 11 AllocationCount# 100 AllocationIncrement# 1 2026-01-08T15:28:35.905728Z node 3 :SEQUENCESHARD TRACE: tx_allocate_sequence.cpp:174: [sequenceshard 72057594037927937] TTxAllocateSequence.Complete 2026-01-08T15:28:35.906282Z node 3 :SEQUENCESHARD TRACE: tx_restore_sequence.cpp:21: [sequenceshard 72057594037927937] TTxRestoreSequence.Execute PathId# [OwnerId: 123, LocalPathId: 43] Record# PathId { OwnerId: 123 LocalId: 43 } MinValue: 1 MaxValue: 9223372036854775807 StartValue: 1 NextValue: 11 Cache: 100 Increment: 1 2026-01-08T15:28:35.906356Z node 3 :SEQUENCESHARD TRACE: tx_restore_sequence.cpp:66: [sequenceshard 72057594037927937] TTxRestoreSequence.Execute SEQUENCE_ALREADY_ACTIVE PathId# [OwnerId: 123, LocalPathId: 43] 2026-01-08T15:28:35.906426Z node 3 :SEQUENCESHARD TRACE: tx_restore_sequence.cpp:103: [sequenceshard 72057594037927937] TTxRestoreSequence.Complete 2026-01-08T15:28:35.906681Z node 3 :SEQUENCESHARD TRACE: tx_redirect_sequence.cpp:22: [sequenceshard 72057594037927937] TTxRedirectSequence.Execute PathId# [OwnerId: 123, LocalPathId: 42] RedirectTo# 12345 2026-01-08T15:28:35.906778Z node 3 :SEQUENCESHARD NOTICE: tx_redirect_sequence.cpp:59: [sequenceshard 72057594037927937] TTxRedirectSequence.Execute SUCCESS PathId# [OwnerId: 123, LocalPathId: 42] RedirectTo# 12345 2026-01-08T15:28:35.920837Z node 3 :SEQUENCESHARD TRACE: tx_redirect_sequence.cpp:64: [sequenceshard 72057594037927937] TTxRedirectSequence.Complete 2026-01-08T15:28:35.921245Z node 3 :SEQUENCESHARD TRACE: tx_redirect_sequence.cpp:22: [sequenceshard 72057594037927937] TTxRedirectSequence.Execute PathId# [OwnerId: 123, LocalPathId: 42] RedirectTo# 12345 2026-01-08T15:28:35.921336Z node 3 :SEQUENCESHARD NOTICE: tx_redirect_sequence.cpp:59: [sequenceshard 72057594037927937] TTxRedirectSequence.Execute SUCCESS PathId# [OwnerId: 123, LocalPathId: 42] RedirectTo# 12345 2026-01-08T15:28:35.934392Z node 3 :SEQUENCESHARD TRACE: tx_redirect_sequence.cpp:64: [sequenceshard 72057594037927937] TTxRedirectSequence.Complete 2026-01-08T15:28:35.934735Z node 3 :SEQUENCESHARD TRACE: tx_redirect_sequence.cpp:22: [sequenceshard 72057594037927937] TTxRedirectSequence.Execute PathId# [OwnerId: 123, LocalPathId: 42] RedirectTo# 12345 2026-01-08T15:28:35.934821Z node 3 :SEQUENCESHARD NOTICE: tx_redirect_sequence.cpp:59: [sequenceshard 72057594037927937] TTxRedirectSequence.Execute SUCCESS PathId# [OwnerId: 123, LocalPathId: 42] RedirectTo# 12345 2026-01-08T15:28:35.949120Z node 3 :SEQUENCESHARD TRACE: tx_redirect_sequence.cpp:64: [sequenceshard 72057594037927937] TTxRedirectSequence.Complete 2026-01-08T15:28:35.949523Z node 3 :SEQUENCESHARD TRACE: tx_allocate_sequence.cpp:22: [sequenceshard 72057594037927937] TTxAllocateSequence.Execute PathId# [OwnerId: 123, LocalPathId: 42] Cache# 0 2026-01-08T15:28:35.949577Z node 3 :SEQUENCESHARD TRACE: 
tx_allocate_sequence.cpp:54: [sequenceshard 72057594037927937] TTxAllocateSequence.Execute SEQUENCE_MOVED PathId# [OwnerId: 123, LocalPathId: 42] MovedTo# 12345 2026-01-08T15:28:35.949658Z node 3 :SEQUENCESHARD TRACE: tx_allocate_sequence.cpp:174: [sequenceshard 72057594037927937] TTxAllocateSequence.Complete 2026-01-08T15:28:35.949941Z node 3 :SEQUENCESHARD TRACE: tx_freeze_sequence.cpp:20: [sequenceshard 72057594037927937] TTxFreezeSequence.Execute PathId# [OwnerId: 123, LocalPathId: 43] 2026-01-08T15:28:35.950038Z node 3 :SEQUENCESHARD NOTICE: tx_freeze_sequence.cpp:68: [sequenceshard 72057594037927937] TTxFreezeSequence.Execute SUCCESS PathId# [OwnerId: 123, LocalPathId: 43] 2026-01-08T15:28:35.962224Z node 3 :SEQUENCESHARD TRACE: tx_freeze_sequence.cpp:73: [sequenceshard 72057594037927937] TTxFreezeSequence.Complete 2026-01-08T15:28:35.962821Z node 3 :SEQUENCESHARD TRACE: tx_restore_sequence.cpp:21: [sequenceshard 72057594037927937] TTxRestoreSequence.Execute PathId# [OwnerId: 123, LocalPathId: 42] Record# PathId { OwnerId: 123 LocalId: 42 } MinValue: 1 MaxValue: 9223372036854775807 StartValue: 1 NextValue: 111 Cache: 100 Increment: 1 2026-01-08T15:28:35.962995Z node 3 :SEQUENCESHARD NOTICE: tx_restore_sequence.cpp:98: [sequenceshard 72057594037927937] TTxRestoreSequence.Execute SUCCESS PathId# [OwnerId: 123, LocalPathId: 42] Record# PathId { OwnerId: 123 LocalId: 42 } MinValue: 1 MaxValue: 9223372036854775807 StartValue: 1 NextValue: 111 Cache: 100 Increment: 1 2026-01-08T15:28:35.975269Z node 3 :SEQUENCESHARD TRACE: tx_restore_sequence.cpp:103: [sequenceshard 72057594037927937] TTxRestoreSequence.Complete 2026-01-08T15:28:35.975602Z node 3 :SEQUENCESHARD TRACE: tx_redirect_sequence.cpp:22: [sequenceshard 72057594037927937] TTxRedirectSequence.Execute PathId# [OwnerId: 123, LocalPathId: 43] RedirectTo# 54321 2026-01-08T15:28:35.975674Z node 3 :SEQUENCESHARD NOTICE: tx_redirect_sequence.cpp:59: [sequenceshard 72057594037927937] TTxRedirectSequence.Execute SUCCESS PathId# [OwnerId: 123, LocalPathId: 43] RedirectTo# 54321 2026-01-08T15:28:35.989169Z node 3 :SEQUENCESHARD TRACE: tx_redirect_sequence.cpp:64: [sequenceshard 72057594037927937] TTxRedirectSequence.Complete 2026-01-08T15:28:35.989491Z node 3 :SEQUENCESHARD TRACE: tx_freeze_sequence.cpp:20: [sequenceshard 72057594037927937] TTxFreezeSequence.Execute PathId# [OwnerId: 123, LocalPathId: 43] 2026-01-08T15:28:35.989531Z node 3 :SEQUENCESHARD TRACE: tx_freeze_sequence.cpp:48: [sequenceshard 72057594037927937] TTxFreezeSequence.Execute SEQUENCE_MOVED PathId# [OwnerId: 123, LocalPathId: 43] MovedTo# 54321 2026-01-08T15:28:35.989579Z node 3 :SEQUENCESHARD TRACE: tx_freeze_sequence.cpp:73: [sequenceshard 72057594037927937] TTxFreezeSequence.Complete 2026-01-08T15:28:35.989795Z node 3 :SEQUENCESHARD TRACE: tx_allocate_sequence.cpp:22: [sequenceshard 72057594037927937] TTxAllocateSequence.Execute PathId# [OwnerId: 123, LocalPathId: 42] Cache# 0 2026-01-08T15:28:35.989869Z node 3 :SEQUENCESHARD TRACE: tx_allocate_sequence.cpp:89: [sequenceshard 72057594037927937] TTxAllocateSequence.Execute SUCCESS PathId# [OwnerId: 123, LocalPathId: 42] AllocationStart# 111 AllocationCount# 100 AllocationIncrement# 1 2026-01-08T15:28:36.002486Z node 3 :SEQUENCESHARD TRACE: tx_allocate_sequence.cpp:174: [sequenceshard 72057594037927937] TTxAllocateSequence.Complete 2026-01-08T15:28:36.360354Z node 4 :SEQUENCESHARD TRACE: sequenceshard_impl.cpp:38: [sequenceshard 72057594037927937] OnActivateExecutor 2026-01-08T15:28:36.360484Z node 4 :SEQUENCESHARD TRACE: 
tx_init_schema.cpp:14: [sequenceshard 72057594037927937] TTxInitSchema.Execute 2026-01-08T15:28:36.373554Z node 4 :SEQUENCESHARD TRACE: tx_init.cpp:14: [sequenceshard 72057594037927937] TTxInit.Execute 2026-01-08T15:28:36.377653Z node 4 :SEQUENCESHARD TRACE: tx_init_schema.cpp:22: [sequenceshard 72057594037927937] TTxInitSchema.Complete 2026-01-08T15:28:36.377740Z node 4 :SEQUENCESHARD TRACE: tx_init.cpp:112: [sequenceshard 72057594037927937] TTxInit.Complete 2026-01-08T15:28:36.379946Z node 4 :SEQUENCESHARD TRACE: tx_create_sequence.cpp:21: [sequenceshard 72057594037927937] TTxCreateSequence.Execute PathId# [OwnerId: 123, LocalPathId: 42] Record# PathId { OwnerId: 123 LocalId: 42 } Cache: 10 Increment: -1 2026-01-08T15:28:36.380082Z node 4 :SEQUENCESHARD NOTICE: tx_create_sequence.cpp:113: [sequenceshard 72057594037927937] TTxCreateSequence.Execute SUCCESS PathId# [OwnerId: 123, LocalPathId: 42] MinValue# -9223372036854775808 MaxValue# -1 StartValue# -1 Cache# 10 Increment# -1 Cycle# false State# Active 2026-01-08T15:28:36.415158Z node 4 :SEQUENCESHARD TRACE: tx_create_sequence.cpp:118: [sequenceshard 72057594037927937] TTxCreateSequence.Complete 2026-01-08T15:28:36.415560Z node 4 :SEQUENCESHARD TRACE: tx_allocate_sequence.cpp:22: [sequenceshard 72057594037927937] TTxAllocateSequence.Execute PathId# [OwnerId: 123, LocalPathId: 42] Cache# 0 2026-01-08T15:28:36.415685Z node 4 :SEQUENCESHARD TRACE: tx_allocate_sequence.cpp:89: [sequenceshard 72057594037927937] TTxAllocateSequence.Execute SUCCESS PathId# [OwnerId: 123, LocalPathId: 42] AllocationStart# -1 AllocationCount# 10 AllocationIncrement# -1 2026-01-08T15:28:36.431261Z node 4 :SEQUENCESHARD TRACE: tx_allocate_sequence.cpp:174: [sequenceshard 72057594037927937] TTxAllocateSequence.Complete 2026-01-08T15:28:36.431722Z node 4 :SEQUENCESHARD TRACE: tx_allocate_sequence.cpp:22: [sequenceshard 72057594037927937] TTxAllocateSequence.Execute PathId# [OwnerId: 123, LocalPathId: 42] Cache# 0 2026-01-08T15:28:36.431850Z node 4 :SEQUENCESHARD TRACE: tx_allocate_sequence.cpp:89: [sequenceshard 72057594037927937] TTxAllocateSequence.Execute SUCCESS PathId# [OwnerId: 123, LocalPathId: 42] AllocationStart# -11 AllocationCount# 10 AllocationIncrement# -1 2026-01-08T15:28:36.445955Z node 4 :SEQUENCESHARD TRACE: tx_allocate_sequence.cpp:174: [sequenceshard 72057594037927937] TTxAllocateSequence.Complete 2026-01-08T15:28:36.446404Z node 4 :SEQUENCESHARD TRACE: tx_allocate_sequence.cpp:22: [sequenceshard 72057594037927937] TTxAllocateSequence.Execute PathId# [OwnerId: 123, LocalPathId: 42] Cache# 18446744073709551615 2026-01-08T15:28:36.446525Z node 4 :SEQUENCESHARD TRACE: tx_allocate_sequence.cpp:89: [sequenceshard 72057594037927937] TTxAllocateSequence.Execute SUCCESS PathId# [OwnerId: 123, LocalPathId: 42] AllocationStart# -21 AllocationCount# 9223372036854775788 AllocationIncrement# -1 2026-01-08T15:28:36.458962Z node 4 :SEQUENCESHARD TRACE: tx_allocate_sequence.cpp:174: [sequenceshard 72057594037927937] TTxAllocateSequence.Complete 2026-01-08T15:28:36.459327Z node 4 :SEQUENCESHARD TRACE: tx_allocate_sequence.cpp:22: [sequenceshard 72057594037927937] TTxAllocateSequence.Execute PathId# [OwnerId: 123, LocalPathId: 42] Cache# 1 2026-01-08T15:28:36.459380Z node 4 :SEQUENCESHARD TRACE: tx_allocate_sequence.cpp:72: [sequenceshard 72057594037927937] TTxAllocateSequence.Execute SEQUENCE_OVERFLOW PathId# [OwnerId: 123, LocalPathId: 42] 2026-01-08T15:28:36.459442Z node 4 :SEQUENCESHARD TRACE: tx_allocate_sequence.cpp:174: [sequenceshard 72057594037927937] 
TTxAllocateSequence.Complete 2026-01-08T15:28:36.459740Z node 4 :SEQUENCESHARD TRACE: tx_update_sequence.cpp:21: [sequenceshard 72057594037927937] TTxUpdateSequence.Execute PathId# [OwnerId: 123, LocalPathId: 42] Record# PathId { OwnerId: 123 LocalId: 42 } Cycle: true 2026-01-08T15:28:36.459834Z node 4 :SEQUENCESHARD TRACE: tx_update_sequence.cpp:103: [sequenceshard 72057594037927937] TTxUpdateSequence.Execute SUCCESS PathId# [OwnerId: 123, LocalPathId: 42] 2026-01-08T15:28:36.472209Z node 4 :SEQUENCESHARD TRACE: tx_update_sequence.cpp:108: [sequenceshard 72057594037927937] TTxUpdateSequence.Complete 2026-01-08T15:28:36.472623Z node 4 :SEQUENCESHARD TRACE: tx_allocate_sequence.cpp:22: [sequenceshard 72057594037927937] TTxAllocateSequence.Execute PathId# [OwnerId: 123, LocalPathId: 42] Cache# 0 2026-01-08T15:28:36.472731Z node 4 :SEQUENCESHARD TRACE: tx_allocate_sequence.cpp:89: [sequenceshard 72057594037927937] TTxAllocateSequence.Execute SUCCESS PathId# [OwnerId: 123, LocalPathId: 42] AllocationStart# -1 AllocationCount# 10 AllocationIncrement# -1 2026-01-08T15:28:36.485222Z node 4 :SEQUENCESHARD TRACE: tx_allocate_sequence.cpp:174: [sequenceshard 72057594037927937] TTxAllocateSequence.Complete 2026-01-08T15:28:36.485656Z node 4 :SEQUENCESHARD TRACE: tx_allocate_sequence.cpp:22: [sequenceshard 72057594037927937] TTxAllocateSequence.Execute PathId# [OwnerId: 123, LocalPathId: 42] Cache# 0 2026-01-08T15:28:36.485778Z node 4 :SEQUENCESHARD TRACE: tx_allocate_sequence.cpp:89: [sequenceshard 72057594037927937] TTxAllocateSequence.Execute SUCCESS PathId# [OwnerId: 123, LocalPathId: 42] AllocationStart# -11 AllocationCount# 10 AllocationIncrement# -1 2026-01-08T15:28:36.506984Z node 4 :SEQUENCESHARD TRACE: tx_allocate_sequence.cpp:174: [sequenceshard 72057594037927937] TTxAllocateSequence.Complete |85.5%| [TS] {BAZEL_UPLOAD} ydb/core/tx/sequenceshard/ut/unittest ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/datashard/ut_replication/unittest >> DataShardReplication::ApplyChangesWithConcurrentTx [GOOD] Test command err: ... waiting for SysViewsRoster update finished 2026-01-08T15:27:09.914894Z node 1 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:125: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, got undelivered to scheme cache: ActorUnknown 2026-01-08T15:27:10.071215Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:114:2161], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2026-01-08T15:27:10.071631Z node 1 :METADATA_PROVIDER ERROR: log.cpp:841: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2026-01-08T15:27:10.071809Z node 1 :METADATA_PROVIDER ERROR: log.cpp:841: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2026-01-08T15:27:10.375656Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2026-01-08T15:27:10.375791Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2026-01-08T15:27:10.455773Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1374: Notification cookie mismatch for subscription [1:34:2081] 1767886018650461 != 1767886018650465 2026-01-08T15:27:10.471439Z node 1 :IMPORT WARN: schemeshard_import.cpp:440: Table profiles were not loaded 2026-01-08T15:27:10.519542Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2026-01-08T15:27:10.622710Z node 1 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: PathErrorUnknown, path: Root/.metadata/script_executions ... waiting for SysViewsRoster update finished (done) 2026-01-08T15:27:10.909883Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2026-01-08T15:27:10.924232Z node 1 :TX_CONVEYOR ERROR: log.cpp:841: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2026-01-08T15:27:11.036619Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:314: actor# [1:68:2115] Handle TEvProposeTransaction 2026-01-08T15:27:11.036702Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:237: actor# [1:68:2115] TxId# 281474976715657 ProcessProposeTransaction 2026-01-08T15:27:11.036838Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:256: actor# [1:68:2115] Cookie# 0 userReqId# "" txid# 281474976715657 SEND to# [1:861:2744] 2026-01-08T15:27:11.214816Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1822: Actor# [1:861:2744] txid# 281474976715657 Bootstrap EvSchemeRequest record: Transaction { ModifyScheme { WorkingDir: "/Root" OperationType: ESchemeOpCreateTable CreateTable { Name: "table-1" Columns { Name: "key" Type: "Uint32" FamilyName: "" NotNull: false } Columns { Name: "value" Type: "Uint32" FamilyName: "" NotNull: false } KeyColumnNames: "key" UniformPartitionsCount: 1 ReplicationConfig { Mode: REPLICATION_MODE_READ_ONLY ConsistencyLevel: CONSISTENCY_LEVEL_GLOBAL } } } } ExecTimeoutPeriod: 18446744073709551615 2026-01-08T15:27:11.214926Z node 1 :TX_PROXY DEBUG: schemereq.cpp:647: Actor# [1:861:2744] txid# 281474976715657 Bootstrap, UserSID: CheckAdministrator: 0 CheckDatabaseAdministrator: 0 2026-01-08T15:27:11.215558Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1887: Actor# [1:861:2744] txid# 281474976715657 HANDLE EvNavigateKeySetResult TFlatSchemeReq marker# P6 
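The TTxAllocateSequence trace above pins down the allocation arithmetic for a descending sequence: created with Increment: -1 and Cache: 10, the sequence gets MinValue# -9223372036854775808, MaxValue# -1 and StartValue# -1; Cache# 0 requests fall back to the configured cache of 10 (starts -1, then -11), a Cache# 18446744073709551615 request drains the remaining 9223372036854775788 values, the next request reports SEQUENCE_OVERFLOW, and TTxUpdateSequence with Cycle: true makes allocation restart from the start value. The following is a minimal standalone C++ sketch of that arithmetic, with a hypothetical TSeqSketch type rather than the actual sequenceshard code, that reproduces the numbers in the trace:

#include <cstdint>
#include <optional>

// Hypothetical model (not the sequenceshard implementation) of a descending
// sequence created with Increment# -1, Cache# 10: MinValue# is INT64_MIN,
// MaxValue#/StartValue# are -1, and allocations move towards MinValue#.
struct TSeqSketch {
    int64_t Min = INT64_MIN;
    int64_t Start = -1;          // also MaxValue for a descending sequence
    uint64_t DefaultCache = 10;  // Cache: 10 from TTxCreateSequence
    int64_t Next = -1;
    bool Cycle = false;
    bool Exhausted = false;

    struct TAlloc { int64_t Start; uint64_t Count; };

    // Cache# 0 means "use the configured cache"; Cache# UINT64_MAX asks for
    // everything that is left; nullopt models SEQUENCE_OVERFLOW.
    std::optional<TAlloc> Allocate(uint64_t cache) {
        if (Exhausted) {
            if (!Cycle) {
                return std::nullopt;   // SEQUENCE_OVERFLOW
            }
            Next = Start;              // Cycle: true restarts the range
            Exhausted = false;
        }
        // Values left down to Min; unsigned arithmetic avoids signed
        // overflow when Min is INT64_MIN.
        uint64_t left = (uint64_t(Next) - uint64_t(Min)) + 1;
        uint64_t count = cache ? cache : DefaultCache;
        if (count >= left) {
            count = left;
            Exhausted = true;
        }
        TAlloc out{Next, count};
        if (!Exhausted) {
            Next -= int64_t(count);    // Increment# -1
        }
        return out;
    }
};

Checked against the trace: Allocate(0) twice returns starts -1 and -11 with count 10, Allocate(18446744073709551615) returns start -21 with count 9223372036854775788, the next call yields nullopt (overflow), and once Cycle is enabled allocation begins again at -1.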
2026-01-08T15:27:11.215656Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1877: Actor# [1:861:2744] txid# 281474976715657 TEvNavigateKeySet requested from SchemeCache 2026-01-08T15:27:11.216005Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1631: Actor# [1:861:2744] txid# 281474976715657 HANDLE EvNavigateKeySetResult TFlatSchemeReq marker# P5 ErrorCount# 0 2026-01-08T15:27:11.216166Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1666: Actor# [1:861:2744] HANDLE EvNavigateKeySetResult, txid# 281474976715657 shardToRequest# 72057594046644480 DomainKey# [OwnerId: 72057594046644480, LocalPathId: 1] DomainInfo.Params# Version: 1 PlanResolution: 500 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 RedirectRequired# true 2026-01-08T15:27:11.216300Z node 1 :TX_PROXY DEBUG: schemereq.cpp:117: Actor# [1:861:2744] txid# 281474976715657 SEND to# 72057594046644480 shardToRequest {TEvModifySchemeTransaction txid# 281474976715657 TabletId# 72057594046644480} 2026-01-08T15:27:11.218265Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:27:11.218749Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1533: Actor# [1:861:2744] txid# 281474976715657 HANDLE EvClientConnected 2026-01-08T15:27:11.219493Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1555: Actor# [1:861:2744] txid# 281474976715657 Status StatusAccepted HANDLE {TEvModifySchemeTransactionResult Status# StatusAccepted txid# 281474976715657} 2026-01-08T15:27:11.219576Z node 1 :TX_PROXY DEBUG: schemereq.cpp:627: Actor# [1:861:2744] txid# 281474976715657 SEND to# [1:829:2724] Source {TEvProposeTransactionStatus txid# 281474976715657 Status# 53} 2026-01-08T15:27:11.252073Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3119: StateInit, received event# 268828672, Sender [1:877:2759], Recipient [1:886:2765]: NKikimr::TEvTablet::TEvBoot 2026-01-08T15:27:11.253104Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3119: StateInit, received event# 268828673, Sender [1:877:2759], Recipient [1:886:2765]: NKikimr::TEvTablet::TEvRestored 2026-01-08T15:27:11.253366Z node 1 :TX_DATASHARD INFO: datashard.cpp:374: TDataShard::OnActivateExecutor: tablet 72075186224037888 actor [1:886:2765] 2026-01-08T15:27:11.253635Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:661: TxInitSchema.Execute 2026-01-08T15:27:11.298592Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateInactive, received event# 268828684, Sender [1:877:2759], Recipient [1:886:2765]: NKikimr::TEvTablet::TEvFollowerSyncComplete 2026-01-08T15:27:11.299336Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:727: TxInitSchema.Complete 2026-01-08T15:27:11.299434Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:50: TDataShard::TTxInit::Execute 2026-01-08T15:27:11.301077Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1324: LoadChangeRecords: QueueSize: 0, at tablet: 72075186224037888 2026-01-08T15:27:11.301161Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1380: LoadLockChangeRecords at tablet: 72075186224037888 2026-01-08T15:27:11.301235Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1429: LoadChangeRecordCommits at tablet: 72075186224037888 2026-01-08T15:27:11.301585Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:94: TDataShard::TTxInit::Complete 2026-01-08T15:27:11.301746Z node 1 :TX_DATASHARD DEBUG: 
datashard__init.cpp:102: TDataShard::TTxInitRestored::Execute 2026-01-08T15:27:11.301838Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:142: DataShard 72075186224037888 persisting started state actor id [1:901:2765] in generation 1 2026-01-08T15:27:11.312692Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:151: TDataShard::TTxInitRestored::Complete 2026-01-08T15:27:11.348641Z node 1 :TX_DATASHARD INFO: datashard.cpp:418: Switched to work state WaitScheme tabletId 72075186224037888 2026-01-08T15:27:11.348862Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:458: 72075186224037888 not sending time cast registration request in state WaitScheme: missing processing params 2026-01-08T15:27:11.349012Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1259: Change sender created: at tablet: 72075186224037888, actorId: [1:903:2775] 2026-01-08T15:27:11.349060Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1264: Trying to activate change sender: at tablet: 72075186224037888 2026-01-08T15:27:11.349096Z node 1 :TX_DATASHARD INFO: datashard.cpp:1281: Cannot activate change sender: at tablet: 72075186224037888, state: WaitScheme 2026-01-08T15:27:11.349131Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2026-01-08T15:27:11.349387Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3153: StateWork, received event# 2146435072, Sender [1:886:2765], Recipient [1:886:2765]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvProgressTransaction 2026-01-08T15:27:11.349432Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3178: StateWork, processing event TEvPrivate::TEvProgressTransaction 2026-01-08T15:27:11.349789Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 72075186224037888 2026-01-08T15:27:11.349909Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 72075186224037888 2026-01-08T15:27:11.350009Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2026-01-08T15:27:11.350051Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 0 2026-01-08T15:27:11.350105Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:385: Check unit PlanQueue at 72075186224037888 2026-01-08T15:27:11.350156Z node 1 :TX_DATASHARD TRACE: plan_queue_unit.cpp:59: TPlanQueueUnit at 72075186224037888 has no attached operations 2026-01-08T15:27:11.350190Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:403: Unit PlanQueue has no ready operations at 72075186224037888 2026-01-08T15:27:11.350220Z node 1 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186224037888 TxInFly 0 2026-01-08T15:27:11.350263Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2026-01-08T15:27:11.350711Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3153: StateWork, received event# 269877761, Sender [1:887:2766], Recipient [1:886:2765]: NKikimr::TEvTabletPipe::TEvServerConnected 2026-01-08T15:27:11.350752Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3189: StateWork, processing event TEvTabletPipe::TEvServerConnected 2026-01-08T15:27:11.350814Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3740: Server connected at leader tablet# 72075186224037888, clientId# [1:882:2762], serverId# [1:887:2766], sessionId# [0:0:0] 2026-01-08T15:27:11.350910Z node 1 :TX_DATASHARD TRACE: 
datashard_impl.h:3153: StateWork, received event# 269549568, Sender [1:400:2399], Recipient [1:887:2766] 2026-01-08T15:27:11.350944Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3159: StateWork, processing event TEvDataShard::TEvProposeTransaction 2026-01-08T15:27:11.351037Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037888 2026-01-08T15:27:11.351249Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1848: Trying to execute [0:281474976715657] at 72075186224037888 on unit CheckSchemeTx 2026-01-08T15:27:11.351305Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:133: Propose scheme transaction at tablet 72075186224037888 txId 281474976715657 ssId 72057594046644480 seqNo 2:1 2026-01-08T15:27:11.351402Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:221: Prepared scheme transaction txId 281474976715657 at tablet 72075186224037888 2026 ... :34.822337Z node 8 :TX_DATASHARD TRACE: datashard_pipeline.cpp:483: Activated operation [0:2] at 72075186224037888 2026-01-08T15:28:34.822393Z node 8 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1883: Execution status for [0:2] at 72075186224037888 is Executed 2026-01-08T15:28:34.822421Z node 8 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1931: Advance execution plan for [0:2] at 72075186224037888 executing on unit BuildAndWaitDependencies 2026-01-08T15:28:34.822445Z node 8 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1937: Add [0:2] at 72075186224037888 to execution unit ExecuteRead 2026-01-08T15:28:34.822469Z node 8 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1848: Trying to execute [0:2] at 72075186224037888 on unit ExecuteRead 2026-01-08T15:28:34.822598Z node 8 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:1748: 72075186224037888 Execute read# 1, request: { ReadId: 0 TableId { OwnerId: 72057594046644480 TableId: 38 SchemaVersion: 1 } Columns: 1 Columns: 2 Snapshot { Step: 2500 TxId: 18446744073709551615 } LockTxId: 281474976715660 ResultFormat: FORMAT_CELLVEC MaxRows: 1001 MaxBytes: 5242880 Reverse: false LockNodeId: 8 TotalRowsLimit: 1001 LockMode: OPTIMISTIC } 2026-01-08T15:28:34.822988Z node 8 :TX_DATASHARD DEBUG: datashard__read_iterator.cpp:2672: 72075186224037888 Acquired lock# 281474976715660, counter# 0 for [OwnerId: 72057594046644480, LocalPathId: 38] 2026-01-08T15:28:34.823061Z node 8 :TX_DATASHARD TRACE: datashard.cpp:2488: PromoteImmediatePostExecuteEdges at 72075186224037888 promoting UnprotectedReadEdge to v2500/18446744073709551615 2026-01-08T15:28:34.823125Z node 8 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2396: 72075186224037888 Complete read# {[8:1068:2888], 0} after executionsCount# 1 2026-01-08T15:28:34.823205Z node 8 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2370: 72075186224037888 read iterator# {[8:1068:2888], 0} sends rowCount# 1, bytes# 32, quota rows left# 1000, quota bytes left# 5242848, hasUnreadQueries# 0, total queries# 1, firstUnprocessed# 0 2026-01-08T15:28:34.823302Z node 8 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2421: 72075186224037888 read iterator# {[8:1068:2888], 0} finished in read 2026-01-08T15:28:34.823408Z node 8 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1883: Execution status for [0:2] at 72075186224037888 is Executed 2026-01-08T15:28:34.823438Z node 8 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1931: Advance execution plan for [0:2] at 72075186224037888 executing on unit ExecuteRead 2026-01-08T15:28:34.823467Z node 8 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1937: Add [0:2] at 72075186224037888 to 
execution unit CompletedOperations 2026-01-08T15:28:34.823496Z node 8 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1848: Trying to execute [0:2] at 72075186224037888 on unit CompletedOperations 2026-01-08T15:28:34.823548Z node 8 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1883: Execution status for [0:2] at 72075186224037888 is Executed 2026-01-08T15:28:34.823572Z node 8 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1931: Advance execution plan for [0:2] at 72075186224037888 executing on unit CompletedOperations 2026-01-08T15:28:34.823608Z node 8 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1943: Execution plan for [0:2] at 72075186224037888 has finished 2026-01-08T15:28:34.823654Z node 8 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2932: TTxReadViaPipeline(69) Execute with status# Executed at tablet# 72075186224037888 2026-01-08T15:28:34.823781Z node 8 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2981: TTxReadViaPipeline(69) Complete: at tablet# 72075186224037888 2026-01-08T15:28:34.824037Z node 8 :TX_DATASHARD TRACE: datashard_impl.h:3153: StateWork, received event# 275709965, Sender [8:70:2117], Recipient [8:886:2765]: NKikimrLongTxService.TEvLockStatus LockId: 281474976715660 LockNode: 8 Status: STATUS_SUBSCRIBED 2026-01-08T15:28:34.825480Z node 8 :TX_DATASHARD TRACE: datashard_impl.h:3153: StateWork, received event# 269553219, Sender [8:1068:2888], Recipient [8:886:2765]: NKikimrTxDataShard.TEvReadCancel ReadId: 0 2026-01-08T15:28:34.825559Z node 8 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:3668: 72075186224037888 ReadCancel: { ReadId: 0 } { items { uint32_value: 1 } items { uint32_value: 11 } } 2026-01-08T15:28:34.828290Z node 8 :TX_DATASHARD TRACE: datashard_impl.h:3153: StateWork, received event# 269877761, Sender [8:1072:2893], Recipient [8:886:2765]: NKikimr::TEvTabletPipe::TEvServerConnected 2026-01-08T15:28:34.828362Z node 8 :TX_DATASHARD TRACE: datashard_impl.h:3189: StateWork, processing event TEvTabletPipe::TEvServerConnected 2026-01-08T15:28:34.828421Z node 8 :TX_DATASHARD DEBUG: datashard.cpp:3740: Server connected at leader tablet# 72075186224037888, clientId# [8:1071:2892], serverId# [8:1072:2893], sessionId# [0:0:0] 2026-01-08T15:28:34.828627Z node 8 :TX_DATASHARD TRACE: datashard_impl.h:3153: StateWork, received event# 269549570, Sender [8:1070:2891], Recipient [8:886:2765]: NKikimrTxDataShard.TEvApplyReplicationChanges TableId { OwnerId: 72057594046644480 TableId: 38 SchemaVersion: 1 } Source: "my-source" Changes { SourceOffset: 1 WriteTxId: 0 Key: "\001\000\004\000\000\000\001\000\000\000" Upsert { Tags: 2 Data: "\001\000\004\000\000\000\025\000\000\000" } } 2026-01-08T15:28:34.828831Z node 8 :TX_DATASHARD TRACE: datashard.cpp:2377: GetMvccTxVersion at 72075186224037888 CompleteEdge# v2000/281474976715657 IncompleteEdge# v{min} UnprotectedReadEdge# v2500/18446744073709551615 ImmediateWriteEdge# v2000/18446744073709551615 ImmediateWriteEdgeReplied# v2000/18446744073709551615 2026-01-08T15:28:34.828979Z node 8 :TX_DATASHARD TRACE: locks.cpp:194: Lock 281474976715660 marked broken at v{min} 2026-01-08T15:28:34.840280Z node 8 :TX_DATASHARD DEBUG: datashard.cpp:2572: Waiting for PlanStep# 2501 from mediator time cast 2026-01-08T15:28:34.841196Z node 8 :TX_DATASHARD TRACE: datashard_impl.h:3153: StateWork, received event# 270270977, Sender [8:26:2073], Recipient [8:886:2765]: {TEvNotifyPlanStep TabletId# 72075186224037888 PlanStep# 2501} 2026-01-08T15:28:34.841250Z node 8 :TX_DATASHARD TRACE: datashard_impl.h:3193: StateWork, processing event 
TEvMediatorTimecast::TEvNotifyPlanStep 2026-01-08T15:28:34.841293Z node 8 :TX_DATASHARD DEBUG: datashard.cpp:3810: Notified by mediator time cast with PlanStep# 2501 at tablet 72075186224037888 2026-01-08T15:28:34.841355Z node 8 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2026-01-08T15:28:35.070007Z node 8 :TX_DATASHARD TRACE: datashard_impl.h:3153: StateWork, received event# 269553215, Sender [8:1094:2910], Recipient [8:886:2765]: NKikimrTxDataShard.TEvRead ReadId: 0 TableId { OwnerId: 72057594046644480 TableId: 38 SchemaVersion: 1 } Columns: 1 Columns: 2 Snapshot { Step: 2500 TxId: 18446744073709551615 } LockTxId: 281474976715660 ResultFormat: FORMAT_CELLVEC MaxRows: 1001 MaxBytes: 5242880 Reverse: false LockNodeId: 8 TotalRowsLimit: 1001 LockMode: OPTIMISTIC RangesSize: 1 2026-01-08T15:28:35.070200Z node 8 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2697: TTxReadViaPipeline execute: at tablet# 72075186224037888, FollowerId 0 2026-01-08T15:28:35.070295Z node 8 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1848: Trying to execute [0:3] at 72075186224037888 on unit CheckRead 2026-01-08T15:28:35.070434Z node 8 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1883: Execution status for [0:3] at 72075186224037888 is Executed 2026-01-08T15:28:35.070500Z node 8 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1931: Advance execution plan for [0:3] at 72075186224037888 executing on unit CheckRead 2026-01-08T15:28:35.070555Z node 8 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1937: Add [0:3] at 72075186224037888 to execution unit BuildAndWaitDependencies 2026-01-08T15:28:35.070597Z node 8 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1848: Trying to execute [0:3] at 72075186224037888 on unit BuildAndWaitDependencies 2026-01-08T15:28:35.070683Z node 8 :TX_DATASHARD TRACE: datashard_pipeline.cpp:483: Activated operation [0:3] at 72075186224037888 2026-01-08T15:28:35.070742Z node 8 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1883: Execution status for [0:3] at 72075186224037888 is Executed 2026-01-08T15:28:35.070770Z node 8 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1931: Advance execution plan for [0:3] at 72075186224037888 executing on unit BuildAndWaitDependencies 2026-01-08T15:28:35.070807Z node 8 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1937: Add [0:3] at 72075186224037888 to execution unit ExecuteRead 2026-01-08T15:28:35.070835Z node 8 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1848: Trying to execute [0:3] at 72075186224037888 on unit ExecuteRead 2026-01-08T15:28:35.070987Z node 8 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:1748: 72075186224037888 Execute read# 1, request: { ReadId: 0 TableId { OwnerId: 72057594046644480 TableId: 38 SchemaVersion: 1 } Columns: 1 Columns: 2 Snapshot { Step: 2500 TxId: 18446744073709551615 } LockTxId: 281474976715660 ResultFormat: FORMAT_CELLVEC MaxRows: 1001 MaxBytes: 5242880 Reverse: false LockNodeId: 8 TotalRowsLimit: 1001 LockMode: OPTIMISTIC } 2026-01-08T15:28:35.071285Z node 8 :TX_DATASHARD DEBUG: datashard__read_iterator.cpp:2672: 72075186224037888 Acquired lock# 281474976715660, counter# 18446744073709551612 for [OwnerId: 72057594046644480, LocalPathId: 38] 2026-01-08T15:28:35.071351Z node 8 :TX_DATASHARD TRACE: datashard.cpp:2488: PromoteImmediatePostExecuteEdges at 72075186224037888 promoting UnprotectedReadEdge to v2500/18446744073709551615 2026-01-08T15:28:35.071416Z node 8 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2396: 72075186224037888 Complete read# 
{[8:1094:2910], 0} after executionsCount# 1 2026-01-08T15:28:35.071476Z node 8 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2370: 72075186224037888 read iterator# {[8:1094:2910], 0} sends rowCount# 1, bytes# 32, quota rows left# 1000, quota bytes left# 5242848, hasUnreadQueries# 0, total queries# 1, firstUnprocessed# 0 2026-01-08T15:28:35.071577Z node 8 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2421: 72075186224037888 read iterator# {[8:1094:2910], 0} finished in read 2026-01-08T15:28:35.071656Z node 8 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1883: Execution status for [0:3] at 72075186224037888 is Executed 2026-01-08T15:28:35.071687Z node 8 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1931: Advance execution plan for [0:3] at 72075186224037888 executing on unit ExecuteRead 2026-01-08T15:28:35.071713Z node 8 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1937: Add [0:3] at 72075186224037888 to execution unit CompletedOperations 2026-01-08T15:28:35.071742Z node 8 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1848: Trying to execute [0:3] at 72075186224037888 on unit CompletedOperations 2026-01-08T15:28:35.071791Z node 8 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1883: Execution status for [0:3] at 72075186224037888 is Executed 2026-01-08T15:28:35.071815Z node 8 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1931: Advance execution plan for [0:3] at 72075186224037888 executing on unit CompletedOperations 2026-01-08T15:28:35.071851Z node 8 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1943: Execution plan for [0:3] at 72075186224037888 has finished 2026-01-08T15:28:35.071904Z node 8 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2932: TTxReadViaPipeline(69) Execute with status# Executed at tablet# 72075186224037888 2026-01-08T15:28:35.072084Z node 8 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2981: TTxReadViaPipeline(69) Complete: at tablet# 72075186224037888 2026-01-08T15:28:35.073327Z node 8 :TX_DATASHARD TRACE: datashard_impl.h:3153: StateWork, received event# 269553219, Sender [8:1094:2910], Recipient [8:886:2765]: NKikimrTxDataShard.TEvReadCancel ReadId: 0 2026-01-08T15:28:35.073403Z node 8 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:3668: 72075186224037888 ReadCancel: { ReadId: 0 } { items { uint32_value: 1 } items { uint32_value: 11 } } >> TTestYqlToMiniKQLCompile::EraseRow [GOOD] >> TTestYqlToMiniKQLCompile::UpdateRow |85.5%| [TM] {BAZEL_UPLOAD} ydb/core/tx/datashard/ut_replication/unittest >> TGRpcRateLimiterTest::CreateResource >> TTestYqlToMiniKQLCompile::UpdateRow [GOOD] >> TTestYqlToMiniKQLCompile::SelectRow >> TControlPlaneProxyCheckNegativePermissionsSuccess::ShouldSendListQueries [GOOD] >> TControlPlaneProxyCheckNegativePermissionsSuccess::ShouldSendDescribeQuery |85.6%| [TS] {RESULT} ydb/core/tx/sequenceshard/ut/unittest |85.6%| [TM] {RESULT} ydb/core/tx/datashard/ut_replication/unittest >> TTestYqlToMiniKQLCompile::SelectRow [GOOD] >> TTestYqlToMiniKQLCompile::SelectRange >> TTestYqlToMiniKQLCompile::SelectRange [GOOD] >> TTestYqlToMiniKQLCompile::SimpleCrossShardTx [GOOD] >> TTestYqlToMiniKQLCompile::AcquireLocks >> TTestYqlToMiniKQLCompile::AcquireLocks [GOOD] >> TTestYqlToMiniKQLCompile::StaticMapTypeOf >> LongTxService::AcquireSnapshot [GOOD] >> LongTxService::LockSubscribe |85.6%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/datashard/ut_range_ops/ydb-core-tx-datashard-ut_range_ops |85.6%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/datashard/ut_range_ops/ydb-core-tx-datashard-ut_range_ops |85.6%| [LD] {RESULT} 
$(B)/ydb/core/tx/datashard/ut_range_ops/ydb-core-tx-datashard-ut_range_ops >> TTestYqlToMiniKQLCompile::StaticMapTypeOf [GOOD] >> TTestYqlToMiniKQLCompile::SelectRangeAtomInRange [GOOD] >> TTestYqlToMiniKQLCompile::Extract >> TControlPlaneProxyCheckNegativePermissionsSuccess::ShouldSendDescribeQuery [GOOD] >> TControlPlaneProxyCheckNegativePermissionsSuccess::ShouldSendGetQueryStatus >> Graph::MemoryBordersOnGet [GOOD] >> Graph::LocalBordersOnGet >> TTestYqlToMiniKQLCompile::Extract [GOOD] >> TSequence::CreateTableWithDefaultFromSequenceFromSelect [GOOD] >> TSequence::CreateTableWithDefaultFromSequenceBadRequest >> TControlPlaneProxyCheckNegativePermissionsSuccess::ShouldSendGetQueryStatus [GOOD] >> TControlPlaneProxyCheckNegativePermissionsSuccess::ShouldSendModifyQuery |85.6%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/tests/tools/kqprun/kqprun |85.6%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/tests/tools/kqprun/kqprun |85.6%| [TS] {asan, default-linux-x86_64, release} ydb/core/client/minikql_compile/ut/unittest >> TTestYqlToMiniKQLCompile::Extract [GOOD] |85.6%| [LD] {RESULT} $(B)/ydb/tests/tools/kqprun/kqprun |85.6%| [TS] {RESULT} ydb/core/client/minikql_compile/ut/unittest |85.6%| [TS] {BAZEL_UPLOAD} ydb/core/client/minikql_compile/ut/unittest >> TestPurecalcFilter::WatermarkWhereFalse [GOOD] >> TabletService_Restart::Basics [GOOD] >> TabletService_Restart::OnlyAdminsAllowed >> TControlPlaneProxyCheckNegativePermissionsSuccess::ShouldSendModifyQuery [GOOD] >> TControlPlaneProxyCheckNegativePermissionsSuccess::ShouldSendDeleteQuery >> TestRawParser::Simple >> LongTxService::LockSubscribe [GOOD] >> TestRawParser::Simple [GOOD] >> TestRawParser::ManyValues >> Coordinator::ReadStepSubscribe >> DataShardSnapshotIsolation::ReadWriteUncommittedUpsertBlockedByVolatileConflict [GOOD] >> DataShardSnapshotIsolation::ReadWriteUncommittedUpsertBlockedByVolatileNoConflict >> TestRawParser::ManyValues [GOOD] >> TestRawParser::ChangeParserSchema >> TControlPlaneProxyCheckNegativePermissionsSuccess::ShouldSendDeleteQuery [GOOD] >> TControlPlaneProxyCheckNegativePermissionsSuccess::ShouldSendControlQuery >> TestRawParser::ChangeParserSchema [GOOD] >> TestRawParser::TypeKindsValidation >> TestRawParser::TypeKindsValidation [GOOD] >> RangeOps::Intersection [GOOD] >> Graph::LocalBordersOnGet [GOOD] >> TControlPlaneProxyCheckNegativePermissionsSuccess::ShouldSendControlQuery [GOOD] >> TControlPlaneProxyCheckNegativePermissionsSuccess::ShouldSendGetResultData ------- [TS] {asan, default-linux-x86_64, release} ydb/core/tx/long_tx_service/ut/unittest >> LongTxService::LockSubscribe [GOOD] Test command err: 2026-01-08T15:28:35.176777Z node 2 :BS_PDISK WARN: {BPD01@blobstorage_pdisk_blockdevice_async.cpp:924} Warning# got EIoResult::FileOpenError from IoContext->Setup PDiskId# 1000 2026-01-08T15:28:35.177299Z node 2 :BS_PDISK CRIT: {BPD39@blobstorage_pdisk_impl.cpp:2980} BlockDevice initialization error Details# Can't open file "/home/runner/.ya/build/build_root/663n/001f2a/r3tmp/tmpg9BUaR/pdisk_1.dat": unknown reason, errno# 0. PDiskId# 1000 2026-01-08T15:28:35.177855Z node 2 :BS_PDISK CRIT: {BPD01@blobstorage_pdisk_actor.cpp:267} PDiskId# 1000 bootstrapped to the StateError, reason# Can't open file "/home/runner/.ya/build/build_root/663n/001f2a/r3tmp/tmpg9BUaR/pdisk_1.dat": unknown reason, errno# 0. 
Can not be initialized Config: {TPDiskConfig Path# "/home/runner/.ya/build/build_root/663n/001f2a/r3tmp/tmpg9BUaR/pdisk_1.dat" ExpectedPath# "" ExpectedSerial# "" PDiskGuid# 5882558118597481859 PDiskId# 1000 PDiskCategory# {Type# DEVICE_TYPE_ROT Kind# 0} MetadataOnly# 0 StartOwnerRound# 2 SectorMap# true EnableSectorEncryption # 0 ChunkSize# 134217728 SectorSize# 4096 StatisticsUpdateIntervalMs# 1000 SchedulerCfg# {TPDiskSchedulerConfig BytesSchedulerWeight# 1 LogWeight# 1 FreshWeight# 2 CompWeight# 7 SyncLogWeight# 15 HugeWeight# 2 FastReadWeight# 1 OtherReadWeight# 1 LoadWeight# 2 LowReadWeight# 1 MaxChunkReadsPerCycle# 16 MaxChunkReadsDurationPerCycleMs# 0.25 MaxChunkWritesPerCycle# 8 MaxChunkWritesDurationPerCycleMs# 1} MinLogChunksTotal# 4 MaxLogChunksPerOwnerMultiplier# 5 MaxLogChunksPerOwnerDivisor# 4 SortFreeChunksPerItems# 100 GetDriveDataSwitch# DoNotTouch WriteCacheSwitch# DoNotTouch DriveModelSeekTimeNs# 8000000 DriveModelSpeedBps# 127000000 DriveModelSpeedBpsMin# 135000000 DriveModelSpeedBpsMax# 200000000 DriveModelBulkWrieBlockSize# 2097152 DriveModelTrimSpeedBps# 0 ReorderingMs# 50 DeviceInFlight# 4 CostLimitNs# 50000000 BufferPoolBufferSizeBytes# 524288 BufferPoolBufferCount# 256 MaxQueuedCompletionActions# 128 ExpectedSlotCount# 0 SlotSizeInUnits# 0 ReserveLogChunksMultiplier# 56 InsaneLogChunksMultiplier# 40 RedLogChunksMultiplier# 30 OrangeLogChunksMultiplier# 20 WarningLogChunksMultiplier# 4 YellowLogChunksMultiplier# 4 MaxMetadataMegabytes# 32 SpaceColorBorder# GREEN CompletionThreadsCount# 1 UseNoopScheduler# false PlainDataChunks# 0 SeparateHugePriorities# 0} PDiskId# 1000 2026-01-08T15:28:35.241343Z node 1 :LONG_TX_SERVICE DEBUG: long_tx_service_impl.cpp:94: TLongTxService [Node 1] Received TEvBeginTx from [1:441:2331] 2026-01-08T15:28:35.241454Z node 1 :LONG_TX_SERVICE DEBUG: long_tx_service_impl.cpp:123: TLongTxService [Node 1] Created new LongTxId# ydb://long-tx/000000001j76bngxv0775dd4xb?node_id=1 2026-01-08T15:28:35.262902Z node 2 :LONG_TX_SERVICE DEBUG: long_tx_service_impl.cpp:265: TLongTxService [Node 2] Received TEvAttachColumnShardWrites from [2:442:2101] LongTxId# ydb://long-tx/000000001j76bngxv0775dd4xb?node_id=1 2026-01-08T15:28:35.263053Z node 2 :LONG_TX_SERVICE DEBUG: long_tx_service_impl.cpp:833: TLongTxService [Node 2] Received TEvNodeConnected for NodeId# 1 from session [2:90:2048] 2026-01-08T15:28:35.263218Z node 1 :LONG_TX_SERVICE DEBUG: long_tx_service_impl.cpp:265: TLongTxService [Node 1] Received TEvAttachColumnShardWrites from [2:154:2090] LongTxId# ydb://long-tx/000000001j76bngxv0775dd4xb?node_id=1 2026-01-08T15:28:35.263459Z node 2 :LONG_TX_SERVICE DEBUG: long_tx_service_impl.cpp:139: TLongTxService [Node 2] Received TEvCommitTx from [2:442:2101] LongTxId# ydb://long-tx/000000001j76bngxv0775dd4xb?node_id=1 2026-01-08T15:28:35.263689Z node 1 :LONG_TX_SERVICE DEBUG: long_tx_service_impl.cpp:139: TLongTxService [Node 1] Received TEvCommitTx from [2:154:2090] LongTxId# ydb://long-tx/000000001j76bngxv0775dd4xb?node_id=1 2026-01-08T15:28:35.263752Z node 1 :LONG_TX_SERVICE DEBUG: long_tx_service_impl.cpp:162: TLongTxService [Node 1] Committed LongTxId# ydb://long-tx/000000001j76bngxv0775dd4xb?node_id=1 without side-effects 2026-01-08T15:28:35.263974Z node 2 :LONG_TX_SERVICE DEBUG: long_tx_service_impl.cpp:212: TLongTxService [Node 2] Received TEvRollbackTx from [2:442:2101] LongTxId# ydb://long-tx/000000001j76bngxv0775dd4xb?node_id=1 2026-01-08T15:28:35.264088Z node 1 :LONG_TX_SERVICE DEBUG: long_tx_service_impl.cpp:212: TLongTxService [Node 1] 
Received TEvRollbackTx from [2:154:2090] LongTxId# ydb://long-tx/000000001j76bngxv0775dd4xb?node_id=1 2026-01-08T15:28:35.264406Z node 2 :LONG_TX_SERVICE DEBUG: long_tx_service_impl.cpp:212: TLongTxService [Node 2] Received TEvRollbackTx from [2:442:2101] LongTxId# ydb://long-tx/000000001j76bngxv0775dd4xb?node_id=1 2026-01-08T15:28:35.264630Z node 1 :LONG_TX_SERVICE DEBUG: long_tx_service_impl.cpp:212: TLongTxService [Node 1] Received TEvRollbackTx from [2:154:2090] LongTxId# ydb://long-tx/000000001j76bngxv0775dd4xb?node_id=1 2026-01-08T15:28:35.264928Z node 1 :PIPE_SERVER ERROR: tablet_pipe_server.cpp:228: [72057594046447617] NodeDisconnected NodeId# 2 2026-01-08T15:28:35.265006Z node 1 :PIPE_SERVER ERROR: tablet_pipe_server.cpp:228: [72057594037932033] NodeDisconnected NodeId# 2 2026-01-08T15:28:35.265086Z node 1 :PIPE_SERVER ERROR: tablet_pipe_server.cpp:228: [72057594046578946] NodeDisconnected NodeId# 2 2026-01-08T15:28:35.265418Z node 2 :LONG_TX_SERVICE DEBUG: long_tx_service_impl.cpp:871: TLongTxService [Node 2] Received TEvNodeDisconnected for NodeId# 1 from session [2:90:2048] 2026-01-08T15:28:35.265788Z node 2 :BS_NODE ERROR: {NW42@node_warden_pipe.cpp:60} Handle(TEvTabletPipe::TEvClientDestroyed) ClientId# [2:62:2076] ServerId# [1:365:2282] TabletId# 72057594037932033 PipeClientId# [2:62:2076] 2026-01-08T15:28:35.265986Z node 2 :TX_PROXY WARN: proxy_impl.cpp:226: actor# [2:152:2089] HANDLE TEvClientDestroyed from tablet# 72057594046447617 2026-01-08T15:28:35.271074Z node 2 :LONG_TX_SERVICE DEBUG: long_tx_service_impl.cpp:139: TLongTxService [Node 2] Received TEvCommitTx from [2:442:2101] LongTxId# ydb://long-tx/000000001j76bngxv0775dd4xb?node_id=3 2026-01-08T15:28:35.271393Z node 2 :LONG_TX_SERVICE DEBUG: long_tx_service_impl.cpp:871: TLongTxService [Node 2] Received TEvNodeDisconnected for NodeId# 3 from session [2:480:2103] 2026-01-08T15:28:36.158268Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7755: Cannot subscribe to console configs 2026-01-08T15:28:36.158358Z node 3 :IMPORT WARN: schemeshard_import.cpp:440: Table profiles were not loaded 2026-01-08T15:28:36.365174Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046578944, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2026-01-08T15:28:37.089557Z node 4 :BS_PDISK WARN: {BPD01@blobstorage_pdisk_blockdevice_async.cpp:924} Warning# got EIoResult::FileOpenError from IoContext->Setup PDiskId# 1000 2026-01-08T15:28:37.090150Z node 4 :BS_PDISK CRIT: {BPD39@blobstorage_pdisk_impl.cpp:2980} BlockDevice initialization error Details# Can't open file "/home/runner/.ya/build/build_root/663n/001f2a/r3tmp/tmpUH266y/pdisk_1.dat": unknown reason, errno# 0. PDiskId# 1000 2026-01-08T15:28:37.090496Z node 4 :BS_PDISK CRIT: {BPD01@blobstorage_pdisk_actor.cpp:267} PDiskId# 1000 bootstrapped to the StateError, reason# Can't open file "/home/runner/.ya/build/build_root/663n/001f2a/r3tmp/tmpUH266y/pdisk_1.dat": unknown reason, errno# 0. 
Can not be initialized Config: {TPDiskConfig Path# "/home/runner/.ya/build/build_root/663n/001f2a/r3tmp/tmpUH266y/pdisk_1.dat" ExpectedPath# "" ExpectedSerial# "" PDiskGuid# 18097029051159738047 PDiskId# 1000 PDiskCategory# {Type# DEVICE_TYPE_ROT Kind# 0} MetadataOnly# 0 StartOwnerRound# 2 SectorMap# true EnableSectorEncryption # 0 ChunkSize# 134217728 SectorSize# 4096 StatisticsUpdateIntervalMs# 1000 SchedulerCfg# {TPDiskSchedulerConfig BytesSchedulerWeight# 1 LogWeight# 1 FreshWeight# 2 CompWeight# 7 SyncLogWeight# 15 HugeWeight# 2 FastReadWeight# 1 OtherReadWeight# 1 LoadWeight# 2 LowReadWeight# 1 MaxChunkReadsPerCycle# 16 MaxChunkReadsDurationPerCycleMs# 0.25 MaxChunkWritesPerCycle# 8 MaxChunkWritesDurationPerCycleMs# 1} MinLogChunksTotal# 4 MaxLogChunksPerOwnerMultiplier# 5 MaxLogChunksPerOwnerDivisor# 4 SortFreeChunksPerItems# 100 GetDriveDataSwitch# DoNotTouch WriteCacheSwitch# DoNotTouch DriveModelSeekTimeNs# 8000000 DriveModelSpeedBps# 127000000 DriveModelSpeedBpsMin# 135000000 DriveModelSpeedBpsMax# 200000000 DriveModelBulkWrieBlockSize# 2097152 DriveModelTrimSpeedBps# 0 ReorderingMs# 50 DeviceInFlight# 4 CostLimitNs# 50000000 BufferPoolBufferSizeBytes# 524288 BufferPoolBufferCount# 256 MaxQueuedCompletionActions# 128 ExpectedSlotCount# 0 SlotSizeInUnits# 0 ReserveLogChunksMultiplier# 56 InsaneLogChunksMultiplier# 40 RedLogChunksMultiplier# 30 OrangeLogChunksMultiplier# 20 WarningLogChunksMultiplier# 4 YellowLogChunksMultiplier# 4 MaxMetadataMegabytes# 32 SpaceColorBorder# GREEN CompletionThreadsCount# 1 UseNoopScheduler# false PlainDataChunks# 0 SeparateHugePriorities# 0} PDiskId# 1000 2026-01-08T15:28:37.591449Z node 3 :LONG_TX_SERVICE DEBUG: long_tx_service_impl.cpp:346: TLongTxService [Node 3] Received TEvAcquireReadSnapshot from [3:625:2497] for database /dc-1 2026-01-08T15:28:37.591552Z node 3 :LONG_TX_SERVICE DEBUG: long_tx_service_impl.cpp:381: TLongTxService [Node 3] Scheduling TEvAcquireSnapshotFlush for database /dc-1 2026-01-08T15:28:37.601948Z node 3 :LONG_TX_SERVICE DEBUG: long_tx_service_impl.cpp:388: TLongTxService [Node 3] Received TEvAcquireSnapshotFlush for database /dc-1 2026-01-08T15:28:37.602172Z node 3 :LONG_TX_SERVICE DEBUG: acquire_snapshot_impl.cpp:48: LongTxService.AcquireSnapshot [3:821:2678] Sending navigate request for /dc-1 2026-01-08T15:28:37.603953Z node 3 :LONG_TX_SERVICE DEBUG: acquire_snapshot_impl.cpp:75: LongTxService.AcquireSnapshot [3:821:2678] Received navigate response status Ok 2026-01-08T15:28:37.604017Z node 3 :LONG_TX_SERVICE DEBUG: acquire_snapshot_impl.cpp:129: LongTxService.AcquireSnapshot [3:821:2678] Sending acquire step to coordinator 72057594046316545 2026-01-08T15:28:37.604395Z node 3 :LONG_TX_SERVICE DEBUG: acquire_snapshot_impl.cpp:165: LongTxService.AcquireSnapshot [3:821:2678] Received read step 1500 2026-01-08T15:28:37.604461Z node 3 :LONG_TX_SERVICE DEBUG: long_tx_service_impl.cpp:400: TLongTxService [Node 3] Received TEvAcquireSnapshotFinished, cookie = 1 2026-01-08T15:28:37.604615Z node 3 :LONG_TX_SERVICE DEBUG: long_tx_service_impl.cpp:94: TLongTxService [Node 3] Received TEvBeginTx from [3:625:2497] 2026-01-08T15:28:37.604658Z node 3 :LONG_TX_SERVICE DEBUG: long_tx_service_impl.cpp:381: TLongTxService [Node 3] Scheduling TEvAcquireSnapshotFlush for database /dc-1 2026-01-08T15:28:37.615612Z node 3 :LONG_TX_SERVICE DEBUG: long_tx_service_impl.cpp:388: TLongTxService [Node 3] Received TEvAcquireSnapshotFlush for database /dc-1 2026-01-08T15:28:37.615806Z node 3 :LONG_TX_SERVICE DEBUG: acquire_snapshot_impl.cpp:48: 
LongTxService.AcquireSnapshot [3:834:2685] Sending navigate request for /dc-1 2026-01-08T15:28:37.616040Z node 3 :LONG_TX_SERVICE DEBUG: acquire_snapshot_impl.cpp:75: LongTxService.AcquireSnapshot [3:834:2685] Received navigate response status Ok 2026-01-08T15:28:37.616096Z node 3 :LONG_TX_SERVICE DEBUG: acquire_snapshot_impl.cpp:129: LongTxService.AcquireSnapshot [3:834:2685] Sending acquire step to coordinator 72057594046316545 2026-01-08T15:28:37.616333Z node 3 :LONG_TX_SERVICE DEBUG: acquire_snapshot_impl.cpp:165: LongTxService.AcquireSnapshot [3:834:2685] Received read step 1500 2026-01-08T15:28:37.616424Z node 3 :LONG_TX_SERVICE DEBUG: long_tx_service_impl.cpp:400: TLongTxService [Node 3] Received TEvAcquireSnapshotFinished, cookie = 2 2026-01-08T15:28:37.616476Z node 3 :LONG_TX_SERVICE DEBUG: long_tx_service_impl.cpp:425: TLongTxService [Node 3] Created new read-only LongTxId# ydb://long-tx/read-only?snapshot=1500%3Amax 2026-01-08T15:28:37.616674Z node 3 :LONG_TX_SERVICE DEBUG: long_tx_service_impl.cpp:94: TLongTxService [Node 3] Received TEvBeginTx from [3:625:2497] 2026-01-08T15:28:37.616714Z node 3 :LONG_TX_SERVICE DEBUG: long_tx_service_impl.cpp:381: TLongTxService [Node 3] Scheduling TEvAcquireSnapshotFlush for database /dc-1 2026-01-08T15:28:37.627816Z node 3 :LONG_TX_SERVICE DEBUG: long_tx_service_impl.cpp:388: TLongTxService [Node 3] Received TEvAcquireSnapshotFlush for database /dc-1 2026-01-08T15:28:37.628015Z node 3 :LONG_TX_SERVICE DEBUG: acquire_snapshot_impl.cpp:48: LongTxService.AcquireSnapshot [3:836:2687] Sending navigate request for /dc-1 2026-01-08T15:28:37.628277Z node 3 :LONG_TX_SERVICE DEBUG: acquire_snapshot_impl.cpp:75: LongTxService.AcquireSnapshot [3:836:2687] Received navigate response status Ok 2026-01-08T15:28:37.628334Z node 3 :LONG_TX_SERVICE DEBUG: acquire_snapshot_impl.cpp:129: LongTxService.AcquireSnapshot [3:836:2687] Sending acquire step to coordinator 72057594046316545 2026-01-08T15:28:37.628560Z node 3 :LONG_TX_SERVICE DEBUG: acquire_snapshot_impl.cpp:165: LongTxService.AcquireSnapshot [3:836:2687] Received read step 1500 2026-01-08T15:28:37.628660Z node 3 :LONG_TX_SERVICE DEBUG: long_tx_service_impl.cpp:400: TLongTxService [Node 3] Received TEvAcquireSnapshotFinished, cookie = 3 2026-01-08T15:28:37.628734Z node 3 :LONG_TX_SERVICE DEBUG: long_tx_service_impl.cpp:423: TLongTxService [Node 3] Created new read-write LongTxId# ydb://long-tx/00000001ee5z4vypkrjacfn0c5?node_id=3&snapshot=1500%3Amax 2026-01-08T15:28:38.575985Z node 6 :BS_PDISK WARN: {BPD01@blobstorage_pdisk_blockdevice_async.cpp:924} Warning# got EIoResult::FileOpenError from IoContext->Setup PDiskId# 1000 2026-01-08T15:28:38.576326Z node 6 :BS_PDISK CRIT: {BPD39@blobstorage_pdisk_impl.cpp:2980} BlockDevice initialization error Details# Can't open file "/home/runner/.ya/build/build_root/663n/001f2a/r3tmp/tmppEF18I/pdisk_1.dat": unknown reason, errno# 0. PDiskId# 1000 2026-01-08T15:28:38.576475Z node 6 :BS_PDISK CRIT: {BPD01@blobstorage_pdisk_actor.cpp:267} PDiskId# 1000 bootstrapped to the StateError, reason# Can't open file "/home/runner/.ya/build/build_root/663n/001f2a/r3tmp/tmppEF18I/pdisk_1.dat": unknown reason, errno# 0. 
Can not be initialized Config: {TPDiskConfig Path# "/home/runner/.ya/build/build_root/663n/001f2a/r3tmp/tmppEF18I/pdisk_1.dat" ExpectedPath# "" ExpectedSerial# "" PDiskGuid# 75982471330842508 PDiskId# 1000 PDiskCategory# {Type# DEVICE_TYPE_ROT Kind# 0} MetadataOnly# 0 StartOwnerRound# 2 SectorMap# true EnableSectorEncryption # 0 ChunkSize# 134217728 SectorSize# 4096 StatisticsUpdateIntervalMs# 1000 SchedulerCfg# {TPDiskSchedulerConfig BytesSchedulerWeight# 1 LogWeight# 1 FreshWeight# 2 CompWeight# 7 SyncLogWeight# 15 HugeWeight# 2 FastReadWeight# 1 OtherReadWeight# 1 LoadWeight# 2 LowReadWeight# 1 MaxChunkReadsPerCycle# 16 MaxChunkReadsDurationPerCycleMs# 0.25 MaxChunkWritesPerCycle# 8 MaxChunkWritesDurationPerCycleMs# 1} MinLogChunksTotal# 4 MaxLogChunksPerOwnerMultiplier# 5 MaxLogChunksPerOwnerDivisor# 4 SortFreeChunksPerItems# 100 GetDriveDataSwitch# DoNotTouch WriteCacheSwitch# DoNotTouch DriveModelSeekTimeNs# 8000000 DriveModelSpeedBps# 127000000 DriveModelSpeedBpsMin# 135000000 DriveModelSpeedBpsMax# 200000000 DriveModelBulkWrieBlockSize# 2097152 DriveModelTrimSpeedBps# 0 ReorderingMs# 50 DeviceInFlight# 4 CostLimitNs# 50000000 BufferPoolBufferSizeBytes# 524288 BufferPoolBufferCount# 256 MaxQueuedCompletionActions# 128 ExpectedSlotCount# 0 SlotSizeInUnits# 0 ReserveLogChunksMultiplier# 56 InsaneLogChunksMultiplier# 40 RedLogChunksMultiplier# 30 OrangeLogChunksMultiplier# 20 WarningLogChunksMultiplier# 4 YellowLogChunksMultiplier# 4 MaxMetadataMegabytes# 32 SpaceColorBorder# GREEN CompletionThreadsCount# 1 UseNoopScheduler# false PlainDataChunks# 0 SeparateHugePriorities# 0} PDiskId# 1000 2026-01-08T15:28:38.625470Z node 5 :LONG_TX_SERVICE DEBUG: long_tx_service_impl.cpp:468: TLongTxService [Node 5] Received TEvRegisterLock for LockId# 123 2026-01-08T15:28:38.625607Z node 5 :LONG_TX_SERVICE DEBUG: long_tx_service_impl.cpp:519: TLongTxService [Node 5] Received TEvSubscribeLock from [5:441:2331] for LockId# 987 LockNode# 5 2026-01-08T15:28:38.637338Z node 6 :LONG_TX_SERVICE DEBUG: long_tx_service_impl.cpp:519: TLongTxService [Node 6] Received TEvSubscribeLock from [6:442:2101] for LockId# 987 LockNode# 5 2026-01-08T15:28:38.637473Z node 6 :LONG_TX_SERVICE DEBUG: long_tx_service_impl.cpp:833: TLongTxService [Node 6] Received TEvNodeConnected for NodeId# 5 from session [6:91:2048] 2026-01-08T15:28:38.637607Z node 5 :LONG_TX_SERVICE DEBUG: long_tx_service_impl.cpp:519: TLongTxService [Node 5] Received TEvSubscribeLock from [6:153:2090] for LockId# 987 LockNode# 5 2026-01-08T15:28:38.639099Z node 6 :LONG_TX_SERVICE DEBUG: long_tx_service_impl.cpp:611: TLongTxService [Node 6] Received TEvLockStatus from [5:152:2138] for LockId# 987 LockNode# 5 LockStatus# STATUS_NOT_FOUND 2026-01-08T15:28:38.639310Z node 5 :LONG_TX_SERVICE DEBUG: long_tx_service_impl.cpp:519: TLongTxService [Node 5] Received TEvSubscribeLock from [5:441:2331] for LockId# 123 LockNode# 5 2026-01-08T15:28:38.639462Z node 6 :LONG_TX_SERVICE DEBUG: long_tx_service_impl.cpp:519: TLongTxService [Node 6] Received TEvSubscribeLock from [6:442:2101] for LockId# 123 LockNode# 5 2026-01-08T15:28:38.639622Z node 5 :LONG_TX_SERVICE DEBUG: long_tx_service_impl.cpp:519: TLongTxService [Node 5] Received TEvSubscribeLock from [6:153:2090] for LockId# 123 LockNode# 5 2026-01-08T15:28:38.639818Z node 6 :LONG_TX_SERVICE DEBUG: long_tx_service_impl.cpp:611: TLongTxService [Node 6] Received TEvLockStatus from [5:152:2138] for LockId# 123 LockNode# 5 LockStatus# STATUS_SUBSCRIBED 2026-01-08T15:28:38.639962Z node 5 :LONG_TX_SERVICE DEBUG: 
long_tx_service_impl.cpp:479: TLongTxService [Node 5] Received TEvUnregisterLock for LockId# 123 2026-01-08T15:28:38.640110Z node 6 :LONG_TX_SERVICE DEBUG: long_tx_service_impl.cpp:611: TLongTxService [Node 6] Received TEvLockStatus from [5:152:2138] for LockId# 123 LockNode# 5 LockStatus# STATUS_NOT_FOUND 2026-01-08T15:28:38.640279Z node 6 :LONG_TX_SERVICE DEBUG: long_tx_service_impl.cpp:519: TLongTxService [Node 6] Received TEvSubscribeLock from [6:442:2101] for LockId# 234 LockNode# 5 2026-01-08T15:28:38.640584Z node 5 :PIPE_SERVER ERROR: tablet_pipe_server.cpp:228: [72057594046447617] NodeDisconnected NodeId# 6 2026-01-08T15:28:38.640676Z node 5 :PIPE_SERVER ERROR: tablet_pipe_server.cpp:228: [72057594037932033] NodeDisconnected NodeId# 6 2026-01-08T15:28:38.640782Z node 5 :PIPE_SERVER ERROR: tablet_pipe_server.cpp:228: [72057594046578946] NodeDisconnected NodeId# 6 2026-01-08T15:28:38.641119Z node 6 :LONG_TX_SERVICE DEBUG: long_tx_service_impl.cpp:871: TLongTxService [Node 6] Received TEvNodeDisconnected for NodeId# 5 from session [6:91:2048] 2026-01-08T15:28:38.641598Z node 6 :BS_NODE ERROR: {NW42@node_warden_pipe.cpp:60} Handle(TEvTabletPipe::TEvClientDestroyed) ClientId# [6:64:2076] ServerId# [5:363:2280] TabletId# 72057594037932033 PipeClientId# [6:64:2076] 2026-01-08T15:28:38.641817Z node 6 :TX_PROXY WARN: proxy_impl.cpp:226: actor# [6:151:2089] HANDLE TEvClientDestroyed from tablet# 72057594046447617 2026-01-08T15:28:38.826599Z node 6 :LONG_TX_SERVICE DEBUG: long_tx_service_impl.cpp:833: TLongTxService [Node 6] Received TEvNodeConnected for NodeId# 5 from session [6:472:2048] 2026-01-08T15:28:38.826919Z node 5 :PIPE_SERVER ERROR: tablet_pipe_server.cpp:228: [72057594037932033] NodeDisconnected NodeId# 6 2026-01-08T15:28:38.826980Z node 5 :PIPE_SERVER ERROR: tablet_pipe_server.cpp:228: [72057594046578946] NodeDisconnected NodeId# 6 2026-01-08T15:28:38.827427Z node 6 :LONG_TX_SERVICE DEBUG: long_tx_service_impl.cpp:871: TLongTxService [Node 6] Received TEvNodeDisconnected for NodeId# 5 from session [6:472:2048] 2026-01-08T15:28:38.827615Z node 6 :BS_NODE ERROR: {NW42@node_warden_pipe.cpp:60} Handle(TEvTabletPipe::TEvClientDestroyed) ClientId# [6:473:2102] ServerId# [5:477:2350] TabletId# 72057594037932033 PipeClientId# [6:473:2102] 2026-01-08T15:28:39.078692Z node 6 :LONG_TX_SERVICE DEBUG: long_tx_service_impl.cpp:833: TLongTxService [Node 6] Received TEvNodeConnected for NodeId# 5 from session [6:493:2048] 2026-01-08T15:28:39.078988Z node 5 :PIPE_SERVER ERROR: tablet_pipe_server.cpp:228: [72057594037932033] NodeDisconnected NodeId# 6 2026-01-08T15:28:39.079041Z node 5 :PIPE_SERVER ERROR: tablet_pipe_server.cpp:228: [72057594046578946] NodeDisconnected NodeId# 6 2026-01-08T15:28:39.079588Z node 6 :BS_NODE ERROR: {NW42@node_warden_pipe.cpp:60} Handle(TEvTabletPipe::TEvClientDestroyed) ClientId# [6:492:2103] ServerId# [5:497:2361] TabletId# 72057594037932033 PipeClientId# [6:492:2103] 2026-01-08T15:28:39.079952Z node 6 :LONG_TX_SERVICE DEBUG: long_tx_service_impl.cpp:871: TLongTxService [Node 6] Received TEvNodeDisconnected for NodeId# 5 from session [6:493:2048] 2026-01-08T15:28:39.372284Z node 6 :LONG_TX_SERVICE DEBUG: long_tx_service_impl.cpp:833: TLongTxService [Node 6] Received TEvNodeConnected for NodeId# 5 from session [6:515:2048] 2026-01-08T15:28:39.372630Z node 5 :PIPE_SERVER ERROR: tablet_pipe_server.cpp:228: [72057594037932033] NodeDisconnected NodeId# 6 2026-01-08T15:28:39.372694Z node 5 :PIPE_SERVER ERROR: tablet_pipe_server.cpp:228: [72057594046578946] 
NodeDisconnected NodeId# 6
2026-01-08T15:28:39.372942Z node 6 :LONG_TX_SERVICE DEBUG: long_tx_service_impl.cpp:871: TLongTxService [Node 6] Received TEvNodeDisconnected for NodeId# 5 from session [6:515:2048]
2026-01-08T15:28:39.373467Z node 6 :BS_NODE ERROR: {NW42@node_warden_pipe.cpp:60} Handle(TEvTabletPipe::TEvClientDestroyed) ClientId# [6:514:2105] ServerId# [5:519:2374] TabletId# 72057594037932033 PipeClientId# [6:514:2105]
|85.6%| [TS] {BAZEL_UPLOAD} ydb/core/tx/long_tx_service/ut/unittest
------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/datashard/ut_range_ops/unittest >> RangeOps::Intersection [GOOD]
Test command err:
first [(Uint64 : NULL, Uint64 : NULL) ; ()) second [(Uint64 : NULL, Uint64 : 1) ; (Uint64 : 20, Uint64 : 20)] result [(Uint64 : NULL, Uint64 : 1) ; (Uint64 : 20, Uint64 : 20)] correct [(Uint64 : NULL, Uint64 : 1) ; (Uint64 : 20, Uint64 : 20)]
first [(Uint64 : NULL) ; ()) second [(Uint64 : NULL, Uint64 : 1) ; (Uint64 : 20, Uint64 : 20)] result [(Uint64 : NULL) ; (Uint64 : 20, Uint64 : 20)] correct [(Uint64 : NULL) ; (Uint64 : 20, Uint64 : 20)]
first [(Uint64 : 10) ; (Uint64 : 20)] second [(Uint64 : 1) ; (Uint64 : 5)] result [(Uint64 : 10) ; (Uint64 : 5)]
first [(Uint64 : 10) ; (Uint64 : 20)] second [(Uint64 : 1) ; (Uint64 : 10)] result [(Uint64 : 10) ; (Uint64 : 10)] correct [(Uint64 : 10) ; (Uint64 : 10)]
first [(Uint64 : 10) ; (Uint64 : 20)] second [(Uint64 : 1) ; (Uint64 : 15)] result [(Uint64 : 10) ; (Uint64 : 15)] correct [(Uint64 : 10) ; (Uint64 : 15)]
first [(Uint64 : 10) ; (Uint64 : 20)] second [(Uint64 : 1) ; (Uint64 : 20)] result [(Uint64 : 10) ; (Uint64 : 20)] correct [(Uint64 : 10) ; (Uint64 : 20)]
first [(Uint64 : 10) ; (Uint64 : 20)] second [(Uint64 : 1) ; (Uint64 : 30)] result [(Uint64 : 10) ; (Uint64 : 20)] correct [(Uint64 : 10) ; (Uint64 : 20)]
first [(Uint64 : 10) ; (Uint64 : 20)] second [(Uint64 : 10) ; (Uint64 : 10)] result [(Uint64 : 10) ; (Uint64 : 10)] correct [(Uint64 : 10) ; (Uint64 : 10)]
first [(Uint64 : 10) ; (Uint64 : 20)] second [(Uint64 : 10) ; (Uint64 : 15)] result [(Uint64 : 10) ; (Uint64 : 15)] correct [(Uint64 : 10) ; (Uint64 : 15)]
first [(Uint64 : 10) ; (Uint64 : 20)] second [(Uint64 : 10) ; (Uint64 : 20)] result [(Uint64 : 10) ; (Uint64 : 20)] correct [(Uint64 : 10) ; (Uint64 : 20)]
first [(Uint64 : 10) ; (Uint64 : 20)] second [(Uint64 : 10) ; (Uint64 : 30)] result [(Uint64 : 10) ; (Uint64 : 20)] correct [(Uint64 : 10) ; (Uint64 : 20)]
first [(Uint64 : 10) ; (Uint64 : 20)] second [(Uint64 : 15) ; (Uint64 : 17)] result [(Uint64 : 15) ; (Uint64 : 17)] correct [(Uint64 : 15) ; (Uint64 : 17)]
first [(Uint64 : 10) ; (Uint64 : 20)] second [(Uint64 : 15) ; (Uint64 : 20)] result [(Uint64 : 15) ; (Uint64 : 20)] correct [(Uint64 : 15) ; (Uint64 : 20)]
first [(Uint64 : 10) ; (Uint64 : 20)] second [(Uint64 : 15) ; (Uint64 : 30)] result [(Uint64 : 15) ; (Uint64 : 20)] correct [(Uint64 : 15) ; (Uint64 : 20)]
first [(Uint64 : 10) ; (Uint64 : 20)] second [(Uint64 : 20) ; (Uint64 : 20)] result [(Uint64 : 20) ; (Uint64 : 20)] correct [(Uint64 : 20) ; (Uint64 : 20)]
first [(Uint64 : 10) ; (Uint64 : 20)] second [(Uint64 : 20) ; (Uint64 : 30)] result [(Uint64 : 20) ; (Uint64 : 20)] correct [(Uint64 : 20) ; (Uint64 : 20)]
first [(Uint64 : 10) ; (Uint64 : 20)] second [(Uint64 : 25) ; (Uint64 : 30)] result [(Uint64 : 25) ; (Uint64 : 20)]
first ((Uint64 : 10) ; (Uint64 : 20)] second [(Uint64 : 1) ; (Uint64 : 10)] result ((Uint64 : 10) ; (Uint64 : 10)]
first [(Uint64 : 10) ; (Uint64 : 20)] second [(Uint64 : 1) ; (Uint64 : 10)) result [(Uint64 : 10) ; (Uint64 : 10))
first ((Uint64 : 10) ; (Uint64 : 20)] second [(Uint64 : 1) ; (Uint64 : 10)) result ((Uint64 : 10) ; (Uint64 : 10))
first ((Uint64 : 10) ; (Uint64 : 20)] second [(Uint64 : 1) ; (Uint64 : 15)] result ((Uint64 : 10) ; (Uint64 : 15)] correct ((Uint64 : 10) ; (Uint64 : 15)]
first ((Uint64 : 10) ; (Uint64 : 20)] second [(Uint64 : 1) ; (Uint64 : 15)) result ((Uint64 : 10) ; (Uint64 : 15)) correct ((Uint64 : 10) ; (Uint64 : 15))
first ((Uint64 : 10) ; (Uint64 : 20)] second [(Uint64 : 1) ; (Uint64 : 20)) result ((Uint64 : 10) ; (Uint64 : 20)) correct ((Uint64 : 10) ; (Uint64 : 20))
first ((Uint64 : 10) ; (Uint64 : 20)) second [(Uint64 : 1) ; (Uint64 : 20)) result ((Uint64 : 10) ; (Uint64 : 20)) correct ((Uint64 : 10) ; (Uint64 : 20))
first [(Uint64 : NULL) ; ()) second [(Uint64 : 1) ; (Uint64 : 20)) result [(Uint64 : 1) ; (Uint64 : 20)) correct [(Uint64 : 1) ; (Uint64 : 20))
first [(Uint64 : 10) ; ()) second [(Uint64 : 1) ; (Uint64 : 20)) result [(Uint64 : 10) ; (Uint64 : 20)) correct [(Uint64 : 10) ; (Uint64 : 20))
first ((Uint64 : 10) ; ()) second [(Uint64 : 1) ; (Uint64 : 10)) result ((Uint64 : 10) ; (Uint64 : 10))
first ((Uint64 : 10) ; ()) second [(Uint64 : 1) ; (Uint64 : 20)) result ((Uint64 : 10) ; (Uint64 : 20)) correct ((Uint64 : 10) ; (Uint64 : 20))
first [(Uint64 : NULL) ; (Uint64 : 10)] second [(Uint64 : 1) ; (Uint64 : 20)) result [(Uint64 : 1) ; (Uint64 : 10)] correct [(Uint64 : 1) ; (Uint64 : 10)]
first [(Uint64 : NULL) ; (Uint64 : 20)] second [(Uint64 : 1) ; (Uint64 : 10)) result [(Uint64 : 1) ; (Uint64 : 10)) correct [(Uint64 : 1) ; (Uint64 : 10))
|85.6%| [TM] {BAZEL_UPLOAD} ydb/core/tx/datashard/ut_range_ops/unittest
>> TMemoryController::Counters_NoHardLimit [GOOD]
>> TMemoryController::Config_ConsumerLimits
>> TControlPlaneProxyCheckNegativePermissionsSuccess::ShouldSendGetResultData [GOOD]
>> TControlPlaneProxyCheckNegativePermissionsSuccess::ShouldSendListJobs
------- [TM] {asan, default-linux-x86_64, release} ydb/core/graph/ut/unittest >> Graph::LocalBordersOnGet [GOOD]
Test command err:
Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2143]
IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2143]
Leader for TabletID 72057594046678944 is [1:129:2154] sender: [1:130:2058] recipient: [1:112:2143]
2026-01-08T15:28:14.809866Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7938: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1
2026-01-08T15:28:14.809967Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7966: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10
2026-01-08T15:28:14.810030Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7852: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s
2026-01-08T15:28:14.810080Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7868: OperationsProcessing config: using default configuration
2026-01-08T15:28:14.810120Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7874: OperationsProcessing config: type TxMergeTablePartition, limit 10000
2026-01-08T15:28:14.810154Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7874: OperationsProcessing config: type TxSplitTablePartition, limit 10000
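The RangeOps::Intersection cases above all follow one rule: the intersection of two key ranges keeps the greater "first"/from bound and the smaller to bound, an exclusive bound beats an inclusive one at the same key, and an absent bound ("()") is unbounded. When the resulting from exceeds the to, as in [10;20] vs [25;30] giving [25;20], the range is empty, which is why those cases print no "correct" line. A simplified single-column C++ sketch of that rule follows; TBound and TRange are illustrative types, not the datashard's range representation, and tuple keys and NULL ordering are left out:

#include <optional>

// Illustrative bound/range types; nullopt stands for an unbounded side.
struct TBound {
    std::optional<long long> Key;
    bool Inclusive = true;
};

struct TRange {
    TBound From;
    TBound To;
};

// The tighter of two lower bounds: the greater key wins, and on equal keys
// an exclusive bound wins over an inclusive one.
static TBound MaxFrom(const TBound& a, const TBound& b) {
    if (!a.Key) return b;
    if (!b.Key) return a;
    if (*a.Key != *b.Key) return *a.Key > *b.Key ? a : b;
    return a.Inclusive ? b : a;
}

// The tighter of two upper bounds: the smaller key wins, same tie-break.
static TBound MinTo(const TBound& a, const TBound& b) {
    if (!a.Key) return b;
    if (!b.Key) return a;
    if (*a.Key != *b.Key) return *a.Key < *b.Key ? a : b;
    return a.Inclusive ? b : a;
}

TRange Intersect(const TRange& lhs, const TRange& rhs) {
    // May produce From > To, i.e. an empty range, which the test above
    // prints without a matching "correct" line.
    return TRange{MaxFrom(lhs.From, rhs.From), MinTo(lhs.To, rhs.To)};
}

For instance, intersecting ((10);(20)] with [(1);(10)) keeps the exclusive lower bound 10 and the exclusive upper bound 10, reproducing the ((Uint64 : 10) ; (Uint64 : 10)) result above.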
2026-01-08T15:28:14.810208Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7998: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2026-01-08T15:28:14.810266Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_shred_manager.cpp:39: [RootShredManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, ShredInterval# 604800.000000s, ShredBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2026-01-08T15:28:14.811132Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:8069: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2026-01-08T15:28:14.811426Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2026-01-08T15:28:14.878021Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7755: Cannot subscribe to console configs 2026-01-08T15:28:14.878082Z node 1 :IMPORT WARN: schemeshard_import.cpp:440: Table profiles were not loaded 2026-01-08T15:28:14.885854Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2026-01-08T15:28:14.886082Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2026-01-08T15:28:14.886214Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2026-01-08T15:28:14.895355Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2026-01-08T15:28:14.895686Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:464: Clear TempDirsState with owners number: 0 2026-01-08T15:28:14.896276Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1383: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2026-01-08T15:28:14.897033Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2026-01-08T15:28:14.900530Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:160: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2026-01-08T15:28:14.900752Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_shred_manager.cpp:84: [RootShredManager] Stop 2026-01-08T15:28:14.902023Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2026-01-08T15:28:14.902094Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2026-01-08T15:28:14.902214Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2026-01-08T15:28:14.902268Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2026-01-08T15:28:14.902334Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:213: TTxServerlessStorageBilling.Complete 2026-01-08T15:28:14.902506Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:7111: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2026-01-08T15:28:15.066670Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot" OperationType: ESchemeOpMkDir MkDir { Name: ".sys" } Internal: true FailOnExist: false } TxId: 281474976710657 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:28:15.067849Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "auth_permissions" Type: EAuthPermissions } } TxId: 281474976710658 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:28:15.068004Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "auth_effective_permissions" Type: EAuthEffectivePermissions } } TxId: 281474976710659 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:28:15.068098Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "auth_users" Type: EAuthUsers } } TxId: 281474976710660 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:28:15.068190Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "pg_tables" Type: EPgTables } } TxId: 281474976710661 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:28:15.068282Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "ds_pdisks" Type: EPDisks } } TxId: 281474976710662 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:28:15.068375Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "auth_group_members" Type: EAuthGroupMembers } } TxId: 281474976710663 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:28:15.068481Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "top_partitions_one_minute" Type: 
ETopPartitionsByCpuOneMinute } } TxId: 281474976710664 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:28:15.068583Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "top_queries_by_request_units_one_hour" Type: ETopQueriesByRequestUnitsOneHour } } TxId: 281474976710665 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:28:15.068664Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "ds_vslots" Type: EVSlots } } TxId: 281474976710666 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:28:15.068739Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "top_partitions_by_tli_one_hour" Type: ETopPartitionsByTliOneHour } } TxId: 281474976710667 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:28:15.068824Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "nodes" Type: ENodes } } TxId: 281474976710668 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:28:15.068910Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "top_queries_by_cpu_time_one_hour" Type: ETopQueriesByCpuTimeOneHour } } TxId: 281474976710669 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:28:15.069010Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "top_queries_by_read_bytes_one_hour" Type: ETopQueriesByReadBytesOneHour } } TxId: 281474976710670 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:28:15.069111Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "top_queries_by_read_bytes_one_minute" Type: ETopQueriesByR ... 
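The SysViewsRosterUpdate records above repeat one pattern per built-in view: create `.sys` once via ESchemeOpMkDir, then send one ESchemeOpCreateSysView transaction per name/type pair, allocating TxIds sequentially from 281474976710657. A hypothetical sketch of that loop — the structs and serialization here are illustrative stand-ins, not the real actor code:

```cpp
#include <cstdint>
#include <string>
#include <vector>

struct TSysViewDef {
    std::string Name; // e.g. "auth_users"
    std::string Type; // e.g. "EAuthUsers"
};

struct TProposal {
    uint64_t TxId;
    std::string WorkingDir;
    std::string Description; // scheme operation, textual form for illustration
};

// One MkDir for ".sys", then one CreateSysView per registered view,
// with monotonically increasing TxIds, as seen in the log.
std::vector<TProposal> BuildRosterProposals(uint64_t firstTxId,
                                            const std::vector<TSysViewDef>& views) {
    std::vector<TProposal> out;
    uint64_t txId = firstTxId;
    out.push_back({txId++, "/MyRoot", "MkDir { Name: \".sys\" }"});
    for (const auto& v : views) {
        out.push_back({txId++, "/MyRoot/.sys",
                       "CreateSysView { Name: \"" + v.Name + "\" Type: " + v.Type + " }"});
    }
    return out;
}
```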
8T15:28:40.126756Z node 6 :GRAPH DEBUG: tx_store_metrics.cpp:20: SHARD TTxStoreMetrics::Execute 2026-01-08T15:28:40.126789Z node 6 :GRAPH TRACE: backends.cpp:329: DB Stored metrics 2026-01-08T15:28:40.126883Z node 6 :GRAPH TRACE: shard_impl.cpp:192: SHARD Metrics { Name: "test.metric0" Value: 109 } Time: 109 2026-01-08T15:28:40.126911Z node 6 :GRAPH TRACE: shard_impl.cpp:197: SHARD Executing direct TxStoreMetrics 2026-01-08T15:28:40.126941Z node 6 :GRAPH DEBUG: tx_store_metrics.cpp:20: SHARD TTxStoreMetrics::Execute 2026-01-08T15:28:40.126975Z node 6 :GRAPH TRACE: backends.cpp:329: DB Stored metrics 2026-01-08T15:28:40.127079Z node 6 :GRAPH TRACE: shard_impl.cpp:192: SHARD Metrics { Name: "test.metric0" Value: 110 } Time: 110 2026-01-08T15:28:40.127102Z node 6 :GRAPH TRACE: shard_impl.cpp:197: SHARD Executing direct TxStoreMetrics 2026-01-08T15:28:40.127132Z node 6 :GRAPH DEBUG: tx_store_metrics.cpp:20: SHARD TTxStoreMetrics::Execute 2026-01-08T15:28:40.127164Z node 6 :GRAPH TRACE: backends.cpp:329: DB Stored metrics 2026-01-08T15:28:40.127230Z node 6 :GRAPH TRACE: shard_impl.cpp:192: SHARD Metrics { Name: "test.metric0" Value: 111 } Time: 111 2026-01-08T15:28:40.127255Z node 6 :GRAPH TRACE: shard_impl.cpp:197: SHARD Executing direct TxStoreMetrics 2026-01-08T15:28:40.127284Z node 6 :GRAPH DEBUG: tx_store_metrics.cpp:20: SHARD TTxStoreMetrics::Execute 2026-01-08T15:28:40.127317Z node 6 :GRAPH TRACE: backends.cpp:329: DB Stored metrics 2026-01-08T15:28:40.127404Z node 6 :GRAPH TRACE: shard_impl.cpp:192: SHARD Metrics { Name: "test.metric0" Value: 112 } Time: 112 2026-01-08T15:28:40.127430Z node 6 :GRAPH TRACE: shard_impl.cpp:197: SHARD Executing direct TxStoreMetrics 2026-01-08T15:28:40.127460Z node 6 :GRAPH DEBUG: tx_store_metrics.cpp:20: SHARD TTxStoreMetrics::Execute 2026-01-08T15:28:40.127495Z node 6 :GRAPH TRACE: backends.cpp:329: DB Stored metrics 2026-01-08T15:28:40.127596Z node 6 :GRAPH TRACE: shard_impl.cpp:192: SHARD Metrics { Name: "test.metric0" Value: 113 } Time: 113 2026-01-08T15:28:40.127620Z node 6 :GRAPH TRACE: shard_impl.cpp:197: SHARD Executing direct TxStoreMetrics 2026-01-08T15:28:40.127651Z node 6 :GRAPH DEBUG: tx_store_metrics.cpp:20: SHARD TTxStoreMetrics::Execute 2026-01-08T15:28:40.127684Z node 6 :GRAPH TRACE: backends.cpp:329: DB Stored metrics 2026-01-08T15:28:40.127769Z node 6 :GRAPH TRACE: shard_impl.cpp:192: SHARD Metrics { Name: "test.metric0" Value: 114 } Time: 114 2026-01-08T15:28:40.127794Z node 6 :GRAPH TRACE: shard_impl.cpp:197: SHARD Executing direct TxStoreMetrics 2026-01-08T15:28:40.127824Z node 6 :GRAPH DEBUG: tx_store_metrics.cpp:20: SHARD TTxStoreMetrics::Execute 2026-01-08T15:28:40.127857Z node 6 :GRAPH TRACE: backends.cpp:329: DB Stored metrics 2026-01-08T15:28:40.127947Z node 6 :GRAPH TRACE: shard_impl.cpp:192: SHARD Metrics { Name: "test.metric0" Value: 115 } Time: 115 2026-01-08T15:28:40.127977Z node 6 :GRAPH TRACE: shard_impl.cpp:197: SHARD Executing direct TxStoreMetrics 2026-01-08T15:28:40.128006Z node 6 :GRAPH DEBUG: tx_store_metrics.cpp:20: SHARD TTxStoreMetrics::Execute 2026-01-08T15:28:40.128041Z node 6 :GRAPH TRACE: backends.cpp:329: DB Stored metrics 2026-01-08T15:28:40.128113Z node 6 :GRAPH TRACE: shard_impl.cpp:192: SHARD Metrics { Name: "test.metric0" Value: 116 } Time: 116 2026-01-08T15:28:40.128137Z node 6 :GRAPH TRACE: shard_impl.cpp:197: SHARD Executing direct TxStoreMetrics 2026-01-08T15:28:40.128166Z node 6 :GRAPH DEBUG: tx_store_metrics.cpp:20: SHARD TTxStoreMetrics::Execute 2026-01-08T15:28:40.128214Z node 6 :GRAPH TRACE: 
backends.cpp:329: DB Stored metrics 2026-01-08T15:28:40.128323Z node 6 :GRAPH TRACE: shard_impl.cpp:192: SHARD Metrics { Name: "test.metric0" Value: 117 } Time: 117 2026-01-08T15:28:40.128349Z node 6 :GRAPH TRACE: shard_impl.cpp:197: SHARD Executing direct TxStoreMetrics 2026-01-08T15:28:40.128379Z node 6 :GRAPH DEBUG: tx_store_metrics.cpp:20: SHARD TTxStoreMetrics::Execute 2026-01-08T15:28:40.128410Z node 6 :GRAPH TRACE: backends.cpp:329: DB Stored metrics 2026-01-08T15:28:40.128489Z node 6 :GRAPH TRACE: shard_impl.cpp:192: SHARD Metrics { Name: "test.metric0" Value: 118 } Time: 118 2026-01-08T15:28:40.128514Z node 6 :GRAPH TRACE: shard_impl.cpp:197: SHARD Executing direct TxStoreMetrics 2026-01-08T15:28:40.128542Z node 6 :GRAPH DEBUG: tx_store_metrics.cpp:20: SHARD TTxStoreMetrics::Execute 2026-01-08T15:28:40.128571Z node 6 :GRAPH TRACE: backends.cpp:329: DB Stored metrics 2026-01-08T15:28:40.128638Z node 6 :GRAPH TRACE: shard_impl.cpp:192: SHARD Metrics { Name: "test.metric0" Value: 119 } Time: 119 2026-01-08T15:28:40.128663Z node 6 :GRAPH TRACE: shard_impl.cpp:197: SHARD Executing direct TxStoreMetrics 2026-01-08T15:28:40.128692Z node 6 :GRAPH DEBUG: tx_store_metrics.cpp:20: SHARD TTxStoreMetrics::Execute 2026-01-08T15:28:40.128725Z node 6 :GRAPH TRACE: backends.cpp:329: DB Stored metrics 2026-01-08T15:28:40.128804Z node 6 :GRAPH TRACE: shard_impl.cpp:226: SHARD Handle TEvGraph::TEvGetMetrics from [6:1015:2947] 2026-01-08T15:28:40.128879Z node 6 :GRAPH DEBUG: tx_get_metrics.cpp:20: SHARD TTxGetMetrics::Execute 2026-01-08T15:28:40.128941Z node 6 :GRAPH DEBUG: backends.cpp:352: DB Querying from 0 to 119 2026-01-08T15:28:40.148534Z node 6 :GRAPH DEBUG: tx_store_metrics.cpp:25: SHARD TTxStoreMetrics::Complete 2026-01-08T15:28:40.148613Z node 6 :GRAPH DEBUG: tx_store_metrics.cpp:25: SHARD TTxStoreMetrics::Complete 2026-01-08T15:28:40.148633Z node 6 :GRAPH DEBUG: tx_store_metrics.cpp:25: SHARD TTxStoreMetrics::Complete 2026-01-08T15:28:40.148650Z node 6 :GRAPH DEBUG: tx_store_metrics.cpp:25: SHARD TTxStoreMetrics::Complete 2026-01-08T15:28:40.148670Z node 6 :GRAPH DEBUG: tx_store_metrics.cpp:25: SHARD TTxStoreMetrics::Complete 2026-01-08T15:28:40.148689Z node 6 :GRAPH DEBUG: tx_store_metrics.cpp:25: SHARD TTxStoreMetrics::Complete 2026-01-08T15:28:40.148707Z node 6 :GRAPH DEBUG: tx_store_metrics.cpp:25: SHARD TTxStoreMetrics::Complete 2026-01-08T15:28:40.148723Z node 6 :GRAPH DEBUG: tx_store_metrics.cpp:25: SHARD TTxStoreMetrics::Complete 2026-01-08T15:28:40.148741Z node 6 :GRAPH DEBUG: tx_store_metrics.cpp:25: SHARD TTxStoreMetrics::Complete 2026-01-08T15:28:40.148757Z node 6 :GRAPH DEBUG: tx_store_metrics.cpp:25: SHARD TTxStoreMetrics::Complete 2026-01-08T15:28:40.148773Z node 6 :GRAPH DEBUG: tx_store_metrics.cpp:25: SHARD TTxStoreMetrics::Complete 2026-01-08T15:28:40.148791Z node 6 :GRAPH DEBUG: tx_store_metrics.cpp:25: SHARD TTxStoreMetrics::Complete 2026-01-08T15:28:40.148808Z node 6 :GRAPH DEBUG: tx_store_metrics.cpp:25: SHARD TTxStoreMetrics::Complete 2026-01-08T15:28:40.148825Z node 6 :GRAPH DEBUG: tx_store_metrics.cpp:25: SHARD TTxStoreMetrics::Complete 2026-01-08T15:28:40.148842Z node 6 :GRAPH DEBUG: tx_store_metrics.cpp:25: SHARD TTxStoreMetrics::Complete 2026-01-08T15:28:40.148862Z node 6 :GRAPH DEBUG: tx_store_metrics.cpp:25: SHARD TTxStoreMetrics::Complete 2026-01-08T15:28:40.148896Z node 6 :GRAPH DEBUG: tx_store_metrics.cpp:25: SHARD TTxStoreMetrics::Complete 2026-01-08T15:28:40.148923Z node 6 :GRAPH DEBUG: tx_store_metrics.cpp:25: SHARD TTxStoreMetrics::Complete 
2026-01-08T15:28:40.148941Z node 6 :GRAPH DEBUG: tx_store_metrics.cpp:25: SHARD TTxStoreMetrics::Complete 2026-01-08T15:28:40.148958Z node 6 :GRAPH DEBUG: tx_store_metrics.cpp:25: SHARD TTxStoreMetrics::Complete 2026-01-08T15:28:40.148983Z node 6 :GRAPH DEBUG: tx_store_metrics.cpp:25: SHARD TTxStoreMetrics::Complete 2026-01-08T15:28:40.149007Z node 6 :GRAPH DEBUG: tx_store_metrics.cpp:25: SHARD TTxStoreMetrics::Complete 2026-01-08T15:28:40.149025Z node 6 :GRAPH DEBUG: tx_store_metrics.cpp:25: SHARD TTxStoreMetrics::Complete 2026-01-08T15:28:40.149041Z node 6 :GRAPH DEBUG: tx_store_metrics.cpp:25: SHARD TTxStoreMetrics::Complete 2026-01-08T15:28:40.149059Z node 6 :GRAPH DEBUG: tx_store_metrics.cpp:25: SHARD TTxStoreMetrics::Complete 2026-01-08T15:28:40.149075Z node 6 :GRAPH DEBUG: tx_store_metrics.cpp:25: SHARD TTxStoreMetrics::Complete 2026-01-08T15:28:40.149091Z node 6 :GRAPH DEBUG: tx_store_metrics.cpp:25: SHARD TTxStoreMetrics::Complete 2026-01-08T15:28:40.149109Z node 6 :GRAPH DEBUG: tx_store_metrics.cpp:25: SHARD TTxStoreMetrics::Complete 2026-01-08T15:28:40.149128Z node 6 :GRAPH DEBUG: tx_store_metrics.cpp:25: SHARD TTxStoreMetrics::Complete 2026-01-08T15:28:40.149148Z node 6 :GRAPH DEBUG: tx_store_metrics.cpp:25: SHARD TTxStoreMetrics::Complete 2026-01-08T15:28:40.149181Z node 6 :GRAPH DEBUG: tx_store_metrics.cpp:25: SHARD TTxStoreMetrics::Complete 2026-01-08T15:28:40.149200Z node 6 :GRAPH DEBUG: tx_store_metrics.cpp:25: SHARD TTxStoreMetrics::Complete 2026-01-08T15:28:40.149220Z node 6 :GRAPH DEBUG: tx_store_metrics.cpp:25: SHARD TTxStoreMetrics::Complete 2026-01-08T15:28:40.149238Z node 6 :GRAPH DEBUG: tx_store_metrics.cpp:25: SHARD TTxStoreMetrics::Complete 2026-01-08T15:28:40.149254Z node 6 :GRAPH DEBUG: tx_store_metrics.cpp:25: SHARD TTxStoreMetrics::Complete 2026-01-08T15:28:40.149270Z node 6 :GRAPH DEBUG: tx_store_metrics.cpp:25: SHARD TTxStoreMetrics::Complete 2026-01-08T15:28:40.149286Z node 6 :GRAPH DEBUG: tx_store_metrics.cpp:25: SHARD TTxStoreMetrics::Complete 2026-01-08T15:28:40.149308Z node 6 :GRAPH DEBUG: tx_store_metrics.cpp:25: SHARD TTxStoreMetrics::Complete 2026-01-08T15:28:40.149326Z node 6 :GRAPH DEBUG: tx_store_metrics.cpp:25: SHARD TTxStoreMetrics::Complete 2026-01-08T15:28:40.149348Z node 6 :GRAPH DEBUG: tx_store_metrics.cpp:25: SHARD TTxStoreMetrics::Complete 2026-01-08T15:28:40.149367Z node 6 :GRAPH DEBUG: tx_store_metrics.cpp:25: SHARD TTxStoreMetrics::Complete 2026-01-08T15:28:40.149383Z node 6 :GRAPH DEBUG: tx_store_metrics.cpp:25: SHARD TTxStoreMetrics::Complete 2026-01-08T15:28:40.149403Z node 6 :GRAPH DEBUG: tx_store_metrics.cpp:25: SHARD TTxStoreMetrics::Complete 2026-01-08T15:28:40.149427Z node 6 :GRAPH DEBUG: tx_store_metrics.cpp:25: SHARD TTxStoreMetrics::Complete 2026-01-08T15:28:40.149447Z node 6 :GRAPH DEBUG: tx_store_metrics.cpp:25: SHARD TTxStoreMetrics::Complete 2026-01-08T15:28:40.149464Z node 6 :GRAPH DEBUG: tx_store_metrics.cpp:25: SHARD TTxStoreMetrics::Complete 2026-01-08T15:28:40.149487Z node 6 :GRAPH DEBUG: tx_store_metrics.cpp:25: SHARD TTxStoreMetrics::Complete 2026-01-08T15:28:40.149510Z node 6 :GRAPH DEBUG: tx_store_metrics.cpp:25: SHARD TTxStoreMetrics::Complete 2026-01-08T15:28:40.149530Z node 6 :GRAPH DEBUG: tx_store_metrics.cpp:25: SHARD TTxStoreMetrics::Complete 2026-01-08T15:28:40.149550Z node 6 :GRAPH DEBUG: tx_store_metrics.cpp:25: SHARD TTxStoreMetrics::Complete 2026-01-08T15:28:40.149568Z node 6 :GRAPH DEBUG: tx_store_metrics.cpp:25: SHARD TTxStoreMetrics::Complete 2026-01-08T15:28:40.149586Z node 6 :GRAPH DEBUG: 
tx_store_metrics.cpp:25: SHARD TTxStoreMetrics::Complete 2026-01-08T15:28:40.149606Z node 6 :GRAPH DEBUG: tx_store_metrics.cpp:25: SHARD TTxStoreMetrics::Complete 2026-01-08T15:28:40.149624Z node 6 :GRAPH DEBUG: tx_store_metrics.cpp:25: SHARD TTxStoreMetrics::Complete 2026-01-08T15:28:40.149642Z node 6 :GRAPH DEBUG: tx_store_metrics.cpp:25: SHARD TTxStoreMetrics::Complete 2026-01-08T15:28:40.149662Z node 6 :GRAPH DEBUG: tx_store_metrics.cpp:25: SHARD TTxStoreMetrics::Complete 2026-01-08T15:28:40.149681Z node 6 :GRAPH DEBUG: tx_store_metrics.cpp:25: SHARD TTxStoreMetrics::Complete 2026-01-08T15:28:40.149700Z node 6 :GRAPH DEBUG: tx_store_metrics.cpp:25: SHARD TTxStoreMetrics::Complete 2026-01-08T15:28:40.149718Z node 6 :GRAPH DEBUG: tx_store_metrics.cpp:25: SHARD TTxStoreMetrics::Complete 2026-01-08T15:28:40.149738Z node 6 :GRAPH DEBUG: tx_store_metrics.cpp:25: SHARD TTxStoreMetrics::Complete 2026-01-08T15:28:40.149769Z node 6 :GRAPH DEBUG: tx_get_metrics.cpp:25: SHARD TTxGetMetric::Complete 2026-01-08T15:28:40.149833Z node 6 :GRAPH TRACE: tx_get_metrics.cpp:26: SHARD TxGetMetrics returned 60 points for request 3 2026-01-08T15:28:40.149954Z node 6 :GRAPH TRACE: service_impl.cpp:201: SVC TEvMetricsResult 3 2026-01-08T15:28:40.149997Z node 6 :GRAPH TRACE: service_impl.cpp:204: SVC TEvMetricsResult found request 3 resending to [6:1016:2948] |85.6%| [TM] {BAZEL_UPLOAD} ydb/core/graph/ut/unittest >> TControlPlaneProxyCheckNegativePermissionsSuccess::ShouldSendListJobs [GOOD] >> TControlPlaneProxyCheckNegativePermissionsSuccess::ShouldSendDescribeJob >> TTxDataShardTestInit::TestResolvePathAfterRestart [GOOD] >> TTxDataShardFilterKMeansScan::BadRequest [GOOD] >> TControlPlaneProxyCheckNegativePermissionsSuccess::ShouldSendDescribeJob [GOOD] >> TControlPlaneProxyCheckNegativePermissionsSuccess::ShouldSendCreateConnection ------- [TM] {asan, default-linux-x86_64, release} ydb/core/fq/libs/row_dispatcher/format_handler/ut/unittest >> TestRawParser::TypeKindsValidation [GOOD] Test command err: 2026-01-08T15:25:42.204308Z node 1 :FQ_ROW_DISPATCHER DEBUG: purecalc_filter.cpp:461: GenerateSql: Generated sql: PRAGMA config.flags("LLVM", "OFF"); SELECT COALESCE(col_0 == "str1", FALSE) AS _filter, _offset FROM Input; 2026-01-08T15:25:42.204532Z node 1 :FQ_ROW_DISPATCHER TRACE: filters_set.cpp:116: TTopicFilters: Create program with client id [0:0:0] 2026-01-08T15:25:42.204544Z node 1 :FQ_ROW_DISPATCHER TRACE: filters_set.cpp:123: TTopicFilters: Create purecalc program for query ' PRAGMA config.flags("LLVM", "OFF"); SELECT COALESCE(col_0 == "str1", FALSE) AS _filter, _offset FROM Input; ' (client id: [0:0:0]) 2026-01-08T15:25:42.204579Z node 1 :FQ_ROW_DISPATCHER TRACE: purecalc_filter.cpp:341: TProgramCompileHandler: Send compile request with id 1 2026-01-08T15:25:42.242618Z node 1 :FQ_ROW_DISPATCHER TRACE: compile_service.cpp:131: TPurecalcCompileService: Add to compile queue request with id 1 from [1:7593012307063587795:2051] 2026-01-08T15:25:48.183225Z node 1 :FQ_ROW_DISPATCHER TRACE: compile_service.cpp:52: TPurecalcCompileActor [1:7593012307063587795:2051] [id 1]: Started compile request 2026-01-08T15:25:48.903633Z node 1 :FQ_ROW_DISPATCHER TRACE: compile_service.cpp:70: TPurecalcCompileActor [1:7593012307063587795:2051] [id 1]: Compilation completed for request 2026-01-08T15:25:48.903769Z node 1 :FQ_ROW_DISPATCHER TRACE: compile_service.cpp:151: TPurecalcCompileService: Compile finished for request with id 1 from [1:7593012307063587795:2051] 2026-01-08T15:25:48.906881Z node 1 :FQ_ROW_DISPATCHER TRACE: 
filters_set.cpp:57: TTopicFilters: Got compile response for request with id 1 2026-01-08T15:25:48.906915Z node 1 :FQ_ROW_DISPATCHER TRACE: purecalc_filter.cpp:370: TProgramCompileHandler: Program compilation finished 2026-01-08T15:25:48.907101Z node 1 :FQ_ROW_DISPATCHER TRACE: filters_set.cpp:110: TTopicFilters: Start program with client id [0:0:0] 2026-01-08T15:25:48.907160Z node 1 :FQ_ROW_DISPATCHER DEBUG: purecalc_filter.cpp:461: GenerateSql: Generated sql: PRAGMA config.flags("LLVM", "OFF"); SELECT COALESCE(col_1 == "str2", FALSE) AS _filter, _offset FROM Input; 2026-01-08T15:25:48.908448Z node 1 :FQ_ROW_DISPATCHER TRACE: filters_set.cpp:116: TTopicFilters: Create program with client id [1:0:0] 2026-01-08T15:25:48.908462Z node 1 :FQ_ROW_DISPATCHER TRACE: filters_set.cpp:123: TTopicFilters: Create purecalc program for query ' PRAGMA config.flags("LLVM", "OFF"); SELECT COALESCE(col_1 == "str2", FALSE) AS _filter, _offset FROM Input; ' (client id: [1:0:0]) 2026-01-08T15:25:48.908484Z node 1 :FQ_ROW_DISPATCHER TRACE: purecalc_filter.cpp:341: TProgramCompileHandler: Send compile request with id 2 2026-01-08T15:25:48.910408Z node 1 :FQ_ROW_DISPATCHER TRACE: compile_service.cpp:131: TPurecalcCompileService: Add to compile queue request with id 2 from [1:7593012307063587795:2051] 2026-01-08T15:25:48.910607Z node 1 :FQ_ROW_DISPATCHER TRACE: compile_service.cpp:52: TPurecalcCompileActor [1:7593012307063587795:2051] [id 2]: Started compile request 2026-01-08T15:25:48.961467Z node 1 :FQ_ROW_DISPATCHER TRACE: compile_service.cpp:70: TPurecalcCompileActor [1:7593012307063587795:2051] [id 2]: Compilation completed for request 2026-01-08T15:25:48.961659Z node 1 :FQ_ROW_DISPATCHER TRACE: compile_service.cpp:151: TPurecalcCompileService: Compile finished for request with id 2 from [1:7593012307063587795:2051] 2026-01-08T15:25:48.964567Z node 1 :FQ_ROW_DISPATCHER TRACE: filters_set.cpp:57: TTopicFilters: Got compile response for request with id 2 2026-01-08T15:25:48.964591Z node 1 :FQ_ROW_DISPATCHER TRACE: purecalc_filter.cpp:370: TProgramCompileHandler: Program compilation finished 2026-01-08T15:25:48.964613Z node 1 :FQ_ROW_DISPATCHER TRACE: filters_set.cpp:110: TTopicFilters: Start program with client id [1:0:0] 2026-01-08T15:25:48.964855Z node 1 :FQ_ROW_DISPATCHER TRACE: purecalc_filter.cpp:439: GenerateSql: No sql was generated 2026-01-08T15:25:48.965039Z node 1 :FQ_ROW_DISPATCHER TRACE: filters_set.cpp:116: TTopicFilters: Create program with client id [2:0:0] 2026-01-08T15:25:48.965246Z node 1 :FQ_ROW_DISPATCHER TRACE: filters_set.cpp:110: TTopicFilters: Start program with client id [2:0:0] 2026-01-08T15:25:48.965658Z node 1 :FQ_ROW_DISPATCHER TRACE: filters_set.cpp:34: TTopicFilters: ProcessData for 3 clients, number rows: 3 2026-01-08T15:25:49.427040Z node 2 :FQ_ROW_DISPATCHER DEBUG: purecalc_filter.cpp:461: GenerateSql: Generated sql: PRAGMA config.flags("LLVM", "OFF"); SELECT COALESCE(a1 = "str1", FALSE) AS _filter, _offset FROM Input; 2026-01-08T15:25:49.427305Z node 2 :FQ_ROW_DISPATCHER TRACE: filters_set.cpp:116: TTopicFilters: Create program with client id [0:0:0] 2026-01-08T15:25:49.427325Z node 2 :FQ_ROW_DISPATCHER TRACE: filters_set.cpp:123: TTopicFilters: Create purecalc program for query ' PRAGMA config.flags("LLVM", "OFF"); SELECT COALESCE(a1 = "str1", FALSE) AS _filter, _offset FROM Input; ' (client id: [0:0:0]) 2026-01-08T15:25:49.427360Z node 2 :FQ_ROW_DISPATCHER TRACE: purecalc_filter.cpp:341: TProgramCompileHandler: Send compile request with id 1 2026-01-08T15:25:49.427463Z node 2 
:FQ_ROW_DISPATCHER TRACE: compile_service.cpp:131: TPurecalcCompileService: Add to compile queue request with id 1 from [2:7593012336573798738:2051] 2026-01-08T15:25:52.381521Z node 2 :FQ_ROW_DISPATCHER TRACE: compile_service.cpp:52: TPurecalcCompileActor [2:7593012336573798738:2051] [id 1]: Started compile request 2026-01-08T15:25:52.400984Z node 2 :FQ_ROW_DISPATCHER TRACE: compile_service.cpp:70: TPurecalcCompileActor [2:7593012336573798738:2051] [id 1]: Compilation completed for request 2026-01-08T15:25:52.401115Z node 2 :FQ_ROW_DISPATCHER TRACE: compile_service.cpp:151: TPurecalcCompileService: Compile finished for request with id 1 from [2:7593012336573798738:2051] 2026-01-08T15:25:52.401244Z node 2 :FQ_ROW_DISPATCHER TRACE: filters_set.cpp:57: TTopicFilters: Got compile response for request with id 1 2026-01-08T15:25:52.401265Z node 2 :FQ_ROW_DISPATCHER TRACE: purecalc_filter.cpp:370: TProgramCompileHandler: Program compilation finished 2026-01-08T15:25:52.401359Z node 2 :FQ_ROW_DISPATCHER TRACE: filters_set.cpp:110: TTopicFilters: Start program with client id [0:0:0] 2026-01-08T15:25:52.401396Z node 2 :FQ_ROW_DISPATCHER TRACE: filters_set.cpp:116: TTopicFilters: Create program with client id [0:0:0] 2026-01-08T15:25:52.401442Z node 2 :FQ_ROW_DISPATCHER TRACE: filters_set.cpp:94: TTopicFilters: Remove program with client id [0:0:0] 2026-01-08T15:25:52.774034Z node 3 :FQ_ROW_DISPATCHER DEBUG: purecalc_filter.cpp:461: GenerateSql: Generated sql: PRAGMA config.flags("LLVM", "OFF"); SELECT COALESCE(a2 ... 50, FALSE) AS _filter, _offset FROM Input; 2026-01-08T15:25:52.774233Z node 3 :FQ_ROW_DISPATCHER TRACE: filters_set.cpp:116: TTopicFilters: Create program with client id [0:0:0] 2026-01-08T15:25:52.774250Z node 3 :FQ_ROW_DISPATCHER TRACE: filters_set.cpp:123: TTopicFilters: Create purecalc program for query ' PRAGMA config.flags("LLVM", "OFF"); SELECT COALESCE(a2 ... 50, FALSE) AS _filter, _offset FROM Input; ' (client id: [0:0:0]) 2026-01-08T15:25:52.774295Z node 3 :FQ_ROW_DISPATCHER TRACE: purecalc_filter.cpp:341: TProgramCompileHandler: Send compile request with id 1 2026-01-08T15:25:52.782604Z node 3 :FQ_ROW_DISPATCHER TRACE: compile_service.cpp:131: TPurecalcCompileService: Add to compile queue request with id 1 from [3:7593012348336841224:2051] 2026-01-08T15:26:01.998823Z node 3 :FQ_ROW_DISPATCHER TRACE: compile_service.cpp:52: TPurecalcCompileActor [3:7593012348336841224:2051] [id 1]: Started compile request 2026-01-08T15:26:02.040120Z node 3 :FQ_ROW_DISPATCHER ERROR: compile_service.cpp:67: TPurecalcCompileActor [3:7593012348336841224:2051] [id 1]: Compilation failed for request 2026-01-08T15:26:02.040559Z node 3 :FQ_ROW_DISPATCHER TRACE: compile_service.cpp:151: TPurecalcCompileService: Compile finished for request with id 1 from [3:7593012348336841224:2051] 2026-01-08T15:26:02.040857Z node 3 :FQ_ROW_DISPATCHER TRACE: filters_set.cpp:57: TTopicFilters: Got compile response for request with id 1 2026-01-08T15:26:02.041080Z node 3 :FQ_ROW_DISPATCHER ERROR: purecalc_filter.cpp:375: TProgramCompileHandler: Program compilation error: {
: Error: Failed to compile purecalc program subissue: {
: Error: Compile issues: generated.sql:3:27: Error: extraneous input '(' expecting {, ';'} } subissue: {
: Error: Final yql: PRAGMA config.flags("LLVM", "OFF"); SELECT COALESCE(a2 ... 50, FALSE) AS _filter, _offset FROM Input; } } 2026-01-08T15:26:02.404607Z node 4 :FQ_ROW_DISPATCHER DEBUG: purecalc_filter.cpp:461: GenerateSql: Generated sql: PRAGMA config.flags("LLVM", "OFF"); SELECT COALESCE(TRUE, FALSE) AS _filter, _offset, (CAST(`ts` AS Timestamp?) - Interval("PT5S")) AS _watermark FROM Input; 2026-01-08T15:26:02.404859Z node 4 :FQ_ROW_DISPATCHER TRACE: filters_set.cpp:116: TTopicFilters: Create program with client id [0:0:0] 2026-01-08T15:26:02.404876Z node 4 :FQ_ROW_DISPATCHER TRACE: filters_set.cpp:123: TTopicFilters: Create purecalc program for query ' PRAGMA config.flags("LLVM", "OFF"); SELECT COALESCE(TRUE, FALSE) AS _filter, _offset, (CAST(`ts` AS Timestamp?) - Interval("PT5S")) AS _watermark FROM Input; ' (client id: [0:0:0]) 2026-01-08T15:26:02.404920Z node 4 :FQ_ROW_DISPATCHER TRACE: purecalc_filter.cpp:341: TProgramCompileHandler: Send compile request with id 1 2026-01-08T15:26:02.405986Z node 4 :FQ_ROW_DISPATCHER TRACE: compile_service.cpp:131: TPurecalcCompileService: Add to compile queue request with id 1 from [4:7593012390950969672:2051] 2026-01-08T15:26:05.408817Z node 4 :FQ_ROW_DISPATCHER TRACE: compile_service.cpp:52: TPurecalcCompileActor [4:7593012390950969672:2051] [id 1]: Started compile request 2026-01-08T15:26:05.666284Z node 4 :FQ_ROW_DISPATCHER TRACE: compile_service.cpp:70: TPurecalcCompileActor [4:7593012390950969672:2051] [id 1]: Compilation completed for request 2026-01-08T15:26:05.666453Z node 4 :FQ_ROW_DISPATCHER TRACE: compile_service.cpp:151: TPurecalcCompileService: Compile finished for request with id 1 from [4:7593012390950969672:2051] 2026-01-08T15:26:05.666611Z node 4 :FQ_ROW_DISPATCHER TRACE: filters_set.cpp:57: TTopicFilters: Got compile response for request with id 1 2026-01-08T15:26:05.666640Z node 4 :FQ_ROW_DISPATCHER TRACE: purecalc_filter.cpp:370: TProgramCompileHandler: Program compilation finished 2026-01-08T15:26:05.666682Z node 4 :FQ_ROW_DISPATCHER TRACE: filters_set.cpp:110: TTopicFilters: Start program with client id [0:0:0] 2026-01-08T15:26:05.666736Z node 4 :FQ_ROW_DISPATCHER TRACE: filters_set.cpp:34: TTopicFilters: ProcessData for 1 clients, number rows: 2 2026-01-08T15:26:05.666749Z node 4 :FQ_ROW_DISPATCHER TRACE: filters_set.cpp:223: TTopicFilters: Pass 2 rows to purecalc filter (client id: [0:0:0]) 2026-01-08T15:26:05.666775Z node 4 :FQ_ROW_DISPATCHER TRACE: purecalc_filter.cpp:408: TProgramRunHandler: ProcessData for 2 rows 2026-01-08T15:26:06.044301Z node 5 :FQ_ROW_DISPATCHER DEBUG: purecalc_filter.cpp:461: GenerateSql: Generated sql: PRAGMA config.flags("LLVM", "OFF"); SELECT COALESCE(pass > 0, FALSE) AS _filter, _offset, (CAST(`ts` AS Timestamp?) - Interval("PT5S")) AS _watermark FROM Input; 2026-01-08T15:26:06.044563Z node 5 :FQ_ROW_DISPATCHER TRACE: filters_set.cpp:116: TTopicFilters: Create program with client id [0:0:0] 2026-01-08T15:26:06.044585Z node 5 :FQ_ROW_DISPATCHER TRACE: filters_set.cpp:123: TTopicFilters: Create purecalc program for query ' PRAGMA config.flags("LLVM", "OFF"); SELECT COALESCE(pass > 0, FALSE) AS _filter, _offset, (CAST(`ts` AS Timestamp?) - Interval("PT5S")) AS _wate ... 
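Each GenerateSql record above has the same shape: the client predicate is wrapped in `COALESCE(..., FALSE)` so a NULL result means "filtered out", `_offset` is always projected through, and a `_watermark` column is appended only when the client configured watermarks. A small sketch that reproduces the strings printed in this log (the real generator lives in purecalc_filter.cpp; this only mirrors its visible output):

```cpp
#include <string>

// Builds the filter query as printed by GenerateSql in the log above.
std::string GenerateFilterSql(const std::string& predicate,
                              const std::string& watermarkExpr = "") {
    std::string sql = "PRAGMA config.flags(\"LLVM\", \"OFF\");\n"
                      "SELECT COALESCE(" + predicate + ", FALSE) AS _filter, _offset";
    if (!watermarkExpr.empty()) {
        sql += ", (" + watermarkExpr + ") AS _watermark";
    }
    return sql + " FROM Input;";
}

// Examples matching the log:
//   GenerateFilterSql("a2 > 100")
//   GenerateFilterSql("TRUE", "CAST(`ts` AS Timestamp?) - Interval(\"PT5S\")")
```

This also explains the deliberately broken `a2 ... 50` case above: the invalid predicate is spliced into the template verbatim, so the failure surfaces only at purecalc compile time, as the "extraneous input" issue shows.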
1Z node 40 :FQ_ROW_DISPATCHER TRACE: compile_service.cpp:70: TPurecalcCompileActor [40:7593012793082380394:2051] [id 0]: Compilation completed for request 2026-01-08T15:27:51.355110Z node 40 :FQ_ROW_DISPATCHER TRACE: compile_service.cpp:151: TPurecalcCompileService: Compile finished for request with id 0 from [40:7593012793082380394:2051] 2026-01-08T15:27:51.359200Z node 40 :FQ_ROW_DISPATCHER TRACE: purecalc_filter.cpp:370: TProgramCompileHandler: Program compilation finished 2026-01-08T15:27:51.359319Z node 40 :FQ_ROW_DISPATCHER TRACE: purecalc_filter.cpp:408: TProgramRunHandler: ProcessData for 1 rows 2026-01-08T15:27:51.359847Z node 40 :FQ_ROW_DISPATCHER TRACE: purecalc_filter.cpp:408: TProgramRunHandler: ProcessData for 1 rows 2026-01-08T15:27:52.539668Z node 41 :FQ_ROW_DISPATCHER DEBUG: purecalc_filter.cpp:461: GenerateSql: Generated sql: PRAGMA config.flags("LLVM", "OFF"); SELECT COALESCE(a2 > 100, FALSE) AS _filter, _offset FROM Input; 2026-01-08T15:27:52.539974Z node 41 :FQ_ROW_DISPATCHER TRACE: purecalc_filter.cpp:341: TProgramCompileHandler: Send compile request with id 0 2026-01-08T15:27:52.541133Z node 41 :FQ_ROW_DISPATCHER TRACE: compile_service.cpp:131: TPurecalcCompileService: Add to compile queue request with id 0 from [41:7593012862465350487:2051] 2026-01-08T15:28:03.720720Z node 41 :FQ_ROW_DISPATCHER TRACE: compile_service.cpp:52: TPurecalcCompileActor [41:7593012862465350487:2051] [id 0]: Started compile request 2026-01-08T15:28:05.392892Z node 41 :FQ_ROW_DISPATCHER TRACE: compile_service.cpp:70: TPurecalcCompileActor [41:7593012862465350487:2051] [id 0]: Compilation completed for request 2026-01-08T15:28:05.393225Z node 41 :FQ_ROW_DISPATCHER TRACE: compile_service.cpp:151: TPurecalcCompileService: Compile finished for request with id 0 from [41:7593012862465350487:2051] 2026-01-08T15:28:06.075762Z node 42 :FQ_ROW_DISPATCHER DEBUG: purecalc_filter.cpp:461: GenerateSql: Generated sql: PRAGMA config.flags("LLVM", "OFF"); SELECT COALESCE(a2 > 100, FALSE) AS _filter, _offset FROM Input; 2026-01-08T15:28:06.076046Z node 42 :FQ_ROW_DISPATCHER TRACE: purecalc_filter.cpp:341: TProgramCompileHandler: Send compile request with id 0 2026-01-08T15:28:06.076129Z node 42 :FQ_ROW_DISPATCHER TRACE: compile_service.cpp:131: TPurecalcCompileService: Add to compile queue request with id 0 from [42:7593012924391379545:2051] 2026-01-08T15:28:10.466456Z node 42 :FQ_ROW_DISPATCHER TRACE: compile_service.cpp:52: TPurecalcCompileActor [42:7593012924391379545:2051] [id 0]: Started compile request 2026-01-08T15:28:10.511358Z node 42 :FQ_ROW_DISPATCHER TRACE: compile_service.cpp:70: TPurecalcCompileActor [42:7593012924391379545:2051] [id 0]: Compilation completed for request 2026-01-08T15:28:10.511517Z node 42 :FQ_ROW_DISPATCHER TRACE: compile_service.cpp:151: TPurecalcCompileService: Compile finished for request with id 0 from [42:7593012924391379545:2051] 2026-01-08T15:28:11.091241Z node 43 :FQ_ROW_DISPATCHER DEBUG: purecalc_filter.cpp:461: GenerateSql: Generated sql: PRAGMA config.flags("LLVM", "OFF"); SELECT COALESCE(a1 is null, FALSE) AS _filter, _offset FROM Input; 2026-01-08T15:28:11.091561Z node 43 :FQ_ROW_DISPATCHER TRACE: purecalc_filter.cpp:341: TProgramCompileHandler: Send compile request with id 0 2026-01-08T15:28:11.092490Z node 43 :FQ_ROW_DISPATCHER TRACE: compile_service.cpp:131: TPurecalcCompileService: Add to compile queue request with id 0 from [43:7593012946020696975:2051] 2026-01-08T15:28:14.974404Z node 43 :FQ_ROW_DISPATCHER TRACE: compile_service.cpp:52: TPurecalcCompileActor 
[43:7593012946020696975:2051] [id 0]: Started compile request 2026-01-08T15:28:15.004109Z node 43 :FQ_ROW_DISPATCHER TRACE: compile_service.cpp:70: TPurecalcCompileActor [43:7593012946020696975:2051] [id 0]: Compilation completed for request 2026-01-08T15:28:15.004214Z node 43 :FQ_ROW_DISPATCHER TRACE: compile_service.cpp:151: TPurecalcCompileService: Compile finished for request with id 0 from [43:7593012946020696975:2051] 2026-01-08T15:28:15.004402Z node 43 :FQ_ROW_DISPATCHER TRACE: purecalc_filter.cpp:370: TProgramCompileHandler: Program compilation finished 2026-01-08T15:28:15.004521Z node 43 :FQ_ROW_DISPATCHER TRACE: purecalc_filter.cpp:408: TProgramRunHandler: ProcessData for 1 rows 2026-01-08T15:28:15.525362Z node 44 :FQ_ROW_DISPATCHER DEBUG: purecalc_filter.cpp:461: GenerateSql: Generated sql: PRAGMA config.flags("LLVM", "OFF"); SELECT COALESCE(a2 > 50, FALSE) AS _filter, _offset FROM Input; 2026-01-08T15:28:15.525762Z node 44 :FQ_ROW_DISPATCHER TRACE: purecalc_filter.cpp:341: TProgramCompileHandler: Send compile request with id 0 2026-01-08T15:28:15.525893Z node 44 :FQ_ROW_DISPATCHER TRACE: compile_service.cpp:131: TPurecalcCompileService: Add to compile queue request with id 0 from [44:7593012964275580383:2051] 2026-01-08T15:28:19.400683Z node 44 :FQ_ROW_DISPATCHER TRACE: compile_service.cpp:52: TPurecalcCompileActor [44:7593012964275580383:2051] [id 0]: Started compile request 2026-01-08T15:28:20.052755Z node 45 :FQ_ROW_DISPATCHER DEBUG: purecalc_filter.cpp:461: GenerateSql: Generated sql: PRAGMA config.flags("LLVM", "OFF"); SELECT COALESCE(a2 ... 50, FALSE) AS _filter, _offset FROM Input; 2026-01-08T15:28:20.053011Z node 45 :FQ_ROW_DISPATCHER TRACE: purecalc_filter.cpp:341: TProgramCompileHandler: Send compile request with id 0 2026-01-08T15:28:20.058471Z node 45 :FQ_ROW_DISPATCHER TRACE: compile_service.cpp:131: TPurecalcCompileService: Add to compile queue request with id 0 from [45:7593012983534512280:2051] 2026-01-08T15:28:23.946418Z node 45 :FQ_ROW_DISPATCHER TRACE: compile_service.cpp:52: TPurecalcCompileActor [45:7593012983534512280:2051] [id 0]: Started compile request 2026-01-08T15:28:24.773436Z node 47 :FQ_ROW_DISPATCHER DEBUG: purecalc_filter.cpp:461: GenerateSql: Generated sql: PRAGMA config.flags("LLVM", "OFF"); SELECT COALESCE(TRUE, FALSE) AS _filter, _offset, (CAST(`ts` AS Timestamp?) 
- Interval("PT5S")) AS _watermark FROM Input; 2026-01-08T15:28:24.773723Z node 47 :FQ_ROW_DISPATCHER TRACE: purecalc_filter.cpp:341: TProgramCompileHandler: Send compile request with id 0 2026-01-08T15:28:24.783013Z node 47 :FQ_ROW_DISPATCHER TRACE: compile_service.cpp:131: TPurecalcCompileService: Add to compile queue request with id 0 from [47:7593013001977532148:2051] 2026-01-08T15:28:29.190723Z node 47 :FQ_ROW_DISPATCHER TRACE: compile_service.cpp:52: TPurecalcCompileActor [47:7593013001977532148:2051] [id 0]: Started compile request 2026-01-08T15:28:29.230478Z node 47 :FQ_ROW_DISPATCHER TRACE: compile_service.cpp:70: TPurecalcCompileActor [47:7593013001977532148:2051] [id 0]: Compilation completed for request 2026-01-08T15:28:29.230614Z node 47 :FQ_ROW_DISPATCHER TRACE: compile_service.cpp:151: TPurecalcCompileService: Compile finished for request with id 0 from [47:7593013001977532148:2051] 2026-01-08T15:28:29.230969Z node 47 :FQ_ROW_DISPATCHER TRACE: purecalc_filter.cpp:370: TProgramCompileHandler: Program compilation finished 2026-01-08T15:28:29.231071Z node 47 :FQ_ROW_DISPATCHER TRACE: purecalc_filter.cpp:408: TProgramRunHandler: ProcessData for 2 rows 2026-01-08T15:28:30.176098Z node 48 :FQ_ROW_DISPATCHER DEBUG: purecalc_filter.cpp:461: GenerateSql: Generated sql: PRAGMA config.flags("LLVM", "OFF"); SELECT COALESCE(pass > 0, FALSE) AS _filter, _offset, (CAST(`ts` AS Timestamp?) - Interval("PT5S")) AS _watermark FROM Input; 2026-01-08T15:28:30.176496Z node 48 :FQ_ROW_DISPATCHER TRACE: purecalc_filter.cpp:341: TProgramCompileHandler: Send compile request with id 0 2026-01-08T15:28:30.191158Z node 48 :FQ_ROW_DISPATCHER TRACE: compile_service.cpp:131: TPurecalcCompileService: Add to compile queue request with id 0 from [48:7593013028033981275:2051] 2026-01-08T15:28:34.429337Z node 48 :FQ_ROW_DISPATCHER TRACE: compile_service.cpp:52: TPurecalcCompileActor [48:7593013028033981275:2051] [id 0]: Started compile request 2026-01-08T15:28:34.458764Z node 48 :FQ_ROW_DISPATCHER TRACE: compile_service.cpp:70: TPurecalcCompileActor [48:7593013028033981275:2051] [id 0]: Compilation completed for request 2026-01-08T15:28:34.458887Z node 48 :FQ_ROW_DISPATCHER TRACE: compile_service.cpp:151: TPurecalcCompileService: Compile finished for request with id 0 from [48:7593013028033981275:2051] 2026-01-08T15:28:34.459188Z node 48 :FQ_ROW_DISPATCHER TRACE: purecalc_filter.cpp:370: TProgramCompileHandler: Program compilation finished 2026-01-08T15:28:34.459285Z node 48 :FQ_ROW_DISPATCHER TRACE: purecalc_filter.cpp:408: TProgramRunHandler: ProcessData for 2 rows 2026-01-08T15:28:34.459402Z node 48 :FQ_ROW_DISPATCHER TRACE: purecalc_filter.cpp:408: TProgramRunHandler: ProcessData for 2 rows 2026-01-08T15:28:35.135639Z node 49 :FQ_ROW_DISPATCHER DEBUG: purecalc_filter.cpp:461: GenerateSql: Generated sql: PRAGMA config.flags("LLVM", "OFF"); SELECT COALESCE(FALSE, FALSE) AS _filter, _offset, (CAST(`ts` AS Timestamp?) 
- Interval("PT5S")) AS _watermark FROM Input; 2026-01-08T15:28:35.136005Z node 49 :FQ_ROW_DISPATCHER TRACE: purecalc_filter.cpp:341: TProgramCompileHandler: Send compile request with id 0 2026-01-08T15:28:35.136116Z node 49 :FQ_ROW_DISPATCHER TRACE: compile_service.cpp:131: TPurecalcCompileService: Add to compile queue request with id 0 from [49:7593013051049431298:2051] 2026-01-08T15:28:39.228264Z node 49 :FQ_ROW_DISPATCHER TRACE: compile_service.cpp:52: TPurecalcCompileActor [49:7593013051049431298:2051] [id 0]: Started compile request 2026-01-08T15:28:39.278842Z node 49 :FQ_ROW_DISPATCHER TRACE: compile_service.cpp:70: TPurecalcCompileActor [49:7593013051049431298:2051] [id 0]: Compilation completed for request 2026-01-08T15:28:39.278999Z node 49 :FQ_ROW_DISPATCHER TRACE: compile_service.cpp:151: TPurecalcCompileService: Compile finished for request with id 0 from [49:7593013051049431298:2051] 2026-01-08T15:28:39.279267Z node 49 :FQ_ROW_DISPATCHER TRACE: purecalc_filter.cpp:370: TProgramCompileHandler: Program compilation finished 2026-01-08T15:28:39.279350Z node 49 :FQ_ROW_DISPATCHER TRACE: purecalc_filter.cpp:408: TProgramRunHandler: ProcessData for 2 rows 2026-01-08T15:28:39.279446Z node 49 :FQ_ROW_DISPATCHER TRACE: purecalc_filter.cpp:408: TProgramRunHandler: ProcessData for 2 rows 2026-01-08T15:28:39.779917Z node 50 :FQ_ROW_DISPATCHER TRACE: raw_parser.cpp:56: TRawParser: Add 1 messages to parse 2026-01-08T15:28:39.779950Z node 50 :FQ_ROW_DISPATCHER TRACE: raw_parser.cpp:93: TRawParser: Do parsing, first offset: 42, value: {"a1": "hello1__large_str", "a2": 101, "event": "event1"} 2026-01-08T15:28:39.897115Z node 51 :FQ_ROW_DISPATCHER TRACE: raw_parser.cpp:56: TRawParser: Add 3 messages to parse 2026-01-08T15:28:39.897155Z node 51 :FQ_ROW_DISPATCHER TRACE: raw_parser.cpp:93: TRawParser: Do parsing, first offset: 42, value: {"a1": "hello1", "a2": "101", "event": "event1"} 2026-01-08T15:28:39.897227Z node 51 :FQ_ROW_DISPATCHER TRACE: raw_parser.cpp:93: TRawParser: Do parsing, first offset: 43, value: {"a1": "hello1", "a2": "101", "event": "event2"} 2026-01-08T15:28:39.897251Z node 51 :FQ_ROW_DISPATCHER TRACE: raw_parser.cpp:93: TRawParser: Do parsing, first offset: 44, value: {"a2": "101", "a1": "hello1", "event": "event3"} 2026-01-08T15:28:40.045494Z node 52 :FQ_ROW_DISPATCHER TRACE: raw_parser.cpp:56: TRawParser: Add 1 messages to parse 2026-01-08T15:28:40.045556Z node 52 :FQ_ROW_DISPATCHER TRACE: raw_parser.cpp:93: TRawParser: Do parsing, first offset: 42, value: {"a1": "hello1__large_str", "a2": 101, "event": "event1"} 2026-01-08T15:28:40.045788Z node 52 :FQ_ROW_DISPATCHER TRACE: raw_parser.cpp:56: TRawParser: Add 1 messages to parse 2026-01-08T15:28:40.045809Z node 52 :FQ_ROW_DISPATCHER TRACE: raw_parser.cpp:93: TRawParser: Do parsing, first offset: 42, value: {"a1": "hello2__large_str", "a2": 101, "event": "event2"} |85.6%| [TM] {BAZEL_UPLOAD} ydb/core/fq/libs/row_dispatcher/format_handler/ut/unittest >> TTxDataShardFilterKMeansScan::BuildToPosting >> TGRpcRateLimiterTest::CreateResource [GOOD] >> TGRpcRateLimiterTest::UpdateResource >> Vacuum::MultipleVacuumsWithOldGenerations [GOOD] >> Vacuum::VacuumWithRestart >> TControlPlaneProxyCheckNegativePermissionsSuccess::ShouldSendCreateConnection [GOOD] >> TControlPlaneProxyCheckNegativePermissionsSuccess::ShouldSendCreateConnectionWithServiceAccount >> TCreateAndDropViewTest::CreateViewIfNotExists [GOOD] >> TCreateAndDropViewTest::DropView >> test_cte.py::TestCte::test_toplevel >> 
TControlPlaneProxyCheckNegativePermissionsSuccess::ShouldSendCreateConnectionWithServiceAccount [GOOD] >> TControlPlaneProxyCheckNegativePermissionsSuccess::ShouldSendListConnections ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/datashard/ut_init/unittest >> TTxDataShardTestInit::TestResolvePathAfterRestart [GOOD] Test command err: 2026-01-08T15:28:32.340873Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3119: StateInit, received event# 268828672, Sender [1:106:2139], Recipient [1:112:2143]: NKikimr::TEvTablet::TEvBoot 2026-01-08T15:28:32.349691Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3119: StateInit, received event# 268828673, Sender [1:106:2139], Recipient [1:112:2143]: NKikimr::TEvTablet::TEvRestored 2026-01-08T15:28:32.350179Z node 1 :TX_DATASHARD INFO: datashard.cpp:374: TDataShard::OnActivateExecutor: tablet 9437184 actor [1:112:2143] 2026-01-08T15:28:32.350622Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:661: TxInitSchema.Execute 2026-01-08T15:28:32.402943Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateInactive, received event# 268828684, Sender [1:106:2139], Recipient [1:112:2143]: NKikimr::TEvTablet::TEvFollowerSyncComplete 2026-01-08T15:28:32.408874Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:727: TxInitSchema.Complete 2026-01-08T15:28:32.409045Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:50: TDataShard::TTxInit::Execute 2026-01-08T15:28:32.410903Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1324: LoadChangeRecords: QueueSize: 0, at tablet: 9437184 2026-01-08T15:28:32.411013Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1380: LoadLockChangeRecords at tablet: 9437184 2026-01-08T15:28:32.411084Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1429: LoadChangeRecordCommits at tablet: 9437184 2026-01-08T15:28:32.411505Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:94: TDataShard::TTxInit::Complete 2026-01-08T15:28:32.411868Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:102: TDataShard::TTxInitRestored::Execute 2026-01-08T15:28:32.411997Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:142: DataShard 9437184 persisting started state actor id [1:136:2143] in generation 2 2026-01-08T15:28:32.453112Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:151: TDataShard::TTxInitRestored::Complete 2026-01-08T15:28:32.488770Z node 1 :TX_DATASHARD INFO: datashard.cpp:418: Switched to work state WaitScheme tabletId 9437184 2026-01-08T15:28:32.489019Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:458: 9437184 not sending time cast registration request in state WaitScheme: missing processing params 2026-01-08T15:28:32.489152Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1259: Change sender created: at tablet: 9437184, actorId: [1:141:2163] 2026-01-08T15:28:32.489198Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1264: Trying to activate change sender: at tablet: 9437184 2026-01-08T15:28:32.489236Z node 1 :TX_DATASHARD INFO: datashard.cpp:1281: Cannot activate change sender: at tablet: 9437184, state: WaitScheme 2026-01-08T15:28:32.489267Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 9437184 2026-01-08T15:28:32.489452Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3153: StateWork, received event# 2146435072, Sender [1:112:2143], Recipient [1:112:2143]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvProgressTransaction 2026-01-08T15:28:32.489492Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3178: StateWork, processing event TEvPrivate::TEvProgressTransaction 2026-01-08T15:28:32.489644Z node 1 :TX_DATASHARD DEBUG: 
datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 9437184 2026-01-08T15:28:32.489727Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 9437184 2026-01-08T15:28:32.489769Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 9437184 2026-01-08T15:28:32.489806Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 9437184 active 0 active planned 0 immediate 0 planned 0 2026-01-08T15:28:32.489837Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:385: Check unit PlanQueue at 9437184 2026-01-08T15:28:32.489865Z node 1 :TX_DATASHARD TRACE: plan_queue_unit.cpp:59: TPlanQueueUnit at 9437184 has no attached operations 2026-01-08T15:28:32.489896Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:403: Unit PlanQueue has no ready operations at 9437184 2026-01-08T15:28:32.489932Z node 1 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 9437184 TxInFly 0 2026-01-08T15:28:32.489963Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 9437184 2026-01-08T15:28:32.491212Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3153: StateWork, received event# 269551617, Sender [1:103:2137], Recipient [1:112:2143]: NKikimrTxDataShard.TEvGetShardState Source { RawX1: 103 RawX2: 4294969433 } 2026-01-08T15:28:32.491297Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3156: StateWork, processing event TEvDataShard::TEvGetShardState ... waiting for SysViewsRoster update finished 2026-01-08T15:28:35.908938Z node 2 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:125: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, got undelivered to scheme cache: ActorUnknown 2026-01-08T15:28:36.037551Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:125: [WorkloadService] [Service] Failed to discover tenant nodes 2026-01-08T15:28:36.039852Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [2:300:2344], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2026-01-08T15:28:36.040214Z node 2 :METADATA_PROVIDER ERROR: log.cpp:841: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2026-01-08T15:28:36.040321Z node 2 :METADATA_PROVIDER ERROR: log.cpp:841: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2026-01-08T15:28:36.333207Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2026-01-08T15:28:36.333357Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2026-01-08T15:28:36.427582Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1374: Notification cookie mismatch for subscription [2:34:2081] 1767886112891671 != 1767886112891674 2026-01-08T15:28:36.442557Z node 2 :IMPORT WARN: schemeshard_import.cpp:440: Table profiles were not loaded 2026-01-08T15:28:36.492883Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2026-01-08T15:28:36.573260Z node 2 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: PathErrorUnknown, path: Root/.metadata/script_executions ... waiting for SysViewsRoster update finished (done) 2026-01-08T15:28:36.874333Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2026-01-08T15:28:36.888783Z node 2 :TX_CONVEYOR ERROR: log.cpp:841: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2026-01-08T15:28:36.995366Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:28:37.025600Z node 2 :TX_DATASHARD INFO: datashard.cpp:374: TDataShard::OnActivateExecutor: tablet 72075186224037888 actor [2:886:2765] 2026-01-08T15:28:37.025896Z node 2 :TX_DATASHARD DEBUG: datashard__init.cpp:661: TxInitSchema.Execute 2026-01-08T15:28:37.078713Z node 2 :TX_DATASHARD DEBUG: datashard__init.cpp:727: TxInitSchema.Complete 2026-01-08T15:28:37.078869Z node 2 :TX_DATASHARD DEBUG: datashard__init.cpp:50: TDataShard::TTxInit::Execute 2026-01-08T15:28:37.080729Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:1324: LoadChangeRecords: QueueSize: 0, at tablet: 72075186224037888 2026-01-08T15:28:37.080832Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:1380: LoadLockChangeRecords at tablet: 72075186224037888 2026-01-08T15:28:37.080886Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:1429: LoadChangeRecordCommits at tablet: 72075186224037888 2026-01-08T15:28:37.081298Z node 2 :TX_DATASHARD DEBUG: datashard__init.cpp:94: TDataShard::TTxInit::Complete 2026-01-08T15:28:37.081484Z node 2 :TX_DATASHARD 
DEBUG: datashard__init.cpp:102: TDataShard::TTxInitRestored::Execute 2026-01-08T15:28:37.081593Z node 2 :TX_DATASHARD DEBUG: datashard__init.cpp:142: DataShard 72075186224037888 persisting started state actor id [2:901:2765] in generation 1 2026-01-08T15:28:37.092556Z node 2 :TX_DATASHARD DEBUG: datashard__init.cpp:151: TDataShard::TTxInitRestored::Complete 2026-01-08T15:28:37.092655Z node 2 :TX_DATASHARD INFO: datashard.cpp:418: Switched to work state WaitScheme tabletId 72075186224037888 2026-01-08T15:28:37.092795Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:458: 72075186224037888 not sending time cast registration request in state WaitScheme: missing processing params 2026-01-08T15:28:37.092887Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:1259: Change sender created: at tablet: 72075186224037888, actorId: [2:903:2775] 2026-01-08T15:28:37.092931Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:1264: Trying to activate change sender: at tablet: 72075186224037888 2026-01-08T15:28:37.092972Z node 2 :TX_DATASHARD INFO: datashard.cpp:1281: Cannot activate change sender: at tablet: 72075186224037888, state: WaitScheme 2026-01-08T15:28:37.093013Z node 2 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2026-01-08T15:28:37.093474Z node 2 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 72075186224037888 2026-01-08T15:28:37.093587Z node 2 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 72075186224037888 2026-01-08T15:28:37.093710Z node 2 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2026-01-08T15:28:37.093752Z node 2 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 0 2026-01-08T15:28:37.093813Z node 2 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186224037888 TxInFly 0 2026-01-08T15:28:37.093857Z node 2 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2026-01-08T15:28:37.093950Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:3740: Server connected at leader tablet# 72075186224037888, clientId# [2:883:2763], serverId# [2:888:2766], sessionId# [0:0:0] 2026-01-08T15:28:37.094671Z node 2 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037888 2026-01-08T15:28:37.094965Z node 2 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:133: Propose scheme transaction at tablet 72075186224037888 txId 281474976715657 ssId 72057594046644480 seqNo 2:1 2026-01-08T15:28:37.095078Z node 2 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:221: Prepared scheme transaction txId 281474976715657 at tablet 72075186224037888 2026-01-08T15:28:37.097039Z node 2 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:180: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2026-01-08T15:28: ... 
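The restart half of TestResolvePathAfterRestart repeats the boot sequence seen on first start, with only the persisted generation and work state differing: TxInitSchema, then TTxInit (change records, lock changes, commits), then TTxInitRestored persisting the started-state marker for the new generation, then the switch to the state recorded on disk (WaitScheme on first boot, Ready after). A schematic sketch of that ordering — an assumed shape for illustration, not the real TDataShard code:

```cpp
#include <cstdio>

enum class EState { Uninit, WaitScheme, Ready };

struct TDataShardSketch {
    EState State = EState::Uninit;
    unsigned Generation = 0;

    void OnActivateExecutor() {
        TxInitSchema();               // create/upgrade local tables
        TxInit();                     // load change records, locks, commits
        ++Generation;                 // generation 1 = first boot, 2+ = restart
        TxInitRestored();             // persist the started-state marker
        State = LoadPersistedState(); // WaitScheme first time, Ready after
        std::printf("Switched to work state %d, generation %u\n",
                    static_cast<int>(State), Generation);
    }

    void TxInitSchema() {}
    void TxInit() {}
    void TxInitRestored() {}
    EState LoadPersistedState() {
        return Generation == 1 ? EState::WaitScheme : EState::Ready;
    }
};
```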
planned 0 immediate 0 planned 1 2026-01-08T15:28:41.745496Z node 3 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:333: Found ready operation [2000:281474976715657] in PlanQueue unit at 72075186224037888 2026-01-08T15:28:41.745750Z node 3 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:634: LoadTxDetails at 72075186224037888 loaded tx from db 2000:281474976715657 keys extracted: 0 2026-01-08T15:28:41.745867Z node 3 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 (dry run) active 1 active planned 1 immediate 0 planned 1 2026-01-08T15:28:41.746067Z node 3 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2026-01-08T15:28:41.746132Z node 3 :TX_DATASHARD INFO: create_table_unit.cpp:69: Trying to CREATE TABLE at 72075186224037888 tableId# [OwnerId: 72057594046644480, LocalPathId: 38] schema version# 1 2026-01-08T15:28:41.746579Z node 3 :TX_DATASHARD INFO: datashard.cpp:476: Send registration request to time cast Ready tabletId 72075186224037888 mediators count is 1 coordinators count is 1 buckets per mediator 2 2026-01-08T15:28:41.746964Z node 3 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 (dry run) active 0 active planned 0 immediate 0 planned 0 2026-01-08T15:28:41.749112Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:3772: Got TEvMediatorTimecast::TEvRegisterTabletResult at 72075186224037888 time 0 2026-01-08T15:28:41.749164Z node 3 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2026-01-08T15:28:41.750602Z node 3 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:98: Sending '{TEvPlanStepAccepted TabletId# 72075186224037888 step# 2000} 2026-01-08T15:28:41.750698Z node 3 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2026-01-08T15:28:41.751862Z node 3 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2026-01-08T15:28:41.751908Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:1264: Trying to activate change sender: at tablet: 72075186224037888 2026-01-08T15:28:41.751953Z node 3 :TX_DATASHARD INFO: datashard.cpp:1302: Change sender activated: at tablet: 72075186224037888 2026-01-08T15:28:41.752018Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:810: Complete [2000 : 281474976715657] from 72075186224037888 at tablet 72075186224037888 send result to client [3:399:2398], exec latency: 0 ms, propose latency: 0 ms 2026-01-08T15:28:41.752081Z node 3 :TX_DATASHARD INFO: datashard.cpp:1599: 72075186224037888 Sending notify to schemeshard 72057594046644480 txId 281474976715657 state Ready TxInFly 0 2026-01-08T15:28:41.752151Z node 3 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2026-01-08T15:28:41.753789Z node 3 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:180: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2026-01-08T15:28:41.755427Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:3790: Got TEvMediatorTimecast::TEvSubscribeReadStepResult at 72075186224037888 coordinator 72057594046316545 last step 0 next step 2000 2026-01-08T15:28:41.755573Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:2965: Handle TEvSchemaChangedResult 281474976715657 datashard 72075186224037888 state Ready 2026-01-08T15:28:41.755632Z node 3 :TX_DATASHARD DEBUG: datashard__schema_changed.cpp:22: 
72075186224037888 Got TEvSchemaChangedResult from SS at 72075186224037888 2026-01-08T15:28:41.761482Z node 3 :TX_DATASHARD INFO: datashard.cpp:256: OnTabletDead: 72075186224037888 2026-01-08T15:28:41.761678Z node 3 :TX_DATASHARD INFO: datashard.cpp:1310: Change sender killed: at tablet: 72075186224037888 2026-01-08T15:28:41.803309Z node 3 :TX_DATASHARD INFO: datashard.cpp:374: TDataShard::OnActivateExecutor: tablet 72075186224037888 actor [3:956:2812] 2026-01-08T15:28:41.803593Z node 3 :TX_DATASHARD DEBUG: datashard__init.cpp:661: TxInitSchema.Execute 2026-01-08T15:28:41.808376Z node 3 :TX_DATASHARD DEBUG: datashard__init.cpp:727: TxInitSchema.Complete 2026-01-08T15:28:41.809487Z node 3 :TX_DATASHARD DEBUG: datashard__init.cpp:50: TDataShard::TTxInit::Execute 2026-01-08T15:28:41.813761Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:1324: LoadChangeRecords: QueueSize: 0, at tablet: 72075186224037888 2026-01-08T15:28:41.813860Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:1380: LoadLockChangeRecords at tablet: 72075186224037888 2026-01-08T15:28:41.813936Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:1429: LoadChangeRecordCommits at tablet: 72075186224037888 2026-01-08T15:28:41.814401Z node 3 :TX_DATASHARD DEBUG: datashard__init.cpp:94: TDataShard::TTxInit::Complete 2026-01-08T15:28:41.814624Z node 3 :TX_DATASHARD DEBUG: datashard__init.cpp:102: TDataShard::TTxInitRestored::Execute 2026-01-08T15:28:41.814692Z node 3 :TX_DATASHARD DEBUG: datashard__init.cpp:142: DataShard 72075186224037888 persisting started state actor id [3:971:2812] in generation 2 2026-01-08T15:28:41.836776Z node 3 :TX_DATASHARD DEBUG: datashard__init.cpp:151: TDataShard::TTxInitRestored::Complete 2026-01-08T15:28:41.836908Z node 3 :TX_DATASHARD INFO: datashard.cpp:418: Switched to work state Ready tabletId 72075186224037888 2026-01-08T15:28:41.837016Z node 3 :TX_DATASHARD INFO: datashard.cpp:1599: 72075186224037888 Sending notify to schemeshard 72057594046644480 txId 281474976715657 state Ready TxInFly 0 2026-01-08T15:28:41.837121Z node 3 :TX_DATASHARD INFO: datashard.cpp:476: Send registration request to time cast Ready tabletId 72075186224037888 mediators count is 1 coordinators count is 1 buckets per mediator 2 2026-01-08T15:28:41.837207Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:4210: Resolve path at 72075186224037888: reason# empty path 2026-01-08T15:28:41.837325Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:1259: Change sender created: at tablet: 72075186224037888, actorId: [3:975:2822] 2026-01-08T15:28:41.837381Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:1264: Trying to activate change sender: at tablet: 72075186224037888 2026-01-08T15:28:41.837425Z node 3 :TX_DATASHARD INFO: datashard.cpp:1302: Change sender activated: at tablet: 72075186224037888 2026-01-08T15:28:41.837466Z node 3 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2026-01-08T15:28:41.837728Z node 3 :TX_DATASHARD DEBUG: datashard__init.cpp:742: TxInitSchemaDefaults.Execute 2026-01-08T15:28:41.837998Z node 3 :TX_DATASHARD DEBUG: datashard__init.cpp:754: TxInitSchemaDefaults.Complete 2026-01-08T15:28:41.838942Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:5937: Got TEvDataShard::TEvSchemaChanged for unknown txId 281474976715657 message# Source { RawX1: 956 RawX2: 12884904700 } Origin: 72075186224037888 State: 2 TxId: 281474976715657 Step: 0 Generation: 2 2026-01-08T15:28:41.839270Z node 3 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 
72075186224037888 2026-01-08T15:28:41.839366Z node 3 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 72075186224037888 2026-01-08T15:28:41.839523Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:3772: Got TEvMediatorTimecast::TEvRegisterTabletResult at 72075186224037888 time 2000 2026-01-08T15:28:41.839564Z node 3 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2026-01-08T15:28:41.839910Z node 3 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:180: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2026-01-08T15:28:41.839995Z node 3 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2026-01-08T15:28:41.840033Z node 3 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 0 2026-01-08T15:28:41.840075Z node 3 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186224037888 TxInFly 0 2026-01-08T15:28:41.840116Z node 3 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2026-01-08T15:28:41.840340Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:2965: Handle TEvSchemaChangedResult 281474976715657 datashard 72075186224037888 state Ready 2026-01-08T15:28:41.873772Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:4271: Got scheme resolve result at 72075186224037888: Status: StatusSuccess Path: "/Root/table-1" PathDescription { Self { Name: "table-1" PathId: 38 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976715657 CreateStep: 2000 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "table-1" Columns { Name: "key" Type: "Uint32" TypeId: 2 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "value" Type: "Uint32" TypeId: 2 Id: 2 NotNull: false IsBuildInProgress: false } KeyColumnNames: "key" KeyColumnIds: 1 TableSchemaVersion: 1 IsBackup: false IsRestore: false } TableStats { DataSize: 0 RowCount: 0 IndexSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 0 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 1 RangeReadRows: 0 StoragePools { } ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 0 Memory: 0 Network: 0 Storage: 0 ReadThroughput: 0 WriteThroughput: 0 ReadIops: 0 WriteIops: 0 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 500 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } DomainKey { SchemeShard: 72057594046644480 PathId: 1 } PathsInside: 37 PathsLimit: 10000 ShardsInside: 1 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046644480 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 
1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 38 PathOwnerId: 72057594046644480 2026-01-08T15:28:41.874158Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:3790: Got TEvMediatorTimecast::TEvSubscribeReadStepResult at 72075186224037888 coordinator 72057594046316545 last step 0 next step 2000 2026-01-08T15:28:41.874352Z node 3 :TX_DATASHARD DEBUG: datashard__schema_changed.cpp:22: 72075186224037888 Got TEvSchemaChangedResult from SS at 72075186224037888 2026-01-08T15:28:41.874595Z node 3 :TX_DATASHARD DEBUG: datashard__store_table_path.cpp:20: TTxStoreTablePath::Execute at 72075186224037888 2026-01-08T15:28:41.875900Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:3740: Server connected at leader tablet# 72075186224037888, clientId# [3:979:2826], serverId# [3:981:2827], sessionId# [0:0:0] 2026-01-08T15:28:41.890345Z node 3 :TX_DATASHARD DEBUG: datashard__store_table_path.cpp:39: TTxStoreTablePath::Complete at 72075186224037888 |85.6%| [TM] {BAZEL_UPLOAD} ydb/core/tx/datashard/ut_init/unittest >> BasicUsage::CreateTopicWithCustomMetricsLevel [GOOD] >> KeyValueGRPCService::SimpleConcatUnexistedKeyV1 [GOOD] >> KeyValueGRPCService::SimpleConcatUnexistedKeyV2 >> BasicUsage::TSimpleWriteSession_AutoSeqNo_BasicUsage [GOOD] >> TControlPlaneProxyCheckNegativePermissionsSuccess::ShouldSendListConnections [GOOD] >> TControlPlaneProxyCheckNegativePermissionsSuccess::ShouldSendDescribeConnection >> BasicUsage::TWriteSession_AutoBatching [GOOD] >> BasicUsage::TWriteSession_WriteEncoded |85.6%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/datashard/ut_followers/ydb-core-tx-datashard-ut_followers |85.6%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/datashard/ut_followers/ydb-core-tx-datashard-ut_followers |85.6%| [TS] {RESULT} ydb/core/tx/long_tx_service/ut/unittest |85.6%| [TM] {RESULT} ydb/core/tx/datashard/ut_range_ops/unittest |85.6%| [TM] {RESULT} ydb/core/graph/ut/unittest |85.6%| [TM] {RESULT} ydb/core/fq/libs/row_dispatcher/format_handler/ut/unittest |85.6%| [TM] {RESULT} ydb/core/tx/datashard/ut_init/unittest |85.6%| [LD] {RESULT} $(B)/ydb/core/tx/datashard/ut_followers/ydb-core-tx-datashard-ut_followers >> TControlPlaneProxyCheckNegativePermissionsSuccess::ShouldSendDescribeConnection [GOOD] >> TControlPlaneProxyCheckNegativePermissionsSuccess::ShouldSendModifyConnection >> TListAllTopicsTests::ListLimitAndPaging [GOOD] >> TabletService_Restart::OnlyAdminsAllowed [GOOD] >> TSequence::CreateTableWithDefaultFromSequenceBadRequest [GOOD] >> test_yt_reading.py::TestYtReading::test_partitioned_reading >> test.py::test[solomon-BadDownsamplingAggregation-] >> TControlPlaneProxyCheckNegativePermissionsSuccess::ShouldSendModifyConnection [GOOD] >> TControlPlaneProxyCheckNegativePermissionsSuccess::ShouldSendModifyConnectionWithServiceAccount |85.6%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/persqueue/public/fetcher/ut/ydb-core-persqueue-public-fetcher-ut |85.6%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/persqueue/public/fetcher/ut/ydb-core-persqueue-public-fetcher-ut |85.6%| [LD] {RESULT} 
$(B)/ydb/core/persqueue/public/fetcher/ut/ydb-core-persqueue-public-fetcher-ut >> TControlPlaneProxyCheckNegativePermissionsSuccess::ShouldSendModifyConnectionWithServiceAccount [GOOD] >> TControlPlaneProxyCheckNegativePermissionsSuccess::ShouldSendDeleteConnection ------- [TM] {asan, default-linux-x86_64, release} ydb/public/sdk/cpp/src/client/persqueue_public/ut/with_offset_ranges_mode_ut/unittest >> BasicUsage::CreateTopicWithCustomMetricsLevel [GOOD] Test command err: 2026-01-08T15:26:07.046395Z :MaxByteSizeEqualZero INFO: Random seed for debugging is 1767885967046361 2026-01-08T15:26:07.651279Z node 1 :METADATA_PROVIDER WARN: log.cpp:841: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7593012413579705960:2146];send_to=[0:7307199536658146131:7762515]; 2026-01-08T15:26:07.652039Z node 1 :METADATA_PROVIDER ERROR: log.cpp:841: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2026-01-08T15:26:08.315883Z node 1 :PQ_READ_PROXY DEBUG: caching_service.cpp:44: Direct read cache: : Created 2026-01-08T15:26:08.352111Z node 2 :PQ_READ_PROXY DEBUG: caching_service.cpp:44: Direct read cache: : Created 2026-01-08T15:26:08.782418Z node 1 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: LookupError, path: Root/.metadata/script_executions 2026-01-08T15:26:08.782711Z node 1 :METADATA_PROVIDER ERROR: log.cpp:841: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2026-01-08T15:26:08.786438Z node 2 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: LookupError, path: Root/.metadata/script_executions 2026-01-08T15:26:08.786529Z node 2 :METADATA_PROVIDER ERROR: log.cpp:841: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2026-01-08T15:26:08.910568Z node 1 :TX_CONVEYOR ERROR: log.cpp:841: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2026-01-08T15:26:09.031807Z node 2 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: LookupError, path: Root/.metadata/script_executions 2026-01-08T15:26:09.176656Z node 1 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: LookupError, path: Root/.metadata/script_executions 2026-01-08T15:26:09.328192Z node 2 :TX_CONVEYOR ERROR: log.cpp:841: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2026-01-08T15:26:09.633272Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2026-01-08T15:26:09.643019Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2026-01-08T15:26:09.647333Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2026-01-08T15:26:09.647399Z node 1 
:HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2026-01-08T15:26:09.814442Z node 1 :HIVE WARN: hive_impl.cpp:814: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 2 Cookie 2 2026-01-08T15:26:09.820966Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2026-01-08T15:26:09.821749Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2026-01-08T15:26:09.822687Z node 1 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: LookupError, path: Root/.metadata/script_executions 2026-01-08T15:26:10.832417Z node 1 :IMPORT WARN: schemeshard_import.cpp:440: Table profiles were not loaded 2026-01-08T15:26:10.897818Z node 1 :METADATA_PROVIDER ERROR: log.cpp:841: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2026-01-08T15:26:11.091471Z node 2 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: PathErrorUnknown, path: Root/.metadata/script_executions 2026-01-08T15:26:11.102439Z node 1 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: PathErrorUnknown, path: Root/.metadata/script_executions 2026-01-08T15:26:12.358917Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: /home/runner/.ya/build/build_root/663n/003ea5/r3tmp/yandexzV7bb3.tmp 2026-01-08T15:26:12.358940Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: /home/runner/.ya/build/build_root/663n/003ea5/r3tmp/yandexzV7bb3.tmp 2026-01-08T15:26:12.359632Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:202: successfully initialized from file: /home/runner/.ya/build/build_root/663n/003ea5/r3tmp/yandexzV7bb3.tmp 2026-01-08T15:26:12.360025Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2026-01-08T15:26:12.606251Z INFO: TTestServer started on Port 30500 GrpcPort 23850 2026-01-08T15:26:12.637010Z node 1 :METADATA_PROVIDER ERROR: log.cpp:841: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7593012413579705960:2146];send_to=[0:7307199536658146131:7762515]; 2026-01-08T15:26:12.637080Z node 1 :METADATA_PROVIDER ERROR: log.cpp:841: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; PQClient connected to localhost:23850 2026-01-08T15:26:12.916000Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2026-01-08T15:26:24.043036Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7593012487211697682:2315], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:26:24.046395Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7593012487211697671:2312], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:26:24.046506Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:263: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:26:24.054594Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7593012487211697708:2317], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:26:24.054734Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:263: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:26:24.169349Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976720657:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2026-01-08T15:26:24.211337Z node 1 :KQP_PROXY ERROR: kqp_proxy_service.cpp:1359: TraceId: "01kef3e9ph1h0yy27hhshk6tdw", Request deadline has expired for 6.079991s seconds 2026-01-08T15:26:24.328155Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7593012487211697685:2316], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976720657 completed, doublechecking } 2026-01-08T15:26:24.443187Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7747: Cannot get console configs 2026-01-08T15:26:24.443903Z node 1 :IMPORT WARN: schemeshard_import.cpp:440: Table profiles were not loaded 2026-01-08T15:26:24.674270Z node 2 :TX_PROXY ERROR: schemereq.cpp:624: Actor# [2:7593012487211697717:2153] txid# 281474976720658, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 44], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2026-01-08T15:26:27.385529Z node 1 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:625: Compilation failed, self: [1:7593012486594151515:2365], status: SCHEME_ERROR, issues:
: Error: Type annotation, code: 1030
:3:16: Error: At function: KiReadTable!
:3:16: Error: Cannot find table 'db.[/Root/PQ/Config/V2/Cluster]' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions., code: 2003 2026-01-08T15:26:27.415721Z node 1 :KQP_SESSION WARN: {KQPSA@kqp_session_actor.cpp:2679} SessionId: ydb://session/3?node_id=1&id=YjU2YmUwMDItYWY3NDc5NzQtYmI1MDYzYmEtZGRiMjhiMmU=, ActorId: [1:7593012486594151421:2356], ActorState: ExecuteState, LegacyTraceId: 01kef3emeaatxv6959dmmhezvs, ReplyQueryCompileError, remove tx status# SCHEME_ERROR issues# { message: "Type annotation" issue_code: 1030 severity: 1 issues { position { row: 3 column: 16 } message: "At function: KiReadTable!" end_position { row: 3 column: 16 } severity: 1 issues { position { row: 3 column: 16 } message: "Cannot find table \'db.[/Root/PQ/Config/V2/Cluster]\' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions." end_position { row: 3 column: 16 } issue_code: 2003 severity: 1 } } } tx_id# trace_id# 2026-01-08T15:26:27.441750Z node 1 :PERSQUEUE_CLUSTER_TRACKER ERROR: cluster_tracker.cpp:167: failed to list clusters: { Response { QueryIssues { message: "Type annotation" issue_code: 1030 severity: 1 issues { position { row: 3 column: 16 } message: "At function: KiReadTable!" end_position { row: 3 column: 16 } severity: 1 issues { position { row: 3 column: 16 } message: "Cannot find table \'db.[/Root/PQ/Config/V2/Cluster]\' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions." end_position { row: 3 column: 16 } issue_code: 2003 severity: 1 } } } TxMeta { } } YdbStatus: SCHEME_ERROR ConsumedRu: 1 } 2026-01-08T15:26:27.456424Z node 2 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:625: Compilation failed, self: [2:7593012487211697724:2322], status: SCHEME_ERROR, issues:
: Error: Type annotation, code: 1030
:3:16: Error: At function: KiReadTable!
:3:16: Error: Cannot find table 'db.[/Root/PQ/Config/V2/Cluster]' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions., code: 2003 2026-01-08T15:26:27.462997Z node 2 :KQP_SESSION WARN: {KQPSA@kqp_session_actor.cpp:2679} Sessio ... le] Events: 0, PendingCommits: 0, PendingWrites: 0 2026-01-08T15:28:42.051265Z node 9 :PERSQUEUE DEBUG: partition.cpp:2347: [72075186224037892][Partition][0][StateIdle] Try persist 2026-01-08T15:28:42.064584Z node 9 :PERSQUEUE DEBUG: partition.cpp:2329: [72075186224037894][Partition][0][StateIdle] Process user action and tx events 2026-01-08T15:28:42.064636Z node 9 :PERSQUEUE DEBUG: partition.cpp:2388: [72075186224037894][Partition][0][StateIdle] Events: 0, PendingCommits: 0, PendingWrites: 0 2026-01-08T15:28:42.064657Z node 9 :PERSQUEUE DEBUG: partition.cpp:2337: [72075186224037894][Partition][0][StateIdle] Process user action and tx pending commits 2026-01-08T15:28:42.064682Z node 9 :PERSQUEUE DEBUG: partition.cpp:2388: [72075186224037894][Partition][0][StateIdle] Events: 0, PendingCommits: 0, PendingWrites: 0 2026-01-08T15:28:42.064704Z node 9 :PERSQUEUE DEBUG: partition.cpp:2347: [72075186224037894][Partition][0][StateIdle] Try persist 2026-01-08T15:28:42.151476Z node 9 :PERSQUEUE DEBUG: partition.cpp:2329: [72075186224037892][Partition][0][StateIdle] Process user action and tx events 2026-01-08T15:28:42.151527Z node 9 :PERSQUEUE DEBUG: partition.cpp:2388: [72075186224037892][Partition][0][StateIdle] Events: 0, PendingCommits: 0, PendingWrites: 0 2026-01-08T15:28:42.151544Z node 9 :PERSQUEUE DEBUG: partition.cpp:2337: [72075186224037892][Partition][0][StateIdle] Process user action and tx pending commits 2026-01-08T15:28:42.151564Z node 9 :PERSQUEUE DEBUG: partition.cpp:2388: [72075186224037892][Partition][0][StateIdle] Events: 0, PendingCommits: 0, PendingWrites: 0 2026-01-08T15:28:42.151591Z node 9 :PERSQUEUE DEBUG: partition.cpp:2347: [72075186224037892][Partition][0][StateIdle] Try persist 2026-01-08T15:28:42.164903Z node 9 :PERSQUEUE DEBUG: partition.cpp:2329: [72075186224037894][Partition][0][StateIdle] Process user action and tx events 2026-01-08T15:28:42.164943Z node 9 :PERSQUEUE DEBUG: partition.cpp:2388: [72075186224037894][Partition][0][StateIdle] Events: 0, PendingCommits: 0, PendingWrites: 0 2026-01-08T15:28:42.164959Z node 9 :PERSQUEUE DEBUG: partition.cpp:2337: [72075186224037894][Partition][0][StateIdle] Process user action and tx pending commits 2026-01-08T15:28:42.164980Z node 9 :PERSQUEUE DEBUG: partition.cpp:2388: [72075186224037894][Partition][0][StateIdle] Events: 0, PendingCommits: 0, PendingWrites: 0 2026-01-08T15:28:42.164998Z node 9 :PERSQUEUE DEBUG: partition.cpp:2347: [72075186224037894][Partition][0][StateIdle] Try persist 2026-01-08T15:28:42.251954Z node 9 :PERSQUEUE DEBUG: partition.cpp:2329: [72075186224037892][Partition][0][StateIdle] Process user action and tx events 2026-01-08T15:28:42.251994Z node 9 :PERSQUEUE DEBUG: partition.cpp:2388: [72075186224037892][Partition][0][StateIdle] Events: 0, PendingCommits: 0, PendingWrites: 0 2026-01-08T15:28:42.252012Z node 9 :PERSQUEUE DEBUG: partition.cpp:2337: [72075186224037892][Partition][0][StateIdle] Process user action and tx pending commits 2026-01-08T15:28:42.252034Z node 9 :PERSQUEUE DEBUG: partition.cpp:2388: [72075186224037892][Partition][0][StateIdle] Events: 0, PendingCommits: 0, PendingWrites: 0 2026-01-08T15:28:42.252053Z node 9 :PERSQUEUE DEBUG: partition.cpp:2347: 
[72075186224037892][Partition][0][StateIdle] Try persist 2026-01-08T15:28:42.265216Z node 9 :PERSQUEUE DEBUG: partition.cpp:2329: [72075186224037894][Partition][0][StateIdle] Process user action and tx events 2026-01-08T15:28:42.265250Z node 9 :PERSQUEUE DEBUG: partition.cpp:2388: [72075186224037894][Partition][0][StateIdle] Events: 0, PendingCommits: 0, PendingWrites: 0 2026-01-08T15:28:42.265267Z node 9 :PERSQUEUE DEBUG: partition.cpp:2337: [72075186224037894][Partition][0][StateIdle] Process user action and tx pending commits 2026-01-08T15:28:42.265289Z node 9 :PERSQUEUE DEBUG: partition.cpp:2388: [72075186224037894][Partition][0][StateIdle] Events: 0, PendingCommits: 0, PendingWrites: 0 2026-01-08T15:28:42.265306Z node 9 :PERSQUEUE DEBUG: partition.cpp:2347: [72075186224037894][Partition][0][StateIdle] Try persist 2026-01-08T15:28:42.353178Z node 9 :PERSQUEUE DEBUG: partition.cpp:2329: [72075186224037892][Partition][0][StateIdle] Process user action and tx events 2026-01-08T15:28:42.353218Z node 9 :PERSQUEUE DEBUG: partition.cpp:2388: [72075186224037892][Partition][0][StateIdle] Events: 0, PendingCommits: 0, PendingWrites: 0 2026-01-08T15:28:42.353235Z node 9 :PERSQUEUE DEBUG: partition.cpp:2337: [72075186224037892][Partition][0][StateIdle] Process user action and tx pending commits 2026-01-08T15:28:42.353256Z node 9 :PERSQUEUE DEBUG: partition.cpp:2388: [72075186224037892][Partition][0][StateIdle] Events: 0, PendingCommits: 0, PendingWrites: 0 2026-01-08T15:28:42.353271Z node 9 :PERSQUEUE DEBUG: partition.cpp:2347: [72075186224037892][Partition][0][StateIdle] Try persist 2026-01-08T15:28:42.366404Z node 9 :PERSQUEUE DEBUG: partition.cpp:2329: [72075186224037894][Partition][0][StateIdle] Process user action and tx events 2026-01-08T15:28:42.366450Z node 9 :PERSQUEUE DEBUG: partition.cpp:2388: [72075186224037894][Partition][0][StateIdle] Events: 0, PendingCommits: 0, PendingWrites: 0 2026-01-08T15:28:42.366469Z node 9 :PERSQUEUE DEBUG: partition.cpp:2337: [72075186224037894][Partition][0][StateIdle] Process user action and tx pending commits 2026-01-08T15:28:42.366488Z node 9 :PERSQUEUE DEBUG: partition.cpp:2388: [72075186224037894][Partition][0][StateIdle] Events: 0, PendingCommits: 0, PendingWrites: 0 2026-01-08T15:28:42.366504Z node 9 :PERSQUEUE DEBUG: partition.cpp:2347: [72075186224037894][Partition][0][StateIdle] Try persist 2026-01-08T15:28:42.372862Z node 9 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7747: Cannot get console configs 2026-01-08T15:28:42.372893Z node 9 :IMPORT WARN: schemeshard_import.cpp:440: Table profiles were not loaded 2026-01-08T15:28:42.454394Z node 9 :PERSQUEUE DEBUG: partition.cpp:2329: [72075186224037892][Partition][0][StateIdle] Process user action and tx events 2026-01-08T15:28:42.454425Z node 9 :PERSQUEUE DEBUG: partition.cpp:2388: [72075186224037892][Partition][0][StateIdle] Events: 0, PendingCommits: 0, PendingWrites: 0 2026-01-08T15:28:42.454438Z node 9 :PERSQUEUE DEBUG: partition.cpp:2337: [72075186224037892][Partition][0][StateIdle] Process user action and tx pending commits 2026-01-08T15:28:42.454453Z node 9 :PERSQUEUE DEBUG: partition.cpp:2388: [72075186224037892][Partition][0][StateIdle] Events: 0, PendingCommits: 0, PendingWrites: 0 2026-01-08T15:28:42.454466Z node 9 :PERSQUEUE DEBUG: partition.cpp:2347: [72075186224037892][Partition][0][StateIdle] Try persist 2026-01-08T15:28:42.467425Z node 9 :PERSQUEUE DEBUG: partition.cpp:2329: [72075186224037894][Partition][0][StateIdle] Process user action and tx events 2026-01-08T15:28:42.467465Z 
node 9 :PERSQUEUE DEBUG: partition.cpp:2388: [72075186224037894][Partition][0][StateIdle] Events: 0, PendingCommits: 0, PendingWrites: 0 2026-01-08T15:28:42.467483Z node 9 :PERSQUEUE DEBUG: partition.cpp:2337: [72075186224037894][Partition][0][StateIdle] Process user action and tx pending commits 2026-01-08T15:28:42.467503Z node 9 :PERSQUEUE DEBUG: partition.cpp:2388: [72075186224037894][Partition][0][StateIdle] Events: 0, PendingCommits: 0, PendingWrites: 0 2026-01-08T15:28:42.467520Z node 9 :PERSQUEUE DEBUG: partition.cpp:2347: [72075186224037894][Partition][0][StateIdle] Try persist 2026-01-08T15:28:42.553981Z node 9 :PERSQUEUE DEBUG: partition.cpp:2329: [72075186224037892][Partition][0][StateIdle] Process user action and tx events 2026-01-08T15:28:42.554023Z node 9 :PERSQUEUE DEBUG: partition.cpp:2388: [72075186224037892][Partition][0][StateIdle] Events: 0, PendingCommits: 0, PendingWrites: 0 2026-01-08T15:28:42.554041Z node 9 :PERSQUEUE DEBUG: partition.cpp:2337: [72075186224037892][Partition][0][StateIdle] Process user action and tx pending commits 2026-01-08T15:28:42.554073Z node 9 :PERSQUEUE DEBUG: partition.cpp:2388: [72075186224037892][Partition][0][StateIdle] Events: 0, PendingCommits: 0, PendingWrites: 0 2026-01-08T15:28:42.554092Z node 9 :PERSQUEUE DEBUG: partition.cpp:2347: [72075186224037892][Partition][0][StateIdle] Try persist 2026-01-08T15:28:42.568314Z node 9 :PERSQUEUE DEBUG: partition.cpp:2329: [72075186224037894][Partition][0][StateIdle] Process user action and tx events 2026-01-08T15:28:42.568357Z node 9 :PERSQUEUE DEBUG: partition.cpp:2388: [72075186224037894][Partition][0][StateIdle] Events: 0, PendingCommits: 0, PendingWrites: 0 2026-01-08T15:28:42.568374Z node 9 :PERSQUEUE DEBUG: partition.cpp:2337: [72075186224037894][Partition][0][StateIdle] Process user action and tx pending commits 2026-01-08T15:28:42.568404Z node 9 :PERSQUEUE DEBUG: partition.cpp:2388: [72075186224037894][Partition][0][StateIdle] Events: 0, PendingCommits: 0, PendingWrites: 0 2026-01-08T15:28:42.568421Z node 9 :PERSQUEUE DEBUG: partition.cpp:2347: [72075186224037894][Partition][0][StateIdle] Try persist 2026-01-08T15:28:42.654702Z node 9 :PERSQUEUE DEBUG: partition.cpp:2329: [72075186224037892][Partition][0][StateIdle] Process user action and tx events 2026-01-08T15:28:42.654739Z node 9 :PERSQUEUE DEBUG: partition.cpp:2388: [72075186224037892][Partition][0][StateIdle] Events: 0, PendingCommits: 0, PendingWrites: 0 2026-01-08T15:28:42.654755Z node 9 :PERSQUEUE DEBUG: partition.cpp:2337: [72075186224037892][Partition][0][StateIdle] Process user action and tx pending commits 2026-01-08T15:28:42.654775Z node 9 :PERSQUEUE DEBUG: partition.cpp:2388: [72075186224037892][Partition][0][StateIdle] Events: 0, PendingCommits: 0, PendingWrites: 0 2026-01-08T15:28:42.654790Z node 9 :PERSQUEUE DEBUG: partition.cpp:2347: [72075186224037892][Partition][0][StateIdle] Try persist 2026-01-08T15:28:42.669193Z node 9 :PERSQUEUE DEBUG: partition.cpp:2329: [72075186224037894][Partition][0][StateIdle] Process user action and tx events 2026-01-08T15:28:42.669232Z node 9 :PERSQUEUE DEBUG: partition.cpp:2388: [72075186224037894][Partition][0][StateIdle] Events: 0, PendingCommits: 0, PendingWrites: 0 2026-01-08T15:28:42.669249Z node 9 :PERSQUEUE DEBUG: partition.cpp:2337: [72075186224037894][Partition][0][StateIdle] Process user action and tx pending commits 2026-01-08T15:28:42.669269Z node 9 :PERSQUEUE DEBUG: partition.cpp:2388: [72075186224037894][Partition][0][StateIdle] Events: 0, PendingCommits: 0, PendingWrites: 0 
2026-01-08T15:28:42.669287Z node 9 :PERSQUEUE DEBUG: partition.cpp:2347: [72075186224037894][Partition][0][StateIdle] Try persist 2026-01-08T15:28:42.755333Z node 9 :PERSQUEUE DEBUG: partition.cpp:2329: [72075186224037892][Partition][0][StateIdle] Process user action and tx events 2026-01-08T15:28:42.755374Z node 9 :PERSQUEUE DEBUG: partition.cpp:2388: [72075186224037892][Partition][0][StateIdle] Events: 0, PendingCommits: 0, PendingWrites: 0 2026-01-08T15:28:42.755393Z node 9 :PERSQUEUE DEBUG: partition.cpp:2337: [72075186224037892][Partition][0][StateIdle] Process user action and tx pending commits 2026-01-08T15:28:42.755417Z node 9 :PERSQUEUE DEBUG: partition.cpp:2388: [72075186224037892][Partition][0][StateIdle] Events: 0, PendingCommits: 0, PendingWrites: 0 2026-01-08T15:28:42.755439Z node 9 :PERSQUEUE DEBUG: partition.cpp:2347: [72075186224037892][Partition][0][StateIdle] Try persist 2026-01-08T15:28:42.769964Z node 9 :PERSQUEUE DEBUG: partition.cpp:2329: [72075186224037894][Partition][0][StateIdle] Process user action and tx events 2026-01-08T15:28:42.770005Z node 9 :PERSQUEUE DEBUG: partition.cpp:2388: [72075186224037894][Partition][0][StateIdle] Events: 0, PendingCommits: 0, PendingWrites: 0 2026-01-08T15:28:42.770024Z node 9 :PERSQUEUE DEBUG: partition.cpp:2337: [72075186224037894][Partition][0][StateIdle] Process user action and tx pending commits 2026-01-08T15:28:42.770047Z node 9 :PERSQUEUE DEBUG: partition.cpp:2388: [72075186224037894][Partition][0][StateIdle] Events: 0, PendingCommits: 0, PendingWrites: 0 2026-01-08T15:28:42.770066Z node 9 :PERSQUEUE DEBUG: partition.cpp:2347: [72075186224037894][Partition][0][StateIdle] Try persist |85.6%| [TM] {BAZEL_UPLOAD} ydb/public/sdk/cpp/src/client/persqueue_public/ut/with_offset_ranges_mode_ut/unittest ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/datashard/ut_sequence/unittest >> TSequence::CreateTableWithDefaultFromSequenceBadRequest [GOOD] Test command err: ... waiting for SysViewsRoster update finished 2026-01-08T15:28:20.828270Z node 1 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:125: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, got undelivered to scheme cache: ActorUnknown 2026-01-08T15:28:20.964832Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:114:2161], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2026-01-08T15:28:20.965214Z node 1 :METADATA_PROVIDER ERROR: log.cpp:841: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2026-01-08T15:28:20.965362Z node 1 :METADATA_PROVIDER ERROR: log.cpp:841: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2026-01-08T15:28:21.279805Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2026-01-08T15:28:21.279976Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2026-01-08T15:28:21.374998Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1374: Notification cookie mismatch for subscription [1:34:2081] 1767886097774662 != 1767886097774666 2026-01-08T15:28:21.386428Z node 1 :IMPORT WARN: schemeshard_import.cpp:440: Table profiles were not loaded 2026-01-08T15:28:21.442278Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2026-01-08T15:28:21.549967Z node 1 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: PathErrorUnknown, path: Root/.metadata/script_executions ... waiting for SysViewsRoster update finished (done) 2026-01-08T15:28:21.861411Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2026-01-08T15:28:21.875048Z node 1 :TX_CONVEYOR ERROR: log.cpp:841: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2026-01-08T15:28:21.977546Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:28:22.281149Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:983:2832], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:28:22.281250Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:994:2837], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:28:22.281342Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:263: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:28:22.281995Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:997:2840], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:28:22.282112Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:263: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:28:22.285405Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2026-01-08T15:28:22.425484Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:998:2841], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715658 completed, doublechecking } 2026-01-08T15:28:22.519246Z node 1 :TX_PROXY ERROR: schemereq.cpp:624: Actor# [1:1055:2879] txid# 281474976715659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 43], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } { items { int64_value: 1 } items { uint32_value: 1 } }, { items { int64_value: 2 } items { uint32_value: 2 } }, { items { int64_value: 3 } items { uint32_value: 3 } }, { items { int64_value: 4 } items { uint32_value: 4 } }, { items { int64_value: 5 } items { uint32_value: 5 } }, { items { int64_value: 6 } items { uint32_value: 6 } }, { items { int64_value: 7 } items { uint32_value: 7 } }, { items { int64_value: 8 } items { uint32_value: 8 } }, { items { int64_value: 9 } items { uint32_value: 9 } } ... waiting for SysViewsRoster update finished 2026-01-08T15:28:27.899111Z node 2 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:125: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, got undelivered to scheme cache: ActorUnknown 2026-01-08T15:28:27.949588Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:125: [WorkloadService] [Service] Failed to discover tenant nodes 2026-01-08T15:28:27.952096Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [2:300:2344], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2026-01-08T15:28:27.952562Z node 2 :METADATA_PROVIDER ERROR: log.cpp:841: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2026-01-08T15:28:27.952737Z node 2 :METADATA_PROVIDER ERROR: log.cpp:841: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2026-01-08T15:28:28.181781Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2026-01-08T15:28:28.181916Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2026-01-08T15:28:28.203209Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1374: Notification cookie mismatch for subscription [2:34:2081] 1767886104481581 != 1767886104481584 2026-01-08T15:28:28.209836Z node 2 :IMPORT WARN: schemeshard_import.cpp:440: Table profiles were not loaded 2026-01-08T15:28:28.259314Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2026-01-08T15:28:28.344363Z node 2 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: PathErrorUnknown, path: Root/.metadata/script_executions ... waiting for SysViewsRoster update finished (done) 2026-01-08T15:28:28.659301Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2026-01-08T15:28:28.673121Z node 2 :TX_CONVEYOR ERROR: log.cpp:841: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2026-01-08T15:28:28.785334Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:28:29.087444Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:1030:2869], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:28:29.087614Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:1041:2874], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:28:29.087699Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:263: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:28:29.088708Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:1046:2879], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:28:29.088841Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:263: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:28:29.093320Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2026-01-08T15:28:29.235859Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:1044:2877], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715658 completed, doublechecking } 2026-01-08T15:28:29.271202Z node 2 :TX_PROXY ERROR: schemereq.cpp:624: Actor# [2:1103:2917] txid# 281474976715659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 45], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } { items { int64_value: 1 } items { uint32_value: 1 } }, { items { int64_value: 2 } items { uint32_value: 2 } }, { items { int64_value: 3 } items { uint32_value: 3 } }, { items { int64_value: 4 } items { uint32_value: 4 } }, { items { int64_ ... ervice.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: PathErrorUnknown, path: Root/.metadata/script_executions ... waiting for SysViewsRoster update finished (done) 2026-01-08T15:28:36.517939Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2026-01-08T15:28:36.532131Z node 3 :TX_CONVEYOR ERROR: log.cpp:841: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2026-01-08T15:28:36.638141Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:28:36.939259Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:983:2832], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:28:36.939373Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:993:2837], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:28:36.939440Z node 3 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:263: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:28:36.939999Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:997:2840], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:28:36.940163Z node 3 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:263: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:28:36.943746Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2026-01-08T15:28:37.082958Z node 3 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [3:998:2841], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715658 completed, doublechecking } 2026-01-08T15:28:37.117868Z node 3 :TX_PROXY ERROR: schemereq.cpp:624: Actor# [3:1055:2879] txid# 281474976715659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 43], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } { items { int64_value: 1 } items { uint32_value: 303 } } { items { int64_value: 1 } items { uint32_value: 303 } }, { items { int64_value: 2 } items { uint32_value: 303 } } { items { int64_value: 1 } items { uint32_value: 303 } }, { items { int64_value: 2 } items { uint32_value: 303 } }, { items { int64_value: 3 } items { uint32_value: 303 } } ... waiting for SysViewsRoster update finished 2026-01-08T15:28:42.722368Z node 4 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:125: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, got undelivered to scheme cache: ActorUnknown 2026-01-08T15:28:42.730758Z node 4 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:125: [WorkloadService] [Service] Failed to discover tenant nodes 2026-01-08T15:28:42.735758Z node 4 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [4:111:2158], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2026-01-08T15:28:42.736133Z node 4 :METADATA_PROVIDER ERROR: log.cpp:841: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2026-01-08T15:28:42.736353Z node 4 :METADATA_PROVIDER ERROR: log.cpp:841: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2026-01-08T15:28:43.061176Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2026-01-08T15:28:43.061341Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2026-01-08T15:28:43.089507Z node 4 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1374: Notification cookie mismatch for subscription [4:34:2081] 1767886119001057 != 1767886119001061 2026-01-08T15:28:43.094859Z node 4 :IMPORT WARN: schemeshard_import.cpp:440: Table profiles were not loaded 2026-01-08T15:28:43.139329Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Connecting -> Connected 2026-01-08T15:28:43.214034Z node 4 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: PathErrorUnknown, path: Root/.metadata/script_executions ... waiting for SysViewsRoster update finished (done) 2026-01-08T15:28:43.509029Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2026-01-08T15:28:43.523524Z node 4 :TX_CONVEYOR ERROR: log.cpp:841: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2026-01-08T15:28:43.638309Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:28:43.973282Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:983:2832], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:28:43.973415Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:993:2837], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:28:43.973507Z node 4 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:263: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:28:43.974655Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:999:2842], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:28:43.974874Z node 4 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:263: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:28:43.979792Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2026-01-08T15:28:44.110625Z node 4 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [4:997:2840], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715658 completed, doublechecking } 2026-01-08T15:28:44.148338Z node 4 :TX_PROXY ERROR: schemereq.cpp:624: Actor# [4:1055:2879] txid# 281474976715659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 43], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2026-01-08T15:28:44.531772Z node 4 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:625: Compilation failed, self: [4:1065:2888], status: BAD_REQUEST, issues:
: Error: Execution, code: 1060
:1:98: Error: Key columns are not specified., code: 2017
: Error: Execution, code: 1060
:1:98: Error: Key columns are not specified., code: 2017 2026-01-08T15:28:44.534970Z node 4 :KQP_SESSION WARN: {KQPSA@kqp_session_actor.cpp:2679} SessionId: ydb://session/3?node_id=4&id=YjkwM2MxZTMtYTNiZDZjYTEtOWJmNGMwMzMtNmI2MWNiNzY=, ActorId: [4:980:2829], ActorState: ExecuteState, LegacyTraceId: 01kef3jwy39enat17c3v4e8qx1, ReplyQueryCompileError, remove tx status# BAD_REQUEST issues# { message: "Execution" issue_code: 1060 severity: 1 issues { position { row: 1 column: 98 } message: "Key columns are not specified." end_position { row: 1 column: 98 } issue_code: 2017 severity: 1 } }{ message: "Execution" issue_code: 1060 severity: 1 issues { position { row: 1 column: 98 } message: "Key columns are not specified." end_position { row: 1 column: 98 } issue_code: 2017 severity: 1 } } tx_id# trace_id# 2026-01-08T15:28:44.584945Z node 4 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:625: Compilation failed, self: [4:1087:2904], status: BAD_REQUEST, issues:
: Error: Execution, code: 1060
:1:103: Error: Key columns are not specified., code: 2017
: Error: Execution, code: 1060
:1:103: Error: Key columns are not specified., code: 2017 2026-01-08T15:28:44.588633Z node 4 :KQP_SESSION WARN: {KQPSA@kqp_session_actor.cpp:2679} SessionId: ydb://session/3?node_id=4&id=MTM1ZmVjMzUtNGEzM2NjYS03YzdkZTAyZi1kN2Y0MDgxOA==, ActorId: [4:1079:2896], ActorState: ExecuteState, LegacyTraceId: 01kef3jxfv2gswe7yayyeyehd6, ReplyQueryCompileError, remove tx status# BAD_REQUEST issues# { message: "Execution" issue_code: 1060 severity: 1 issues { position { row: 1 column: 103 } message: "Key columns are not specified." end_position { row: 1 column: 103 } issue_code: 2017 severity: 1 } }{ message: "Execution" issue_code: 1060 severity: 1 issues { position { row: 1 column: 103 } message: "Key columns are not specified." end_position { row: 1 column: 103 } issue_code: 2017 severity: 1 } } tx_id# trace_id# ------- [TM] {asan, default-linux-x86_64, release} ydb/core/persqueue/public/list_topics/ut/unittest >> TListAllTopicsTests::ListLimitAndPaging [GOOD] Test command err: 2026-01-08T15:28:12.036590Z node 1 :METADATA_PROVIDER WARN: log.cpp:841: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7593012951780929114:2066];send_to=[0:7307199536658146131:7762515]; 2026-01-08T15:28:12.044573Z node 1 :METADATA_PROVIDER ERROR: log.cpp:841: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2026-01-08T15:28:12.065928Z node 1 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:125: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, got undelivered to scheme cache: ActorUnknown 2026-01-08T15:28:12.068826Z node 1 :PQ_READ_PROXY DEBUG: caching_service.cpp:44: Direct read cache: : Created 2026-01-08T15:28:12.337543Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2026-01-08T15:28:12.337653Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2026-01-08T15:28:12.340899Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2026-01-08T15:28:12.428210Z node 1 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: LookupError, path: Root/.metadata/script_executions 2026-01-08T15:28:12.430202Z node 1 :IMPORT WARN: schemeshard_import.cpp:440: Table profiles were not loaded 2026-01-08T15:28:12.442566Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1374: Notification cookie mismatch for subscription [1:7593012951780929088:2081] 1767886092035155 != 1767886092035158 2026-01-08T15:28:12.502269Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: /home/runner/.ya/build/build_root/663n/002381/r3tmp/yandex8UD8rx.tmp 2026-01-08T15:28:12.502303Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: /home/runner/.ya/build/build_root/663n/002381/r3tmp/yandex8UD8rx.tmp 2026-01-08T15:28:12.502500Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:202: successfully initialized from file: /home/runner/.ya/build/build_root/663n/002381/r3tmp/yandex8UD8rx.tmp 2026-01-08T15:28:12.502622Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2026-01-08T15:28:12.540858Z INFO: TTestServer started on Port 61363 GrpcPort 18387 2026-01-08T15:28:12.612891Z node 1 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: PathErrorUnknown, path: Root/.metadata/script_executions PQClient connected to localhost:18387 2026-01-08T15:28:12.761468Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2026-01-08T15:28:12.771297Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:123: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2026-01-08T15:28:12.784076Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:123: NotifyTxCompletion, unknown transaction, txId: 281474976710658, at schemeshard: 72057594046644480 2026-01-08T15:28:12.860047Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:28:13.044274Z node 1 :TX_CONVEYOR ERROR: log.cpp:841: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2026-01-08T15:28:15.365216Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7593012964665832010:2330], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:28:15.365308Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7593012964665832019:2333], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:28:15.365386Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:263: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:28:15.371729Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7593012964665832026:2336], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:28:15.371836Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:263: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:28:15.374799Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710662:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2026-01-08T15:28:15.387678Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7593012964665832024:2334], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710662 completed, doublechecking } 2026-01-08T15:28:15.464708Z node 1 :TX_PROXY ERROR: schemereq.cpp:624: Actor# [1:7593012964665832091:2643] txid# 281474976710663, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 45], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2026-01-08T15:28:15.704968Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:28:15.717783Z node 1 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:625: Compilation failed, self: [1:7593012964665832099:2342], status: SCHEME_ERROR, issues:
: Error: Type annotation, code: 1030
:3:16: Error: At function: KiReadTable!
:3:16: Error: Cannot find table 'db.[/Root/PQ/Config/V2/Cluster]' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions., code: 2003 2026-01-08T15:28:15.720321Z node 1 :KQP_SESSION WARN: {KQPSA@kqp_session_actor.cpp:2679} SessionId: ydb://session/3?node_id=1&id=MjY3OGNkM2QtYTNhZDY0NzMtYzNhM2MyOS04MjU1MzYyMg==, ActorId: [1:7593012964665832003:2329], ActorState: ExecuteState, LegacyTraceId: 01kef3j1035wvp53kxb7gk587x, ReplyQueryCompileError, remove tx status# SCHEME_ERROR issues# { message: "Type annotation" issue_code: 1030 severity: 1 issues { position { row: 3 column: 16 } message: "At function: KiReadTable!" end_position { row: 3 column: 16 } severity: 1 issues { position { row: 3 column: 16 } message: "Cannot find table \'db.[/Root/PQ/Config/V2/Cluster]\' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions." end_position { row: 3 column: 16 } issue_code: 2003 severity: 1 } } } tx_id# trace_id# 2026-01-08T15:28:15.722515Z node 1 :PERSQUEUE_CLUSTER_TRACKER ERROR: cluster_tracker.cpp:167: failed to list clusters: { Response { QueryIssues { message: "Type annotation" issue_code: 1030 severity: 1 issues { position { row: 3 column: 16 } message: "At function: KiReadTable!" end_position { row: 3 column: 16 } severity: 1 issues { position { row: 3 column: 16 } message: "Cannot find table \'db.[/Root/PQ/Config/V2/Cluster]\' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions." end_position { row: 3 column: 16 } issue_code: 2003 severity: 1 } } } TxMeta { } } YdbStatus: SCHEME_ERROR ConsumedRu: 1 } 2026-01-08T15:28:15.769204Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:28:15.961298Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) === Init DC: UPSERT INTO `/Root/PQ/Config/V2/Cluster` (name, balancer, local, enabled, weight) VALUES ("dc1", "localhost", true, true, 1000), ("dc2", "dc2.logbroker.yandex.net", false, true, 1000); === CheckClustersList. Subcribe to ClusterTracker from [1:7593012968960799685:2822] 2026-01-08T15:28:17.038947Z node 1 :METADATA_PROVIDER ERROR: log.cpp:841: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7593012951780929114:2066];send_to=[0:7307199536658146131:7762515]; 2026-01-08T15:28:17.039030Z node 1 :METADATA_PROVIDER ERROR: log.cpp:841: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; === CheckClustersList. Ok 2026-01-08T15:28:22.423495Z node 1 :PQ_READ_PROXY DEBUG: grpc_pq_schema.cpp:196: new Create topic request 2026-01-08T15:28:22.432154Z node 1 :PQ_READ_PROXY DEBUG: grpc_pq_schema.cpp:196: new Create topic request 2026-01-08T15:28:22.468934Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:3023: [PQ: 72075186224037892] Handle TEvInterconnect::TEvNodeInfo 2026-01-08T15:28:22.469416Z node 1 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1657: [72075186224037893][] pipe [1:7593012994730603697:2940] connected; active server actors: 1 2026-01-08T15:28:22.469549Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:3023: [PQ: 72075186224037894] Handle TEvInterconnect::TEvNodeInfo 2026-01-08T15:28:22.469591Z node 1 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer__balancing.cpp:1521: [72075186224037893][topic1] updating configuration. Deleted partitions []. Added partitions [0] 2026-01-08T15:28:22.470092Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:3055: [PQ: 72075186224037892] Transactions request. From tx_00000000000000000000, To tx_18446744073709551615 2026-01-08T15:28:22.470253Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:788: [PQ: 72075186224037892] no config, start with empty partitions and default config 2026-01-08T15:28:22.470280Z node 1 :PERSQUEUE NOTICE: pq_impl.cpp:927: [PQ: 72075186224037892] disable metering: r ... ess user action and tx pending commits 2026-01-08T15:28:44.245921Z node 3 :PERSQUEUE DEBUG: partition.cpp:2480: [72075186224037894][Partition][0][StateIdle] TPartition::ProcessUserActionAndTxPendingCommit(TTransaction[ProposeConfig]) 2026-01-08T15:28:44.246061Z node 3 :PERSQUEUE DEBUG: partition.cpp:3950: [72075186224037894][Partition][0][StateIdle] Schedule reply tx done 281474976710676 2026-01-08T15:28:44.246078Z node 3 :PERSQUEUE DEBUG: partition.cpp:2388: [72075186224037894][Partition][0][StateIdle] Events: 0, PendingCommits: 0, PendingWrites: 1 2026-01-08T15:28:44.246090Z node 3 :PERSQUEUE DEBUG: partition.cpp:2342: [72075186224037894][Partition][0][StateIdle] Batch completed (1) 2026-01-08T15:28:44.246104Z node 3 :PERSQUEUE DEBUG: partition.cpp:2347: [72075186224037894][Partition][0][StateIdle] Try persist 2026-01-08T15:28:44.246298Z node 3 :PERSQUEUE DEBUG: read.h:275: [72075186224037894][PQCacheProxy]CacheProxy. Passthrough write request to KV 2026-01-08T15:28:44.246855Z node 3 :PERSQUEUE DEBUG: partition.cpp:2153: [72075186224037896][Partition][0][StateIdle] Received TEvKeyValue::TEvResponse 2026-01-08T15:28:44.247128Z node 3 :PERSQUEUE DEBUG: partition_mlp.cpp:122: [72075186224037896][Partition][0][StateIdle] Initializing MLP Consumers: 0 2026-01-08T15:28:44.247202Z node 3 :PERSQUEUE DEBUG: partition_write.cpp:572: [72075186224037896][Partition][0][StateIdle] TPartition::HandleWriteResponse writeNewSize# 0 WriteNewSizeFromSupportivePartitions# 0 2026-01-08T15:28:44.247220Z node 3 :PERSQUEUE DEBUG: partition.cpp:2329: [72075186224037896][Partition][0][StateIdle] Process user action and tx events 2026-01-08T15:28:44.247233Z node 3 :PERSQUEUE DEBUG: partition.cpp:2388: [72075186224037896][Partition][0][StateIdle] Events: 0, PendingCommits: 0, PendingWrites: 0 2026-01-08T15:28:44.247242Z node 3 :PERSQUEUE DEBUG: partition.cpp:2337: [72075186224037896][Partition][0][StateIdle] Process user action and tx pending commits 2026-01-08T15:28:44.247260Z node 3 :PERSQUEUE DEBUG: partition.cpp:2388: [72075186224037896][Partition][0][StateIdle] Events: 0, PendingCommits: 0, PendingWrites: 0 2026-01-08T15:28:44.247272Z node 3 :PERSQUEUE DEBUG: partition.cpp:2347: [72075186224037896][Partition][0][StateIdle] Try persist 2026-01-08T15:28:44.247291Z node 3 :PERSQUEUE DEBUG: partition_compaction.cpp:183: [72075186224037896][Partition][0][StateIdle] No data for blobs compaction 2026-01-08T15:28:44.247672Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:399: [PQ: 72075186224037896] Apply new config PartitionConfig { MaxCountInPartition: 2147483647 MaxSizeInPartition: 9223372036854775807 LifetimeSeconds: 64800 SourceIdLifetimeSeconds: 1382400 WriteSpeedInBytesPerSecond: 2097152 BurstSize: 2097152 TotalPartitions: 1 ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } SourceIdMaxCounts: 6000000 } PartitionIds: 0 TopicName: "topic2" Version: 0 RequireAuthWrite: true RequireAuthRead: true FormatVersion: 0 Codecs { } TopicPath: "/Root/dir1/topic2" YcCloudId: "" YcFolderId: "" YdbDatabaseId: "" YdbDatabasePath: "/Root" Partitions { PartitionId: 0 Status: Active CreateVersion: 1 TabletId: 0 } AllPartitions { PartitionId: 0 Status: Active CreateVersion: 1 TabletId: 0 } MonitoringProjectId: "" 2026-01-08T15:28:44.247734Z node 3 :PERSQUEUE NOTICE: pq_impl.cpp:927: [PQ: 72075186224037896] disable metering: reason# billing is not enabled in BillingMeteringConfig 2026-01-08T15:28:44.247860Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:3626: [PQ: 72075186224037896] Send TEvKeyValue::TEvRequest (WRITE_TX_COOKIE) 2026-01-08T15:28:44.248241Z node 3 :PERSQUEUE DEBUG: partition.cpp:2153: [72075186224037894][Partition][0][StateIdle] Received TEvKeyValue::TEvResponse 2026-01-08T15:28:44.248468Z node 3 :PERSQUEUE DEBUG: partition_mlp.cpp:122: [72075186224037894][Partition][0][StateIdle] Initializing MLP Consumers: 0 2026-01-08T15:28:44.248555Z node 3 :PERSQUEUE DEBUG: partition_write.cpp:572: [72075186224037894][Partition][0][StateIdle] TPartition::HandleWriteResponse writeNewSize# 0 WriteNewSizeFromSupportivePartitions# 0 2026-01-08T15:28:44.248571Z node 3 :PERSQUEUE DEBUG: partition.cpp:2329: [72075186224037894][Partition][0][StateIdle] Process user action and tx events 2026-01-08T15:28:44.248582Z node 3 :PERSQUEUE DEBUG: partition.cpp:2388: [72075186224037894][Partition][0][StateIdle] Events: 0, PendingCommits: 0, PendingWrites: 0 2026-01-08T15:28:44.248593Z node 3 :PERSQUEUE DEBUG: partition.cpp:2337: [72075186224037894][Partition][0][StateIdle] Process user action and tx pending commits 2026-01-08T15:28:44.248605Z node 3 :PERSQUEUE DEBUG: partition.cpp:2388: [72075186224037894][Partition][0][StateIdle] Events: 0, PendingCommits: 0, PendingWrites: 0 2026-01-08T15:28:44.248615Z node 3 :PERSQUEUE DEBUG: partition.cpp:2347: [72075186224037894][Partition][0][StateIdle] Try persist 2026-01-08T15:28:44.248632Z node 3 :PERSQUEUE DEBUG: partition_compaction.cpp:183: [72075186224037894][Partition][0][StateIdle] No data for blobs compaction 2026-01-08T15:28:44.248948Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:399: [PQ: 72075186224037894] Apply new config PartitionConfig { MaxCountInPartition: 2147483647 MaxSizeInPartition: 9223372036854775807 LifetimeSeconds: 64800 SourceIdLifetimeSeconds: 1382400 WriteSpeedInBytesPerSecond: 2097152 BurstSize: 2097152 TotalPartitions: 1 ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } SourceIdMaxCounts: 6000000 } PartitionIds: 0 TopicName: "topic3" Version: 0 RequireAuthWrite: true RequireAuthRead: true FormatVersion: 0 Codecs { } TopicPath: "/Root/dir2/topic3" YcCloudId: "" YcFolderId: "" YdbDatabaseId: "" YdbDatabasePath: "/Root" Partitions { PartitionId: 0 Status: Active CreateVersion: 1 TabletId: 0 } AllPartitions { PartitionId: 0 Status: Active CreateVersion: 1 TabletId: 0 } MonitoringProjectId: "" 2026-01-08T15:28:44.249010Z node 3 :PERSQUEUE NOTICE: pq_impl.cpp:927: [PQ: 72075186224037894] disable metering: reason# billing is not enabled in BillingMeteringConfig 2026-01-08T15:28:44.249114Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:3626: [PQ: 72075186224037894] Send TEvKeyValue::TEvRequest (WRITE_TX_COOKIE) 2026-01-08T15:28:44.249899Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:1059: [PQ: 72075186224037896] Handle TEvKeyValue::TEvResponse (WRITE_TX_COOKIE) 2026-01-08T15:28:44.250050Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:1059: [PQ: 72075186224037894] Handle TEvKeyValue::TEvResponse (WRITE_TX_COOKIE) 2026-01-08T15:28:44.331120Z node 3 :PERSQUEUE DEBUG: partition.cpp:2329: [72075186224037892][Partition][0][StateIdle] Process user action and tx events 2026-01-08T15:28:44.331155Z node 3 :PERSQUEUE DEBUG: partition.cpp:2388: [72075186224037892][Partition][0][StateIdle] Events: 0, PendingCommits: 0, PendingWrites: 0 2026-01-08T15:28:44.331172Z node 3 :PERSQUEUE DEBUG: partition.cpp:2337: [72075186224037892][Partition][0][StateIdle] Process user action and tx pending commits 2026-01-08T15:28:44.331190Z node 3 :PERSQUEUE DEBUG: partition.cpp:2388: [72075186224037892][Partition][0][StateIdle] Events: 0, PendingCommits: 0, PendingWrites: 0
2026-01-08T15:28:44.331204Z node 3 :PERSQUEUE DEBUG: partition.cpp:2347: [72075186224037892][Partition][0][StateIdle] Try persist 2026-01-08T15:28:44.346679Z node 3 :PERSQUEUE DEBUG: partition.cpp:2329: [72075186224037896][Partition][0][StateIdle] Process user action and tx events 2026-01-08T15:28:44.346721Z node 3 :PERSQUEUE DEBUG: partition.cpp:2388: [72075186224037896][Partition][0][StateIdle] Events: 0, PendingCommits: 0, PendingWrites: 0 2026-01-08T15:28:44.346736Z node 3 :PERSQUEUE DEBUG: partition.cpp:2337: [72075186224037896][Partition][0][StateIdle] Process user action and tx pending commits 2026-01-08T15:28:44.346755Z node 3 :PERSQUEUE DEBUG: partition.cpp:2388: [72075186224037896][Partition][0][StateIdle] Events: 0, PendingCommits: 0, PendingWrites: 0 2026-01-08T15:28:44.346768Z node 3 :PERSQUEUE DEBUG: partition.cpp:2347: [72075186224037896][Partition][0][StateIdle] Try persist 2026-01-08T15:28:44.346832Z node 3 :PERSQUEUE DEBUG: partition.cpp:2329: [72075186224037894][Partition][0][StateIdle] Process user action and tx events 2026-01-08T15:28:44.347208Z node 3 :PERSQUEUE DEBUG: partition.cpp:2388: [72075186224037894][Partition][0][StateIdle] Events: 0, PendingCommits: 0, PendingWrites: 0 2026-01-08T15:28:44.347222Z node 3 :PERSQUEUE DEBUG: partition.cpp:2337: [72075186224037894][Partition][0][StateIdle] Process user action and tx pending commits 2026-01-08T15:28:44.347236Z node 3 :PERSQUEUE DEBUG: partition.cpp:2388: [72075186224037894][Partition][0][StateIdle] Events: 0, PendingCommits: 0, PendingWrites: 0 2026-01-08T15:28:44.347246Z node 3 :PERSQUEUE DEBUG: partition.cpp:2347: [72075186224037894][Partition][0][StateIdle] Try persist 2026-01-08T15:28:44.431432Z node 3 :PERSQUEUE DEBUG: partition.cpp:2329: [72075186224037892][Partition][0][StateIdle] Process user action and tx events 2026-01-08T15:28:44.431470Z node 3 :PERSQUEUE DEBUG: partition.cpp:2388: [72075186224037892][Partition][0][StateIdle] Events: 0, PendingCommits: 0, PendingWrites: 0 2026-01-08T15:28:44.431485Z node 3 :PERSQUEUE DEBUG: partition.cpp:2337: [72075186224037892][Partition][0][StateIdle] Process user action and tx pending commits 2026-01-08T15:28:44.431504Z node 3 :PERSQUEUE DEBUG: partition.cpp:2388: [72075186224037892][Partition][0][StateIdle] Events: 0, PendingCommits: 0, PendingWrites: 0 2026-01-08T15:28:44.431518Z node 3 :PERSQUEUE DEBUG: partition.cpp:2347: [72075186224037892][Partition][0][StateIdle] Try persist 2026-01-08T15:28:44.446806Z node 3 :PERSQUEUE DEBUG: partition.cpp:2329: [72075186224037896][Partition][0][StateIdle] Process user action and tx events 2026-01-08T15:28:44.446840Z node 3 :PERSQUEUE DEBUG: partition.cpp:2388: [72075186224037896][Partition][0][StateIdle] Events: 0, PendingCommits: 0, PendingWrites: 0 2026-01-08T15:28:44.446855Z node 3 :PERSQUEUE DEBUG: partition.cpp:2337: [72075186224037896][Partition][0][StateIdle] Process user action and tx pending commits 2026-01-08T15:28:44.446875Z node 3 :PERSQUEUE DEBUG: partition.cpp:2388: [72075186224037896][Partition][0][StateIdle] Events: 0, PendingCommits: 0, PendingWrites: 0 2026-01-08T15:28:44.446891Z node 3 :PERSQUEUE DEBUG: partition.cpp:2347: [72075186224037896][Partition][0][StateIdle] Try persist 2026-01-08T15:28:44.446961Z node 3 :PERSQUEUE DEBUG: partition.cpp:2329: [72075186224037894][Partition][0][StateIdle] Process user action and tx events 2026-01-08T15:28:44.446972Z node 3 :PERSQUEUE DEBUG: partition.cpp:2388: [72075186224037894][Partition][0][StateIdle] Events: 0, PendingCommits: 0, PendingWrites: 0 
2026-01-08T15:28:44.446980Z node 3 :PERSQUEUE DEBUG: partition.cpp:2337: [72075186224037894][Partition][0][StateIdle] Process user action and tx pending commits 2026-01-08T15:28:44.446992Z node 3 :PERSQUEUE DEBUG: partition.cpp:2388: [72075186224037894][Partition][0][StateIdle] Events: 0, PendingCommits: 0, PendingWrites: 0 2026-01-08T15:28:44.447001Z node 3 :PERSQUEUE DEBUG: partition.cpp:2347: [72075186224037894][Partition][0][StateIdle] Try persist |85.6%| [TM] {BAZEL_UPLOAD} ydb/core/tx/datashard/ut_sequence/unittest |85.6%| [TM] {BAZEL_UPLOAD} ydb/core/persqueue/public/list_topics/ut/unittest >> TMemoryController::Config_ConsumerLimits [GOOD] >> TMemoryController::SharedCache ------- [TM] {asan, default-linux-x86_64, release} ydb/core/grpc_services/tablet/ut/unittest >> TabletService_Restart::OnlyAdminsAllowed [GOOD] Test command err: ... waiting for SysViewsRoster update finished 2026-01-08T15:27:14.118822Z node 1 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:125: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, got undelivered to scheme cache: ActorUnknown 2026-01-08T15:27:15.201549Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:114:2161], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2026-01-08T15:27:15.204679Z node 1 :METADATA_PROVIDER ERROR: log.cpp:841: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2026-01-08T15:27:15.206976Z node 1 :METADATA_PROVIDER ERROR: log.cpp:841: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2026-01-08T15:27:17.635647Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2026-01-08T15:27:17.636837Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2026-01-08T15:27:18.131356Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1374: Notification cookie mismatch for subscription [1:34:2081] 1767886030381028 != 1767886030381032 2026-01-08T15:27:18.220590Z node 1 :IMPORT WARN: schemeshard_import.cpp:440: Table profiles were not loaded 2026-01-08T15:27:18.367859Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2026-01-08T15:27:18.601431Z node 1 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: PathErrorUnknown, path: Root/.metadata/script_executions ... waiting for SysViewsRoster update finished (done) 2026-01-08T15:27:19.105206Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2026-01-08T15:27:19.165287Z node 1 :TX_CONVEYOR ERROR: log.cpp:841: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; ... reading schema ... changing schema (dry run) ... reading schema ... changing schema ... reading schema ... waiting for SysViewsRoster update finished 2026-01-08T15:27:35.520114Z node 2 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:125: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, got undelivered to scheme cache: ActorUnknown 2026-01-08T15:27:35.530577Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:125: [WorkloadService] [Service] Failed to discover tenant nodes 2026-01-08T15:27:35.532755Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [2:300:2344], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2026-01-08T15:27:35.533135Z node 2 :METADATA_PROVIDER ERROR: log.cpp:841: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2026-01-08T15:27:35.533285Z node 2 :METADATA_PROVIDER ERROR: log.cpp:841: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2026-01-08T15:27:35.795445Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2026-01-08T15:27:35.795549Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2026-01-08T15:27:35.819160Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1374: Notification cookie mismatch for subscription [2:34:2081] 1767886048096481 != 1767886048096484 2026-01-08T15:27:35.828104Z node 2 :IMPORT WARN: schemeshard_import.cpp:440: Table profiles were not loaded 2026-01-08T15:27:35.876283Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2026-01-08T15:27:35.950302Z node 2 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: PathErrorUnknown, path: Root/.metadata/script_executions ... waiting for SysViewsRoster update finished (done) 2026-01-08T15:27:36.300829Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2026-01-08T15:27:36.314930Z node 2 :TX_CONVEYOR ERROR: log.cpp:841: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; ... reading schema (without token) ... reading schema (non-admin token) ... reading schema (admin token) ... waiting for SysViewsRoster update finished 2026-01-08T15:27:56.995500Z node 3 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:125: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, got undelivered to scheme cache: ActorUnknown 2026-01-08T15:27:57.029040Z node 3 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:125: [WorkloadService] [Service] Failed to discover tenant nodes 2026-01-08T15:27:57.054459Z node 3 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [3:316:2360], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2026-01-08T15:27:57.055136Z node 3 :METADATA_PROVIDER ERROR: log.cpp:841: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2026-01-08T15:27:57.055263Z node 3 :METADATA_PROVIDER ERROR: log.cpp:841: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2026-01-08T15:28:00.047391Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2026-01-08T15:28:00.048493Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2026-01-08T15:28:00.301383Z node 3 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1374: Notification cookie mismatch for subscription [3:34:2081] 1767886061894802 != 1767886061894806 2026-01-08T15:28:00.384606Z node 3 :IMPORT WARN: schemeshard_import.cpp:440: Table profiles were not loaded 2026-01-08T15:28:00.587822Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Connecting -> Connected 2026-01-08T15:28:00.727797Z node 3 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: PathErrorUnknown, path: Root/.metadata/script_executions ... waiting for SysViewsRoster update finished (done) 2026-01-08T15:28:01.802855Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2026-01-08T15:28:01.888341Z node 3 :TX_CONVEYOR ERROR: log.cpp:841: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; ... waiting for SysViewsRoster update finished 2026-01-08T15:28:09.205825Z node 4 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:125: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, got undelivered to scheme cache: ActorUnknown 2026-01-08T15:28:09.230209Z node 4 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:125: [WorkloadService] [Service] Failed to discover tenant nodes 2026-01-08T15:28:09.234978Z node 4 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [4:111:2158], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2026-01-08T15:28:09.235347Z node 4 :METADATA_PROVIDER ERROR: log.cpp:841: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2026-01-08T15:28:09.235529Z node 4 :METADATA_PROVIDER ERROR: log.cpp:841: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2026-01-08T15:28:09.551147Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2026-01-08T15:28:09.551305Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2026-01-08T15:28:09.580915Z node 4 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1374: Notification cookie mismatch for subscription [4:34:2081] 1767886085824643 != 1767886085824647 2026-01-08T15:28:09.586765Z node 4 :IMPORT WARN: schemeshard_import.cpp:440: Table profiles were not loaded 2026-01-08T15:28:09.639743Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Connecting -> Connected 2026-01-08T15:28:09.733386Z node 4 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: PathErrorUnknown, path: Root/.metadata/script_executions ... waiting for SysViewsRoster update finished (done) 2026-01-08T15:28:10.044171Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2026-01-08T15:28:10.058550Z node 4 :TX_CONVEYOR ERROR: log.cpp:841: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; ... waiting for SysViewsRoster update finished 2026-01-08T15:28:14.403954Z node 5 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:125: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, got undelivered to scheme cache: ActorUnknown 2026-01-08T15:28:14.412695Z node 5 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:125: [WorkloadService] [Service] Failed to discover tenant nodes 2026-01-08T15:28:14.415292Z node 5 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [5:297:2341], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2026-01-08T15:28:14.415740Z node 5 :METADATA_PROVIDER ERROR: log.cpp:841: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//R ... etadata/script_executions ... waiting for SysViewsRoster update finished (done) 2026-01-08T15:28:20.795763Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2026-01-08T15:28:20.813975Z node 6 :TX_CONVEYOR ERROR: log.cpp:841: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; ... waiting for SysViewsRoster update finished 2026-01-08T15:28:25.675109Z node 7 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:125: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, got undelivered to scheme cache: ActorUnknown 2026-01-08T15:28:25.689521Z node 7 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [7:109:2156], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2026-01-08T15:28:25.689706Z node 7 :METADATA_PROVIDER ERROR: log.cpp:841: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2026-01-08T15:28:25.689901Z node 7 :METADATA_PROVIDER ERROR: log.cpp:841: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2026-01-08T15:28:25.995908Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2026-01-08T15:28:25.996087Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2026-01-08T15:28:26.031287Z node 7 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1374: Notification cookie mismatch for subscription [7:34:2081] 1767886101622314 != 1767886101622318 2026-01-08T15:28:26.039292Z node 7 :IMPORT WARN: schemeshard_import.cpp:440: Table profiles were not loaded 2026-01-08T15:28:26.091785Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Connecting -> Connected 2026-01-08T15:28:26.179939Z node 7 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: PathErrorUnknown, path: Root/.metadata/script_executions ... waiting for SysViewsRoster update finished (done) 2026-01-08T15:28:26.491658Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2026-01-08T15:28:26.511815Z node 7 :TX_CONVEYOR ERROR: log.cpp:841: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; ... waiting for SysViewsRoster update finished 2026-01-08T15:28:31.450179Z node 8 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:125: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, got undelivered to scheme cache: ActorUnknown 2026-01-08T15:28:31.457566Z node 8 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:125: [WorkloadService] [Service] Failed to discover tenant nodes 2026-01-08T15:28:31.462098Z node 8 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [8:293:2338], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2026-01-08T15:28:31.462466Z node 8 :METADATA_PROVIDER ERROR: log.cpp:841: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2026-01-08T15:28:31.462619Z node 8 :METADATA_PROVIDER ERROR: log.cpp:841: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2026-01-08T15:28:31.759521Z node 8 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(8, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2026-01-08T15:28:31.759687Z node 8 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(8, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2026-01-08T15:28:31.791436Z node 8 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1374: Notification cookie mismatch for subscription [8:34:2081] 1767886107457846 != 1767886107457850 2026-01-08T15:28:31.798393Z node 8 :IMPORT WARN: schemeshard_import.cpp:440: Table profiles were not loaded 2026-01-08T15:28:31.847085Z node 8 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(8, (0,0,0,0)) VolatileState: Connecting -> Connected 2026-01-08T15:28:31.932666Z node 8 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: PathErrorUnknown, path: Root/.metadata/script_executions ... waiting for SysViewsRoster update finished (done) 2026-01-08T15:28:32.239767Z node 8 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2026-01-08T15:28:32.254746Z node 8 :TX_CONVEYOR ERROR: log.cpp:841: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; ... waiting for SysViewsRoster update finished 2026-01-08T15:28:37.511483Z node 9 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:125: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, got undelivered to scheme cache: ActorUnknown 2026-01-08T15:28:37.520560Z node 9 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:125: [WorkloadService] [Service] Failed to discover tenant nodes 2026-01-08T15:28:37.523830Z node 9 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [9:316:2360], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2026-01-08T15:28:37.524099Z node 9 :METADATA_PROVIDER ERROR: log.cpp:841: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2026-01-08T15:28:37.524307Z node 9 :METADATA_PROVIDER ERROR: log.cpp:841: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2026-01-08T15:28:37.827434Z node 9 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(9, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2026-01-08T15:28:37.827591Z node 9 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(9, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2026-01-08T15:28:37.864064Z node 9 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1374: Notification cookie mismatch for subscription [9:34:2081] 1767886113313322 != 1767886113313326 2026-01-08T15:28:37.869680Z node 9 :IMPORT WARN: schemeshard_import.cpp:440: Table profiles were not loaded 2026-01-08T15:28:37.971273Z node 9 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(9, (0,0,0,0)) VolatileState: Connecting -> Connected 2026-01-08T15:28:38.054691Z node 9 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: PathErrorUnknown, path: Root/.metadata/script_executions ... waiting for SysViewsRoster update finished (done) 2026-01-08T15:28:38.373116Z node 9 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2026-01-08T15:28:38.387401Z node 9 :TX_CONVEYOR ERROR: log.cpp:841: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; ... restarting tablet 72057594046644480 2026-01-08T15:28:38.616429Z node 9 :IMPORT WARN: schemeshard_import.cpp:440: Table profiles were not loaded ... waiting for SysViewsRoster update finished 2026-01-08T15:28:43.129133Z node 10 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:125: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, got undelivered to scheme cache: ActorUnknown 2026-01-08T15:28:43.138129Z node 10 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:125: [WorkloadService] [Service] Failed to discover tenant nodes 2026-01-08T15:28:43.143019Z node 10 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [10:315:2358], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2026-01-08T15:28:43.143345Z node 10 :METADATA_PROVIDER ERROR: log.cpp:841: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2026-01-08T15:28:43.143499Z node 10 :METADATA_PROVIDER ERROR: log.cpp:841: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2026-01-08T15:28:43.483782Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2026-01-08T15:28:43.483952Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2026-01-08T15:28:43.510851Z node 10 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1374: Notification cookie mismatch for subscription [10:34:2081] 1767886119469326 != 1767886119469330 2026-01-08T15:28:43.519832Z node 10 :IMPORT WARN: schemeshard_import.cpp:440: Table profiles were not loaded 2026-01-08T15:28:43.567525Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Connecting -> Connected 2026-01-08T15:28:43.642161Z node 10 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: PathErrorUnknown, path: Root/.metadata/script_executions ... waiting for SysViewsRoster update finished (done) 2026-01-08T15:28:43.949693Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2026-01-08T15:28:43.967251Z node 10 :TX_CONVEYOR ERROR: log.cpp:841: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; ... restarting tablet 72057594046644480 (without token) ... restarting tablet 72057594046644480 (non-admin token) ... restarting tablet 72057594046644480 (admin token) 2026-01-08T15:28:44.317202Z node 10 :IMPORT WARN: schemeshard_import.cpp:440: Table profiles were not loaded |85.6%| [TM] {BAZEL_UPLOAD} ydb/core/grpc_services/tablet/ut/unittest >> TControlPlaneProxyCheckNegativePermissionsSuccess::ShouldSendDeleteConnection [GOOD] >> TControlPlaneProxyCheckNegativePermissionsSuccess::ShouldSendTestConnection >> DataShardFollowers::FollowerKeepsWorkingAfterMvccReadTable >> TGRpcRateLimiterTest::UpdateResource [GOOD] >> TGRpcRateLimiterTest::DropResource >> DescribeSchemaSecretsService::BigBatchRequest [GOOD] >> DescribeSchemaSecretsService::EmptyBatch >> TControlPlaneProxyCheckNegativePermissionsSuccess::ShouldSendTestConnection [GOOD] >> TControlPlaneProxyCheckNegativePermissionsSuccess::ShouldSendTestConnectionWithServiceAccount >> DataShardSnapshotIsolation::ReadWriteUncommittedUpsertBlockedByVolatileNoConflict [GOOD] >> DataShardSnapshotIsolation::ReadWriteUpsertOnCommitBlockedByVolatileConflict >> TControlPlaneProxyCheckNegativePermissionsSuccess::ShouldSendTestConnectionWithServiceAccount [GOOD] >> TControlPlaneProxyCheckNegativePermissionsSuccess::ShouldSendCreateBinding >> TWriteQuoterTests::WaitDeduplicationIdQuota [GOOD] >> TFetchRequestTests::HappyWay >> TControlPlaneProxyCheckNegativePermissionsSuccess::ShouldSendCreateBinding [GOOD] >> TControlPlaneProxyCheckNegativePermissionsSuccess::ShouldSendListBindings >> PersQueueSdkReadSessionTest::ReadSessionWithClose [GOOD] >> PersQueueSdkReadSessionTest::ReadSessionWithCloseNotCommitted >> TCreateAndDropViewTest::DropView [GOOD] >> TCreateAndDropViewTest::DropViewDisabledFeatureFlag >> TControlPlaneProxyCheckNegativePermissionsSuccess::ShouldSendListBindings [GOOD] >> TControlPlaneProxyCheckNegativePermissionsSuccess::ShouldSendDescribeBinding >> test_cte.py::TestCte::test_toplevel [GOOD] >> Backpressure::MonteCarlo [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/persqueue/pqtablet/quota/ut/unittest >> TWriteQuoterTests::WaitDeduplicationIdQuota [GOOD] Test command err: processed_blobs=41800 quoted_time=9.980000s 2026-01-08T15:28:37.537306Z node 1 :METADATA_PROVIDER WARN: log.cpp:841: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7593013055593499920:2082];send_to=[0:7307199536658146131:7762515]; 2026-01-08T15:28:37.537370Z node 1 :METADATA_PROVIDER ERROR: log.cpp:841: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2026-01-08T15:28:37.573108Z node 1 :PQ_READ_PROXY DEBUG: caching_service.cpp:44: Direct read cache: : Created 2026-01-08T15:28:37.778375Z node 1 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: LookupError, path: Root/.metadata/script_executions 2026-01-08T15:28:37.806290Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2026-01-08T15:28:37.806450Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2026-01-08T15:28:37.865857Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2026-01-08T15:28:37.890328Z node 1 :IMPORT WARN: schemeshard_import.cpp:440: Table profiles were not loaded 2026-01-08T15:28:37.954405Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: /home/runner/.ya/build/build_root/663n/003a73/r3tmp/yandexHifBdh.tmp 2026-01-08T15:28:37.954432Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: /home/runner/.ya/build/build_root/663n/003a73/r3tmp/yandexHifBdh.tmp 2026-01-08T15:28:37.954631Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:202: successfully initialized from file: /home/runner/.ya/build/build_root/663n/003a73/r3tmp/yandexHifBdh.tmp 2026-01-08T15:28:37.954787Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2026-01-08T15:28:37.981827Z node 1 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: PathErrorUnknown, path: Root/.metadata/script_executions 2026-01-08T15:28:37.986276Z INFO: TTestServer started on Port 8560 GrpcPort 32118 PQClient connected to localhost:32118 2026-01-08T15:28:38.194933Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2026-01-08T15:28:38.199864Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:123: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2026-01-08T15:28:38.307365Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:28:38.471478Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:123: NotifyTxCompletion, unknown transaction, txId: 281474976715660, at schemeshard: 72057594046644480 2026-01-08T15:28:38.543771Z node 1 :TX_CONVEYOR ERROR: log.cpp:841: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2026-01-08T15:28:40.795356Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7593013068478402798:2331], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:28:40.795503Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:263: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:28:40.799640Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7593013068478402809:2334], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:28:40.799778Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:263: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:28:40.802955Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7593013068478402813:2336], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:28:40.811745Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715662:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2026-01-08T15:28:40.818523Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7593013068478402842:2339], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:28:40.819151Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:263: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:28:40.833567Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7593013068478402815:2337], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715662 completed, doublechecking } 2026-01-08T15:28:41.065334Z node 1 :TX_PROXY ERROR: schemereq.cpp:624: Actor# [1:7593013068478402881:2645] txid# 281474976715663, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 45], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2026-01-08T15:28:41.109360Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:28:41.208463Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:28:41.369482Z node 1 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:625: Compilation failed, self: [1:7593013072773370193:2344], status: SCHEME_ERROR, issues:
: Error: Type annotation, code: 1030
:5:17: Error: At function: KiReadTable!
:5:17: Error: Cannot find table 'db.[/Root/PQ/Config/V2/Versions]' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions., code: 2003 2026-01-08T15:28:41.372047Z node 1 :KQP_SESSION WARN: {KQPSA@kqp_session_actor.cpp:2679} SessionId: ydb://session/3?node_id=1&id=YzkxY2U2MzQtNGYxZjhiZC0zZTUxZWRkOC04NDg0ZTlmNg==, ActorId: [1:7593013068478402792:2330], ActorState: ExecuteState, LegacyTraceId: 01kef3jssk17863w0rfernfyht, ReplyQueryCompileError, remove tx status# SCHEME_ERROR issues# { message: "Type annotation" issue_code: 1030 severity: 1 issues { position { row: 5 column: 17 } message: "At function: KiReadTable!" end_position { row: 5 column: 17 } severity: 1 issues { position { row: 5 column: 17 } message: "Cannot find table \'db.[/Root/PQ/Config/V2/Versions]\' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions." end_position { row: 5 column: 17 } issue_code: 2003 severity: 1 } } } tx_id# trace_id# 2026-01-08T15:28:41.380676Z node 1 :PERSQUEUE_CLUSTER_TRACKER ERROR: cluster_tracker.cpp:167: failed to list clusters: { Response { QueryIssues { message: "Type annotation" issue_code: 1030 severity: 1 issues { position { row: 5 column: 17 } message: "At function: KiReadTable!" end_position { row: 5 column: 17 } severity: 1 issues { position { row: 5 column: 17 } message: "Cannot find table \'db.[/Root/PQ/Config/V2/Versions]\' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions." end_position { row: 5 column: 17 } issue_code: 2003 severity: 1 } } } TxMeta { } } YdbStatus: SCHEME_ERROR ConsumedRu: 1 } 2026-01-08T15:28:41.391557Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) === Init DC: UPSERT INTO `/Root/PQ/Config/V2/Cluster` (name, balancer, local, enabled, weight) VALUES ("dc1", "localhost", true, true, 1000), ("dc2", "dc2.logbroker.yandex.net", false, true, 1000); === CheckClustersList. Subcribe to ClusterTracker from [1:7593013072773370480:2826] 2026-01-08T15:28:42.537471Z node 1 :METADATA_PROVIDER ERROR: log.cpp:841: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7593013055593499920:2082];send_to=[0:7307199536658146131:7762515]; 2026-01-08T15:28:42.537545Z node 1 :METADATA_PROVIDER ERROR: log.cpp:841: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; === CheckClustersList. 
Ok 2026-01-08T15:28:46.851058Z :TEST_CASE_NAME INFO: TTopicSdkTestSetup started |85.6%| [TM] {BAZEL_UPLOAD} ydb/core/persqueue/pqtablet/quota/ut/unittest >> Vacuum::VacuumWithRestart [GOOD] >> Vacuum::OutReadSetsCleanedAfterCopyTable >> TControlPlaneProxyCheckNegativePermissionsSuccess::ShouldSendDescribeBinding [GOOD] >> TControlPlaneProxyCheckNegativePermissionsSuccess::ShouldSendModifyBinding >> KeyValueGRPCService::SimpleConcatUnexistedKeyV2 [GOOD] >> KeyValueGRPCService::SimpleCopyUnexistedKeyV1 |85.6%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/kqp/plan2svg/py3test >> test_cte.py::TestCte::test_toplevel [GOOD] |85.6%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/datashard/ut_background_compaction/ydb-core-tx-datashard-ut_background_compaction |85.6%| [TM] {BAZEL_UPLOAD} ydb/tests/functional/kqp/plan2svg/py3test |85.6%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/datashard/ut_background_compaction/ydb-core-tx-datashard-ut_background_compaction |85.6%| [TM] {RESULT} ydb/core/tx/datashard/ut_sequence/unittest |85.6%| [TM] {RESULT} ydb/core/persqueue/public/list_topics/ut/unittest |85.6%| [TM] {RESULT} ydb/core/grpc_services/tablet/ut/unittest |85.6%| [TM] {RESULT} ydb/core/persqueue/pqtablet/quota/ut/unittest |85.6%| [TM] {RESULT} ydb/tests/functional/kqp/plan2svg/py3test |85.6%| [LD] {RESULT} $(B)/ydb/core/tx/datashard/ut_background_compaction/ydb-core-tx-datashard-ut_background_compaction >> TControlPlaneProxyCheckNegativePermissionsSuccess::ShouldSendModifyBinding [GOOD] >> TControlPlaneProxyCheckNegativePermissionsSuccess::ShouldSendDeleteBinding ------- [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/backpressure/ut_client/unittest >> Backpressure::MonteCarlo [GOOD] Test command err: Clock# 1970-01-01T00:00:00.000000Z elapsed# 0.000034s EventsProcessed# 0 clients.size# 0 Clock# 1970-01-01T00:00:10.567812Z elapsed# 0.002999s EventsProcessed# 235 clients.size# 1 Clock# 1970-01-01T00:00:25.585878Z elapsed# 0.020895s EventsProcessed# 1930 clients.size# 1 Clock# 1970-01-01T00:00:40.190550Z elapsed# 0.039344s EventsProcessed# 3716 clients.size# 1 Clock# 1970-01-01T00:00:58.631434Z elapsed# 0.062036s EventsProcessed# 5897 clients.size# 1 Clock# 1970-01-01T00:01:12.005932Z elapsed# 0.078229s EventsProcessed# 7494 clients.size# 1 Clock# 1970-01-01T00:01:25.728143Z elapsed# 0.094896s EventsProcessed# 9089 clients.size# 1 Clock# 1970-01-01T00:01:42.253978Z elapsed# 0.115262s EventsProcessed# 11052 clients.size# 1 Clock# 1970-01-01T00:02:00.118224Z elapsed# 0.161980s EventsProcessed# 13121 clients.size# 1 Clock# 1970-01-01T00:02:15.023342Z elapsed# 0.205279s EventsProcessed# 14936 clients.size# 1 Clock# 1970-01-01T00:02:30.301770Z elapsed# 0.270068s EventsProcessed# 16666 clients.size# 1 Clock# 1970-01-01T00:02:46.553489Z elapsed# 0.314300s EventsProcessed# 18600 clients.size# 1 Clock# 1970-01-01T00:03:06.197699Z elapsed# 0.335839s EventsProcessed# 20825 clients.size# 1 Clock# 1970-01-01T00:03:18.245599Z elapsed# 0.371073s EventsProcessed# 22210 clients.size# 1 Clock# 1970-01-01T00:03:33.776472Z elapsed# 0.390700s EventsProcessed# 24101 clients.size# 1 Clock# 1970-01-01T00:03:45.885666Z elapsed# 0.405002s EventsProcessed# 25485 clients.size# 1 Clock# 1970-01-01T00:03:57.264665Z elapsed# 0.418663s EventsProcessed# 26743 clients.size# 1 Clock# 1970-01-01T00:04:14.491858Z elapsed# 0.418900s EventsProcessed# 26745 clients.size# 0 Clock# 1970-01-01T00:04:29.252453Z elapsed# 0.418921s EventsProcessed# 26745 clients.size# 0 Clock# 
1970-01-01T00:04:39.388965Z elapsed# 0.418940s EventsProcessed# 26745 clients.size# 0 Clock# 1970-01-01T00:04:51.280819Z elapsed# 0.418958s EventsProcessed# 26745 clients.size# 0 Clock# 1970-01-01T00:05:02.666075Z elapsed# 0.418978s EventsProcessed# 26745 clients.size# 0 Clock# 1970-01-01T00:05:20.833891Z elapsed# 0.419000s EventsProcessed# 26745 clients.size# 0 Clock# 1970-01-01T00:05:37.286104Z elapsed# 0.419020s EventsProcessed# 26745 clients.size# 0 Clock# 1970-01-01T00:05:55.226546Z elapsed# 0.419036s EventsProcessed# 26745 clients.size# 0 Clock# 1970-01-01T00:06:10.788718Z elapsed# 0.419055s EventsProcessed# 26745 clients.size# 0 Clock# 1970-01-01T00:06:28.579287Z elapsed# 0.419074s EventsProcessed# 26745 clients.size# 0 Clock# 1970-01-01T00:06:46.276675Z elapsed# 0.419106s EventsProcessed# 26745 clients.size# 0 Clock# 1970-01-01T00:06:59.537767Z elapsed# 0.419126s EventsProcessed# 26745 clients.size# 0 Clock# 1970-01-01T00:07:10.412389Z elapsed# 0.419149s EventsProcessed# 26745 clients.size# 0 Clock# 1970-01-01T00:07:22.443823Z elapsed# 0.419167s EventsProcessed# 26745 clients.size# 0 Clock# 1970-01-01T00:07:33.827737Z elapsed# 0.419186s EventsProcessed# 26745 clients.size# 0 Clock# 1970-01-01T00:07:50.728238Z elapsed# 0.440546s EventsProcessed# 28740 clients.size# 1 Clock# 1970-01-01T00:08:07.168566Z elapsed# 0.461206s EventsProcessed# 30675 clients.size# 1 Clock# 1970-01-01T00:08:19.357720Z elapsed# 0.476643s EventsProcessed# 32122 clients.size# 1 Clock# 1970-01-01T00:08:30.516561Z elapsed# 0.491360s EventsProcessed# 33456 clients.size# 1 Clock# 1970-01-01T00:08:47.252155Z elapsed# 0.520934s EventsProcessed# 35427 clients.size# 1 Clock# 1970-01-01T00:09:04.074161Z elapsed# 0.563629s EventsProcessed# 37404 clients.size# 1 Clock# 1970-01-01T00:09:23.590275Z elapsed# 0.621739s EventsProcessed# 39831 clients.size# 1 Clock# 1970-01-01T00:09:42.837112Z elapsed# 0.672722s EventsProcessed# 42168 clients.size# 1 Clock# 1970-01-01T00:09:57.559738Z elapsed# 0.701189s EventsProcessed# 44003 clients.size# 1 Clock# 1970-01-01T00:10:11.526047Z elapsed# 0.718845s EventsProcessed# 45648 clients.size# 1 Clock# 1970-01-01T00:10:22.374041Z elapsed# 0.732746s EventsProcessed# 46919 clients.size# 1 Clock# 1970-01-01T00:10:40.293708Z elapsed# 0.756695s EventsProcessed# 49129 clients.size# 1 Clock# 1970-01-01T00:10:58.685856Z elapsed# 0.796303s EventsProcessed# 51289 clients.size# 1 Clock# 1970-01-01T00:11:12.675223Z elapsed# 0.830965s EventsProcessed# 53060 clients.size# 1 Clock# 1970-01-01T00:11:29.431284Z elapsed# 0.870132s EventsProcessed# 55051 clients.size# 1 Clock# 1970-01-01T00:11:48.346412Z elapsed# 0.894179s EventsProcessed# 57234 clients.size# 1 Clock# 1970-01-01T00:11:58.721689Z elapsed# 0.894329s EventsProcessed# 57236 clients.size# 0 Clock# 1970-01-01T00:12:13.535481Z elapsed# 0.894350s EventsProcessed# 57236 clients.size# 0 Clock# 1970-01-01T00:12:29.700737Z elapsed# 0.894368s EventsProcessed# 57236 clients.size# 0 Clock# 1970-01-01T00:12:43.911495Z elapsed# 0.894392s EventsProcessed# 57236 clients.size# 0 Clock# 1970-01-01T00:12:56.980732Z elapsed# 0.894411s EventsProcessed# 57236 clients.size# 0 Clock# 1970-01-01T00:13:13.875237Z elapsed# 0.894429s EventsProcessed# 57236 clients.size# 0 Clock# 1970-01-01T00:13:29.351515Z elapsed# 0.894448s EventsProcessed# 57236 clients.size# 0 Clock# 1970-01-01T00:13:45.677750Z elapsed# 0.894466s EventsProcessed# 57236 clients.size# 0 Clock# 1970-01-01T00:14:01.331346Z elapsed# 0.894485s EventsProcessed# 57236 clients.size# 0 Clock# 
1970-01-01T00:14:11.386133Z elapsed# 0.894503s EventsProcessed# 57236 clients.size# 0 Clock# 1970-01-01T00:14:21.756887Z elapsed# 0.894537s EventsProcessed# 57236 clients.size# 0 Clock# 1970-01-01T00:14:36.865953Z elapsed# 0.894554s EventsProcessed# 57236 clients.size# 0 Clock# 1970-01-01T00:14:53.213227Z elapsed# 0.894571s EventsProcessed# 57236 clients.size# 0 Clock# 1970-01-01T00:15:09.683772Z elapsed# 0.894589s EventsProcessed# 57236 clients.size# 0 Clock# 1970-01-01T00:15:28.519301Z elapsed# 0.894631s EventsProcessed# 57236 clients.size# 0 Clock# 1970-01-01T00:15:43.222885Z elapsed# 0.894650s EventsProcessed# 57236 clients.size# 0 Clock# 1970-01-01T00:16:03.128774Z elapsed# 0.894672s EventsProcessed# 57236 clients.size# 0 Clock# 1970-01-01T00:16:17.359827Z elapsed# 0.894690s EventsProcessed# 57236 clients.size# 0 Clock# 1970-01-01T00:16:29.421729Z elapsed# 0.894708s EventsProcessed# 57236 clients.size# 0 Clock# 1970-01-01T00:16:47.533924Z elapsed# 0.894726s EventsProcessed# 57236 clients.size# 0 Clock# 1970-01-01T00:16:59.026483Z elapsed# 0.894746s EventsProcessed# 57236 clients.size# 0 Clock# 1970-01-01T00:17:09.068254Z elapsed# 0.894764s EventsProcessed# 57236 clients.size# 0 Clock# 1970-01-01T00:17:20.783846Z elapsed# 0.894781s EventsProcessed# 57236 clients.size# 0 Clock# 1970-01-01T00:17:34.214256Z elapsed# 0.894810s EventsProcessed# 57236 clients.size# 0 Clock# 1970-01-01T00:17:53.859397Z elapsed# 0.894829s EventsProcessed# 57236 clients.size# 0 Clock# 1970-01-01T00:18:06.056203Z elapsed# 0.894849s EventsProcessed# 57236 clients.size# 0 Clock# 1970-01-01T00:18:20.588379Z elapsed# 0.894867s EventsProcessed# 57236 clients.size# 0 Clock# 1970-01-01T00:18:30.945209Z elapsed# 0.894887s EventsProcessed# 57236 clients.size# 0 Clock# 1970-01-01T00:18:47.168143Z elapsed# 0.943911s EventsProcessed# 59168 clients.size# 1 Clock# 1970-01-01T00:19:02.503870Z elapsed# 0.944071s EventsProcessed# 59170 clients.size# 0 Clock# 1970-01-01T00:19:20.797464Z elapsed# 0.944092s EventsProcessed# 59170 clients.size# 0 Clock# 1970-01-01T00:19:35.156545Z elapsed# 0.944114s EventsProcessed# 59170 clients.size# 0 Clock# 1970-01-01T00:19:46.114983Z elapsed# 0.944134s EventsProcessed# 59170 clients.size# 0 Clock# 1970-01-01T00:20:05.248852Z elapsed# 0.944153s EventsProcessed# 59170 clients.size# 0 Clock# 1970-01-01T00:20:22.841755Z elapsed# 0.944172s EventsProcessed# 59170 clients.size# 0 Clock# 1970-01-01T00:20:41.364141Z elapsed# 0.944198s EventsProcessed# 59170 clients.size# 0 Clock# 1970-01-01T00:20:58.795573Z elapsed# 0.944217s EventsProcessed# 59170 clients.size# 0 Clock# 1970-01-01T00:21:14.080780Z elapsed# 0.984124s EventsProcessed# 61003 clients.size# 1 Clock# 1970-01-01T00:21:33.867576Z elapsed# 1.012883s EventsProcessed# 63360 clients.size# 1 Clock# 1970-01-01T00:21:47.194228Z elapsed# 1.049291s EventsProcessed# 64903 clients.size# 1 Clock# 1970-01-01T00:21:57.988939Z elapsed# 1.084107s EventsProcessed# 66171 clients.size# 1 Clock# 1970-01-01T00:22:16.362994Z elapsed# 1.129475s EventsProcessed# 68395 clients.size# 1 Clock# 1970-01-01T00:22:29.582426Z elapsed# 1.171507s EventsProcessed# 69948 clients.size# 1 Clock# 1970-01-01T00:22:49.473714Z elapsed# 1.229942s EventsProcessed# 72285 clients.size# 1 Clock# 1970-01-01T00:23:00.615777Z elapsed# 1.230092s EventsProcessed# 72287 clients.size# 0 Clock# 1970-01-01T00:23:11.392271Z elapsed# 1.230113s EventsProcessed# 72287 clients.size# 0 Clock# 1970-01-01T00:23:26.383150Z elapsed# 1.230130s EventsProcessed# 72287 clients.size# 0 Clock# 
1970-01-01T00:23:40.471688Z elapsed# 1.230152s EventsProcessed# 72287 clients.size# 0 Clock# 1970-01-01T00:24:00.326887Z elapsed# 1.230168s EventsProcessed# 72287 clients.size# 0 Clock# 1970-01-01T00:24:17.724016Z elapsed# 1.230184s EventsProcessed# 72287 clients.size# 0 Clock# 1970-01-01T00:24:35.034437Z elapsed# 1.230203s EventsProcessed# 72287 clients.size# 0 Clock# 1970-01-01T00:24:50.927095Z elapsed# 1.230220s EventsProcessed# 72287 clients.size# 0 Clock# 1970-01-01T00:25:07.973680Z elapsed# 1.230238s EventsProcessed# 72287 clients.size# 0 Clock# 1970-01-01T00:25:18.290461Z elapsed# 1.230257s EventsProcessed# 72287 clients.size# 0 Clock# 1970-01-01T00:25:34.445439Z elapsed# 1.230286s EventsProcessed# 72287 clients.size# 0 Clock# 1970-01-01T00:25:54.223344Z elapsed# 1.230305s EventsProcessed# 72287 clients.size# 0 Clock# 1970-01-01T00:26:08.737461Z elapsed# 1.230323s EventsProcessed# 72287 clients.size# 0 Clock# 1970-01-01T00:26:27.123728Z elapsed# 1.277748s EventsProcessed# 74487 clients.size# 1 Clock# 1970-01-01T00:26:39.231066Z elapsed# 1.315564s EventsProcessed# 75927 clients.size# 1 Clock# 1970-01-01T00:26:56.015966Z elapsed# 1.373698s EventsProcessed# 78030 clients.size# 1 Clock# 1970-01-01T00:27:10.642665Z elapsed# 1.413298s EventsProcessed# 79788 clients.size# 1 Clock# 1970-01-01T00:27:20.873598Z elapsed# 1.442619s EventsProcessed# 81003 clients.size# 1 Clock# 1970-01-01T00:27:33.156334Z elapsed# 1.546738s EventsProcessed# 83994 clients.size# 2 Clock# 1970-01-01T00:27:46.535598Z elapsed# 1.654064s EventsProcessed# 87205 clients.size# 2 Clock# 1970-01-01T00:27:57.346214Z elapsed# 1.747043s EventsProcessed# 89740 clients.size# 2 Clock# 1970-01-01T00:28:12.478758Z elapsed# 1.875917s EventsProcessed# 95176 clients.size# 3 Clock# 1970-01-01T00:28:28.300683Z elapsed# 2.055181s EventsProcessed# 102827 clients.size# 4 Clock# 1970-01-01T00:28:42.799559Z elapsed# 2.235932s EventsProcessed# 109738 clients.size# 4 Clock# 1970-01-01T00:29:00.566894Z elapsed# 2.380377s EventsProcessed# 118224 clients.size# 4 Clock# 1970-01-01T00:29:12.517106Z elapsed# 2.437953s EventsProcessed# 123831 clients.size# 4 Clock# 1970-01-01T00:29:26.388761Z elapsed# 2.573292s EventsProcessed# 132044 clients.size# 5 Clock# 1970-01-01T00:29:43.601387Z elapsed# 2.675522s EventsProcessed# 140162 clients.size# 4 Clock# 1970-01-01T00:30:03.386754Z elapsed# 2.806610s EventsProcessed# 151984 clients.size# 5 Clock# 1970-01-01T00:30:17.990407Z elapsed# 2.905267s EventsProcessed# 160692 clients.size# 5 Clock# 1970-01-01T00:30:32.279052Z elapsed# 3.034387s EventsProcessed# 169373 clients.size# 5 Clock# 1970-01-01T00:30:45.648265Z elapsed# 3.126356s EventsProcessed# 177330 clients.size# 5 Clock# 1970-01-01T00:30:59.146769Z elapsed# 3.283565s EventsProcessed# 187221 clients.size# 6 Clock# 1970-01-01T00:31:09.816982Z elapsed# 3.391896s EventsProcessed# 194919 clients.size# 6 Clock# 1970-01-01T00:31:27.954564Z elapsed# 3.544607s EventsProcessed# 207887 clients.size# 6 Clock# 1970-01-01T00:31:40.396484Z elapsed# 3.664007s EventsProcessed# 216835 clients.size# 6 Clock# 1970-01-01T00:31:51.241121Z elapsed# 3.759687s EventsProcessed# 224508 clients.size# 6 ... 
s EventsProcessed# 14280121 clients.size# 0 Clock# 1970-01-01T05:29:31.750019Z elapsed# 215.848645s EventsProcessed# 14280121 clients.size# 0 Clock# 1970-01-01T05:29:42.073720Z elapsed# 215.848667s EventsProcessed# 14280121 clients.size# 0 Clock# 1970-01-01T05:30:00.722534Z elapsed# 215.848687s EventsProcessed# 14280121 clients.size# 0 Clock# 1970-01-01T05:30:12.189724Z elapsed# 215.848707s EventsProcessed# 14280121 clients.size# 0 Clock# 1970-01-01T05:30:27.679151Z elapsed# 215.848726s EventsProcessed# 14280121 clients.size# 0 Clock# 1970-01-01T05:30:40.416405Z elapsed# 215.848747s EventsProcessed# 14280121 clients.size# 0 Clock# 1970-01-01T05:30:59.038726Z elapsed# 215.848768s EventsProcessed# 14280121 clients.size# 0 Clock# 1970-01-01T05:31:13.454361Z elapsed# 215.848788s EventsProcessed# 14280121 clients.size# 0 Clock# 1970-01-01T05:31:25.168051Z elapsed# 215.848808s EventsProcessed# 14280121 clients.size# 0 Clock# 1970-01-01T05:31:42.323571Z elapsed# 215.879854s EventsProcessed# 14282187 clients.size# 1 Clock# 1970-01-01T05:32:01.372489Z elapsed# 215.904144s EventsProcessed# 14284390 clients.size# 1 Clock# 1970-01-01T05:32:16.009926Z elapsed# 215.924036s EventsProcessed# 14286196 clients.size# 1 Clock# 1970-01-01T05:32:27.564887Z elapsed# 215.940194s EventsProcessed# 14287637 clients.size# 1 Clock# 1970-01-01T05:32:42.919843Z elapsed# 215.959959s EventsProcessed# 14289402 clients.size# 1 Clock# 1970-01-01T05:32:55.772043Z elapsed# 215.977338s EventsProcessed# 14290962 clients.size# 1 Clock# 1970-01-01T05:33:14.268011Z elapsed# 216.001791s EventsProcessed# 14293165 clients.size# 1 Clock# 1970-01-01T05:33:28.530299Z elapsed# 216.040111s EventsProcessed# 14296574 clients.size# 2 Clock# 1970-01-01T05:33:42.358732Z elapsed# 216.075624s EventsProcessed# 14299787 clients.size# 2 Clock# 1970-01-01T05:33:56.953885Z elapsed# 216.113721s EventsProcessed# 14303238 clients.size# 2 Clock# 1970-01-01T05:34:14.676667Z elapsed# 216.195090s EventsProcessed# 14307594 clients.size# 2 Clock# 1970-01-01T05:34:28.692209Z elapsed# 216.232517s EventsProcessed# 14310885 clients.size# 2 Clock# 1970-01-01T05:34:44.179598Z elapsed# 216.274476s EventsProcessed# 14314506 clients.size# 2 Clock# 1970-01-01T05:35:00.921399Z elapsed# 216.320076s EventsProcessed# 14318614 clients.size# 2 Clock# 1970-01-01T05:35:11.349503Z elapsed# 216.348865s EventsProcessed# 14321219 clients.size# 2 Clock# 1970-01-01T05:35:28.749924Z elapsed# 216.416859s EventsProcessed# 14327208 clients.size# 3 Clock# 1970-01-01T05:35:40.664391Z elapsed# 216.464431s EventsProcessed# 14331528 clients.size# 3 Clock# 1970-01-01T05:35:57.620985Z elapsed# 216.531661s EventsProcessed# 14337578 clients.size# 3 Clock# 1970-01-01T05:36:12.847688Z elapsed# 216.653827s EventsProcessed# 14343056 clients.size# 3 Clock# 1970-01-01T05:36:23.869010Z elapsed# 216.688894s EventsProcessed# 14345685 clients.size# 2 Clock# 1970-01-01T05:36:39.315365Z elapsed# 216.738767s EventsProcessed# 14349364 clients.size# 2 Clock# 1970-01-01T05:36:58.240556Z elapsed# 216.797014s EventsProcessed# 14353847 clients.size# 2 Clock# 1970-01-01T05:37:15.432064Z elapsed# 216.850123s EventsProcessed# 14357939 clients.size# 2 Clock# 1970-01-01T05:37:30.612844Z elapsed# 216.923122s EventsProcessed# 14363477 clients.size# 3 Clock# 1970-01-01T05:37:46.393501Z elapsed# 216.996958s EventsProcessed# 14369187 clients.size# 3 Clock# 1970-01-01T05:38:05.849334Z elapsed# 217.087099s EventsProcessed# 14376147 clients.size# 3 Clock# 1970-01-01T05:38:25.656276Z elapsed# 217.221484s EventsProcessed# 
14383336 clients.size# 3 Clock# 1970-01-01T05:38:44.646038Z elapsed# 217.310896s EventsProcessed# 14390106 clients.size# 3 Clock# 1970-01-01T05:39:00.547882Z elapsed# 217.381708s EventsProcessed# 14395649 clients.size# 3 Clock# 1970-01-01T05:39:12.802706Z elapsed# 217.436192s EventsProcessed# 14399948 clients.size# 3 Clock# 1970-01-01T05:39:26.451242Z elapsed# 217.500087s EventsProcessed# 14404801 clients.size# 3 Clock# 1970-01-01T05:39:40.324258Z elapsed# 217.566591s EventsProcessed# 14409707 clients.size# 3 Clock# 1970-01-01T05:39:56.911682Z elapsed# 217.620221s EventsProcessed# 14413655 clients.size# 2 Clock# 1970-01-01T05:40:12.680702Z elapsed# 217.710067s EventsProcessed# 14417369 clients.size# 2 Clock# 1970-01-01T05:40:30.824320Z elapsed# 217.770806s EventsProcessed# 14421628 clients.size# 2 Clock# 1970-01-01T05:40:42.483863Z elapsed# 217.828951s EventsProcessed# 14425827 clients.size# 3 Clock# 1970-01-01T05:40:56.612171Z elapsed# 217.897297s EventsProcessed# 14430840 clients.size# 3 Clock# 1970-01-01T05:41:13.497355Z elapsed# 217.978629s EventsProcessed# 14436889 clients.size# 3 Clock# 1970-01-01T05:41:30.916637Z elapsed# 218.061921s EventsProcessed# 14443127 clients.size# 3 Clock# 1970-01-01T05:41:49.790543Z elapsed# 218.150822s EventsProcessed# 14449727 clients.size# 3 Clock# 1970-01-01T05:42:08.657739Z elapsed# 218.288735s EventsProcessed# 14456490 clients.size# 3 Clock# 1970-01-01T05:42:19.808743Z elapsed# 218.345112s EventsProcessed# 14460633 clients.size# 3 Clock# 1970-01-01T05:42:31.467948Z elapsed# 218.401363s EventsProcessed# 14464898 clients.size# 3 Clock# 1970-01-01T05:42:44.694086Z elapsed# 218.462632s EventsProcessed# 14469481 clients.size# 3 Clock# 1970-01-01T05:42:56.148233Z elapsed# 218.515898s EventsProcessed# 14473477 clients.size# 3 Clock# 1970-01-01T05:43:13.460424Z elapsed# 218.601268s EventsProcessed# 14479766 clients.size# 3 Clock# 1970-01-01T05:43:27.705428Z elapsed# 218.731364s EventsProcessed# 14486602 clients.size# 4 Clock# 1970-01-01T05:43:44.533606Z elapsed# 218.827961s EventsProcessed# 14494735 clients.size# 4 Clock# 1970-01-01T05:43:58.549014Z elapsed# 218.905667s EventsProcessed# 14501364 clients.size# 4 Clock# 1970-01-01T05:44:18.090150Z elapsed# 219.008501s EventsProcessed# 14508201 clients.size# 3 Clock# 1970-01-01T05:44:30.519079Z elapsed# 219.096055s EventsProcessed# 14512665 clients.size# 3 Clock# 1970-01-01T05:44:48.443773Z elapsed# 219.170305s EventsProcessed# 14518947 clients.size# 3 Clock# 1970-01-01T05:45:04.356279Z elapsed# 219.277316s EventsProcessed# 14524567 clients.size# 3 Clock# 1970-01-01T05:45:17.108657Z elapsed# 219.331659s EventsProcessed# 14529038 clients.size# 3 Clock# 1970-01-01T05:45:27.357984Z elapsed# 219.419442s EventsProcessed# 14532528 clients.size# 3 Clock# 1970-01-01T05:45:43.563160Z elapsed# 219.555645s EventsProcessed# 14538304 clients.size# 3 Clock# 1970-01-01T05:46:01.631830Z elapsed# 219.656333s EventsProcessed# 14542612 clients.size# 2 Clock# 1970-01-01T05:46:21.372088Z elapsed# 219.716902s EventsProcessed# 14547216 clients.size# 2 Clock# 1970-01-01T05:46:37.485593Z elapsed# 219.751756s EventsProcessed# 14551022 clients.size# 2 Clock# 1970-01-01T05:46:50.011851Z elapsed# 219.783796s EventsProcessed# 14554099 clients.size# 2 Clock# 1970-01-01T05:47:07.010459Z elapsed# 219.829330s EventsProcessed# 14558189 clients.size# 2 Clock# 1970-01-01T05:47:22.649930Z elapsed# 219.901493s EventsProcessed# 14561915 clients.size# 2 Clock# 1970-01-01T05:47:40.201088Z elapsed# 219.950809s EventsProcessed# 14566304 clients.size# 2 
Clock# 1970-01-01T05:47:50.324166Z elapsed# 219.979887s EventsProcessed# 14568880 clients.size# 2 Clock# 1970-01-01T05:48:08.916989Z elapsed# 220.030862s EventsProcessed# 14573215 clients.size# 2 Clock# 1970-01-01T05:48:28.888977Z elapsed# 220.087936s EventsProcessed# 14577891 clients.size# 2 Clock# 1970-01-01T05:48:41.823049Z elapsed# 220.132376s EventsProcessed# 14580941 clients.size# 2 Clock# 1970-01-01T05:49:01.588143Z elapsed# 220.186599s EventsProcessed# 14585622 clients.size# 2 Clock# 1970-01-01T05:49:16.991236Z elapsed# 220.228108s EventsProcessed# 14589181 clients.size# 2 Clock# 1970-01-01T05:49:35.163561Z elapsed# 220.275340s EventsProcessed# 14593453 clients.size# 2 Clock# 1970-01-01T05:49:47.845502Z elapsed# 220.341422s EventsProcessed# 14596405 clients.size# 2 Clock# 1970-01-01T05:50:01.345192Z elapsed# 220.378188s EventsProcessed# 14599610 clients.size# 2 Clock# 1970-01-01T05:50:14.307671Z elapsed# 220.412854s EventsProcessed# 14602639 clients.size# 2 Clock# 1970-01-01T05:50:29.930309Z elapsed# 220.433558s EventsProcessed# 14604478 clients.size# 1 Clock# 1970-01-01T05:50:45.268677Z elapsed# 220.453816s EventsProcessed# 14606320 clients.size# 1 Clock# 1970-01-01T05:50:55.540730Z elapsed# 220.467092s EventsProcessed# 14607500 clients.size# 1 Clock# 1970-01-01T05:51:09.264398Z elapsed# 220.484770s EventsProcessed# 14609102 clients.size# 1 Clock# 1970-01-01T05:51:23.349903Z elapsed# 220.521863s EventsProcessed# 14612392 clients.size# 2 Clock# 1970-01-01T05:51:38.601313Z elapsed# 220.561844s EventsProcessed# 14615951 clients.size# 2 Clock# 1970-01-01T05:51:54.720546Z elapsed# 220.611587s EventsProcessed# 14619735 clients.size# 2 Clock# 1970-01-01T05:52:06.394496Z elapsed# 220.627664s EventsProcessed# 14621148 clients.size# 1 Clock# 1970-01-01T05:52:17.360241Z elapsed# 220.641961s EventsProcessed# 14622392 clients.size# 1 Clock# 1970-01-01T05:52:33.225362Z elapsed# 220.683872s EventsProcessed# 14626041 clients.size# 2 Clock# 1970-01-01T05:52:47.399000Z elapsed# 220.722930s EventsProcessed# 14629471 clients.size# 2 Clock# 1970-01-01T05:53:01.773663Z elapsed# 220.798124s EventsProcessed# 14632826 clients.size# 2 Clock# 1970-01-01T05:53:14.910920Z elapsed# 220.838027s EventsProcessed# 14636156 clients.size# 2 Clock# 1970-01-01T05:53:28.975657Z elapsed# 220.876303s EventsProcessed# 14639511 clients.size# 2 Clock# 1970-01-01T05:53:46.593934Z elapsed# 220.924454s EventsProcessed# 14643726 clients.size# 2 Clock# 1970-01-01T05:54:00.920239Z elapsed# 220.963835s EventsProcessed# 14647135 clients.size# 2 Clock# 1970-01-01T05:54:12.511545Z elapsed# 220.995413s EventsProcessed# 14649857 clients.size# 2 Clock# 1970-01-01T05:54:31.739784Z elapsed# 221.046409s EventsProcessed# 14654360 clients.size# 2 Clock# 1970-01-01T05:54:43.137692Z elapsed# 221.075851s EventsProcessed# 14656958 clients.size# 2 Clock# 1970-01-01T05:54:54.735214Z elapsed# 221.107261s EventsProcessed# 14659733 clients.size# 2 Clock# 1970-01-01T05:55:09.442332Z elapsed# 221.145875s EventsProcessed# 14663185 clients.size# 2 Clock# 1970-01-01T05:55:25.209684Z elapsed# 221.220356s EventsProcessed# 14666830 clients.size# 2 Clock# 1970-01-01T05:55:45.050419Z elapsed# 221.276122s EventsProcessed# 14671574 clients.size# 2 Clock# 1970-01-01T05:56:01.781243Z elapsed# 221.322414s EventsProcessed# 14675626 clients.size# 2 Clock# 1970-01-01T05:56:17.582215Z elapsed# 221.363996s EventsProcessed# 14679223 clients.size# 2 Clock# 1970-01-01T05:56:32.969605Z elapsed# 221.405111s EventsProcessed# 14682928 clients.size# 2 Clock# 
1970-01-01T05:56:48.750392Z elapsed# 221.445940s EventsProcessed# 14686607 clients.size# 2 Clock# 1970-01-01T05:57:03.209483Z elapsed# 221.485008s EventsProcessed# 14690086 clients.size# 2 Clock# 1970-01-01T05:57:16.525316Z elapsed# 221.521197s EventsProcessed# 14693233 clients.size# 2 Clock# 1970-01-01T05:57:28.873914Z elapsed# 221.571657s EventsProcessed# 14697523 clients.size# 3 Clock# 1970-01-01T05:57:44.557084Z elapsed# 221.667452s EventsProcessed# 14703065 clients.size# 3 Clock# 1970-01-01T05:58:00.206319Z elapsed# 221.746844s EventsProcessed# 14708606 clients.size# 3 Clock# 1970-01-01T05:58:12.694290Z elapsed# 221.857651s EventsProcessed# 14713202 clients.size# 3 Clock# 1970-01-01T05:58:27.027790Z elapsed# 221.936122s EventsProcessed# 14718350 clients.size# 3 Clock# 1970-01-01T05:58:42.973762Z elapsed# 222.012675s EventsProcessed# 14723956 clients.size# 3 Clock# 1970-01-01T05:58:55.602669Z elapsed# 222.083313s EventsProcessed# 14728474 clients.size# 3 Clock# 1970-01-01T05:59:07.183757Z elapsed# 222.151467s EventsProcessed# 14732610 clients.size# 3 Clock# 1970-01-01T05:59:27.026306Z elapsed# 222.312697s EventsProcessed# 14739729 clients.size# 3 Clock# 1970-01-01T05:59:40.662643Z elapsed# 222.382837s EventsProcessed# 14744783 clients.size# 3 Clock# 1970-01-01T05:59:55.637167Z elapsed# 222.459316s EventsProcessed# 14750001 clients.size# 3 |85.6%| [TM] {BAZEL_UPLOAD} ydb/core/blobstorage/backpressure/ut_client/unittest >> TTxDataShardFilterKMeansScan::BuildToPosting [GOOD] >> TTxDataShardFilterKMeansScan::BuildToBuild |85.6%| [TM] {RESULT} ydb/core/blobstorage/backpressure/ut_client/unittest >> TControlPlaneProxyCheckNegativePermissionsSuccess::ShouldSendDeleteBinding [GOOD] >> TControlPlaneProxyCheckPermissionsControlPlaneStorageSuccess::ShouldSendCreateQuery >> test.py::test[solomon-BadDownsamplingAggregation-] [GOOD] >> test.py::test[solomon-BadDownsamplingDisabled-] >> test_yt_reading.py::TestYtReading::test_partitioned_reading [GOOD] >> test_yt_reading.py::TestYtReading::test_block_reading >> TMLPConsumerTests::ReloadPQTabletAfterAlterConsumer [GOOD] >> TMLPConsumerTests::HtmlApp_Success >> TControlPlaneProxyCheckPermissionsControlPlaneStorageSuccess::ShouldSendCreateQuery [GOOD] >> TControlPlaneProxyCheckPermissionsControlPlaneStorageSuccess::ShouldSendListQueries >> DataShardStats::HistogramStatsCorrect [GOOD] >> DataShardStats::BlobsStatsCorrect >> HullReplWriteSst::Basic [GOOD] >> TControlPlaneProxyCheckPermissionsControlPlaneStorageSuccess::ShouldSendListQueries [GOOD] >> TControlPlaneProxyCheckPermissionsControlPlaneStorageSuccess::ShouldSendDescribeQuery >> DataShardBackgroundCompaction::ShouldCompact >> TGRpcRateLimiterTest::DropResource [GOOD] >> TGRpcRateLimiterTest::DescribeResource >> TControlPlaneProxyCheckPermissionsControlPlaneStorageSuccess::ShouldSendDescribeQuery [GOOD] >> TControlPlaneProxyCheckPermissionsControlPlaneStorageSuccess::ShouldSendModifyQuery |85.6%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/persqueue/public/describer/ut/ydb-core-persqueue-public-describer-ut |85.6%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/persqueue/public/describer/ut/ydb-core-persqueue-public-describer-ut |85.6%| [LD] {RESULT} $(B)/ydb/core/persqueue/public/describer/ut/ydb-core-persqueue-public-describer-ut ------- [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/vdisk/repl/ut/unittest >> HullReplWriteSst::Basic [GOOD] Test command err: commit chunk# 1 {ChunkIdx: 1 Offset: 101236736 Size: 32980956} 749565 commit chunk# 2 {ChunkIdx: 2 Offset: 
101249024 Size: 32966216} 749230 commit chunk# 3 {ChunkIdx: 3 Offset: 101249024 Size: 32967624} 749262 commit chunk# 4 {ChunkIdx: 4 Offset: 101220352 Size: 32995652} 749899 commit chunk# 5 {ChunkIdx: 5 Offset: 101220352 Size: 32997368} 749938 commit chunk# 6 {ChunkIdx: 6 Offset: 101236736 Size: 32979944} 749542 commit chunk# 7 {ChunkIdx: 7 Offset: 101244928 Size: 32969868} 749313 commit chunk# 8 {ChunkIdx: 8 Offset: 101203968 Size: 33013296} 750300 commit chunk# 9 {ChunkIdx: 9 Offset: 101224448 Size: 32992880} 749836 commit chunk# 10 {ChunkIdx: 10 Offset: 101244928 Size: 32972156} 749365 commit chunk# 11 {ChunkIdx: 11 Offset: 101212160 Size: 33001944} 750042 commit chunk# 12 {ChunkIdx: 12 Offset: 101220352 Size: 32996092} 749909 commit chunk# 13 {ChunkIdx: 13 Offset: 101232640 Size: 32985048} 749658 commit chunk# 14 {ChunkIdx: 14 Offset: 101257216 Size: 32959352} 749074 commit chunk# 15 {ChunkIdx: 15 Offset: 101228544 Size: 32985532} 749669 commit chunk# 16 {ChunkIdx: 16 Offset: 101224448 Size: 32990856} 749790 commit chunk# 17 {ChunkIdx: 17 Offset: 101212160 Size: 33002252} 750049 commit chunk# 18 {ChunkIdx: 18 Offset: 101257216 Size: 32959396} 749075 commit chunk# 19 {ChunkIdx: 19 Offset: 101208064 Size: 33006080} 750136 commit chunk# 20 {ChunkIdx: 20 Offset: 101208064 Size: 33009512} 750214 commit chunk# 21 {ChunkIdx: 21 Offset: 101224448 Size: 32989228} 749753 commit chunk# 22 {ChunkIdx: 22 Offset: 101220352 Size: 32995476} 749895 commit chunk# 23 {ChunkIdx: 23 Offset: 101232640 Size: 32985048} 749658 |85.6%| [TM] {BAZEL_UPLOAD} ydb/core/blobstorage/vdisk/repl/ut/unittest >> TControlPlaneProxyCheckPermissionsControlPlaneStorageSuccess::ShouldSendModifyQuery [GOOD] >> TControlPlaneProxyCheckPermissionsControlPlaneStorageSuccess::ShouldSendDeleteQuery >> DataShardSnapshotIsolation::ReadWriteUpsertOnCommitBlockedByVolatileConflict [GOOD] >> DataShardSnapshotIsolation::ReadWriteUpsertOnCommitBlockedByVolatileNoConflict |85.6%| [TA] $(B)/ydb/core/blobstorage/vdisk/repl/ut/test-results/unittest/{meta.json ... results_accumulator.log} |85.6%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/ymq/actor/cloud_events/cloud_events_ut/ydb-core-ymq-actor-cloud_events-cloud_events_ut |85.6%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/ymq/actor/cloud_events/cloud_events_ut/ydb-core-ymq-actor-cloud_events-cloud_events_ut |85.7%| [TA] {BAZEL_UPLOAD} $(B)/ydb/core/blobstorage/vdisk/repl/ut/test-results/unittest/{meta.json ... results_accumulator.log} |85.7%| [TA] {RESULT} $(B)/ydb/core/blobstorage/vdisk/repl/ut/test-results/unittest/{meta.json ... 
results_accumulator.log} |85.7%| [LD] {RESULT} $(B)/ydb/core/ymq/actor/cloud_events/cloud_events_ut/ydb-core-ymq-actor-cloud_events-cloud_events_ut >> TControlPlaneProxyCheckPermissionsControlPlaneStorageSuccess::ShouldSendDeleteQuery [GOOD] >> TControlPlaneProxyCheckPermissionsControlPlaneStorageSuccess::ShouldSendControlQuery |85.7%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/ymq/actor/yc_search_ut/ydb-core-ymq-actor-yc_search_ut |85.7%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/ymq/actor/yc_search_ut/ydb-core-ymq-actor-yc_search_ut |85.7%| [LD] {RESULT} $(B)/ydb/core/ymq/actor/yc_search_ut/ydb-core-ymq-actor-yc_search_ut >> DataShardFollowers::FollowerKeepsWorkingAfterMvccReadTable [GOOD] >> DataShardFollowers::FollowerStaleRo >> TCreateAndDropViewTest::DropViewDisabledFeatureFlag [GOOD] >> TCreateAndDropViewTest::DropNonexistingView |85.7%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/datashard/ut_reassign/ydb-core-tx-datashard-ut_reassign |85.7%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/datashard/ut_reassign/ydb-core-tx-datashard-ut_reassign |85.7%| [LD] {RESULT} $(B)/ydb/core/tx/datashard/ut_reassign/ydb-core-tx-datashard-ut_reassign >> TControlPlaneProxyCheckPermissionsControlPlaneStorageSuccess::ShouldSendControlQuery [GOOD] >> TControlPlaneProxyCheckPermissionsControlPlaneStorageSuccess::ShouldSendGetResultData >> TDescriberTests::TopicExists >> TControlPlaneProxyCheckPermissionsControlPlaneStorageSuccess::ShouldSendGetResultData [GOOD] >> TControlPlaneProxyCheckPermissionsControlPlaneStorageSuccess::ShouldSendListJobs >> KeyValueGRPCService::SimpleCopyUnexistedKeyV1 [GOOD] >> KeyValueGRPCService::SimpleCopyUnexistedKeyV2 >> Coordinator::ReadStepSubscribe [GOOD] >> Coordinator::LastStepSubscribe >> TControlPlaneProxyCheckPermissionsControlPlaneStorageSuccess::ShouldSendListJobs [GOOD] >> TControlPlaneProxyCheckPermissionsControlPlaneStorageSuccess::ShouldSendDescribeJob >> TControlPlaneProxyCheckPermissionsControlPlaneStorageSuccess::ShouldSendDescribeJob [GOOD] >> TControlPlaneProxyCheckPermissionsControlPlaneStorageSuccess::ShouldSendCreateConnection >> test.py::test[solomon-BadDownsamplingDisabled-] [GOOD] >> test.py::test[solomon-BadDownsamplingFill-] >> DataShardReassign::AutoReassignOnYellowFlag >> DataShardBackgroundCompaction::ShouldCompact [GOOD] >> DataShardBackgroundCompaction::ShouldNotCompactWhenBorrowed >> Vacuum::OutReadSetsCleanedAfterCopyTable [GOOD] >> Vacuum::BorrowerDataCleanedAfterCopyTable >> test_yt_reading.py::TestYtReading::test_block_reading [GOOD] >> test_ctas.py::TestYtCtas::test_simple_ctast >> TControlPlaneProxyCheckPermissionsControlPlaneStorageSuccess::ShouldSendCreateConnection [GOOD] >> TControlPlaneProxyCheckPermissionsControlPlaneStorageSuccess::ShouldSendCreateConnectionWithServiceAccount |85.7%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/quoter/ut/ydb-core-quoter-ut |85.7%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/quoter/ut/ydb-core-quoter-ut |85.7%| [LD] {RESULT} $(B)/ydb/core/quoter/ut/ydb-core-quoter-ut >> DescribeSchemaSecretsService::EmptyBatch [GOOD] >> DescribeSchemaSecretsService::MixedGrantsInBatch >> TGRpcRateLimiterTest::DescribeResource [GOOD] >> TGRpcRateLimiterTest::ListResources >> TControlPlaneProxyCheckPermissionsControlPlaneStorageSuccess::ShouldSendCreateConnectionWithServiceAccount [GOOD] >> TControlPlaneProxyCheckPermissionsControlPlaneStorageSuccess::ShouldSendListConnections >> TTxDataShardFilterKMeansScan::BuildToBuild [GOOD] >> TTxDataShardLocalKMeansScan::BadRequest 
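The Backpressure::MonteCarlo dump earlier in this output prints one cumulative record per simulated step, in the form "Clock# <timestamp> elapsed# <seconds>s EventsProcessed# <count> clients.size# <n>". Below is a minimal Python sketch (not part of the build output, and not YDB tooling) for summarizing such a run from the captured text; it assumes only the record format exactly as printed above, and the log path in the usage line is hypothetical.

    import re

    # One simulator record as printed by Backpressure::MonteCarlo, e.g.
    # "Clock# 1970-01-01T00:00:10.567812Z elapsed# 0.002999s EventsProcessed# 235 clients.size# 1"
    RECORD = re.compile(
        r"Clock# (?P<clock>\S+) elapsed# (?P<elapsed>[\d.]+)s "
        r"EventsProcessed# (?P<events>\d+) clients\.size# (?P<clients>\d+)"
    )

    def summarize(log_text: str) -> str:
        """Summarize a MonteCarlo run from raw log text.

        Both counters are cumulative, so the final record alone yields the
        totals; this also tolerates elided middles ("...") in the dump.
        """
        # Records wrap across captured lines, so normalize whitespace first.
        text = " ".join(log_text.split())
        matches = list(RECORD.finditer(text))
        if not matches:
            return "no records found"
        last = matches[-1]
        elapsed = float(last["elapsed"])
        events = int(last["events"])
        rate = events / elapsed if elapsed else 0.0
        return (f"records={len(matches)} events={events} "
                f"elapsed={elapsed:.3f}s avg_rate={rate:.0f} ev/s")

    if __name__ == "__main__":
        # Hypothetical path; point this at the captured log, e.g. the
        # ya_log.txt written by the ya make invocation.
        with open("ya_log.txt") as f:
            print(summarize(f.read()))

Reading only the last record, rather than summing per-step deltas, keeps the summary correct even though the test framework truncates the middle of long dumps.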
|85.7%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/apps/ydbd/ydbd |85.7%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/apps/ydbd/ydbd >> TControlPlaneProxyCheckPermissionsControlPlaneStorageSuccess::ShouldSendListConnections [GOOD] >> TControlPlaneProxyCheckPermissionsControlPlaneStorageSuccess::ShouldSendDescribeConnection |85.7%| [LD] {RESULT} $(B)/ydb/apps/ydbd/ydbd >> test_kqprun_recipe.py::TestKqprunRecipe::test_query_execution >> TControlPlaneProxyCheckPermissionsControlPlaneStorageSuccess::ShouldSendDescribeConnection [GOOD] >> TControlPlaneProxyCheckPermissionsControlPlaneStorageSuccess::ShouldSendModifyConnection >> TControlPlaneProxyCheckPermissionsControlPlaneStorageSuccess::ShouldSendModifyConnection [GOOD] >> TControlPlaneProxyCheckPermissionsControlPlaneStorageSuccess::ShouldSendModifyConnectionWithServiceAccount >> DataShardFollowers::FollowerStaleRo [GOOD] >> DataShardFollowers::FollowerRebootAfterSysCompaction >> KesusProxyTest::ReconnectsWithKesusWhenNotConnected [GOOD] >> KesusProxyTest::ReconnectsWithKesusWhenPipeDestroyed [GOOD] >> KesusProxyTest::ReconnectsWithKesusAfterSeveralRetries >> KesusProxyTest::ReconnectsWithKesusAfterSeveralRetries [GOOD] >> KesusProxyTest::RejectsNotCanonizedResourceName [GOOD] >> KesusProxyTest::SubscribesOnResource [GOOD] >> KesusProxyTest::SubscribesOnResourcesWhenReconnected [GOOD] >> KesusProxyTest::ProxyRequestDuringDisconnection [GOOD] >> KesusProxyTest::DeactivateSessionWhenResourceClosed [GOOD] >> KesusProxyTest::SendsProxySessionOnceOnSuccess >> test_kqprun_recipe.py::TestKqprunRecipe::test_query_execution [GOOD] >> KesusProxyTest::SendsProxySessionOnceOnSuccess [GOOD] >> KesusProxyTest::SendsProxySessionOnceOnFailure [GOOD] >> KesusProxyTest::AnswersWithSessionWhenResourceIsAlreadyKnown [GOOD] >> KesusProxyTest::SendsBrokenUpdateWhenKesusPassesError [GOOD] >> KesusProxyTest::AllocatesResourceWithKesus [GOOD] >> KesusProxyTest::DisconnectsDuringActiveSession [GOOD] >> KesusProxyTest::AllocatesResourceOffline [GOOD] >> KesusProxyTest::ConnectsDuringOfflineAllocation [GOOD] >> KesusResourceAllocationStatisticsTest::ReturnsDefaultValues [GOOD] >> KesusResourceAllocationStatisticsTest::CalculatesAverage [GOOD] >> KesusResourceAllocationStatisticsTest::TakesBestStat [GOOD] >> TQuoterServiceTest::StaticRateLimiter >> TControlPlaneProxyCheckPermissionsControlPlaneStorageSuccess::ShouldSendModifyConnectionWithServiceAccount [GOOD] >> TControlPlaneProxyCheckPermissionsControlPlaneStorageSuccess::ShouldSendDeleteConnection >> TCloudEventsProcessorTests::TestCreateCloudEventProcessor >> DataShardSnapshotIsolation::ReadWriteUpsertOnCommitBlockedByVolatileNoConflict [GOOD] >> DataShardSnapshotIsolation::ReadWriteUncommittedUpsertNotBlockedByOlderVolatile >> TCreateAndDropViewTest::DropNonexistingView [GOOD] >> TCreateAndDropViewTest::CallDropViewOnTable >> TFetchRequestTests::HappyWay [GOOD] >> TFetchRequestTests::CDC >> TControlPlaneProxyCheckPermissionsControlPlaneStorageSuccess::ShouldSendDeleteConnection [GOOD] >> TControlPlaneProxyCheckPermissionsControlPlaneStorageSuccess::ShouldSendTestConnection >> TIndexProcesorTests::TestCreateIndexProcessor >> TControlPlaneProxyCheckPermissionsControlPlaneStorageSuccess::ShouldSendTestConnection [GOOD] >> TControlPlaneProxyCheckPermissionsControlPlaneStorageSuccess::ShouldSendTestConnectionWithServiceAccount >> TControlPlaneProxyCheckPermissionsControlPlaneStorageSuccess::ShouldSendTestConnectionWithServiceAccount [GOOD] >> 
TControlPlaneProxyCheckPermissionsControlPlaneStorageSuccess::ShouldSendCreateBinding >> KeyValueGRPCService::SimpleCopyUnexistedKeyV2 [GOOD] >> KeyValueGRPCService::SimpleWriteReadV1 >> DataShardBackgroundCompaction::ShouldNotCompactWhenBorrowed [GOOD] >> DataShardBackgroundCompaction::ShouldNotCompactWhenCopyTable >> test.py::test[solomon-BadDownsamplingFill-] [GOOD] >> test.py::test[solomon-BadDownsamplingInterval-] >> TControlPlaneProxyCheckPermissionsControlPlaneStorageSuccess::ShouldSendCreateBinding [GOOD] >> TControlPlaneProxyCheckPermissionsControlPlaneStorageSuccess::ShouldSendListBindings >> TGRpcRateLimiterTest::ListResources [GOOD] >> TGRpcRateLimiterTest::AcquireResourceManyRequiredGrpcApi >> TControlPlaneProxyCheckPermissionsControlPlaneStorageSuccess::ShouldSendListBindings [GOOD] >> TControlPlaneProxyCheckPermissionsControlPlaneStorageSuccess::ShouldSendDescribeBinding >> DataShardStats::BlobsStatsCorrect [GOOD] >> DataShardStats::SharedCacheGarbage >> TIndexProcesorTests::TestCreateIndexProcessor [GOOD] >> TIndexProcesorTests::TestSingleCreateQueueEvent >> TMLPConsumerTests::HtmlApp_Success [GOOD] >> TMLPConsumerTests::HtmlApp_BadConsumer >> TQuoterServiceTest::StaticRateLimiter [GOOD] >> TQuoterServiceTest::StaticMultipleAndResources >> TControlPlaneProxyCheckPermissionsControlPlaneStorageSuccess::ShouldSendDescribeBinding [GOOD] >> TControlPlaneProxyCheckPermissionsControlPlaneStorageSuccess::ShouldSendModifyBinding ------- [TM] {asan, default-linux-x86_64, release} ydb/tests/tools/kqprun/tests/py3test >> test_kqprun_recipe.py::TestKqprunRecipe::test_query_execution [GOOD] Test command err: contrib/python/ydb/py3/ydb/__init__.py:43: UserWarning: Used deprecated behavior, for fix ADD PEERDIR kikimr/public/sdk/python/ydb_v3_new_behavior contrib/python/ydb/py3/ydb/global_settings.py:22: UserWarning: Global allow split transaction is deprecated behaviour. contrib/python/ydb/py3/ydb/global_settings.py:12: UserWarning: Global allow truncated response is deprecated behaviour. |85.7%| [TM] {BAZEL_UPLOAD} ydb/tests/tools/kqprun/tests/py3test >> TTxDataShardLocalKMeansScan::BadRequest [GOOD] >> TTxDataShardLocalKMeansScan::TooManyClusters >> TControlPlaneProxyCheckPermissionsControlPlaneStorageSuccess::ShouldSendModifyBinding [GOOD] >> TControlPlaneProxyCheckPermissionsControlPlaneStorageSuccess::ShouldSendDeleteBinding >> Vacuum::BorrowerDataCleanedAfterCopyTable [GOOD] >> test_ctas.py::TestYtCtas::test_simple_ctast [GOOD] >> DataShardReassign::AutoReassignOnYellowFlag [GOOD] >> PersQueueSdkReadSessionTest::ReadSessionWithCloseNotCommitted [GOOD] >> PersQueueSdkReadSessionTest::ReadSessionWithExplicitlySpecifiedPartitions >> TControlPlaneProxyCheckPermissionsControlPlaneStorageSuccess::ShouldSendDeleteBinding [GOOD] >> TControlPlaneProxyCheckPermissionsFailed::ShouldSendCreateQuery |85.7%| [TM] {asan, default-linux-x86_64, release} ydb/tests/fq/yt/kqp_yt_import/py3test >> test_ctas.py::TestYtCtas::test_simple_ctast [GOOD] |85.7%| [TM] {BAZEL_UPLOAD} ydb/tests/fq/yt/kqp_yt_import/py3test >> TDescriberTests::TopicExists [GOOD] >> TDescriberTests::TopicNotExists ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/datashard/ut_vacuum/unittest >> Vacuum::BorrowerDataCleanedAfterCopyTable [GOOD] Test command err: ... 
waiting for SysViewsRoster update finished 2026-01-08T15:28:19.441866Z node 1 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:125: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, got undelivered to scheme cache: ActorUnknown 2026-01-08T15:28:19.568556Z node 1 :BS_NODE CRIT: {NWDC41@distconf_quorum.cpp:167} configuration incorrect Error# VDisk [0:1:0:0:0] does not match any static group 2026-01-08T15:28:19.581470Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:226:2273], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2026-01-08T15:28:19.581846Z node 1 :METADATA_PROVIDER ERROR: log.cpp:841: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2026-01-08T15:28:19.582005Z node 1 :METADATA_PROVIDER ERROR: log.cpp:841: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2026-01-08T15:28:19.888093Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2026-01-08T15:28:19.888217Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2026-01-08T15:28:19.973641Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1374: Notification cookie mismatch for subscription [1:36:2083] 1767886096672747 != 1767886096672751 2026-01-08T15:28:19.989485Z node 1 :IMPORT WARN: schemeshard_import.cpp:440: Table profiles were not loaded 2026-01-08T15:28:20.037037Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2026-01-08T15:28:20.138514Z node 1 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: PathErrorUnknown, path: Root/.metadata/script_executions ... waiting for SysViewsRoster update finished (done) 2026-01-08T15:28:20.457137Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2026-01-08T15:28:20.471153Z node 1 :TX_CONVEYOR ERROR: log.cpp:841: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2026-01-08T15:28:20.581124Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:28:21.005868Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:997:2848], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:28:21.005995Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:986:2843], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:28:21.009822Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:263: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:28:21.010902Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:1002:2853], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:28:21.011105Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:263: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:28:21.022155Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2026-01-08T15:28:21.183389Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:1000:2851], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715658 completed, doublechecking } 2026-01-08T15:28:21.266040Z node 1 :TX_PROXY ERROR: schemereq.cpp:624: Actor# [1:1058:2890] txid# 281474976715659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 42], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } ... waiting for SysViewsRoster update finished 2026-01-08T15:28:26.332752Z node 2 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:125: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, got undelivered to scheme cache: ActorUnknown 2026-01-08T15:28:26.339598Z node 2 :BS_NODE CRIT: {NWDC41@distconf_quorum.cpp:167} configuration incorrect Error# VDisk [0:1:0:0:0] does not match any static group 2026-01-08T15:28:26.341048Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:125: [WorkloadService] [Service] Failed to discover tenant nodes 2026-01-08T15:28:26.343379Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [2:304:2348], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2026-01-08T15:28:26.343730Z node 2 :METADATA_PROVIDER ERROR: log.cpp:841: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2026-01-08T15:28:26.343859Z node 2 :METADATA_PROVIDER ERROR: log.cpp:841: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2026-01-08T15:28:26.598963Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2026-01-08T15:28:26.599103Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2026-01-08T15:28:26.628536Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1374: Notification cookie mismatch for subscription [2:36:2083] 1767886103041204 != 1767886103041207 2026-01-08T15:28:26.634441Z node 2 :IMPORT WARN: schemeshard_import.cpp:440: Table profiles were not loaded 2026-01-08T15:28:26.682532Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2026-01-08T15:28:26.778352Z node 2 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: PathErrorUnknown, path: Root/.metadata/script_executions ... waiting for SysViewsRoster update finished (done) 2026-01-08T15:28:27.095310Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2026-01-08T15:28:27.111827Z node 2 :TX_CONVEYOR ERROR: log.cpp:841: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2026-01-08T15:28:27.222387Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:28:27.547158Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:989:2841], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:28:27.547273Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:978:2836], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:28:27.547354Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:263: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:28:27.548364Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:994:2846], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:28:27.548893Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:263: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:28:27.557929Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2026-01-08T15:28:27.696130Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:992:2844], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715658 completed, doublechecking } 2026-01-08T15:28:27.732774Z node 2 :TX_PROXY ERROR: schemereq.cpp:624: Actor# [2:1049:2882] txid# 281474976715659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 42], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } ... waiting for SysViewsRoster update finished 2026-01-08T15:28:32.989455Z node 3 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:125: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, got undelivered to scheme cache: ActorUnknown 2026-01-08T15:28:33.010594Z node 3 :BS_NODE CRIT: {NWDC41@distconf_quorum.cpp:167} configuration incorrect Error# VDisk [0:1:0:0:0] does ... , LocalPathId: 42], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } ... waiting for SysViewsRoster update finished 2026-01-08T15:28:54.503724Z node 6 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:125: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, got undelivered to scheme cache: ActorUnknown 2026-01-08T15:28:54.507817Z node 6 :BS_NODE CRIT: {NWDC41@distconf_quorum.cpp:167} configuration incorrect Error# VDisk [0:1:0:0:0] does not match any static group 2026-01-08T15:28:54.511187Z node 6 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:125: [WorkloadService] [Service] Failed to discover tenant nodes 2026-01-08T15:28:54.513794Z node 6 :METADATA_PROVIDER ERROR: log.cpp:841: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2026-01-08T15:28:54.513872Z node 6 :METADATA_PROVIDER ERROR: log.cpp:841: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2026-01-08T15:28:54.514171Z node 6 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [6:317:2361], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2026-01-08T15:28:54.809199Z node 6 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(6, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2026-01-08T15:28:54.809311Z node 6 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(6, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2026-01-08T15:28:54.832921Z node 6 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1374: Notification cookie mismatch for subscription [6:36:2083] 1767886130501575 != 1767886130501579 2026-01-08T15:28:54.835725Z node 6 :IMPORT WARN: schemeshard_import.cpp:440: Table profiles were not loaded 2026-01-08T15:28:54.878538Z node 6 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(6, (0,0,0,0)) VolatileState: Connecting -> Connected 2026-01-08T15:28:54.960050Z node 6 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: PathErrorUnknown, path: Root/.metadata/script_executions ... waiting for SysViewsRoster update finished (done) 2026-01-08T15:28:55.227518Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2026-01-08T15:28:55.241133Z node 6 :TX_CONVEYOR ERROR: log.cpp:841: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2026-01-08T15:28:55.345511Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:28:55.894499Z node 6 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [6:1076:2915], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:28:55.894709Z node 6 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [6:1086:2920], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:28:55.894818Z node 6 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:263: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:28:55.896015Z node 6 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [6:1090:2923], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:28:55.896225Z node 6 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:263: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:28:55.902172Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715659:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2026-01-08T15:28:56.087622Z node 6 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [6:1091:2924], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715659 completed, doublechecking } 2026-01-08T15:28:56.125079Z node 6 :TX_PROXY ERROR: schemereq.cpp:624: Actor# [6:1150:2964] txid# 281474976715660, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 43], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } ... waiting for SysViewsRoster update finished 2026-01-08T15:29:02.515250Z node 7 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:125: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, got undelivered to scheme cache: ActorUnknown 2026-01-08T15:29:02.521024Z node 7 :BS_NODE CRIT: {NWDC41@distconf_quorum.cpp:167} configuration incorrect Error# VDisk [0:1:0:0:0] does not match any static group 2026-01-08T15:29:02.522038Z node 7 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:125: [WorkloadService] [Service] Failed to discover tenant nodes 2026-01-08T15:29:02.526726Z node 7 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [7:112:2159], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2026-01-08T15:29:02.526990Z node 7 :METADATA_PROVIDER ERROR: log.cpp:841: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2026-01-08T15:29:02.527149Z node 7 :METADATA_PROVIDER ERROR: log.cpp:841: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2026-01-08T15:29:02.777941Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2026-01-08T15:29:02.778086Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2026-01-08T15:29:02.802981Z node 7 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1374: Notification cookie mismatch for subscription [7:36:2083] 1767886138791874 != 1767886138791878 2026-01-08T15:29:02.810358Z node 7 :IMPORT WARN: schemeshard_import.cpp:440: Table profiles were not loaded 2026-01-08T15:29:02.854519Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Connecting -> Connected 2026-01-08T15:29:02.937488Z node 7 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: PathErrorUnknown, path: Root/.metadata/script_executions ... waiting for SysViewsRoster update finished (done) 2026-01-08T15:29:03.220997Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2026-01-08T15:29:03.238780Z node 7 :TX_CONVEYOR ERROR: log.cpp:841: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2026-01-08T15:29:03.360924Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:29:03.947290Z node 7 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [7:1076:2915], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:29:03.947436Z node 7 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [7:1086:2920], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:29:03.947544Z node 7 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:263: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:29:03.948805Z node 7 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [7:1090:2923], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:29:03.948902Z node 7 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:263: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:29:03.954979Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715659:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2026-01-08T15:29:04.143204Z node 7 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [7:1091:2924], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715659 completed, doublechecking } 2026-01-08T15:29:04.180620Z node 7 :TX_PROXY ERROR: schemereq.cpp:624: Actor# [7:1150:2964] txid# 281474976715660, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 43], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2026-01-08T15:29:05.551287Z node 7 :TX_DATASHARD WARN: datashard__vacuum.cpp:37: Vacuum of tablet# 72075186224037888: has borrowed parts, requested from [7:832:2727] |85.7%| [TM] {BAZEL_UPLOAD} ydb/core/tx/datashard/ut_vacuum/unittest >> DSProxyStrategyTest::Restore_block42 [GOOD] >> TControlPlaneProxyCheckPermissionsFailed::ShouldSendCreateQuery [GOOD] >> TControlPlaneProxyCheckPermissionsFailed::ShouldSendListQueries ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/datashard/ut_reassign/unittest >> DataShardReassign::AutoReassignOnYellowFlag [GOOD] Test command err: ... waiting for SysViewsRoster update finished 2026-01-08T15:29:01.765691Z node 1 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:125: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, got undelivered to scheme cache: ActorUnknown 2026-01-08T15:29:01.899980Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:114:2161], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2026-01-08T15:29:01.900350Z node 1 :METADATA_PROVIDER ERROR: log.cpp:841: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2026-01-08T15:29:01.900510Z node 1 :METADATA_PROVIDER ERROR: log.cpp:841: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2026-01-08T15:29:02.198990Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2026-01-08T15:29:02.199151Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2026-01-08T15:29:02.263310Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1374: Notification cookie mismatch for subscription [1:34:2081] 1767886138643936 != 1767886138643940 2026-01-08T15:29:02.273572Z node 1 :IMPORT WARN: schemeshard_import.cpp:440: Table profiles were not loaded 2026-01-08T15:29:02.320945Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2026-01-08T15:29:02.417491Z node 1 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: PathErrorUnknown, path: Root/.metadata/script_executions ... waiting for SysViewsRoster update finished (done) 2026-01-08T15:29:02.686735Z node 1 :TABLET_EXECUTOR DEBUG: Leader{72057594046644480:2:190} Tx{267, NKikimr::NSchemeShard::TSchemeShard::TTxOperationPropose} queued, type NKikimr::NSchemeShard::TSchemeShard::TTxOperationPropose 2026-01-08T15:29:02.686797Z node 1 :TABLET_EXECUTOR DEBUG: Leader{72057594046644480:2:190} Tx{267, NKikimr::NSchemeShard::TSchemeShard::TTxOperationPropose} took 4194304b of static mem, Memory{4194304 dyn 0} 2026-01-08T15:29:02.688385Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2026-01-08T15:29:02.688581Z node 1 :TABLET_EXECUTOR DEBUG: Leader{72057594046644480:2:190} Tx{267, NKikimr::NSchemeShard::TSchemeShard::TTxOperationPropose} hope 1 -> done Change{189, redo 997b alter 0b annex 0, ~{ 1, 33, 35, 42, 4 } -{ }, 0 gb} 2026-01-08T15:29:02.688636Z node 1 :TABLET_EXECUTOR DEBUG: Leader{72057594046644480:2:190} Tx{267, NKikimr::NSchemeShard::TSchemeShard::TTxOperationPropose} release 4194304b of static, Memory{0 dyn 0} 2026-01-08T15:29:02.689346Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:56: Put Result: TEvPutResult {Id# [72057594046644480:2:190:1:24576:527:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2026-01-08T15:29:02.689450Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:56: Put Result: TEvPutResult {Id# [72057594046644480:2:190:0:0:70:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2026-01-08T15:29:02.689524Z node 1 :TABLET_EXECUTOR DEBUG: Leader{72057594046644480:2:191} commited cookie 1 for step 190 2026-01-08T15:29:02.689826Z node 1 :TABLET_EXECUTOR DEBUG: Leader{72057594046644480:2:191} Tx{268, 
NKikimr::NSchemeShard::TSchemeShard::TTxOperationProgress} queued, type NKikimr::NSchemeShard::TSchemeShard::TTxOperationProgress 2026-01-08T15:29:02.689871Z node 1 :TABLET_EXECUTOR DEBUG: Leader{72057594046644480:2:191} Tx{268, NKikimr::NSchemeShard::TSchemeShard::TTxOperationProgress} took 4194304b of static mem, Memory{4194304 dyn 0} 2026-01-08T15:29:02.690014Z node 1 :TABLET_EXECUTOR DEBUG: Leader{72057594046644480:2:191} Tx{268, NKikimr::NSchemeShard::TSchemeShard::TTxOperationProgress} hope 1 -> done Change{190, redo 175b alter 0b annex 0, ~{ 42, 4 } -{ }, 0 gb} 2026-01-08T15:29:02.690052Z node 1 :TABLET_EXECUTOR DEBUG: Leader{72057594046644480:2:191} Tx{268, NKikimr::NSchemeShard::TSchemeShard::TTxOperationProgress} release 4194304b of static, Memory{0 dyn 0} 2026-01-08T15:29:02.690270Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:56: Put Result: TEvPutResult {Id# [72057594046644480:2:191:1:24576:132:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2026-01-08T15:29:02.690361Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:56: Put Result: TEvPutResult {Id# [72057594046644480:2:191:0:0:70:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2026-01-08T15:29:02.690411Z node 1 :TABLET_EXECUTOR DEBUG: Leader{72057594046644480:2:192} commited cookie 1 for step 191 2026-01-08T15:29:02.690543Z node 1 :TABLET_EXECUTOR DEBUG: Leader{72057594046644480:2:192} Tx{269, NKikimr::NSchemeShard::TSchemeShard::TTxOperationProgress} queued, type NKikimr::NSchemeShard::TSchemeShard::TTxOperationProgress 2026-01-08T15:29:02.690568Z node 1 :TABLET_EXECUTOR DEBUG: Leader{72057594046644480:2:192} Tx{269, NKikimr::NSchemeShard::TSchemeShard::TTxOperationProgress} took 4194304b of static mem, Memory{4194304 dyn 0} 2026-01-08T15:29:02.690674Z node 1 :TABLET_EXECUTOR DEBUG: Leader{72057594046644480:2:192} Tx{269, NKikimr::NSchemeShard::TSchemeShard::TTxOperationProgress} hope 1 -> done Change{191, redo 175b alter 0b annex 0, ~{ 42, 4 } -{ }, 0 gb} 2026-01-08T15:29:02.690698Z node 1 :TABLET_EXECUTOR DEBUG: Leader{72057594046644480:2:192} Tx{269, NKikimr::NSchemeShard::TSchemeShard::TTxOperationProgress} release 4194304b of static, Memory{0 dyn 0} 2026-01-08T15:29:02.690860Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:56: Put Result: TEvPutResult {Id# [72057594046644480:2:192:1:24576:132:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2026-01-08T15:29:02.690910Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:56: Put Result: TEvPutResult {Id# [72057594046644480:2:192:0:0:70:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2026-01-08T15:29:02.690959Z node 1 :TABLET_EXECUTOR DEBUG: Leader{72057594046644480:2:193} commited cookie 1 for step 192 2026-01-08T15:29:02.691032Z node 1 :TABLET_EXECUTOR DEBUG: Leader{72057594046644480:2:193} Tx{270, NKikimr::NSchemeShard::TSchemeShard::TTxOperationProgress} queued, type NKikimr::NSchemeShard::TSchemeShard::TTxOperationProgress 2026-01-08T15:29:02.691057Z node 1 :TABLET_EXECUTOR DEBUG: Leader{72057594046644480:2:193} Tx{270, NKikimr::NSchemeShard::TSchemeShard::TTxOperationProgress} took 4194304b of static mem, Memory{4194304 dyn 0} 2026-01-08T15:29:02.691155Z node 1 :TABLET_EXECUTOR DEBUG: Leader{72057594046644480:2:193} Tx{270, NKikimr::NSchemeShard::TSchemeShard::TTxOperationProgress} hope 1 -> done Change{192, redo 121b alter 0b annex 0, ~{ 4 } -{ }, 0 gb} 2026-01-08T15:29:02.691197Z node 1 :TABLET_EXECUTOR DEBUG: Leader{72057594046644480:2:193} Tx{270, 
NKikimr::NSchemeShard::TSchemeShard::TTxOperationProgress} release 4194304b of static, Memory{0 dyn 0} 2026-01-08T15:29:02.691369Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:56: Put Result: TEvPutResult {Id# [72057594046644480:2:193:1:24576:97:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2026-01-08T15:29:02.691411Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:56: Put Result: TEvPutResult {Id# [72057594046644480:2:193:0:0:70:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2026-01-08T15:29:02.691449Z node 1 :TABLET_EXECUTOR DEBUG: Leader{72057594046644480:2:194} commited cookie 1 for step 193 2026-01-08T15:29:02.691930Z node 1 :TABLET_EXECUTOR DEBUG: Leader{72057594046644480:2:194} Tx{271, NKikimr::NSchemeShard::TSchemeShard::TTxNotifyCompletion} queued, type NKikimr::NSchemeShard::TSchemeShard::TTxNotifyCompletion 2026-01-08T15:29:02.691983Z node 1 :TABLET_EXECUTOR DEBUG: Leader{72057594046644480:2:194} Tx{271, NKikimr::NSchemeShard::TSchemeShard::TTxNotifyCompletion} took 4194304b of static mem, Memory{4194304 dyn 0} 2026-01-08T15:29:02.692051Z node 1 :TABLET_EXECUTOR DEBUG: Leader{72057594046644480:2:194} Tx{271, NKikimr::NSchemeShard::TSchemeShard::TTxNotifyCompletion} hope 1 -> done Change{193, redo 0b alter 0b annex 0, ~{ } -{ }, 0 gb} 2026-01-08T15:29:02.692088Z node 1 :TABLET_EXECUTOR DEBUG: Leader{72057594046644480:2:194} Tx{271, NKikimr::NSchemeShard::TSchemeShard::TTxNotifyCompletion} release 4194304b of static, Memory{0 dyn 0} 2026-01-08T15:29:02.702951Z node 1 :TX_CONVEYOR ERROR: log.cpp:841: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2026-01-08T15:29:02.799453Z node 1 :TABLET_EXECUTOR DEBUG: Leader{72057594046316545:2:9} Tx{11, NKikimr::NFlatTxCoordinator::TTxCoordinator::TTxPlanStep} queued, type NKikimr::NFlatTxCoordinator::TTxCoordinator::TTxPlanStep 2026-01-08T15:29:02.799556Z node 1 :TABLET_EXECUTOR DEBUG: Leader{72057594046316545:2:9} Tx{11, NKikimr::NFlatTxCoordinator::TTxCoordinator::TTxPlanStep} took 4194304b of static mem, Memory{4194304 dyn 0} 2026-01-08T15:29:02.799813Z node 1 :TABLET_EXECUTOR DEBUG: Leader{72057594046316545:2:9} Tx{11, NKikimr::NFlatTxCoordinator::TTxCoordinator::TTxPlanStep} hope 1 -> done Change{8, redo 264b alter 0b annex 0, ~{ 0, 4, 2 } -{ }, 0 gb} 2026-01-08T15:29:02.799874Z node 1 :TABLET_EXECUTOR DEBUG: Leader{72057594046316545:2:9} Tx{11, NKikimr::NFlatTxCoordinator::TTxCoordinator::TTxPlanStep} release 4194304b of static, Memory{0 dyn 0} 2026-01-08T15:29:02.800393Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:56: Put Result: TEvPutResult {Id# [72057594046316545:2:9:1:24576:165:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2026-01-08T15:29:02.800489Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:56: Put Result: TEvPutResult {Id# [72057594046316545:2:9:0:0:69:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2026-01-08T15:29:02.800589Z node 1 :TABLET_EXECUTOR DEBUG: Leader{72057594046316545:2:10} commited cookie 1 for step 9 2026-01-08T15:29:02.801112Z node 1 :TABLET_EXECUTOR DEBUG: Leader{72057594046644480:2:194} Tx{272, NKikimr::NSchemeShard::TSchemeShard::TTxOperationPlanStep} queued, type NKikimr::NSchemeShard::TSchemeShard::TTxOperationPlanStep 2026-01-08T15:29:02.801181Z node 1 :TABLET_EXECUTOR DEBUG: Leader{72057594046644480:2:194} Tx{272, NKikimr::NSchemeShard::TSchemeShard::TTxOperationPlanStep} took 4194304b of static mem, Memory{4194304 dyn 0} 2026-01-08T15:29:02.801730Z 
node 1 :TABLET_EXECUTOR DEBUG: Leader{72057594046644480:2:194} Tx{272, NKikimr::NSchemeShard::TSchemeShard::TTxOperationPlanStep} hope 1 -> done Change{193, redo 819b alter 0b annex 0, ~{ 1, 18, 33, 27, 35, 42, 48, 4 } -{ }, 0 gb} 2026-01-08T15:29:02.801812Z node 1 :TABLET_EXECUTOR DEBUG: Leader{72057594046644480:2:194} Tx{272, NKikimr::NSchemeShard::TSchemeShard::TTxOperationPlanStep} release 4194304b of static, Memory{0 dyn 0} 2026-01-08T15:29:02.802235Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:56: Put Result: TEvPutResult {Id# [72057594046644480:2:194:1:24576:427:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2026-01-08T15:29:02.802291Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:56: Put Result: TEvPutResult {Id# [72057594046644480:2:194:0:0:70:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2026-01-08T15:29:02.802405Z node 1 :TABLET_EXECUTOR DEBUG: Leader{7205759404 ... 06969Z node 1 :TABLET_EXECUTOR DEBUG: Leader{72057594046316545:2:26} Tx{29, NKikimr::NFlatTxCoordinator::TTxCoordinator::TTxPlanStep} release 4194304b of static, Memory{0 dyn 0} 2026-01-08T15:29:05.607444Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:56: Put Result: TEvPutResult {Id# [72057594046316545:2:26:1:24576:90:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2026-01-08T15:29:05.607523Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:56: Put Result: TEvPutResult {Id# [72057594046316545:2:26:0:0:69:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2026-01-08T15:29:05.607613Z node 1 :TABLET_EXECUTOR DEBUG: Leader{72057594046316545:2:27} commited cookie 1 for step 26 2026-01-08T15:29:05.754362Z node 1 :TABLET_EXECUTOR DEBUG: Leader{72057594046316545:2:27} Tx{30, NKikimr::NFlatTxCoordinator::TTxCoordinator::TTxPlanStep} queued, type NKikimr::NFlatTxCoordinator::TTxCoordinator::TTxPlanStep 2026-01-08T15:29:05.754496Z node 1 :TABLET_EXECUTOR DEBUG: Leader{72057594046316545:2:27} Tx{30, NKikimr::NFlatTxCoordinator::TTxCoordinator::TTxPlanStep} took 4194304b of static mem, Memory{4194304 dyn 0} 2026-01-08T15:29:05.754678Z node 1 :TABLET_EXECUTOR DEBUG: Leader{72057594046316545:2:27} Tx{30, NKikimr::NFlatTxCoordinator::TTxCoordinator::TTxPlanStep} hope 1 -> done Change{26, redo 134b alter 0b annex 0, ~{ 2 } -{ }, 0 gb} 2026-01-08T15:29:05.754735Z node 1 :TABLET_EXECUTOR DEBUG: Leader{72057594046316545:2:27} Tx{30, NKikimr::NFlatTxCoordinator::TTxCoordinator::TTxPlanStep} release 4194304b of static, Memory{0 dyn 0} 2026-01-08T15:29:05.755137Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:56: Put Result: TEvPutResult {Id# [72057594046316545:2:27:1:24576:90:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2026-01-08T15:29:05.755209Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:56: Put Result: TEvPutResult {Id# [72057594046316545:2:27:0:0:69:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2026-01-08T15:29:05.755317Z node 1 :TABLET_EXECUTOR DEBUG: Leader{72057594046316545:2:28} commited cookie 1 for step 27 2026-01-08T15:29:05.905410Z node 1 :TABLET_EXECUTOR DEBUG: Leader{72057594046316545:2:28} Tx{31, NKikimr::NFlatTxCoordinator::TTxCoordinator::TTxPlanStep} queued, type NKikimr::NFlatTxCoordinator::TTxCoordinator::TTxPlanStep 2026-01-08T15:29:05.905525Z node 1 :TABLET_EXECUTOR DEBUG: Leader{72057594046316545:2:28} Tx{31, NKikimr::NFlatTxCoordinator::TTxCoordinator::TTxPlanStep} took 4194304b of static mem, Memory{4194304 dyn 0} 2026-01-08T15:29:05.905709Z node 1 :TABLET_EXECUTOR 
DEBUG: Leader{72057594046316545:2:28} Tx{31, NKikimr::NFlatTxCoordinator::TTxCoordinator::TTxPlanStep} hope 1 -> done Change{27, redo 134b alter 0b annex 0, ~{ 2 } -{ }, 0 gb} 2026-01-08T15:29:05.905773Z node 1 :TABLET_EXECUTOR DEBUG: Leader{72057594046316545:2:28} Tx{31, NKikimr::NFlatTxCoordinator::TTxCoordinator::TTxPlanStep} release 4194304b of static, Memory{0 dyn 0} 2026-01-08T15:29:05.906254Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:56: Put Result: TEvPutResult {Id# [72057594046316545:2:28:1:24576:90:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2026-01-08T15:29:05.906344Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:56: Put Result: TEvPutResult {Id# [72057594046316545:2:28:0:0:69:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2026-01-08T15:29:05.906443Z node 1 :TABLET_EXECUTOR DEBUG: Leader{72057594046316545:2:29} commited cookie 1 for step 28 2026-01-08T15:29:05.919266Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7747: Cannot get console configs 2026-01-08T15:29:05.919337Z node 1 :IMPORT WARN: schemeshard_import.cpp:440: Table profiles were not loaded 2026-01-08T15:29:05.946059Z node 1 :TABLET_EXECUTOR DEBUG: Leader{72057594037932033:2:10} Tx{18, NKikimr::NBsController::TBlobStorageController::TTxUpdateDiskMetrics} queued, type NKikimr::NBsController::TBlobStorageController::TTxUpdateDiskMetrics 2026-01-08T15:29:05.946179Z node 1 :TABLET_EXECUTOR DEBUG: Leader{72057594037932033:2:10} Tx{18, NKikimr::NBsController::TBlobStorageController::TTxUpdateDiskMetrics} took 4194304b of static mem, Memory{4194304 dyn 0} 2026-01-08T15:29:05.946289Z node 1 :TABLET_EXECUTOR DEBUG: Leader{72057594037932033:2:10} Tx{18, NKikimr::NBsController::TBlobStorageController::TTxUpdateDiskMetrics} hope 1 -> done Change{9, redo 0b alter 0b annex 0, ~{ } -{ }, 0 gb} 2026-01-08T15:29:05.946389Z node 1 :TABLET_EXECUTOR DEBUG: Leader{72057594037932033:2:10} Tx{18, NKikimr::NBsController::TBlobStorageController::TTxUpdateDiskMetrics} release 4194304b of static, Memory{0 dyn 0} 2026-01-08T15:29:06.094149Z node 1 :TABLET_EXECUTOR DEBUG: Leader{72057594046316545:2:29} Tx{32, NKikimr::NFlatTxCoordinator::TTxCoordinator::TTxPlanStep} queued, type NKikimr::NFlatTxCoordinator::TTxCoordinator::TTxPlanStep 2026-01-08T15:29:06.094265Z node 1 :TABLET_EXECUTOR DEBUG: Leader{72057594046316545:2:29} Tx{32, NKikimr::NFlatTxCoordinator::TTxCoordinator::TTxPlanStep} took 4194304b of static mem, Memory{4194304 dyn 0} 2026-01-08T15:29:06.094457Z node 1 :TABLET_EXECUTOR DEBUG: Leader{72057594046316545:2:29} Tx{32, NKikimr::NFlatTxCoordinator::TTxCoordinator::TTxPlanStep} hope 1 -> done Change{28, redo 134b alter 0b annex 0, ~{ 2 } -{ }, 0 gb} 2026-01-08T15:29:06.094524Z node 1 :TABLET_EXECUTOR DEBUG: Leader{72057594046316545:2:29} Tx{32, NKikimr::NFlatTxCoordinator::TTxCoordinator::TTxPlanStep} release 4194304b of static, Memory{0 dyn 0} 2026-01-08T15:29:06.095040Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:56: Put Result: TEvPutResult {Id# [72057594046316545:2:29:1:24576:89:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2026-01-08T15:29:06.095122Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:56: Put Result: TEvPutResult {Id# [72057594046316545:2:29:0:0:69:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2026-01-08T15:29:06.095224Z node 1 :TABLET_EXECUTOR DEBUG: Leader{72057594046316545:2:30} commited cookie 1 for step 29 2026-01-08T15:29:06.211813Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3153: StateWork, 
received event# 2146435073, Sender [0:0:0], Recipient [1:886:2765]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvCleanupTransaction 2026-01-08T15:29:06.211906Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3179: StateWork, processing event TEvPrivate::TEvCleanupTransaction 2026-01-08T15:29:06.212057Z node 1 :TABLET_EXECUTOR DEBUG: Leader{72075186224037888:1:10} Tx{19, NKikimr::NDataShard::TDataShard::TTxCleanupTransaction} queued, type NKikimr::NDataShard::TDataShard::TTxCleanupTransaction 2026-01-08T15:29:06.212141Z node 1 :TABLET_EXECUTOR DEBUG: Leader{72075186224037888:1:10} Tx{19, NKikimr::NDataShard::TDataShard::TTxCleanupTransaction} took 4194304b of static mem, Memory{4194304 dyn 0} 2026-01-08T15:29:06.212225Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:214: No cleanup at 72075186224037888 outdated step 16000 last cleanup 0 2026-01-08T15:29:06.212304Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 (dry run) active 0 active planned 0 immediate 0 planned 0 2026-01-08T15:29:06.212348Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:327: Check candidate unit PlanQueue at 72075186224037888 2026-01-08T15:29:06.212388Z node 1 :TX_DATASHARD TRACE: plan_queue_unit.cpp:59: TPlanQueueUnit at 72075186224037888 has no attached operations 2026-01-08T15:29:06.212425Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:341: Unit PlanQueue has no ready operations at 72075186224037888 2026-01-08T15:29:06.212520Z node 1 :TABLET_EXECUTOR DEBUG: Leader{72075186224037888:1:10} Tx{19, NKikimr::NDataShard::TDataShard::TTxCleanupTransaction} hope 1 -> done Change{11, redo 0b alter 0b annex 0, ~{ } -{ }, 0 gb} 2026-01-08T15:29:06.212641Z node 1 :TABLET_EXECUTOR DEBUG: Leader{72075186224037888:1:10} Tx{19, NKikimr::NDataShard::TDataShard::TTxCleanupTransaction} release 4194304b of static, Memory{0 dyn 0} 2026-01-08T15:29:06.212852Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3153: StateWork, received event# 2146435079, Sender [0:0:0], Recipient [1:886:2765]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvPeriodicWakeup 2026-01-08T15:29:06.290665Z node 1 :TABLET_EXECUTOR DEBUG: Leader{72057594046316545:2:30} Tx{33, NKikimr::NFlatTxCoordinator::TTxCoordinator::TTxPlanStep} queued, type NKikimr::NFlatTxCoordinator::TTxCoordinator::TTxPlanStep 2026-01-08T15:29:06.290785Z node 1 :TABLET_EXECUTOR DEBUG: Leader{72057594046316545:2:30} Tx{33, NKikimr::NFlatTxCoordinator::TTxCoordinator::TTxPlanStep} took 4194304b of static mem, Memory{4194304 dyn 0} 2026-01-08T15:29:06.291034Z node 1 :TABLET_EXECUTOR DEBUG: Leader{72057594046316545:2:30} Tx{33, NKikimr::NFlatTxCoordinator::TTxCoordinator::TTxPlanStep} hope 1 -> done Change{29, redo 134b alter 0b annex 0, ~{ 2 } -{ }, 0 gb} 2026-01-08T15:29:06.291100Z node 1 :TABLET_EXECUTOR DEBUG: Leader{72057594046316545:2:30} Tx{33, NKikimr::NFlatTxCoordinator::TTxCoordinator::TTxPlanStep} release 4194304b of static, Memory{0 dyn 0} 2026-01-08T15:29:06.291566Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:56: Put Result: TEvPutResult {Id# [72057594046316545:2:30:1:24576:87:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2026-01-08T15:29:06.291624Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:56: Put Result: TEvPutResult {Id# [72057594046316545:2:30:0:0:69:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2026-01-08T15:29:06.291718Z node 1 :TABLET_EXECUTOR DEBUG: Leader{72057594046316545:2:31} commited cookie 1 for step 30 2026-01-08T15:29:06.316688Z node 1 :TABLET_EXECUTOR 
DEBUG: Leader{72057594037968897:2:11} Tx{18, NKikimr::NHive::TTxUpdateTabletMetrics} queued, type NKikimr::NHive::TTxUpdateTabletMetrics 2026-01-08T15:29:06.316771Z node 1 :TABLET_EXECUTOR DEBUG: Leader{72057594037968897:2:11} Tx{18, NKikimr::NHive::TTxUpdateTabletMetrics} took 4194304b of static mem, Memory{4194304 dyn 0} 2026-01-08T15:29:06.317107Z node 1 :TABLET_EXECUTOR DEBUG: Leader{72057594037968897:2:11} Tx{18, NKikimr::NHive::TTxUpdateTabletMetrics} hope 1 -> done Change{13, redo 193b alter 0b annex 0, ~{ 1, 16, 4 } -{ }, 0 gb} 2026-01-08T15:29:06.317161Z node 1 :TABLET_EXECUTOR DEBUG: Leader{72057594037968897:2:11} Tx{18, NKikimr::NHive::TTxUpdateTabletMetrics} release 4194304b of static, Memory{0 dyn 0} 2026-01-08T15:29:06.317404Z node 1 :TABLET_EXECUTOR DEBUG: Leader{72057594037968897:2:12} Tx{19, NKikimr::NHive::TTxProcessTabletMetrics} queued, type NKikimr::NHive::TTxProcessTabletMetrics 2026-01-08T15:29:06.317476Z node 1 :TABLET_EXECUTOR DEBUG: Leader{72057594037968897:2:12} Tx{19, NKikimr::NHive::TTxProcessTabletMetrics} took 4194304b of static mem, Memory{4194304 dyn 0} 2026-01-08T15:29:06.317637Z node 1 :TABLET_EXECUTOR DEBUG: Leader{72057594037968897:2:12} Tx{19, NKikimr::NHive::TTxProcessTabletMetrics} hope 1 -> done Change{14, redo 329b alter 0b annex 0, ~{ 16 } -{ }, 0 gb} 2026-01-08T15:29:06.317756Z node 1 :TABLET_EXECUTOR DEBUG: Leader{72057594037968897:2:12} Tx{19, NKikimr::NHive::TTxProcessTabletMetrics} release 4194304b of static, Memory{0 dyn 0} 2026-01-08T15:29:06.333188Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:56: Put Result: TEvPutResult {Id# [72057594037968897:2:11:0:0:265:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2026-01-08T15:29:06.333329Z node 1 :TABLET_EXECUTOR DEBUG: Leader{72057594037968897:2:12} commited cookie 1 for step 11 --- Captured TEvCheckBlobstorageStatusResult event --- Waiting for TEvReassignTablet event... 
2026-01-08T15:29:06.474782Z node 1 :TABLET_EXECUTOR NOTICE: Leader{72075186224037888:1:10} CheckYellow current light yellow move channels: [ 0 1 ] 2026-01-08T15:29:06.474870Z node 1 :TABLET_EXECUTOR NOTICE: Leader{72075186224037888:1:10} CheckYellow reassign channels: [ 0 1 ] tablet# 72075186224037888 hive# 72057594037968897 --- Captured TEvReassignTablet event |85.7%| [TM] {BAZEL_UPLOAD} ydb/core/tx/datashard/ut_reassign/unittest >> BasicUsage::TWriteSession_WriteEncoded [GOOD] >> BasicUsage::TWriteSession_BatchingProducesContinueTokens [GOOD] >> TIndexProcesorTests::TestSingleCreateQueueEvent [GOOD] >> TIndexProcesorTests::TestReindexSingleQueue >> DataShardFollowers::FollowerRebootAfterSysCompaction [GOOD] >> DataShardFollowers::FollowerAfterSysCompaction >> TControlPlaneProxyCheckPermissionsFailed::ShouldSendListQueries [GOOD] >> TControlPlaneProxyCheckPermissionsFailed::ShouldSendDescribeQuery >> TCreateAndDropViewTest::CallDropViewOnTable [GOOD] >> TCreateAndDropViewTest::DropSameViewTwice >> DataShardBackgroundCompaction::ShouldNotCompactWhenCopyTable [GOOD] >> DataShardBackgroundCompaction::ShouldNotCompactEmptyTable >> TQuoterServiceTest::StaticMultipleAndResources [GOOD] >> TQuoterServiceTest::StaticDeadlines >> DataShardSnapshotIsolation::ReadWriteUncommittedUpsertNotBlockedByOlderVolatile [GOOD] >> DataShardSnapshotIsolation::ReadWriteUpsertOnCommitNotBlockedByOlderVolatile |85.7%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/dsproxy/ut_strategy/unittest >> DSProxyStrategyTest::Restore_block42 [GOOD] |85.7%| [TM] {BAZEL_UPLOAD} ydb/core/blobstorage/dsproxy/ut_strategy/unittest >> TControlPlaneProxyCheckPermissionsFailed::ShouldSendDescribeQuery [GOOD] >> TControlPlaneProxyCheckPermissionsFailed::ShouldSendGetQueryStatus >> TGRpcRateLimiterTest::AcquireResourceManyRequiredGrpcApi [GOOD] >> TGRpcRateLimiterTest::AcquireResourceManyRequiredActorApi >> Coordinator::LastStepSubscribe [GOOD] >> Coordinator::RestoreDomainConfiguration >> TIndexProcesorTests::TestReindexSingleQueue [GOOD] >> TIndexProcesorTests::TestDeletedQueueNotReindexed >> TControlPlaneProxyCheckPermissionsFailed::ShouldSendGetQueryStatus [GOOD] >> TControlPlaneProxyCheckPermissionsFailed::ShouldSendModifyQuery >> test.py::test[solomon-BadDownsamplingInterval-] [GOOD] >> test.py::test[solomon-Basic-default.txt] >> KeyValueGRPCService::SimpleWriteReadV1 [GOOD] >> KeyValueGRPCService::SimpleWriteReadV2 >> DescribeSchemaSecretsService::MixedGrantsInBatch [GOOD] >> DescribeSchemaSecretsService::SchemeCacheRetryErrors ------- [TM] {asan, default-linux-x86_64, release} ydb/public/sdk/cpp/src/client/persqueue_public/ut/with_offset_ranges_mode_ut/unittest >> BasicUsage::TWriteSession_BatchingProducesContinueTokens [GOOD] Test command err: 2026-01-08T15:26:07.231745Z :WriteAndReadSomeMessagesWithAsyncCompression INFO: Random seed for debugging is 1767885967231376 2026-01-08T15:26:08.294572Z node 1 :METADATA_PROVIDER WARN: log.cpp:841: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7593012417800399907:2267];send_to=[0:7307199536658146131:7762515]; 2026-01-08T15:26:08.294784Z node 1 :METADATA_PROVIDER ERROR: log.cpp:841: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2026-01-08T15:26:08.421185Z node 2 :METADATA_PROVIDER WARN: log.cpp:841: 
fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7593012416134616972:2075];send_to=[0:7307199536658146131:7762515]; 2026-01-08T15:26:08.421222Z node 2 :METADATA_PROVIDER ERROR: log.cpp:841: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2026-01-08T15:26:08.530428Z node 2 :PQ_READ_PROXY DEBUG: caching_service.cpp:44: Direct read cache: : Created 2026-01-08T15:26:08.530737Z node 1 :PQ_READ_PROXY DEBUG: caching_service.cpp:44: Direct read cache: : Created 2026-01-08T15:26:08.748173Z node 1 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: LookupError, path: Root/.metadata/script_executions 2026-01-08T15:26:08.746440Z node 2 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: LookupError, path: Root/.metadata/script_executions 2026-01-08T15:26:08.930281Z node 1 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: LookupError, path: Root/.metadata/script_executions 2026-01-08T15:26:08.976567Z node 2 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: LookupError, path: Root/.metadata/script_executions 2026-01-08T15:26:09.296295Z node 1 :TX_CONVEYOR ERROR: log.cpp:841: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2026-01-08T15:26:09.306619Z node 1 :METADATA_PROVIDER ERROR: log.cpp:841: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2026-01-08T15:26:09.474481Z node 2 :METADATA_PROVIDER ERROR: log.cpp:841: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2026-01-08T15:26:09.482598Z node 2 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: LookupError, path: Root/.metadata/script_executions 2026-01-08T15:26:09.530238Z node 2 :TX_CONVEYOR ERROR: log.cpp:841: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2026-01-08T15:26:09.636766Z node 1 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: LookupError, path: Root/.metadata/script_executions 2026-01-08T15:26:10.219156Z node 2 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: LookupError, path: Root/.metadata/script_executions 2026-01-08T15:26:10.362660Z node 1 :METADATA_PROVIDER ERROR: log.cpp:841: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2026-01-08T15:26:10.726751Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> 
Disconnected 2026-01-08T15:26:10.727517Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2026-01-08T15:26:10.734900Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2026-01-08T15:26:10.735133Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2026-01-08T15:26:10.828617Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2026-01-08T15:26:10.831161Z node 1 :HIVE WARN: hive_impl.cpp:814: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 2 Cookie 2 2026-01-08T15:26:10.881314Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2026-01-08T15:26:11.363558Z node 1 :IMPORT WARN: schemeshard_import.cpp:440: Table profiles were not loaded 2026-01-08T15:26:11.411096Z node 1 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: PathErrorUnknown, path: Root/.metadata/script_executions 2026-01-08T15:26:11.970926Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: /home/runner/.ya/build/build_root/663n/003de9/r3tmp/yandexEcIv0o.tmp 2026-01-08T15:26:11.970954Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: /home/runner/.ya/build/build_root/663n/003de9/r3tmp/yandexEcIv0o.tmp 2026-01-08T15:26:11.971095Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:202: successfully initialized from file: /home/runner/.ya/build/build_root/663n/003de9/r3tmp/yandexEcIv0o.tmp 2026-01-08T15:26:11.971171Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2026-01-08T15:26:12.080474Z node 2 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: PathErrorUnknown, path: Root/.metadata/script_executions 2026-01-08T15:26:12.155644Z INFO: TTestServer started on Port 6043 GrpcPort 19150 PQClient connected to localhost:19150 2026-01-08T15:26:12.523038Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2026-01-08T15:26:12.662563Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:123: NotifyTxCompletion, unknown transaction, txId: 281474976710659, at schemeshard: 72057594046644480 2026-01-08T15:26:13.301852Z node 1 :METADATA_PROVIDER ERROR: log.cpp:841: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7593012417800399907:2267];send_to=[0:7307199536658146131:7762515]; 2026-01-08T15:26:13.301931Z node 1 :METADATA_PROVIDER ERROR: log.cpp:841: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2026-01-08T15:26:13.422410Z node 2 :METADATA_PROVIDER ERROR: log.cpp:841: 
fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[2:7593012416134616972:2075];send_to=[0:7307199536658146131:7762515]; 2026-01-08T15:26:13.422467Z node 2 :METADATA_PROVIDER ERROR: log.cpp:841: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2026-01-08T15:26:24.632076Z node 1 :KQP_PROXY ERROR: kqp_proxy_service.cpp:1359: TraceId: "01kef3e9ay021akerjmkhbamdh", Request deadline has expired for 6.833412s seconds 2026-01-08T15:26:24.633527Z node 1 :KQP_PROXY ERROR: kqp_proxy_service.cpp:1359: TraceId: "01kef3efm2fv7hmtkzxatsda0w", Request deadline has expired for 0.484204s seconds 2026-01-08T15:26:24.664709Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7593012486519877933:2359], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:26:24.670380Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:263: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:26:24.701705Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7593012484854094068:2318], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:26:24.702088Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7593012484854094063:2315], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:26:24.703100Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:263: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:26:24.713418Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7593012484854094079:2320], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:26:24.713509Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:263: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:26:24.726361Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7593012486519877948:2362], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:26:24.759145Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7593012486519877979:2364], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:26:24.759826Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:263: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:26:24.763052Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7593012486519877982:2365], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:26:24.763805Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:263: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: ... 1 consumer shared/user session shared/user_11_1_14721764211515663056_v1 TopicId: Topic rt3.dc1--test-topic in dc dc1 in database: Root, partition 0(assignId:1) wait data in partition inited, cookie 1 from offset 4 2026-01-08T15:29:05.835274Z node 11 :PQ_READ_PROXY DEBUG: partition_actor.cpp:902: session cookie 1 consumer shared/user session shared/user_11_1_14721764211515663056_v1 after read state TopicId: Topic rt3.dc1--test-topic in dc dc1 in database: Root, partition 0(assignId:1) EndOffset 4 ReadOffset 4 ReadGuid 412cbc08-641dc1ae-e4fa7af8-a7262f2d has messages 1 2026-01-08T15:29:05.835438Z node 11 :PQ_READ_PROXY DEBUG: read_session_actor.cpp:1951: session cookie 1 consumer shared/user session shared/user_11_1_14721764211515663056_v1 read done: guid# 412cbc08-641dc1ae-e4fa7af8-a7262f2d, partition# TopicId: Topic rt3.dc1--test-topic in dc dc1 in database: Root, partition 0(assignId:1), size# 549 2026-01-08T15:29:05.835476Z node 11 :PQ_READ_PROXY DEBUG: read_session_actor.cpp:2113: session cookie 1 consumer shared/user session shared/user_11_1_14721764211515663056_v1 response to read: guid# 412cbc08-641dc1ae-e4fa7af8-a7262f2d 2026-01-08T15:29:05.835732Z node 11 :PQ_READ_PROXY DEBUG: read_session_actor.cpp:2156: session cookie 1 consumer shared/user session shared/user_11_1_14721764211515663056_v1 Process answer. Aval parts: 0 2026-01-08T15:29:05.836488Z :DEBUG: [/Root] [/Root] [222ba6b3-a71df191-749d684d-a52d6615] [null] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2026-01-08T15:29:05.836252Z node 12 :PERSQUEUE DEBUG: partition.cpp:2329: [72075186224037892][Partition][0][StateIdle] Process user action and tx events 2026-01-08T15:29:05.836289Z node 12 :PERSQUEUE DEBUG: partition.cpp:2388: [72075186224037892][Partition][0][StateIdle] Events: 0, PendingCommits: 0, PendingWrites: 0 2026-01-08T15:29:05.836305Z node 12 :PERSQUEUE DEBUG: partition.cpp:2337: [72075186224037892][Partition][0][StateIdle] Process user action and tx pending commits 2026-01-08T15:29:05.836329Z node 12 :PERSQUEUE DEBUG: partition.cpp:2388: [72075186224037892][Partition][0][StateIdle] Events: 0, PendingCommits: 0, PendingWrites: 0 2026-01-08T15:29:05.836346Z node 12 :PERSQUEUE DEBUG: partition.cpp:2347: [72075186224037892][Partition][0][StateIdle] Try persist 2026-01-08T15:29:05.837149Z node 11 :PQ_READ_PROXY DEBUG: read_session_actor.cpp:122: session cookie 1 consumer shared/user session shared/user_11_1_14721764211515663056_v1 grpc read done: success# 1, data# { read { } } 2026-01-08T15:29:05.837333Z node 11 :PQ_READ_PROXY DEBUG: read_session_actor.cpp:1850: session cookie 1 consumer shared/user session shared/user_11_1_14721764211515663056_v1 got read request: guid# 21b2e111-d2ecf62f-8ef69661-76427926 2026-01-08T15:29:05.837422Z :DEBUG: [/Root] Decompression task done. Partition/PartitionSessionId: 0 (0-3) 2026-01-08T15:29:05.837644Z :DEBUG: [/Root] Take Data. Partition 0. Read: {0, 0} (0-0) 2026-01-08T15:29:05.837719Z :DEBUG: [/Root] Take Data. Partition 0. Read: {1, 0} (1-1) 2026-01-08T15:29:05.837755Z :DEBUG: [/Root] Take Data. Partition 0. Read: {2, 0} (2-2) 2026-01-08T15:29:05.837781Z :DEBUG: [/Root] Take Data. Partition 0. Read: {2, 1} (3-3) 2026-01-08T15:29:05.837840Z :DEBUG: [/Root] [/Root] [222ba6b3-a71df191-749d684d-a52d6615] [null] The application data is transferred to the client. Number of messages 4, size 14 bytes 2026-01-08T15:29:05.838088Z :INFO: [/Root] [/Root] [222ba6b3-a71df191-749d684d-a52d6615] Closing read session. 
Close timeout: 0.000000s 2026-01-08T15:29:05.838144Z :INFO: [/Root] Read/commit by partition streams (cluster:topic:partition:stream-id:read-offset:committed-offset): null:test-topic:0:1:3:0 2026-01-08T15:29:05.838195Z :INFO: [/Root] [/Root] [222ba6b3-a71df191-749d684d-a52d6615] Counters: { Errors: 0 CurrentSessionLifetimeMs: 30 BytesRead: 14 MessagesRead: 4 BytesReadCompressed: 74 BytesInflightUncompressed: 0 BytesInflightCompressed: 0 BytesInflightTotal: 0 MessagesInflight: 0 } 2026-01-08T15:29:05.838333Z :NOTICE: [/Root] [/Root] [222ba6b3-a71df191-749d684d-a52d6615] Aborting read session. Description: SessionClosed { Status: ABORTED Issues: "
: Error: Close with zero timeout " } 2026-01-08T15:29:05.838396Z :DEBUG: [/Root] [/Root] [222ba6b3-a71df191-749d684d-a52d6615] [null] Abort session to cluster 2026-01-08T15:29:05.839558Z node 11 :PQ_READ_PROXY DEBUG: read_session_actor.cpp:122: session cookie 1 consumer shared/user session shared/user_11_1_14721764211515663056_v1 grpc read done: success# 0, data# { } 2026-01-08T15:29:05.839588Z node 11 :PQ_READ_PROXY INFO: read_session_actor.cpp:125: session cookie 1 consumer shared/user session shared/user_11_1_14721764211515663056_v1 grpc read failed 2026-01-08T15:29:05.839619Z node 11 :PQ_READ_PROXY INFO: read_session_actor.cpp:1678: session cookie 1 consumer shared/user session shared/user_11_1_14721764211515663056_v1 closed 2026-01-08T15:29:05.839706Z :NOTICE: [/Root] [/Root] [222ba6b3-a71df191-749d684d-a52d6615] Aborting read session. Description: SessionClosed { Status: ABORTED Issues: "
: Error: Aborted " } 2026-01-08T15:29:05.840048Z node 11 :PQ_READ_PROXY INFO: read_session_actor.cpp:383: session cookie 1 consumer shared/user session shared/user_11_1_14721764211515663056_v1 is DEAD 2026-01-08T15:29:05.840805Z :INFO: [/Root] MessageGroupId [test-message-group-id] SessionId [test-message-group-id|d13538ae-19fd638-63f0553d-8baf7506_0] Write session: close. Timeout = 0 ms 2026-01-08T15:29:05.840905Z :INFO: [/Root] MessageGroupId [test-message-group-id] SessionId [test-message-group-id|d13538ae-19fd638-63f0553d-8baf7506_0] Write session will now close 2026-01-08T15:29:05.840962Z :DEBUG: [/Root] MessageGroupId [test-message-group-id] SessionId [test-message-group-id|d13538ae-19fd638-63f0553d-8baf7506_0] Write session: aborting 2026-01-08T15:29:05.841139Z node 11 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer__balancing.cpp:1661: [72075186224037893][rt3.dc1--test-topic] pipe [11:7593013179860709616:2543] disconnected. 2026-01-08T15:29:05.841178Z node 11 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1672: [72075186224037893][rt3.dc1--test-topic] pipe [11:7593013179860709616:2543] disconnected; active server actors: 1 2026-01-08T15:29:05.841202Z node 11 :PERSQUEUE_READ_BALANCER NOTICE: read_balancer__balancing.cpp:1681: [72075186224037893][rt3.dc1--test-topic] pipe [11:7593013179860709616:2543] client user disconnected session shared/user_11_1_14721764211515663056_v1 2026-01-08T15:29:05.841368Z :INFO: [/Root] MessageGroupId [test-message-group-id] SessionId [test-message-group-id|d13538ae-19fd638-63f0553d-8baf7506_0] Write session: gracefully shut down, all writes complete 2026-01-08T15:29:05.841423Z :DEBUG: [/Root] MessageGroupId [test-message-group-id] SessionId [test-message-group-id|d13538ae-19fd638-63f0553d-8baf7506_0] Write session: destroy 2026-01-08T15:29:05.842289Z node 11 :PQ_WRITE_PROXY DEBUG: write_session_actor.cpp:254: session v1 cookie: 2 sessionId: test-message-group-id|d13538ae-19fd638-63f0553d-8baf7506_0 grpc read done: success: 0 data: 2026-01-08T15:29:05.842329Z node 11 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:256: session v1 cookie: 2 sessionId: test-message-group-id|d13538ae-19fd638-63f0553d-8baf7506_0 grpc read failed 2026-01-08T15:29:05.842364Z node 11 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:232: session v1 cookie: 2 sessionId: test-message-group-id|d13538ae-19fd638-63f0553d-8baf7506_0 grpc closed 2026-01-08T15:29:05.842386Z node 11 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:304: session v1 cookie: 2 sessionId: test-message-group-id|d13538ae-19fd638-63f0553d-8baf7506_0 is DEAD 2026-01-08T15:29:05.843201Z node 11 :PQ_WRITE_PROXY DEBUG: writer.cpp:562: TPartitionWriter 72075186224037892 (partition=0) Received event: NActors::TEvents::TEvPoison 2026-01-08T15:29:05.846997Z node 12 :PERSQUEUE DEBUG: pq_impl.cpp:2321: [PQ: 72075186224037892] Destroy direct read session shared/user_11_1_14721764211515663056_v1 2026-01-08T15:29:05.847063Z node 12 :PERSQUEUE DEBUG: pq_impl.cpp:2809: [PQ: 72075186224037892] server disconnected, pipe [11:7593013179860709619:2546] destroyed 2026-01-08T15:29:05.847102Z node 12 :PERSQUEUE DEBUG: pq_impl.cpp:2809: [PQ: 72075186224037892] server disconnected, pipe [11:7593013154090905545:2483] destroyed 2026-01-08T15:29:05.847146Z node 12 :PERSQUEUE DEBUG: partition_write.cpp:140: [72075186224037892][Partition][0][StateIdle] TPartition::DropOwner. 
2026-01-08T15:29:05.847193Z node 12 :PERSQUEUE DEBUG: partition.cpp:2329: [72075186224037892][Partition][0][StateIdle] Process user action and tx events 2026-01-08T15:29:05.847215Z node 12 :PERSQUEUE DEBUG: partition.cpp:2388: [72075186224037892][Partition][0][StateIdle] Events: 0, PendingCommits: 0, PendingWrites: 0 2026-01-08T15:29:05.847240Z node 12 :PERSQUEUE DEBUG: partition.cpp:2337: [72075186224037892][Partition][0][StateIdle] Process user action and tx pending commits 2026-01-08T15:29:05.847264Z node 12 :PERSQUEUE DEBUG: partition.cpp:2388: [72075186224037892][Partition][0][StateIdle] Events: 0, PendingCommits: 0, PendingWrites: 0 2026-01-08T15:29:05.847281Z node 12 :PERSQUEUE DEBUG: partition.cpp:2347: [72075186224037892][Partition][0][StateIdle] Try persist 2026-01-08T15:29:05.850263Z node 12 :PQ_READ_PROXY DEBUG: caching_service.cpp:139: Direct read cache: server session deregistered: shared/user_11_1_14721764211515663056_v1 2026-01-08T15:29:05.936397Z node 12 :PERSQUEUE DEBUG: partition.cpp:2329: [72075186224037892][Partition][0][StateIdle] Process user action and tx events 2026-01-08T15:29:05.936439Z node 12 :PERSQUEUE DEBUG: partition.cpp:2388: [72075186224037892][Partition][0][StateIdle] Events: 0, PendingCommits: 0, PendingWrites: 0 2026-01-08T15:29:05.936454Z node 12 :PERSQUEUE DEBUG: partition.cpp:2337: [72075186224037892][Partition][0][StateIdle] Process user action and tx pending commits 2026-01-08T15:29:05.936475Z node 12 :PERSQUEUE DEBUG: partition.cpp:2388: [72075186224037892][Partition][0][StateIdle] Events: 0, PendingCommits: 0, PendingWrites: 0 2026-01-08T15:29:05.936494Z node 12 :PERSQUEUE DEBUG: partition.cpp:2347: [72075186224037892][Partition][0][StateIdle] Try persist 2026-01-08T15:29:06.037378Z node 12 :PERSQUEUE DEBUG: partition.cpp:2329: [72075186224037892][Partition][0][StateIdle] Process user action and tx events 2026-01-08T15:29:06.037418Z node 12 :PERSQUEUE DEBUG: partition.cpp:2388: [72075186224037892][Partition][0][StateIdle] Events: 0, PendingCommits: 0, PendingWrites: 0 2026-01-08T15:29:06.037433Z node 12 :PERSQUEUE DEBUG: partition.cpp:2337: [72075186224037892][Partition][0][StateIdle] Process user action and tx pending commits 2026-01-08T15:29:06.037455Z node 12 :PERSQUEUE DEBUG: partition.cpp:2388: [72075186224037892][Partition][0][StateIdle] Events: 0, PendingCommits: 0, PendingWrites: 0 2026-01-08T15:29:06.037470Z node 12 :PERSQUEUE DEBUG: partition.cpp:2347: [72075186224037892][Partition][0][StateIdle] Try persist 2026-01-08T15:29:06.538517Z node 11 :KQP_EXECUTER ERROR: {KQPEX@kqp_executer_impl.h:1259} ActorId: [11:7593013184155676946:2547] TxId: 281474976715681. Ctx: { TraceId: 01kef3kj9x3jrt9rzrfeqkpmkg, Database: /Root, SessionId: ydb://session/3?node_id=11&id=YjFlN2QzMTUtNGUzNTRjNDMtMWNhNWQzOTQtMjA3Njk5ZTU=, PoolId: default, IsStreamingQuery: 0}. UNAVAILABLE: Failed to send EvStartKqpTasksRequest because node is unavailable: 12 trace_id# 2026-01-08T15:29:06.538740Z node 11 :KQP_COMPUTE ERROR: dq_compute_actor_impl.h:1264: SelfId: [11:7593013184155676950:2547], TxId: 281474976715681, task: 3. Ctx: { CheckpointId : . TraceId : 01kef3kj9x3jrt9rzrfeqkpmkg. RunScriptActorId : [0:0:0]. CustomerSuppliedId : . PoolId : default. SessionId : ydb://session/3?node_id=11&id=YjFlN2QzMTUtNGUzNTRjNDMtMWNhNWQzOTQtMjA3Njk5ZTU=. CurrentExecutionId : . DatabaseId : /Root. Database : /Root. }. Handle abort execution event from: [11:7593013184155676946:2547], status: UNAVAILABLE, reason: {
: Error: Terminate execution } |85.7%| [TM] {BAZEL_UPLOAD} ydb/public/sdk/cpp/src/client/persqueue_public/ut/with_offset_ranges_mode_ut/unittest >> TControlPlaneProxyCheckPermissionsFailed::ShouldSendModifyQuery [GOOD] >> TControlPlaneProxyCheckPermissionsFailed::ShouldSendDeleteQuery >> test_quoting.py::TestSqsQuotingWithKesus::test_properly_creates_and_deletes_queue[tables_format_v0-std] >> test_quoting.py::TestSqsQuotingWithLocalRateLimiter::test_send_message_rate[tables_format_v0] >> test_fifo_messaging.py::TestSqsFifoMessagingWithTenant::test_crutch_groups_selection_algorithm_selects_second_group_batch[tables_format_v1] >> test_fifo_messaging.py::TestSqsFifoMessagingWithPath::test_write_read_delete_many_groups[tables_format_v0] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_does_not_change_visibility_for_deleted_message[tables_format_v1-std] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_validates_message_body[tables_format_v0] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_create_q_twice[tables_format_v0-fifo] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_zero_visibility_timeout_works[tables_format_v0-fifo] >> test_fifo_messaging.py::TestSqsFifoMessagingWithTenant::test_queue_attributes[tables_format_v1] >> test_fifo_messaging.py::TestSqsFifoMessagingWithPath::test_validates_group_id[tables_format_v0] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_delay_one_message[tables_format_v0-fifo] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_send_message[tables_format_v0-std] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_send_and_read_message[tables_format_v1-fifo] >> TControlPlaneProxyCheckPermissionsFailed::ShouldSendDeleteQuery [GOOD] >> TControlPlaneProxyCheckPermissionsFailed::ShouldSendControlQuery |85.7%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/sqs/with_quotas/py3test |85.7%| [TM] {BAZEL_UPLOAD} ydb/tests/functional/sqs/with_quotas/py3test |85.7%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/sqs/with_quotas/py3test |85.7%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/sqs/with_quotas/py3test |85.7%| [TM] {BAZEL_UPLOAD} ydb/tests/functional/sqs/with_quotas/py3test >> TCloudEventsProcessorTests::TestCreateCloudEventProcessor [GOOD] |85.7%| [TM] {BAZEL_UPLOAD} ydb/tests/functional/sqs/with_quotas/py3test |85.7%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/sqs/with_quotas/py3test |85.7%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/sqs/with_quotas/py3test |85.7%| [TM] {BAZEL_UPLOAD} ydb/tests/functional/sqs/with_quotas/py3test |85.7%| [TM] {BAZEL_UPLOAD} ydb/tests/functional/sqs/with_quotas/py3test |85.7%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/sqs/with_quotas/py3test |85.7%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/sqs/with_quotas/py3test |85.7%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/sqs/with_quotas/py3test |85.7%| [TM] {BAZEL_UPLOAD} ydb/tests/functional/sqs/with_quotas/py3test |85.7%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/sqs/with_quotas/py3test |85.7%| [TM] {BAZEL_UPLOAD} ydb/tests/functional/sqs/with_quotas/py3test |85.7%| [TM] {BAZEL_UPLOAD} ydb/tests/functional/sqs/with_quotas/py3test |85.7%| [TM] {BAZEL_UPLOAD} ydb/tests/functional/sqs/with_quotas/py3test >> TControlPlaneProxyCheckPermissionsFailed::ShouldSendControlQuery [GOOD] >> 
TControlPlaneProxyCheckPermissionsFailed::ShouldSendGetResultData >> TFetchRequestTests::CDC [GOOD] >> TFetchRequestTests::SmallBytesRead >> ProtoTests::CreateQueueFiller [GOOD] >> ProtoTests::UpdateQueueFiller [GOOD] >> ProtoTests::DeleteQueueFiller [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_empty_queue_url[tables_format_v0] >> TControlPlaneProxyCheckPermissionsFailed::ShouldSendGetResultData [GOOD] >> TControlPlaneProxyCheckPermissionsFailed::ShouldSendListJobs >> TQuoterServiceTest::StaticDeadlines [GOOD] >> QuoterWithKesusTest::ForbidsNotCanonizedQuoterPath >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_multi_read_dont_stall[tables_format_v1] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_set_very_big_visibility_timeout[tables_format_v1] >> TControlPlaneProxyCheckPermissionsFailed::ShouldSendListJobs [GOOD] >> TControlPlaneProxyCheckPermissionsFailed::ShouldSendDescribeJob >> TMemoryController::SharedCache [GOOD] >> TMemoryController::SharedCache_ConfigLimit ------- [TS] {asan, default-linux-x86_64, release} ydb/core/ymq/actor/cloud_events/cloud_events_ut/unittest >> ProtoTests::DeleteQueueFiller [GOOD] Test command err: 2026-01-08T15:29:01.756855Z node 1 :METADATA_PROVIDER WARN: log.cpp:841: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7593013162234666782:2142];send_to=[0:7307199536658146131:7762515]; 2026-01-08T15:29:01.757192Z node 1 :METADATA_PROVIDER ERROR: log.cpp:841: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2026-01-08T15:29:01.958813Z node 1 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: LookupError, path: Root/.metadata/script_executions 2026-01-08T15:29:01.980130Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2026-01-08T15:29:01.980264Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2026-01-08T15:29:02.037653Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2026-01-08T15:29:02.038208Z node 1 :IMPORT WARN: schemeshard_import.cpp:440: Table profiles were not loaded 2026-01-08T15:29:02.039800Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1374: Notification cookie mismatch for subscription [1:7593013162234666678:2081] 1767886141752442 != 1767886141752445 2026-01-08T15:29:02.116668Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2026-01-08T15:29:02.116707Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2026-01-08T15:29:02.116718Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2026-01-08T15:29:02.116831Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2026-01-08T15:29:02.139162Z node 1 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: PathErrorUnknown, path: Root/.metadata/script_executions 2026-01-08T15:29:02.287443Z node 1 :FLAT_TX_SCHEMESHARD WARN: 
schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2026-01-08T15:29:02.766825Z node 1 :TX_CONVEYOR ERROR: log.cpp:841: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2026-01-08T15:29:04.581440Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7593013175119569511:2323], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:29:04.581592Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:263: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:29:04.581747Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7593013175119569519:2326], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:29:04.582051Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7593013175119569525:2327], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:29:04.582130Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:263: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:29:04.586289Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710660:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2026-01-08T15:29:04.596809Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:1, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:29:04.604232Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7593013175119569526:2328], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710660 completed, doublechecking } 2026-01-08T15:29:04.740265Z node 1 :TX_PROXY ERROR: schemereq.cpp:624: Actor# [1:7593013175119569664:2629] txid# 281474976710662, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 45], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } ===Execute query: UPSERT INTO`/Root/SQS/CreateCloudEventProcessor/.CloudEventsYmq` (CreatedAt,Id,QueueName,Type,CloudId,FolderId,ResourceId,UserSID,MaskedToken,AuthType,PeerName,RequestId,Labels)VALUES(1767886145774,6427010455965490393,'queue1','CreateMessageQueue','cloud1','folder1','/Root/sqs/folder/queue1','username','maskedToken123','authtype','localhost:8000','req1','{"k1" : "v1"}'); End execute query=== ===Execute query: UPSERT INTO`/Root/SQS/CreateCloudEventProcessor/.CloudEventsYmq` (CreatedAt,Id,QueueName,Type,CloudId,FolderId,ResourceId,UserSID,MaskedToken,AuthType,PeerName,RequestId,Labels)VALUES(1767886145917,17720312889752869786,'queue1','UpdateMessageQueue','cloud1','folder1','/Root/sqs/folder/queue1','username','maskedToken123','authtype','localhost:8000','req1','{"k1" : "v1"}'); End execute query=== ===Execute query: UPSERT INTO`/Root/SQS/CreateCloudEventProcessor/.CloudEventsYmq` (CreatedAt,Id,QueueName,Type,CloudId,FolderId,ResourceId,UserSID,MaskedToken,AuthType,PeerName,RequestId,Labels)VALUES(1767886146037,16889276051852881242,'queue1','DeleteMessageQueue','cloud1','folder1','/Root/sqs/folder/queue1','username','maskedToken123','authtype','localhost:8000','req1','{"k1" : "v1"}'); End execute query=== 2026-01-08T15:29:06.764658Z node 1 :METADATA_PROVIDER ERROR: log.cpp:841: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7593013162234666782:2142];send_to=[0:7307199536658146131:7762515]; 2026-01-08T15:29:06.769588Z node 1 :METADATA_PROVIDER ERROR: log.cpp:841: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2026-01-08T15:29:02.027628Z: component=schemeshard, tx_id=281474976715657, remote_address={none}, subject=metadata@system, sanitized_token={none}, database=/Root, operation=CREATE DIRECTORY, paths=[/Root/.sys], status=SUCCESS, detailed_status=StatusAccepted 2026-01-08T15:29:02.028572Z: component=schemeshard, tx_id=281474976715658, remote_address={none}, subject=metadata@system, sanitized_token={none}, database=/Root, operation=CREATE SYSTEM VIEW, paths=[/Root/.sys/auth_permissions], status=SUCCESS, detailed_status=StatusAccepted 2026-01-08T15:29:02.028858Z: component=schemeshard, tx_id=281474976715659, remote_address={none}, subject=metadata@system, sanitized_token={none}, database=/Root, operation=CREATE SYSTEM VIEW, paths=[/Root/.sys/auth_effective_permissions], status=SUCCESS, detailed_status=StatusAccepted 2026-01-08T15:29:02.029012Z: component=schemeshard, tx_id=281474976715660, remote_address={none}, subject=metadata@system, sanitized_token={none}, database=/Root, operation=CREATE SYSTEM VIEW, paths=[/Root/.sys/auth_users], status=SUCCESS, detailed_status=StatusAccepted 2026-01-08T15:29:02.029158Z: component=schemeshard, tx_id=281474976715661, remote_address={none}, subject=metadata@system, sanitized_token={none}, database=/Root, operation=CREATE SYSTEM VIEW, paths=[/Root/.sys/pg_tables], status=SUCCESS, detailed_status=StatusAccepted 2026-01-08T15:29:02.029285Z: component=schemeshard, tx_id=281474976715662, 
remote_address={none}, subject=metadata@system, sanitized_token={none}, database=/Root, operation=CREATE SYSTEM VIEW, paths=[/Root/.sys/ds_pdisks], status=SUCCESS, detailed_status=StatusAccepted 2026-01-08T15:29:02.029437Z: component=schemeshard, tx_id=281474976715663, remote_address={none}, subject=metadata@system, sanitized_token={none}, database=/Root, operation=CREATE SYSTEM VIEW, paths=[/Root/.sys/auth_group_members], status=SUCCESS, detailed_status=StatusAccepted 2026-01-08T15:29:02.029617Z: component=schemeshard, tx_id=281474976715664, remote_address={none}, subject=metadata@system, sanitized_token={none}, database=/Root, operation=CREATE SYSTEM VIEW, paths=[/Root/.sys/top_partitions_one_minute], status=SUCCESS, detailed_status=StatusAccepted 2026-01-08T15:29:02.029769Z: component=schemeshard, tx_id=281474976715665, remote_address={none}, subject=metadata@system, sanitized_token={none}, database=/Root, operation=CREATE SYSTEM VIEW, paths=[/Root/.sys/top_queries_by_request_units_one_hour], status=SUCCESS, detailed_status=StatusAccepted 2026-01-08T15:29:02.029900Z: component=schemeshard, tx_id=281474976715666, remote_address={none}, subject=metadata@system, sanitized_token={none}, database=/Root, operation=CREATE SYSTEM VIEW, paths=[/Root/.sys/ds_vslots], status=SUCCESS, detailed_status=StatusAccepted 2026-01-08T15:29:02.030023Z: component=schemeshard, tx_id=281474976715667, remote_address={none}, subject=metadata@system, sanitized_token={none}, database=/Root, operation=CREATE SYSTEM VIEW, paths=[/Root/.sys/top_partitions_by_tli_one_hour], status=SUCCESS, detailed_status=StatusAccepted 2026-01-08T15:29:02.030211Z: component=schemeshard, tx_id=281474976715668, remote_address={none}, subject=metadata@system, sanitized_token={none}, database=/Root, operation=CREATE SYSTEM VIEW, paths=[/Root/.sys/nodes], status=SUCCESS, detailed_status=StatusAccepted 2026-01-08T15:29:02.030371Z: component=schemeshard, tx_id=281474976715669, remote_address={none}, subject=metadata@system, sanitized_token={none}, database=/Root, operation=CREATE SYSTEM VIEW, paths=[/Root/.sys/top_queries_by_cpu_time_one_hour], status=SUCCESS, detailed_status=StatusAccepted 2026-01-08T15:29:02.030499Z: component=schemeshard, tx_id=281474976715670, remote_address={none}, subject=metadata@system, sanitized_token={none}, database=/Root, operation=CREATE SYSTEM VIEW, paths=[/Root/.sys/top_queries_by_read_bytes_one_hour], status=SUCCESS, detailed_status=StatusAccepted 2026-01-08T15:29:02.030614Z: component=schemeshard, tx_id=281474976715671, remote_address={none}, subject=metadata@system, sanitized_token={none}, database=/Root, operation=CREATE SYSTEM VIEW, paths=[/Root/.sys/top_queries_by_read_bytes_one_minute], status=SUCCESS, detailed_status=StatusAccepted 2026-01-08T15:29:02.030743Z: component=schemeshard, tx_id=281474976715672, remote_address={none}, subject=metadata@system, sanitized_token={none}, database=/Root, operation=CREATE SYSTEM VIEW, paths=[/Root/.sys/streaming_queries], status=SUCCESS, detailed_status=StatusAccepted 2026-01-08T15:29:02.030868Z: component=schemeshard, tx_id=281474976715673, remote_address={none}, subject=metadata@system, sanitized_token={none}, database=/Root, operation=CREATE SYSTEM VIEW, paths=[/Root/.sys/hive_tablets], status=SUCCESS, detailed_status=StatusAccepted 2026-01-08T15:29:02.030985Z: component=schemeshard, tx_id=281474976715674, remote_address={none}, subject=metadata@system, sanitized_token={none}, database=/Root, operation=CREATE SYSTEM VIEW, paths=[/Root/.sys/ds_storage_pools], 
status=SUCCESS, detailed_status=StatusAccepted 2026-01-08T15:29:02.031112Z: component=schemeshard, tx_id=281474976715675, remote_address={none}, subject=metadata@system, sanitized_token={none}, database=/Root, operation=CREATE SYSTEM VIEW, paths=[/Root/.sys/top_partitions_by_tli_one_minute], status=SUCCESS, detailed_status=StatusAccepted 2026-01-08T15:29:02.031225Z: component=schemeshard, tx_id=281474976715676, remote_address={none}, subject=metadata@system, sanitized_token={none}, database=/Root, operation=CREATE SYSTEM VIEW, paths=[/Root/.sys/resource_pool_classifiers], status=SUCCESS, detailed_status=StatusAccepted 2026-01-08T15:29:02.031342Z: component=schemeshard, tx_id=281474976715677, remote_address={none}, subject=metadata@system, sanitized_token={none}, database=/Root, operation=CREATE SYSTEM VIEW, paths=[/Root/.sys/compile_cache_queries], status=SUCCESS, detailed_status=StatusAccepted 2026-01-08T15:29:02.032076Z: component=schemeshard, tx_id=281474976715678, remote_address={none}, subject=metadata@system, sanitized_token={none}, database=/Root, operation=CREATE SYSTEM VIEW, paths=[/Root/.sys/auth_owners], status=SUCCESS, detailed_status=StatusAccepted 2026-01-08T15:29:02.032271Z: component=schemeshard, tx_id=281474976715679, remote_address={none}, subject=metadata@system, sanitized_token={none}, database=/Root, operation=CREATE SYSTEM VIEW, paths=[/Root/.sys/query_sessions], status=SUCCESS, detailed_status=StatusAccepted 2026-01-08T15:29:02.032401Z: component=schemeshard, tx_id=281474976715680, remote_address={none}, subject=metadata@system, sanitized_token={none}, database=/Root, operation=CREATE SYSTEM VIEW, paths=[/Root/.sys/top_queries_by_duration_one_minute], status=SUCCESS, detailed_status=StatusAccepted 2026-01-08T15:29:02.032505Z: component=schemeshard, tx_id=281474976715681, remote_address={none}, subject=metadata@system, sanitized_token={none}, database=/Root, operation=CREATE SYSTEM VIEW, paths=[/Root/.sys/pg_class], status=SUCCESS, detailed_status=StatusAccepted 2026-01-08T15:29:02.032628Z: component=schemeshard, tx_id=281474976715682, remote_address={none}, subject=metadata@system, sanitized_token={none}, database=/Root, operation=CREATE SYSTEM VIEW, paths=[/Root/.sys/tables], status=SUCCESS, detailed_status=StatusAccepted 2026-01-08T15:29:02.032761Z: component=schemeshard, tx_id=281474976715683, remote_address={none}, subject=metadata@system, sanitized_token={none}, database=/Root, operation=CREATE SYSTEM VIEW, paths=[/Root/.sys/top_queries_by_cpu_time_one_minute], status=SUCCESS, detailed_status=StatusAccepted 2026-01-08T15:29:02.032910Z: component=schemeshard, tx_id=281474976715684, remote_address={none}, subject=metadata@system, sanitized_token={none}, database=/Root, operation=CREATE SYSTEM VIEW, paths=[/Root/.sys/top_queries_by_request_units_one_minute], status=SUCCESS, detailed_status=StatusAccepted 2026-01-08T15:29:02.033013Z: component=schemeshard, tx_id=281474976715685, remote_address={none}, subject=metadata@system, sanitized_token={none}, database=/Root, operation=CREATE SYSTEM VIEW, paths=[/Root/.sys/partition_stats], status=SUCCESS, detailed_status=StatusAccepted 2026-01-08T15:29:02.033113Z: component=schemeshard, tx_id=281474976715686, remote_address={none}, subject=metadata@system, sanitized_token={none}, database=/Root, operation=CREATE SYSTEM VIEW, paths=[/Root/.sys/ds_groups], status=SUCCESS, detailed_status=StatusAccepted 2026-01-08T15:29:02.033263Z: component=schemeshard, tx_id=281474976715687, remote_address={none}, subject=metadata@system, 
sanitized_token={none}, database=/Root, operation=CREATE SYSTEM VIEW, paths=[/Root/.sys/auth_groups], status=SUCCESS, detailed_status=StatusAccepted 2026-01-08T15:29:02.033382Z: component=schemeshard, tx_id=281474976715688, remote_address={none}, subject=metadata@system, sanitized_token={none}, database=/Root, operation=CREATE SYSTEM VIEW, paths=[/Root/.sys/resource_pools], status=SUCCESS, detailed_status=StatusAccepted 2026-01-08T15:29:02.033480Z: component=schemeshard, tx_id=281474976715689, remote_address={none}, subject=metadata@system, sanitized_token={none}, database=/Root, operation=CREATE SYSTEM VIEW, paths=[/Root/.sys/query_metrics_one_minute], status=SUCCESS, detailed_status=StatusAccepted 2026-01-08T15:29:02.033624Z: component=schemeshard, tx_id=281474976715690, remote_address={none}, subject=metadata@system, sanitized_token={none}, database=/Root, operation=CREATE SYSTEM VIEW, paths=[/Root/.sys/ds_storage_stats], status=SUCCESS, detailed_status=StatusAccepted 2026-01-08T15:29:02.033768Z: component=schemeshard, tx_id=281474976715691, remote_address={none}, subject=metadata@system, sanitized_token={none}, database=/Root, operation=CREATE SYSTEM VIEW, paths=[/Root/.sys/top_partitions_one_hour], status=SUCCESS, detailed_status=StatusAccepted 2026-01-08T15:29:02.033896Z: component=schemeshard, tx_id=281474976715692, remote_address={none}, subject=metadata@system, sanitized_token={none}, database=/Root, operation=CREATE SYSTEM VIEW, paths=[/Root/.sys/top_queries_by_duration_one_hour], status=SUCCESS, detailed_status=StatusAccepted 2026-01-08T15:29:02.288003Z: component=schemeshard, tx_id=281474976710657, remote_address={none}, subject={none}, sanitized_token={none}, database={none}, operation=ALTER DATABASE, paths=[//Root], status=SUCCESS, detailed_status=StatusAccepted 2026-01-08T15:29:02.294403Z: component=schemeshard, tx_id=281474976710658, remote_address={none}, subject={none}, sanitized_token={none}, database=/Root, operation=CREATE DIRECTORY, paths=[/Root/SQS], status=SUCCESS, detailed_status=StatusAccepted 2026-01-08T15:29:02.525169Z: component=schemeshard, tx_id=281474976710659, remote_address={none}, subject={none}, sanitized_token={none}, database=/Root, operation=CREATE DIRECTORY, paths=[/Root/SQS/Root/SQS/CreateCloudEventProcessor], status=SUCCESS, detailed_status=StatusAccepted 2026-01-08T15:29:04.587596Z: component=schemeshard, tx_id=281474976710660, remote_address={none}, subject=metadata@system, sanitized_token={none}, database=/Root, operation=CREATE RESOURCE POOL, paths=[.metadata/workload_manager/pools/default], status=SUCCESS, detailed_status=StatusAccepted, new_owner=metadata@system, acl_add=[+(SR|DS):all-users@well-known, +(SR|DS):root@builtin] 2026-01-08T15:29:04.599133Z: component=schemeshard, tx_id=281474976710661, remote_address=::1, subject={none}, sanitized_token={none}, database=/Root, operation=CREATE TABLE, paths=[/Root/SQS/CreateCloudEventProcessor/.CloudEventsYmq], status=SUCCESS, detailed_status=StatusAccepted 2026-01-08T15:29:04.739852Z: component=schemeshard, tx_id=281474976710662, remote_address={none}, subject=metadata@system, sanitized_token={none}, database=/Root, operation=CREATE RESOURCE POOL, paths=[default], status=SUCCESS, detailed_status=StatusAlreadyExists, reason=Check failed: path: '/Root/.metadata/workload_manager/pools/default', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 45], type: EPathTypeResourcePool, state: EPathStateNoChanges), new_owner=metadata@system, 
acl_add=[+(SR|DS):all-users@well-known, +(SR|DS):root@builtin] 2026-01-08T15:29:07.257474Z: component=ymq, id=6427010455965490393$CreateMessageQueue$2026-01-08T15:29:07.257141Z, operation=CreateMessageQueue, status=SUCCESS, remote_address=localhost:8000, subject=username, masked_token=maskedToken123, auth_type=authtype, permission=ymq.queues.create, created_at=2026-01-08T15:29:05.774000Z, cloud_id=cloud1, folder_id=folder1, resource_id=/Root/sqs/folder/queue1, request_id=req1, idempotency_id=6427010455965490393$CreateMessageQueue$2026-01-08T15:29:05.774000Z, queue=queue1, labels={"k1" : "v1"} 2026-01-08T15:29:07.257693Z: component=ymq, id=17720312889752869786$UpdateMessageQueue$2026-01-08T15:29:07.257214Z, operation=UpdateMessageQueue, status=SUCCESS, remote_address=localhost:8000, subject=username, masked_token=maskedToken123, auth_type=authtype, permission=ymq.queues.setAttributes, created_at=2026-01-08T15:29:05.917000Z, cloud_id=cloud1, folder_id=folder1, resource_id=/Root/sqs/folder/queue1, request_id=req1, idempotency_id=17720312889752869786$UpdateMessageQueue$2026-01-08T15:29:05.917000Z, queue=queue1, labels={"k1" : "v1"} 2026-01-08T15:29:07.257817Z: component=ymq, id=16889276051852881242$DeleteMessageQueue$2026-01-08T15:29:07.257277Z, operation=DeleteMessageQueue, status=SUCCESS, remote_address=localhost:8000, subject=username, masked_token=maskedToken123, auth_type=authtype, permission=ymq.queues.delete, created_at=2026-01-08T15:29:06.037000Z, cloud_id=cloud1, folder_id=folder1, resource_id=/Root/sqs/folder/queue1, request_id=req1, idempotency_id=16889276051852881242$DeleteMessageQueue$2026-01-08T15:29:06.037000Z, queue=queue1, labels={"k1" : "v1"} |85.7%| [TS] {BAZEL_UPLOAD} ydb/core/ymq/actor/cloud_events/cloud_events_ut/unittest >> test_quoting.py::TestSqsQuotingWithLocalRateLimiter::test_does_actions_with_queue[tables_format_v0-fifo] >> DataShardBackgroundCompaction::ShouldNotCompactEmptyTable [GOOD] >> DataShardBackgroundCompaction::ShouldNotCompactSecondTime >> TControlPlaneProxyCheckPermissionsFailed::ShouldSendDescribeJob [GOOD] >> TControlPlaneProxyCheckPermissionsFailed::ShouldSendCreateConnection >> TIndexProcesorTests::TestDeletedQueueNotReindexed [GOOD] >> TIndexProcesorTests::TestManyMessages >> Coordinator::RestoreDomainConfiguration [GOOD] >> Coordinator::RestoreTenantConfiguration-AlterDatabaseCreateHiveFirst-false >> TControlPlaneProxyCheckPermissionsFailed::ShouldSendCreateConnection [GOOD] >> TControlPlaneProxyCheckPermissionsFailed::ShouldSendCreateConnectionWithServiceAccount >> TTxDataShardLocalKMeansScan::TooManyClusters [GOOD] >> TTxDataShardLocalKMeansScan::MainToPosting >> TGRpcRateLimiterTest::AcquireResourceManyRequiredActorApi [GOOD] >> TGRpcRateLimiterTest::AcquireResourceManyRequiredGrpcApiWithCancelAfter >> DataShardFollowers::FollowerAfterSysCompaction [GOOD] >> DataShardFollowers::FollowerAfterDataCompaction >> TControlPlaneProxyCheckPermissionsFailed::ShouldSendCreateConnectionWithServiceAccount [GOOD] >> TControlPlaneProxyCheckPermissionsFailed::ShouldSendListConnections >> test_quoting.py::TestSqsQuotingWithLocalRateLimiter::test_create_queue_rate[tables_format_v1] >> TCreateAndDropViewTest::DropSameViewTwice [GOOD] >> TCreateAndDropViewTest::DropViewIfExists >> TControlPlaneProxyCheckPermissionsFailed::ShouldSendListConnections [GOOD] >> TControlPlaneProxyCheckPermissionsFailed::ShouldSendDescribeConnection >> DataShardSnapshotIsolation::ReadWriteUpsertOnCommitNotBlockedByOlderVolatile [GOOD] >> 
DataShardSnapshotIsolation::ReadWriteUncommittedInsertBlockedByOlderVolatile >> test.py::test[solomon-Basic-default.txt] [GOOD] >> test.py::test[solomon-BasicExtractMembers-default.txt] >> KeyValueGRPCService::SimpleWriteReadV2 [GOOD] >> KeyValueGRPCService::SimpleWriteReadWithIncorreectPathV1 >> TControlPlaneProxyCheckPermissionsFailed::ShouldSendDescribeConnection [GOOD] >> TControlPlaneProxyCheckPermissionsFailed::ShouldSendModifyConnection >> TStateStorageConfig::UniformityTest [GOOD] >> TStateStorageConfigCompareWithOld::TestReplicaActorIdAndSelectionIsSame1 >> TDescriberTests::TopicNotExists [GOOD] >> TDescriberTests::TopicNotTopic >> TMLPConsumerTests::HtmlApp_BadConsumer [GOOD] >> TMLPConsumerTests::HtmlApp_BadPartition >> TControlPlaneProxyCheckPermissionsFailed::ShouldSendModifyConnection [GOOD] >> TControlPlaneProxyCheckPermissionsFailed::ShouldSendModifyConnectionWithServiceAccount >> TIndexProcesorTests::TestManyMessages [GOOD] >> TIndexProcesorTests::TestOver1000Queues >> QuoterWithKesusTest::ForbidsNotCanonizedQuoterPath [GOOD] >> QuoterWithKesusTest::ForbidsNotCanonizedResourcePath >> TControlPlaneProxyCheckPermissionsFailed::ShouldSendModifyConnectionWithServiceAccount [GOOD] >> TControlPlaneProxyCheckPermissionsFailed::ShouldSendDeleteConnection >> TControlPlaneProxyCheckPermissionsFailed::ShouldSendDeleteConnection [GOOD] >> TControlPlaneProxyCheckPermissionsFailed::ShouldSendTestConnection >> TMemoryController::SharedCache_ConfigLimit [GOOD] >> TMemoryController::MemTable >> DataShardBackgroundCompaction::ShouldNotCompactSecondTime [GOOD] >> TControlPlaneProxyCheckPermissionsFailed::ShouldSendTestConnection [GOOD] >> TControlPlaneProxyCheckPermissionsFailed::ShouldSendTestConnectionWithServiceAccount >> TControlPlaneProxyCheckPermissionsFailed::ShouldSendTestConnectionWithServiceAccount [GOOD] >> TControlPlaneProxyCheckPermissionsFailed::ShouldSendCreateBinding ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/datashard/ut_background_compaction/unittest >> DataShardBackgroundCompaction::ShouldNotCompactSecondTime [GOOD] Test command err: ... waiting for SysViewsRoster update finished 2026-01-08T15:28:56.322382Z node 1 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:125: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, got undelivered to scheme cache: ActorUnknown 2026-01-08T15:28:56.437285Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:114:2161], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2026-01-08T15:28:56.437628Z node 1 :METADATA_PROVIDER ERROR: log.cpp:841: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2026-01-08T15:28:56.437768Z node 1 :METADATA_PROVIDER ERROR: log.cpp:841: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2026-01-08T15:28:56.694037Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2026-01-08T15:28:56.694185Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2026-01-08T15:28:56.771604Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1374: Notification cookie mismatch for subscription [1:34:2081] 1767886134219403 != 1767886134219407 2026-01-08T15:28:56.791202Z node 1 :IMPORT WARN: schemeshard_import.cpp:440: Table profiles were not loaded 2026-01-08T15:28:56.836457Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2026-01-08T15:28:56.926421Z node 1 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: PathErrorUnknown, path: Root/.metadata/script_executions ... waiting for SysViewsRoster update finished (done) 2026-01-08T15:28:57.203119Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2026-01-08T15:28:57.216421Z node 1 :TX_CONVEYOR ERROR: log.cpp:841: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2026-01-08T15:28:57.316352Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:314: actor# [1:68:2115] Handle TEvProposeTransaction 2026-01-08T15:28:57.316418Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:237: actor# [1:68:2115] TxId# 281474976715657 ProcessProposeTransaction 2026-01-08T15:28:57.316530Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:256: actor# [1:68:2115] Cookie# 0 userReqId# "" txid# 281474976715657 SEND to# [1:861:2744] 2026-01-08T15:28:57.478414Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1822: Actor# [1:861:2744] txid# 281474976715657 Bootstrap EvSchemeRequest record: Transaction { ModifyScheme { WorkingDir: "/Root" OperationType: ESchemeOpCreateTable CreateTable { Name: "table-1" Columns { Name: "key" Type: "Uint32" FamilyName: "" NotNull: false } Columns { Name: "value" Type: "Uint32" FamilyName: "" NotNull: false } KeyColumnNames: "key" UniformPartitionsCount: 1 } } } ExecTimeoutPeriod: 18446744073709551615 2026-01-08T15:28:57.478556Z node 1 :TX_PROXY DEBUG: schemereq.cpp:647: Actor# [1:861:2744] txid# 281474976715657 Bootstrap, UserSID: CheckAdministrator: 0 CheckDatabaseAdministrator: 0 2026-01-08T15:28:57.479174Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1887: Actor# [1:861:2744] txid# 281474976715657 HANDLE EvNavigateKeySetResult TFlatSchemeReq marker# P6 2026-01-08T15:28:57.479273Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1877: Actor# [1:861:2744] txid# 281474976715657 
TEvNavigateKeySet requested from SchemeCache 2026-01-08T15:28:57.479606Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1631: Actor# [1:861:2744] txid# 281474976715657 HANDLE EvNavigateKeySetResult TFlatSchemeReq marker# P5 ErrorCount# 0 2026-01-08T15:28:57.479734Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1666: Actor# [1:861:2744] HANDLE EvNavigateKeySetResult, txid# 281474976715657 shardToRequest# 72057594046644480 DomainKey# [OwnerId: 72057594046644480, LocalPathId: 1] DomainInfo.Params# Version: 1 PlanResolution: 500 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 RedirectRequired# true 2026-01-08T15:28:57.479871Z node 1 :TX_PROXY DEBUG: schemereq.cpp:117: Actor# [1:861:2744] txid# 281474976715657 SEND to# 72057594046644480 shardToRequest {TEvModifySchemeTransaction txid# 281474976715657 TabletId# 72057594046644480} 2026-01-08T15:28:57.481823Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:28:57.482394Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1533: Actor# [1:861:2744] txid# 281474976715657 HANDLE EvClientConnected 2026-01-08T15:28:57.483062Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1555: Actor# [1:861:2744] txid# 281474976715657 Status StatusAccepted HANDLE {TEvModifySchemeTransactionResult Status# StatusAccepted txid# 281474976715657} 2026-01-08T15:28:57.483147Z node 1 :TX_PROXY DEBUG: schemereq.cpp:627: Actor# [1:861:2744] txid# 281474976715657 SEND to# [1:829:2724] Source {TEvProposeTransactionStatus txid# 281474976715657 Status# 53} 2026-01-08T15:28:57.515356Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3119: StateInit, received event# 268828672, Sender [1:877:2759], Recipient [1:886:2765]: NKikimr::TEvTablet::TEvBoot 2026-01-08T15:28:57.516425Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3119: StateInit, received event# 268828673, Sender [1:877:2759], Recipient [1:886:2765]: NKikimr::TEvTablet::TEvRestored 2026-01-08T15:28:57.516765Z node 1 :TX_DATASHARD INFO: datashard.cpp:374: TDataShard::OnActivateExecutor: tablet 72075186224037888 actor [1:886:2765] 2026-01-08T15:28:57.517040Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:661: TxInitSchema.Execute 2026-01-08T15:28:57.569496Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateInactive, received event# 268828684, Sender [1:877:2759], Recipient [1:886:2765]: NKikimr::TEvTablet::TEvFollowerSyncComplete 2026-01-08T15:28:57.570280Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:727: TxInitSchema.Complete 2026-01-08T15:28:57.570416Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:50: TDataShard::TTxInit::Execute 2026-01-08T15:28:57.572249Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1324: LoadChangeRecords: QueueSize: 0, at tablet: 72075186224037888 2026-01-08T15:28:57.572332Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1380: LoadLockChangeRecords at tablet: 72075186224037888 2026-01-08T15:28:57.572411Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1429: LoadChangeRecordCommits at tablet: 72075186224037888 2026-01-08T15:28:57.572771Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:94: TDataShard::TTxInit::Complete 2026-01-08T15:28:57.572932Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:102: TDataShard::TTxInitRestored::Execute 2026-01-08T15:28:57.573026Z node 1 :TX_DATASHARD DEBUG: 
datashard__init.cpp:142: DataShard 72075186224037888 persisting started state actor id [1:901:2765] in generation 1 2026-01-08T15:28:57.586937Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:151: TDataShard::TTxInitRestored::Complete 2026-01-08T15:28:57.626718Z node 1 :TX_DATASHARD INFO: datashard.cpp:418: Switched to work state WaitScheme tabletId 72075186224037888 2026-01-08T15:28:57.626951Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:458: 72075186224037888 not sending time cast registration request in state WaitScheme: missing processing params 2026-01-08T15:28:57.627063Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1259: Change sender created: at tablet: 72075186224037888, actorId: [1:903:2775] 2026-01-08T15:28:57.627122Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1264: Trying to activate change sender: at tablet: 72075186224037888 2026-01-08T15:28:57.627157Z node 1 :TX_DATASHARD INFO: datashard.cpp:1281: Cannot activate change sender: at tablet: 72075186224037888, state: WaitScheme 2026-01-08T15:28:57.627195Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2026-01-08T15:28:57.627457Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3153: StateWork, received event# 2146435072, Sender [1:886:2765], Recipient [1:886:2765]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvProgressTransaction 2026-01-08T15:28:57.627551Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3178: StateWork, processing event TEvPrivate::TEvProgressTransaction 2026-01-08T15:28:57.627873Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 72075186224037888 2026-01-08T15:28:57.628016Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 72075186224037888 2026-01-08T15:28:57.628103Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2026-01-08T15:28:57.628162Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 0 2026-01-08T15:28:57.628217Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:385: Check unit PlanQueue at 72075186224037888 2026-01-08T15:28:57.628261Z node 1 :TX_DATASHARD TRACE: plan_queue_unit.cpp:59: TPlanQueueUnit at 72075186224037888 has no attached operations 2026-01-08T15:28:57.628295Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:403: Unit PlanQueue has no ready operations at 72075186224037888 2026-01-08T15:28:57.628327Z node 1 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186224037888 TxInFly 0 2026-01-08T15:28:57.628374Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2026-01-08T15:28:57.628832Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3153: StateWork, received event# 269877761, Sender [1:887:2766], Recipient [1:886:2765]: NKikimr::TEvTabletPipe::TEvServerConnected 2026-01-08T15:28:57.628877Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3189: StateWork, processing event TEvTabletPipe::TEvServerConnected 2026-01-08T15:28:57.628958Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3740: Server connected at leader tablet# 72075186224037888, clientId# [1:882:2762], serverId# [1:887:2766], sessionId# [0:0:0] 2026-01-08T15:28:57.629067Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3153: StateWork, received event# 269549568, Sender [1:400:2399], Recipient [1:887:2766] 
2026-01-08T15:28:57.629106Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3159: StateWork, processing event TEvDataShard::TEvProposeTransaction 2026-01-08T15:28:57.629213Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037888 2026-01-08T15:28:57.629475Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1848: Trying to execute [0:281474976715657] at 72075186224037888 on unit CheckSchemeTx 2026-01-08T15:28:57.629541Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:133: Propose scheme transaction at tablet 72075186224037888 txId 281474976715657 ssId 72057594046644480 seqNo 2:1 2026-01-08T15:28:57.629623Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:221: Prepared scheme transaction txId 281474976715657 at tablet 72075186224037888 2026-01-08T15:28:57.629672Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1883: Execution status ... Z node 5 :TX_DATASHARD TRACE: execute_write_unit.cpp:48: add locks to result: 0 2026-01-08T15:29:20.695434Z node 5 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1883: Execution status for [0:2] at 72075186224037888 is ExecutedNoMoreRestarts 2026-01-08T15:29:20.695461Z node 5 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1931: Advance execution plan for [0:2] at 72075186224037888 executing on unit ExecuteWrite 2026-01-08T15:29:20.695498Z node 5 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1937: Add [0:2] at 72075186224037888 to execution unit FinishProposeWrite 2026-01-08T15:29:20.695529Z node 5 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1848: Trying to execute [0:2] at 72075186224037888 on unit FinishProposeWrite 2026-01-08T15:29:20.695604Z node 5 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1883: Execution status for [0:2] at 72075186224037888 is DelayCompleteNoMoreRestarts 2026-01-08T15:29:20.695640Z node 5 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1931: Advance execution plan for [0:2] at 72075186224037888 executing on unit FinishProposeWrite 2026-01-08T15:29:20.695678Z node 5 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1937: Add [0:2] at 72075186224037888 to execution unit CompletedOperations 2026-01-08T15:29:20.695719Z node 5 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1848: Trying to execute [0:2] at 72075186224037888 on unit CompletedOperations 2026-01-08T15:29:20.695758Z node 5 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1883: Execution status for [0:2] at 72075186224037888 is Executed 2026-01-08T15:29:20.695778Z node 5 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1931: Advance execution plan for [0:2] at 72075186224037888 executing on unit CompletedOperations 2026-01-08T15:29:20.695805Z node 5 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1943: Execution plan for [0:2] at 72075186224037888 has finished 2026-01-08T15:29:20.706531Z node 5 :TX_DATASHARD TRACE: datashard__write.cpp:150: TTxWrite complete: at tablet# 72075186224037888 2026-01-08T15:29:20.706628Z node 5 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1954: Complete execution for [0:2] at 72075186224037888 on unit FinishProposeWrite 2026-01-08T15:29:20.706691Z node 5 :TX_DATASHARD TRACE: finish_propose_write_unit.cpp:163: Propose transaction complete txid 2 at tablet 72075186224037888 send to client, propose latency: 0 ms, status: STATUS_COMPLETED 2026-01-08T15:29:20.706783Z node 5 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2026-01-08T15:29:20.708751Z node 5 :TX_PROXY DEBUG: proxy_impl.cpp:402: actor# [5:68:2115] Handle TEvNavigate describe path 
/Root/table-1 2026-01-08T15:29:20.708898Z node 5 :TX_PROXY DEBUG: describe.cpp:270: Actor# [5:1057:2876] HANDLE EvNavigateScheme /Root/table-1 2026-01-08T15:29:20.709373Z node 5 :TX_PROXY DEBUG: describe.cpp:354: Actor# [5:1057:2876] HANDLE EvNavigateKeySetResult TDescribeReq marker# P5 ErrorCount# 0 2026-01-08T15:29:20.709495Z node 5 :TX_PROXY DEBUG: describe.cpp:433: Actor# [5:1057:2876] SEND to# 72057594046644480 shardToRequest NKikimrSchemeOp.TDescribePath Path: "/Root/table-1" Options { ShowPrivateTable: true } 2026-01-08T15:29:20.710678Z node 5 :TX_PROXY DEBUG: describe.cpp:446: Actor# [5:1057:2876] Handle TEvDescribeSchemeResult Forward to# [5:829:2724] Cookie: 0 TEvDescribeSchemeResult: NKikimrScheme.TEvDescribeSchemeResult PreSerializedData size# 20 Record# Status: StatusSuccess Path: "/Root/table-1" PathDescription { Self { Name: "table-1" PathId: 38 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976715657 CreateStep: 2000 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "table-1" Columns { Name: "key" Type: "Uint32" TypeId: 2 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "value" Type: "Uint32" TypeId: 2 Id: 2 NotNull: false IsBuildInProgress: false } KeyColumnNames: "key" KeyColumnIds: 1 PartitionConfig { CompactionPolicy { InMemSizeToSnapshot: 4194304 InMemStepsToSnapshot: 300 InMemForceStepsToSnapshot: 500 InMemForceSizeToSnapshot: 16777216 InMemCompactionBrokerQueue: 0 ReadAheadHiThreshold: 67108864 ReadAheadLoThreshold: 16777216 MinDataPageSize: 7168 SnapBrokerQueue: 0 Generation { GenerationId: 0 SizeToCompact: 0 CountToCompact: 8 ForceCountToCompact: 8 ForceSizeToCompact: 134217728 CompactionBrokerQueue: 4294967295 KeepInCache: true BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen1" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 0 UpliftPartSize: 0 } Generation { GenerationId: 1 SizeToCompact: 41943040 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 536870912 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen2" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 8388608 UpliftPartSize: 8388608 } Generation { GenerationId: 2 SizeToCompact: 419430400 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 17179869184 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen3" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 83886080 UpliftPartSize: 83886080 } BackupBrokerQueue: 1 DefaultTaskPriority: 5 BackgroundSnapshotPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } InMemResourceBrokerTask: "compaction_gen0" SnapshotResourceBrokerTask: 
"compaction_gen0" BackupResourceBrokerTask: "scan" LogOverheadSizeToSnapshot: 16777216 LogOverheadCountToSnapshot: 500 DroppedRowsPercentToCompact: 50 MinBTreeIndexNodeSize: 7168 MinBTreeIndexNodeKeys: 6 } PartitioningPolicy { MinPartitionsCount: 1 } ColumnFamilies { Id: 0 Name: "default" StorageConfig { SysLog { PreferredPoolKind: "test" } Log { PreferredPoolKind: "test" } Data { PreferredPoolKind: "test" } } } } TableSchemaVersion: 1 IsBackup: false IsRestore: false } TableStats { DataSize: 0 RowCount: 0 IndexSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 0 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 1 RangeReadRows: 0 StoragePools { } ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 0 Memory: 0 Network: 0 Storage: 0 ReadThroughput: 0 WriteThroughput: 0 ReadIops: 0 WriteIops: 0 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 500 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } DomainKey { SchemeShard: 72057594046644480 PathId: 1 } PathsInside: 41 PathsLimit: 10000 ShardsInside: 1 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046644480 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 38 PathOwnerId: 72057594046644480 2026-01-08T15:29:20.711660Z node 5 :TX_DATASHARD TRACE: datashard_impl.h:3153: StateWork, received event# 269877761, Sender [5:1061:2880], Recipient [5:886:2765]: NKikimr::TEvTabletPipe::TEvServerConnected 2026-01-08T15:29:20.711720Z node 5 :TX_DATASHARD TRACE: datashard_impl.h:3189: StateWork, processing event TEvTabletPipe::TEvServerConnected 2026-01-08T15:29:20.711770Z node 5 :TX_DATASHARD DEBUG: datashard.cpp:3740: Server connected at leader tablet# 72075186224037888, clientId# [5:1060:2879], serverId# [5:1061:2880], sessionId# [0:0:0] 2026-01-08T15:29:20.711906Z node 5 :TX_DATASHARD TRACE: datashard_impl.h:3153: StateWork, received event# 269553169, Sender [5:1059:2878], Recipient [5:886:2765]: NKikimrTxDataShard.TEvGetInfoRequest 2026-01-08T15:29:20.712811Z node 5 :TX_DATASHARD TRACE: datashard_impl.h:3153: StateWork, received event# 269877761, Sender [5:1064:2883], Recipient [5:886:2765]: NKikimr::TEvTabletPipe::TEvServerConnected 2026-01-08T15:29:20.712864Z node 5 :TX_DATASHARD TRACE: datashard_impl.h:3189: StateWork, processing event TEvTabletPipe::TEvServerConnected 2026-01-08T15:29:20.712914Z node 5 :TX_DATASHARD DEBUG: datashard.cpp:3740: Server connected at leader tablet# 72075186224037888, clientId# [5:1063:2882], serverId# [5:1064:2883], sessionId# [0:0:0] 2026-01-08T15:29:20.713070Z node 5 :TX_DATASHARD TRACE: datashard_impl.h:3153: StateWork, received event# 269553210, Sender [5:1062:2881], 
Recipient [5:886:2765]: NKikimrTxDataShard.TEvCompactTable PathId { OwnerId: 72057594046644480 LocalId: 38 } CompactBorrowed: false 2026-01-08T15:29:20.713190Z node 5 :TX_DATASHARD INFO: datashard__compaction.cpp:141: Started background compaction# 1 of 72075186224037888 tableId# 38 localTid# 1001, requested from [5:1062:2881], partsCount# 0, memtableSize# 728, memtableWaste# 3880, memtableRows# 3 2026-01-08T15:29:20.776858Z node 5 :TX_DATASHARD DEBUG: datashard__compaction.cpp:203: CompactionComplete of tablet# 72075186224037888, table# 1001, finished edge# 1, ts 1970-01-01T00:00:02.511344Z 2026-01-08T15:29:20.776962Z node 5 :TX_DATASHARD DEBUG: datashard__compaction.cpp:240: ReplyCompactionWaiters of tablet# 72075186224037888, table# 1001, finished edge# 1, front# 1 2026-01-08T15:29:20.777022Z node 5 :TX_DATASHARD DEBUG: datashard__compaction.cpp:260: ReplyCompactionWaiters of tablet# 72075186224037888, table# 1001 sending TEvCompactTableResult to# [5:1062:2881]pathId# [OwnerId: 72057594046644480, LocalPathId: 38] 2026-01-08T15:29:20.777803Z node 5 :TX_DATASHARD TRACE: datashard_impl.h:3153: StateWork, received event# 268828683, Sender [5:877:2759], Recipient [5:886:2765]: NKikimr::TEvTablet::TEvFollowerGcApplied 2026-01-08T15:29:20.778309Z node 5 :TX_DATASHARD TRACE: datashard_impl.h:3153: StateWork, received event# 269877761, Sender [5:1071:2889], Recipient [5:886:2765]: NKikimr::TEvTabletPipe::TEvServerConnected 2026-01-08T15:29:20.778408Z node 5 :TX_DATASHARD TRACE: datashard_impl.h:3189: StateWork, processing event TEvTabletPipe::TEvServerConnected 2026-01-08T15:29:20.778463Z node 5 :TX_DATASHARD DEBUG: datashard.cpp:3740: Server connected at leader tablet# 72075186224037888, clientId# [5:1070:2888], serverId# [5:1071:2889], sessionId# [0:0:0] 2026-01-08T15:29:20.778615Z node 5 :TX_DATASHARD TRACE: datashard_impl.h:3153: StateWork, received event# 269553210, Sender [5:1069:2887], Recipient [5:886:2765]: NKikimrTxDataShard.TEvCompactTable PathId { OwnerId: 72057594046644480 LocalId: 38 } CompactBorrowed: false 2026-01-08T15:29:20.778720Z node 5 :TX_DATASHARD DEBUG: datashard__compaction.cpp:118: Background compaction of tablet# 72075186224037888 of path# [OwnerId: 72057594046644480, LocalPathId: 38], requested from# [5:1069:2887] is not needed
|85.7%| [TM] {BAZEL_UPLOAD} ydb/core/tx/datashard/ut_background_compaction/unittest
>> TControlPlaneProxyCheckPermissionsFailed::ShouldSendCreateBinding [GOOD]
>> TControlPlaneProxyCheckPermissionsFailed::ShouldSendListBindings
>> TGRpcRateLimiterTest::AcquireResourceManyRequiredGrpcApiWithCancelAfter [GOOD]
>> TGRpcRateLimiterTest::AcquireResourceManyRequiredActorApiWithCancelAfter
>> test.py::test[solomon-BasicExtractMembers-default.txt] [GOOD]
>> test.py::test[solomon-Downsampling-default.txt]
>> DataShardFollowers::FollowerAfterDataCompaction [GOOD]
>> DataShardFollowers::FollowerDuringSysPartSwitch
>> TControlPlaneProxyCheckPermissionsFailed::ShouldSendListBindings [GOOD]
>> TControlPlaneProxyCheckPermissionsFailed::ShouldSendDescribeBinding
>> QuoterWithKesusTest::ForbidsNotCanonizedResourcePath [GOOD]
>> QuoterWithKesusTest::HandlesNonExistentResource
>> test_update_script_tables.py::TestUpdateScriptTablesYdb::test_recreate_tables[DROP TABLE {}-`.metadata/script_executions`]
>> KeyValueGRPCService::SimpleWriteReadWithIncorreectPathV1 [GOOD]
>> KeyValueGRPCService::SimpleWriteReadWithIncorreectPathV2
>> PersQueueSdkReadSessionTest::ReadSessionWithExplicitlySpecifiedPartitions [GOOD]
>> PersQueueSdkReadSessionTest::ClosesAfterFailedConnectionToCds
>> DescribeSchemaSecretsService::SchemeCacheRetryErrors [GOOD]
>> DescribeSchemaSecretsService::SchemeCacheMultipleNotRetryableErrors
>> TControlPlaneProxyCheckPermissionsFailed::ShouldSendDescribeBinding [GOOD]
>> TControlPlaneProxyCheckPermissionsFailed::ShouldSendModifyBinding
>> TCreateAndDropViewTest::DropViewIfExists [GOOD]
>> TCreateAndDropViewTest::DropViewInFolder
>> TTxDataShardLocalKMeansScan::MainToPosting [GOOD]
>> TTxDataShardLocalKMeansScan::MainToPostingWithOverlap
>> DataShardSnapshotIsolation::ReadWriteUncommittedInsertBlockedByOlderVolatile [GOOD]
>> DataShardSnapshotIsolation::ReadWriteUncommittedInsertDuplicateKeyAtSnapshot
>> TControlPlaneProxyCheckPermissionsFailed::ShouldSendModifyBinding [GOOD]
>> TControlPlaneProxyCheckPermissionsFailed::ShouldSendDeleteBinding
>> test_auditlog.py::test_broken_dynconfig[_client_session_pool_bad_auth-_good_dynconfig]
>> test_auditlog.py::test_broken_dynconfig[_client_session_pool_with_auth_other-_bad_dynconfig]
>> TControlPlaneProxyCheckPermissionsFailed::ShouldSendDeleteBinding [GOOD]
>> TControlPlaneProxyCheckPermissionsSuccess::ShouldSendCreateQuery
|85.7%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test
|85.7%| [TM] {BAZEL_UPLOAD} ydb/tests/functional/audit/py3test
>> DSProxyStrategyTest::Restore_mirror3dc [GOOD]
>> test_fifo_messaging.py::TestSqsFifoMessagingWithPath::test_validates_group_id[tables_format_v0] [GOOD]
>> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_empty_queue_url[tables_format_v0] [GOOD]
>> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_empty_queue_url[tables_format_v1]
>> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_send_and_read_message[tables_format_v1-fifo] [GOOD]
>> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_send_and_read_message[tables_format_v1-std]
>> test_fifo_messaging.py::TestSqsFifoMessagingWithPath::test_validates_group_id[tables_format_v1]
>> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_does_not_change_visibility_for_deleted_message[tables_format_v1-std] [GOOD]
>> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_does_not_change_visibility_not_in_flight[tables_format_v0-fifo]
|85.7%| [TM] {asan, default-linux-x86_64, release} ydb/tests/tools/nemesis/ut/py3test
|85.7%| [TM] {BAZEL_UPLOAD} ydb/tests/tools/nemesis/ut/py3test
>> TControlPlaneProxyCheckPermissionsSuccess::ShouldSendCreateQuery [GOOD]
>> TControlPlaneProxyCheckPermissionsSuccess::ShouldSendListQueries
>> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_validates_message_body[tables_format_v0] [GOOD]
------- [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/dsproxy/ut_strategy/unittest >> DSProxyStrategyTest::Restore_mirror3dc [GOOD]
Test command err: diskMask# 333 nonWorkingDomain# 0 91512 diskMask# 333 nonWorkingDomain# 1 8640 diskMask# 334 nonWorkingDomain# 0 63264 diskMask# 334 nonWorkingDomain# 1 8640 diskMask# 335 nonWorkingDomain# 0 43620 diskMask# 335 nonWorkingDomain# 1 4320 diskMask# 336 nonWorkingDomain# 0 148794 diskMask# 336 nonWorkingDomain# 1 64800 diskMask# 337 nonWorkingDomain# 0 124764 diskMask# 337 nonWorkingDomain# 1 8640 diskMask# 338 nonWorkingDomain# 0 124764 diskMask# 338 nonWorkingDomain# 1 8640 diskMask# 339 nonWorkingDomain# 0 73344 diskMask# 339 nonWorkingDomain# 1 47544 diskMask# 340 nonWorkingDomain# 0 124764 diskMask# 340 nonWorkingDomain# 1 64800 diskMask# 341 nonWorkingDomain# 0 73344 diskMask# 341 nonWorkingDomain# 1 8640 diskMask# 342 nonWorkingDomain# 0 73344 diskMask# 342 nonWorkingDomain# 1 8640 diskMask# 343 nonWorkingDomain# 0 31656 diskMask# 343 nonWorkingDomain# 1 47544 diskMask# 344 nonWorkingDomain# 0 129324
>> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_validates_message_body[tables_format_v1]
>> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_zero_visibility_timeout_works[tables_format_v0-fifo] [GOOD]
>> TDescriberTests::TopicNotTopic [GOOD]
>> TDescriberTests::CDC
|85.8%| [TM] {BAZEL_UPLOAD} ydb/core/blobstorage/dsproxy/ut_strategy/unittest
>> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_zero_visibility_timeout_works[tables_format_v0-std]
>> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_empty_queue_url[tables_format_v1] [GOOD]
>> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_get_queue_attributes_only_attributes_table[tables_format_v0-fifo]
>> TControlPlaneProxyCheckPermissionsSuccess::ShouldSendListQueries [GOOD]
>> TControlPlaneProxyCheckPermissionsSuccess::ShouldSendDescribeQuery
>> test_fifo_messaging.py::TestSqsFifoMessagingWithPath::test_validates_group_id[tables_format_v1] [GOOD]
>> test_fifo_messaging.py::TestSqsFifoMessagingWithPath::test_validates_receive_attempt_id[tables_format_v0]
>> test_quoting.py::TestSqsQuotingWithKesus::test_properly_creates_and_deletes_queue[tables_format_v0-std] [GOOD]
>> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_send_and_read_message[tables_format_v1-std] [GOOD]
>> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_send_and_read_multiple_messages[tables_format_v0]
|85.7%| [TA] $(B)/ydb/core/blobstorage/dsproxy/ut_strategy/test-results/unittest/{meta.json ... results_accumulator.log}
|85.8%| [TA] {BAZEL_UPLOAD} $(B)/ydb/core/blobstorage/dsproxy/ut_strategy/test-results/unittest/{meta.json ... results_accumulator.log}
>> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_validates_message_body[tables_format_v1] [GOOD]
>> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_visibility_timeout_expires_on_wait_timeout[tables_format_v0]
>> TControlPlaneProxyCheckPermissionsSuccess::ShouldSendDescribeQuery [GOOD]
>> TControlPlaneProxyCheckPermissionsSuccess::ShouldSendGetQueryStatus
>> test_quoting.py::TestSqsQuotingWithLocalRateLimiter::test_does_actions_with_queue[tables_format_v0-fifo] [GOOD]
>> TFetchRequestTests::SmallBytesRead [GOOD]
>> TFetchRequestTests::EmptyTopic
>> test_fifo_messaging.py::TestSqsFifoMessagingWithPath::test_validates_receive_attempt_id[tables_format_v0] [GOOD]
>> test_fifo_messaging.py::TestSqsFifoMessagingWithPath::test_validates_receive_attempt_id[tables_format_v1]
>> TControlPlaneProxyCheckPermissionsSuccess::ShouldSendGetQueryStatus [GOOD]
>> TControlPlaneProxyCheckPermissionsSuccess::ShouldSendModifyQuery
>> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_get_queue_attributes_only_attributes_table[tables_format_v0-fifo] [GOOD]
>> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_get_queue_attributes_only_attributes_table[tables_format_v0-std]
>> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_zero_visibility_timeout_works[tables_format_v0-std] [GOOD]
>> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_zero_visibility_timeout_works[tables_format_v1-fifo]
>> TGRpcRateLimiterTest::AcquireResourceManyRequiredActorApiWithCancelAfter [GOOD]
>> QuoterWithKesusTest::HandlesNonExistentResource [GOOD]
>> TGRpcRateLimiterTest::AcquireResourceManyUsedGrpcApi
>> QuoterWithKesusTest::HandlesAllRequestsForNonExistentResource
>> test.py::test[solomon-Downsampling-default.txt] [GOOD]
>> test.py::test[solomon-DownsamplingValidSettings-default.txt]
>> TMLPConsumerTests::HtmlApp_BadPartition [GOOD]
>> TMLPCountersTests::SimpleCounters
>> TControlPlaneProxyCheckPermissionsSuccess::ShouldSendModifyQuery [GOOD]
>> TControlPlaneProxyCheckPermissionsSuccess::ShouldSendDeleteQuery
>> KeyValueGRPCService::SimpleWriteReadWithIncorreectPathV2 [GOOD]
>> KeyValueGRPCService::SimpleWriteReadWithoutTokenV1
>> test_fifo_messaging.py::TestSqsFifoMessagingWithPath::test_validates_receive_attempt_id[tables_format_v1] [GOOD]
>> test_fifo_messaging.py::TestSqsFifoMessagingWithPath::test_visibility_change_disables_receive_attempt_id[tables_format_v0-with_change_visibility]
>> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_does_not_change_visibility_not_in_flight[tables_format_v0-fifo] [GOOD]
>> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_does_not_change_visibility_not_in_flight[tables_format_v0-std]
>> DataShardFollowers::FollowerDuringSysPartSwitch [GOOD]
>> DataShardFollowers::FollowerDuringDataPartSwitch
>> DataShardSnapshotIsolation::ReadWriteUncommittedInsertDuplicateKeyAtSnapshot [GOOD]
>> DataShardSnapshotIsolation::ReadWriteUncommittedInsertMissingKeyAtSnapshot
>> TControlPlaneProxyCheckPermissionsSuccess::ShouldSendDeleteQuery [GOOD]
>> TControlPlaneProxyCheckPermissionsSuccess::ShouldSendControlQuery
>> test_quoting.py::TestSqsQuotingWithLocalRateLimiter::test_send_message_rate[tables_format_v0] [GOOD]
>> TTxDataShardLocalKMeansScan::MainToPostingWithOverlap [GOOD]
>> TTxDataShardLocalKMeansScan::MainToBuild
>> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_get_queue_attributes_only_attributes_table[tables_format_v0-std] [GOOD]
>> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_get_queue_attributes_only_attributes_table[tables_format_v1-fifo]
>> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_zero_visibility_timeout_works[tables_format_v1-fifo] [GOOD]
>> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_zero_visibility_timeout_works[tables_format_v1-std]
>> TCreateAndDropViewTest::DropViewInFolder [GOOD]
>> TCreateAndDropViewTest::ContextPollution
>> TControlPlaneProxyCheckPermissionsSuccess::ShouldSendControlQuery [GOOD]
>> TControlPlaneProxyCheckPermissionsSuccess::ShouldSendGetResultData
>> test_quoting.py::TestSqsQuotingWithLocalRateLimiter::test_create_queue_rate[tables_format_v1] [GOOD]
>> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_multi_read_dont_stall[tables_format_v1] [GOOD]
>> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_partial_delete_works[tables_format_v0]
>> test_fifo_messaging.py::TestSqsFifoMessagingWithPath::test_visibility_change_disables_receive_attempt_id[tables_format_v0-with_change_visibility] [GOOD]
>> test_fifo_messaging.py::TestSqsFifoMessagingWithPath::test_visibility_change_disables_receive_attempt_id[tables_format_v0-with_delete_message]
>> TControlPlaneProxyCheckPermissionsSuccess::ShouldSendGetResultData [GOOD]
>> TControlPlaneProxyCheckPermissionsSuccess::ShouldSendListJobs
>> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_get_queue_attributes_only_attributes_table[tables_format_v1-fifo] [GOOD]
>> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_zero_visibility_timeout_works[tables_format_v1-std] [GOOD]
>> TControlPlaneProxyCheckPermissionsSuccess::ShouldSendListJobs [GOOD]
>> TControlPlaneProxyCheckPermissionsSuccess::ShouldSendDescribeJob
>> YdbSdkSessions::TestSessionPool
>> YdbSdkSessions::CloseSessionAfterDriverDtorWithoutSessionPool
>> YdbSdkSessions::TestSdkFreeSessionAfterBadSessionQueryServiceStreamCall [SKIPPED]
>> YdbSdkSessions::TestMultipleSessions
>> YdbSdkSessions::CloseSessionWithSessionPoolExplicitDriverStopOnly
>> YdbSdkSessions::TestActiveSessionCountAfterBadSession
>> TControlPlaneProxyCheckPermissionsSuccess::ShouldSendDescribeJob [GOOD]
>> TControlPlaneProxyCheckPermissionsSuccess::ShouldSendCreateConnection
>> DescribeSchemaSecretsService::SchemeCacheMultipleNotRetryableErrors [GOOD]
>> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_create_q_twice[tables_format_v0-fifo] [GOOD]
>> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_create_q_twice[tables_format_v0-std]
>> TStateStorageConfigCompareWithOld::TestReplicaActorIdAndSelectionIsSame1 [GOOD]
>> TStateStorageConfigCompareWithOld::TestReplicaActorIdAndSelectionIsSame2
>> test_fifo_messaging.py::TestSqsFifoMessagingWithTenant::test_queue_attributes[tables_format_v1] [GOOD]
>> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_does_not_change_visibility_not_in_flight[tables_format_v0-std] [GOOD]
>> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_does_not_change_visibility_not_in_flight[tables_format_v1-fifo]
>> test_fifo_messaging.py::TestSqsFifoMessagingWithTenant::test_read_dont_stall[tables_format_v0]
>> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_send_and_read_multiple_messages[tables_format_v0] [GOOD]
>> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_send_and_read_multiple_messages[tables_format_v1]
>> TControlPlaneProxyCheckPermissionsSuccess::ShouldSendCreateConnection [GOOD]
>> TControlPlaneProxyCheckPermissionsSuccess::ShouldSendCreateConnectionWithServiceAccount
>> test_fifo_messaging.py::TestSqsFifoMessagingWithPath::test_visibility_change_disables_receive_attempt_id[tables_format_v0-with_delete_message] [GOOD]
>> YdbSdkSessions::TestActiveSessionCountAfterBadSession [GOOD]
>> YdbSdkSessions::SessionsServerLimitWithSessionPool [SKIPPED]
>> YdbSdkSessions::TestMultipleSessions [GOOD]
>> YdbSdkSessions::TestActiveSessionCountAfterTransportError
>> test.py::test[solomon-DownsamplingValidSettings-default.txt] [GOOD]
>> test.py::test[solomon-HistResponse-default.txt]
>> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_set_very_big_visibility_timeout[tables_format_v1] [GOOD]
>> TControlPlaneProxyCheckPermissionsSuccess::ShouldSendCreateConnectionWithServiceAccount [GOOD]
>> TControlPlaneProxyCheckPermissionsSuccess::ShouldSendListConnections
>> QuoterWithKesusTest::HandlesAllRequestsForNonExistentResource [GOOD]
>> QuoterWithKesusTest::GetsQuota
>> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_validates_message_attribute_value[tables_format_v0]
>> test_fifo_messaging.py::TestSqsFifoMessagingWithPath::test_write_read_delete_many_groups[tables_format_v0] [GOOD]
>> test_fifo_messaging.py::TestSqsFifoMessagingWithPath::test_write_read_delete_many_groups[tables_format_v1]
>> YdbSdkSessions::TestSessionPool [GOOD]
>> TDescriberTests::CDC [GOOD]
>> TControlPlaneProxyCheckPermissionsSuccess::ShouldSendListConnections [GOOD]
>> TControlPlaneProxyCheckPermissionsSuccess::ShouldSendDescribeConnection
>> YdbSdkSessions::MultiThreadSessionPoolLimitSyncTableClient
>> KeyValueGRPCService::SimpleWriteReadWithoutTokenV1 [GOOD]
>> KeyValueGRPCService::SimpleWriteReadWithoutTokenV2
>> TGRpcRateLimiterTest::AcquireResourceManyUsedGrpcApi [GOOD]
>> TGRpcRateLimiterTest::AcquireResourceManyUsedActorApi
>> TStateStorageConfigCompareWithOld::TestReplicaActorIdAndSelectionIsSame2 [GOOD]
>> YdbSdkSessions::CloseSessionAfterDriverDtorWithoutSessionPool [GOOD]
>> YdbSdkSessions::CloseSessionWithSessionPoolExplicit
>> TControlPlaneProxyCheckPermissionsSuccess::ShouldSendDescribeConnection [GOOD]
>> TControlPlaneProxyCheckPermissionsSuccess::ShouldSendModifyConnection
>> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_create_q_twice[tables_format_v0-std] [GOOD]
------- [TM] {asan, default-linux-x86_64, release} ydb/public/sdk/cpp/tests/integration/sessions/gtest >> YdbSdkSessions::TestSdkFreeSessionAfterBadSessionQueryServiceStreamCall [SKIPPED]
Test command err: ydb/public/sdk/cpp/tests/integration/sessions/main.cpp:248: Test is failing right now
|85.7%| [TM] {BAZEL_UPLOAD} ydb/public/sdk/cpp/tests/integration/sessions/gtest
------- [TS] {asan, default-linux-x86_64, release} ydb/core/persqueue/public/describer/ut/unittest >> TDescriberTests::CDC [GOOD]
Test command err: 2026-01-08T15:28:56.816883Z node 1 :METADATA_PROVIDER WARN: log.cpp:841: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7593013137665577263:2064];send_to=[0:7307199536658146131:7762515]; 2026-01-08T15:28:56.817028Z node 1 :METADATA_PROVIDER ERROR: log.cpp:841: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2026-01-08T15:28:56.844499Z node 1 :PQ_READ_PROXY DEBUG: caching_service.cpp:44: Direct read cache: : Created 2026-01-08T15:28:56.976859Z node 1 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] 
[TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: LookupError, path: Root/.metadata/script_executions 2026-01-08T15:28:56.982987Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2026-01-08T15:28:56.983102Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2026-01-08T15:28:57.036665Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2026-01-08T15:28:57.036980Z node 1 :IMPORT WARN: schemeshard_import.cpp:440: Table profiles were not loaded 2026-01-08T15:28:57.038005Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1374: Notification cookie mismatch for subscription [1:7593013137665577240:2081] 1767886136816244 != 1767886136816247 2026-01-08T15:28:57.106657Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: /home/runner/.ya/build/build_root/663n/002387/r3tmp/yandexCJfuOD.tmp 2026-01-08T15:28:57.106682Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: /home/runner/.ya/build/build_root/663n/002387/r3tmp/yandexCJfuOD.tmp 2026-01-08T15:28:57.106845Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:202: successfully initialized from file: /home/runner/.ya/build/build_root/663n/002387/r3tmp/yandexCJfuOD.tmp 2026-01-08T15:28:57.106956Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2026-01-08T15:28:57.132800Z INFO: TTestServer started on Port 1838 GrpcPort 8890 2026-01-08T15:28:57.242230Z node 1 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: PathErrorUnknown, path: Root/.metadata/script_executions PQClient connected to localhost:8890 2026-01-08T15:28:57.289887Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2026-01-08T15:28:57.361201Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:28:57.456005Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:123: NotifyTxCompletion, unknown transaction, txId: 281474976715660, at schemeshard: 72057594046644480 2026-01-08T15:28:57.830525Z node 1 :TX_CONVEYOR ERROR: log.cpp:841: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2026-01-08T15:28:59.709754Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7593013150550480159:2330], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:28:59.709835Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7593013150550480169:2333], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:28:59.712181Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:263: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:28:59.712858Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7593013150550480175:2336], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:28:59.712911Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:263: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:28:59.722748Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715662:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2026-01-08T15:28:59.752651Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7593013150550480174:2335], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715662 completed, doublechecking } 2026-01-08T15:28:59.849584Z node 1 :TX_PROXY ERROR: schemereq.cpp:624: Actor# [1:7593013150550480240:2639] txid# 281474976715663, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 45], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2026-01-08T15:29:00.056648Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:29:00.064365Z node 1 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:625: Compilation failed, self: [1:7593013150550480250:2343], status: SCHEME_ERROR, issues:
: Error: Type annotation, code: 1030
:3:16: Error: At function: KiReadTable!
:3:16: Error: Cannot find table 'db.[/Root/PQ/Config/V2/Cluster]' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions., code: 2003 2026-01-08T15:29:00.067239Z node 1 :KQP_SESSION WARN: {KQPSA@kqp_session_actor.cpp:2679} SessionId: ydb://session/3?node_id=1&id=OTI0YmMxZGYtMzRhMTM4ZDktNTFlOTU2MDgtYzEwNGE1Yjk=, ActorId: [1:7593013150550480157:2329], ActorState: ExecuteState, LegacyTraceId: 01kef3kc9tf21c3qewhez1h1aw, ReplyQueryCompileError, remove tx status# SCHEME_ERROR issues# { message: "Type annotation" issue_code: 1030 severity: 1 issues { position { row: 3 column: 16 } message: "At function: KiReadTable!" end_position { row: 3 column: 16 } severity: 1 issues { position { row: 3 column: 16 } message: "Cannot find table \'db.[/Root/PQ/Config/V2/Cluster]\' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions." end_position { row: 3 column: 16 } issue_code: 2003 severity: 1 } } } tx_id# trace_id# 2026-01-08T15:29:00.069980Z node 1 :PERSQUEUE_CLUSTER_TRACKER ERROR: cluster_tracker.cpp:167: failed to list clusters: { Response { QueryIssues { message: "Type annotation" issue_code: 1030 severity: 1 issues { position { row: 3 column: 16 } message: "At function: KiReadTable!" end_position { row: 3 column: 16 } severity: 1 issues { position { row: 3 column: 16 } message: "Cannot find table \'db.[/Root/PQ/Config/V2/Cluster]\' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions." end_position { row: 3 column: 16 } issue_code: 2003 severity: 1 } } } TxMeta { } } YdbStatus: SCHEME_ERROR ConsumedRu: 1 } 2026-01-08T15:29:00.097931Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:29:00.242585Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) === Init DC: UPSERT INTO `/Root/PQ/Config/V2/Cluster` (name, balancer, local, enabled, weight) VALUES ("dc1", "localhost", true, true, 1000), ("dc2", "dc2.logbroker.yandex.net", false, true, 1000); === CheckClustersList. Subcribe to ClusterTracker from [1:7593013154845447832:2818] 2026-01-08T15:29:01.817683Z node 1 :METADATA_PROVIDER ERROR: log.cpp:841: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7593013137665577263:2064];send_to=[0:7307199536658146131:7762515]; 2026-01-08T15:29:01.817788Z node 1 :METADATA_PROVIDER ERROR: log.cpp:841: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; === CheckClustersList. 
Ok 2026-01-08T15:29:06.531993Z :TopicExists INFO: TTopicSdkTestSetup started 2026-01-08T15:29:06.552000Z node 1 :PQ_READ_PROXY DEBUG: grpc_pq_schema.cpp:133: new create topic request 2026-01-08T15:29:06.577442Z node 1 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1657: [72075186224037893][] pipe [1:7593013180615251832:2926] connected; active server actors: 1 2026-01-08T15:29:06.577745Z node 1 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer__balancing.cpp:1521: [72075186224037893][test-topic] updating configuration. Deleted partitions []. Added partitions [0] 2026-01-08T15:29:06.579145Z node 1 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer.cpp:756: [72075186224037893][test-topic] Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at RB 72075186224037893 2026-01-08T15:29:06.579287Z node 1 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer.cpp:142: [72075186224037893][test-topic] BALANCER INIT DONE for test-topic: (0, 72075186224037892) 2026-01-08T15:29:06.580926Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:3023: [PQ: 72075186224037892] Handle TEvInterconnect::TEvNodeInfo 2026-01-08T15:29:06.581885Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:3055: [PQ: 72075186224037892] Transactions request. From tx_00000000000000000000, To tx_18446744073709551615 2026-01-08T15:29:06.582073Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:788: [PQ: 72075186224037892] no config, start with empty partitions and default config 2026-01-08T15:29:06.582119Z node 1 :PERSQUEUE NOTICE: pq_impl.cpp:927: [PQ: 72075186224037892] disable metering: reason# billing is not enabled in BillingMeteringConfig 2026-01-08T15:29:06.582154Z node 1 :PERSQUEUE INFO: pq_impl.cpp:613: [PQ: 72075186224037892] doesn't have tx write ... TypeId: 4 } MeteringMode: METERING_MODE_REQUEST_UNITS } Partitions { PartitionId: 0 TabletId: 72075186224037895 Status: Active } AlterVersion: 1 BalancerTabletID: 72075186224037896 NextPartitionId: 1 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } DomainKey { SchemeShard: 72057594046644480 PathId: 1 } PathsInside: 51 PathsLimit: 10000 ShardsInside: 9 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046644480 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 2 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 52 PathOwnerId: 72057594046644480 } 2026-01-08T15:29:39.339224Z node 4 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2526: ResolveCacheItem: self# [4:7593013276353298193:2106], notify# NKikimr::TSchemeBoardEvents::TEvNotifyUpdate { Path: /Root/table1/feed/streamImpl PathId: [OwnerId: 72057594046644480, LocalPathId: 52] DescribeSchemeResult: Status: StatusSuccess Path: "/Root/table1/feed/streamImpl" PathDescription { Self { Name: "streamImpl" PathId: 52 SchemeshardId: 72057594046644480 PathType: EPathTypePersQueueGroup CreateFinished: true 
CreateTxId: 281474976710674 CreateStep: 1767886179374 ParentPathId: 51 PathState: EPathStateCreate Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeStreamImpl Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 PQVersion: 1 } ChildrenExist: false } PersQueueGroup { Name: "streamImpl" PathId: 52 TotalGroupCount: 1 PartitionPerTablet: 2 PQTabletConfig { PartitionConfig { MaxCountInPartition: 2147483647 LifetimeSeconds: 86400 WriteSpeedInBytesPerSecond: 1048576 BurstSize: 1048576 } TopicName: "feed" TopicPath: "/Root/table1/feed/streamImpl" YdbDatabasePath: "/Root" PartitionKeySchema { Name: "id" TypeId: 4 } MeteringMode: METERING_MODE_REQUEST_UNITS } Partitions { PartitionId: 0 TabletId: 72075186224037895 Status: Active } AlterVersion: 1 BalancerTabletID: 72075186224037896 NextPartitionId: 1 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } DomainKey { SchemeShard: 72057594046644480 PathId: 1 } PathsInside: 51 PathsLimit: 10000 ShardsInside: 9 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046644480 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 2 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 52 PathOwnerId: 72057594046644480 }, by path# { Subscriber: { Subscriber: [4:7593013323597940144:3102] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 0 } Filled: 1 Status: StatusSuccess Kind: 4 TableKind: 0 Created: 0 CreateStep: 0 PathId: [OwnerId: 72057594046644480, LocalPathId: 52] DomainId: [OwnerId: 72057594046644480, LocalPathId: 1] IsPrivatePath: 1 IsVirtual: 0 SchemaVersion: 0 }, by pathId# { Subscriber: { Subscriber: [4:7593013323597940144:3102] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 0 } Filled: 1 Status: StatusSuccess Kind: 4 TableKind: 0 Created: 0 CreateStep: 0 PathId: [OwnerId: 72057594046644480, LocalPathId: 52] DomainId: [OwnerId: 72057594046644480, LocalPathId: 1] IsPrivatePath: 1 IsVirtual: 0 SchemaVersion: 0 } 2026-01-08T15:29:39.343816Z node 4 :PQ_DESCRIBER DEBUG: describer.cpp:30: [[4:7593013323597940155:3107]] Create request [/Root/table1/feed] with SyncVersion=0 2026-01-08T15:29:39.343970Z node 4 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2810: Handle TEvTxProxySchemeCache::TEvNavigateKeySet: self# [4:7593013276353298193:2106], request# { ErrorCount: 0 DatabaseName: /Root DomainOwnerId: 0 Instant: 1 ResultSet [{ Path: Root/table1/feed TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpList RedirectRequired: true ShowPrivatePath: true SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }] } 2026-01-08T15:29:39.344125Z node 4 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:1892: FillEntry for TNavigate: self# [4:7593013276353298193:2106], cacheItem# { Subscriber: { Subscriber: 
[4:7593013323597940123:3094] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 0 } Filled: 1 Status: StatusSuccess Kind: 13 TableKind: 0 Created: 1 CreateStep: 1767886179374 PathId: [OwnerId: 72057594046644480, LocalPathId: 51] DomainId: [OwnerId: 72057594046644480, LocalPathId: 1] IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, entry# { Path: Root/table1/feed TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpList RedirectRequired: true ShowPrivatePath: true SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }, props# { Cookie: 0 IsSync: false Partial: 0 } 2026-01-08T15:29:39.344307Z node 4 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:268: Send result: self# [4:7593013323597940156:3108], recipient# [4:7593013323597940155:3107], result# { ErrorCount: 0 DatabaseName: /Root DomainOwnerId: 0 Instant: 1 ResultSet [{ Path: Root/table1/feed TableId: [72057594046644480:51:0] RequestType: ByPath Operation: OpList RedirectRequired: true ShowPrivatePath: true SyncVersion: false Status: Ok Kind: KindCdcStream DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] Params { Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } Children [streamImpl] }] } 2026-01-08T15:29:39.344357Z node 4 :PQ_DESCRIBER DEBUG: describer.cpp:54: [[4:7593013323597940155:3107]] Handle TEvTxProxySchemeCache::TEvNavigateKeySetResult 2026-01-08T15:29:39.344389Z node 4 :PQ_DESCRIBER DEBUG: describer.cpp:84: [[4:7593013323597940155:3107]] Path '/Root/table1/feed' is a CDC 2026-01-08T15:29:39.344432Z node 4 :PQ_DESCRIBER DEBUG: describer.cpp:30: [[4:7593013323597940155:3107]] Create request [/Root/table1/feed/streamImpl] with SyncVersion=0 2026-01-08T15:29:39.344568Z node 4 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2810: Handle TEvTxProxySchemeCache::TEvNavigateKeySet: self# [4:7593013276353298193:2106], request# { ErrorCount: 0 DatabaseName: /Root DomainOwnerId: 0 Instant: 1 ResultSet [{ Path: Root/table1/feed/streamImpl TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpList RedirectRequired: true ShowPrivatePath: true SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }] } 2026-01-08T15:29:39.344669Z node 4 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:1892: FillEntry for TNavigate: self# [4:7593013276353298193:2106], cacheItem# { Subscriber: { Subscriber: [4:7593013323597940144:3102] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 0 } Filled: 1 Status: StatusSuccess Kind: 4 TableKind: 0 Created: 1 CreateStep: 1767886179374 PathId: [OwnerId: 72057594046644480, LocalPathId: 52] DomainId: [OwnerId: 72057594046644480, LocalPathId: 1] IsPrivatePath: 1 IsVirtual: 0 SchemaVersion: 0 }, entry# { Path: Root/table1/feed/streamImpl TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpList RedirectRequired: true ShowPrivatePath: true SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }, props# { Cookie: 0 IsSync: false Partial: 0 } 2026-01-08T15:29:39.344809Z node 4 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:268: Send result: self# [4:7593013323597940158:3109], recipient# [4:7593013323597940155:3107], result# { ErrorCount: 0 DatabaseName: /Root DomainOwnerId: 0 Instant: 1 ResultSet [{ Path: Root/table1/feed/streamImpl TableId: [72057594046644480:52:0] RequestType: ByPath Operation: 
OpList RedirectRequired: true ShowPrivatePath: true SyncVersion: false Status: Ok Kind: KindTopic DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] Params { Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } }] } 2026-01-08T15:29:39.344842Z node 4 :PQ_DESCRIBER DEBUG: describer.cpp:54: [[4:7593013323597940155:3107]] Handle TEvTxProxySchemeCache::TEvNavigateKeySetResult 2026-01-08T15:29:39.344865Z node 4 :PQ_DESCRIBER DEBUG: describer.cpp:103: [[4:7593013323597940155:3107]] Path '/Root/table1/feed/streamImpl' SUCCESS 2026-01-08T15:29:39.358397Z node 4 :PERSQUEUE DEBUG: partition.cpp:2329: [72075186224037892][Partition][0][StateIdle] Process user action and tx events 2026-01-08T15:29:39.358440Z node 4 :PERSQUEUE DEBUG: partition.cpp:2388: [72075186224037892][Partition][0][StateIdle] Events: 0, PendingCommits: 0, PendingWrites: 0 2026-01-08T15:29:39.358459Z node 4 :PERSQUEUE DEBUG: partition.cpp:2337: [72075186224037892][Partition][0][StateIdle] Process user action and tx pending commits 2026-01-08T15:29:39.358509Z node 4 :PERSQUEUE DEBUG: partition.cpp:2388: [72075186224037892][Partition][0][StateIdle] Events: 0, PendingCommits: 0, PendingWrites: 0 2026-01-08T15:29:39.358529Z node 4 :PERSQUEUE DEBUG: partition.cpp:2347: [72075186224037892][Partition][0][StateIdle] Try persist 2026-01-08T15:29:39.428740Z node 4 :PERSQUEUE DEBUG: partition.cpp:2329: [72075186224037895][Partition][0][StateIdle] Process user action and tx events 2026-01-08T15:29:39.428766Z node 4 :PERSQUEUE DEBUG: partition.cpp:2388: [72075186224037895][Partition][0][StateIdle] Events: 0, PendingCommits: 0, PendingWrites: 0 2026-01-08T15:29:39.428777Z node 4 :PERSQUEUE DEBUG: partition.cpp:2337: [72075186224037895][Partition][0][StateIdle] Process user action and tx pending commits 2026-01-08T15:29:39.428788Z node 4 :PERSQUEUE DEBUG: partition.cpp:2388: [72075186224037895][Partition][0][StateIdle] Events: 0, PendingCommits: 0, PendingWrites: 0 2026-01-08T15:29:39.428798Z node 4 :PERSQUEUE DEBUG: partition.cpp:2347: [72075186224037895][Partition][0][StateIdle] Try persist 2026-01-08T15:29:39.458547Z node 4 :PERSQUEUE DEBUG: partition.cpp:2329: [72075186224037892][Partition][0][StateIdle] Process user action and tx events 2026-01-08T15:29:39.458582Z node 4 :PERSQUEUE DEBUG: partition.cpp:2388: [72075186224037892][Partition][0][StateIdle] Events: 0, PendingCommits: 0, PendingWrites: 0 2026-01-08T15:29:39.458598Z node 4 :PERSQUEUE DEBUG: partition.cpp:2337: [72075186224037892][Partition][0][StateIdle] Process user action and tx pending commits 2026-01-08T15:29:39.458618Z node 4 :PERSQUEUE DEBUG: partition.cpp:2388: [72075186224037892][Partition][0][StateIdle] Events: 0, PendingCommits: 0, PendingWrites: 0 2026-01-08T15:29:39.458633Z node 4 :PERSQUEUE DEBUG: partition.cpp:2347: [72075186224037892][Partition][0][StateIdle] Try persist
>> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_create_q_twice[tables_format_v1-fifo] [GOOD]
|85.8%| [TS] {BAZEL_UPLOAD} ydb/core/persqueue/public/describer/ut/unittest
>> YdbSdkSessions::TestSdkFreeSessionAfterBadSessionQueryService [SKIPPED]
>> DataShardFollowers::FollowerDuringDataPartSwitch [GOOD]
>> DataShardFollowers::FollowerReadDuringSplit
>> DataShardSnapshotIsolation::ReadWriteUncommittedInsertMissingKeyAtSnapshot [GOOD]
>> DataShardSnapshotIsolation::ReadWriteObserveOwnChanges
>> TControlPlaneProxyCheckPermissionsSuccess::ShouldSendModifyConnection [GOOD]
>> TControlPlaneProxyCheckPermissionsSuccess::ShouldSendModifyConnectionWithServiceAccount
>> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_does_not_change_visibility_not_in_flight[tables_format_v1-fifo] [GOOD]
>> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_does_not_change_visibility_not_in_flight[tables_format_v1-std]
>> YdbSdkSessions::MultiThreadSync
>> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_validates_message_attribute_value[tables_format_v0] [GOOD]
>> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_validates_message_attribute_value[tables_format_v1]
>> Coordinator::RestoreTenantConfiguration-AlterDatabaseCreateHiveFirst-false [GOOD]
>> Coordinator::RestoreTenantConfiguration-AlterDatabaseCreateHiveFirst-true
>> TControlPlaneProxyCheckPermissionsSuccess::ShouldSendModifyConnectionWithServiceAccount [GOOD]
>> TControlPlaneProxyCheckPermissionsSuccess::ShouldSendDeleteConnection
------- [TM] {asan, default-linux-x86_64, release} ydb/public/sdk/cpp/tests/integration/sessions/gtest >> YdbSdkSessions::SessionsServerLimitWithSessionPool [SKIPPED]
Test command err: ydb/public/sdk/cpp/tests/integration/sessions/main.cpp:588: Enable after accepting a pull request with merging configs
|85.8%| [TM] {BAZEL_UPLOAD} ydb/public/sdk/cpp/tests/integration/sessions/gtest
|85.8%| [TM] {asan, default-linux-x86_64, release} ydb/core/base/ut/unittest >> TStateStorageConfigCompareWithOld::TestReplicaActorIdAndSelectionIsSame2 [GOOD]
>> YdbSdkSessions::MultiThreadSync [GOOD]
>> YdbSdkSessions::SessionsServerLimit [SKIPPED]
>> PersQueueSdkReadSessionTest::ClosesAfterFailedConnectionToCds [GOOD]
|85.8%| [TM] {BAZEL_UPLOAD} ydb/core/base/ut/unittest
>> TTxDataShardLocalKMeansScan::MainToBuild [GOOD]
>> TTxDataShardLocalKMeansScan::MainToBuildWithOverlap
>> TCreateAndDropViewTest::ContextPollution [GOOD]
>> TEvaluateExprInViewTest::EvaluateExpr
>> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_delay_one_message[tables_format_v0-fifo] [GOOD]
>> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_delay_one_message[tables_format_v0-std]
>> TControlPlaneProxyCheckPermissionsSuccess::ShouldSendDeleteConnection [GOOD]
>> TControlPlaneProxyCheckPermissionsSuccess::ShouldSendTestConnection
>> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_create_q_twice[tables_format_v1-fifo] [GOOD]
>> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_create_q_twice[tables_format_v1-std]
------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/federated_query/ut_service/fast/unittest >> DescribeSchemaSecretsService::SchemeCacheMultipleNotRetryableErrors [GOOD]
Test command err: Trying to start YDB, gRPC: 28266, MsgBus: 63932 2026-01-08T15:26:03.438391Z node 1 :METADATA_PROVIDER WARN: log.cpp:841: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7593012394692976105:2071];send_to=[0:7307199536658146131:7762515]; 2026-01-08T15:26:03.439151Z node 1 :METADATA_PROVIDER ERROR: log.cpp:841: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2026-01-08T15:26:03.784968Z node 1 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables 
existence, scheme status: LookupError, path: Root/.metadata/script_executions 2026-01-08T15:26:03.789863Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2026-01-08T15:26:03.789955Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2026-01-08T15:26:03.860101Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2026-01-08T15:26:04.051028Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1374: Notification cookie mismatch for subscription [1:7593012394692976074:2081] 1767885963435673 != 1767885963435676 2026-01-08T15:26:04.071111Z node 1 :IMPORT WARN: schemeshard_import.cpp:440: Table profiles were not loaded 2026-01-08T15:26:04.084086Z node 1 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: PathErrorUnknown, path: Root/.metadata/script_executions 2026-01-08T15:26:04.216676Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2026-01-08T15:26:04.216699Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2026-01-08T15:26:04.216704Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2026-01-08T15:26:04.216819Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2026-01-08T15:26:04.447218Z node 1 :TX_CONVEYOR ERROR: log.cpp:841: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2026-01-08T15:26:05.133122Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2026-01-08T15:26:05.143634Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:123: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2026-01-08T15:26:05.227122Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:26:05.426796Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:26:05.689281Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:26:05.794509Z node 1 :FLAT_TX_SCHEMESHARD WARN: 
schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:26:07.788053Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7593012411872847136:2400], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:26:07.788710Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:263: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:26:07.789080Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7593012411872847146:2401], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:26:07.789116Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:263: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:26:08.438352Z node 1 :METADATA_PROVIDER ERROR: log.cpp:841: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7593012394692976105:2071];send_to=[0:7307199536658146131:7762515]; 2026-01-08T15:26:08.438430Z node 1 :METADATA_PROVIDER ERROR: log.cpp:841: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2026-01-08T15:26:08.795040Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:26:08.835419Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:26:08.868218Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:26:08.934160Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:26:09.012617Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:26:09.074099Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:26:09.231692Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:26:09.460231Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715669:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:26:10.044773Z node 1 :KQP_WORKLOAD_SERVICE WARN: 
scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7593012424757749930:2488], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:26:10.044847Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:263: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:26:10.047176Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7593012424757749936:2492], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:26:10.047213Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7593012424757749935:2491], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:26:10.047595Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:263: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:26:10.066134Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715670:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2026-01-08T15:26:10.121412Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7593012424757749939:2493], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715670 completed, doublechecking } 2026-01-08T15:26:10.180606Z node 1 :TX_PROXY ERROR: schemereq.cpp:624: Actor# [1:7593012424757749990:3786] txid# 281474976715671, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 53], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2026-01-08T15:26:12.362700Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperatio ... 5:29:25.615875Z node 14 :METADATA_PROVIDER WARN: log.cpp:841: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[14:7593013264245589385:2101];send_to=[0:7307199536658146131:7762515]; 2026-01-08T15:29:25.615992Z node 14 :METADATA_PROVIDER ERROR: log.cpp:841: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2026-01-08T15:29:25.826434Z node 14 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: LookupError, path: Root/.metadata/script_executions 2026-01-08T15:29:25.843701Z node 14 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(14, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2026-01-08T15:29:25.843848Z node 14 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(14, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2026-01-08T15:29:25.911514Z node 14 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(14, (0,0,0,0)) VolatileState: Connecting -> Connected 2026-01-08T15:29:25.912339Z node 14 :IMPORT WARN: schemeshard_import.cpp:440: Table profiles were not loaded 2026-01-08T15:29:26.082496Z node 14 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: PathErrorUnknown, path: Root/.metadata/script_executions 2026-01-08T15:29:26.252736Z node 14 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2026-01-08T15:29:26.252777Z node 14 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2026-01-08T15:29:26.252790Z node 14 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2026-01-08T15:29:26.252920Z node 14 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2026-01-08T15:29:26.638498Z node 14 :TX_CONVEYOR ERROR: log.cpp:841: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2026-01-08T15:29:27.683627Z node 14 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2026-01-08T15:29:27.695256Z node 14 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:123: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2026-01-08T15:29:27.714226Z node 14 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose 
itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:29:27.827281Z node 14 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:29:28.090412Z node 14 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:29:28.220300Z node 14 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:29:30.615995Z node 14 :METADATA_PROVIDER ERROR: log.cpp:841: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[14:7593013264245589385:2101];send_to=[0:7307199536658146131:7762515]; 2026-01-08T15:29:30.616125Z node 14 :METADATA_PROVIDER ERROR: log.cpp:841: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2026-01-08T15:29:32.402503Z node 14 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [14:7593013294310362274:2403], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:29:32.402663Z node 14 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:263: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:29:32.403568Z node 14 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [14:7593013294310362283:2404], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:29:32.403655Z node 14 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:263: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:29:32.590862Z node 14 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:29:32.692338Z node 14 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:29:32.781348Z node 14 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:29:32.872222Z node 14 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:29:32.997135Z node 14 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:29:33.095606Z node 14 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:29:33.231486Z node 14 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:29:33.381485Z node 14 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710669:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:29:33.589201Z node 14 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [14:7593013298605330476:2489], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:29:33.589384Z node 14 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:263: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:29:33.590054Z node 14 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [14:7593013298605330481:2492], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:29:33.590133Z node 14 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [14:7593013298605330482:2493], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:29:33.590387Z node 14 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:263: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:29:33.601099Z node 14 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710670:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2026-01-08T15:29:33.673088Z node 14 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [14:7593013298605330485:2494], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710670 completed, doublechecking } 2026-01-08T15:29:33.778902Z node 14 :TX_PROXY ERROR: schemereq.cpp:624: Actor# [14:7593013298605330550:3783] txid# 281474976710671, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 53], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2026-01-08T15:29:36.847220Z node 14 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateSecret, opId: 281474976710673:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_secret.cpp:267) |85.8%| [TM] {asan, default-linux-x86_64, release} ydb/public/sdk/cpp/tests/integration/sessions/gtest >> YdbSdkSessions::TestSessionPool [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_send_message[tables_format_v0-std] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_send_message[tables_format_v1-fifo] |85.8%| [TM] {BAZEL_UPLOAD} ydb/core/kqp/federated_query/ut_service/fast/unittest |85.8%| [TM] {BAZEL_UPLOAD} ydb/public/sdk/cpp/tests/integration/sessions/gtest |85.8%| [TA] $(B)/ydb/core/base/ut/test-results/unittest/{meta.json ... results_accumulator.log} >> TControlPlaneProxyCheckPermissionsSuccess::ShouldSendTestConnection [GOOD] >> TControlPlaneProxyCheckPermissionsSuccess::ShouldSendTestConnectionWithServiceAccount |85.8%| [TA] {BAZEL_UPLOAD} $(B)/ydb/core/base/ut/test-results/unittest/{meta.json ... results_accumulator.log} >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_visibility_timeout_expires_on_wait_timeout[tables_format_v0] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_visibility_timeout_expires_on_wait_timeout[tables_format_v1] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_validates_message_attribute_value[tables_format_v1] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_validates_message_attributes[tables_format_v0] |85.7%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/persqueue/public/mlp/ut/ydb-core-persqueue-public-mlp-ut ------- [TM] {asan, default-linux-x86_64, release} ydb/public/sdk/cpp/src/client/persqueue_public/ut/with_offset_ranges_mode_ut/unittest >> PersQueueSdkReadSessionTest::ClosesAfterFailedConnectionToCds [GOOD] Test command err: 2026-01-08T15:26:07.907127Z :WriteAndReadSomeMessagesWithSyncCompression INFO: Random seed for debugging is 1767885967907101 2026-01-08T15:26:08.897723Z node 1 :METADATA_PROVIDER WARN: log.cpp:841: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7593012418970636974:2090];send_to=[0:7307199536658146131:7762515]; 2026-01-08T15:26:08.912035Z node 1 :METADATA_PROVIDER ERROR: log.cpp:841: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2026-01-08T15:26:08.999927Z node 1 :PQ_READ_PROXY DEBUG: caching_service.cpp:44: Direct read cache: : Created 2026-01-08T15:26:09.024750Z node 2 :METADATA_PROVIDER WARN: log.cpp:841: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7593012420190948392:2109];send_to=[0:7307199536658146131:7762515]; 2026-01-08T15:26:09.025468Z node 2 :METADATA_PROVIDER 
ERROR: log.cpp:841: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2026-01-08T15:26:09.062206Z node 2 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:125: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, got undelivered to scheme cache: ActorUnknown 2026-01-08T15:26:09.062608Z node 2 :PQ_READ_PROXY DEBUG: caching_service.cpp:44: Direct read cache: : Created 2026-01-08T15:26:09.350725Z node 1 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: LookupError, path: Root/.metadata/script_executions 2026-01-08T15:26:09.361513Z node 2 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: LookupError, path: Root/.metadata/script_executions 2026-01-08T15:26:09.491438Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2026-01-08T15:26:09.491516Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2026-01-08T15:26:09.491664Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2026-01-08T15:26:09.491698Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2026-01-08T15:26:09.519565Z node 1 :HIVE WARN: hive_impl.cpp:814: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 2 Cookie 2 2026-01-08T15:26:09.519930Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2026-01-08T15:26:09.536542Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2026-01-08T15:26:09.773547Z node 2 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: PathErrorUnknown, path: Root/.metadata/script_executions 2026-01-08T15:26:09.780892Z node 1 :IMPORT WARN: schemeshard_import.cpp:440: Table profiles were not loaded 2026-01-08T15:26:09.943252Z node 1 :TX_CONVEYOR ERROR: log.cpp:841: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2026-01-08T15:26:10.074687Z node 2 :TX_CONVEYOR ERROR: log.cpp:841: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2026-01-08T15:26:10.126391Z node 1 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: PathErrorUnknown, path: Root/.metadata/script_executions 2026-01-08T15:26:10.195851Z node 2 :BS_NODE WARN: {NWDC01@distconf.cpp:423} StateFunc too long Type# 268639238 Duration# 0.008457s 2026-01-08T15:26:10.727164Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: /home/runner/.ya/build/build_root/663n/003ea4/r3tmp/yandexdp4kpa.tmp 2026-01-08T15:26:10.727197Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: /home/runner/.ya/build/build_root/663n/003ea4/r3tmp/yandexdp4kpa.tmp 
2026-01-08T15:26:10.737991Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:202: successfully initialized from file: /home/runner/.ya/build/build_root/663n/003ea4/r3tmp/yandexdp4kpa.tmp 2026-01-08T15:26:10.744504Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2026-01-08T15:26:11.189477Z INFO: TTestServer started on Port 25493 GrpcPort 9297 PQClient connected to localhost:9297 2026-01-08T15:26:11.702554Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2026-01-08T15:26:11.718263Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:123: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2026-01-08T15:26:13.899562Z node 1 :METADATA_PROVIDER ERROR: log.cpp:841: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7593012418970636974:2090];send_to=[0:7307199536658146131:7762515]; 2026-01-08T15:26:13.899629Z node 1 :METADATA_PROVIDER ERROR: log.cpp:841: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2026-01-08T15:26:14.006439Z node 2 :METADATA_PROVIDER ERROR: log.cpp:841: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[2:7593012420190948392:2109];send_to=[0:7307199536658146131:7762515]; 2026-01-08T15:26:14.006670Z node 2 :METADATA_PROVIDER ERROR: log.cpp:841: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2026-01-08T15:26:17.665133Z node 1 :KQP_PROXY ERROR: kqp_proxy_service.cpp:1359: TraceId: "01kef3e8jb0bjmcdk9n2k1x238", Request deadline has expired for 0.654784s seconds 2026-01-08T15:26:17.666843Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7593012457625343991:2343], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:26:17.666994Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7593012457625343980:2340], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:26:17.667144Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:263: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:26:17.675243Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7593012457625343995:2345], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:26:17.676299Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:263: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:26:17.723120Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715661:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2026-01-08T15:26:17.901596Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7593012457625343994:2344], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715661 completed, doublechecking } 2026-01-08T15:26:18.134260Z node 1 :TX_PROXY ERROR: schemereq.cpp:624: Actor# [1:7593012457625344080:2999] txid# 281474976715662, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 44], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2026-01-08T15:26:19.202954Z node 1 :BS_CONTROLLER ERROR: {BSC07@impl.h:2204} ProcessControllerEvent event processing took too much time Type# 268637704 Duration# 0.396026s 2026-01-08T15:26:19.203342Z node 1 :BS_CONTROLLER ERROR: {BSC00@bsc.cpp:1009} StateWork event processing took too much time Type# 2146435078 Duration# 0.396094s 2026-01-08T15:26:19.753989Z node 2 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:625: Compilation failed, self: [2:7593012458845654379:2317], status: SCHEME_ERROR, issues:
: Error: Type annotation, code: 1030
:3:16: Error: At function: KiReadTable!
:3:16: Error: Cannot find table 'db.[/Root/PQ/Config/V2/Cluster]' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions., code: 2003 2026-01-08T15:26:19.764094Z node 2 :KQP_SESSION WARN: {KQPSA@kqp_session_actor.cpp:2679} SessionId: ydb://session/3?node_id=2&id=ODlkOWEyNTktNjIyMjIyNDctNjIyNTdlYTItNGE3NTMwYTE=, ActorId: [2:7593012458845654354:2311], ActorState: ExecuteState, LegacyTraceId: 01kef3eeqz88p54d8wbr60076j, ReplyQueryCompileError, remove tx status# SCHEME_ERROR issues# { message: "Type annotation" issue_code: 1030 severity: 1 issues { position { row: 3 column: 16 } message: "At function: KiReadTable!" end_position { row: 3 column: 16 } severity: 1 issues { position { row: 3 column: 16 } message: "Cannot find table \'db.[/Root/PQ/Config/V2/Cluster]\' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions." end_position { row: 3 column: 16 } issue_code: 2003 severity: 1 } } } tx_id# trace_id# 2026-01-08T15:26:19.810117Z node 2 :PERSQUEUE_CLUSTER_TRACKER ERROR: cluster_tracker.cpp:167: failed to list clusters: { Response { QueryIssues { message: "Type annotation" issue_code: 1030 severity: 1 issues { position { row: 3 column: 16 } message: "At function: KiReadTable!" end_position { row: 3 column: 16 } severity: 1 issues { position { row: 3 column: 16 } message: "Cannot find table \'db.[/Root/PQ/Config/V2/Cluster]\' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions." end_position { row: 3 column: 16 } issue_code: 2003 severity: 1 } } } TxMeta { } } YdbStatus: SCHEME_ERROR ConsumedRu: 1 } 2026-01-08T15:26:19.848524Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:26:19.850113Z node 1 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:625: Compilation failed, self: ... n.cpp:2342: [72075186224037892][Partition][0][StateIdle] Batch completed (1) 2026-01-08T15:29:41.132524Z node 16 :PERSQUEUE DEBUG: partition.cpp:2347: [72075186224037892][Partition][0][StateIdle] Try persist 2026-01-08T15:29:41.132572Z node 16 :PERSQUEUE DEBUG: partition_write.cpp:37: [72075186224037892][Partition][0][StateIdle] TPartition::ReplyOwnerOk. 
Partition: 0 2026-01-08T15:29:41.132626Z node 16 :PERSQUEUE DEBUG: pq_impl.cpp:182: Answer ok topic: 'rt3.dc1--test-topic' partition: 0 messageNo: 0 requestId: cookie: 0 2026-01-08T15:29:41.135386Z node 16 :PERSQUEUE DEBUG: pq_impl.cpp:147: Handle TEvRequest topic: 'rt3.dc1--test-topic' requestId: 2026-01-08T15:29:41.135434Z node 16 :PERSQUEUE DEBUG: pq_impl.cpp:2698: [PQ: 72075186224037892] got client message batch for topic 'rt3.dc1--test-topic' partition 0 2026-01-08T15:29:41.135538Z node 16 :PERSQUEUE DEBUG: pq_impl.cpp:182: Answer ok topic: 'rt3.dc1--test-topic' partition: 0 messageNo: 0 requestId: cookie: 0 2026-01-08T15:29:41.135889Z node 15 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:884: session inited cookie: 1 partition: 0 MaxSeqNo: 0 sessionId: src|7a9e7ade-702d73d3-b0b3ed1d-7639eb5a_0 2026-01-08T15:29:41.136818Z :INFO: [] MessageGroupId [src] SessionId [] Counters: { Errors: 0 CurrentSessionLifetimeMs: 1767886181136 BytesWritten: 0 MessagesWritten: 0 BytesWrittenCompressed: 0 BytesInflightUncompressed: 0 BytesInflightCompressed: 0 BytesInflightTotal: 0 MessagesInflight: 0 } 2026-01-08T15:29:41.136951Z :INFO: [] MessageGroupId [src] SessionId [] Write session established. Init response: session_id: "src|7a9e7ade-702d73d3-b0b3ed1d-7639eb5a_0" topic: "test-topic" cluster: "dc1" supported_codecs: CODEC_RAW supported_codecs: CODEC_GZIP supported_codecs: CODEC_LZOP 2026-01-08T15:29:41.137118Z :INFO: [] MessageGroupId [src] SessionId [src|7a9e7ade-702d73d3-b0b3ed1d-7639eb5a_0] Write session: close. Timeout = 0 ms 2026-01-08T15:29:41.137156Z :INFO: [] MessageGroupId [src] SessionId [src|7a9e7ade-702d73d3-b0b3ed1d-7639eb5a_0] Write session will now close 2026-01-08T15:29:41.137198Z :DEBUG: [] MessageGroupId [src] SessionId [src|7a9e7ade-702d73d3-b0b3ed1d-7639eb5a_0] Write session: aborting 2026-01-08T15:29:41.137586Z :INFO: [] MessageGroupId [src] SessionId [src|7a9e7ade-702d73d3-b0b3ed1d-7639eb5a_0] Write session: gracefully shut down, all writes complete 2026-01-08T15:29:41.137634Z :DEBUG: [] MessageGroupId [src] SessionId [src|7a9e7ade-702d73d3-b0b3ed1d-7639eb5a_0] Write session: destroy 2026-01-08T15:29:41.139350Z node 15 :PQ_WRITE_PROXY DEBUG: write_session_actor.cpp:254: session v1 cookie: 1 sessionId: src|7a9e7ade-702d73d3-b0b3ed1d-7639eb5a_0 grpc read done: success: 0 data: 2026-01-08T15:29:41.139384Z node 15 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:256: session v1 cookie: 1 sessionId: src|7a9e7ade-702d73d3-b0b3ed1d-7639eb5a_0 grpc read failed 2026-01-08T15:29:41.139418Z node 15 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:232: session v1 cookie: 1 sessionId: src|7a9e7ade-702d73d3-b0b3ed1d-7639eb5a_0 grpc closed 2026-01-08T15:29:41.139442Z node 15 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:304: session v1 cookie: 1 sessionId: src|7a9e7ade-702d73d3-b0b3ed1d-7639eb5a_0 is DEAD 2026-01-08T15:29:41.140376Z node 15 :PQ_WRITE_PROXY DEBUG: writer.cpp:562: TPartitionWriter 72075186224037892 (partition=0) Received event: NActors::TEvents::TEvPoison 2026-01-08T15:29:41.140871Z node 16 :PERSQUEUE DEBUG: pq_impl.cpp:2809: [PQ: 72075186224037892] server disconnected, pipe [15:7593013334723340389:2468] destroyed 2026-01-08T15:29:41.140926Z node 16 :PERSQUEUE DEBUG: partition_write.cpp:140: [72075186224037892][Partition][0][StateIdle] TPartition::DropOwner. 
2026-01-08T15:29:41.140963Z node 16 :PERSQUEUE DEBUG: partition.cpp:2329: [72075186224037892][Partition][0][StateIdle] Process user action and tx events 2026-01-08T15:29:41.140981Z node 16 :PERSQUEUE DEBUG: partition.cpp:2388: [72075186224037892][Partition][0][StateIdle] Events: 0, PendingCommits: 0, PendingWrites: 0 2026-01-08T15:29:41.141000Z node 16 :PERSQUEUE DEBUG: partition.cpp:2337: [72075186224037892][Partition][0][StateIdle] Process user action and tx pending commits 2026-01-08T15:29:41.141024Z node 16 :PERSQUEUE DEBUG: partition.cpp:2388: [72075186224037892][Partition][0][StateIdle] Events: 0, PendingCommits: 0, PendingWrites: 0 2026-01-08T15:29:41.141042Z node 16 :PERSQUEUE DEBUG: partition.cpp:2347: [72075186224037892][Partition][0][StateIdle] Try persist 2026-01-08T15:29:41.169743Z :INFO: [/Root] [/Root] [a7b7f1d-2cb81112-c0f8bc1c-77639836] Starting read session 2026-01-08T15:29:41.169794Z :DEBUG: [/Root] [/Root] [a7b7f1d-2cb81112-c0f8bc1c-77639836] Starting cluster discovery 2026-01-08T15:29:41.170018Z :INFO: [/Root] [/Root] [a7b7f1d-2cb81112-c0f8bc1c-77639836] Cluster discovery request failed. Status: TRANSPORT_UNAVAILABLE. Issues: "
: Error: GRpc error: (14): connections to all backends failing; last error: UNKNOWN: ipv4:127.0.0.1:25844: Failed to connect to remote host: Connection refused
: Error: Grpc error response on endpoint localhost:25844
: Error: Endpoint list is empty for database /Root, cluster endpoint localhost:25844. " 2026-01-08T15:29:41.170063Z :DEBUG: [/Root] [/Root] [a7b7f1d-2cb81112-c0f8bc1c-77639836] Restart cluster discovery in 0.006723s 2026-01-08T15:29:41.177540Z :DEBUG: [/Root] [/Root] [a7b7f1d-2cb81112-c0f8bc1c-77639836] Starting cluster discovery 2026-01-08T15:29:41.177936Z :INFO: [/Root] [/Root] [a7b7f1d-2cb81112-c0f8bc1c-77639836] Cluster discovery request failed. Status: TRANSPORT_UNAVAILABLE. Issues: "
: Error: GRpc error: (14): connections to all backends failing; last error: UNKNOWN: ipv4:127.0.0.1:25844: Failed to connect to remote host: Connection refused
: Error: Grpc error response on endpoint localhost:25844
: Error: Endpoint list is empty for database /Root, cluster endpoint localhost:25844. " 2026-01-08T15:29:41.178010Z :DEBUG: [/Root] [/Root] [a7b7f1d-2cb81112-c0f8bc1c-77639836] Restart cluster discovery in 0.011024s 2026-01-08T15:29:41.186406Z node 16 :PERSQUEUE DEBUG: partition.cpp:2329: [72075186224037892][Partition][0][StateIdle] Process user action and tx events 2026-01-08T15:29:41.186448Z node 16 :PERSQUEUE DEBUG: partition.cpp:2388: [72075186224037892][Partition][0][StateIdle] Events: 0, PendingCommits: 0, PendingWrites: 0 2026-01-08T15:29:41.186461Z node 16 :PERSQUEUE DEBUG: partition.cpp:2337: [72075186224037892][Partition][0][StateIdle] Process user action and tx pending commits 2026-01-08T15:29:41.186480Z node 16 :PERSQUEUE DEBUG: partition.cpp:2388: [72075186224037892][Partition][0][StateIdle] Events: 0, PendingCommits: 0, PendingWrites: 0 2026-01-08T15:29:41.186496Z node 16 :PERSQUEUE DEBUG: partition.cpp:2347: [72075186224037892][Partition][0][StateIdle] Try persist 2026-01-08T15:29:41.189521Z :DEBUG: [/Root] [/Root] [a7b7f1d-2cb81112-c0f8bc1c-77639836] Starting cluster discovery 2026-01-08T15:29:41.189779Z :INFO: [/Root] [/Root] [a7b7f1d-2cb81112-c0f8bc1c-77639836] Cluster discovery request failed. Status: TRANSPORT_UNAVAILABLE. Issues: "
: Error: GRpc error: (14): connections to all backends failing; last error: UNKNOWN: ipv4:127.0.0.1:25844: Failed to connect to remote host: Connection refused
: Error: Grpc error response on endpoint localhost:25844
: Error: Endpoint list is empty for database /Root, cluster endpoint localhost:25844. " 2026-01-08T15:29:41.189819Z :DEBUG: [/Root] [/Root] [a7b7f1d-2cb81112-c0f8bc1c-77639836] Restart cluster discovery in 0.022302s 2026-01-08T15:29:41.212512Z :DEBUG: [/Root] [/Root] [a7b7f1d-2cb81112-c0f8bc1c-77639836] Starting cluster discovery 2026-01-08T15:29:41.212856Z :NOTICE: [/Root] [/Root] [a7b7f1d-2cb81112-c0f8bc1c-77639836] Aborting read session. Description: SessionClosed { Status: TRANSPORT_UNAVAILABLE Issues: "
: Error: Failed to discover clusters
: Error: GRpc error: (14): connections to all backends failing; last error: UNKNOWN: ipv4:127.0.0.1:25844: Failed to connect to remote host: Connection refused
: Error: Grpc error response on endpoint localhost:25844
: Error: Endpoint list is empty for database /Root, cluster endpoint localhost:25844. " } 2026-01-08T15:29:41.213081Z :NOTICE: [/Root] [/Root] [a7b7f1d-2cb81112-c0f8bc1c-77639836] Aborting read session. Description: SessionClosed { Status: ABORTED Issues: "
: Error: Aborted " } SessionClosed { Status: TRANSPORT_UNAVAILABLE Issues: "
: Error: Failed to discover clusters
: Error: GRpc error: (14): connections to all backends failing; last error: UNKNOWN: ipv4:127.0.0.1:25844: Failed to connect to remote host: Connection refused
: Error: Grpc error response on endpoint localhost:25844
: Error: Endpoint list is empty for database /Root, cluster endpoint localhost:25844. " } 2026-01-08T15:29:41.213232Z :INFO: [/Root] [/Root] [a7b7f1d-2cb81112-c0f8bc1c-77639836] Closing read session. Close timeout: 0.000000s 2026-01-08T15:29:41.213354Z :NOTICE: [/Root] [/Root] [a7b7f1d-2cb81112-c0f8bc1c-77639836] Aborting read session. Description: SessionClosed { Status: ABORTED Issues: "
: Error: Aborted " } 2026-01-08T15:29:41.288149Z node 16 :PERSQUEUE DEBUG: partition.cpp:2329: [72075186224037892][Partition][0][StateIdle] Process user action and tx events 2026-01-08T15:29:41.288193Z node 16 :PERSQUEUE DEBUG: partition.cpp:2388: [72075186224037892][Partition][0][StateIdle] Events: 0, PendingCommits: 0, PendingWrites: 0 2026-01-08T15:29:41.288212Z node 16 :PERSQUEUE DEBUG: partition.cpp:2337: [72075186224037892][Partition][0][StateIdle] Process user action and tx pending commits 2026-01-08T15:29:41.288235Z node 16 :PERSQUEUE DEBUG: partition.cpp:2388: [72075186224037892][Partition][0][StateIdle] Events: 0, PendingCommits: 0, PendingWrites: 0 2026-01-08T15:29:41.288256Z node 16 :PERSQUEUE DEBUG: partition.cpp:2347: [72075186224037892][Partition][0][StateIdle] Try persist 2026-01-08T15:29:41.347266Z node 15 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7747: Cannot get console configs 2026-01-08T15:29:41.347300Z node 15 :IMPORT WARN: schemeshard_import.cpp:440: Table profiles were not loaded 2026-01-08T15:29:41.390476Z node 16 :PERSQUEUE DEBUG: partition.cpp:2329: [72075186224037892][Partition][0][StateIdle] Process user action and tx events 2026-01-08T15:29:41.390515Z node 16 :PERSQUEUE DEBUG: partition.cpp:2388: [72075186224037892][Partition][0][StateIdle] Events: 0, PendingCommits: 0, PendingWrites: 0 2026-01-08T15:29:41.390531Z node 16 :PERSQUEUE DEBUG: partition.cpp:2337: [72075186224037892][Partition][0][StateIdle] Process user action and tx pending commits 2026-01-08T15:29:41.390551Z node 16 :PERSQUEUE DEBUG: partition.cpp:2388: [72075186224037892][Partition][0][StateIdle] Events: 0, PendingCommits: 0, PendingWrites: 0 2026-01-08T15:29:41.390569Z node 16 :PERSQUEUE DEBUG: partition.cpp:2347: [72075186224037892][Partition][0][StateIdle] Try persist 2026-01-08T15:29:41.867282Z node 15 :KQP_EXECUTER ERROR: {KQPEX@kqp_executer_impl.h:1259} ActorId: [15:7593013334723340435:2480] TxId: 281474976710676. Ctx: { TraceId: 01kef3mn0c4394nj0as108g270, Database: /Root, SessionId: ydb://session/3?node_id=15&id=NjUwMWZhNDAtM2IyOWRhMzMtYTAyMzMxY2YtOGU5ZDExZjQ=, PoolId: default, IsStreamingQuery: 0}. UNAVAILABLE: Failed to send EvStartKqpTasksRequest because node is unavailable: 16 trace_id# 2026-01-08T15:29:41.867434Z node 15 :KQP_COMPUTE ERROR: dq_compute_actor_impl.h:1264: SelfId: [15:7593013334723340439:2480], TxId: 281474976710676, task: 3. Ctx: { TraceId : 01kef3mn0c4394nj0as108g270. CheckpointId : . RunScriptActorId : [0:0:0]. CustomerSuppliedId : . PoolId : default. SessionId : ydb://session/3?node_id=15&id=NjUwMWZhNDAtM2IyOWRhMzMtYTAyMzMxY2YtOGU5ZDExZjQ=. CurrentExecutionId : . Database : /Root. DatabaseId : /Root. }. Handle abort execution event from: [15:7593013334723340435:2480], status: UNAVAILABLE, reason: {
: Error: Terminate execution } |85.8%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/persqueue/public/mlp/ut/ydb-core-persqueue-public-mlp-ut |85.8%| [TM] {RESULT} ydb/tests/tools/kqprun/tests/py3test |85.8%| [TM] {RESULT} ydb/tests/fq/yt/kqp_yt_import/py3test |85.8%| [TS] {RESULT} ydb/core/ymq/actor/cloud_events/cloud_events_ut/unittest |85.8%| [TM] {RESULT} ydb/core/tx/datashard/ut_background_compaction/unittest >> YdbSdkSessions::TestActiveSessionCountAfterTransportError [GOOD] |85.8%| [TM] {BAZEL_UPLOAD} ydb/public/sdk/cpp/src/client/persqueue_public/ut/with_offset_ranges_mode_ut/unittest |85.8%| [TM] {RESULT} ydb/core/kqp/federated_query/ut_service/fast/unittest |85.8%| [TA] {RESULT} $(B)/ydb/core/base/ut/test-results/unittest/{meta.json ... results_accumulator.log} |85.8%| [LD] {RESULT} $(B)/ydb/core/persqueue/public/mlp/ut/ydb-core-persqueue-public-mlp-ut |85.8%| [TA] {RESULT} $(B)/ydb/core/blobstorage/dsproxy/ut_strategy/test-results/unittest/{meta.json ... results_accumulator.log} |85.8%| [TS] {RESULT} ydb/core/persqueue/public/describer/ut/unittest |85.8%| [TM] {RESULT} ydb/core/tx/datashard/ut_vacuum/unittest |85.8%| [TM] {RESULT} ydb/core/tx/datashard/ut_reassign/unittest >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_send_and_read_multiple_messages[tables_format_v1] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_send_message[tables_format_v0-fifo] >> test.py::test[solomon-HistResponse-default.txt] [GOOD] >> test.py::test[solomon-InvalidProject-] >> TControlPlaneProxyCheckPermissionsSuccess::ShouldSendTestConnectionWithServiceAccount [GOOD] >> TControlPlaneProxyCheckPermissionsSuccess::ShouldSendCreateBinding >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_does_not_change_visibility_not_in_flight[tables_format_v1-std] [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/public/sdk/cpp/tests/integration/sessions/gtest >> YdbSdkSessions::TestSdkFreeSessionAfterBadSessionQueryService [SKIPPED] Test command err: ydb/public/sdk/cpp/tests/integration/sessions/main.cpp:200: Test is failing right now |85.8%| [TM] {BAZEL_UPLOAD} ydb/public/sdk/cpp/tests/integration/sessions/gtest >> QuoterWithKesusTest::GetsQuota [GOOD] >> QuoterWithKesusTest::GetsBigQuota >> TControlPlaneProxyCheckPermissionsSuccess::ShouldSendCreateBinding [GOOD] >> TControlPlaneProxyCheckPermissionsSuccess::ShouldSendListBindings >> test_auditlog.py::test_broken_dynconfig[_client_session_pool_bad_auth-_good_dynconfig] [GOOD] >> YdbSdkSessions::MultiThreadSessionPoolLimitSyncTableClient [GOOD] >> YdbSdkSessions::MultiThreadSessionPoolLimitSyncQueryClient >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_partial_delete_works[tables_format_v0] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_partial_delete_works[tables_format_v1] >> test_auditlog.py::test_broken_dynconfig[_client_session_pool_with_auth_other-_bad_dynconfig] [GOOD] |85.8%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/kesus/tablet/quoter_performance_test/quoter_performance_test |85.8%| [LD] {RESULT} $(B)/ydb/core/kesus/tablet/quoter_performance_test/quoter_performance_test |85.9%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/kesus/tablet/quoter_performance_test/quoter_performance_test >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_send_message[tables_format_v1-fifo] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_send_message[tables_format_v1-std] >> 
test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_create_q_twice[tables_format_v1-std] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_create_queue_by_nonexistent_user_fails[tables_format_v0] >> TControlPlaneProxyCheckPermissionsSuccess::ShouldSendListBindings [GOOD] >> TControlPlaneProxyCheckPermissionsSuccess::ShouldSendDescribeBinding ------- [TM] {asan, default-linux-x86_64, release} ydb/public/sdk/cpp/tests/integration/sessions/gtest >> YdbSdkSessions::SessionsServerLimit [SKIPPED] Test command err: ydb/public/sdk/cpp/tests/integration/sessions/main.cpp:548: Enable after accepting a pull request with merging configs |85.9%| [TM] {BAZEL_UPLOAD} ydb/public/sdk/cpp/tests/integration/sessions/gtest |85.9%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/script_execution/py3test |85.9%| [TM] {BAZEL_UPLOAD} ydb/tests/functional/script_execution/py3test >> TMemoryController::MemTable [GOOD] >> TMemoryController::ResourceBroker >> test_update_script_tables.py::TestUpdateScriptTablesYdb::test_recreate_tables[ALTER TABLE {} DROP COLUMN syntax, DROP COLUMN ast, DROP COLUMN stats-`.metadata/script_executions`] >> TControlPlaneProxyCheckPermissionsSuccess::ShouldSendDescribeBinding [GOOD] >> TControlPlaneProxyCheckPermissionsSuccess::ShouldSendModifyBinding >> TGRpcRateLimiterTest::AcquireResourceManyUsedActorApi [GOOD] >> TGRpcRateLimiterTest::AcquireResourceManyUsedGrpcApiWithCancelAfter >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_validates_message_attributes[tables_format_v0] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_validates_message_attributes[tables_format_v1] >> KeyValueGRPCService::SimpleWriteReadWithoutTokenV2 [GOOD] >> KeyValueGRPCService::SimpleWriteReadWithoutLockGeneration1V1 >> TControlPlaneProxyCheckPermissionsSuccess::ShouldSendModifyBinding [GOOD] >> TControlPlaneProxyCheckPermissionsSuccess::ShouldSendDeleteBinding |85.9%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/sqs/with_quotas/py3test >> test_quoting.py::TestSqsQuotingWithLocalRateLimiter::test_does_actions_with_queue[tables_format_v0-fifo] [GOOD] |85.9%| [TM] {BAZEL_UPLOAD} ydb/tests/functional/sqs/with_quotas/py3test >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_create_queue_by_nonexistent_user_fails[tables_format_v0] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_send_message[tables_format_v1-std] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_send_message_batch[tables_format_v0-fifo] >> test_update_script_tables.py::TestUpdateScriptTablesYdb::test_recreate_tables[ALTER TABLE {} DROP COLUMN syntax-`.metadata/script_executions`] |85.9%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/script_execution/py3test |85.9%| [TM] {BAZEL_UPLOAD} ydb/tests/functional/script_execution/py3test |85.9%| [TM] {asan, default-linux-x86_64, release} ydb/public/sdk/cpp/tests/integration/sessions/gtest >> YdbSdkSessions::TestActiveSessionCountAfterTransportError [GOOD] |85.9%| [TM] {BAZEL_UPLOAD} ydb/public/sdk/cpp/tests/integration/sessions/gtest >> TControlPlaneProxyCheckPermissionsSuccess::ShouldSendDeleteBinding [GOOD] >> TControlPlaneProxyShouldPassHids::ShouldCheckScenario >> DataShardFollowers::FollowerReadDuringSplit [GOOD] >> DataShardSnapshotIsolation::ReadWriteObserveOwnChanges [GOOD] >> DataShardSnapshotIsolation::ReadWriteCommitConflictThenRead |85.9%| [TM] {asan, default-linux-x86_64, 
release} ydb/tests/functional/script_execution/py3test |85.9%| [TM] {BAZEL_UPLOAD} ydb/tests/functional/script_execution/py3test >> TFetchRequestTests::EmptyTopic [GOOD] >> TFetchRequestTests::BadTopicName >> YdbSdkSessions::CloseSessionWithSessionPoolExplicitDriverStopOnly [GOOD] >> YdbSdkSessions::CloseSessionWithSessionPoolFromDtors >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_validates_message_attributes[tables_format_v1] [GOOD] >> TTxDataShardLocalKMeansScan::MainToBuildWithOverlap [GOOD] >> TTxDataShardLocalKMeansScan::BuildToPosting >> test_fifo_messaging.py::TestSqsFifoMessagingWithPath::test_write_read_delete_many_groups[tables_format_v1] [GOOD] >> test_update_script_tables.py::TestUpdateScriptTablesYdb::test_recreate_tables[ALTER TABLE {} DROP COLUMN syntax, DROP COLUMN ast-`.metadata/script_executions`] |85.9%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/script_execution/py3test |85.9%| [TM] {BAZEL_UPLOAD} ydb/tests/functional/script_execution/py3test >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_send_message[tables_format_v0-fifo] [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/datashard/ut_followers/unittest >> DataShardFollowers::FollowerReadDuringSplit [GOOD] Test command err: ... waiting for SysViewsRoster update finished 2026-01-08T15:28:51.110706Z node 1 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:125: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, got undelivered to scheme cache: ActorUnknown 2026-01-08T15:28:51.244504Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:114:2161], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2026-01-08T15:28:51.244956Z node 1 :METADATA_PROVIDER ERROR: log.cpp:841: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2026-01-08T15:28:51.245154Z node 1 :METADATA_PROVIDER ERROR: log.cpp:841: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2026-01-08T15:28:51.967473Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2026-01-08T15:28:51.967596Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2026-01-08T15:28:52.059764Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1374: Notification cookie mismatch for subscription [1:34:2081] 1767886127605395 != 1767886127605399 2026-01-08T15:28:52.073562Z node 1 :IMPORT WARN: schemeshard_import.cpp:440: Table profiles were not loaded 2026-01-08T15:28:52.126741Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2026-01-08T15:28:52.241324Z node 1 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: PathErrorUnknown, path: Root/.metadata/script_executions ... waiting for SysViewsRoster update finished (done) 2026-01-08T15:28:52.572022Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2026-01-08T15:28:52.588313Z node 1 :TX_CONVEYOR ERROR: log.cpp:841: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2026-01-08T15:28:52.703200Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:314: actor# [1:68:2115] Handle TEvProposeTransaction 2026-01-08T15:28:52.703285Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:237: actor# [1:68:2115] TxId# 281474976715657 ProcessProposeTransaction 2026-01-08T15:28:52.703395Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:256: actor# [1:68:2115] Cookie# 0 userReqId# "" txid# 281474976715657 SEND to# [1:861:2744] 2026-01-08T15:28:52.866387Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1822: Actor# [1:861:2744] txid# 281474976715657 Bootstrap EvSchemeRequest record: Transaction { ModifyScheme { WorkingDir: "/Root" OperationType: ESchemeOpCreateTable CreateTable { Name: "table-1" Columns { Name: "key" Type: "Uint32" FamilyName: "" NotNull: false } Columns { Name: "value" Type: "Uint32" FamilyName: "" NotNull: false } KeyColumnNames: "key" UniformPartitionsCount: 1 PartitionConfig { FollowerGroups { FollowerCount: 1 AllowLeaderPromotion: false } } } } } ExecTimeoutPeriod: 18446744073709551615 2026-01-08T15:28:52.866514Z node 1 :TX_PROXY DEBUG: schemereq.cpp:647: Actor# [1:861:2744] txid# 281474976715657 Bootstrap, UserSID: CheckAdministrator: 0 CheckDatabaseAdministrator: 0 2026-01-08T15:28:52.867132Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1887: Actor# [1:861:2744] txid# 281474976715657 HANDLE EvNavigateKeySetResult TFlatSchemeReq marker# P6 2026-01-08T15:28:52.867226Z node 1 
:TX_PROXY DEBUG: schemereq.cpp:1877: Actor# [1:861:2744] txid# 281474976715657 TEvNavigateKeySet requested from SchemeCache 2026-01-08T15:28:52.867589Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1631: Actor# [1:861:2744] txid# 281474976715657 HANDLE EvNavigateKeySetResult TFlatSchemeReq marker# P5 ErrorCount# 0 2026-01-08T15:28:52.867720Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1666: Actor# [1:861:2744] HANDLE EvNavigateKeySetResult, txid# 281474976715657 shardToRequest# 72057594046644480 DomainKey# [OwnerId: 72057594046644480, LocalPathId: 1] DomainInfo.Params# Version: 1 PlanResolution: 500 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 RedirectRequired# true 2026-01-08T15:28:52.867875Z node 1 :TX_PROXY DEBUG: schemereq.cpp:117: Actor# [1:861:2744] txid# 281474976715657 SEND to# 72057594046644480 shardToRequest {TEvModifySchemeTransaction txid# 281474976715657 TabletId# 72057594046644480} 2026-01-08T15:28:52.870056Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:28:52.878782Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1533: Actor# [1:861:2744] txid# 281474976715657 HANDLE EvClientConnected 2026-01-08T15:28:52.879679Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1555: Actor# [1:861:2744] txid# 281474976715657 Status StatusAccepted HANDLE {TEvModifySchemeTransactionResult Status# StatusAccepted txid# 281474976715657} 2026-01-08T15:28:52.879744Z node 1 :TX_PROXY DEBUG: schemereq.cpp:627: Actor# [1:861:2744] txid# 281474976715657 SEND to# [1:829:2724] Source {TEvProposeTransactionStatus txid# 281474976715657 Status# 53} 2026-01-08T15:28:52.921794Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3119: StateInit, received event# 268828672, Sender [1:877:2759], Recipient [1:886:2765]: NKikimr::TEvTablet::TEvBoot 2026-01-08T15:28:52.922887Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3119: StateInit, received event# 268828673, Sender [1:877:2759], Recipient [1:886:2765]: NKikimr::TEvTablet::TEvRestored 2026-01-08T15:28:52.923163Z node 1 :TX_DATASHARD INFO: datashard.cpp:374: TDataShard::OnActivateExecutor: tablet 72075186224037888 actor [1:886:2765] 2026-01-08T15:28:52.923467Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:661: TxInitSchema.Execute 2026-01-08T15:28:52.973028Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateInactive, received event# 268828684, Sender [1:877:2759], Recipient [1:886:2765]: NKikimr::TEvTablet::TEvFollowerSyncComplete 2026-01-08T15:28:52.973809Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:727: TxInitSchema.Complete 2026-01-08T15:28:52.973915Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:50: TDataShard::TTxInit::Execute 2026-01-08T15:28:52.975529Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1324: LoadChangeRecords: QueueSize: 0, at tablet: 72075186224037888 2026-01-08T15:28:52.975628Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1380: LoadLockChangeRecords at tablet: 72075186224037888 2026-01-08T15:28:52.975689Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1429: LoadChangeRecordCommits at tablet: 72075186224037888 2026-01-08T15:28:52.976056Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:94: TDataShard::TTxInit::Complete 2026-01-08T15:28:52.976226Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:102: 
TDataShard::TTxInitRestored::Execute 2026-01-08T15:28:52.976313Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:142: DataShard 72075186224037888 persisting started state actor id [1:901:2765] in generation 1 2026-01-08T15:28:52.987679Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:151: TDataShard::TTxInitRestored::Complete 2026-01-08T15:28:53.045961Z node 1 :TX_DATASHARD INFO: datashard.cpp:418: Switched to work state WaitScheme tabletId 72075186224037888 2026-01-08T15:28:53.046182Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:458: 72075186224037888 not sending time cast registration request in state WaitScheme: missing processing params 2026-01-08T15:28:53.046300Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1259: Change sender created: at tablet: 72075186224037888, actorId: [1:903:2775] 2026-01-08T15:28:53.046360Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1264: Trying to activate change sender: at tablet: 72075186224037888 2026-01-08T15:28:53.046434Z node 1 :TX_DATASHARD INFO: datashard.cpp:1281: Cannot activate change sender: at tablet: 72075186224037888, state: WaitScheme 2026-01-08T15:28:53.046485Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2026-01-08T15:28:53.046730Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3153: StateWork, received event# 2146435072, Sender [1:886:2765], Recipient [1:886:2765]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvProgressTransaction 2026-01-08T15:28:53.046790Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3178: StateWork, processing event TEvPrivate::TEvProgressTransaction 2026-01-08T15:28:53.047099Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 72075186224037888 2026-01-08T15:28:53.047199Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 72075186224037888 2026-01-08T15:28:53.047331Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2026-01-08T15:28:53.047390Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 0 2026-01-08T15:28:53.047447Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:385: Check unit PlanQueue at 72075186224037888 2026-01-08T15:28:53.047484Z node 1 :TX_DATASHARD TRACE: plan_queue_unit.cpp:59: TPlanQueueUnit at 72075186224037888 has no attached operations 2026-01-08T15:28:53.047530Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:403: Unit PlanQueue has no ready operations at 72075186224037888 2026-01-08T15:28:53.047569Z node 1 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186224037888 TxInFly 0 2026-01-08T15:28:53.047628Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2026-01-08T15:28:53.048040Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3153: StateWork, received event# 269877761, Sender [1:887:2766], Recipient [1:886:2765]: NKikimr::TEvTabletPipe::TEvServerConnected 2026-01-08T15:28:53.048078Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3189: StateWork, processing event TEvTabletPipe::TEvServerConnected 2026-01-08T15:28:53.048129Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3740: Server connected at leader tablet# 72075186224037888, clientId# [1:882:2762], serverId# [1:887:2766], sessionId# [0:0:0] 2026-01-08T15:28:53.048222Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3153: StateWork, 
received event# 269549568, Sender [1:400:2399], Recipient [1:887:2766] 2026-01-08T15:28:53.048267Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3159: StateWork, processing event TEvDataShard::TEvProposeTransaction 2026-01-08T15:28:53.048417Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037888 2026-01-08T15:28:53.048656Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1848: Trying to execute [0:281474976715657] at 72075186224037888 on unit CheckSchemeTx 2026-01-08T15:28:53.048732Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:133: Propose scheme transaction at tablet 72075186224037888 txId 281474976715657 ssId 72057594046644480 seqNo 2:1 2026-01-08T15:28:53.048822Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:221: Prepared scheme transaction txId 281474976715657 at tablet 72075186224037888 2026-01-08T15:28:5 ... }, { items { uint32_value: 2 } items { uint32_value: 22 } } ... reading from the right follower 2026-01-08T15:29:49.286575Z node 8 :TX_PROXY DEBUG: proxy_impl.cpp:352: actor# [8:68:2115] Handle TEvExecuteKqpTransaction 2026-01-08T15:29:49.286685Z node 8 :TX_PROXY DEBUG: proxy_impl.cpp:341: actor# [8:68:2115] TxId# 281474976715665 ProcessProposeKqpTransaction 2026-01-08T15:29:49.288467Z node 8 :TX_DATASHARD TRACE: datashard_impl.h:3295: StateWorkAsFollower, received event# 269877761, Sender [8:1302:3061], Recipient [8:1277:3043]: NKikimr::TEvTabletPipe::TEvServerConnected 2026-01-08T15:29:49.288559Z node 8 :TX_DATASHARD TRACE: datashard_impl.h:3303: StateWorkAsFollower, processing event TEvTabletPipe::TEvServerConnected 2026-01-08T15:29:49.288646Z node 8 :TX_DATASHARD DEBUG: datashard.cpp:3740: Server connected at follower 1 tablet# 72075186224037890, clientId# [8:1300:3060], serverId# [8:1302:3061], sessionId# [0:0:0] 2026-01-08T15:29:49.291341Z node 8 :TX_DATASHARD TRACE: datashard_impl.h:3295: StateWorkAsFollower, received event# 269553215, Sender [8:1305:3062], Recipient [8:1277:3043]: NKikimrTxDataShard.TEvRead ReadId: 0 TableId { OwnerId: 72057594046644480 TableId: 38 SchemaVersion: 1 } Columns: 1 Columns: 2 ResultFormat: FORMAT_CELLVEC MaxRows: 1001 MaxBytes: 5242880 MaxRowsInResult: 1 Reverse: false TotalRowsLimit: 1001 RangesSize: 1 2026-01-08T15:29:49.291526Z node 8 :TX_DATASHARD TRACE: datashard_impl.h:3305: StateWorkAsFollower, processing event TEvDataShard::TEvRead 2026-01-08T15:29:49.291679Z node 8 :TABLET_EXECUTOR DEBUG: Follower{72075186224037890:1:7} Tx{1, NKikimr::NDataShard::TDataShard::TTxReadViaPipeline} queued, type NKikimr::NDataShard::TDataShard::TTxReadViaPipeline 2026-01-08T15:29:49.291764Z node 8 :TABLET_EXECUTOR DEBUG: Follower{72075186224037890:1:7} Tx{1, NKikimr::NDataShard::TDataShard::TTxReadViaPipeline} took 4194304b of static mem, Memory{4194304 dyn 0} 2026-01-08T15:29:49.291945Z node 8 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2697: TTxReadViaPipeline execute: at tablet# 72075186224037890, FollowerId 1 2026-01-08T15:29:49.292081Z node 8 :TX_DATASHARD DEBUG: datashard__init.cpp:837: Updating sys metadata on follower, tabletId 72075186224037890 prev TChangeCounter{serial=0, epoch=0} current TChangeCounter{serial=6, epoch=1} 2026-01-08T15:29:49.292869Z node 8 :TX_DATASHARD DEBUG: datashard__init.cpp:854: Updating tables metadata on follower, tabletId 72075186224037890 prev TChangeCounter{serial=0, epoch=0} current TChangeCounter{serial=4, epoch=1} 2026-01-08T15:29:49.293410Z node 8 :TX_DATASHARD DEBUG: datashard__init.cpp:925: Updating snapshots metadata on 
follower, tabletId 72075186224037890 prev TChangeCounter{serial=0, epoch=0} current TChangeCounter{serial=0, epoch=1} 2026-01-08T15:29:49.293506Z node 8 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2800: 72075186224037890 changed HEAD read to repeatable v2500/18446744073709551615 2026-01-08T15:29:49.293582Z node 8 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1848: Trying to execute [0:1] at 72075186224037890 on unit CheckRead 2026-01-08T15:29:49.293695Z node 8 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1883: Execution status for [0:1] at 72075186224037890 is Executed 2026-01-08T15:29:49.293771Z node 8 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1931: Advance execution plan for [0:1] at 72075186224037890 executing on unit CheckRead 2026-01-08T15:29:49.293827Z node 8 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1937: Add [0:1] at 72075186224037890 to execution unit BuildAndWaitDependencies 2026-01-08T15:29:49.293875Z node 8 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1848: Trying to execute [0:1] at 72075186224037890 on unit BuildAndWaitDependencies 2026-01-08T15:29:49.293928Z node 8 :TX_DATASHARD TRACE: datashard_pipeline.cpp:483: Activated operation [0:1] at 72075186224037890 2026-01-08T15:29:49.293989Z node 8 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1883: Execution status for [0:1] at 72075186224037890 is Executed 2026-01-08T15:29:49.294018Z node 8 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1931: Advance execution plan for [0:1] at 72075186224037890 executing on unit BuildAndWaitDependencies 2026-01-08T15:29:49.294046Z node 8 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1937: Add [0:1] at 72075186224037890 to execution unit ExecuteRead 2026-01-08T15:29:49.294079Z node 8 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1848: Trying to execute [0:1] at 72075186224037890 on unit ExecuteRead 2026-01-08T15:29:49.294266Z node 8 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:1748: 72075186224037890 Execute read# 1, request: { ReadId: 0 TableId { OwnerId: 72057594046644480 TableId: 38 SchemaVersion: 1 } Columns: 1 Columns: 2 ResultFormat: FORMAT_CELLVEC MaxRows: 1001 MaxBytes: 5242880 MaxRowsInResult: 1 Reverse: false TotalRowsLimit: 1001 } 2026-01-08T15:29:49.294660Z node 8 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1883: Execution status for [0:1] at 72075186224037890 is Restart 2026-01-08T15:29:49.294711Z node 8 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2932: TTxReadViaPipeline(69) Execute with status# Restart at tablet# 72075186224037890 2026-01-08T15:29:49.294813Z node 8 :TABLET_EXECUTOR DEBUG: Follower{72075186224037890:1:7} Tx{1, NKikimr::NDataShard::TDataShard::TTxReadViaPipeline} hope 1 -> retry Change{7, redo 0b alter 0b annex 0, ~{ } -{ }, 0 gb} 2026-01-08T15:29:49.294898Z node 8 :TABLET_EXECUTOR DEBUG: Follower{72075186224037890:1:7} Tx{1, NKikimr::NDataShard::TDataShard::TTxReadViaPipeline} pin 0 (0 b) load 1 (65 b) 2026-01-08T15:29:49.294997Z node 8 :TABLET_EXECUTOR DEBUG: Follower{72075186224037890:1:7} Tx{1, NKikimr::NDataShard::TDataShard::TTxReadViaPipeline} took 8388608b of static mem, Memory{8388608 dyn 0} 2026-01-08T15:29:49.295101Z node 8 :TABLET_EXECUTOR DEBUG: Follower{72075186224037890:1:7} Tx{1, NKikimr::NDataShard::TDataShard::TTxReadViaPipeline} request page collection [72075186224037888:1:23:1:12288:190:0] pages [ 0 ] 2026-01-08T15:29:49.295196Z node 8 :TABLET_EXECUTOR DEBUG: Follower{72075186224037890:1:7} Tx{1, NKikimr::NDataShard::TDataShard::TTxReadViaPipeline} postponed, loading 1 pages, 65 bytes, newly pinned 0 pages, 0 bytes 
2026-01-08T15:29:49.295447Z node 8 :TABLET_EXECUTOR DEBUG: Follower{72075186224037890:1:7} got result TEvResult{1 pages [72075186224037888:1:23:1:12288:190:0] ok OK}, type 1 2026-01-08T15:29:49.295566Z node 8 :TABLET_EXECUTOR DEBUG: Follower{72075186224037890:1:7} Tx{1, NKikimr::NDataShard::TDataShard::TTxReadViaPipeline} activated 2026-01-08T15:29:49.295654Z node 8 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2697: TTxReadViaPipeline execute: at tablet# 72075186224037890, FollowerId 1 2026-01-08T15:29:49.295699Z node 8 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1848: Trying to execute [0:1] at 72075186224037890 on unit ExecuteRead 2026-01-08T15:29:49.295826Z node 8 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:1748: 72075186224037890 Execute read# 2, request: { ReadId: 0 TableId { OwnerId: 72057594046644480 TableId: 38 SchemaVersion: 1 } Columns: 1 Columns: 2 ResultFormat: FORMAT_CELLVEC MaxRows: 1001 MaxBytes: 5242880 MaxRowsInResult: 1 Reverse: false TotalRowsLimit: 1001 } 2026-01-08T15:29:49.296095Z node 8 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2396: 72075186224037890 Complete read# {[8:1305:3062], 0} after executionsCount# 2 2026-01-08T15:29:49.296180Z node 8 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2370: 72075186224037890 read iterator# {[8:1305:3062], 0} sends rowCount# 1, bytes# 32, quota rows left# 1000, quota bytes left# 5242848, hasUnreadQueries# 1, total queries# 1, firstUnprocessed# 0 2026-01-08T15:29:49.296312Z node 8 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1883: Execution status for [0:1] at 72075186224037890 is Executed 2026-01-08T15:29:49.296347Z node 8 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1931: Advance execution plan for [0:1] at 72075186224037890 executing on unit ExecuteRead 2026-01-08T15:29:49.296379Z node 8 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1937: Add [0:1] at 72075186224037890 to execution unit CompletedOperations 2026-01-08T15:29:49.296409Z node 8 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1848: Trying to execute [0:1] at 72075186224037890 on unit CompletedOperations 2026-01-08T15:29:49.296460Z node 8 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1883: Execution status for [0:1] at 72075186224037890 is Executed 2026-01-08T15:29:49.296488Z node 8 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1931: Advance execution plan for [0:1] at 72075186224037890 executing on unit CompletedOperations 2026-01-08T15:29:49.296526Z node 8 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1943: Execution plan for [0:1] at 72075186224037890 has finished 2026-01-08T15:29:49.296573Z node 8 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2932: TTxReadViaPipeline(69) Execute with status# Executed at tablet# 72075186224037890 2026-01-08T15:29:49.296699Z node 8 :TABLET_EXECUTOR DEBUG: Follower{72075186224037890:1:7} Tx{1, NKikimr::NDataShard::TDataShard::TTxReadViaPipeline} hope 2 -> done Change{7, redo 0b alter 0b annex 0, ~{ } -{ }, 0 gb} 2026-01-08T15:29:49.296778Z node 8 :TABLET_EXECUTOR DEBUG: Follower{72075186224037890:1:7} Tx{1, NKikimr::NDataShard::TDataShard::TTxReadViaPipeline} release 8388608b of static, Memory{0 dyn 0} 2026-01-08T15:29:49.296840Z node 8 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2981: TTxReadViaPipeline(69) Complete: at tablet# 72075186224037890 2026-01-08T15:29:49.297034Z node 8 :TX_DATASHARD TRACE: datashard_impl.h:3295: StateWorkAsFollower, received event# 269553217, Sender [8:1277:3043], Recipient [8:1277:3043]: NKikimr::TEvDataShard::TEvReadContinue 2026-01-08T15:29:49.297087Z node 8 :TX_DATASHARD TRACE: 
datashard_impl.h:3306: StateWorkAsFollower, processing event TEvDataShard::TEvReadContinue 2026-01-08T15:29:49.297191Z node 8 :TABLET_EXECUTOR DEBUG: Follower{72075186224037890:1:7} Tx{2, NKikimr::NDataShard::TDataShard::TTxReadContinue} queued, type NKikimr::NDataShard::TDataShard::TTxReadContinue 2026-01-08T15:29:49.297274Z node 8 :TABLET_EXECUTOR DEBUG: Follower{72075186224037890:1:7} Tx{2, NKikimr::NDataShard::TDataShard::TTxReadContinue} took 4194304b of static mem, Memory{4194304 dyn 0} 2026-01-08T15:29:49.297368Z node 8 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:3081: 72075186224037890 ReadContinue for iterator# {[8:1305:3062], 0}, firstUnprocessedQuery# 0 2026-01-08T15:29:49.297461Z node 8 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:3174: 72075186224037890 ReadContinue: iterator# {[8:1305:3062], 0}, FirstUnprocessedQuery# 0 2026-01-08T15:29:49.297607Z node 8 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:3338: 72075186224037890 readContinue iterator# {[8:1305:3062], 0} sends rowCount# 0, bytes# 0, quota rows left# 1000, quota bytes left# 5242848, hasUnreadQueries# 0, total queries# 1, firstUnprocessed# 0 2026-01-08T15:29:49.297697Z node 8 :TX_DATASHARD DEBUG: datashard__read_iterator.cpp:3362: 72075186224037890 read iterator# {[8:1305:3062], 0} finished in ReadContinue 2026-01-08T15:29:49.297819Z node 8 :TABLET_EXECUTOR DEBUG: Follower{72075186224037890:1:7} Tx{2, NKikimr::NDataShard::TDataShard::TTxReadContinue} hope 1 -> done Change{7, redo 0b alter 0b annex 0, ~{ } -{ }, 0 gb} 2026-01-08T15:29:49.297887Z node 8 :TABLET_EXECUTOR DEBUG: Follower{72075186224037890:1:7} Tx{2, NKikimr::NDataShard::TDataShard::TTxReadContinue} release 4194304b of static, Memory{0 dyn 0} 2026-01-08T15:29:49.299591Z node 8 :TX_DATASHARD TRACE: datashard_impl.h:3295: StateWorkAsFollower, received event# 269553219, Sender [8:1305:3062], Recipient [8:1277:3043]: NKikimrTxDataShard.TEvReadCancel ReadId: 0 2026-01-08T15:29:49.299661Z node 8 :TX_DATASHARD TRACE: datashard_impl.h:3308: StateWorkAsFollower, processing event TEvDataShard::TEvReadCancel 2026-01-08T15:29:49.299756Z node 8 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:3668: 72075186224037890 ReadCancel: { ReadId: 0 } { items { uint32_value: 3 } items { uint32_value: 33 } } |85.9%| [TM] {BAZEL_UPLOAD} ydb/core/tx/datashard/ut_followers/unittest >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_delay_one_message[tables_format_v0-std] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_delay_one_message[tables_format_v1-fifo] >> test.py::test[solomon-InvalidProject-] [GOOD] >> test.py::test[solomon-LabelColumnAliases-default.txt] >> YdbSdkSessions::MultiThreadSessionPoolLimitSyncQueryClient [GOOD] >> TMLPCountersTests::SimpleCounters [GOOD] >> TMLPDLQMoverTests::MoveToDLQ_ShortMessage >> QuoterWithKesusTest::GetsBigQuota [GOOD] >> QuoterWithKesusTest::GetsBigQuotaWithDeadline |85.9%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/sqs/with_quotas/py3test >> test_quoting.py::TestSqsQuotingWithLocalRateLimiter::test_send_message_rate[tables_format_v0] [GOOD] |85.9%| [TM] {BAZEL_UPLOAD} ydb/tests/functional/sqs/with_quotas/py3test >> TIndexProcesorTests::TestOver1000Queues [GOOD] |85.9%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/script_execution/py3test |85.9%| [TM] {BAZEL_UPLOAD} ydb/tests/functional/script_execution/py3test >> YdbSdkSessions::CloseSessionWithSessionPoolExplicit [GOOD] |85.9%| [TM] {asan, default-linux-x86_64, release} 
ydb/tests/functional/sqs/with_quotas/py3test >> test_quoting.py::TestSqsQuotingWithKesus::test_properly_creates_and_deletes_queue[tables_format_v0-std] [GOOD] |85.9%| [TM] {BAZEL_UPLOAD} ydb/tests/functional/sqs/with_quotas/py3test |85.9%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/script_execution/py3test |85.9%| [TM] {BAZEL_UPLOAD} ydb/tests/functional/script_execution/py3test >> TGRpcRateLimiterTest::AcquireResourceManyUsedGrpcApiWithCancelAfter [GOOD] >> TGRpcRateLimiterTest::AcquireResourceManyUsedActorApiWithCancelAfter >> TMemoryController::ResourceBroker [GOOD] >> TMemoryController::ResourceBroker_ConfigLimit >> YdbSdkSessions::MultiThreadMultipleRequestsOnSharedSessionsTableClient >> test_fifo_messaging.py::TestSqsFifoMessagingWithTenant::test_read_dont_stall[tables_format_v0] [GOOD] >> test_fifo_messaging.py::TestSqsFifoMessagingWithTenant::test_read_dont_stall[tables_format_v1] >> test_fifo_messaging.py::TestSqsFifoMessagingWithTenant::test_crutch_groups_selection_algorithm_selects_second_group_batch[tables_format_v1] [GOOD] >> test_fifo_messaging.py::TestSqsFifoMessagingWithTenant::test_deduplication[tables_format_v0-by_deduplication_id] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/ymq/actor/yc_search_ut/unittest >> TIndexProcesorTests::TestOver1000Queues [GOOD] Test command err: 2026-01-08T15:29:02.171932Z node 1 :METADATA_PROVIDER WARN: log.cpp:841: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7593013166167042730:2068];send_to=[0:7307199536658146131:7762515]; 2026-01-08T15:29:02.172034Z node 1 :METADATA_PROVIDER ERROR: log.cpp:841: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2026-01-08T15:29:02.387112Z node 1 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: LookupError, path: Root/.metadata/script_executions 2026-01-08T15:29:02.396951Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2026-01-08T15:29:02.397043Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2026-01-08T15:29:02.453089Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2026-01-08T15:29:02.466697Z node 1 :IMPORT WARN: schemeshard_import.cpp:440: Table profiles were not loaded 2026-01-08T15:29:02.467916Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1374: Notification cookie mismatch for subscription [1:7593013166167042702:2081] 1767886142170409 != 1767886142170412 2026-01-08T15:29:02.534632Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2026-01-08T15:29:02.534668Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2026-01-08T15:29:02.534683Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2026-01-08T15:29:02.534780Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2026-01-08T15:29:02.617227Z node 1 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: 
PathErrorUnknown, path: Root/.metadata/script_executions 2026-01-08T15:29:02.750019Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2026-01-08T15:29:03.179654Z node 1 :TX_CONVEYOR ERROR: log.cpp:841: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2026-01-08T15:29:05.091756Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:1, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:29:05.093699Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:29:05.514274Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) Error 1: Check failed: path: '/Root/SQS', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 38], type: EPathTypeDir, state: EPathStateNoChanges) 2026-01-08T15:29:05.554410Z node 1 :TX_PROXY ERROR: schemereq.cpp:624: Actor# [1:7593013179051945716:2667] txid# 281474976715663, issues: { message: "Check failed: path: \'/Root/SQS\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 38], type: EPathTypeDir, state: EPathStateNoChanges)" severity: 1 } 2026-01-08T15:29:05.766829Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:1, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:29:05.770412Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) ===Execute query: UPSERT INTO `/Root/SQS/SingleCreateQueueEvent/.Events` (Account, QueueName, EventType, CustomQueueName, EventTimestamp, FolderId, Labels) VALUES ("cloud1", "queue1", 1, "myQueueCustomName", 1767886145572, "myFolder", "{\"k1\": \"v1\"}"); 2026-01-08T15:29:05.894466Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7593013179051945882:2354], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:29:05.894557Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:263: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:29:05.894958Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7593013179051945894:2357], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:29:05.895004Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7593013179051945895:2358], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:29:05.895075Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:263: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:29:05.899484Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715667:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2026-01-08T15:29:05.913790Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:123: NotifyTxCompletion, unknown transaction, txId: 281474976715667, at schemeshard: 72057594046644480 2026-01-08T15:29:05.914070Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7593013179051945898:2359], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715667 completed, doublechecking } 2026-01-08T15:29:06.011132Z node 1 :TX_PROXY ERROR: schemereq.cpp:624: Actor# [1:7593013183346913245:2818] txid# 281474976715668, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 52], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } ===CheckEventsLine: {"resource_type":"message-queue","timestamp":"2026-01-08T15:29:05.572000Z","resource_id":"queue1","name":"myQueueCustomName","service":"message-queue","permission":"ymq.queues.list","cloud_id":"cloud1","folder_id":"myFolder","resource_path":[{"resource_type":"resource-manager.folder","resource_id":"myFolder"}],"attributes":{"labels":{"k1": "v1"}}} ===CheckEventsLine: {"resource_type":"message-queue","timestamp":"2026-01-08T15:29:05.572000Z","resource_id":"queue1","name":"myQueueCustomName","service":"message-queue","reindex_timestamp":"2026-01-08T15:29:07.107503Z","permission":"ymq.queues.list","cloud_id":"cloud1","folder_id":"myFolder","resource_path":[{"resource_type":"resource-manager.folder","resource_id":"myFolder"}],"attributes":{"labels":{"k1": "v1"}}} 2026-01-08T15:29:07.174432Z node 1 :METADATA_PROVIDER ERROR: log.cpp:841: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7593013166167042730:2068];send_to=[0:7307199536658146131:7762515]; 2026-01-08T15:29:07.174510Z node 1 :METADATA_PROVIDER ERROR: log.cpp:841: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; Error 1: Check failed: path: '/Root/SQS/Root/SQS/SingleCreateQueueEvent', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 45], type: EPathTypeDir, state: EPathStateNoChanges) 2026-01-08T15:29:07.875330Z node 1 :TX_PROXY ERROR: schemereq.cpp:624: Actor# [1:7593013187641880754:2901] txid# 281474976715680, issues: { message: "Check failed: path: \'/Root/SQS/Root/SQS/SingleCreateQueueEvent\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 45], type: EPathTypeDir, state: EPathStateNoChanges)" severity: 1 } 2026-01-08T15:29:07.916501Z node 1 :TX_PROXY ERROR: schemereq.cpp:624: Actor# [1:7593013187641880763:2906] txid# 281474976715681, issues: { message: "Check failed: path: \'/Root/SQS/SingleCreateQueueEvent/.Queues\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 47], type: EPathTypeTable, state: EPathStateNoChanges)" severity: 1 } 2026-01-08T15:29:07.924861Z node 1 :TX_PROXY ERROR: schemereq.cpp:624: Actor# [1:7593013187641880770:2911] txid# 281474976715682, issues: { message: "Check failed: path: \'/Root/SQS/SingleCreateQueueEvent/.Events\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 48], type: EPathTypeTable, state: EPathStateNoChanges)" severity: 1 } ===Execute query: UPSERT INTO `/Root/SQS/SingleCreateQueueEvent/.Events` (Account, QueueName, EventType, CustomQueueName, EventTimestamp, FolderId, Labels) VALUES ("cloud1", "queue1", 1, "myQueueCustomName", 1767886147521, "myFolder", NULL); ===CheckEventsLine: 
{"resource_type":"message-queue","timestamp":"2026-01-08T15:29:07.521000Z","resource_id":"queue1","name":"myQueueCustomName","service":"message-queue","permission":"ymq.queues.list","cloud_id":"cloud1","folder_id":"myFolder","resource_path":[{"resource_type":"resource-manager.folder","resource_id":"myFolder"}]} ===CheckEventsLine: {"resource_type":"message-queue","timestamp":"2026-01-08T15:29:07.521000Z","resource_id":"queue1","name":"myQueueCustomName","service":"message-queue","reindex_timestamp":"2026-01-08T15:29:08.183932Z","permission":"ymq.queues.list","cloud_id":"cloud1","folder_id":"myFolder","resource_path":[{"resource_type":"resource-manager.folder","resource_id":"myFolder"}]} 2026-01-08T15:29:08.790967Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715694:0, ... 1: Check failed: path: '/Root/SQS/Root/SQS/DeletedQueueNotReindexed', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 57], type: EPathTypeDir, state: EPathStateNoChanges) 2026-01-08T15:29:13.791501Z node 1 :TX_PROXY ERROR: schemereq.cpp:624: Actor# [1:7593013213411685398:3376] txid# 281474976715722, issues: { message: "Check failed: path: \'/Root/SQS/DeletedQueueNotReindexed/.Queues\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 59], type: EPathTypeTable, state: EPathStateNoChanges)" severity: 1 } 2026-01-08T15:29:13.808030Z node 1 :TX_PROXY ERROR: schemereq.cpp:624: Actor# [1:7593013213411685405:3381] txid# 281474976715723, issues: { message: "Check failed: path: \'/Root/SQS/DeletedQueueNotReindexed/.Events\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 60], type: EPathTypeTable, state: EPathStateNoChanges)" severity: 1 } ===Execute query: UPSERT INTO `/Root/SQS/DeletedQueueNotReindexed/.Queues` (Account, QueueName, CustomQueueName, CreatedTimestamp, FolderId, Tags) VALUES ("cloud1", "queue2", "myQueueCustomName", 1767886152000, "myFolder", NULL); ===Execute query: UPSERT INTO `/Root/SQS/DeletedQueueNotReindexed/.Events` (Account, QueueName, EventType, CustomQueueName, EventTimestamp, FolderId, Labels) VALUES ("cloud1", "queue2", 0, "myQueueCustomName", 1767886153489, "myFolder", NULL); ===CheckEventsLine: {"resource_type":"message-queue","timestamp":"2026-01-08T15:29:13.489000Z","resource_id":"queue2","name":"myQueueCustomName","service":"message-queue","deleted":"2026-01-08T15:29:13.489000Z","permission":"ymq.queues.list","cloud_id":"cloud1","folder_id":"myFolder","resource_path":[{"resource_type":"resource-manager.folder","resource_id":"myFolder"}]} 2026-01-08T15:29:15.375930Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715730:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2026-01-08T15:29:15.388574Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:123: NotifyTxCompletion, unknown transaction, txId: 281474976715730, at schemeshard: 72057594046644480 2026-01-08T15:29:15.394840Z node 1 :TX_PROXY ERROR: schemereq.cpp:624: Actor# [1:7593013222001620121:3440] txid# 281474976715731, issues: { message: "Check failed: path: \'/Root/SQS\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, 
LocalPathId: 38], type: EPathTypeDir, state: EPathStateNoChanges)" severity: 1 } Error 1: Check failed: path: '/Root/SQS', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 38], type: EPathTypeDir, state: EPathStateNoChanges) 2026-01-08T15:29:15.666555Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:123: NotifyTxCompletion, unknown transaction, txId: 281474976715732, at schemeshard: 72057594046644480 2026-01-08T15:29:15.706276Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715734:1, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:29:15.708130Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715733:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) ===Execute query: UPSERT INTO `/Root/SQS/TestManyMessages/.Queues` (Account, QueueName, CustomQueueName, CreatedTimestamp, FolderId, Tags) VALUES ("cloud1", "existing1", "myQueueCustomName", 1767886154000, "myFolder", "{\"k1\": \"v1\"}"); ===Execute query: UPSERT INTO `/Root/SQS/TestManyMessages/.Queues` (Account, QueueName, CustomQueueName, CreatedTimestamp, FolderId, Tags) VALUES ("cloud1", "existing2", "myQueueCustomName", 1767886154000, "myFolder", "{\"k1\": \"v1\"}"); ===Execute query: UPSERT INTO `/Root/SQS/TestManyMessages/.Queues` (Account, QueueName, CustomQueueName, CreatedTimestamp, FolderId, Tags) VALUES ("cloud1", "existing3", "myQueueCustomName", 1767886154000, "myFolder", "{\"k1\": \"v1\"}"); ===Execute query: UPSERT INTO `/Root/SQS/TestManyMessages/.Queues` (Account, QueueName, CustomQueueName, CreatedTimestamp, FolderId, Tags) VALUES ("cloud1", "deleting1", "myQueueCustomName", 1767886154000, "myFolder", "{\"k1\": \"v1\"}"); ===Execute query: UPSERT INTO `/Root/SQS/TestManyMessages/.Queues` (Account, QueueName, CustomQueueName, CreatedTimestamp, FolderId, Tags) VALUES ("cloud1", "deleting2", "myQueueCustomName", 1767886154000, "myFolder", "{\"k1\": \"v1\"}"); ===Execute query: UPSERT INTO `/Root/SQS/TestManyMessages/.Events` (Account, QueueName, EventType, CustomQueueName, EventTimestamp, FolderId, Labels) VALUES ("cloud1", "deleting1", 0, "myQueueCustomName", 1767886155422, "myFolder", "{\"k1\": \"v1\"}"); ===Execute query: UPSERT INTO `/Root/SQS/TestManyMessages/.Events` (Account, QueueName, EventType, CustomQueueName, EventTimestamp, FolderId, Labels) VALUES ("cloud1", "deleting2", 0, "myQueueCustomName", 1767886155422, "myFolder", "{\"k1\": \"v1\"}"); ===Execute query: UPSERT INTO `/Root/SQS/TestManyMessages/.Events` (Account, QueueName, EventType, CustomQueueName, EventTimestamp, FolderId, Labels) VALUES ("cloud1", "creating1", 1, "myQueueCustomName", 1767886155422, "myFolder", "{\"k1\": \"v1\"}"); ===Execute query: UPSERT INTO `/Root/SQS/TestManyMessages/.Events` (Account, QueueName, EventType, CustomQueueName, EventTimestamp, FolderId, Labels) VALUES ("cloud1", "creating2", 1, "myQueueCustomName", 1767886155422, "myFolder", "{\"k1\": \"v1\"}"); 2026-01-08T15:29:17.378570Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7747: Cannot get console configs 2026-01-08T15:29:17.378605Z node 1 
:IMPORT WARN: schemeshard_import.cpp:440: Table profiles were not loaded ===CheckEventsLine: {"resource_type":"message-queue","timestamp":"2026-01-08T15:29:15.422000Z","resource_id":"deleting2","name":"myQueueCustomName","service":"message-queue","deleted":"2026-01-08T15:29:15.422000Z","permission":"ymq.queues.list","cloud_id":"cloud1","folder_id":"myFolder","resource_path":[{"resource_type":"resource-manager.folder","resource_id":"myFolder"}],"attributes":{"labels":{"k1": "v1"}}} ===CheckEventsLine: {"resource_type":"message-queue","timestamp":"2026-01-08T15:29:15.422000Z","resource_id":"deleting1","name":"myQueueCustomName","service":"message-queue","deleted":"2026-01-08T15:29:15.422000Z","permission":"ymq.queues.list","cloud_id":"cloud1","folder_id":"myFolder","resource_path":[{"resource_type":"resource-manager.folder","resource_id":"myFolder"}],"attributes":{"labels":{"k1": "v1"}}} ===CheckEventsLine: {"resource_type":"message-queue","timestamp":"2026-01-08T15:29:15.422000Z","resource_id":"creating1","name":"myQueueCustomName","service":"message-queue","permission":"ymq.queues.list","cloud_id":"cloud1","folder_id":"myFolder","resource_path":[{"resource_type":"resource-manager.folder","resource_id":"myFolder"}],"attributes":{"labels":{"k1": "v1"}}} ===CheckEventsLine: {"resource_type":"message-queue","timestamp":"2026-01-08T15:29:15.422000Z","resource_id":"creating2","name":"myQueueCustomName","service":"message-queue","permission":"ymq.queues.list","cloud_id":"cloud1","folder_id":"myFolder","resource_path":[{"resource_type":"resource-manager.folder","resource_id":"myFolder"}],"attributes":{"labels":{"k1": "v1"}}} ===CheckEventsLine: {"resource_type":"message-queue","timestamp":"2026-01-08T15:29:14.000000Z","resource_id":"existing1","name":"myQueueCustomName","service":"message-queue","reindex_timestamp":"2026-01-08T15:29:18.751457Z","permission":"ymq.queues.list","cloud_id":"cloud1","folder_id":"myFolder","resource_path":[{"resource_type":"resource-manager.folder","resource_id":"myFolder"}],"attributes":{"labels":{"k1": "v1"}}} ===CheckEventsLine: {"resource_type":"message-queue","timestamp":"2026-01-08T15:29:14.000000Z","resource_id":"existing2","name":"myQueueCustomName","service":"message-queue","reindex_timestamp":"2026-01-08T15:29:18.751533Z","permission":"ymq.queues.list","cloud_id":"cloud1","folder_id":"myFolder","resource_path":[{"resource_type":"resource-manager.folder","resource_id":"myFolder"}],"attributes":{"labels":{"k1": "v1"}}} ===CheckEventsLine: {"resource_type":"message-queue","timestamp":"2026-01-08T15:29:15.422000Z","resource_id":"creating1","name":"myQueueCustomName","service":"message-queue","reindex_timestamp":"2026-01-08T15:29:18.751565Z","permission":"ymq.queues.list","cloud_id":"cloud1","folder_id":"myFolder","resource_path":[{"resource_type":"resource-manager.folder","resource_id":"myFolder"}],"attributes":{"labels":{"k1": "v1"}}} ===CheckEventsLine: {"resource_type":"message-queue","timestamp":"2026-01-08T15:29:14.000000Z","resource_id":"existing3","name":"myQueueCustomName","service":"message-queue","reindex_timestamp":"2026-01-08T15:29:18.751593Z","permission":"ymq.queues.list","cloud_id":"cloud1","folder_id":"myFolder","resource_path":[{"resource_type":"resource-manager.folder","resource_id":"myFolder"}],"attributes":{"labels":{"k1": "v1"}}} ===CheckEventsLine: 
{"resource_type":"message-queue","timestamp":"2026-01-08T15:29:15.422000Z","resource_id":"creating2","name":"myQueueCustomName","service":"message-queue","reindex_timestamp":"2026-01-08T15:29:18.751642Z","permission":"ymq.queues.list","cloud_id":"cloud1","folder_id":"myFolder","resource_path":[{"resource_type":"resource-manager.folder","resource_id":"myFolder"}],"attributes":{"labels":{"k1": "v1"}}} 2026-01-08T15:29:19.564037Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715761:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2026-01-08T15:29:19.575635Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:123: NotifyTxCompletion, unknown transaction, txId: 281474976715761, at schemeshard: 72057594046644480 2026-01-08T15:29:19.580988Z node 1 :TX_PROXY ERROR: schemereq.cpp:624: Actor# [1:7593013239181489904:3734] txid# 281474976715762, issues: { message: "Check failed: path: \'/Root/SQS\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 38], type: EPathTypeDir, state: EPathStateNoChanges)" severity: 1 } Error 1: Check failed: path: '/Root/SQS', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 38], type: EPathTypeDir, state: EPathStateNoChanges) 2026-01-08T15:29:19.890507Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:123: NotifyTxCompletion, unknown transaction, txId: 281474976715763, at schemeshard: 72057594046644480 2026-01-08T15:29:19.931847Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715764:1, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:29:19.942908Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715765:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) ===Started add queue batch |85.9%| [TM] {BAZEL_UPLOAD} ydb/core/ymq/actor/yc_search_ut/unittest |85.9%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/library/query_actor/ut/ydb-library-query_actor-ut >> DataShardStats::SharedCacheGarbage [GOOD] >> DataShardStats::CollectStatsForSeveralParts |85.9%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/library/query_actor/ut/ydb-library-query_actor-ut |85.9%| [TM] {RESULT} ydb/core/tx/datashard/ut_followers/unittest |85.9%| [TM] {RESULT} ydb/core/ymq/actor/yc_search_ut/unittest |85.9%| [LD] {RESULT} $(B)/ydb/library/query_actor/ut/ydb-library-query_actor-ut |85.9%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test |85.9%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/datashard/ut_rs/ydb-core-tx-datashard-ut_rs |85.9%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/datashard/ut_rs/ydb-core-tx-datashard-ut_rs >> TEvaluateExprInViewTest::EvaluateExpr [GOOD] >> TEvaluateExprInViewTest::NakedCallToCurrentTimeFunction |85.9%| [LD] {RESULT} $(B)/ydb/core/tx/datashard/ut_rs/ydb-core-tx-datashard-ut_rs |85.9%| [TM] {BAZEL_UPLOAD} 
ydb/tests/functional/audit/py3test >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_partial_delete_works[tables_format_v1] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_queue_attributes[tables_format_v0-fifo] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_visibility_timeout_expires_on_wait_timeout[tables_format_v1] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_visibility_timeout_works[tables_format_v0] >> KeyValueGRPCService::SimpleWriteReadWithoutLockGeneration1V1 [GOOD] >> KeyValueGRPCService::SimpleWriteReadWithoutLockGeneration1V2 |85.9%| [TM] {asan, default-linux-x86_64, release} ydb/public/sdk/cpp/tests/integration/sessions/gtest >> YdbSdkSessions::MultiThreadSessionPoolLimitSyncQueryClient [GOOD] |85.9%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/sqs/messaging/py3test >> test_fifo_messaging.py::TestSqsFifoMessagingWithPath::test_visibility_change_disables_receive_attempt_id[tables_format_v0-with_delete_message] [GOOD] |85.9%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_send_message_batch[tables_format_v0-fifo] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_send_message_batch[tables_format_v0-std] >> test_s3.py::TestPgYdbS3TTL::test_s3[table_index_0__SYNC-pk_types1-all_types1-index1---SYNC] >> test_s3.py::TestYdbS3TTL::test_s3[table_ttl_Date-pk_types18-all_types18-index18-Date--] >> test_s3.py::TestYdbS3TTL::test_s3[table_index_0_UNIQUE_SYNC-pk_types4-all_types4-index4--UNIQUE-SYNC] |85.9%| [TM] {BAZEL_UPLOAD} ydb/tests/functional/audit/py3test |85.9%| [TM] {BAZEL_UPLOAD} ydb/tests/functional/sqs/messaging/py3test |85.9%| [TM] {BAZEL_UPLOAD} ydb/public/sdk/cpp/tests/integration/sessions/gtest >> test_s3.py::TestYdbS3TTL::test_s3[table_all_types-pk_types12-all_types12-index12---] |85.9%| [TM] {asan, default-linux-x86_64, release} ydb/public/sdk/cpp/tests/integration/sessions/gtest >> YdbSdkSessions::CloseSessionWithSessionPoolExplicit [GOOD] |85.9%| [TM] {BAZEL_UPLOAD} ydb/public/sdk/cpp/tests/integration/sessions/gtest >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_can_read_new_written_data_on_visibility_timeout[tables_format_v0] >> test_s3.py::TestPgYdbS3TTL::test_s3[table_index_0_UNIQUE_SYNC-pk_types0-all_types0-index0--UNIQUE-SYNC] >> DataShardSnapshotIsolation::ReadWriteCommitConflictThenRead [GOOD] >> DataShardSnapshotIsolation::ReadWriteCommitConflictWhileReading >> test.py::test[solomon-LabelColumnAliases-default.txt] [GOOD] >> test.py::test[solomon-LabelColumns-default.txt] >> test_s3.py::TestPgYdbS3TTL::test_s3[table_ttl_pgint4-pk_types4-all_types4-index4-pgint4--] >> test_s3.py::TestYdbS3TTL::test_s3[table_index_4_UNIQUE_SYNC-pk_types0-all_types0-index0--UNIQUE-SYNC] |85.9%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/sqs/with_quotas/py3test >> test_quoting.py::TestSqsQuotingWithLocalRateLimiter::test_create_queue_rate[tables_format_v1] [GOOD] |85.9%| [TM] {BAZEL_UPLOAD} ydb/tests/functional/sqs/with_quotas/py3test >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_queue_attributes[tables_format_v0-fifo] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_queue_attributes[tables_format_v0-std] |85.9%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/sqs/messaging/py3test >> 
test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_get_queue_attributes_only_attributes_table[tables_format_v1-fifo] [GOOD] |85.9%| [TM] {BAZEL_UPLOAD} ydb/tests/functional/sqs/messaging/py3test >> QuoterWithKesusTest::GetsBigQuotaWithDeadline [GOOD] >> QuoterWithKesusTest::FailsToGetBigQuota |85.9%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/schemeshard/ut_olap/ydb-core-tx-schemeshard-ut_olap |85.9%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/schemeshard/ut_olap/ydb-core-tx-schemeshard-ut_olap |85.9%| [LD] {RESULT} $(B)/ydb/core/tx/schemeshard/ut_olap/ydb-core-tx-schemeshard-ut_olap >> YdbSdkSessions::MultiThreadMultipleRequestsOnSharedSessionsTableClient [GOOD] >> YdbSdkSessions::MultiThreadMultipleRequestsOnSharedSessionsQueryClient [SKIPPED] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_delay_one_message[tables_format_v1-fifo] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_delay_one_message[tables_format_v1-std] >> TTxDataShardLocalKMeansScan::BuildToPosting [GOOD] >> TTxDataShardLocalKMeansScan::BuildToBuild |85.9%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/datashard/ut_disk_quotas/ydb-core-tx-datashard-ut_disk_quotas |85.9%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/datashard/ut_disk_quotas/ydb-core-tx-datashard-ut_disk_quotas |86.0%| [LD] {RESULT} $(B)/ydb/core/tx/datashard/ut_disk_quotas/ydb-core-tx-datashard-ut_disk_quotas |86.0%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test |86.0%| [TM] {BAZEL_UPLOAD} ydb/tests/functional/audit/py3test |86.0%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/schemeshard/ut_rtmr_reboots/ydb-core-tx-schemeshard-ut_rtmr_reboots |86.0%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/schemeshard/ut_rtmr_reboots/ydb-core-tx-schemeshard-ut_rtmr_reboots |86.0%| [LD] {RESULT} $(B)/ydb/core/tx/schemeshard/ut_rtmr_reboots/ydb-core-tx-schemeshard-ut_rtmr_reboots |86.0%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/datashard/ut_column_stats/ydb-core-tx-datashard-ut_column_stats |86.0%| [LD] {RESULT} $(B)/ydb/core/tx/datashard/ut_column_stats/ydb-core-tx-datashard-ut_column_stats |86.0%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/datashard/ut_column_stats/ydb-core-tx-datashard-ut_column_stats >> RetryPolicy::TWriteSession_TestPolicy [GOOD] >> RetryPolicy::TWriteSession_TestBrokenPolicy |86.0%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/services/dynamic_config/ut/ydb-services-dynamic_config-ut |86.0%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/services/dynamic_config/ut/ydb-services-dynamic_config-ut |86.0%| [LD] {RESULT} $(B)/ydb/services/dynamic_config/ut/ydb-services-dynamic_config-ut >> TFetchRequestTests::BadTopicName [GOOD] >> TFetchRequestTests::CheckAccess |86.0%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/services/persqueue_v1/ut/describes_ut/ydb-services-persqueue_v1-ut-describes_ut |86.0%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/services/persqueue_v1/ut/describes_ut/ydb-services-persqueue_v1-ut-describes_ut |86.0%| [LD] {RESULT} $(B)/ydb/services/persqueue_v1/ut/describes_ut/ydb-services-persqueue_v1-ut-describes_ut ------- [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test >> test_auditlog.py::test_broken_dynconfig[_client_session_pool_bad_auth-_good_dynconfig] [GOOD] Test command err: AAA 
/home/runner/.ya/build/build_root/663n/002bb4/ydb/tests/functional/audit/test-results/py3test/testing_out_stuff/test_auditlog/chunk1/testing_out_stuff/test_auditlog.py.test_broken_dynconfig._client_session_pool_bad_auth-_good_dynconfig/audit_log.1bflp8dj.txt 2026-01-08T15:29:46.230444Z: {"sanitized_token":"**** (C877DF61)","subject":"__bad__@builtin","new_config":"\n---\nmetadata:\n kind: MainConfig\n cluster: \"\"\n version: 0\nconfig:\n yaml_config_enabled: true\nallowed_labels:\n node_id:\n type: string\n host:\n type: string\n tenant:\n type: string\nselector_config: []\n ","status":"SUCCESS","component":"console","operation":"REPLACE DYNCONFIG","remote_address":"127.0.0.1"} >> test_fifo_messaging.py::TestSqsFifoMessagingWithTenant::test_deduplication[tables_format_v0-by_deduplication_id] [GOOD] >> test_fifo_messaging.py::TestSqsFifoMessagingWithTenant::test_deduplication[tables_format_v0-content_based] |86.0%| [TM] {BAZEL_UPLOAD} ydb/tests/functional/audit/py3test >> test_s3.py::TestYdbS3TTL::test_s3[table_index_4__SYNC-pk_types5-all_types5-index5---SYNC] >> test_s3.py::TestYdbS3TTL::test_s3[table_ttl_Datetime-pk_types16-all_types16-index16-Datetime--] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_queue_attributes[tables_format_v0-std] [GOOD] |86.0%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/kqp/ut/federated_query/s3/ydb-core-kqp-ut-federated_query-s3 |86.0%| [LD] {RESULT} $(B)/ydb/core/kqp/ut/federated_query/s3/ydb-core-kqp-ut-federated_query-s3 |86.0%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/kqp/ut/federated_query/s3/ydb-core-kqp-ut-federated_query-s3 |86.0%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/client/ut/ydb-core-client-ut |86.0%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/client/ut/ydb-core-client-ut |86.0%| [LD] {RESULT} $(B)/ydb/core/client/ut/ydb-core-client-ut |86.0%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/kqp/ut/batch_operations/ydb-core-kqp-ut-batch_operations |86.0%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/kqp/ut/batch_operations/ydb-core-kqp-ut-batch_operations |86.0%| [LD] {RESULT} $(B)/ydb/core/kqp/ut/batch_operations/ydb-core-kqp-ut-batch_operations >> TGRpcRateLimiterTest::AcquireResourceManyUsedActorApiWithCancelAfter [GOOD] >> TMemoryController::ResourceBroker_ConfigLimit [GOOD] >> TMemoryController::ResourceBroker_ConfigCS >> test_s3.py::TestYdbS3TTL::test_s3[table_index_0__ASYNC-pk_types11-all_types11-index11---ASYNC] >> test_s3.py::TestPgYdbS3TTL::test_s3[table_ttl_pgdate-pk_types6-all_types6-index6-pgdate--] >> test_s3.py::TestYdbS3TTL::test_s3[table_index_1__ASYNC-pk_types10-all_types10-index10---ASYNC] ------- [TM] {asan, default-linux-x86_64, release} ydb/services/rate_limiter/ut/unittest >> TGRpcRateLimiterTest::AcquireResourceManyUsedActorApiWithCancelAfter [GOOD] Test command err: 2026-01-08T15:28:38.066042Z node 1 :METADATA_PROVIDER WARN: log.cpp:841: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7593013061781710778:2081];send_to=[0:7307199536658146131:7762515]; 2026-01-08T15:28:38.066130Z node 1 :METADATA_PROVIDER ERROR: log.cpp:841: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2026-01-08T15:28:38.289877Z node 1 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: LookupError, path: Root/.metadata/script_executions 
2026-01-08T15:28:38.315068Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2026-01-08T15:28:38.315188Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2026-01-08T15:28:38.322508Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2026-01-08T15:28:38.375229Z node 1 :IMPORT WARN: schemeshard_import.cpp:440: Table profiles were not loaded 2026-01-08T15:28:38.542987Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2026-01-08T15:28:38.543012Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2026-01-08T15:28:38.543019Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2026-01-08T15:28:38.543122Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2026-01-08T15:28:38.565724Z node 1 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: PathErrorUnknown, path: Root/.metadata/script_executions 2026-01-08T15:28:38.778095Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2026-01-08T15:28:38.888755Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateKesus, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_kesus.cpp:30) 2026-01-08T15:28:39.091147Z node 1 :TX_CONVEYOR ERROR: log.cpp:841: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2026-01-08T15:28:43.001029Z node 4 :METADATA_PROVIDER WARN: log.cpp:841: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[4:7593013085564003632:2090];send_to=[0:7307199536658146131:7762515]; 2026-01-08T15:28:43.002376Z node 4 :METADATA_PROVIDER ERROR: log.cpp:841: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2026-01-08T15:28:43.075000Z node 4 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: LookupError, path: Root/.metadata/script_executions 2026-01-08T15:28:43.249575Z node 4 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: LookupError, path: Root/.metadata/script_executions 2026-01-08T15:28:43.253297Z node 4 :IMPORT WARN: schemeshard_import.cpp:440: Table profiles were not loaded 2026-01-08T15:28:43.274117Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2026-01-08T15:28:43.274254Z node 4 :HIVE WARN: node_info.cpp:25: 
HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2026-01-08T15:28:43.315488Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Connecting -> Connected 2026-01-08T15:28:43.392525Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2026-01-08T15:28:43.392549Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2026-01-08T15:28:43.392561Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2026-01-08T15:28:43.392693Z node 4 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2026-01-08T15:28:43.648221Z node 4 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: PathErrorUnknown, path: Root/.metadata/script_executions 2026-01-08T15:28:43.649273Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2026-01-08T15:28:43.735019Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateKesus, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_kesus.cpp:30) 2026-01-08T15:28:44.020201Z node 4 :TX_CONVEYOR ERROR: log.cpp:841: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2026-01-08T15:28:48.057472Z node 9 :BS_NODE WARN: {NWDC01@distconf.cpp:423} StateFunc too long Type# 268639238 Duration# 0.012073s 2026-01-08T15:28:48.065477Z node 7 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: LookupError, path: Root/.metadata/script_executions 2026-01-08T15:28:48.065811Z node 7 :METADATA_PROVIDER ERROR: log.cpp:841: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2026-01-08T15:28:48.078155Z node 8 :BS_NODE WARN: {NWDC01@distconf.cpp:423} StateFunc too long Type# 268639239 Duration# 0.006898s 2026-01-08T15:28:48.252580Z node 7 :IMPORT WARN: schemeshard_import.cpp:440: Table profiles were not loaded 2026-01-08T15:28:48.283387Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2026-01-08T15:28:48.283545Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2026-01-08T15:28:48.313523Z node 7 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: LookupError, path: Root/.metadata/script_executions 2026-01-08T15:28:48.328114Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Connecting -> Connected 2026-01-08T15:28:48.437147Z node 9 :BS_NODE WARN: 
{NWDC01@distconf.cpp:423} StateFunc too long Type# 268639239 Duration# 0.007556s 2026-01-08T15:28:48.479884Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2026-01-08T15:28:48.479912Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2026-01-08T15:28:48.479919Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2026-01-08T15:28:48.480011Z node 7 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2026-01-08T15:28:48.748472Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2026-01-08T15:28:48.750604Z node 7 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: PathErrorUnknown, path: Root/.metadata/script_executions 2026-01-08T15:28:48.824710Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateKesus, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_kesus.cpp:30) 2026-01-08T15:28:49.048156Z node 7 :TX_CONVEYOR ERROR: log.cpp:841: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2026-01-08T15:28:54.395398Z node 10 :METADATA_PROVIDER WARN: log.cpp:841: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[10:7593013131435330287:2081];send_to=[0:7307199536658146131:7762515]; 2026-01-08T15:28:54.395480Z node 10 :METADATA_PROVIDER ERROR: log.cpp:841: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2026-01-08T15:28:54.418823Z node 10 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: LookupError, path: Root/.metadata/script_executions 2026-01-08T15:28:54.494585Z node 10 :IMPORT WARN: schemeshard_import.cpp:440: Table profiles were not loaded 2026-01-08T15:28:54.511832Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2026-01-08T15:28:54.511909Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2026-01-08T15:28:54.535966Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Connecting -> Connected 2026-01-08T15:28:54.594230Z node 10 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2026-01-08T15:28:54.594246Z node 10 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2026-01-08T15:28:54.594261Z node 10 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2026-01-08T15:28:54.594336Z node 10 :NET_CLASSIFIER ERROR: 
net_classifier.cpp:228: got bad distributable configuration 2026-01-08T15:28:54.700711Z node 10 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: PathErrorUnknown, path: Root/.metadata/script_executions 2026-01-08T15:28:54.805837Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/ ... 2Z node 28 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(28, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2026-01-08T15:29:33.725444Z node 28 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(28, (0,0,0,0)) VolatileState: Connecting -> Connected 2026-01-08T15:29:33.834603Z node 30 :BS_NODE WARN: {NWDC01@distconf.cpp:423} StateFunc too long Type# 268639238 Duration# 0.017611s 2026-01-08T15:29:34.038663Z node 28 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: PathErrorUnknown, path: Root/.metadata/script_executions 2026-01-08T15:29:34.070649Z node 28 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2026-01-08T15:29:34.070678Z node 28 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2026-01-08T15:29:34.070690Z node 28 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2026-01-08T15:29:34.070806Z node 28 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2026-01-08T15:29:34.113582Z node 28 :TX_CONVEYOR ERROR: log.cpp:841: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2026-01-08T15:29:34.721003Z node 28 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2026-01-08T15:29:34.864705Z node 28 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateKesus, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_kesus.cpp:30) 2026-01-08T15:29:41.231311Z node 31 :METADATA_PROVIDER WARN: log.cpp:841: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[31:7593013331669504004:2150];send_to=[0:7307199536658146131:7762515]; 2026-01-08T15:29:41.231408Z node 31 :METADATA_PROVIDER ERROR: log.cpp:841: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2026-01-08T15:29:41.277763Z node 31 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: LookupError, path: Root/.metadata/script_executions 2026-01-08T15:29:41.437199Z node 31 :KQP_PROXY WARN: 
kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: LookupError, path: Root/.metadata/script_executions 2026-01-08T15:29:41.453416Z node 31 :IMPORT WARN: schemeshard_import.cpp:440: Table profiles were not loaded 2026-01-08T15:29:41.463753Z node 31 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(31, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2026-01-08T15:29:41.463901Z node 31 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(31, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2026-01-08T15:29:41.502107Z node 31 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(31, (0,0,0,0)) VolatileState: Connecting -> Connected 2026-01-08T15:29:41.659367Z node 31 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2026-01-08T15:29:41.659400Z node 31 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2026-01-08T15:29:41.659416Z node 31 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2026-01-08T15:29:41.659557Z node 31 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2026-01-08T15:29:41.917269Z node 31 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: PathErrorUnknown, path: Root/.metadata/script_executions 2026-01-08T15:29:42.032224Z node 31 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2026-01-08T15:29:42.136205Z node 31 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateKesus, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_kesus.cpp:30) 2026-01-08T15:29:42.245371Z node 31 :TX_CONVEYOR ERROR: log.cpp:841: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2026-01-08T15:29:48.240030Z node 34 :METADATA_PROVIDER WARN: log.cpp:841: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[34:7593013363666465224:2155];send_to=[0:7307199536658146131:7762515]; 2026-01-08T15:29:48.240114Z node 34 :METADATA_PROVIDER ERROR: log.cpp:841: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2026-01-08T15:29:48.270134Z node 34 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: LookupError, path: Root/.metadata/script_executions 2026-01-08T15:29:48.426208Z node 34 :IMPORT WARN: schemeshard_import.cpp:440: Table profiles were not loaded 2026-01-08T15:29:48.437926Z node 34 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(34, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2026-01-08T15:29:48.438010Z node 34 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(34, (0,0,0,0)) 
VolatileState: Disconnected -> Connecting 2026-01-08T15:29:48.445558Z node 34 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(34, (0,0,0,0)) VolatileState: Connecting -> Connected 2026-01-08T15:29:48.474407Z node 34 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: LookupError, path: Root/.metadata/script_executions 2026-01-08T15:29:48.575195Z node 34 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2026-01-08T15:29:48.575227Z node 34 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2026-01-08T15:29:48.575238Z node 34 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2026-01-08T15:29:48.575353Z node 34 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2026-01-08T15:29:48.781557Z node 34 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2026-01-08T15:29:48.851385Z node 34 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: PathErrorUnknown, path: Root/.metadata/script_executions 2026-01-08T15:29:48.903094Z node 34 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateKesus, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_kesus.cpp:30) 2026-01-08T15:29:49.257225Z node 34 :TX_CONVEYOR ERROR: log.cpp:841: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2026-01-08T15:29:55.136355Z node 37 :METADATA_PROVIDER WARN: log.cpp:841: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[37:7593013394725178896:2198];send_to=[0:7307199536658146131:7762515]; 2026-01-08T15:29:55.136694Z node 37 :METADATA_PROVIDER ERROR: log.cpp:841: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2026-01-08T15:29:55.173441Z node 37 :BS_NODE WARN: {NWDC01@distconf.cpp:423} StateFunc too long Type# 268639238 Duration# 0.006646s 2026-01-08T15:29:55.191445Z node 37 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: LookupError, path: Root/.metadata/script_executions 2026-01-08T15:29:55.345098Z node 37 :IMPORT WARN: schemeshard_import.cpp:440: Table profiles were not loaded 2026-01-08T15:29:55.380704Z node 37 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(37, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2026-01-08T15:29:55.380842Z node 37 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(37, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2026-01-08T15:29:55.418450Z node 37 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] 
Failed to check script execution tables existence, scheme status: LookupError, path: Root/.metadata/script_executions 2026-01-08T15:29:55.420254Z node 37 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(37, (0,0,0,0)) VolatileState: Connecting -> Connected 2026-01-08T15:29:55.583716Z node 37 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2026-01-08T15:29:55.583748Z node 37 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2026-01-08T15:29:55.583758Z node 37 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2026-01-08T15:29:55.583873Z node 37 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2026-01-08T15:29:55.808343Z node 37 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2026-01-08T15:29:55.925642Z node 37 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateKesus, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_kesus.cpp:30) 2026-01-08T15:29:55.985121Z node 37 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: PathErrorUnknown, path: Root/.metadata/script_executions 2026-01-08T15:29:56.157078Z node 37 :TX_CONVEYOR ERROR: log.cpp:841: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; |86.0%| [TM] {BAZEL_UPLOAD} ydb/services/rate_limiter/ut/unittest |86.0%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/schemeshard/ut_auditsettings/ydb-core-tx-schemeshard-ut_auditsettings |86.0%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/schemeshard/ut_auditsettings/ydb-core-tx-schemeshard-ut_auditsettings |86.0%| [TM] {RESULT} ydb/services/rate_limiter/ut/unittest |86.0%| [LD] {RESULT} $(B)/ydb/core/tx/schemeshard/ut_auditsettings/ydb-core-tx-schemeshard-ut_auditsettings >> DataShardStats::CollectStatsForSeveralParts [GOOD] >> DataShardStats::NoData >> test_s3.py::TestYdbS3TTL::test_s3[table_index_3_UNIQUE_SYNC-pk_types1-all_types1-index1--UNIQUE-SYNC] ------- [TM] {asan, default-linux-x86_64, release} ydb/public/sdk/cpp/tests/integration/sessions/gtest >> YdbSdkSessions::MultiThreadMultipleRequestsOnSharedSessionsQueryClient [SKIPPED] Test command err: ydb/public/sdk/cpp/tests/integration/sessions/main.cpp:539: Enable after interactive tx support |86.0%| [TM] {BAZEL_UPLOAD} ydb/public/sdk/cpp/tests/integration/sessions/gtest >> test_s3.py::TestYdbS3TTL::test_s3[table_index_3__SYNC-pk_types6-all_types6-index6---SYNC] >> test_s3.py::TestYdbS3TTL::test_s3[table_index_2__SYNC-pk_types7-all_types7-index7---SYNC] >> KeyValueGRPCService::SimpleWriteReadWithoutLockGeneration1V2 [GOOD] >> KeyValueGRPCService::SimpleWriteReadWithoutLockGeneration2V1 >> test.py::test[solomon-LabelColumns-default.txt] [GOOD] >> test.py::test[solomon-Subquery-default.txt] >> 
test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_visibility_timeout_works[tables_format_v0] [GOOD] >> test.py::test[solomon-Subquery-default.txt] [SKIPPED] >> test.py::test[solomon-UnknownSetting-] ------- [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test >> test_auditlog.py::test_broken_dynconfig[_client_session_pool_with_auth_other-_bad_dynconfig] [GOOD] Test command err: AAA /home/runner/.ya/build/build_root/663n/002dcb/ydb/tests/functional/audit/test-results/py3test/testing_out_stuff/test_auditlog/chunk4/testing_out_stuff/test_auditlog.py.test_broken_dynconfig._client_session_pool_with_auth_other-_bad_dynconfig/audit_log.0xwqf612.txt 2026-01-08T15:29:46.474285Z: {"reason":"ydb/library/fyamlcpp/fyamlcpp.cpp:1068: \n6:12 plain scalar cannot start with '%'","sanitized_token":"othe****ltin (27F910A9)","remote_address":"127.0.0.1","status":"ERROR","subject":"other-user@builtin","operation":"REPLACE DYNCONFIG","new_config":"\n---\n123metadata:\n kind: MainConfig\n cluster: \"\"\n version: %s\nconfig:\n yaml_config_enabled: true\nallowed_labels:\n node_id:\n type: string\n host:\n type: string\n tenant:\n type: string\nselector_config: []\n ","component":"console"} |86.0%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/sqs/messaging/py3test >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_create_queue_by_nonexistent_user_fails[tables_format_v0] [GOOD] |86.0%| [TM] {BAZEL_UPLOAD} ydb/tests/functional/audit/py3test |86.0%| [TM] {BAZEL_UPLOAD} ydb/tests/functional/sqs/messaging/py3test >> DataShardSnapshotIsolation::ReadWriteCommitConflictWhileReading [GOOD] >> test_scheme_shard_operations.py::TestSchemeShardSimpleOps::test_ydb_remove_directory_that_does_not_exist_failure >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_send_message_batch[tables_format_v0-std] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_delay_one_message[tables_format_v1-std] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_delete_message_batch_deduplicates_receipt_handle[tables_format_v0-fifo] |86.0%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test |86.0%| [TM] {BAZEL_UPLOAD} ydb/tests/functional/audit/py3test >> test_fifo_messaging.py::TestSqsFifoMessagingWithTenant::test_deduplication[tables_format_v0-content_based] [GOOD] >> test_fifo_messaging.py::TestSqsFifoMessagingWithTenant::test_deduplication[tables_format_v1-by_deduplication_id] |86.0%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/sqs/messaging/py3test >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_validates_message_attributes[tables_format_v1] [GOOD] |86.0%| [TM] {BAZEL_UPLOAD} ydb/tests/functional/sqs/messaging/py3test |86.0%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/sqs/messaging/py3test >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_does_not_change_visibility_not_in_flight[tables_format_v1-std] [GOOD] |86.0%| [TM] {BAZEL_UPLOAD} ydb/tests/functional/sqs/messaging/py3test >> TMLPDLQMoverTests::MoveToDLQ_ShortMessage [GOOD] >> TMLPDLQMoverTests::MoveToDLQ_BigMessage >> QuoterWithKesusTest::FailsToGetBigQuota [GOOD] >> QuoterWithKesusTest::PrefetchCoefficient >> test_canonical_records.py::test_topic ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/datashard/ut_snapshot_isolation/unittest >> DataShardSnapshotIsolation::ReadWriteCommitConflictWhileReading [GOOD] Test command err: 
... waiting for SysViewsRoster update finished 2026-01-08T15:27:13.147345Z node 1 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:125: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, got undelivered to scheme cache: ActorUnknown 2026-01-08T15:27:13.530936Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:114:2161], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2026-01-08T15:27:13.533042Z node 1 :METADATA_PROVIDER ERROR: log.cpp:841: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2026-01-08T15:27:13.537365Z node 1 :METADATA_PROVIDER ERROR: log.cpp:841: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2026-01-08T15:27:16.747060Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2026-01-08T15:27:16.747207Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2026-01-08T15:27:16.947630Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1374: Notification cookie mismatch for subscription [1:34:2081] 1767886030318174 != 1767886030318178 2026-01-08T15:27:17.043647Z node 1 :IMPORT WARN: schemeshard_import.cpp:440: Table profiles were not loaded 2026-01-08T15:27:17.140339Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2026-01-08T15:27:17.424747Z node 1 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: PathErrorUnknown, path: Root/.metadata/script_executions ... waiting for SysViewsRoster update finished (done) 2026-01-08T15:27:18.394397Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2026-01-08T15:27:18.448136Z node 1 :TX_CONVEYOR ERROR: log.cpp:841: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2026-01-08T15:27:20.341207Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:865:2748], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:27:20.342595Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:263: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:27:20.346130Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:883:2753], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:27:20.353606Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:263: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:27:20.629934Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:27:20.836069Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3119: StateInit, received event# 268828672, Sender [1:904:2773], Recipient [1:912:2779]: NKikimr::TEvTablet::TEvBoot 2026-01-08T15:27:20.836912Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3119: StateInit, received event# 268828673, Sender [1:904:2773], Recipient [1:912:2779]: NKikimr::TEvTablet::TEvRestored 2026-01-08T15:27:20.837145Z node 1 :TX_DATASHARD INFO: datashard.cpp:374: TDataShard::OnActivateExecutor: tablet 72075186224037888 actor [1:912:2779] 2026-01-08T15:27:20.837313Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:661: TxInitSchema.Execute 2026-01-08T15:27:21.307940Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateInactive, received event# 268828684, Sender [1:904:2773], Recipient [1:912:2779]: NKikimr::TEvTablet::TEvFollowerSyncComplete 2026-01-08T15:27:21.318369Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:727: TxInitSchema.Complete 2026-01-08T15:27:21.318856Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:50: TDataShard::TTxInit::Execute 2026-01-08T15:27:21.356456Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1324: LoadChangeRecords: QueueSize: 0, at tablet: 72075186224037888 2026-01-08T15:27:21.357483Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1380: LoadLockChangeRecords at tablet: 72075186224037888 2026-01-08T15:27:21.358334Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1429: LoadChangeRecordCommits at tablet: 72075186224037888 2026-01-08T15:27:21.364824Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:94: TDataShard::TTxInit::Complete 2026-01-08T15:27:21.366180Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:102: TDataShard::TTxInitRestored::Execute 2026-01-08T15:27:21.367171Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:142: DataShard 72075186224037888 persisting started state actor id [1:928:2779] in generation 1 2026-01-08T15:27:21.374090Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:151: TDataShard::TTxInitRestored::Complete 2026-01-08T15:27:21.718843Z node 1 :TX_DATASHARD INFO: datashard.cpp:418: Switched to work state WaitScheme tabletId 72075186224037888 2026-01-08T15:27:21.719060Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:458: 72075186224037888 not sending time cast registration request in state WaitScheme: missing processing params 2026-01-08T15:27:21.719166Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1259: Change sender created: at tablet: 72075186224037888, actorId: [1:930:2789] 2026-01-08T15:27:21.719202Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1264: Trying to activate change sender: at tablet: 72075186224037888 2026-01-08T15:27:21.719235Z node 1 :TX_DATASHARD INFO: datashard.cpp:1281: Cannot activate change sender: at tablet: 72075186224037888, state: WaitScheme 2026-01-08T15:27:21.719267Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2026-01-08T15:27:21.719483Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3153: StateWork, received event# 2146435072, Sender [1:912:2779], Recipient [1:912:2779]: 
NKikimr::NDataShard::TDataShard::TEvPrivate::TEvProgressTransaction 2026-01-08T15:27:21.719525Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3178: StateWork, processing event TEvPrivate::TEvProgressTransaction 2026-01-08T15:27:21.719820Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 72075186224037888 2026-01-08T15:27:21.719904Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 72075186224037888 2026-01-08T15:27:21.719966Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2026-01-08T15:27:21.720006Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 0 2026-01-08T15:27:21.720057Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:385: Check unit PlanQueue at 72075186224037888 2026-01-08T15:27:21.720089Z node 1 :TX_DATASHARD TRACE: plan_queue_unit.cpp:59: TPlanQueueUnit at 72075186224037888 has no attached operations 2026-01-08T15:27:21.720131Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:403: Unit PlanQueue has no ready operations at 72075186224037888 2026-01-08T15:27:21.720162Z node 1 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186224037888 TxInFly 0 2026-01-08T15:27:21.720210Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2026-01-08T15:27:21.720323Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3153: StateWork, received event# 269877761, Sender [1:919:2783], Recipient [1:912:2779]: NKikimr::TEvTabletPipe::TEvServerConnected 2026-01-08T15:27:21.720364Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3189: StateWork, processing event TEvTabletPipe::TEvServerConnected 2026-01-08T15:27:21.720412Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3740: Server connected at leader tablet# 72075186224037888, clientId# [1:911:2778], serverId# [1:919:2783], sessionId# [0:0:0] 2026-01-08T15:27:21.720767Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3153: StateWork, received event# 269549568, Sender [1:400:2399], Recipient [1:919:2783] 2026-01-08T15:27:21.720809Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3159: StateWork, processing event TEvDataShard::TEvProposeTransaction 2026-01-08T15:27:21.720895Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037888 2026-01-08T15:27:21.721116Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1848: Trying to execute [0:281474976715657] at 72075186224037888 on unit CheckSchemeTx 2026-01-08T15:27:21.721181Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:133: Propose scheme transaction at tablet 72075186224037888 txId 281474976715657 ssId 72057594046644480 seqNo 2:1 2026-01-08T15:27:21.721254Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:221: Prepared scheme transaction txId 281474976715657 at tablet 72075186224037888 2026-01-08T15:27:21.721305Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1883: Execution status for [0:281474976715657] at 72075186224037888 is ExecutedNoMoreRestarts 2026-01-08T15:27:21.721345Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1931: Advance execution plan for [0:281474976715657] at 72075186224037888 executing on unit CheckSchemeTx 2026-01-08T15:27:21.721377Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1937: Add [0:281474976715657] at 72075186224037888 to execution unit StoreSchemeTx 
2026-01-08T15:27:21.721407Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1848: Trying to execute [0:281474976715657] at 72075186224037888 on unit StoreSchemeTx 2026-01-08T15:27:21.721665Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1883: Execution status for [0:281474976715657] at 72075186224037888 is DelayCompleteNoMoreRestarts 2026-01-08T15:27:21.721714Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1931: Advance execution plan for [0:281474976715657] at 72075186224037888 executing on unit StoreSchemeTx 2026-01-08T15:27:21.721746Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1937: Add [0:281474976715657] at 72075186224037888 to execution unit FinishPropose 2026-01-08T15:27:21.721774Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1848: Trying to execute [0:281474976715657] at 72075186224037888 on unit FinishPropose 2026-01-08T15:27:21.721814Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1883: Execution status for [0:281474976715657] at 72075186224037888 is DelayComplete 2026-01-08T15:27:21.721847Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1931: Advance execution plan for [0:281474976715657] at 72075186224037888 executing on unit FinishPr ... 86224037888 read iterator# {[19:1126:2934], 0} exhausted 2026-01-08T15:30:06.542888Z node 19 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1883: Execution status for [0:6] at 72075186224037888 is Executed 2026-01-08T15:30:06.542922Z node 19 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1931: Advance execution plan for [0:6] at 72075186224037888 executing on unit ExecuteRead 2026-01-08T15:30:06.542952Z node 19 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1937: Add [0:6] at 72075186224037888 to execution unit CompletedOperations 2026-01-08T15:30:06.542987Z node 19 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1848: Trying to execute [0:6] at 72075186224037888 on unit CompletedOperations 2026-01-08T15:30:06.543037Z node 19 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1883: Execution status for [0:6] at 72075186224037888 is Executed 2026-01-08T15:30:06.543063Z node 19 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1931: Advance execution plan for [0:6] at 72075186224037888 executing on unit CompletedOperations 2026-01-08T15:30:06.543121Z node 19 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1943: Execution plan for [0:6] at 72075186224037888 has finished 2026-01-08T15:30:06.543196Z node 19 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2932: TTxReadViaPipeline(69) Execute with status# Executed at tablet# 72075186224037888 2026-01-08T15:30:06.543359Z node 19 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2981: TTxReadViaPipeline(69) Complete: at tablet# 72075186224037888 2026-01-08T15:30:06.839648Z node 19 :TX_DATASHARD TRACE: datashard_impl.h:3153: StateWork, received event# 269877761, Sender [19:1136:2944], Recipient [19:918:2782]: NKikimr::TEvTabletPipe::TEvServerConnected 2026-01-08T15:30:06.839785Z node 19 :TX_DATASHARD TRACE: datashard_impl.h:3189: StateWork, processing event TEvTabletPipe::TEvServerConnected 2026-01-08T15:30:06.839904Z node 19 :TX_DATASHARD DEBUG: datashard.cpp:3740: Server connected at leader tablet# 72075186224037888, clientId# [19:1135:2943], serverId# [19:1136:2944], sessionId# [0:0:0] 2026-01-08T15:30:06.840362Z node 19 :TX_DATASHARD TRACE: datashard_impl.h:3153: StateWork, received event# 278003712, Sender [19:1134:2942], Recipient [19:918:2782]: NKikimrDataEvents.TEvWrite TxId: 0 TxMode: MODE_IMMEDIATE Locks { Locks { LockId: 281474976715661 DataShard: 72075186224037888 Generation: 1 
Counter: 0 SchemeShard: 72057594046644480 PathId: 38 HasWrites: true } SendingShards: 72075186224037888 ReceivingShards: 72075186224037888 Op: Commit } LockMode: OPTIMISTIC_SNAPSHOT_ISOLATION 2026-01-08T15:30:06.840410Z node 19 :TX_DATASHARD TRACE: datashard__write.cpp:182: Handle TTxWrite: at tablet# 72075186224037888 2026-01-08T15:30:06.840602Z node 19 :TX_DATASHARD TRACE: datashard_impl.h:3153: StateWork, received event# 2146435074, Sender [19:918:2782], Recipient [19:918:2782]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvDelayedProposeTransaction 2026-01-08T15:30:06.840664Z node 19 :TX_DATASHARD TRACE: datashard_impl.h:3180: StateWork, processing event TEvPrivate::TEvDelayedProposeTransaction 2026-01-08T15:30:06.840804Z node 19 :TX_DATASHARD TRACE: datashard__write.cpp:28: TTxWrite:: execute at tablet# 72075186224037888 2026-01-08T15:30:06.841040Z node 19 :TX_DATASHARD TRACE: datashard_write_operation.cpp:72: Parsing write transaction for 0 at 72075186224037888, record: TxId: 0 TxMode: MODE_IMMEDIATE Locks { Locks { LockId: 281474976715661 DataShard: 72075186224037888 Generation: 1 Counter: 0 SchemeShard: 72057594046644480 PathId: 38 HasWrites: true } SendingShards: 72075186224037888 ReceivingShards: 72075186224037888 Op: Commit } LockMode: OPTIMISTIC_SNAPSHOT_ISOLATION 2026-01-08T15:30:06.841207Z node 19 :TX_DATASHARD TRACE: key_validator.cpp:33: -- AddReadRange: (Uint64 : 281474976715661, Uint64 : 72075186224037888, Uint64 : 72057594046644480, Uint64 : 38) table: [1:997:0] 2026-01-08T15:30:06.841341Z node 19 :TX_DATASHARD TRACE: key_validator.cpp:54: -- AddWriteRange: (Uint64 : 281474976715661, Uint64 : 72075186224037888, Uint64 : 72057594046644480, Uint64 : 38) table: [1:997:0] 2026-01-08T15:30:06.841488Z node 19 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1848: Trying to execute [0:7] at 72075186224037888 on unit CheckWrite 2026-01-08T15:30:06.841590Z node 19 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1883: Execution status for [0:7] at 72075186224037888 is Executed 2026-01-08T15:30:06.841673Z node 19 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1931: Advance execution plan for [0:7] at 72075186224037888 executing on unit CheckWrite 2026-01-08T15:30:06.841753Z node 19 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1937: Add [0:7] at 72075186224037888 to execution unit BuildAndWaitDependencies 2026-01-08T15:30:06.841811Z node 19 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1848: Trying to execute [0:7] at 72075186224037888 on unit BuildAndWaitDependencies 2026-01-08T15:30:06.841878Z node 19 :TX_DATASHARD TRACE: datashard.cpp:2377: GetMvccTxVersion at 72075186224037888 CompleteEdge# v2500/0 IncompleteEdge# v{min} UnprotectedReadEdge# v2500/18446744073709551615 ImmediateWriteEdge# v2500/18446744073709551615 ImmediateWriteEdgeReplied# v2500/18446744073709551615 2026-01-08T15:30:06.841990Z node 19 :TX_DATASHARD TRACE: datashard_pipeline.cpp:483: Activated operation [0:7] at 72075186224037888 2026-01-08T15:30:06.842056Z node 19 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1883: Execution status for [0:7] at 72075186224037888 is Executed 2026-01-08T15:30:06.842098Z node 19 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1931: Advance execution plan for [0:7] at 72075186224037888 executing on unit BuildAndWaitDependencies 2026-01-08T15:30:06.842139Z node 19 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1937: Add [0:7] at 72075186224037888 to execution unit BlockFailPoint 2026-01-08T15:30:06.842173Z node 19 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1848: Trying to execute [0:7] at 
72075186224037888 on unit BlockFailPoint 2026-01-08T15:30:06.842206Z node 19 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1883: Execution status for [0:7] at 72075186224037888 is Executed 2026-01-08T15:30:06.842238Z node 19 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1931: Advance execution plan for [0:7] at 72075186224037888 executing on unit BlockFailPoint 2026-01-08T15:30:06.842269Z node 19 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1937: Add [0:7] at 72075186224037888 to execution unit ExecuteWrite 2026-01-08T15:30:06.842300Z node 19 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1848: Trying to execute [0:7] at 72075186224037888 on unit ExecuteWrite 2026-01-08T15:30:06.846031Z node 19 :TX_DATASHARD DEBUG: execute_write_unit.cpp:262: Executing write operation for [0:7] at 72075186224037888 2026-01-08T15:30:06.846173Z node 19 :TX_DATASHARD TRACE: datashard.cpp:2377: GetMvccTxVersion at 72075186224037888 CompleteEdge# v2500/0 IncompleteEdge# v{min} UnprotectedReadEdge# v2500/18446744073709551615 ImmediateWriteEdge# v2500/18446744073709551615 ImmediateWriteEdgeReplied# v2500/18446744073709551615 2026-01-08T15:30:06.846401Z node 19 :TX_DATASHARD TRACE: datashard_kqp.cpp:834: KqpCommitLock LockId: 281474976715661 DataShard: 72075186224037888 Generation: 1 Counter: 0 SchemeShard: 72057594046644480 PathId: 38 HasWrites: true 2026-01-08T15:30:06.846508Z node 19 :TX_DATASHARD TRACE: datashard_user_db.cpp:517: Committing changes lockId# 281474976715661 in localTid# 1001 shard# 72075186224037888 2026-01-08T15:30:06.846659Z node 19 :TX_DATASHARD DEBUG: execute_write_unit.cpp:469: Skip empty write operation for [0:7] at 72075186224037888 2026-01-08T15:30:06.846748Z node 19 :TX_DATASHARD TRACE: locks.cpp:194: Lock 281474976715662 marked broken at v{min} 2026-01-08T15:30:06.847024Z node 19 :TX_DATASHARD TRACE: execute_write_unit.cpp:48: add locks to result: 0 2026-01-08T15:30:06.847134Z node 19 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1883: Execution status for [0:7] at 72075186224037888 is ExecutedNoMoreRestarts 2026-01-08T15:30:06.847195Z node 19 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1931: Advance execution plan for [0:7] at 72075186224037888 executing on unit ExecuteWrite 2026-01-08T15:30:06.847271Z node 19 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1937: Add [0:7] at 72075186224037888 to execution unit FinishProposeWrite 2026-01-08T15:30:06.847335Z node 19 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1848: Trying to execute [0:7] at 72075186224037888 on unit FinishProposeWrite 2026-01-08T15:30:06.847441Z node 19 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1883: Execution status for [0:7] at 72075186224037888 is DelayCompleteNoMoreRestarts 2026-01-08T15:30:06.847496Z node 19 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1931: Advance execution plan for [0:7] at 72075186224037888 executing on unit FinishProposeWrite 2026-01-08T15:30:06.847574Z node 19 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1937: Add [0:7] at 72075186224037888 to execution unit CompletedOperations 2026-01-08T15:30:06.847637Z node 19 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1848: Trying to execute [0:7] at 72075186224037888 on unit CompletedOperations 2026-01-08T15:30:06.847704Z node 19 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1883: Execution status for [0:7] at 72075186224037888 is Executed 2026-01-08T15:30:06.847737Z node 19 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1931: Advance execution plan for [0:7] at 72075186224037888 executing on unit CompletedOperations 2026-01-08T15:30:06.847795Z node 19 :TX_DATASHARD 
TRACE: datashard_pipeline.cpp:1943: Execution plan for [0:7] at 72075186224037888 has finished 2026-01-08T15:30:06.848744Z node 19 :TX_DATASHARD TRACE: datashard__write.cpp:150: TTxWrite complete: at tablet# 72075186224037888 2026-01-08T15:30:06.848839Z node 19 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1954: Complete execution for [0:7] at 72075186224037888 on unit FinishProposeWrite 2026-01-08T15:30:06.848935Z node 19 :TX_DATASHARD TRACE: finish_propose_write_unit.cpp:163: Propose transaction complete txid 7 at tablet 72075186224037888 send to client, propose latency: 0 ms, status: STATUS_COMPLETED 2026-01-08T15:30:06.849076Z node 19 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2026-01-08T15:30:06.849794Z node 19 :TX_DATASHARD TRACE: datashard_impl.h:3153: StateWork, received event# 269553218, Sender [19:1126:2934], Recipient [19:918:2782]: NKikimrTxDataShard.TEvReadAck ReadId: 0 SeqNo: 1 MaxRows: 1 MaxBytes: 4294967295 2026-01-08T15:30:06.849998Z node 19 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:3660: 72075186224037888 ReadAck for read iterator# {[19:1126:2934], 0}: { ReadId: 0 SeqNo: 1 MaxRows: 1 MaxBytes: 4294967295 }, read continued, bytesLeft# 4294967295, rowsLeft# 1 2026-01-08T15:30:06.850107Z node 19 :TX_DATASHARD TRACE: datashard_impl.h:3153: StateWork, received event# 269553217, Sender [19:918:2782], Recipient [19:918:2782]: NKikimr::TEvDataShard::TEvReadContinue 2026-01-08T15:30:06.850211Z node 19 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:3081: 72075186224037888 ReadContinue for iterator# {[19:1126:2934], 0}, firstUnprocessedQuery# 0 2026-01-08T15:30:06.852743Z node 19 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:3174: 72075186224037888 ReadContinue: iterator# {[19:1126:2934], 0}, FirstUnprocessedQuery# 0 2026-01-08T15:30:06.853134Z node 19 :TX_DATASHARD DEBUG: datashard__read_iterator.cpp:3277: 72075186224037888 read iterator# {[19:1126:2934], 0} TTxReadContinue::Execute() found broken lock# 281474976715662 2026-01-08T15:30:06.853464Z node 19 :TX_DATASHARD DEBUG: datashard__read_iterator.cpp:3321: 72075186224037888 read iterator# {[19:1126:2934], 0} TTxReadContinue::Execute() finished with error, aborting: ReadId: 0 SeqNo: 2 Status { Code: ABORTED Issues { message: "Read conflict with concurrent transaction" severity: 1 } } BrokenTxLocks { LockId: 281474976715662 DataShard: 72075186224037888 Generation: 1 Counter: 18446744073709551615 SchemeShard: 72057594046644480 PathId: 38 } NodeId: 19 |86.0%| [TM] {BAZEL_UPLOAD} ydb/core/tx/datashard/ut_snapshot_isolation/unittest >> TEvaluateExprInViewTest::NakedCallToCurrentTimeFunction [GOOD] >> TSelectFromViewTest::OneTable >> Coordinator::RestoreTenantConfiguration-AlterDatabaseCreateHiveFirst-true [GOOD] >> Coordinator::LastEmptyStepResent >> TTxDataShardLocalKMeansScan::BuildToBuild [GOOD] >> TTxDataShardLocalKMeansScan::BuildToBuildWithOverlap >> TMemoryController::ResourceBroker_ConfigCS [GOOD] >> TMemoryController::GroupedMemoryLimiter_ConfigCS >> test_fifo_messaging.py::TestSqsFifoMessagingWithTenant::test_read_dont_stall[tables_format_v1] [GOOD] >> test_fifo_messaging.py::TestSqsFifoMessagingWithTenant::test_receive_attempt_reloads_same_messages[tables_format_v0-after_crutch_batch] >> test_fifo_messaging.py::TestSqsFifoMessagingWithTenant::test_can_read_from_different_groups[tables_format_v0] >> KqpBatchUpdate::DisableFlags-UseSink-UseBatchUpdates >> test_update_script_tables.py::TestUpdateScriptTablesYdb::test_recreate_tables[DROP 
TABLE {}-`.metadata/script_executions`] [GOOD] |86.0%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test |86.0%| [TM] {BAZEL_UPLOAD} ydb/tests/functional/audit/py3test >> KqpBatchUpdate::Large_2 |86.0%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/sqs/messaging/py3test >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_send_message[tables_format_v0-fifo] [GOOD] |86.0%| [TM] {BAZEL_UPLOAD} ydb/tests/functional/sqs/messaging/py3test >> KeyValueGRPCService::SimpleWriteReadWithoutLockGeneration2V1 [GOOD] >> KeyValueGRPCService::SimpleWriteReadWithoutLockGeneration2V2 >> test_s3.py::TestYdbS3TTL::test_s3[table_ttl_Timestamp-pk_types17-all_types17-index17-Timestamp--] >> test_s3.py::TestYdbS3TTL::test_s3[table_ttl_Uint32-pk_types14-all_types14-index14-Uint32--] >> test_s3.py::TestPgYdbS3TTL::test_s3[table_index_0__ASYNC-pk_types2-all_types2-index2---ASYNC] >> test.py::test[solomon-UnknownSetting-] [GOOD] >> test_fifo_messaging.py::TestSqsFifoMessagingWithTenant::test_deduplication[tables_format_v1-by_deduplication_id] [GOOD] >> test_fifo_messaging.py::TestSqsFifoMessagingWithTenant::test_deduplication[tables_format_v1-content_based] |86.0%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/schemeshard/ut_extsubdomain_reboots/ydb-core-tx-schemeshard-ut_extsubdomain_reboots |86.0%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/schemeshard/ut_extsubdomain_reboots/ydb-core-tx-schemeshard-ut_extsubdomain_reboots |86.0%| [TM] {RESULT} ydb/core/tx/datashard/ut_snapshot_isolation/unittest |86.0%| [LD] {RESULT} $(B)/ydb/core/tx/schemeshard/ut_extsubdomain_reboots/ydb-core-tx-schemeshard-ut_extsubdomain_reboots >> KqpBatchUpdate::UpdateOn |86.1%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/security/certificate_check/ut/ydb-core-security-certificate_check-ut |86.1%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/security/certificate_check/ut/ydb-core-security-certificate_check-ut |86.1%| [LD] {RESULT} $(B)/ydb/core/security/certificate_check/ut/ydb-core-security-certificate_check-ut >> test_s3.py::TestYdbS3TTL::test_s3[table_index_0__SYNC-pk_types9-all_types9-index9---SYNC] >> YdbSdkSessions::CloseSessionWithSessionPoolFromDtors [GOOD] >> KqpBatchUpdate::ManyPartitions_1 >> test_scheme_shard_operations.py::TestSchemeShardSimpleOps::test_ydb_remove_directory_that_does_not_exist_failure [GOOD] >> TFetchRequestTests::CheckAccess [GOOD] >> RetryPolicy::TWriteSession_TestBrokenPolicy [GOOD] >> RetryPolicy::TWriteSession_RetryOnTargetCluster |86.1%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/cms/ut/ydb-core-cms-ut |86.1%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/cms/ut/ydb-core-cms-ut |86.1%| [LD] {RESULT} $(B)/ydb/core/cms/ut/ydb-core-cms-ut >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_delete_message_batch_deduplicates_receipt_handle[tables_format_v0-fifo] [GOOD] >> Coordinator::LastEmptyStepResent [GOOD] >> CoordinatorVolatile::PlanResentOnReboots |86.1%| [TM] {asan, default-linux-x86_64, pic, release} ydb/tests/fq/solomon/py3test >> test.py::test[solomon-UnknownSetting-] [GOOD] |86.1%| [TM] {BAZEL_UPLOAD} ydb/tests/fq/solomon/py3test |86.1%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/mind/hive/ut/ydb-core-mind-hive-ut |86.1%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/mind/hive/ut/ydb-core-mind-hive-ut |86.1%| [TM] {RESULT} ydb/tests/fq/solomon/py3test |86.1%| [LD] {RESULT} $(B)/ydb/core/mind/hive/ut/ydb-core-mind-hive-ut ------- [TM] {asan, default-linux-x86_64, release} 
ydb/core/persqueue/public/fetcher/ut/unittest >> TFetchRequestTests::CheckAccess [GOOD] Test command err: 2026-01-08T15:28:48.888322Z node 1 :METADATA_PROVIDER WARN: log.cpp:841: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7593013105850544108:2074];send_to=[0:7307199536658146131:7762515]; 2026-01-08T15:28:48.888613Z node 1 :METADATA_PROVIDER ERROR: log.cpp:841: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2026-01-08T15:28:48.950803Z node 1 :PQ_READ_PROXY DEBUG: caching_service.cpp:44: Direct read cache: : Created 2026-01-08T15:28:49.146424Z node 1 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: LookupError, path: Root/.metadata/script_executions 2026-01-08T15:28:49.151576Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2026-01-08T15:28:49.151716Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2026-01-08T15:28:49.194914Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2026-01-08T15:28:49.330627Z node 1 :IMPORT WARN: schemeshard_import.cpp:440: Table profiles were not loaded 2026-01-08T15:28:49.460856Z node 1 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: PathErrorUnknown, path: Root/.metadata/script_executions 2026-01-08T15:28:49.744721Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: /home/runner/.ya/build/build_root/663n/00325b/r3tmp/yandexDstc71.tmp 2026-01-08T15:28:49.744754Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: /home/runner/.ya/build/build_root/663n/00325b/r3tmp/yandexDstc71.tmp 2026-01-08T15:28:49.744932Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:202: successfully initialized from file: /home/runner/.ya/build/build_root/663n/00325b/r3tmp/yandexDstc71.tmp 2026-01-08T15:28:49.745027Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2026-01-08T15:28:49.894638Z INFO: TTestServer started on Port 22900 GrpcPort 25317 2026-01-08T15:28:49.897896Z node 1 :TX_CONVEYOR ERROR: log.cpp:841: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; PQClient connected to localhost:25317 2026-01-08T15:28:50.447311Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2026-01-08T15:28:50.459998Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:123: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2026-01-08T15:28:50.482849Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:123: NotifyTxCompletion, unknown transaction, txId: 281474976715658, at schemeshard: 72057594046644480 2026-01-08T15:28:50.630967Z node 1 :FLAT_TX_SCHEMESHARD WARN: 
schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:28:50.853335Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:123: NotifyTxCompletion, unknown transaction, txId: 281474976715660, at schemeshard: 72057594046644480 2026-01-08T15:28:50.876412Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:123: NotifyTxCompletion, unknown transaction, txId: 281474976715661, at schemeshard: 72057594046644480 2026-01-08T15:28:53.770174Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7593013127325381579:2331], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:28:53.770351Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:263: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:28:53.771003Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7593013127325381608:2336], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:28:53.771056Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7593013127325381609:2337], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:28:53.771167Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:263: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:28:53.777037Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715662:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2026-01-08T15:28:53.790351Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7593013127325381644:2341], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:28:53.811074Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7593013127325381612:2338], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715662 completed, doublechecking } 2026-01-08T15:28:53.811142Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:263: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:28:53.891417Z node 1 :METADATA_PROVIDER ERROR: log.cpp:841: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7593013105850544108:2074];send_to=[0:7307199536658146131:7762515]; 2026-01-08T15:28:53.891741Z node 1 :METADATA_PROVIDER ERROR: log.cpp:841: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2026-01-08T15:28:54.055425Z node 1 :TX_PROXY ERROR: schemereq.cpp:624: Actor# [1:7593013127325381678:2647] txid# 281474976715663, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 45], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2026-01-08T15:28:54.092912Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:28:54.138175Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:28:54.213037Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:28:54.229924Z node 1 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:625: Compilation failed, self: [1:7593013131620349002:2348], status: SCHEME_ERROR, issues:
: Error: Type annotation, code: 1030
:5:17: Error: At function: KiReadTable!
:5:17: Error: Cannot find table 'db.[/Root/PQ/Config/V2/Versions]' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions., code: 2003 2026-01-08T15:28:54.231939Z node 1 :KQP_SESSION WARN: {KQPSA@kqp_session_actor.cpp:2679} SessionId: ydb://session/3?node_id=1&id=NWExNGFhM2ItYmNjYWY0OS0yY2ZlMTU2YS00MDg2OGRk, ActorId: [1:7593013127325381577:2330], ActorState: ExecuteState, LegacyTraceId: 01kef3k6frdf0bdmvs76kwq5sb, ReplyQueryCompileError, remove tx status# SCHEME_ERROR issues# { message: "Type annotation" issue_code: 1030 severity: 1 issues { position { row: 5 column: 17 } message: "At function: KiReadTable!" end_position { row: 5 column: 17 } severity: 1 issues { position { row: 5 column: 17 } message: "Cannot find table \'db.[/Root/PQ/Config/V2/Versions]\' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions." end_position { row: 5 column: 17 } issue_code: 2003 severity: 1 } } } tx_id# trace_id# 2026-01-08T15:28:54.234005Z node 1 :PERSQUEUE_CLUSTER_TRACKER ERROR: cluster_tracker.cpp:167: failed to list clusters: { Response { QueryIssues { message: "Type annotation" issue_code: 1030 severity: 1 issues { position { row: 5 column: 17 } message: "At function: KiReadTable!" end_position { row: 5 column: 17 } severity: 1 issues { position { row: 5 column: 17 } message: "Cannot find table \'db.[/Root/PQ/Config/V2/Versions]\' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions." end_position { row: 5 column: 17 } issue_code: 2003 severity: 1 } } } TxMeta { } } YdbStatus: SCHEME_ERROR ConsumedRu: 1 } === Init DC: UPSERT INTO `/Root/PQ/Config/V2/Cluster` (name, balancer, local, enabled, weight) VALUES ("dc1", "localhost", true, true, 1000), ("dc2", "dc2.logbroker.yandex.net", false, true, 1000); === CheckClustersList. Subcribe to ClusterTracker from [1:7593013131620349274:2830] === CheckClustersList. Ok 2026-01-08T15:29:00.686529Z :HappyWay INFO: TTopicSdkTestSetup started 2026-01-08T15:29:00.717834Z node 1 :PQ_READ_PROXY DEBUG: grpc_pq_schema.cpp:133: new create topic request 2026-01-08T15:29:00.736306Z node 1 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1657: [72075186224037893][] pipe [1:7593013157390153289:2947] connected; active server actors: 1 2026-01-08T15:29:00.736548Z node 1 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer__balancing.cpp:1521: [72075186224037893][test-topic] updating configuration. Deleted partitions []. Added partitions [0] 2026-01-08T15:29:00.739275Z node 1 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer.cpp:756: [72075186224037893][test-topic] Discovered subdomain [OwnerId: 72057594046644480, Lo ... 
: partition.cpp:2337: [72075186224037895][Partition][2][StateIdle] Process user action and tx pending commits 2026-01-08T15:30:19.615164Z node 6 :PERSQUEUE DEBUG: partition.cpp:2388: [72075186224037895][Partition][2][StateIdle] Events: 0, PendingCommits: 0, PendingWrites: 0 2026-01-08T15:30:19.615175Z node 6 :PERSQUEUE DEBUG: partition.cpp:2347: [72075186224037895][Partition][2][StateIdle] Try persist 2026-01-08T15:30:19.615208Z node 6 :PERSQUEUE DEBUG: partition.cpp:2329: [72075186224037897][Partition][4][StateIdle] Process user action and tx events 2026-01-08T15:30:19.615221Z node 6 :PERSQUEUE DEBUG: partition.cpp:2388: [72075186224037897][Partition][4][StateIdle] Events: 0, PendingCommits: 0, PendingWrites: 0 2026-01-08T15:30:19.615233Z node 6 :PERSQUEUE DEBUG: partition.cpp:2337: [72075186224037897][Partition][4][StateIdle] Process user action and tx pending commits 2026-01-08T15:30:19.615250Z node 6 :PERSQUEUE DEBUG: partition.cpp:2388: [72075186224037897][Partition][4][StateIdle] Events: 0, PendingCommits: 0, PendingWrites: 0 2026-01-08T15:30:19.615260Z node 6 :PERSQUEUE DEBUG: partition.cpp:2347: [72075186224037897][Partition][4][StateIdle] Try persist 2026-01-08T15:30:19.615290Z node 6 :PERSQUEUE DEBUG: partition.cpp:2329: [72075186224037896][Partition][3][StateIdle] Process user action and tx events 2026-01-08T15:30:19.615301Z node 6 :PERSQUEUE DEBUG: partition.cpp:2388: [72075186224037896][Partition][3][StateIdle] Events: 0, PendingCommits: 0, PendingWrites: 0 2026-01-08T15:30:19.615313Z node 6 :PERSQUEUE DEBUG: partition.cpp:2337: [72075186224037896][Partition][3][StateIdle] Process user action and tx pending commits 2026-01-08T15:30:19.615327Z node 6 :PERSQUEUE DEBUG: partition.cpp:2388: [72075186224037896][Partition][3][StateIdle] Events: 0, PendingCommits: 0, PendingWrites: 0 2026-01-08T15:30:19.615337Z node 6 :PERSQUEUE DEBUG: partition.cpp:2347: [72075186224037896][Partition][3][StateIdle] Try persist 2026-01-08T15:30:19.655595Z node 6 :PERSQUEUE DEBUG: partition.cpp:2329: [72075186224037898][Partition][0][StateIdle] Process user action and tx events 2026-01-08T15:30:19.655642Z node 6 :PERSQUEUE DEBUG: partition.cpp:2388: [72075186224037898][Partition][0][StateIdle] Events: 0, PendingCommits: 0, PendingWrites: 0 2026-01-08T15:30:19.655665Z node 6 :PERSQUEUE DEBUG: partition.cpp:2337: [72075186224037898][Partition][0][StateIdle] Process user action and tx pending commits 2026-01-08T15:30:19.655697Z node 6 :PERSQUEUE DEBUG: partition.cpp:2388: [72075186224037898][Partition][0][StateIdle] Events: 0, PendingCommits: 0, PendingWrites: 0 2026-01-08T15:30:19.655718Z node 6 :PERSQUEUE DEBUG: partition.cpp:2347: [72075186224037898][Partition][0][StateIdle] Try persist 2026-01-08T15:30:19.718600Z node 6 :PERSQUEUE DEBUG: partition.cpp:2329: [72075186224037892][Partition][0][StateIdle] Process user action and tx events 2026-01-08T15:30:19.718646Z node 6 :PERSQUEUE DEBUG: partition.cpp:2388: [72075186224037892][Partition][0][StateIdle] Events: 0, PendingCommits: 0, PendingWrites: 0 2026-01-08T15:30:19.718665Z node 6 :PERSQUEUE DEBUG: partition.cpp:2337: [72075186224037892][Partition][0][StateIdle] Process user action and tx pending commits 2026-01-08T15:30:19.718691Z node 6 :PERSQUEUE DEBUG: partition.cpp:2388: [72075186224037892][Partition][0][StateIdle] Events: 0, PendingCommits: 0, PendingWrites: 0 2026-01-08T15:30:19.718712Z node 6 :PERSQUEUE DEBUG: partition.cpp:2347: [72075186224037892][Partition][0][StateIdle] Try persist 2026-01-08T15:30:19.718779Z node 6 :PERSQUEUE DEBUG: 
partition.cpp:2329: [72075186224037894][Partition][1][StateIdle] Process user action and tx events 2026-01-08T15:30:19.718790Z node 6 :PERSQUEUE DEBUG: partition.cpp:2388: [72075186224037894][Partition][1][StateIdle] Events: 0, PendingCommits: 0, PendingWrites: 0 2026-01-08T15:30:19.718799Z node 6 :PERSQUEUE DEBUG: partition.cpp:2337: [72075186224037894][Partition][1][StateIdle] Process user action and tx pending commits 2026-01-08T15:30:19.718811Z node 6 :PERSQUEUE DEBUG: partition.cpp:2388: [72075186224037894][Partition][1][StateIdle] Events: 0, PendingCommits: 0, PendingWrites: 0 2026-01-08T15:30:19.718818Z node 6 :PERSQUEUE DEBUG: partition.cpp:2347: [72075186224037894][Partition][1][StateIdle] Try persist 2026-01-08T15:30:19.718847Z node 6 :PERSQUEUE DEBUG: partition.cpp:2329: [72075186224037895][Partition][2][StateIdle] Process user action and tx events 2026-01-08T15:30:19.718857Z node 6 :PERSQUEUE DEBUG: partition.cpp:2388: [72075186224037895][Partition][2][StateIdle] Events: 0, PendingCommits: 0, PendingWrites: 0 2026-01-08T15:30:19.718865Z node 6 :PERSQUEUE DEBUG: partition.cpp:2337: [72075186224037895][Partition][2][StateIdle] Process user action and tx pending commits 2026-01-08T15:30:19.718878Z node 6 :PERSQUEUE DEBUG: partition.cpp:2388: [72075186224037895][Partition][2][StateIdle] Events: 0, PendingCommits: 0, PendingWrites: 0 2026-01-08T15:30:19.718886Z node 6 :PERSQUEUE DEBUG: partition.cpp:2347: [72075186224037895][Partition][2][StateIdle] Try persist 2026-01-08T15:30:19.718913Z node 6 :PERSQUEUE DEBUG: partition.cpp:2329: [72075186224037897][Partition][4][StateIdle] Process user action and tx events 2026-01-08T15:30:19.718921Z node 6 :PERSQUEUE DEBUG: partition.cpp:2388: [72075186224037897][Partition][4][StateIdle] Events: 0, PendingCommits: 0, PendingWrites: 0 2026-01-08T15:30:19.718928Z node 6 :PERSQUEUE DEBUG: partition.cpp:2337: [72075186224037897][Partition][4][StateIdle] Process user action and tx pending commits 2026-01-08T15:30:19.718940Z node 6 :PERSQUEUE DEBUG: partition.cpp:2388: [72075186224037897][Partition][4][StateIdle] Events: 0, PendingCommits: 0, PendingWrites: 0 2026-01-08T15:30:19.718947Z node 6 :PERSQUEUE DEBUG: partition.cpp:2347: [72075186224037897][Partition][4][StateIdle] Try persist 2026-01-08T15:30:19.718973Z node 6 :PERSQUEUE DEBUG: partition.cpp:2329: [72075186224037896][Partition][3][StateIdle] Process user action and tx events 2026-01-08T15:30:19.718986Z node 6 :PERSQUEUE DEBUG: partition.cpp:2388: [72075186224037896][Partition][3][StateIdle] Events: 0, PendingCommits: 0, PendingWrites: 0 2026-01-08T15:30:19.718995Z node 6 :PERSQUEUE DEBUG: partition.cpp:2337: [72075186224037896][Partition][3][StateIdle] Process user action and tx pending commits 2026-01-08T15:30:19.719005Z node 6 :PERSQUEUE DEBUG: partition.cpp:2388: [72075186224037896][Partition][3][StateIdle] Events: 0, PendingCommits: 0, PendingWrites: 0 2026-01-08T15:30:19.719013Z node 6 :PERSQUEUE DEBUG: partition.cpp:2347: [72075186224037896][Partition][3][StateIdle] Try persist 2026-01-08T15:30:19.758674Z node 6 :PERSQUEUE DEBUG: partition.cpp:2329: [72075186224037898][Partition][0][StateIdle] Process user action and tx events 2026-01-08T15:30:19.758716Z node 6 :PERSQUEUE DEBUG: partition.cpp:2388: [72075186224037898][Partition][0][StateIdle] Events: 0, PendingCommits: 0, PendingWrites: 0 2026-01-08T15:30:19.758736Z node 6 :PERSQUEUE DEBUG: partition.cpp:2337: [72075186224037898][Partition][0][StateIdle] Process user action and tx pending commits 2026-01-08T15:30:19.758764Z node 6 
:PERSQUEUE DEBUG: partition.cpp:2388: [72075186224037898][Partition][0][StateIdle] Events: 0, PendingCommits: 0, PendingWrites: 0 2026-01-08T15:30:19.758781Z node 6 :PERSQUEUE DEBUG: partition.cpp:2347: [72075186224037898][Partition][0][StateIdle] Try persist 2026-01-08T15:30:19.818780Z node 6 :PERSQUEUE DEBUG: partition.cpp:2329: [72075186224037892][Partition][0][StateIdle] Process user action and tx events 2026-01-08T15:30:19.818833Z node 6 :PERSQUEUE DEBUG: partition.cpp:2388: [72075186224037892][Partition][0][StateIdle] Events: 0, PendingCommits: 0, PendingWrites: 0 2026-01-08T15:30:19.818858Z node 6 :PERSQUEUE DEBUG: partition.cpp:2337: [72075186224037892][Partition][0][StateIdle] Process user action and tx pending commits 2026-01-08T15:30:19.818888Z node 6 :PERSQUEUE DEBUG: partition.cpp:2388: [72075186224037892][Partition][0][StateIdle] Events: 0, PendingCommits: 0, PendingWrites: 0 2026-01-08T15:30:19.818907Z node 6 :PERSQUEUE DEBUG: partition.cpp:2347: [72075186224037892][Partition][0][StateIdle] Try persist 2026-01-08T15:30:19.818975Z node 6 :PERSQUEUE DEBUG: partition.cpp:2329: [72075186224037894][Partition][1][StateIdle] Process user action and tx events 2026-01-08T15:30:19.818987Z node 6 :PERSQUEUE DEBUG: partition.cpp:2388: [72075186224037894][Partition][1][StateIdle] Events: 0, PendingCommits: 0, PendingWrites: 0 2026-01-08T15:30:19.818997Z node 6 :PERSQUEUE DEBUG: partition.cpp:2337: [72075186224037894][Partition][1][StateIdle] Process user action and tx pending commits 2026-01-08T15:30:19.819011Z node 6 :PERSQUEUE DEBUG: partition.cpp:2388: [72075186224037894][Partition][1][StateIdle] Events: 0, PendingCommits: 0, PendingWrites: 0 2026-01-08T15:30:19.819022Z node 6 :PERSQUEUE DEBUG: partition.cpp:2347: [72075186224037894][Partition][1][StateIdle] Try persist 2026-01-08T15:30:19.819052Z node 6 :PERSQUEUE DEBUG: partition.cpp:2329: [72075186224037895][Partition][2][StateIdle] Process user action and tx events 2026-01-08T15:30:19.819062Z node 6 :PERSQUEUE DEBUG: partition.cpp:2388: [72075186224037895][Partition][2][StateIdle] Events: 0, PendingCommits: 0, PendingWrites: 0 2026-01-08T15:30:19.819072Z node 6 :PERSQUEUE DEBUG: partition.cpp:2337: [72075186224037895][Partition][2][StateIdle] Process user action and tx pending commits 2026-01-08T15:30:19.819083Z node 6 :PERSQUEUE DEBUG: partition.cpp:2388: [72075186224037895][Partition][2][StateIdle] Events: 0, PendingCommits: 0, PendingWrites: 0 2026-01-08T15:30:19.819092Z node 6 :PERSQUEUE DEBUG: partition.cpp:2347: [72075186224037895][Partition][2][StateIdle] Try persist 2026-01-08T15:30:19.819120Z node 6 :PERSQUEUE DEBUG: partition.cpp:2329: [72075186224037897][Partition][4][StateIdle] Process user action and tx events 2026-01-08T15:30:19.819131Z node 6 :PERSQUEUE DEBUG: partition.cpp:2388: [72075186224037897][Partition][4][StateIdle] Events: 0, PendingCommits: 0, PendingWrites: 0 2026-01-08T15:30:19.819140Z node 6 :PERSQUEUE DEBUG: partition.cpp:2337: [72075186224037897][Partition][4][StateIdle] Process user action and tx pending commits 2026-01-08T15:30:19.819152Z node 6 :PERSQUEUE DEBUG: partition.cpp:2388: [72075186224037897][Partition][4][StateIdle] Events: 0, PendingCommits: 0, PendingWrites: 0 2026-01-08T15:30:19.819161Z node 6 :PERSQUEUE DEBUG: partition.cpp:2347: [72075186224037897][Partition][4][StateIdle] Try persist 2026-01-08T15:30:19.819189Z node 6 :PERSQUEUE DEBUG: partition.cpp:2329: [72075186224037896][Partition][3][StateIdle] Process user action and tx events 2026-01-08T15:30:19.819202Z node 6 :PERSQUEUE 
DEBUG: partition.cpp:2388: [72075186224037896][Partition][3][StateIdle] Events: 0, PendingCommits: 0, PendingWrites: 0 2026-01-08T15:30:19.819210Z node 6 :PERSQUEUE DEBUG: partition.cpp:2337: [72075186224037896][Partition][3][StateIdle] Process user action and tx pending commits 2026-01-08T15:30:19.819222Z node 6 :PERSQUEUE DEBUG: partition.cpp:2388: [72075186224037896][Partition][3][StateIdle] Events: 0, PendingCommits: 0, PendingWrites: 0 2026-01-08T15:30:19.819231Z node 6 :PERSQUEUE DEBUG: partition.cpp:2347: [72075186224037896][Partition][3][StateIdle] Try persist 2026-01-08T15:30:19.862873Z node 6 :PERSQUEUE DEBUG: partition.cpp:2329: [72075186224037898][Partition][0][StateIdle] Process user action and tx events 2026-01-08T15:30:19.862917Z node 6 :PERSQUEUE DEBUG: partition.cpp:2388: [72075186224037898][Partition][0][StateIdle] Events: 0, PendingCommits: 0, PendingWrites: 0 2026-01-08T15:30:19.862940Z node 6 :PERSQUEUE DEBUG: partition.cpp:2337: [72075186224037898][Partition][0][StateIdle] Process user action and tx pending commits 2026-01-08T15:30:19.862969Z node 6 :PERSQUEUE DEBUG: partition.cpp:2388: [72075186224037898][Partition][0][StateIdle] Events: 0, PendingCommits: 0, PendingWrites: 0 2026-01-08T15:30:19.862990Z node 6 :PERSQUEUE DEBUG: partition.cpp:2347: [72075186224037898][Partition][0][StateIdle] Try persist |86.1%| [TM] {BAZEL_UPLOAD} ydb/core/persqueue/public/fetcher/ut/unittest >> TTxDataShardLocalKMeansScan::BuildToBuildWithOverlap [GOOD] >> TTxDataShardLocalKMeansScan::BuildToBuild_Ranges >> TMemoryController::GroupedMemoryLimiter_ConfigCS [GOOD] >> TMemoryController::ColumnShardCaches_Config >> test_fifo_messaging.py::TestSqsFifoMessagingWithTenant::test_deduplication[tables_format_v1-content_based] [GOOD] |86.1%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/schemeshard/ut_topic_set_boundaries/ydb-core-tx-schemeshard-ut_topic_set_boundaries |86.1%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/schemeshard/ut_topic_set_boundaries/ydb-core-tx-schemeshard-ut_topic_set_boundaries |86.1%| [TM] {RESULT} ydb/core/persqueue/public/fetcher/ut/unittest |86.1%| [LD] {RESULT} $(B)/ydb/core/tx/schemeshard/ut_topic_set_boundaries/ydb-core-tx-schemeshard-ut_topic_set_boundaries |86.1%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/http_proxy/ut/ydb-core-http_proxy-ut >> KqpBatchDelete::Large_2 |86.1%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/http_proxy/ut/ydb-core-http_proxy-ut |86.1%| [LD] {RESULT} $(B)/ydb/core/http_proxy/ut/ydb-core-http_proxy-ut >> QuoterWithKesusTest::PrefetchCoefficient [GOOD] >> QuoterWithKesusTest::GetsQuotaAfterPause >> KqpBatchUpdate::DisableFlags-UseSink-UseBatchUpdates [GOOD] |86.1%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/columnshard/ut_rw/ydb-core-tx-columnshard-ut_rw |86.1%| [LD] {RESULT} $(B)/ydb/core/tx/columnshard/ut_rw/ydb-core-tx-columnshard-ut_rw |86.1%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/columnshard/ut_rw/ydb-core-tx-columnshard-ut_rw |86.1%| [TM] {asan, default-linux-x86_64, release} ydb/public/sdk/cpp/tests/integration/sessions/gtest >> YdbSdkSessions::CloseSessionWithSessionPoolFromDtors [GOOD] |86.1%| [TM] {BAZEL_UPLOAD} ydb/public/sdk/cpp/tests/integration/sessions/gtest >> TSchemeShardAuditSettings::AlterExtSubdomain-ExternalSchemeShard-false >> KeyValueGRPCService::SimpleWriteReadWithoutLockGeneration2V2 [GOOD] >> KeyValueGRPCService::SimpleWriteReadWithGetChannelStatusV1 |86.1%| [TA] $(B)/ydb/public/sdk/cpp/tests/integration/sessions/test-results/gtest/{meta.json ... 
results_accumulator.log} |86.1%| [TA] {BAZEL_UPLOAD} $(B)/ydb/public/sdk/cpp/tests/integration/sessions/test-results/gtest/{meta.json ... results_accumulator.log} ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/batch_operations/unittest >> KqpBatchUpdate::DisableFlags-UseSink-UseBatchUpdates [GOOD] Test command err: Trying to start YDB, gRPC: 19397, MsgBus: 61672 2026-01-08T15:30:13.636288Z node 1 :METADATA_PROVIDER WARN: log.cpp:841: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7593013470581936453:2218];send_to=[0:7307199536658146131:7762515]; 2026-01-08T15:30:13.636352Z node 1 :METADATA_PROVIDER ERROR: log.cpp:841: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2026-01-08T15:30:14.664673Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2026-01-08T15:30:14.664793Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2026-01-08T15:30:14.696728Z node 1 :TX_CONVEYOR ERROR: log.cpp:841: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2026-01-08T15:30:14.696936Z node 1 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: LookupError, path: Root/.metadata/script_executions 2026-01-08T15:30:14.801041Z node 1 :IMPORT WARN: schemeshard_import.cpp:440: Table profiles were not loaded 2026-01-08T15:30:14.814554Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1374: Notification cookie mismatch for subscription [1:7593013470581936244:2081] 1767886213574071 != 1767886213574074 2026-01-08T15:30:14.815243Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2026-01-08T15:30:15.035387Z node 1 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: PathErrorUnknown, path: Root/.metadata/script_executions 2026-01-08T15:30:15.161641Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2026-01-08T15:30:15.161667Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2026-01-08T15:30:15.161673Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2026-01-08T15:30:15.161749Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2026-01-08T15:30:16.270038Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2026-01-08T15:30:16.287134Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:123: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2026-01-08T15:30:16.377465Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: 
ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:30:16.825550Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:30:17.111974Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:30:17.216911Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:30:18.635829Z node 1 :METADATA_PROVIDER ERROR: log.cpp:841: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7593013470581936453:2218];send_to=[0:7307199536658146131:7762515]; 2026-01-08T15:30:18.635896Z node 1 :METADATA_PROVIDER ERROR: log.cpp:841: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2026-01-08T15:30:20.848701Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7593013500646709198:2404], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:30:20.848814Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:263: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:30:20.854788Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7593013500646709208:2405], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:30:20.854880Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:263: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:30:21.595873Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:30:21.653081Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:30:21.876143Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:30:21.947628Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:30:22.021826Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:30:22.104740Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:30:22.181636Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:30:22.303780Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710669:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:30:22.545481Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7593013509236644710:2491], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:30:22.545586Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:263: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:30:22.546134Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7593013509236644715:2494], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:30:22.546172Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7593013509236644716:2495], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:30:22.546206Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:263: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:30:22.551161Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710670:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2026-01-08T15:30:22.589723Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7593013509236644719:2496], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710670 completed, doublechecking } 2026-01-08T15:30:22.726570Z node 1 :TX_PROXY ERROR: schemereq.cpp:624: Actor# [1:7593013509236644772:3784] txid# 281474976710671, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 53], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2026-01-08T15:30:26.003277Z node 1 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:625: Compilation failed, self: [1:7593013522121546946:2543], status: PRECONDITION_FAILED, issues:
: Error: Execution, code: 1060
:4:41: Error: BATCH operations are not supported at the current time., code: 2029
: Error: Execution, code: 1060
:4:41: Error: BATCH operations are not supported at the current time., code: 2029 2026-01-08T15:30:26.005792Z node 1 :KQP_SESSION WARN: {KQPSA@kqp_session_actor.cpp:2679} SessionId: ydb://session/3?node_id=1&id=YjZhZTA3My05NjUyZjQ3OC1lOGMzYzM1Mi0yZTg3ZTAxNQ==, ActorId: [1:7593013522121546935:2536], ActorState: ExecuteState, LegacyTraceId: 01kef3p0dqepe043werr0kpfps, ReplyQueryCompileError, remove tx status# PRECONDITION_FAILED issues# { message: "Execution" issue_code: 1060 severity: 1 issues { position { row: 4 column: 41 } message: "BATCH operations are not supported at the current time." end_position { row: 4 column: 41 } issue_code: 2029 severity: 1 } }{ message: "Execution" issue_code: 1060 severity: 1 issues { position { row: 4 column: 41 } message: "BATCH operations are not supported at the current time." end_position { row: 4 column: 41 } issue_code: 2029 severity: 1 } } tx_id# trace_id# |86.1%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/sqs/messaging/py3test >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_visibility_timeout_works[tables_format_v0] [GOOD] |86.1%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/persqueue/ut/ydb-core-persqueue-ut |86.1%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/persqueue/ut/ydb-core-persqueue-ut |86.1%| [TA] {RESULT} $(B)/ydb/public/sdk/cpp/tests/integration/sessions/test-results/gtest/{meta.json ... results_accumulator.log} |86.1%| [LD] {RESULT} $(B)/ydb/core/persqueue/ut/ydb-core-persqueue-ut |86.1%| [TM] {BAZEL_UPLOAD} ydb/core/kqp/ut/batch_operations/unittest |86.1%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/sqs/messaging/py3test >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_queue_attributes[tables_format_v0-std] [GOOD] |86.1%| [TM] {BAZEL_UPLOAD} ydb/tests/functional/sqs/messaging/py3test |86.1%| [TM] {BAZEL_UPLOAD} ydb/tests/functional/sqs/messaging/py3test >> CoordinatorVolatile::PlanResentOnReboots [GOOD] >> CoordinatorVolatile::MediatorReconnectPlanRace >> TSelectFromViewTest::OneTable [GOOD] >> TSelectFromViewTest::OneTableUsingRelativeName >> TSchemeShardAuditSettings::AlterSubdomain |86.1%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/services/datastreams/ut/ydb-services-datastreams-ut |86.1%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/services/datastreams/ut/ydb-services-datastreams-ut |86.1%| [LD] {RESULT} $(B)/ydb/services/datastreams/ut/ydb-services-datastreams-ut >> KqpBatchUpdate::UpdateOn [GOOD] |86.1%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_auditsettings/unittest |86.2%| [TM] {BAZEL_UPLOAD} ydb/core/tx/schemeshard/ut_auditsettings/unittest >> TSchemeShardAuditSettings::AlterExtSubdomain-ExternalSchemeShard-true |86.1%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/sqs/messaging/py3test >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_send_message_batch[tables_format_v0-std] [GOOD] |86.2%| [TM] {BAZEL_UPLOAD} ydb/tests/functional/sqs/messaging/py3test >> test_update_script_tables.py::TestUpdateScriptTablesYdb::test_recreate_tables[ALTER TABLE {} DROP COLUMN syntax, DROP COLUMN ast, DROP COLUMN stats-`.metadata/script_executions`] [GOOD] |86.1%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_auditsettings/unittest |86.1%| [TM] {BAZEL_UPLOAD} ydb/core/tx/schemeshard/ut_auditsettings/unittest ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/batch_operations/unittest >> KqpBatchUpdate::UpdateOn [GOOD] Test command err: Trying to 
start YDB, gRPC: 31818, MsgBus: 25504 2026-01-08T15:30:19.185040Z node 1 :METADATA_PROVIDER WARN: log.cpp:841: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7593013496482252597:2201];send_to=[0:7307199536658146131:7762515]; 2026-01-08T15:30:19.185360Z node 1 :METADATA_PROVIDER ERROR: log.cpp:841: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2026-01-08T15:30:19.815851Z node 1 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: LookupError, path: Root/.metadata/script_executions 2026-01-08T15:30:19.911356Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2026-01-08T15:30:19.911468Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2026-01-08T15:30:20.034458Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1374: Notification cookie mismatch for subscription [1:7593013496482252423:2081] 1767886219160486 != 1767886219160489 2026-01-08T15:30:20.040549Z node 1 :IMPORT WARN: schemeshard_import.cpp:440: Table profiles were not loaded 2026-01-08T15:30:20.089529Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2026-01-08T15:30:20.106252Z node 1 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: PathErrorUnknown, path: Root/.metadata/script_executions 2026-01-08T15:30:20.204542Z node 1 :TX_CONVEYOR ERROR: log.cpp:841: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2026-01-08T15:30:20.422949Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2026-01-08T15:30:20.422976Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2026-01-08T15:30:20.422983Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2026-01-08T15:30:20.423152Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2026-01-08T15:30:21.584189Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2026-01-08T15:30:21.610786Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:123: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2026-01-08T15:30:21.850600Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:30:22.248149Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo 
unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:30:22.719188Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:30:22.863003Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:30:24.182449Z node 1 :METADATA_PROVIDER ERROR: log.cpp:841: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7593013496482252597:2201];send_to=[0:7307199536658146131:7762515]; 2026-01-08T15:30:24.182515Z node 1 :METADATA_PROVIDER ERROR: log.cpp:841: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2026-01-08T15:30:26.787110Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7593013526547025388:2404], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:30:26.787225Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:263: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:30:26.787703Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7593013526547025398:2405], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:30:26.787790Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:263: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:30:27.548719Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:30:27.609237Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:30:27.698818Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:30:27.767329Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:30:27.819907Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:30:27.894389Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:30:27.964645Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:30:28.089305Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710669:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:30:28.246996Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7593013535136960897:2490], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:30:28.247089Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:263: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:30:28.247484Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7593013535136960902:2493], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:30:28.247523Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7593013535136960903:2494], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:30:28.247634Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:263: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:30:28.252168Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710670:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2026-01-08T15:30:28.269186Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7593013535136960906:2495], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710670 completed, doublechecking } 2026-01-08T15:30:28.371951Z node 1 :TX_PROXY ERROR: schemereq.cpp:624: Actor# [1:7593013535136960957:3783] txid# 281474976710671, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 53], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2026-01-08T15:30:30.992458Z node 1 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:625: Compilation failed, self: [1:7593013543726895857:2546], status: GENERIC_ERROR, issues:
:2:22: Error: BATCH UPDATE is unsupported with ON 2026-01-08T15:30:30.993508Z node 1 :KQP_SESSION WARN: {KQPSA@kqp_session_actor.cpp:2679} SessionId: ydb://session/3?node_id=1&id=OWRkNmEwOTAtN2U1YmNlYjEtN2I2YzA2OC04YmEyNWMxNA==, ActorId: [1:7593013543726895848:2540], ActorState: ExecuteState, LegacyTraceId: 01kef3p5dmed2jjy2dd81w41wa, ReplyQueryCompileError, remove tx status# GENERIC_ERROR issues# { position { row: 2 column: 22 } message: "BATCH UPDATE is unsupported with ON" end_position { row: 2 column: 22 } severity: 1 } tx_id# trace_id# |86.2%| [TM] {BAZEL_UPLOAD} ydb/core/kqp/ut/batch_operations/unittest >> TSchemeShardAuditSettings::CreateSubdomain >> TMemoryController::ColumnShardCaches_Config [GOOD] >> TMemTableMemoryConsumersCollection::Empty [GOOD] >> TMemTableMemoryConsumersCollection::Destruction [GOOD] >> TMemTableMemoryConsumersCollection::Register [GOOD] >> TMemTableMemoryConsumersCollection::Unregister [GOOD] >> TMemTableMemoryConsumersCollection::SetConsumption [GOOD] >> TMemTableMemoryConsumersCollection::CompactionComplete [GOOD] >> TMemTableMemoryConsumersCollection::SelectForCompaction [GOOD] >> VDiskTest::HugeBlobWrite [GOOD] |86.2%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/datashard/ut_volatile/ydb-core-tx-datashard-ut_volatile |86.2%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/datashard/ut_volatile/ydb-core-tx-datashard-ut_volatile |86.2%| [LD] {RESULT} $(B)/ydb/core/tx/datashard/ut_volatile/ydb-core-tx-datashard-ut_volatile >> TSchemeShardAuditSettings::CreateExtSubdomain >> TSchemeShardAuditSettings::AlterExtSubdomain-ExternalSchemeShard-false [GOOD] >> test_update_script_tables.py::TestUpdateScriptTablesYdb::test_recreate_tables[ALTER TABLE {} DROP COLUMN syntax, DROP COLUMN ast-`.metadata/script_executions`] [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/ut_vdisk2/unittest >> VDiskTest::HugeBlobWrite [GOOD] Test command err: Put id# [29:1:1:0:0:1048576:1] totalSize# 0 blobValueIndex# 45 Trim Put id# [25:1:1:0:0:1572864:1] totalSize# 1048576 blobValueIndex# 56 Put id# [81:1:1:0:0:589824:1] totalSize# 2621440 blobValueIndex# 33 Put id# [11:1:1:0:0:40960:1] totalSize# 3211264 blobValueIndex# 23 Change MinHugeBlobSize# 65536 Put id# [68:1:1:0:0:10:1] totalSize# 3252224 blobValueIndex# 8 Put id# [28:1:1:0:0:1572864:1] totalSize# 3252234 blobValueIndex# 54 Put id# [78:1:1:0:0:40960:1] totalSize# 4825098 blobValueIndex# 20 Put id# [4:1:1:0:0:40960:1] totalSize# 4866058 blobValueIndex# 25 Put id# [29:1:2:0:0:40960:1] totalSize# 4907018 blobValueIndex# 26 Put id# [55:1:1:0:0:1572864:1] totalSize# 4947978 blobValueIndex# 51 Put id# [21:1:1:0:0:1048576:1] totalSize# 6520842 blobValueIndex# 44 Put id# [25:1:2:0:0:589824:1] totalSize# 7569418 blobValueIndex# 37 Put id# [27:1:1:0:0:589824:1] totalSize# 8159242 blobValueIndex# 39 Change MinHugeBlobSize# 8192 Restart Put id# [90:1:1:0:0:1024:1] totalSize# 8749066 blobValueIndex# 19 Change MinHugeBlobSize# 61440 Trim Put id# [93:1:1:0:0:1572864:1] totalSize# 8750090 blobValueIndex# 52 Put id# [40:1:1:0:0:1024:1] totalSize# 10322954 blobValueIndex# 19 Trim Put id# [44:1:1:0:0:1048576:1] totalSize# 10323978 blobValueIndex# 45 Change MinHugeBlobSize# 8192 Put id# [23:1:1:0:0:10:1] totalSize# 11372554 blobValueIndex# 0 Trim Put id# [96:1:1:0:0:1048576:1] totalSize# 11372564 blobValueIndex# 48 Trim Put id# [63:1:1:0:0:589824:1] totalSize# 12421140 blobValueIndex# 30 Put id# [99:1:1:0:0:10:1] totalSize# 13010964 blobValueIndex# 8 Change MinHugeBlobSize# 12288 Put id# 
[73:1:1:0:0:1048576:1] totalSize# 13010974 blobValueIndex# 47 Change MinHugeBlobSize# 524288 Put id# [79:1:1:0:0:40960:1] totalSize# 14059550 blobValueIndex# 29 Put id# [18:1:1:0:0:40960:1] totalSize# 14100510 blobValueIndex# 27 Trim Put id# [68:1:2:0:0:10:1] totalSize# 14141470 blobValueIndex# 8 Trim Put id# [100:1:1:0:0:1024:1] totalSize# 14141480 blobValueIndex# 16 Put id# [34:1:1:0:0:589824:1] totalSize# 14142504 blobValueIndex# 37 Put id# [31:1:1:0:0:1024:1] totalSize# 14732328 blobValueIndex# 15 Put id# [98:1:1:0:0:1024:1] totalSize# 14733352 blobValueIndex# 11 Put id# [60:1:1:0:0:589824:1] totalSize# 14734376 blobValueIndex# 38 Change MinHugeBlobSize# 12288 Put id# [15:1:1:0:0:10:1] totalSize# 15324200 blobValueIndex# 5 Put id# [37:1:1:0:0:1048576:1] totalSize# 15324210 blobValueIndex# 40 Put id# [24:1:1:0:0:10:1] totalSize# 16372786 blobValueIndex# 6 Change MinHugeBlobSize# 65536 Put id# [84:1:1:0:0:1572864:1] totalSize# 16372796 blobValueIndex# 52 Put id# [56:1:1:0:0:1024:1] totalSize# 17945660 blobValueIndex# 15 Put id# [53:1:1:0:0:40960:1] totalSize# 17946684 blobValueIndex# 24 Restart Put id# [65:1:1:0:0:40960:1] totalSize# 17987644 blobValueIndex# 25 Put id# [68:1:3:0:0:10:1] totalSize# 18028604 blobValueIndex# 6 Put id# [2:1:1:0:0:1048576:1] totalSize# 18028614 blobValueIndex# 45 Put id# [76:1:1:0:0:589824:1] totalSize# 19077190 blobValueIndex# 36 Put id# [23:1:2:0:0:1024:1] totalSize# 19667014 blobValueIndex# 14 Put id# [20:1:1:0:0:1024:1] totalSize# 19668038 blobValueIndex# 18 Trim Put id# [59:1:1:0:0:589824:1] totalSize# 19669062 blobValueIndex# 36 Put id# [59:1:2:0:0:1048576:1] totalSize# 20258886 blobValueIndex# 41 Trim Put id# [18:1:2:0:0:10:1] totalSize# 21307462 blobValueIndex# 6 Put id# [27:1:2:0:0:1572864:1] totalSize# 21307472 blobValueIndex# 58 Change MinHugeBlobSize# 8192 Put id# [70:1:1:0:0:1572864:1] totalSize# 22880336 blobValueIndex# 52 Trim Put id# [86:1:1:0:0:1572864:1] totalSize# 24453200 blobValueIndex# 58 Change MinHugeBlobSize# 61440 Put id# [82:1:1:0:0:1024:1] totalSize# 26026064 blobValueIndex# 11 Put id# [71:1:1:0:0:589824:1] totalSize# 26027088 blobValueIndex# 32 Put id# [46:1:1:0:0:1024:1] totalSize# 26616912 blobValueIndex# 10 Put id# [29:1:3:0:0:1048576:1] totalSize# 26617936 blobValueIndex# 46 Restart Put id# [54:1:1:0:0:1048576:1] totalSize# 27666512 blobValueIndex# 40 Trim Put id# [93:1:2:0:0:589824:1] totalSize# 28715088 blobValueIndex# 32 Put id# [96:1:2:0:0:10:1] totalSize# 29304912 blobValueIndex# 0 Put id# [19:1:1:0:0:589824:1] totalSize# 29304922 blobValueIndex# 32 Change MinHugeBlobSize# 12288 Put id# [82:1:2:0:0:10:1] totalSize# 29894746 blobValueIndex# 0 Change MinHugeBlobSize# 65536 Put id# [11:1:2:0:0:40960:1] totalSize# 29894756 blobValueIndex# 26 Put id# [35:1:1:0:0:40960:1] totalSize# 29935716 blobValueIndex# 23 Put id# [52:1:1:0:0:1024:1] totalSize# 29976676 blobValueIndex# 14 Trim Put id# [26:1:1:0:0:10:1] totalSize# 29977700 blobValueIndex# 0 Put id# [21:1:2:0:0:1572864:1] totalSize# 29977710 blobValueIndex# 55 Put id# [12:1:1:0:0:40960:1] totalSize# 31550574 blobValueIndex# 23 Put id# [23:1:3:0:0:1048576:1] totalSize# 31591534 blobValueIndex# 47 Put id# [73:1:2:0:0:1572864:1] totalSize# 32640110 blobValueIndex# 55 Put id# [78:1:2:0:0:589824:1] totalSize# 34212974 blobValueIndex# 36 Put id# [40:1:2:0:0:589824:1] totalSize# 34802798 blobValueIndex# 31 Put id# [35:1:2:0:0:1572864:1] totalSize# 35392622 blobValueIndex# 51 Put id# [100:1:2:0:0:1024:1] totalSize# 36965486 blobValueIndex# 11 Put id# [72:1:1:0:0:1572864:1] totalSize# 
36966510 blobValueIndex# 54 Put id# [94:1:1:0:0:10:1] totalSize# 38539374 blobValueIndex# 1 Put id# [21:1:3:0:0:10:1] totalSize# 38539384 blobValueIndex# 1 Put id# [61:1:1:0:0:589824:1] totalSize# 38539394 blobValueIndex# 31 Put id# [93:1:3:0:0:10:1] totalSize# 39129218 blobValueIndex# 2 Put id# [26:1:2:0:0:1572864:1] totalSize# 39129228 blobValueIndex# 50 Put id# [44:1:2:0:0:589824:1] totalSize# 40702092 blobValueIndex# 36 Put id# [94:1:2:0:0:589824:1] totalSize# 41291916 blobValueIndex# 35 Trim Put id# [36:1:1:0:0:1048576:1] totalSize# 41881740 blobValueIndex# 42 Put id# [8:1:1:0:0:10:1] totalSize# 42930316 blobValueIndex# 8 Change MinHugeBlobSize# 12288 Put id# [30:1:1:0:0:589824:1] totalSize# 42930326 blobValueIndex# 31 Restart Put id# [72:1:2:0:0:1572864:1] totalSize# 43520150 blobValueIndex# 52 Put id# [94:1:3:0:0:40960:1] totalSize# 45093014 blobValueIndex# 27 Put id# [92:1:1:0:0:10:1] totalSize# 45133974 blobValueIndex# 8 Trim Put id# [1:1:1:0:0:1048576:1] totalSize# 45133984 blobValueIndex# 48 Put id# [84:1:2:0:0:589824:1] totalSize# 46182560 blobValueIndex# 39 Trim Put id# [51:1:1:0:0:589824:1] totalSize# 46772384 blobValueIndex# 39 Change MinHugeBlobSize# 8192 Put id# [57:1:1:0:0:1024:1] totalSize# 47362208 blobValueIndex# 16 Put id# [53:1:2:0:0:1572864:1] totalSize# 47363232 blobValueIndex# 58 Change MinHugeBlobSize# 12288 Put id# [63:1:2:0:0:589824:1] totalSize# 48936096 blobValueIndex# 35 Put id# [20:1:2:0:0:1024:1] totalSize# 49525920 blobValueIndex# 16 Put id# [23:1:4:0:0:589824:1] totalSize# 49526944 blobValueIndex# 39 Put id# [29:1:4:0:0:10:1] totalSize# 50116768 blobValueIndex# 7 Put id# [40:1:3:0:0:1048576:1] totalSize# 50116778 blobValueIndex# 43 Put id# [92:1:2:0:0:1024:1] totalSize# 51165354 blobValueIndex# 11 Put id# [12:1:2:0:0:40960:1] totalSize# 51166378 blobValueIndex# 23 Put id# [93:1:4:0:0:1048576:1] totalSize# 51207338 blobValueIndex# 44 Put id# [91:1:1:0:0:1048576:1] totalSize# 52255914 blobValueIndex# 46 Change MinHugeBlobSize# 65536 Put id# [66:1:1:0:0:40960:1] totalSize# 53304490 blobValueIndex# 20 Put id# [96:1:3:0:0:589824:1] totalSize# 53345450 blobValueIndex# 36 Put id# [16:1:1:0:0:1024:1] totalSize# 53935274 blobValueIndex# 11 Put id# [59:1:3:0:0:1048576:1] totalSize# 53936298 blobValueIndex# 49 Change MinHugeBlobSize# 524288 Put id# [49:1:1:0:0:40960:1] totalSize# 54984874 blobValueIndex# 21 Trim Put id# [28:1:2:0:0:10:1] totalSize# 55025834 blobValueIndex# 3 Put id# [52:1:2:0:0:40960:1] totalSize# 55025844 blobValueIndex# 29 Put id# [65:1:2:0:0:1024:1] totalSize# 55066804 blobValueIndex# 15 Put id# [62:1:1:0:0:40960:1] totalSize# 55067828 blobValueIndex# 21 Trim Put id# [63:1:3:0:0:1048576:1] totalSize# 55108788 blobValueIndex# 41 Trim Put id# [5:1:1:0:0:40960:1] totalSize# 56157364 blobValueIndex# 28 Trim Put id# [67:1:1:0:0:589824:1] totalSize# 56198324 blobValueIndex# 37 Trim Put id# [13:1:1:0:0:589824:1] totalSize# 56788148 blobValueIndex# 35 Put id# [32:1:1:0:0:10:1] totalSize# 57377972 blobValueIndex# 1 Put id# [90:1:2:0:0:10:1] totalSize# 57377982 blobValueIndex# 6 Put id# [16:1:2:0:0:40960:1] totalSize# 57377992 blobValueIndex# 25 Put id# [6:1:1:0:0:1048576:1] totalSize# 57418952 blobValueIndex# 49 Put id# [55:1:2:0:0:1572864:1] totalSize# 58467528 blobValueIndex# 52 Trim Put id# [99:1:2:0:0:1024:1] totalSize# 60040392 blobValueIndex# 10 Put id# [43:1:1:0:0:589824:1] totalSize# 60041416 blobValueIndex# 30 Put id# [89:1:1:0:0:10:1] totalSize# 60631240 blobValueIndex# 6 Put id# [94:1:4:0:0:1024:1] totalSize# 60631250 blobValueIndex# 16 Put 
id# [47:1:1:0:0:1048576:1] totalSize# 60632274 blobValueIndex# 43 Put id# [1:1:2:0:0:10:1] totalSize# 61680850 blobValueIndex# 5 Change MinHugeBlobSize# 12288 Put id# [33:1:1:0:0:10:1] totalSize# 61680860 blobValueIndex# 2 Trim Put id# [1:1:3:0:0:589824:1] totalSize# 61680870 blobValueIndex# 34 Put id# [77:1:1:0:0:40960:1] totalSize# 62270694 blobValueIndex# 26 Put id# [34:1:2:0:0:1024:1] totalSize# 62311654 blobValueIndex# 19 Put id# [55:1:3:0:0:1572864:1] totalSize# 62312678 blobValueIndex# 56 Put id# [91:1:2:0:0:10:1] totalSize# 63885542 blobValueIndex# 1 Put id# [81:1:2:0:0:1572864:1] totalSize# 63885552 blobValueIndex# 53 Put id# [80:1:1:0:0:10:1] totalSize# 65458416 blobValueIndex# 3 Put id# [23:1:5:0:0:1572864:1] totalSize# 65458426 blobValueIndex# 58 Change MinHugeBlobSize# 65536 Put id# [32:1:2:0:0:10:1] totalSize# 67031290 blobValueIndex# 9 Put id# [31:1:2:0:0:40960:1] totalSize# 67031300 blobValueIndex# 23 Change MinHugeBlobSize# 12288 Trim Put id# [41:1:1:0:0:589824:1] totalSize# 67072260 blobValueIndex# 33 Put id# [43:1:2:0:0:1048576:1] totalSize# 67662084 blobValueIndex# 42 Put id# [40:1:4:0:0:1572864:1] totalSize# 68710660 blobValueIndex# 50 Trim Put id# [33:1:2:0:0:1048576:1] totalSize# 70283524 blobValueIndex# 45 Trim Restart Put id# [10:1:1:0:0:1572864:1] totalSize# 71332100 blobValueIndex# 58 Put id# [68:1:4:0:0:589824:1] totalSize# 72904964 blobValueIndex# 32 Put id# [49:1:2:0:0:10:1] totalSize# 73494788 blobValueIndex# 8 Change MinHugeBlobSize# 65536 Put id# [23:1:6:0:0:40960:1] totalSize# 73494798 blobValueIndex# 28 Put id# [58:1:1:0:0:1024:1] totalSize# 73535758 blobValueIndex# 16 Restart Put id# [19:1:2:0:0:10:1] totalSize# 73536782 blobValueIndex# 9 Put id# [81:1:3:0:0:10:1] totalSize# 73536792 blobValueIndex# 5 Put id# [68:1:5:0:0:1024:1] totalSize# 73536802 blobValueIndex# 14 Put id# [28:1:3:0:0:40960:1] totalSize# 73537826 blobValueIndex# 23 Put id# [26:1:3:0:0:1024:1] totalSize# 73578786 blobValueIndex# 19 Put id# [90:1:3:0:0:40960:1] totalSize# 73579810 blobValueIndex# 26 Put id# [37:1:2:0:0:589824:1] totalSize# 73620770 blobValueIndex# 34 Trim Put id# [3:1:1:0:0:1024:1] totalSize# 74210594 blobValueIndex# 18 Trim Put id# [28:1:4:0:0:1572864:1] totalSize# 74211618 blobValueIndex# 59 Put id# [100:1:3:0:0:1048576:1] totalSize# 75784482 blobValueIndex# 42 Trim Put id# [96:1:4:0:0:1048576:1] totalSize# 76833058 blobValueIndex# 47 Put id# [58:1:2:0:0:40960:1] totalSize# 77881634 blobValueIndex# 27 Put id# [62:1:2:0:0:1048576:1] totalSize# 77922594 blobValueIndex# 48 Put id# [72:1:3:0:0:10:1] totalSize# 78971170 blobValueIndex# 4 Put id# [15:1:2:0:0:1048576:1] totalSize# 78971180 blobValueIndex# 45 Restart Put id# [14:1:1:0:0:1572864:1] totalSize# 80019756 blobValueIndex# 51 Put id# [27:1:3:0:0:40960:1] totalSize# 81592620 blobValueIndex# 23 Put id# [32:1:3:0:0:589824:1] totalSize# 81633580 blobValueIndex# 30 Put id# [25:1:3:0:0:589824:1] totalSize# 82223404 blobValueIndex# 33 Restart Put id# [100:1:4:0:0:1024:1] totalSize# 82813228 blobValueIndex# 17 Put id# [34:1:3:0:0:10:1] totalSize# 82814252 blobValueIndex# 9 Put id# [28:1:5:0:0:1024:1] totalSize# 82814262 blobValueIndex# 15 Put id# [66:1:2:0:0:1024:1] totalSize# 82815286 blobValueIndex# 14 Change MinHugeBlobSize# 524288 Put id# [98:1:2:0:0:589824:1] tot ... 
:18:0:0:1024:1] totalSize# 989551704 blobValueIndex# 15 Put id# [71:1:31:0:0:1048576:1] totalSize# 989552728 blobValueIndex# 41 Put id# [43:1:22:0:0:1048576:1] totalSize# 990601304 blobValueIndex# 43 Change MinHugeBlobSize# 8192 Trim Put id# [14:1:20:0:0:1048576:1] totalSize# 991649880 blobValueIndex# 49 Change MinHugeBlobSize# 12288 Put id# [30:1:15:0:0:589824:1] totalSize# 992698456 blobValueIndex# 30 Trim Put id# [50:1:20:0:0:10:1] totalSize# 993288280 blobValueIndex# 2 Put id# [67:1:26:0:0:589824:1] totalSize# 993288290 blobValueIndex# 36 Put id# [22:1:18:0:0:10:1] totalSize# 993878114 blobValueIndex# 8 Change MinHugeBlobSize# 61440 Trim Put id# [19:1:20:0:0:1572864:1] totalSize# 993878124 blobValueIndex# 55 Change MinHugeBlobSize# 65536 Put id# [74:1:16:0:0:10:1] totalSize# 995450988 blobValueIndex# 7 Change MinHugeBlobSize# 8192 Put id# [47:1:17:0:0:589824:1] totalSize# 995450998 blobValueIndex# 37 Put id# [3:1:23:0:0:1572864:1] totalSize# 996040822 blobValueIndex# 54 Put id# [67:1:27:0:0:40960:1] totalSize# 997613686 blobValueIndex# 22 Put id# [59:1:24:0:0:10:1] totalSize# 997654646 blobValueIndex# 7 Restart Put id# [46:1:17:0:0:10:1] totalSize# 997654656 blobValueIndex# 6 Trim Put id# [16:1:16:0:0:1024:1] totalSize# 997654666 blobValueIndex# 10 Put id# [40:1:17:0:0:1048576:1] totalSize# 997655690 blobValueIndex# 49 Put id# [29:1:23:0:0:40960:1] totalSize# 998704266 blobValueIndex# 29 Put id# [63:1:19:0:0:1572864:1] totalSize# 998745226 blobValueIndex# 57 Put id# [77:1:29:0:0:1048576:1] totalSize# 1000318090 blobValueIndex# 42 Change MinHugeBlobSize# 524288 Put id# [27:1:28:0:0:1048576:1] totalSize# 1001366666 blobValueIndex# 49 Put id# [66:1:16:0:0:589824:1] totalSize# 1002415242 blobValueIndex# 31 Put id# [59:1:25:0:0:10:1] totalSize# 1003005066 blobValueIndex# 0 Put id# [39:1:17:0:0:1024:1] totalSize# 1003005076 blobValueIndex# 12 Put id# [57:1:14:0:0:40960:1] totalSize# 1003006100 blobValueIndex# 29 Put id# [62:1:17:0:0:589824:1] totalSize# 1003047060 blobValueIndex# 32 Put id# [12:1:23:0:0:1048576:1] totalSize# 1003636884 blobValueIndex# 41 Put id# [40:1:18:0:0:589824:1] totalSize# 1004685460 blobValueIndex# 31 Put id# [89:1:17:0:0:589824:1] totalSize# 1005275284 blobValueIndex# 37 Trim Put id# [89:1:18:0:0:1572864:1] totalSize# 1005865108 blobValueIndex# 53 Put id# [69:1:18:0:0:589824:1] totalSize# 1007437972 blobValueIndex# 33 Put id# [53:1:25:0:0:10:1] totalSize# 1008027796 blobValueIndex# 0 Trim Put id# [67:1:28:0:0:1572864:1] totalSize# 1008027806 blobValueIndex# 58 Trim Put id# [12:1:24:0:0:1048576:1] totalSize# 1009600670 blobValueIndex# 43 Put id# [25:1:18:0:0:589824:1] totalSize# 1010649246 blobValueIndex# 35 Put id# [20:1:18:0:0:10:1] totalSize# 1011239070 blobValueIndex# 8 Put id# [25:1:19:0:0:40960:1] totalSize# 1011239080 blobValueIndex# 20 Restart Put id# [75:1:13:0:0:1024:1] totalSize# 1011280040 blobValueIndex# 15 Put id# [20:1:19:0:0:589824:1] totalSize# 1011281064 blobValueIndex# 39 Change MinHugeBlobSize# 61440 Restart Put id# [52:1:18:0:0:40960:1] totalSize# 1011870888 blobValueIndex# 26 Put id# [17:1:20:0:0:10:1] totalSize# 1011911848 blobValueIndex# 2 Put id# [33:1:14:0:0:40960:1] totalSize# 1011911858 blobValueIndex# 29 Change MinHugeBlobSize# 8192 Put id# [76:1:19:0:0:10:1] totalSize# 1011952818 blobValueIndex# 3 Put id# [75:1:14:0:0:1572864:1] totalSize# 1011952828 blobValueIndex# 58 Put id# [60:1:22:0:0:1024:1] totalSize# 1013525692 blobValueIndex# 16 Put id# [68:1:20:0:0:589824:1] totalSize# 1013526716 blobValueIndex# 39 Put id# [55:1:22:0:0:40960:1] 
totalSize# 1014116540 blobValueIndex# 20 Put id# [4:1:9:0:0:589824:1] totalSize# 1014157500 blobValueIndex# 35 Put id# [72:1:24:0:0:10:1] totalSize# 1014747324 blobValueIndex# 5 Put id# [27:1:29:0:0:1024:1] totalSize# 1014747334 blobValueIndex# 18 Put id# [51:1:25:0:0:1024:1] totalSize# 1014748358 blobValueIndex# 11 Put id# [93:1:15:0:0:1024:1] totalSize# 1014749382 blobValueIndex# 19 Trim Put id# [67:1:29:0:0:40960:1] totalSize# 1014750406 blobValueIndex# 29 Put id# [11:1:16:0:0:1048576:1] totalSize# 1014791366 blobValueIndex# 41 Put id# [21:1:20:0:0:589824:1] totalSize# 1015839942 blobValueIndex# 30 Trim Put id# [45:1:14:0:0:10:1] totalSize# 1016429766 blobValueIndex# 2 Put id# [63:1:20:0:0:1048576:1] totalSize# 1016429776 blobValueIndex# 46 Put id# [13:1:17:0:0:1024:1] totalSize# 1017478352 blobValueIndex# 19 Put id# [48:1:17:0:0:1024:1] totalSize# 1017479376 blobValueIndex# 15 Change MinHugeBlobSize# 12288 Put id# [59:1:26:0:0:1048576:1] totalSize# 1017480400 blobValueIndex# 46 Put id# [66:1:17:0:0:1024:1] totalSize# 1018528976 blobValueIndex# 13 Put id# [77:1:30:0:0:589824:1] totalSize# 1018530000 blobValueIndex# 30 Put id# [84:1:23:0:0:1048576:1] totalSize# 1019119824 blobValueIndex# 46 Put id# [6:1:26:0:0:1572864:1] totalSize# 1020168400 blobValueIndex# 55 Put id# [86:1:19:0:0:1572864:1] totalSize# 1021741264 blobValueIndex# 56 Trim Put id# [43:1:23:0:0:40960:1] totalSize# 1023314128 blobValueIndex# 29 Trim Put id# [8:1:14:0:0:1048576:1] totalSize# 1023355088 blobValueIndex# 40 Put id# [43:1:24:0:0:1572864:1] totalSize# 1024403664 blobValueIndex# 52 Put id# [27:1:30:0:0:589824:1] totalSize# 1025976528 blobValueIndex# 36 Put id# [69:1:19:0:0:1572864:1] totalSize# 1026566352 blobValueIndex# 57 Put id# [69:1:20:0:0:1572864:1] totalSize# 1028139216 blobValueIndex# 52 Put id# [72:1:25:0:0:1572864:1] totalSize# 1029712080 blobValueIndex# 51 Put id# [33:1:15:0:0:1048576:1] totalSize# 1031284944 blobValueIndex# 41 Put id# [78:1:16:0:0:1572864:1] totalSize# 1032333520 blobValueIndex# 57 Restart Put id# [47:1:18:0:0:40960:1] totalSize# 1033906384 blobValueIndex# 23 Put id# [70:1:18:0:0:1572864:1] totalSize# 1033947344 blobValueIndex# 52 Put id# [2:1:26:0:0:1024:1] totalSize# 1035520208 blobValueIndex# 12 Put id# [71:1:32:0:0:1048576:1] totalSize# 1035521232 blobValueIndex# 46 Put id# [54:1:20:0:0:1048576:1] totalSize# 1036569808 blobValueIndex# 46 Trim Put id# [20:1:20:0:0:10:1] totalSize# 1037618384 blobValueIndex# 0 Change MinHugeBlobSize# 524288 Put id# [79:1:28:0:0:40960:1] totalSize# 1037618394 blobValueIndex# 23 Put id# [5:1:24:0:0:40960:1] totalSize# 1037659354 blobValueIndex# 27 Put id# [73:1:15:0:0:1048576:1] totalSize# 1037700314 blobValueIndex# 47 Put id# [74:1:17:0:0:1048576:1] totalSize# 1038748890 blobValueIndex# 47 Put id# [76:1:20:0:0:10:1] totalSize# 1039797466 blobValueIndex# 6 Put id# [99:1:22:0:0:1572864:1] totalSize# 1039797476 blobValueIndex# 54 Trim Put id# [20:1:21:0:0:10:1] totalSize# 1041370340 blobValueIndex# 3 Put id# [25:1:20:0:0:589824:1] totalSize# 1041370350 blobValueIndex# 38 Put id# [58:1:22:0:0:589824:1] totalSize# 1041960174 blobValueIndex# 37 Put id# [87:1:16:0:0:10:1] totalSize# 1042549998 blobValueIndex# 1 Put id# [40:1:19:0:0:10:1] totalSize# 1042550008 blobValueIndex# 3 Restart Put id# [73:1:16:0:0:10:1] totalSize# 1042550018 blobValueIndex# 0 Trim Put id# [86:1:20:0:0:10:1] totalSize# 1042550028 blobValueIndex# 5 Put id# [71:1:33:0:0:1024:1] totalSize# 1042550038 blobValueIndex# 12 Put id# [16:1:17:0:0:589824:1] totalSize# 1042551062 blobValueIndex# 
35 Put id# [29:1:24:0:0:1048576:1] totalSize# 1043140886 blobValueIndex# 49 Put id# [5:1:25:0:0:1048576:1] totalSize# 1044189462 blobValueIndex# 47 Put id# [43:1:25:0:0:1572864:1] totalSize# 1045238038 blobValueIndex# 54 Put id# [87:1:17:0:0:1572864:1] totalSize# 1046810902 blobValueIndex# 52 Put id# [17:1:21:0:0:1572864:1] totalSize# 1048383766 blobValueIndex# 58 Put id# [71:1:34:0:0:589824:1] totalSize# 1049956630 blobValueIndex# 31 Trim Put id# [87:1:18:0:0:40960:1] totalSize# 1050546454 blobValueIndex# 29 Put id# [99:1:23:0:0:40960:1] totalSize# 1050587414 blobValueIndex# 20 Put id# [12:1:25:0:0:1024:1] totalSize# 1050628374 blobValueIndex# 19 Trim Put id# [32:1:25:0:0:1048576:1] totalSize# 1050629398 blobValueIndex# 40 Trim Put id# [67:1:30:0:0:10:1] totalSize# 1051677974 blobValueIndex# 3 Change MinHugeBlobSize# 65536 Put id# [23:1:35:0:0:10:1] totalSize# 1051677984 blobValueIndex# 6 Put id# [66:1:18:0:0:1572864:1] totalSize# 1051677994 blobValueIndex# 50 Put id# [88:1:12:0:0:1572864:1] totalSize# 1053250858 blobValueIndex# 52 Put id# [12:1:26:0:0:1572864:1] totalSize# 1054823722 blobValueIndex# 58 Put id# [77:1:31:0:0:1572864:1] totalSize# 1056396586 blobValueIndex# 56 Put id# [37:1:19:0:0:1024:1] totalSize# 1057969450 blobValueIndex# 10 Put id# [26:1:19:0:0:10:1] totalSize# 1057970474 blobValueIndex# 9 Put id# [28:1:28:0:0:40960:1] totalSize# 1057970484 blobValueIndex# 26 Put id# [79:1:29:0:0:1024:1] totalSize# 1058011444 blobValueIndex# 18 Put id# [5:1:26:0:0:1048576:1] totalSize# 1058012468 blobValueIndex# 47 Put id# [62:1:18:0:0:1048576:1] totalSize# 1059061044 blobValueIndex# 46 Put id# [72:1:26:0:0:1024:1] totalSize# 1060109620 blobValueIndex# 12 Change MinHugeBlobSize# 8192 Put id# [41:1:20:0:0:589824:1] totalSize# 1060110644 blobValueIndex# 33 Put id# [22:1:19:0:0:1572864:1] totalSize# 1060700468 blobValueIndex# 56 Put id# [3:1:24:0:0:40960:1] totalSize# 1062273332 blobValueIndex# 29 Put id# [15:1:13:0:0:1024:1] totalSize# 1062314292 blobValueIndex# 15 Change MinHugeBlobSize# 12288 Trim Put id# [15:1:14:0:0:10:1] totalSize# 1062315316 blobValueIndex# 0 Put id# [50:1:21:0:0:1024:1] totalSize# 1062315326 blobValueIndex# 17 Put id# [56:1:13:0:0:1048576:1] totalSize# 1062316350 blobValueIndex# 45 Put id# [83:1:26:0:0:1572864:1] totalSize# 1063364926 blobValueIndex# 59 Restart Put id# [9:1:15:0:0:1048576:1] totalSize# 1064937790 blobValueIndex# 41 Trim Put id# [96:1:23:0:0:1048576:1] totalSize# 1065986366 blobValueIndex# 40 Trim Put id# [18:1:14:0:0:10:1] totalSize# 1067034942 blobValueIndex# 3 Put id# [66:1:19:0:0:1572864:1] totalSize# 1067034952 blobValueIndex# 54 Put id# [26:1:20:0:0:40960:1] totalSize# 1068607816 blobValueIndex# 25 Put id# [55:1:23:0:0:589824:1] totalSize# 1068648776 blobValueIndex# 37 Put id# [2:1:27:0:0:1572864:1] totalSize# 1069238600 blobValueIndex# 59 Put id# [62:1:19:0:0:589824:1] totalSize# 1070811464 blobValueIndex# 31 Put id# [82:1:31:0:0:589824:1] totalSize# 1071401288 blobValueIndex# 30 Put id# [32:1:26:0:0:1048576:1] totalSize# 1071991112 blobValueIndex# 44 Change MinHugeBlobSize# 65536 Put id# [4:1:10:0:0:1048576:1] totalSize# 1073039688 blobValueIndex# 41 Restart Put id# [74:1:18:0:0:1572864:1] totalSize# 1074088264 blobValueIndex# 52 Put id# [51:1:26:0:0:10:1] totalSize# 1075661128 blobValueIndex# 8 Put id# [63:1:21:0:0:1572864:1] totalSize# 1075661138 blobValueIndex# 57 Put id# [72:1:27:0:0:40960:1] totalSize# 1077234002 blobValueIndex# 28 Trim Put id# [11:1:17:0:0:1048576:1] totalSize# 1077274962 blobValueIndex# 45 Put id# 
[6:1:27:0:0:1048576:1] totalSize# 1078323538 blobValueIndex# 47 Put id# [57:1:15:0:0:589824:1] totalSize# 1079372114 blobValueIndex# 37 Restart Put id# [20:1:22:0:0:1572864:1] totalSize# 1079961938 blobValueIndex# 59 Put id# [95:1:12:0:0:1572864:1] totalSize# 1081534802 blobValueIndex# 53 Put id# [4:1:11:0:0:40960:1] totalSize# 1083107666 blobValueIndex# 23 Trim Put id# [21:1:21:0:0:40960:1] totalSize# 1083148626 blobValueIndex# 29 Put id# [8:1:15:0:0:1572864:1] totalSize# 1083189586 blobValueIndex# 53 Trim Put id# [29:1:25:0:0:589824:1] totalSize# 1084762450 blobValueIndex# 33 Put id# [3:1:25:0:0:1572864:1] totalSize# 1085352274 blobValueIndex# 58 Put id# [76:1:21:0:0:40960:1] totalSize# 1086925138 blobValueIndex# 22 Put id# [33:1:16:0:0:40960:1] totalSize# 1086966098 blobValueIndex# 29 Trim Put id# [49:1:20:0:0:10:1] totalSize# 1087007058 blobValueIndex# 1 Put id# [22:1:20:0:0:589824:1] totalSize# 1087007068 blobValueIndex# 31 Put id# [61:1:25:0:0:1048576:1] totalSize# 1087596892 blobValueIndex# 47 Restart Put id# [72:1:28:0:0:40960:1] totalSize# 1088645468 blobValueIndex# 28 Restart Put id# [37:1:20:0:0:1048576:1] totalSize# 1088686428 blobValueIndex# 47 Change MinHugeBlobSize# 12288 Put id# [26:1:21:0:0:40960:1] totalSize# 1089735004 blobValueIndex# 21 Put id# [40:1:20:0:0:1572864:1] totalSize# 1089775964 blobValueIndex# 54 Restart |86.2%| [TM] {BAZEL_UPLOAD} ydb/core/blobstorage/ut_vdisk2/unittest |86.2%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_auditsettings/unittest |86.2%| [TM] {BAZEL_UPLOAD} ydb/core/tx/schemeshard/ut_auditsettings/unittest >> TSchemeShardAuditSettings::CreateSubdomain [GOOD] |86.2%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_auditsettings/unittest |86.2%| [TM] {BAZEL_UPLOAD} ydb/core/tx/schemeshard/ut_auditsettings/unittest ------- [TM] {asan, default-linux-x86_64, release} ydb/core/memory_controller/ut/unittest >> TMemTableMemoryConsumersCollection::SelectForCompaction [GOOD] Test command err: ResourceBrokerSelfConfig: LimitBytes: 0B ... 
waiting for SysViewsRoster update finished 2026-01-08T15:28:23.054740Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:1115: TResourceBrokerActor bootstrap 2026-01-08T15:28:23.056832Z node 1 :TABLET_SAUSAGECACHE NOTICE: shared_sausagecache.cpp:1479: Bootstrap with config MemoryLimit: 33554432 2026-01-08T15:28:26.859020Z node 1 :MEMORY_CONTROLLER INFO: memory_controller.cpp:235: Periodic memory stats: AnonRss: none CGroupLimit: none MemTotal: none MemAvailable: none AllocatedMemory: 0B AllocatorCachesMemory: 0B HardLimit: 200MiB SoftLimit: 150MiB TargetUtilization: 100MiB ActivitiesLimitBytes: 60MiB ConsumersConsumption: 0B OtherConsumption: 0B ExternalConsumption: 0B TargetConsumersConsumption: 100MiB ResultingConsumersConsumption: 6MiB Coefficient: 0.9999990463 2026-01-08T15:28:26.859672Z node 1 :MEMORY_CONTROLLER INFO: memory_controller.cpp:280: Consumer MemTable state: Consumption: 0B Limit: 6MiB Min: 2MiB Max: 6MiB 2026-01-08T15:28:26.859813Z node 1 :MEMORY_CONTROLLER INFO: memory_controller.cpp:280: Consumer ColumnTablesPortionsMetaDataCache state: Consumption: 0B Limit: 12.5MiB Min: 12.5MiB Max: 12.5MiB 2026-01-08T15:28:26.859972Z node 1 :MEMORY_CONTROLLER INFO: memory_controller.cpp:443: Consumer QueryExecution state: Consumption: 0B Limit: 40MiB 2026-01-08T15:28:26.860085Z node 1 :MEMORY_CONTROLLER INFO: memory_controller.cpp:458: Apply ResourceBroker config: LimitBytes: 60MiB queue_cs_general: 7.5MiB queue_cs_indexation: 2.5MiB queue_cs_normalizer: 7.5MiB queue_cs_ttl: 2.5MiB queue_kqp_resource_manager: 40MiB 2026-01-08T15:28:26.861076Z node 1 :MEMORY_CONTROLLER INFO: memory_controller.cpp:161: Bootstrapped with config HardLimitBytes: 209715200 2026-01-08T15:28:26.862094Z node 1 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:125: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, got undelivered to scheme cache: ActorUnknown 2026-01-08T15:28:26.862808Z node 1 :MEMORY_CONTROLLER INFO: memory_controller.cpp:305: Consumer SharedCache [1:20:2067] registered 2026-01-08T15:28:26.984238Z node 1 :MEMORY_CONTROLLER INFO: memory_controller.cpp:305: Consumer ColumnTablesBlobCache [1:21:2068] registered 2026-01-08T15:28:26.984588Z node 1 :TABLET_SAUSAGECACHE NOTICE: shared_sausagecache.cpp:238: Register memory consumer 2026-01-08T15:28:26.984672Z node 1 :TABLET_SAUSAGECACHE TRACE: shared_sausagecache.cpp:234: GC has finished with Limit: 32MiB Active: 0B Passive: 0B LoadInFly: 0B EvictedInMemoryBytes: 0B 2026-01-08T15:28:26.985253Z node 1 :MEMORY_CONTROLLER INFO: memory_controller.cpp:305: Consumer ColumnTablesDataAccessorCache [1:22:2069] registered 2026-01-08T15:28:26.986232Z node 1 :RESOURCE_BROKER INFO: resource_broker.cpp:1189: New config diff: Queues { Name: "queue_cs_general" Limit { Memory: 7864320 } } Queues { Name: "queue_cs_indexation" Limit { Memory: 2621440 } } Queues { Name: "queue_cs_normalizer" Limit { Memory: 7864320 } } Queues { Name: "queue_cs_ttl" Limit { Memory: 2621440 } } Queues { Name: "queue_kqp_resource_manager" Limit { Memory: 41943040 } } ResourceLimit { Memory: 62914560 } 2026-01-08T15:28:26.987070Z node 1 :RESOURCE_BROKER INFO: resource_broker.cpp:1195: New config: Queues { Name: "queue_default" Weight: 30 Limit { Cpu: 2 } } Queues { Name: "queue_compaction_gen0" Weight: 100 Limit { Cpu: 10 } } Queues { Name: "queue_compaction_gen1" Weight: 100 Limit { Cpu: 6 } } Queues { Name: "queue_compaction_gen2" Weight: 100 Limit { Cpu: 3 } } Queues { Name: "queue_compaction_gen3" Weight: 100 Limit { Cpu: 3 } } Queues { 
Name: "queue_compaction_borrowed" Weight: 100 Limit { Cpu: 3 } } Queues { Name: "queue_cs_indexation" Weight: 100 Limit { Cpu: 3 Memory: 2621440 } } Queues { Name: "queue_cs_ttl" Weight: 100 Limit { Cpu: 3 Memory: 2621440 } } Queues { Name: "queue_cs_general" Weight: 100 Limit { Cpu: 3 Memory: 7864320 } } Queues { Name: "queue_cs_scan_read" Weight: 100 Limit { Cpu: 3 Memory: 3221225472 } } Queues { Name: "queue_cs_normalizer" Weight: 100 Limit { Cpu: 3 Memory: 7864320 } } Queues { Name: "queue_transaction" Weight: 100 Limit { Cpu: 4 } } Queues { Name: "queue_background_compaction" Weight: 10 Limit { Cpu: 1 } } Queues { Name: "queue_scan" Weight: 100 Limit { Cpu: 10 } } Queues { Name: "queue_backup" Weight: 100 Limit { Cpu: 2 } } Queues { Name: "queue_restore" Weight: 100 Limit { Cpu: 10 } } Queues { Name: "queue_kqp_resource_manager" Weight: 30 Limit { Cpu: 4 Memory: 41943040 } } Queues { Name: "queue_build_index" Weight: 100 Limit { Cpu: 10 } } Queues { Name: "queue_ttl" Weight: 100 Limit { Cpu: 2 } } Queues { Name: "queue_datashard_build_stats" Weight: 100 Limit { Cpu: 1 } } Queues { Name: "queue_cdc_initial_scan" Weight: 100 Limit { Cpu: 2 } } Queues { Name: "queue_statistics_scan" Weight: 100 Limit { Cpu: 1 } } Queues { Name: "queue_system_tablet_backup" Weight: 100 Limit { Cpu: 1 } } Tasks { Name: "unknown" QueueName: "queue_default" DefaultDuration: 60000000 } Tasks { Name: "compaction_gen0" QueueName: "queue_compaction_gen0" DefaultDuration: 10000000 } Tasks { Name: "compaction_gen1" QueueName: "queue_compaction_gen1" DefaultDuration: 30000000 } Tasks { Name: "compaction_gen2" QueueName: "queue_compaction_gen2" DefaultDuration: 120000000 } Tasks { Name: "compaction_gen3" QueueName: "queue_compaction_gen3" DefaultDuration: 600000000 } Tasks { Name: "compaction_borrowed" QueueName: "queue_compaction_borrowed" DefaultDuration: 600000000 } Tasks { Name: "CS::TTL" QueueName: "queue_cs_ttl" DefaultDuration: 600000000 } Tasks { Name: "CS::INDEXATION" QueueName: "queue_cs_indexation" DefaultDuration: 600000000 } Tasks { Name: "CS::GENERAL" QueueName: "queue_cs_general" DefaultDuration: 600000000 } Tasks { Name: "CS::SCAN_READ" QueueName: "queue_cs_scan_read" DefaultDuration: 600000000 } Tasks { Name: "CS::NORMALIZER" QueueName: "queue_cs_normalizer" DefaultDuration: 600000000 } Tasks { Name: "transaction" QueueName: "queue_transaction" DefaultDuration: 600000000 } Tasks { Name: "background_compaction" QueueName: "queue_background_compaction" DefaultDuration: 60000000 } Tasks { Name: "background_compaction_gen0" QueueName: "queue_background_compaction" DefaultDuration: 10000000 } Tasks { Name: "background_compaction_gen1" QueueName: "queue_background_compaction" DefaultDuration: 20000000 } Tasks { Name: "background_compaction_gen2" QueueName: "queue_background_compaction" DefaultDuration: 60000000 } Tasks { Name: "background_compaction_gen3" QueueName: "queue_background_compaction" DefaultDuration: 300000000 } Tasks { Name: "scan" QueueName: "queue_scan" DefaultDuration: 300000000 } Tasks { Name: "backup" QueueName: "queue_backup" DefaultDuration: 300000000 } Tasks { Name: "restore" QueueName: "queue_restore" DefaultDuration: 300000000 } Tasks { Name: "kqp_query" QueueName: "queue_kqp_resource_manager" DefaultDuration: 600000000 } Tasks { Name: "build_index" QueueName: "queue_build_index" DefaultDuration: 600000000 } Tasks { Name: "ttl" QueueName: "queue_ttl" DefaultDuration: 300000000 } Tasks { Name: "datashard_build_stats" QueueName: "queue_datashard_build_stats" DefaultDuration: 5000000 } 
Tasks { Name: "cdc_initial_scan" QueueName: "queue_cdc_initial_scan" DefaultDuration: 600000000 } Tasks { Name: "statistics_scan" QueueName: "queue_statistics_scan" DefaultDuration: 600000000 } Tasks { Name: "system_tablet_backup" QueueName: "queue_system_tablet_backup" DefaultDuration: 60000000 } ResourceLimit { Cpu: 256 Memory: 62914560 } 2026-01-08T15:28:26.988295Z node 1 :RESOURCE_BROKER INFO: resource_broker.cpp:1240: Configure result: Success: true 2026-01-08T15:28:26.988696Z node 1 :TABLET_SAUSAGECACHE TRACE: shared_sausagecache.cpp:234: GC has finished with Limit: 32MiB Active: 0B Passive: 0B LoadInFly: 0B EvictedInMemoryBytes: 0B 2026-01-08T15:28:26.989006Z node 1 :MEMORY_CONTROLLER INFO: memory_controller.cpp:305: Consumer ColumnTablesColumnDataCache [1:23:2070] registered 2026-01-08T15:28:26.998256Z node 1 :MEMORY_CONTROLLER INFO: memory_controller.cpp:305: Consumer ColumnTablesScanGroupedMemory [1:51:2098] registered 2026-01-08T15:28:27.002364Z node 1 :MEMORY_CONTROLLER INFO: memory_controller.cpp:305: Consumer ColumnTablesCompGroupedMemory [1:52:2099] registered 2026-01-08T15:28:27.002980Z node 1 :MEMORY_CONTROLLER INFO: memory_controller.cpp:305: Consumer ColumnTablesDeduplicationGroupedMemory [1:53:2100] registered 2026-01-08T15:28:27.003101Z node 1 :MEMORY_CONTROLLER INFO: memory_controller.cpp:335: ResourceBroker configure result Success: true 2026-01-08T15:28:27.005120Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:125: [WorkloadService] [Service] Failed to discover tenant nodes 2026-01-08T15:28:27.011480Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:317:2361], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2026-01-08T15:28:27.011959Z node 1 :METADATA_PROVIDER ERROR: log.cpp:841: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2026-01-08T15:28:27.012100Z node 1 :METADATA_PROVIDER ERROR: log.cpp:841: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2026-01-08T15:28:27.054027Z node 1 :MEMORY_CONTROLLER TRACE: memory_controller.cpp:312: MemTable [1:428:2393] 1 registered 2026-01-08T15:28:27.067029Z node 1 :MEMORY_CONTROLLER TRACE: memory_controller.cpp:312: MemTable [1:440:2395] 0 registered 2026-01-08T15:28:27.073281Z node 1 :MEMORY_CONTROLLER TRACE: memory_controller.cpp:312: MemTable [1:440:2395] 2 registered 2026-01-08T15:28:27.073601Z node 1 :MEMORY_CONTROLLER TRACE: memory_controller.cpp:312: MemTable [1:440:2395] 4 registered 2026-01-08T15:28:27.074685Z node 1 :MEMORY_CONTROLLER TRACE: memory_controller.cpp:312: MemTable [1:440:2395] 5 registered 2026-01-08T15:28:27.075948Z node 1 :MEMORY_CONTROLLER TRACE: memory_controller.cpp:312: MemTable [1:450:2397] 1 registered 2026-01-08T15:28:27.080006Z node 1 :MEMORY_CONTROLLER TRACE: memory_controller.cpp:312: MemTable [1:450:2397] 2 registered 2026-01-08T15:28:27.120930Z node 1 :MEMORY_CONTROLLER TRACE: memory_controller.cpp:312: MemTable [1:458:2399] 1 registered 2026-01-08T15:28:27.162906Z node 1 :MEMORY_CONTROLLER TRACE: memory_controller.cpp:312: MemTable [1:458:2399] 2 registered 2026-01-08T15:28:27.164416Z node 1 :MEMORY_CONTROLLER TRACE: memory_controller.cpp:312: MemTable [1:458:2399] 3 registered 2026-01-08T15:28:27.164787Z node 1 :MEMORY_CONTROLLER TRACE: memory_controller.cpp:312: MemTable [1:458:2399] 4 registered 2026-01-08T15:28:27.165122Z node 1 :MEMORY_CONTROLLER TRACE: memory_controller.cpp:312: MemTable [1:458:2399] 5 registered 2026-01-08T15:28:27.165240Z node 1 :MEMORY_CONTROLLER TRACE: memory_controller.cpp:312: MemTable [1:458:2399] 6 registered 2026-01-08T15:28:27.165429Z node 1 :MEMORY_CONTROLLER TRACE: memory_controller.cpp:312: MemTable [1:458:2399] 7 registered 2026-01-08T15:28:27.165823Z node 1 :MEMORY_CONTROLLER TRACE: memory_controller.cpp:312: MemTable [1:458:2399] 8 registered 2026-01-08T15:28:27.166771Z node 1 :MEMORY_CONTROLLER TRACE: memory_controller.cpp:312: MemTable [1:458:2399] 9 registered 2026-01-08T15:28:27.167031Z node 1 :MEMORY_CONTROLLER TRACE: memory_controller.cpp:312: MemTable [1:458:2399] 10 registered 2026-01-08T15:28:27.167943Z node 1 :MEMORY_CONTROLLER TRACE: memory_controller.cpp:312: MemTable [1:458:2399] 11 registered 2026-01-08T15:28:27.168109Z node 1 :MEMORY_CONTROLLER TRACE: memory_controller.cpp:312: MemTable [1:458:2399] 12 registered 2026-01-08T15:28:27.168169Z node 1 :MEMORY_CONTROLLER TRACE: memory_controller.cpp:312: MemTable [1:458:2399] 13 registered 2026-01-08T15:28:27.168312Z node 1 :MEMORY_CONTROLLER TRACE: memory_controller.cpp:312: Me ... 
in: 50MiB Max: 50MiB 2026-01-08T15:30:34.721003Z node 12 :MEMORY_CONTROLLER INFO: memory_controller.cpp:280: Consumer ColumnTablesBlobCache state: Consumption: 0B Limit: 6.25MiB Min: 6.25MiB Max: 6.25MiB 2026-01-08T15:30:34.721038Z node 12 :MEMORY_CONTROLLER INFO: memory_controller.cpp:280: Consumer ColumnTablesDataAccessorCache state: Consumption: 0B Limit: 6.25MiB Min: 6.25MiB Max: 6.25MiB 2026-01-08T15:30:34.721076Z node 12 :MEMORY_CONTROLLER INFO: memory_controller.cpp:280: Consumer ColumnTablesColumnDataCache state: Consumption: 0B Limit: 6.25MiB Min: 6.25MiB Max: 6.25MiB 2026-01-08T15:30:34.721113Z node 12 :MEMORY_CONTROLLER INFO: memory_controller.cpp:280: Consumer ColumnTablesDeduplicationGroupedMemory state: Consumption: 0B Limit: 50MiB Min: 50MiB Max: 50MiB 2026-01-08T15:30:34.721151Z node 12 :MEMORY_CONTROLLER INFO: memory_controller.cpp:280: Consumer ColumnTablesPortionsMetaDataCache state: Consumption: 0B Limit: 31.3MiB Min: 31.3MiB Max: 31.3MiB 2026-01-08T15:30:34.721197Z node 12 :MEMORY_CONTROLLER INFO: memory_controller.cpp:443: Consumer QueryExecution state: Consumption: 1.94MiB Limit: 100MiB 2026-01-08T15:30:34.721317Z node 12 :TABLET_SAUSAGECACHE INFO: shared_sausagecache.cpp:248: Limit memory consumer with 236MiB 2026-01-08T15:30:34.721357Z node 12 :TABLET_SAUSAGECACHE TRACE: shared_sausagecache.cpp:234: GC has finished with Limit: 32MiB Active: 0B Passive: 0B LoadInFly: 0B EvictedInMemoryBytes: 0B 2026-01-08T15:30:34.943512Z node 12 :MEMORY_CONTROLLER INFO: memory_controller.cpp:235: Periodic memory stats: AnonRss: none CGroupLimit: 1.95GiB MemTotal: none MemAvailable: none AllocatedMemory: 0B AllocatorCachesMemory: 0B HardLimit: 1.95GiB SoftLimit: 1.46GiB TargetUtilization: 1000MiB ActivitiesLimitBytes: 600MiB ConsumersConsumption: 172KiB OtherConsumption: 0B ExternalConsumption: 0B TargetConsumersConsumption: 1000MiB ResultingConsumersConsumption: 1000MiB Coefficient: 0.90625 2026-01-08T15:30:34.944335Z node 12 :MEMORY_CONTROLLER INFO: memory_controller.cpp:280: Consumer SharedCache state: Consumption: 0B Limit: 944MiB Min: 400MiB Max: 1000MiB 2026-01-08T15:30:34.944412Z node 12 :MEMORY_CONTROLLER INFO: memory_controller.cpp:280: Consumer MemTable state: Consumption: 172KiB Limit: 56.3MiB Min: 20MiB Max: 60MiB 2026-01-08T15:30:34.944454Z node 12 :MEMORY_CONTROLLER INFO: memory_controller.cpp:280: Consumer ColumnTablesScanGroupedMemory state: Consumption: 0B Limit: 200MiB Min: 200MiB Max: 200MiB 2026-01-08T15:30:34.944499Z node 12 :MEMORY_CONTROLLER INFO: memory_controller.cpp:280: Consumer ColumnTablesCompGroupedMemory state: Consumption: 0B Limit: 200MiB Min: 200MiB Max: 200MiB 2026-01-08T15:30:34.944537Z node 12 :MEMORY_CONTROLLER INFO: memory_controller.cpp:280: Consumer ColumnTablesBlobCache state: Consumption: 0B Limit: 25MiB Min: 25MiB Max: 25MiB 2026-01-08T15:30:34.944572Z node 12 :MEMORY_CONTROLLER INFO: memory_controller.cpp:280: Consumer ColumnTablesDataAccessorCache state: Consumption: 0B Limit: 25MiB Min: 25MiB Max: 25MiB 2026-01-08T15:30:34.944603Z node 12 :MEMORY_CONTROLLER INFO: memory_controller.cpp:280: Consumer ColumnTablesColumnDataCache state: Consumption: 0B Limit: 25MiB Min: 25MiB Max: 25MiB 2026-01-08T15:30:34.944641Z node 12 :MEMORY_CONTROLLER INFO: memory_controller.cpp:280: Consumer ColumnTablesDeduplicationGroupedMemory state: Consumption: 0B Limit: 200MiB Min: 200MiB Max: 200MiB 2026-01-08T15:30:34.944679Z node 12 :MEMORY_CONTROLLER INFO: memory_controller.cpp:280: Consumer ColumnTablesPortionsMetaDataCache state: Consumption: 0B Limit: 
125MiB Min: 125MiB Max: 125MiB 2026-01-08T15:30:34.944748Z node 12 :MEMORY_CONTROLLER INFO: memory_controller.cpp:443: Consumer QueryExecution state: Consumption: 1.94MiB Limit: 400MiB 2026-01-08T15:30:34.944857Z node 12 :MEMORY_CONTROLLER INFO: memory_controller.cpp:458: Apply ResourceBroker config: LimitBytes: 600MiB queue_cs_general: 75MiB queue_cs_indexation: 25MiB queue_cs_normalizer: 75MiB queue_cs_ttl: 25MiB queue_kqp_resource_manager: 400MiB 2026-01-08T15:30:34.945248Z node 12 :RESOURCE_BROKER INFO: resource_broker.cpp:1189: New config diff: Queues { Name: "queue_cs_general" Limit { Memory: 78643200 } } Queues { Name: "queue_cs_indexation" Limit { Memory: 26214400 } } Queues { Name: "queue_cs_normalizer" Limit { Memory: 78643200 } } Queues { Name: "queue_cs_ttl" Limit { Memory: 26214400 } } Queues { Name: "queue_kqp_resource_manager" Limit { Memory: 419430400 } } ResourceLimit { Memory: 629145600 } 2026-01-08T15:30:34.946191Z node 12 :RESOURCE_BROKER INFO: resource_broker.cpp:1195: New config: Queues { Name: "queue_default" Weight: 30 Limit { Cpu: 2 } } Queues { Name: "queue_compaction_gen0" Weight: 100 Limit { Cpu: 10 } } Queues { Name: "queue_compaction_gen1" Weight: 100 Limit { Cpu: 6 } } Queues { Name: "queue_compaction_gen2" Weight: 100 Limit { Cpu: 3 } } Queues { Name: "queue_compaction_gen3" Weight: 100 Limit { Cpu: 3 } } Queues { Name: "queue_compaction_borrowed" Weight: 100 Limit { Cpu: 3 } } Queues { Name: "queue_cs_indexation" Weight: 100 Limit { Cpu: 3 Memory: 26214400 } } Queues { Name: "queue_cs_ttl" Weight: 100 Limit { Cpu: 3 Memory: 26214400 } } Queues { Name: "queue_cs_general" Weight: 100 Limit { Cpu: 3 Memory: 78643200 } } Queues { Name: "queue_cs_scan_read" Weight: 100 Limit { Cpu: 3 Memory: 3221225472 } } Queues { Name: "queue_cs_normalizer" Weight: 100 Limit { Cpu: 3 Memory: 78643200 } } Queues { Name: "queue_transaction" Weight: 100 Limit { Cpu: 4 } } Queues { Name: "queue_background_compaction" Weight: 10 Limit { Cpu: 1 } } Queues { Name: "queue_scan" Weight: 100 Limit { Cpu: 10 } } Queues { Name: "queue_backup" Weight: 100 Limit { Cpu: 2 } } Queues { Name: "queue_restore" Weight: 100 Limit { Cpu: 10 } } Queues { Name: "queue_kqp_resource_manager" Weight: 30 Limit { Cpu: 4 Memory: 419430400 } } Queues { Name: "queue_build_index" Weight: 100 Limit { Cpu: 10 } } Queues { Name: "queue_ttl" Weight: 100 Limit { Cpu: 2 } } Queues { Name: "queue_datashard_build_stats" Weight: 100 Limit { Cpu: 1 } } Queues { Name: "queue_cdc_initial_scan" Weight: 100 Limit { Cpu: 2 } } Queues { Name: "queue_statistics_scan" Weight: 100 Limit { Cpu: 1 } } Queues { Name: "queue_system_tablet_backup" Weight: 100 Limit { Cpu: 1 } } Tasks { Name: "unknown" QueueName: "queue_default" DefaultDuration: 60000000 } Tasks { Name: "compaction_gen0" QueueName: "queue_compaction_gen0" DefaultDuration: 10000000 } Tasks { Name: "compaction_gen1" QueueName: "queue_compaction_gen1" DefaultDuration: 30000000 } Tasks { Name: "compaction_gen2" QueueName: "queue_compaction_gen2" DefaultDuration: 120000000 } Tasks { Name: "compaction_gen3" QueueName: "queue_compaction_gen3" DefaultDuration: 600000000 } Tasks { Name: "compaction_borrowed" QueueName: "queue_compaction_borrowed" DefaultDuration: 600000000 } Tasks { Name: "CS::TTL" QueueName: "queue_cs_ttl" DefaultDuration: 600000000 } Tasks { Name: "CS::INDEXATION" QueueName: "queue_cs_indexation" DefaultDuration: 600000000 } Tasks { Name: "CS::GENERAL" QueueName: "queue_cs_general" DefaultDuration: 600000000 } Tasks { Name: "CS::SCAN_READ" QueueName: 
"queue_cs_scan_read" DefaultDuration: 600000000 } Tasks { Name: "CS::NORMALIZER" QueueName: "queue_cs_normalizer" DefaultDuration: 600000000 } Tasks { Name: "transaction" QueueName: "queue_transaction" DefaultDuration: 600000000 } Tasks { Name: "background_compaction" QueueName: "queue_background_compaction" DefaultDuration: 60000000 } Tasks { Name: "background_compaction_gen0" QueueName: "queue_background_compaction" DefaultDuration: 10000000 } Tasks { Name: "background_compaction_gen1" QueueName: "queue_background_compaction" DefaultDuration: 20000000 } Tasks { Name: "background_compaction_gen2" QueueName: "queue_background_compaction" DefaultDuration: 60000000 } Tasks { Name: "background_compaction_gen3" QueueName: "queue_background_compaction" DefaultDuration: 300000000 } Tasks { Name: "scan" QueueName: "queue_scan" DefaultDuration: 300000000 } Tasks { Name: "backup" QueueName: "queue_backup" DefaultDuration: 300000000 } Tasks { Name: "restore" QueueName: "queue_restore" DefaultDuration: 300000000 } Tasks { Name: "kqp_query" QueueName: "queue_kqp_resource_manager" DefaultDuration: 600000000 } Tasks { Name: "build_index" QueueName: "queue_build_index" DefaultDuration: 600000000 } Tasks { Name: "ttl" QueueName: "queue_ttl" DefaultDuration: 300000000 } Tasks { Name: "datashard_build_stats" QueueName: "queue_datashard_build_stats" DefaultDuration: 5000000 } Tasks { Name: "cdc_initial_scan" QueueName: "queue_cdc_initial_scan" DefaultDuration: 600000000 } Tasks { Name: "statistics_scan" QueueName: "queue_statistics_scan" DefaultDuration: 600000000 } Tasks { Name: "system_tablet_backup" QueueName: "queue_system_tablet_backup" DefaultDuration: 60000000 } ResourceLimit { Cpu: 256 Memory: 629145600 } 2026-01-08T15:30:34.951112Z node 12 :RESOURCE_BROKER INFO: resource_broker.cpp:1240: Configure result: Success: true 2026-01-08T15:30:34.951706Z node 12 :TABLET_SAUSAGECACHE INFO: shared_sausagecache.cpp:248: Limit memory consumer with 944MiB 2026-01-08T15:30:34.951797Z node 12 :TABLET_SAUSAGECACHE TRACE: shared_sausagecache.cpp:234: GC has finished with Limit: 32MiB Active: 0B Passive: 0B LoadInFly: 0B EvictedInMemoryBytes: 0B 2026-01-08T15:30:34.952345Z node 12 :MEMORY_CONTROLLER INFO: memory_controller.cpp:335: ResourceBroker configure result Success: true 2026-01-08T15:30:35.113180Z node 12 :MEMORY_CONTROLLER INFO: memory_controller.cpp:235: Periodic memory stats: AnonRss: none CGroupLimit: 1.95GiB MemTotal: none MemAvailable: none AllocatedMemory: 0B AllocatorCachesMemory: 0B HardLimit: 1.95GiB SoftLimit: 1.46GiB TargetUtilization: 1000MiB ActivitiesLimitBytes: 600MiB ConsumersConsumption: 172KiB OtherConsumption: 0B ExternalConsumption: 0B TargetConsumersConsumption: 1000MiB ResultingConsumersConsumption: 1000MiB Coefficient: 0.90625 2026-01-08T15:30:35.113790Z node 12 :MEMORY_CONTROLLER INFO: memory_controller.cpp:280: Consumer SharedCache state: Consumption: 0B Limit: 944MiB Min: 400MiB Max: 1000MiB 2026-01-08T15:30:35.113853Z node 12 :MEMORY_CONTROLLER INFO: memory_controller.cpp:280: Consumer MemTable state: Consumption: 172KiB Limit: 56.3MiB Min: 20MiB Max: 60MiB 2026-01-08T15:30:35.113884Z node 12 :MEMORY_CONTROLLER INFO: memory_controller.cpp:280: Consumer ColumnTablesScanGroupedMemory state: Consumption: 0B Limit: 200MiB Min: 200MiB Max: 200MiB 2026-01-08T15:30:35.113925Z node 12 :MEMORY_CONTROLLER INFO: memory_controller.cpp:280: Consumer ColumnTablesCompGroupedMemory state: Consumption: 0B Limit: 200MiB Min: 200MiB Max: 200MiB 2026-01-08T15:30:35.113959Z node 12 :MEMORY_CONTROLLER 
INFO: memory_controller.cpp:280: Consumer ColumnTablesBlobCache state: Consumption: 0B Limit: 25MiB Min: 25MiB Max: 25MiB 2026-01-08T15:30:35.113995Z node 12 :MEMORY_CONTROLLER INFO: memory_controller.cpp:280: Consumer ColumnTablesDataAccessorCache state: Consumption: 0B Limit: 25MiB Min: 25MiB Max: 25MiB 2026-01-08T15:30:35.114025Z node 12 :MEMORY_CONTROLLER INFO: memory_controller.cpp:280: Consumer ColumnTablesColumnDataCache state: Consumption: 0B Limit: 25MiB Min: 25MiB Max: 25MiB 2026-01-08T15:30:35.114060Z node 12 :MEMORY_CONTROLLER INFO: memory_controller.cpp:280: Consumer ColumnTablesDeduplicationGroupedMemory state: Consumption: 0B Limit: 200MiB Min: 200MiB Max: 200MiB 2026-01-08T15:30:35.114094Z node 12 :MEMORY_CONTROLLER INFO: memory_controller.cpp:280: Consumer ColumnTablesPortionsMetaDataCache state: Consumption: 0B Limit: 125MiB Min: 125MiB Max: 125MiB 2026-01-08T15:30:35.114143Z node 12 :MEMORY_CONTROLLER INFO: memory_controller.cpp:443: Consumer QueryExecution state: Consumption: 1.94MiB Limit: 400MiB 2026-01-08T15:30:35.114271Z node 12 :TABLET_SAUSAGECACHE INFO: shared_sausagecache.cpp:248: Limit memory consumer with 944MiB 2026-01-08T15:30:35.114343Z node 12 :TABLET_SAUSAGECACHE TRACE: shared_sausagecache.cpp:234: GC has finished with Limit: 32MiB Active: 0B Passive: 0B LoadInFly: 0B EvictedInMemoryBytes: 0B |86.2%| [TM] {BAZEL_UPLOAD} ydb/core/memory_controller/ut/unittest >> KeyValueGRPCService::SimpleWriteReadWithGetChannelStatusV1 [GOOD] >> KeyValueGRPCService::SimpleWriteReadWithGetChannelStatusV2 |86.2%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/services/persqueue_v1/ut/ydb-services-persqueue_v1-ut |86.2%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/services/persqueue_v1/ut/ydb-services-persqueue_v1-ut |86.2%| [TM] {RESULT} ydb/core/memory_controller/ut/unittest |86.2%| [LD] {RESULT} $(B)/ydb/services/persqueue_v1/ut/ydb-services-persqueue_v1-ut ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_auditsettings/unittest >> TSchemeShardAuditSettings::AlterExtSubdomain-ExternalSchemeShard-false [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2143] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2143] Leader for TabletID 72057594046678944 is [1:129:2154] sender: [1:130:2058] recipient: [1:112:2143] 2026-01-08T15:30:29.430857Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7938: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2026-01-08T15:30:29.430975Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7966: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2026-01-08T15:30:29.431026Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7852: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2026-01-08T15:30:29.431069Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7868: OperationsProcessing config: using default configuration 2026-01-08T15:30:29.431147Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7874: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2026-01-08T15:30:29.431191Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7874: OperationsProcessing config: type 
TxMergeTablePartition, limit 10000 2026-01-08T15:30:29.431281Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7998: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2026-01-08T15:30:29.431365Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_shred_manager.cpp:39: [RootShredManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, ShredInterval# 604800.000000s, ShredBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2026-01-08T15:30:29.432415Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:8069: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2026-01-08T15:30:29.432801Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2026-01-08T15:30:29.655129Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7755: Cannot subscribe to console configs 2026-01-08T15:30:29.655206Z node 1 :IMPORT WARN: schemeshard_import.cpp:440: Table profiles were not loaded 2026-01-08T15:30:29.679977Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2026-01-08T15:30:29.680576Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2026-01-08T15:30:29.680796Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2026-01-08T15:30:29.729481Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2026-01-08T15:30:29.730608Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:464: Clear TempDirsState with owners number: 0 2026-01-08T15:30:29.731467Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1383: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2026-01-08T15:30:29.731874Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2026-01-08T15:30:29.736363Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:160: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2026-01-08T15:30:29.736607Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_shred_manager.cpp:84: [RootShredManager] Stop 2026-01-08T15:30:29.738087Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2026-01-08T15:30:29.738169Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2026-01-08T15:30:29.738289Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2026-01-08T15:30:29.738374Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2026-01-08T15:30:29.738490Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:213: TTxServerlessStorageBilling.Complete 2026-01-08T15:30:29.738729Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:7111: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2026-01-08T15:30:30.327009Z node 1 
:FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot" OperationType: ESchemeOpMkDir MkDir { Name: ".sys" } Internal: true FailOnExist: false } TxId: 281474976710657 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:30:30.328195Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "auth_permissions" Type: EAuthPermissions } } TxId: 281474976710658 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:30:30.328410Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "auth_effective_permissions" Type: EAuthEffectivePermissions } } TxId: 281474976710659 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:30:30.328495Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "auth_users" Type: EAuthUsers } } TxId: 281474976710660 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:30:30.328555Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "pg_tables" Type: EPgTables } } TxId: 281474976710661 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:30:30.328638Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "ds_pdisks" Type: EPDisks } } TxId: 281474976710662 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:30:30.328819Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "auth_group_members" Type: EAuthGroupMembers } } TxId: 281474976710663 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:30:30.328970Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: 
"top_partitions_one_minute" Type: ETopPartitionsByCpuOneMinute } } TxId: 281474976710664 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:30:30.329167Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "top_queries_by_request_units_one_hour" Type: ETopQueriesByRequestUnitsOneHour } } TxId: 281474976710665 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:30:30.329257Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "ds_vslots" Type: EVSlots } } TxId: 281474976710666 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:30:30.329337Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "top_partitions_by_tli_one_hour" Type: ETopPartitionsByTliOneHour } } TxId: 281474976710667 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:30:30.329439Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "nodes" Type: ENodes } } TxId: 281474976710668 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:30:30.329513Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "top_queries_by_cpu_time_one_hour" Type: ETopQueriesByCpuTimeOneHour } } TxId: 281474976710669 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:30:30.329598Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "top_queries_by_read_bytes_one_hour" Type: ETopQueriesByReadBytesOneHour } } TxId: 281474976710670 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:30:30.329694Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "top_queries_by_read_bytes_one_minute" Type: ETopQueriesByR ... 
at step: 5000112 FAKE_COORDINATOR: advance: minStep5000112 State->FrontStep: 5000111 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 175 at step: 5000112 2026-01-08T15:30:37.263820Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:685: TTxOperationPlanStep Execute, stepId: 5000112, transactions count in step: 1, at schemeshard: 72057594046678944 2026-01-08T15:30:37.264002Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:689: TTxOperationPlanStep Execute, message: Transactions { TxId: 175 Coordinator: 72057594046316545 AckTo { RawX1: 136 RawX2: 4294969454 } } Step: 5000112 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2026-01-08T15:30:37.264057Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_drop_extsubdomain.cpp:190: TDropExtSubdomain TPropose, operationId: 175:0 HandleReply TEvOperationPlan, step: 5000112, at schemeshard: 72057594046678944 2026-01-08T15:30:37.264144Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5626: ExamineTreeVFS visit path id [OwnerId: 72057594046678944, LocalPathId: 62] name: USER_0 type: EPathTypeExtSubDomain state: EPathStateDrop stepDropped: 0 droppedTxId: 175 parent: [OwnerId: 72057594046678944, LocalPathId: 1] 2026-01-08T15:30:37.264174Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5642: ExamineTreeVFS run path id: [OwnerId: 72057594046678944, LocalPathId: 62] 2026-01-08T15:30:37.264208Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2700: Change state for txid 175:0 128 -> 134 2026-01-08T15:30:37.265306Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 175 2026-01-08T15:30:37.267598Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 175 2026-01-08T15:30:37.269331Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:494: TTxOperationProgress Execute, operationId: 175:0, at schemeshard: 72057594046678944 2026-01-08T15:30:37.269408Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_drop_extsubdomain.cpp:137: TDropExtSubdomain TDeleteExternalShards, operationId: 175:0 ProgressState, at schemeshard: 72057594046678944 2026-01-08T15:30:37.269545Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2700: Change state for txid 175:0 134 -> 135 2026-01-08T15:30:37.269709Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:600: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 2 2026-01-08T15:30:37.269778Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:600: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 62] was 2 FAKE_COORDINATOR: Erasing txId 175 2026-01-08T15:30:37.272350Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2026-01-08T15:30:37.272407Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 175, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2026-01-08T15:30:37.272582Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 175, path id: [OwnerId: 72057594046678944, LocalPathId: 62] 
2026-01-08T15:30:37.272713Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2026-01-08T15:30:37.272752Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:210:2211], at schemeshard: 72057594046678944, txId: 175, path id: 1 2026-01-08T15:30:37.272792Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:210:2211], at schemeshard: 72057594046678944, txId: 175, path id: 62 2026-01-08T15:30:37.273324Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:494: TTxOperationProgress Execute, operationId: 175:0, at schemeshard: 72057594046678944 2026-01-08T15:30:37.273378Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_drop_extsubdomain.cpp:29: [72057594046678944] TDeleteSubdomainSystemShards opId# 175:0 ProgressState 2026-01-08T15:30:37.273413Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2700: Change state for txid 175:0 135 -> 240 2026-01-08T15:30:37.274152Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6274: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 106 PathOwnerId: 72057594046678944, cookie: 175 2026-01-08T15:30:37.274258Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 106 PathOwnerId: 72057594046678944, cookie: 175 2026-01-08T15:30:37.274297Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:108: Operation in-flight, at schemeshard: 72057594046678944, txId: 175 2026-01-08T15:30:37.274352Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 175, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], version: 106 2026-01-08T15:30:37.274392Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:611: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 3 2026-01-08T15:30:37.275397Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6274: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 62 Version: 18446744073709551615 PathOwnerId: 72057594046678944, cookie: 175 2026-01-08T15:30:37.275500Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 62 Version: 18446744073709551615 PathOwnerId: 72057594046678944, cookie: 175 2026-01-08T15:30:37.275533Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:108: Operation in-flight, at schemeshard: 72057594046678944, txId: 175 2026-01-08T15:30:37.275564Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 175, pathId: [OwnerId: 72057594046678944, LocalPathId: 62], version: 18446744073709551615 2026-01-08T15:30:37.275600Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:611: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 62] was 3 2026-01-08T15:30:37.275689Z node 1 :FLAT_TX_SCHEMESHARD 
DEBUG: schemeshard__operation.cpp:1679: TOperation IsReadyToNotify, TxId: 175, ready parts: 0/1, is published: true 2026-01-08T15:30:37.278258Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:494: TTxOperationProgress Execute, operationId: 175:0, at schemeshard: 72057594046678944 2026-01-08T15:30:37.278337Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:497: [72057594046678944] TDone opId# 175:0 ProgressState 2026-01-08T15:30:37.278423Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:928: Part operation is done id#175:0 progress is 1/1 2026-01-08T15:30:37.278461Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1714: TOperation IsReadyToDone TxId: 175 ready parts: 1/1 2026-01-08T15:30:37.278498Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:928: Part operation is done id#175:0 progress is 1/1 2026-01-08T15:30:37.278524Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1714: TOperation IsReadyToDone TxId: 175 ready parts: 1/1 2026-01-08T15:30:37.278553Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1679: TOperation IsReadyToNotify, TxId: 175, ready parts: 1/1, is published: true 2026-01-08T15:30:37.278587Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1714: TOperation IsReadyToDone TxId: 175 ready parts: 1/1 2026-01-08T15:30:37.278617Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:995: Operation and all the parts is done, operation id: 175:0 2026-01-08T15:30:37.278644Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5577: RemoveTx for txid 175:0 2026-01-08T15:30:37.278705Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:611: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 62] was 2 2026-01-08T15:30:37.279455Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:205: TTxCleanDroppedSubDomains Execute, 1 paths in candidate queue, at schemeshard: 72057594046678944 2026-01-08T15:30:37.279509Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:224: TTxCleanDroppedPaths: PersistRemoveSubDomain for PathId# [OwnerId: 72057594046678944, LocalPathId: 62], at schemeshard: 72057594046678944 2026-01-08T15:30:37.279573Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:611: DecrementPathDbRefCount reason for pathId [OwnerId: 72057594046678944, LocalPathId: 62] was 1 2026-01-08T15:30:37.280633Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:123: TTxCleanDroppedPaths Execute, 1 paths in candidate queue, at schemeshard: 72057594046678944 2026-01-08T15:30:37.280682Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:137: TTxCleanDroppedPaths: PersistRemovePath for PathId# [OwnerId: 72057594046678944, LocalPathId: 62], at schemeshard: 72057594046678944 2026-01-08T15:30:37.280755Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:611: DecrementPathDbRefCount reason remove path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 2 2026-01-08T15:30:37.281799Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 175 2026-01-08T15:30:37.281972Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 175 2026-01-08T15:30:37.284785Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: 
schemeshard__clean_pathes.cpp:252: TTxCleanDroppedSubDomains Complete, done PersistRemoveSubDomain for 1 subdomains, skipped 0, left 0 candidates, at schemeshard: 72057594046678944 2026-01-08T15:30:37.284916Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:160: TTxCleanDroppedPaths Complete, done PersistRemovePath for 1 paths, skipped 0, left 0 candidates, at schemeshard: 72057594046678944
TestModificationResult got TxId: 175, wait until txId: 175
TestWaitNotification wait txId: 175
2026-01-08T15:30:37.286490Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:213: tests -- TTxNotificationSubscriber for txId 175: send EvNotifyTxCompletion 2026-01-08T15:30:37.286540Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:259: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 175 2026-01-08T15:30:37.288184Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:123: NotifyTxCompletion, unknown transaction, txId: 175, at schemeshard: 72057594046678944 2026-01-08T15:30:37.288321Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:230: tests -- TTxNotificationSubscriber for txId 175: got EvNotifyTxCompletionResult 2026-01-08T15:30:37.288359Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:239: tests -- TTxNotificationSubscriber for txId 175: satisfy waiter [1:3042:5030]
TestWaitNotification: OK eventTxId 175
|86.2%| [TM] {BAZEL_UPLOAD} ydb/core/tx/schemeshard/ut_auditsettings/unittest
------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_auditsettings/unittest >> TSchemeShardAuditSettings::CreateSubdomain [GOOD]
Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2143]
IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2143]
Leader for TabletID 72057594046678944 is [1:129:2154] sender: [1:130:2058] recipient: [1:112:2143]
2026-01-08T15:30:36.403156Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7938: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2026-01-08T15:30:36.403231Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7966: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2026-01-08T15:30:36.403292Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7852: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2026-01-08T15:30:36.403332Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7868: OperationsProcessing config: using default configuration 2026-01-08T15:30:36.403391Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7874: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2026-01-08T15:30:36.403450Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7874: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2026-01-08T15:30:36.403527Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7998: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2026-01-08T15:30:36.403590Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_shred_manager.cpp:39: [RootShredManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, ShredInterval# 604800.000000s, ShredBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, 
IsManualStartup# false 2026-01-08T15:30:36.404578Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:8069: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2026-01-08T15:30:36.404905Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2026-01-08T15:30:36.510302Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7755: Cannot subscribe to console configs 2026-01-08T15:30:36.510381Z node 1 :IMPORT WARN: schemeshard_import.cpp:440: Table profiles were not loaded 2026-01-08T15:30:36.527272Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2026-01-08T15:30:36.527557Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2026-01-08T15:30:36.527729Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2026-01-08T15:30:36.553428Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2026-01-08T15:30:36.553839Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:464: Clear TempDirsState with owners number: 0 2026-01-08T15:30:36.554606Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1383: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2026-01-08T15:30:36.557179Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2026-01-08T15:30:36.561509Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:160: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2026-01-08T15:30:36.561705Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_shred_manager.cpp:84: [RootShredManager] Stop 2026-01-08T15:30:36.562899Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2026-01-08T15:30:36.562963Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2026-01-08T15:30:36.563076Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2026-01-08T15:30:36.563119Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2026-01-08T15:30:36.563222Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:213: TTxServerlessStorageBilling.Complete 2026-01-08T15:30:36.563378Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:7111: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2026-01-08T15:30:36.728749Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot" OperationType: ESchemeOpMkDir MkDir { Name: ".sys" } Internal: true FailOnExist: false } TxId: 281474976710657 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:30:36.733400Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at 
schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "auth_permissions" Type: EAuthPermissions } } TxId: 281474976710658 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:30:36.733567Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "auth_effective_permissions" Type: EAuthEffectivePermissions } } TxId: 281474976710659 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:30:36.733657Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "auth_users" Type: EAuthUsers } } TxId: 281474976710660 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:30:36.733747Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "pg_tables" Type: EPgTables } } TxId: 281474976710661 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:30:36.733830Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "ds_pdisks" Type: EPDisks } } TxId: 281474976710662 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:30:36.733891Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "auth_group_members" Type: EAuthGroupMembers } } TxId: 281474976710663 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:30:36.733960Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "top_partitions_one_minute" Type: ETopPartitionsByCpuOneMinute } } TxId: 281474976710664 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:30:36.734066Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "top_queries_by_request_units_one_hour" 
Type: ETopQueriesByRequestUnitsOneHour } } TxId: 281474976710665 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:30:36.734155Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "ds_vslots" Type: EVSlots } } TxId: 281474976710666 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:30:36.734228Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "top_partitions_by_tli_one_hour" Type: ETopPartitionsByTliOneHour } } TxId: 281474976710667 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:30:36.734308Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "nodes" Type: ENodes } } TxId: 281474976710668 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:30:36.734443Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "top_queries_by_cpu_time_one_hour" Type: ETopQueriesByCpuTimeOneHour } } TxId: 281474976710669 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:30:36.734545Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "top_queries_by_read_bytes_one_hour" Type: ETopQueriesByReadBytesOneHour } } TxId: 281474976710670 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:30:36.734646Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "top_queries_by_read_bytes_one_minute" Type: ETopQueriesByR ... 
pp:665: Send tablet strongly msg operationId: 112:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:112 msg type: 269090816 2026-01-08T15:30:37.831104Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1824: TOperation RegisterRelationByTabletId, TxId: 112, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 112 at step: 5000049 FAKE_COORDINATOR: advance: minStep5000049 State->FrontStep: 5000048 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 112 at step: 5000049 2026-01-08T15:30:37.831594Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:685: TTxOperationPlanStep Execute, stepId: 5000049, transactions count in step: 1, at schemeshard: 72057594046678944 2026-01-08T15:30:37.831704Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:689: TTxOperationPlanStep Execute, message: Transactions { TxId: 112 Coordinator: 72057594046316545 AckTo { RawX1: 136 RawX2: 4294969454 } } Step: 5000049 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2026-01-08T15:30:37.831743Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_drop_unsafe.cpp:47: TDropForceUnsafe TPropose, operationId: 112:0 HandleReply TEvOperationPlan, step: 5000049, at schemeshard: 72057594046678944 2026-01-08T15:30:37.831796Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5626: ExamineTreeVFS visit path id [OwnerId: 72057594046678944, LocalPathId: 43] name: USER_0 type: EPathTypeSubDomain state: EPathStateDrop stepDropped: 0 droppedTxId: 112 parent: [OwnerId: 72057594046678944, LocalPathId: 1] 2026-01-08T15:30:37.831825Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5642: ExamineTreeVFS run path id: [OwnerId: 72057594046678944, LocalPathId: 43] 2026-01-08T15:30:37.831939Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2700: Change state for txid 112:0 128 -> 130 2026-01-08T15:30:37.832103Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:600: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 2 2026-01-08T15:30:37.832151Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:600: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 43] was 2 2026-01-08T15:30:37.832881Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 112 2026-01-08T15:30:37.834956Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 112 FAKE_COORDINATOR: Erasing txId 112 2026-01-08T15:30:37.835623Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2026-01-08T15:30:37.835659Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 112, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2026-01-08T15:30:37.835794Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 112, path id: [OwnerId: 72057594046678944, LocalPathId: 43] 2026-01-08T15:30:37.835909Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at 
schemeshard: 72057594046678944 2026-01-08T15:30:37.835941Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:210:2211], at schemeshard: 72057594046678944, txId: 112, path id: 1 2026-01-08T15:30:37.836002Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:210:2211], at schemeshard: 72057594046678944, txId: 112, path id: 43 2026-01-08T15:30:37.836101Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:494: TTxOperationProgress Execute, operationId: 112:0, at schemeshard: 72057594046678944 2026-01-08T15:30:37.836137Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:418: [72057594046678944] TDeleteParts opId# 112:0 ProgressState 2026-01-08T15:30:37.836199Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:928: Part operation is done id#112:0 progress is 1/1 2026-01-08T15:30:37.836224Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1714: TOperation IsReadyToDone TxId: 112 ready parts: 1/1 2026-01-08T15:30:37.836265Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:928: Part operation is done id#112:0 progress is 1/1 2026-01-08T15:30:37.836293Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1714: TOperation IsReadyToDone TxId: 112 ready parts: 1/1 2026-01-08T15:30:37.836318Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1679: TOperation IsReadyToNotify, TxId: 112, ready parts: 1/1, is published: false 2026-01-08T15:30:37.836345Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1714: TOperation IsReadyToDone TxId: 112 ready parts: 1/1 2026-01-08T15:30:37.836374Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:995: Operation and all the parts is done, operation id: 112:0 2026-01-08T15:30:37.836396Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5577: RemoveTx for txid 112:0 2026-01-08T15:30:37.836446Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:611: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 43] was 3 2026-01-08T15:30:37.836475Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:1004: Publication still in progress, tx: 112, publications: 2, subscribers: 0 2026-01-08T15:30:37.836496Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:1011: Publication details: tx: 112, [OwnerId: 72057594046678944, LocalPathId: 1], 30 2026-01-08T15:30:37.836517Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:1011: Publication details: tx: 112, [OwnerId: 72057594046678944, LocalPathId: 43], 18446744073709551615 2026-01-08T15:30:37.837353Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6274: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 30 PathOwnerId: 72057594046678944, cookie: 112 2026-01-08T15:30:37.837454Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 30 PathOwnerId: 72057594046678944, cookie: 112 2026-01-08T15:30:37.837488Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 2, at schemeshard: 72057594046678944, txId: 112 
2026-01-08T15:30:37.837514Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 112, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], version: 30 2026-01-08T15:30:37.837543Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:611: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 3 2026-01-08T15:30:37.838420Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6274: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 43 Version: 18446744073709551615 PathOwnerId: 72057594046678944, cookie: 112 2026-01-08T15:30:37.838495Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 43 Version: 18446744073709551615 PathOwnerId: 72057594046678944, cookie: 112 2026-01-08T15:30:37.838526Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046678944, txId: 112 2026-01-08T15:30:37.838556Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 112, pathId: [OwnerId: 72057594046678944, LocalPathId: 43], version: 18446744073709551615 2026-01-08T15:30:37.838584Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:611: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 43] was 2 2026-01-08T15:30:37.838652Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 112, subscribers: 0 2026-01-08T15:30:37.839234Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:205: TTxCleanDroppedSubDomains Execute, 1 paths in candidate queue, at schemeshard: 72057594046678944 2026-01-08T15:30:37.839285Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:224: TTxCleanDroppedPaths: PersistRemoveSubDomain for PathId# [OwnerId: 72057594046678944, LocalPathId: 43], at schemeshard: 72057594046678944 2026-01-08T15:30:37.839374Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:611: DecrementPathDbRefCount reason for pathId [OwnerId: 72057594046678944, LocalPathId: 43] was 1 2026-01-08T15:30:37.839819Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:123: TTxCleanDroppedPaths Execute, 1 paths in candidate queue, at schemeshard: 72057594046678944 2026-01-08T15:30:37.839856Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:137: TTxCleanDroppedPaths: PersistRemovePath for PathId# [OwnerId: 72057594046678944, LocalPathId: 43], at schemeshard: 72057594046678944 2026-01-08T15:30:37.839909Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:611: DecrementPathDbRefCount reason remove path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 2 2026-01-08T15:30:37.841883Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 112 2026-01-08T15:30:37.843639Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 112 2026-01-08T15:30:37.843751Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: 
schemeshard__clean_pathes.cpp:252: TTxCleanDroppedSubDomains Complete, done PersistRemoveSubDomain for 1 subdomains, skipped 0, left 0 candidates, at schemeshard: 72057594046678944 2026-01-08T15:30:37.843832Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:160: TTxCleanDroppedPaths Complete, done PersistRemovePath for 1 paths, skipped 0, left 0 candidates, at schemeshard: 72057594046678944
TestModificationResult got TxId: 112, wait until txId: 112
TestWaitNotification wait txId: 112
2026-01-08T15:30:37.844209Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:213: tests -- TTxNotificationSubscriber for txId 112: send EvNotifyTxCompletion 2026-01-08T15:30:37.844244Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:259: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 112 2026-01-08T15:30:37.844843Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:123: NotifyTxCompletion, unknown transaction, txId: 112, at schemeshard: 72057594046678944 2026-01-08T15:30:37.844942Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:230: tests -- TTxNotificationSubscriber for txId 112: got EvNotifyTxCompletionResult 2026-01-08T15:30:37.844981Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:239: tests -- TTxNotificationSubscriber for txId 112: satisfy waiter [1:1060:3050]
TestWaitNotification: OK eventTxId 112
|86.2%| [TM] {BAZEL_UPLOAD} ydb/core/tx/schemeshard/ut_auditsettings/unittest
|86.2%| [TA] $(B)/ydb/core/blobstorage/ut_vdisk2/test-results/unittest/{meta.json ... results_accumulator.log}
|86.2%| [TA] {BAZEL_UPLOAD} $(B)/ydb/core/blobstorage/ut_vdisk2/test-results/unittest/{meta.json ... results_accumulator.log}
>> DataShardStats::NoData [GOOD]
>> TSchemeShardAuditSettings::CreateExtSubdomain [GOOD]
>> DataShardStats::Follower
>> TSchemeShardAuditSettings::AlterSubdomain [GOOD]
|86.2%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/sqs/messaging/py3test >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_delete_message_batch_deduplicates_receipt_handle[tables_format_v0-fifo] [GOOD]
|86.2%| [TM] {BAZEL_UPLOAD} ydb/tests/functional/sqs/messaging/py3test
>> TCmsTest::ManualRequestApprovalLockingAllNodes
|86.2%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/kqp/ut/tx/ydb-core-kqp-ut-tx
|86.2%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/kqp/ut/tx/ydb-core-kqp-ut-tx
|86.2%| [TA] {RESULT} $(B)/ydb/core/blobstorage/ut_vdisk2/test-results/unittest/{meta.json ... results_accumulator.log}
|86.2%| [LD] {RESULT} $(B)/ydb/core/kqp/ut/tx/ydb-core-kqp-ut-tx
------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_auditsettings/unittest >> TSchemeShardAuditSettings::CreateExtSubdomain [GOOD]
Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2143]
IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2143]
Leader for TabletID 72057594046678944 is [1:129:2154] sender: [1:130:2058] recipient: [1:112:2143]
2026-01-08T15:30:37.840574Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7938: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2026-01-08T15:30:37.840659Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7966: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2026-01-08T15:30:37.840720Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7852: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2026-01-08T15:30:37.840757Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7868: OperationsProcessing config: using default configuration 2026-01-08T15:30:37.840837Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7874: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2026-01-08T15:30:37.840881Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7874: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2026-01-08T15:30:37.840935Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7998: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2026-01-08T15:30:37.840999Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_shred_manager.cpp:39: [RootShredManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, ShredInterval# 604800.000000s, ShredBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2026-01-08T15:30:37.841894Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:8069: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2026-01-08T15:30:37.842226Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2026-01-08T15:30:37.931772Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7755: Cannot subscribe to console configs 2026-01-08T15:30:37.931829Z node 1 :IMPORT WARN: schemeshard_import.cpp:440: Table profiles were not loaded 2026-01-08T15:30:37.942638Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2026-01-08T15:30:37.942954Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2026-01-08T15:30:37.943141Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2026-01-08T15:30:37.955700Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2026-01-08T15:30:37.956166Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:464: Clear TempDirsState with owners number: 0 2026-01-08T15:30:37.956956Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1383: TTxInit, SS 
hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2026-01-08T15:30:37.957712Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2026-01-08T15:30:37.960974Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:160: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2026-01-08T15:30:37.961150Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_shred_manager.cpp:84: [RootShredManager] Stop 2026-01-08T15:30:37.962353Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2026-01-08T15:30:37.962428Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2026-01-08T15:30:37.962546Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2026-01-08T15:30:37.962592Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2026-01-08T15:30:37.962695Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:213: TTxServerlessStorageBilling.Complete 2026-01-08T15:30:37.962861Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:7111: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2026-01-08T15:30:38.128760Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot" OperationType: ESchemeOpMkDir MkDir { Name: ".sys" } Internal: true FailOnExist: false } TxId: 281474976710657 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:30:38.129831Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "auth_permissions" Type: EAuthPermissions } } TxId: 281474976710658 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:30:38.129974Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "auth_effective_permissions" Type: EAuthEffectivePermissions } } TxId: 281474976710659 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:30:38.130075Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "auth_users" Type: EAuthUsers } } TxId: 281474976710660 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:30:38.130146Z node 1 
:FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "pg_tables" Type: EPgTables } } TxId: 281474976710661 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:30:38.130244Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "ds_pdisks" Type: EPDisks } } TxId: 281474976710662 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:30:38.130339Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "auth_group_members" Type: EAuthGroupMembers } } TxId: 281474976710663 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:30:38.130430Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "top_partitions_one_minute" Type: ETopPartitionsByCpuOneMinute } } TxId: 281474976710664 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:30:38.130533Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "top_queries_by_request_units_one_hour" Type: ETopQueriesByRequestUnitsOneHour } } TxId: 281474976710665 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:30:38.130690Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "ds_vslots" Type: EVSlots } } TxId: 281474976710666 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:30:38.130839Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "top_partitions_by_tli_one_hour" Type: ETopPartitionsByTliOneHour } } TxId: 281474976710667 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:30:38.130915Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: 
"/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "nodes" Type: ENodes } } TxId: 281474976710668 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:30:38.131013Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "top_queries_by_cpu_time_one_hour" Type: ETopQueriesByCpuTimeOneHour } } TxId: 281474976710669 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:30:38.131105Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "top_queries_by_read_bytes_one_hour" Type: ETopQueriesByReadBytesOneHour } } TxId: 281474976710670 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:30:38.131225Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "top_queries_by_read_bytes_one_minute" Type: ETopQueriesByR ... 112 at step: 5000049 FAKE_COORDINATOR: advance: minStep5000049 State->FrontStep: 5000048 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 112 at step: 5000049 2026-01-08T15:30:39.347649Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:685: TTxOperationPlanStep Execute, stepId: 5000049, transactions count in step: 1, at schemeshard: 72057594046678944 2026-01-08T15:30:39.347735Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:689: TTxOperationPlanStep Execute, message: Transactions { TxId: 112 Coordinator: 72057594046316545 AckTo { RawX1: 136 RawX2: 4294969454 } } Step: 5000049 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2026-01-08T15:30:39.347774Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_drop_extsubdomain.cpp:190: TDropExtSubdomain TPropose, operationId: 112:0 HandleReply TEvOperationPlan, step: 5000049, at schemeshard: 72057594046678944 2026-01-08T15:30:39.347860Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5626: ExamineTreeVFS visit path id [OwnerId: 72057594046678944, LocalPathId: 43] name: USER_0 type: EPathTypeExtSubDomain state: EPathStateDrop stepDropped: 0 droppedTxId: 112 parent: [OwnerId: 72057594046678944, LocalPathId: 1] 2026-01-08T15:30:39.347894Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5642: ExamineTreeVFS run path id: [OwnerId: 72057594046678944, LocalPathId: 43] 2026-01-08T15:30:39.347939Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2700: Change state for txid 112:0 128 -> 134 2026-01-08T15:30:39.349237Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 112 2026-01-08T15:30:39.350726Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, 
cookie: 112 2026-01-08T15:30:39.351756Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:494: TTxOperationProgress Execute, operationId: 112:0, at schemeshard: 72057594046678944 2026-01-08T15:30:39.351815Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_drop_extsubdomain.cpp:137: TDropExtSubdomain TDeleteExternalShards, operationId: 112:0 ProgressState, at schemeshard: 72057594046678944 2026-01-08T15:30:39.351964Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2700: Change state for txid 112:0 134 -> 135 2026-01-08T15:30:39.352103Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:600: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 2 2026-01-08T15:30:39.352173Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:600: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 43] was 2 FAKE_COORDINATOR: Erasing txId 112 2026-01-08T15:30:39.353897Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2026-01-08T15:30:39.353950Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 112, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2026-01-08T15:30:39.354105Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 112, path id: [OwnerId: 72057594046678944, LocalPathId: 43] 2026-01-08T15:30:39.354217Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2026-01-08T15:30:39.354262Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:210:2211], at schemeshard: 72057594046678944, txId: 112, path id: 1 2026-01-08T15:30:39.354305Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:210:2211], at schemeshard: 72057594046678944, txId: 112, path id: 43 2026-01-08T15:30:39.354729Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:494: TTxOperationProgress Execute, operationId: 112:0, at schemeshard: 72057594046678944 2026-01-08T15:30:39.354771Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_drop_extsubdomain.cpp:29: [72057594046678944] TDeleteSubdomainSystemShards opId# 112:0 ProgressState 2026-01-08T15:30:39.354800Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2700: Change state for txid 112:0 135 -> 240 2026-01-08T15:30:39.355402Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6274: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 30 PathOwnerId: 72057594046678944, cookie: 112 2026-01-08T15:30:39.355481Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 30 PathOwnerId: 72057594046678944, cookie: 112 2026-01-08T15:30:39.355512Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:108: Operation in-flight, at schemeshard: 72057594046678944, txId: 112 2026-01-08T15:30:39.355593Z node 1 :FLAT_TX_SCHEMESHARD INFO: 
schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 112, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], version: 30 2026-01-08T15:30:39.355631Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:611: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 3 2026-01-08T15:30:39.357009Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6274: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 43 Version: 18446744073709551615 PathOwnerId: 72057594046678944, cookie: 112 2026-01-08T15:30:39.357082Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 43 Version: 18446744073709551615 PathOwnerId: 72057594046678944, cookie: 112 2026-01-08T15:30:39.357117Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:108: Operation in-flight, at schemeshard: 72057594046678944, txId: 112 2026-01-08T15:30:39.357156Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 112, pathId: [OwnerId: 72057594046678944, LocalPathId: 43], version: 18446744073709551615 2026-01-08T15:30:39.357195Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:611: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 43] was 3 2026-01-08T15:30:39.357262Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1679: TOperation IsReadyToNotify, TxId: 112, ready parts: 0/1, is published: true 2026-01-08T15:30:39.358857Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:494: TTxOperationProgress Execute, operationId: 112:0, at schemeshard: 72057594046678944 2026-01-08T15:30:39.358910Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:497: [72057594046678944] TDone opId# 112:0 ProgressState 2026-01-08T15:30:39.358982Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:928: Part operation is done id#112:0 progress is 1/1 2026-01-08T15:30:39.359012Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1714: TOperation IsReadyToDone TxId: 112 ready parts: 1/1 2026-01-08T15:30:39.359048Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:928: Part operation is done id#112:0 progress is 1/1 2026-01-08T15:30:39.359073Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1714: TOperation IsReadyToDone TxId: 112 ready parts: 1/1 2026-01-08T15:30:39.359119Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1679: TOperation IsReadyToNotify, TxId: 112, ready parts: 1/1, is published: true 2026-01-08T15:30:39.359172Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1714: TOperation IsReadyToDone TxId: 112 ready parts: 1/1 2026-01-08T15:30:39.359207Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:995: Operation and all the parts is done, operation id: 112:0 2026-01-08T15:30:39.359246Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5577: RemoveTx for txid 112:0 2026-01-08T15:30:39.359323Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:611: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 43] was 2 2026-01-08T15:30:39.359710Z node 1 
:FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:205: TTxCleanDroppedSubDomains Execute, 1 paths in candidate queue, at schemeshard: 72057594046678944 2026-01-08T15:30:39.359750Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:224: TTxCleanDroppedPaths: PersistRemoveSubDomain for PathId# [OwnerId: 72057594046678944, LocalPathId: 43], at schemeshard: 72057594046678944 2026-01-08T15:30:39.359798Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:611: DecrementPathDbRefCount reason for pathId [OwnerId: 72057594046678944, LocalPathId: 43] was 1 2026-01-08T15:30:39.360068Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:123: TTxCleanDroppedPaths Execute, 1 paths in candidate queue, at schemeshard: 72057594046678944 2026-01-08T15:30:39.360114Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:137: TTxCleanDroppedPaths: PersistRemovePath for PathId# [OwnerId: 72057594046678944, LocalPathId: 43], at schemeshard: 72057594046678944 2026-01-08T15:30:39.360176Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:611: DecrementPathDbRefCount reason remove path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 2 2026-01-08T15:30:39.361156Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 112 2026-01-08T15:30:39.362208Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 112 2026-01-08T15:30:39.364350Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:252: TTxCleanDroppedSubDomains Complete, done PersistRemoveSubDomain for 1 subdomains, skipped 0, left 0 candidates, at schemeshard: 72057594046678944 2026-01-08T15:30:39.364446Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:160: TTxCleanDroppedPaths Complete, done PersistRemovePath for 1 paths, skipped 0, left 0 candidates, at schemeshard: 72057594046678944
TestModificationResult got TxId: 112, wait until txId: 112
TestWaitNotification wait txId: 112
2026-01-08T15:30:39.364787Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:213: tests -- TTxNotificationSubscriber for txId 112: send EvNotifyTxCompletion 2026-01-08T15:30:39.364826Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:259: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 112 2026-01-08T15:30:39.365342Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:123: NotifyTxCompletion, unknown transaction, txId: 112, at schemeshard: 72057594046678944 2026-01-08T15:30:39.365441Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:230: tests -- TTxNotificationSubscriber for txId 112: got EvNotifyTxCompletionResult 2026-01-08T15:30:39.365472Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:239: tests -- TTxNotificationSubscriber for txId 112: satisfy waiter [1:1060:3050]
TestWaitNotification: OK eventTxId 112
|86.2%| [TM] {BAZEL_UPLOAD} ydb/core/tx/schemeshard/ut_auditsettings/unittest
>> TMLPDLQMoverTests::MoveToDLQ_BigMessage [GOOD]
>> TMLPDLQMoverTests::MoveToDLQ_ManyMessages
>> CoordinatorVolatile::MediatorReconnectPlanRace [GOOD]
>> CoordinatorVolatile::CoordinatorMigrateUncommittedVolatileTx
>> QuoterWithKesusTest::GetsQuotaAfterPause [GOOD]
>> QuoterWithKesusTest::GetsSeveralQuotas
------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_auditsettings/unittest >> TSchemeShardAuditSettings::AlterSubdomain [GOOD]
Test command err: Leader 
for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2143] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2143] Leader for TabletID 72057594046678944 is [1:129:2154] sender: [1:130:2058] recipient: [1:112:2143] 2026-01-08T15:30:33.333183Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7938: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2026-01-08T15:30:33.333307Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7966: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2026-01-08T15:30:33.333352Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7852: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2026-01-08T15:30:33.333392Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7868: OperationsProcessing config: using default configuration 2026-01-08T15:30:33.333462Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7874: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2026-01-08T15:30:33.333501Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7874: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2026-01-08T15:30:33.333556Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7998: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2026-01-08T15:30:33.333639Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_shred_manager.cpp:39: [RootShredManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, ShredInterval# 604800.000000s, ShredBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2026-01-08T15:30:33.334674Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:8069: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2026-01-08T15:30:33.335009Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2026-01-08T15:30:33.551226Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7755: Cannot subscribe to console configs 2026-01-08T15:30:33.551286Z node 1 :IMPORT WARN: schemeshard_import.cpp:440: Table profiles were not loaded 2026-01-08T15:30:33.580294Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2026-01-08T15:30:33.580636Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2026-01-08T15:30:33.580854Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2026-01-08T15:30:33.611386Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2026-01-08T15:30:33.611866Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:464: Clear TempDirsState with owners number: 0 2026-01-08T15:30:33.612631Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1383: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2026-01-08T15:30:33.613460Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 
72057594046678944 2026-01-08T15:30:33.627496Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:160: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2026-01-08T15:30:33.627720Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_shred_manager.cpp:84: [RootShredManager] Stop 2026-01-08T15:30:33.632767Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2026-01-08T15:30:33.632886Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2026-01-08T15:30:33.633063Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2026-01-08T15:30:33.633118Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2026-01-08T15:30:33.633222Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:213: TTxServerlessStorageBilling.Complete 2026-01-08T15:30:33.633409Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:7111: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2026-01-08T15:30:33.964470Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot" OperationType: ESchemeOpMkDir MkDir { Name: ".sys" } Internal: true FailOnExist: false } TxId: 281474976710657 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:30:33.965603Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "auth_permissions" Type: EAuthPermissions } } TxId: 281474976710658 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:30:33.965751Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "auth_effective_permissions" Type: EAuthEffectivePermissions } } TxId: 281474976710659 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:30:33.965863Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "auth_users" Type: EAuthUsers } } TxId: 281474976710660 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:30:33.965934Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false 
CreateSysView { Name: "pg_tables" Type: EPgTables } } TxId: 281474976710661 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:30:33.966007Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "ds_pdisks" Type: EPDisks } } TxId: 281474976710662 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:30:33.966073Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "auth_group_members" Type: EAuthGroupMembers } } TxId: 281474976710663 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:30:33.966167Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "top_partitions_one_minute" Type: ETopPartitionsByCpuOneMinute } } TxId: 281474976710664 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:30:33.966274Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "top_queries_by_request_units_one_hour" Type: ETopQueriesByRequestUnitsOneHour } } TxId: 281474976710665 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:30:33.970600Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "ds_vslots" Type: EVSlots } } TxId: 281474976710666 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:30:33.970736Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "top_partitions_by_tli_one_hour" Type: ETopPartitionsByTliOneHour } } TxId: 281474976710667 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:30:33.970861Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "nodes" Type: ENodes } } TxId: 281474976710668 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:30:33.970947Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "top_queries_by_cpu_time_one_hour" Type: ETopQueriesByCpuTimeOneHour } } TxId: 281474976710669 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:30:33.971053Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "top_queries_by_read_bytes_one_hour" Type: ETopQueriesByReadBytesOneHour } } TxId: 281474976710670 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:30:33.971134Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "top_queries_by_read_bytes_one_minute" Type: ETopQueriesByR ... 65: Send tablet strongly msg operationId: 175:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:175 msg type: 269090816 2026-01-08T15:30:40.047476Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1824: TOperation RegisterRelationByTabletId, TxId: 175, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 175 at step: 5000112 FAKE_COORDINATOR: advance: minStep5000112 State->FrontStep: 5000111 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 175 at step: 5000112 2026-01-08T15:30:40.048034Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:685: TTxOperationPlanStep Execute, stepId: 5000112, transactions count in step: 1, at schemeshard: 72057594046678944 2026-01-08T15:30:40.048141Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:689: TTxOperationPlanStep Execute, message: Transactions { TxId: 175 Coordinator: 72057594046316545 AckTo { RawX1: 136 RawX2: 4294969454 } } Step: 5000112 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2026-01-08T15:30:40.048208Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_drop_unsafe.cpp:47: TDropForceUnsafe TPropose, operationId: 175:0 HandleReply TEvOperationPlan, step: 5000112, at schemeshard: 72057594046678944 2026-01-08T15:30:40.048252Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5626: ExamineTreeVFS visit path id [OwnerId: 72057594046678944, LocalPathId: 62] name: USER_0 type: EPathTypeSubDomain state: EPathStateDrop stepDropped: 0 droppedTxId: 175 parent: [OwnerId: 72057594046678944, LocalPathId: 1] 2026-01-08T15:30:40.048281Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5642: ExamineTreeVFS run path id: [OwnerId: 72057594046678944, LocalPathId: 62] 2026-01-08T15:30:40.048391Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2700: Change state for txid 175:0 128 -> 130 2026-01-08T15:30:40.048596Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:600: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 2 2026-01-08T15:30:40.048670Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:600: 
IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 62] was 2 2026-01-08T15:30:40.049973Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 175 2026-01-08T15:30:40.051933Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 175 FAKE_COORDINATOR: Erasing txId 175 2026-01-08T15:30:40.055826Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2026-01-08T15:30:40.055879Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 175, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2026-01-08T15:30:40.056071Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 175, path id: [OwnerId: 72057594046678944, LocalPathId: 62] 2026-01-08T15:30:40.056235Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2026-01-08T15:30:40.056280Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:210:2211], at schemeshard: 72057594046678944, txId: 175, path id: 1 2026-01-08T15:30:40.056339Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:210:2211], at schemeshard: 72057594046678944, txId: 175, path id: 62 2026-01-08T15:30:40.056639Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:494: TTxOperationProgress Execute, operationId: 175:0, at schemeshard: 72057594046678944 2026-01-08T15:30:40.056688Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:418: [72057594046678944] TDeleteParts opId# 175:0 ProgressState 2026-01-08T15:30:40.056755Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:928: Part operation is done id#175:0 progress is 1/1 2026-01-08T15:30:40.056783Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1714: TOperation IsReadyToDone TxId: 175 ready parts: 1/1 2026-01-08T15:30:40.056817Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:928: Part operation is done id#175:0 progress is 1/1 2026-01-08T15:30:40.056844Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1714: TOperation IsReadyToDone TxId: 175 ready parts: 1/1 2026-01-08T15:30:40.056885Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1679: TOperation IsReadyToNotify, TxId: 175, ready parts: 1/1, is published: false 2026-01-08T15:30:40.056927Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1714: TOperation IsReadyToDone TxId: 175 ready parts: 1/1 2026-01-08T15:30:40.056959Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:995: Operation and all the parts is done, operation id: 175:0 2026-01-08T15:30:40.056987Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5577: RemoveTx for txid 175:0 2026-01-08T15:30:40.057087Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:611: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, 
LocalPathId: 62] was 3 2026-01-08T15:30:40.057128Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:1004: Publication still in progress, tx: 175, publications: 2, subscribers: 0 2026-01-08T15:30:40.057157Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:1011: Publication details: tx: 175, [OwnerId: 72057594046678944, LocalPathId: 1], 106 2026-01-08T15:30:40.057185Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:1011: Publication details: tx: 175, [OwnerId: 72057594046678944, LocalPathId: 62], 18446744073709551615 2026-01-08T15:30:40.058227Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6274: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 106 PathOwnerId: 72057594046678944, cookie: 175 2026-01-08T15:30:40.058353Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 106 PathOwnerId: 72057594046678944, cookie: 175 2026-01-08T15:30:40.058390Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 2, at schemeshard: 72057594046678944, txId: 175 2026-01-08T15:30:40.058423Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 175, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], version: 106 2026-01-08T15:30:40.058471Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:611: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 3 2026-01-08T15:30:40.060009Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6274: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 62 Version: 18446744073709551615 PathOwnerId: 72057594046678944, cookie: 175 2026-01-08T15:30:40.060089Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 62 Version: 18446744073709551615 PathOwnerId: 72057594046678944, cookie: 175 2026-01-08T15:30:40.060118Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046678944, txId: 175 2026-01-08T15:30:40.060153Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 175, pathId: [OwnerId: 72057594046678944, LocalPathId: 62], version: 18446744073709551615 2026-01-08T15:30:40.060205Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:611: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 62] was 2 2026-01-08T15:30:40.060291Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 175, subscribers: 0 2026-01-08T15:30:40.060667Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:205: TTxCleanDroppedSubDomains Execute, 1 paths in candidate queue, at schemeshard: 72057594046678944 2026-01-08T15:30:40.060707Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:224: TTxCleanDroppedPaths: PersistRemoveSubDomain 
for PathId# [OwnerId: 72057594046678944, LocalPathId: 62], at schemeshard: 72057594046678944 2026-01-08T15:30:40.060791Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:611: DecrementPathDbRefCount reason for pathId [OwnerId: 72057594046678944, LocalPathId: 62] was 1 2026-01-08T15:30:40.061312Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:123: TTxCleanDroppedPaths Execute, 1 paths in candidate queue, at schemeshard: 72057594046678944 2026-01-08T15:30:40.061351Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:137: TTxCleanDroppedPaths: PersistRemovePath for PathId# [OwnerId: 72057594046678944, LocalPathId: 62], at schemeshard: 72057594046678944 2026-01-08T15:30:40.061419Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:611: DecrementPathDbRefCount reason remove path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 2 2026-01-08T15:30:40.063271Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 175 2026-01-08T15:30:40.065392Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 175 2026-01-08T15:30:40.065491Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:252: TTxCleanDroppedSubDomains Complete, done PersistRemoveSubDomain for 1 subdomains, skipped 0, left 0 candidates, at schemeshard: 72057594046678944 2026-01-08T15:30:40.065573Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:160: TTxCleanDroppedPaths Complete, done PersistRemovePath for 1 paths, skipped 0, left 0 candidates, at schemeshard: 72057594046678944 TestModificationResult got TxId: 175, wait until txId: 175 TestWaitNotification wait txId: 175 2026-01-08T15:30:40.066786Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:213: tests -- TTxNotificationSubscriber for txId 175: send EvNotifyTxCompletion 2026-01-08T15:30:40.066828Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:259: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 175 2026-01-08T15:30:40.068197Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:123: NotifyTxCompletion, unknown transaction, txId: 175, at schemeshard: 72057594046678944 2026-01-08T15:30:40.068313Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:230: tests -- TTxNotificationSubscriber for txId 175: got EvNotifyTxCompletionResult 2026-01-08T15:30:40.068347Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:239: tests -- TTxNotificationSubscriber for txId 175: satisfy waiter [1:2903:4892] TestWaitNotification: OK eventTxId 175 |86.2%| [TM] {BAZEL_UPLOAD} ydb/core/tx/schemeshard/ut_auditsettings/unittest >> TPQTest::TestSourceIdDropByUserWrites |86.2%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/fq/libs/common/ut/ydb-core-fq-libs-common-ut |86.2%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/fq/libs/common/ut/ydb-core-fq-libs-common-ut |86.2%| [LD] {RESULT} $(B)/ydb/core/fq/libs/common/ut/ydb-core-fq-libs-common-ut |86.2%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/script_execution/py3test >> test_update_script_tables.py::TestUpdateScriptTablesYdb::test_recreate_tables[DROP TABLE {}-`.metadata/script_executions`] [GOOD] |86.2%| [TM] {BAZEL_UPLOAD} ydb/tests/functional/script_execution/py3test >> DataStreams::TestControlPlaneAndMeteringData |86.2%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/scheme_shard/py3test >> 
test_scheme_shard_operations.py::TestSchemeShardSimpleOps::test_ydb_remove_directory_that_does_not_exist_failure [GOOD] |86.2%| [TM] {BAZEL_UPLOAD} ydb/tests/functional/scheme_shard/py3test >> TCmsTest::ManualRequestApprovalLockingAllNodes [GOOD] >> TCmsTest::ManualRequestApprovalWithPartialAlreadyApproved >> DataStreams::TestReservedResourcesMetering |86.2%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/sqs/messaging/py3test >> test_fifo_messaging.py::TestSqsFifoMessagingWithTenant::test_deduplication[tables_format_v1-content_based] [GOOD] |86.2%| [TM] {BAZEL_UPLOAD} ydb/tests/functional/sqs/messaging/py3test |86.2%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/kesus/tablet/ut/ydb-core-kesus-tablet-ut |86.2%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/kesus/tablet/ut/ydb-core-kesus-tablet-ut |86.2%| [LD] {RESULT} $(B)/ydb/core/kesus/tablet/ut/ydb-core-kesus-tablet-ut >> DataStreams::TestStreamStorageRetention >> DataStreams::TestPutRecordsOfAnauthorizedUser >> test_update_script_tables.py::TestUpdateScriptTablesYdb::test_recreate_tables[ALTER TABLE {} DROP COLUMN syntax-`.metadata/script_executions`] [GOOD] >> TSelectFromViewTest::OneTableUsingRelativeName [GOOD] >> TSelectFromViewTest::DisabledFeatureFlag >> TCmsTest::ManualRequestApprovalWithPartialAlreadyApproved [GOOD] >> TCmsTest::Notifications >> test_fifo_messaging.py::TestSqsFifoMessagingWithTenant::test_receive_attempt_reloads_same_messages[tables_format_v0-after_crutch_batch] [GOOD] >> test_fifo_messaging.py::TestSqsFifoMessagingWithTenant::test_receive_attempt_reloads_same_messages[tables_format_v0-standard_mode] >> DataShardVolatile::DistributedWriteThenDropTable >> KeyValueGRPCService::SimpleWriteReadWithGetChannelStatusV2 [GOOD] >> KeyValueGRPCService::SimpleWriteReadOverrunV1 >> TSchemeShardAuditSettings::AlterExtSubdomain-ExternalSchemeShard-true [GOOD] >> TColumnShardTestReadWrite::CompactionInGranule_PKInt32_Reboot |86.2%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/kqp/federated_query/ut_service/slow/ydb-core-kqp-federated_query-ut_service-slow |86.2%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/kqp/federated_query/ut_service/slow/ydb-core-kqp-federated_query-ut_service-slow |86.3%| [LD] {RESULT} $(B)/ydb/core/kqp/federated_query/ut_service/slow/ydb-core-kqp-federated_query-ut_service-slow >> DataShardVolatile::DistributedWrite >> CoordinatorVolatile::CoordinatorMigrateUncommittedVolatileTx [GOOD] >> CoordinatorVolatile::CoordinatorRestartWithEnqueuedVolatileStep >> TCmsTest::Notifications [GOOD] >> TCmsTest::Mirror3dcPermissions >> TColumnShardTestReadWrite::CompactionSplitGranule_PKInt64 >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_can_read_new_written_data_on_visibility_timeout[tables_format_v0] [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_auditsettings/unittest >> TSchemeShardAuditSettings::AlterExtSubdomain-ExternalSchemeShard-true [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2143] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2143] Leader for TabletID 72057594046678944 is [1:129:2154] sender: [1:130:2058] recipient: [1:112:2143] 2026-01-08T15:30:34.492339Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7938: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, 
MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2026-01-08T15:30:34.492423Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7966: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2026-01-08T15:30:34.492461Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7852: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2026-01-08T15:30:34.492506Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7868: OperationsProcessing config: using default configuration 2026-01-08T15:30:34.492573Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7874: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2026-01-08T15:30:34.492628Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7874: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2026-01-08T15:30:34.492682Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7998: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2026-01-08T15:30:34.492739Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_shred_manager.cpp:39: [RootShredManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, ShredInterval# 604800.000000s, ShredBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2026-01-08T15:30:34.493422Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:8069: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2026-01-08T15:30:34.493629Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2026-01-08T15:30:34.587170Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7755: Cannot subscribe to console configs 2026-01-08T15:30:34.587234Z node 1 :IMPORT WARN: schemeshard_import.cpp:440: Table profiles were not loaded 2026-01-08T15:30:34.600971Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2026-01-08T15:30:34.601308Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2026-01-08T15:30:34.601479Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2026-01-08T15:30:34.639699Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2026-01-08T15:30:34.640177Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:464: Clear TempDirsState with owners number: 0 2026-01-08T15:30:34.640846Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1383: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2026-01-08T15:30:34.641815Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2026-01-08T15:30:34.650108Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:160: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2026-01-08T15:30:34.650294Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_shred_manager.cpp:84: [RootShredManager] Stop 2026-01-08T15:30:34.651450Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2026-01-08T15:30:34.651514Z node 1 :FLAT_TX_SCHEMESHARD INFO: 
schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2026-01-08T15:30:34.651626Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2026-01-08T15:30:34.651667Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2026-01-08T15:30:34.651796Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:213: TTxServerlessStorageBilling.Complete 2026-01-08T15:30:34.651969Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:7111: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2026-01-08T15:30:34.819731Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot" OperationType: ESchemeOpMkDir MkDir { Name: ".sys" } Internal: true FailOnExist: false } TxId: 281474976710657 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:30:34.829768Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "auth_permissions" Type: EAuthPermissions } } TxId: 281474976710658 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:30:34.829965Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "auth_effective_permissions" Type: EAuthEffectivePermissions } } TxId: 281474976710659 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:30:34.830060Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "auth_users" Type: EAuthUsers } } TxId: 281474976710660 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:30:34.830130Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "pg_tables" Type: EPgTables } } TxId: 281474976710661 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:30:34.830225Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "ds_pdisks" Type: EPDisks } } TxId: 281474976710662 Owner: 
"metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:30:34.830290Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "auth_group_members" Type: EAuthGroupMembers } } TxId: 281474976710663 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:30:34.830397Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "top_partitions_one_minute" Type: ETopPartitionsByCpuOneMinute } } TxId: 281474976710664 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:30:34.830484Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "top_queries_by_request_units_one_hour" Type: ETopQueriesByRequestUnitsOneHour } } TxId: 281474976710665 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:30:34.830570Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "ds_vslots" Type: EVSlots } } TxId: 281474976710666 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:30:34.830640Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "top_partitions_by_tli_one_hour" Type: ETopPartitionsByTliOneHour } } TxId: 281474976710667 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:30:34.830706Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "nodes" Type: ENodes } } TxId: 281474976710668 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:30:34.830786Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "top_queries_by_cpu_time_one_hour" Type: ETopQueriesByCpuTimeOneHour } } TxId: 281474976710669 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:30:34.830878Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: 
SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "top_queries_by_read_bytes_one_hour" Type: ETopQueriesByReadBytesOneHour } } TxId: 281474976710670 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:30:34.830957Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "top_queries_by_read_bytes_one_minute" Type: ETopQueriesByR ... 954201Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_drop_extsubdomain.cpp:29: [72057594046678944] TDeleteSubdomainSystemShards opId# 175:0 ProgressState 2026-01-08T15:30:47.954237Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2700: Change state for txid 175:0 135 -> 240 2026-01-08T15:30:47.955203Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6274: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 106 PathOwnerId: 72057594046678944, cookie: 175 2026-01-08T15:30:47.955310Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 106 PathOwnerId: 72057594046678944, cookie: 175 2026-01-08T15:30:47.955344Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:108: Operation in-flight, at schemeshard: 72057594046678944, txId: 175 2026-01-08T15:30:47.955377Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 175, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], version: 106 2026-01-08T15:30:47.955412Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:611: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 3 2026-01-08T15:30:47.956532Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6274: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 62 Version: 18446744073709551615 PathOwnerId: 72057594046678944, cookie: 175 2026-01-08T15:30:47.970747Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 62 Version: 18446744073709551615 PathOwnerId: 72057594046678944, cookie: 175 2026-01-08T15:30:47.970836Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:108: Operation in-flight, at schemeshard: 72057594046678944, txId: 175 2026-01-08T15:30:47.970888Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 175, pathId: [OwnerId: 72057594046678944, LocalPathId: 62], version: 18446744073709551615 2026-01-08T15:30:47.970927Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:611: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 62] was 6 2026-01-08T15:30:47.971046Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard__operation.cpp:1679: TOperation IsReadyToNotify, TxId: 175, ready parts: 0/1, is published: true 2026-01-08T15:30:47.976425Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_shard_deleter.cpp:20: SendDeleteRequests, shardsToDelete 3, to hive 72057594037968897, at schemeshard 72057594046678944 2026-01-08T15:30:47.976521Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_shard_deleter.cpp:47: Free shard 72057594046678944:74 hive 72057594037968897 at ss 72057594046678944 2026-01-08T15:30:47.976631Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_shard_deleter.cpp:47: Free shard 72057594046678944:73 hive 72057594037968897 at ss 72057594046678944 2026-01-08T15:30:47.976659Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_shard_deleter.cpp:47: Free shard 72057594046678944:75 hive 72057594037968897 at ss 72057594046678944 2026-01-08T15:30:47.977313Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:494: TTxOperationProgress Execute, operationId: 175:0, at schemeshard: 72057594046678944 2026-01-08T15:30:47.977363Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:497: [72057594046678944] TDone opId# 175:0 ProgressState 2026-01-08T15:30:47.977441Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:928: Part operation is done id#175:0 progress is 1/1 2026-01-08T15:30:47.977470Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1714: TOperation IsReadyToDone TxId: 175 ready parts: 1/1 2026-01-08T15:30:47.977505Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:928: Part operation is done id#175:0 progress is 1/1 2026-01-08T15:30:47.977531Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1714: TOperation IsReadyToDone TxId: 175 ready parts: 1/1 2026-01-08T15:30:47.977562Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1679: TOperation IsReadyToNotify, TxId: 175, ready parts: 1/1, is published: true 2026-01-08T15:30:47.977594Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1714: TOperation IsReadyToDone TxId: 175 ready parts: 1/1 2026-01-08T15:30:47.977640Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:995: Operation and all the parts is done, operation id: 175:0 2026-01-08T15:30:47.977667Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5577: RemoveTx for txid 175:0 2026-01-08T15:30:47.977816Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:611: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 62] was 5 2026-01-08T15:30:47.979313Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 175 2026-01-08T15:30:47.979778Z node 1 :HIVE INFO: tablet_helpers.cpp:1510: [72057594037968897] TEvDeleteTablet, msg: ShardOwnerId: 72057594046678944 ShardLocalIdx: 74 TxId_Deprecated: 0 TabletID: 72075186233409619 FAKEHIVE 72057594037968897 TEvDeleteTablet ShardOwnerId: 72057594046678944 ShardLocalIdx: 74 TxId_Deprecated: 0 TabletID: 72075186233409619 Forgetting tablet 72075186233409619 2026-01-08T15:30:47.987404Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6378: Free tablet reply, message: Status: OK Origin: 72057594037968897 TxId_Deprecated: 0 ShardOwnerId: 72057594046678944 ShardLocalIdx: 74, at schemeshard: 72057594046678944 2026-01-08T15:30:47.987694Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:611: DecrementPathDbRefCount reason shard deleted 
for pathId [OwnerId: 72057594046678944, LocalPathId: 62] was 4 2026-01-08T15:30:47.988592Z node 1 :HIVE INFO: tablet_helpers.cpp:1510: [72057594037968897] TEvDeleteTablet, msg: ShardOwnerId: 72057594046678944 ShardLocalIdx: 73 TxId_Deprecated: 0 TabletID: 72075186233409618 FAKEHIVE 72057594037968897 TEvDeleteTablet ShardOwnerId: 72057594046678944 ShardLocalIdx: 73 TxId_Deprecated: 0 TabletID: 72075186233409618 2026-01-08T15:30:47.989705Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:464: Clear TempDirsState with owners number: 0 2026-01-08T15:30:48.046589Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6378: Free tablet reply, message: Status: OK Origin: 72057594037968897 TxId_Deprecated: 0 ShardOwnerId: 72057594046678944 ShardLocalIdx: 73, at schemeshard: 72057594046678944 2026-01-08T15:30:48.046926Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:611: DecrementPathDbRefCount reason shard deleted for pathId [OwnerId: 72057594046678944, LocalPathId: 62] was 3 2026-01-08T15:30:48.047466Z node 1 :HIVE INFO: tablet_helpers.cpp:1510: [72057594037968897] TEvDeleteTablet, msg: ShardOwnerId: 72057594046678944 ShardLocalIdx: 75 TxId_Deprecated: 0 TabletID: 72075186233409620 FAKEHIVE 72057594037968897 TEvDeleteTablet ShardOwnerId: 72057594046678944 ShardLocalIdx: 75 TxId_Deprecated: 0 TabletID: 72075186233409620 Forgetting tablet 72075186233409618 2026-01-08T15:30:48.048858Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6378: Free tablet reply, message: Status: OK Origin: 72057594037968897 TxId_Deprecated: 0 ShardOwnerId: 72057594046678944 ShardLocalIdx: 75, at schemeshard: 72057594046678944 2026-01-08T15:30:48.049213Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:611: DecrementPathDbRefCount reason shard deleted for pathId [OwnerId: 72057594046678944, LocalPathId: 62] was 2 Forgetting tablet 72075186233409620 2026-01-08T15:30:48.051641Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:205: TTxCleanDroppedSubDomains Execute, 1 paths in candidate queue, at schemeshard: 72057594046678944 2026-01-08T15:30:48.051690Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:224: TTxCleanDroppedPaths: PersistRemoveSubDomain for PathId# [OwnerId: 72057594046678944, LocalPathId: 62], at schemeshard: 72057594046678944 2026-01-08T15:30:48.051799Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:611: DecrementPathDbRefCount reason for pathId [OwnerId: 72057594046678944, LocalPathId: 62] was 1 2026-01-08T15:30:48.053117Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 175 2026-01-08T15:30:48.053322Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:123: TTxCleanDroppedPaths Execute, 1 paths in candidate queue, at schemeshard: 72057594046678944 2026-01-08T15:30:48.053360Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:137: TTxCleanDroppedPaths: PersistRemovePath for PathId# [OwnerId: 72057594046678944, LocalPathId: 62], at schemeshard: 72057594046678944 2026-01-08T15:30:48.053424Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:611: DecrementPathDbRefCount reason remove path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 2 2026-01-08T15:30:48.057518Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:176: Deleted shardIdx 72057594046678944:74 2026-01-08T15:30:48.057573Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard__delete_tablet_reply.cpp:182: Close pipe to deleted shardIdx 72057594046678944:74 tabletId 72075186233409619 2026-01-08T15:30:48.057665Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:176: Deleted shardIdx 72057594046678944:73 2026-01-08T15:30:48.057691Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:182: Close pipe to deleted shardIdx 72057594046678944:73 tabletId 72075186233409618 2026-01-08T15:30:48.057894Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:176: Deleted shardIdx 72057594046678944:75 2026-01-08T15:30:48.057934Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:182: Close pipe to deleted shardIdx 72057594046678944:75 tabletId 72075186233409620 2026-01-08T15:30:48.060122Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:252: TTxCleanDroppedSubDomains Complete, done PersistRemoveSubDomain for 1 subdomains, skipped 0, left 0 candidates, at schemeshard: 72057594046678944 2026-01-08T15:30:48.060230Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:160: TTxCleanDroppedPaths Complete, done PersistRemovePath for 1 paths, skipped 0, left 0 candidates, at schemeshard: 72057594046678944 TestModificationResult got TxId: 175, wait until txId: 175 TestWaitNotification wait txId: 175 2026-01-08T15:30:48.061532Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:213: tests -- TTxNotificationSubscriber for txId 175: send EvNotifyTxCompletion 2026-01-08T15:30:48.061571Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:259: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 175 2026-01-08T15:30:48.063005Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:123: NotifyTxCompletion, unknown transaction, txId: 175, at schemeshard: 72057594046678944 2026-01-08T15:30:48.063114Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:230: tests -- TTxNotificationSubscriber for txId 175: got EvNotifyTxCompletionResult 2026-01-08T15:30:48.063147Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:239: tests -- TTxNotificationSubscriber for txId 175: satisfy waiter [1:8304:9331] TestWaitNotification: OK eventTxId 175 |86.3%| [TM] {BAZEL_UPLOAD} ydb/core/tx/schemeshard/ut_auditsettings/unittest >> test_s3.py::TestPgYdbS3TTL::test_s3[table_ttl_pgdate-pk_types6-all_types6-index6-pgdate--] [GOOD] >> TColumnShardTestReadWrite::WriteExoticTypes >> DataStreams::TestControlPlaneAndMeteringData [GOOD] >> DataStreams::ChangeBetweenRetentionModes >> DataStreams::TestStreamStorageRetention [GOOD] >> DataStreams::TestStreamPagination >> test_s3.py::TestPgYdbS3TTL::test_s3[table_ttl_pgint4-pk_types4-all_types4-index4-pgint4--] [GOOD] >> THiveTest::TestDrain >> DataStreams::TestPutRecordsOfAnauthorizedUser [GOOD] >> DataStreams::TestPutRecordsWithRead >> DataStreams::TestUpdateStorage >> QuoterWithKesusTest::GetsSeveralQuotas [GOOD] >> QuoterWithKesusTest::KesusRecreation >> DataStreams::TestReservedResourcesMetering [GOOD] >> DataStreams::TestReservedStorageMetering |86.2%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/schemeshard/ut_sequence/ydb-core-tx-schemeshard-ut_sequence |86.3%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/schemeshard/ut_sequence/ydb-core-tx-schemeshard-ut_sequence |86.3%| [LD] {RESULT} $(B)/ydb/core/tx/schemeshard/ut_sequence/ydb-core-tx-schemeshard-ut_sequence >> test_fifo_messaging.py::TestSqsFifoMessagingWithTenant::test_receive_attempt_reloads_same_messages[tables_format_v0-standard_mode] [GOOD] >> 
TTxDataShardLocalKMeansScan::BuildToBuild_Ranges [GOOD] >> TTxDataShardPrefixKMeansScan::BadRequest >> DataShardVolatile::DistributedWriteThenDropTable [GOOD] >> DataShardVolatile::DistributedWriteThenImmediateUpsert >> DataShardStats::Follower [GOOD] >> DataShardStats::Tli >> TCmsTest::Mirror3dcPermissions [GOOD] >> DataStreams::ChangeBetweenRetentionModes [GOOD] >> DataStreams::TestCreateExistingStream |86.3%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/sys_view/ut_large/ydb-core-sys_view-ut_large |86.3%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/sys_view/ut_large/ydb-core-sys_view-ut_large |86.3%| [LD] {RESULT} $(B)/ydb/core/sys_view/ut_large/ydb-core-sys_view-ut_large ------- [TM] {asan, default-linux-x86_64, release} ydb/core/cms/ut/unittest >> TCmsTest::Mirror3dcPermissions [GOOD] Test command err: 2026-01-08T15:30:41.946682Z node 1 :CMS ERROR: sentinel.cpp:854: [Sentinel] [StateUpdater] Cannot get pdisks state: nodeId# 1, reason# Disconnected 2026-01-08T15:30:48.730365Z node 21 :BS_NODE WARN: {NWDC01@distconf.cpp:423} StateFunc too long Type# 268639239 Duration# 0.007325s 2026-01-08T15:30:52.115297Z node 27 :BS_NODE WARN: {NWDC01@distconf.cpp:423} StateFunc too long Type# 268639238 Duration# 0.005087s |86.3%| [TM] {BAZEL_UPLOAD} ydb/core/cms/ut/unittest >> CoordinatorVolatile::CoordinatorRestartWithEnqueuedVolatileStep [GOOD] >> KqpBatchUpdate::Large_2 [GOOD] >> test_fifo_messaging.py::TestSqsFifoMessagingWithTenant::test_can_read_from_different_groups[tables_format_v0] [GOOD] >> test_fifo_messaging.py::TestSqsFifoMessagingWithTenant::test_can_read_from_different_groups[tables_format_v1] >> KeyValueGRPCService::SimpleWriteReadOverrunV1 [GOOD] >> KeyValueGRPCService::SimpleWriteReadOverrunV2 |86.3%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/kqp/ut/knn/ydb-core-kqp-ut-knn |86.3%| [LD] {RESULT} $(B)/ydb/core/kqp/ut/knn/ydb-core-kqp-ut-knn |86.3%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/kqp/ut/knn/ydb-core-kqp-ut-knn >> DataStreams::TestStreamPagination [GOOD] >> DataStreams::TestShardPagination ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/coordinator/ut/unittest >> CoordinatorVolatile::CoordinatorRestartWithEnqueuedVolatileStep [GOOD] Test command err: ... waiting for SysViewsRoster update finished 2026-01-08T15:28:46.248731Z node 2 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:125: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, got undelivered to scheme cache: ActorUnknown 2026-01-08T15:28:46.249789Z node 1 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:125: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, got undelivered to scheme cache: ActorUnknown 2026-01-08T15:28:46.258309Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [2:685:2347], Scheduled retry for error: {
: Error: Scheme service not found } 2026-01-08T15:28:46.416302Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:125: [WorkloadService] [Service] Failed to discover tenant nodes 2026-01-08T15:28:46.417073Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:299:2225], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2026-01-08T15:28:46.417749Z node 1 :METADATA_PROVIDER ERROR: log.cpp:841: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2026-01-08T15:28:46.418084Z node 1 :METADATA_PROVIDER ERROR: log.cpp:841: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2026-01-08T15:28:46.420196Z node 2 :METADATA_PROVIDER ERROR: log.cpp:841: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2026-01-08T15:28:46.420313Z node 2 :METADATA_PROVIDER ERROR: log.cpp:841: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2026-01-08T15:28:47.064477Z node 1 :IMPORT WARN: schemeshard_import.cpp:440: Table profiles were not loaded 2026-01-08T15:28:47.198241Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2026-01-08T15:28:47.198412Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2026-01-08T15:28:47.198907Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2026-01-08T15:28:47.198980Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2026-01-08T15:28:47.274209Z node 1 :HIVE WARN: hive_impl.cpp:814: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 2 Cookie 2 2026-01-08T15:28:47.275291Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2026-01-08T15:28:47.275746Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2026-01-08T15:28:47.378999Z node 1 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: PathErrorUnknown, path: Root/.metadata/script_executions 2026-01-08T15:28:47.426204Z node 2 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: PathErrorUnknown, path: Root/.metadata/script_executions ... waiting for SysViewsRoster update finished (done) ... waiting for the first mediator step 2026-01-08T15:28:48.238557Z node 2 :TX_CONVEYOR ERROR: log.cpp:841: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2026-01-08T15:28:48.238697Z node 1 :TX_CONVEYOR ERROR: log.cpp:841: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; ... found first step to be 1500 2026-01-08T15:28:48.582268Z node 1 :TX_COORDINATOR DEBUG: coordinator__acquire_read_step.cpp:97: tablet# 72057594046316545 HANDLE TEvAcquireReadStep ... acquired read step 1500 ... waiting for the next mediator step ... found second step to be 2000 ... read step subscribe result: [1500, 2000] ... 
read step subscribe update: 3000 2026-01-08T15:28:49.636013Z node 1 :TX_COORDINATOR DEBUG: coordinator__acquire_read_step.cpp:97: tablet# 72057594046316545 HANDLE TEvAcquireReadStep ... acquired read step 3000 ... read step subscribe result: [3000, 3000] ... read step subscribe update: 3500 ... read step subscribe update: 3500 ... read step subscribe update: 4000 ... read step subscribe update: 5000 ... read step subscribe update: 6000 ... read step subscribe update: 7000 ... read step subscribe result: [3000, 7000] 2026-01-08T15:28:52.692964Z node 1 :PIPE_SERVER ERROR: tablet_pipe_server.cpp:228: [72057594037936129] NodeDisconnected NodeId# 2 2026-01-08T15:28:52.693156Z node 1 :PIPE_SERVER ERROR: tablet_pipe_server.cpp:228: [72057594037936131] NodeDisconnected NodeId# 2 2026-01-08T15:28:52.693191Z node 1 :PIPE_SERVER ERROR: tablet_pipe_server.cpp:228: [72057594046447617] NodeDisconnected NodeId# 2 2026-01-08T15:28:52.693222Z node 1 :PIPE_SERVER ERROR: tablet_pipe_server.cpp:228: [72057594037968897] NodeDisconnected NodeId# 2 2026-01-08T15:28:52.693250Z node 1 :PIPE_SERVER ERROR: tablet_pipe_server.cpp:228: [72057594037932033] NodeDisconnected NodeId# 2 2026-01-08T15:28:52.694294Z node 2 :BS_NODE ERROR: {NW42@node_warden_pipe.cpp:60} Handle(TEvTabletPipe::TEvClientDestroyed) ClientId# [2:123:2098] ServerId# [1:1128:2696] TabletId# 72057594037932033 PipeClientId# [2:123:2098] 2026-01-08T15:28:52.700488Z node 1 :HIVE WARN: hive_impl.cpp:824: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeDisconnected, NodeId 2 2026-01-08T15:28:52.700636Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connected -> Disconnecting 2026-01-08T15:28:52.701329Z node 2 :TX_PROXY WARN: proxy_impl.cpp:226: actor# [2:258:2138] HANDLE TEvClientDestroyed from tablet# 72057594046447617 2026-01-08T15:28:52.702001Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnecting -> Disconnected 2026-01-08T15:28:52.712778Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2026-01-08T15:28:52.747838Z node 1 :HIVE WARN: hive_impl.cpp:814: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 2 Cookie 2 2026-01-08T15:28:52.749207Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected ... read step subscribe update: 8000 ... read step subscribe update: 9000 ... read step subscribe update: 10000 ... read step subscribe update: 11000 ... read step subscribe update: 12000 ... waiting for SysViewsRoster update finished 2026-01-08T15:29:03.237846Z node 4 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:125: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, got undelivered to scheme cache: ActorUnknown 2026-01-08T15:29:03.238935Z node 3 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:125: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, got undelivered to scheme cache: ActorUnknown 2026-01-08T15:29:03.241180Z node 4 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [4:685:2347], Scheduled retry for error: {
: Error: Scheme service not found } 2026-01-08T15:29:03.251905Z node 3 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:125: [WorkloadService] [Service] Failed to discover tenant nodes 2026-01-08T15:29:03.254398Z node 4 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:125: [WorkloadService] [Service] Failed to discover tenant nodes 2026-01-08T15:29:03.256364Z node 3 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [3:298:2224], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2026-01-08T15:29:03.256751Z node 3 :METADATA_PROVIDER ERROR: log.cpp:841: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2026-01-08T15:29:03.256888Z node 3 :METADATA_PROVIDER ERROR: log.cpp:841: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2026-01-08T15:29:03.258582Z node 4 :METADATA_PROVIDER ERROR: log.cpp:841: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2026-01-08T15:29:03.258790Z node 4 :METADATA_PROVIDER ERROR: log.cpp:841: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2026-01-08T15:29:03.632614Z node 3 :IMPORT WARN: schemeshard_import.cpp:440: Table profiles were not loaded 2026-01-08T15:29:03.742605Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2026-01-08T15:29:03.742763Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2026-01-08T15:29:03.743465Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2026-01-08T15:29:03.743556Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2026-01-08T15:29:03.788154Z node 3 :HIVE WARN: hive_impl.cpp:814: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 4 Cookie 4 2026-01-08T15:29:03.789166Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Connecting -> Connected 2026-01-08T15:29:03.789542Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Connecting -> Connected 2026-01-08T15:29:03.847655Z node 3 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: PathErrorUnknown, path: Root/.metadata/script_executions 2026-01-08T15:29:03.898046Z node 4 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: PathErrorUnknown, path: Root/.metadata/script_executions ... 
waiting for SysViewsRoster update finished (done) 2026-01-08T15:29:04.521969Z node 3 :TX_COORDINATOR DEBUG: coordinator__last_step_subscriptions.cpp:52: Processing TEvSubscribeLastStep from [4:1487:2374] at coordinator 72057594046316545 with seqNo 123 and cookie 234 2026-01-08T15:29:04.579228Z node 4 :TX_CONVEYOR ERROR: log.cpp:841: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2026-01-08T15:29:04.579368Z node 3 :TX_CONVEYOR ERROR: log.cpp:841: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2026-01-08T15:29:04.906912Z node 3 :TX_COORDINATOR DEBUG: coordinator__last_step_subscriptions.cpp:52: Processing TEvSubscribeLastStep from [4:1488:2375] at coordinator 72057594046316545 with seqNo 234 and cookie 345 2026-01-08T15:29:05.239856Z node 3 :TX_COORDINATOR DEBUG: ... 15:30:57.316860Z node 20 :BOOTSTRAPPER NOTICE: bootstrapper.cpp:698: tablet: 72057594047365120, type: Dummy, boot ... waiting for blocked put responses 2026-01-08T15:30:57.347449Z node 20 :TX_COORDINATOR DEBUG: coordinator_impl.cpp:183: tablet# 72057594046316545 txid# 10000000 HANDLE EvProposeTransaction marker# C0 2026-01-08T15:30:57.347573Z node 20 :TX_COORDINATOR DEBUG: coordinator_impl.cpp:29: tablet# 72057594046316545 txid# 10000000 step# 200 Status# 16 SEND to# [20:830:2725] Proxy marker# C1 ... coordinator 72057594046316545 gen 2 is planning step 200 2026-01-08T15:30:57.362851Z node 20 :TX_COORDINATOR DEBUG: coordinator__plan_step.cpp:184: Transaction 10000000 has been planned 2026-01-08T15:30:57.362989Z node 20 :TX_COORDINATOR DEBUG: coordinator__plan_step.cpp:197: Planned transaction 10000000 for mediator 72057594046382081 tablet 72057594047365120 ... blocking put [72057594046316545:2:10:1:24576:168:0] response ... waiting for planning for the required step ... coordinator 72057594046316545 gen 2 is planning step 250 ... starting a new coordinator instance ... waiting for migrated state 2026-01-08T15:30:57.401267Z node 20 :TX_COORDINATOR INFO: coordinator_impl.cpp:615: OnTabletStop: 72057594046316545 reason = ReasonDemoted 2026-01-08T15:30:57.401869Z node 20 :TX_COORDINATOR INFO: coordinator_impl.cpp:615: OnTabletStop: 72057594046316545 reason = ReasonDemoted 2026-01-08T15:30:57.612532Z node 20 :TX_COORDINATOR INFO: coordinator__init.cpp:120: tablet# 72057594046316545 CreateTxInit Complete ... blocking state response from [20:547:2395] to [20:874:2745] LastSentStep: 150 LastAcquiredStep: 0 LastConfirmedStep: 150 ... unblocking put responses and requests 2026-01-08T15:30:57.613840Z node 20 :TX_COORDINATOR DEBUG: coordinator_impl.cpp:424: tablet# 72057594046316545 txid# 10000000 stepId# 200 Status# 17 SEND EvProposeTransactionStatus to# [20:830:2725] Proxy 2026-01-08T15:30:57.616007Z node 20 :TX_MEDIATOR DEBUG: mediator_impl.cpp:322: tablet# 72057594046382081 server# [20:557:2490] disconnnected 2026-01-08T15:30:57.616117Z node 20 :TX_MEDIATOR_EXEC_QUEUE DEBUG: execute_queue.cpp:201: Actor# [20:558:2491] MediatorId# 72057594046382081 HANDLE TEvServerDisconnected server# [20:557:2490] ... trying to plan tx 10000011 ... 
waiting for planned another persistent tx 2026-01-08T15:30:57.639690Z node 20 :TX_MEDIATOR DEBUG: mediator_impl.cpp:316: tablet# 72057594046382081 server# [20:881:2755] connected 2026-01-08T15:30:57.640086Z node 20 :TX_MEDIATOR DEBUG: mediator_impl.cpp:139: tablet# 72057594046382081 HANDLE EvCoordinatorSync 2026-01-08T15:30:57.640157Z node 20 :TX_MEDIATOR DEBUG: mediator_impl.cpp:83: tablet# 72057594046382081 SEND EvCoordinatorSyncResult to# [20:877:2753] Cookie# 1 CompleteStep# 150 LatestKnownStep# 150 SubjectiveTime# 102 Coordinator# 72057594046316545 2026-01-08T15:30:57.640412Z node 20 :TX_COORDINATOR NOTICE: coordinator_impl.cpp:412: tablet# 72057594046316545 HANDLE EvMediatorQueueRestart MediatorId# 72057594046382081 2026-01-08T15:30:57.640477Z node 20 :TX_COORDINATOR DEBUG: coordinator_impl.cpp:580: Send from# 72057594046316545 to mediator# 72057594046382081, step# 200, txid# 10000000 marker# C2 ... observed step: Transactions { AffectedSet: 72057594047365120 TxId: 10000000 } Step: 200 PrevStep: 0 MediatorID: 72057594046382081 CoordinatorID: 72057594046316545 ActiveCoordinatorGeneration: 3 2026-01-08T15:30:57.645464Z node 20 :TX_MEDIATOR DEBUG: mediator_impl.cpp:280: tablet# 72057594046382081 HANDLE EvCoordinatorStep coordinator# 72057594046316545 step# 200 2026-01-08T15:30:57.645552Z node 20 :TX_MEDIATOR INFO: mediator_impl.cpp:287: Coordinator step: Mediator [72057594046382081], Coordinator [72057594046316545], step# [200] transactions [1] 2026-01-08T15:30:57.645710Z node 20 :TX_MEDIATOR DEBUG: mediator_impl.cpp:205: tablet# 72057594046382081 SEND EvCommitStep to# [20:558:2491] ExecQueue {TMediateStep From 150 To# 200Steps: {{TCoordinatorStep step# 200 PrevStep# 0Transactions: {{TTx Moderator# 0 txid# 10000000 AckTo# [20:877:2753]}}TabletsToTransaction: {{tablet# 72057594047365120 txid# 10000000}}}}} marker# M0 2026-01-08T15:30:57.645909Z node 20 :TX_MEDIATOR_EXEC_QUEUE DEBUG: execute_queue.cpp:72: Actor# [20:558:2491] MediatorId# 72057594046382081 HANDLE TEvCommitStep {TMediateStep From 150 To# 200Steps: {{TCoordinatorStep step# 200 PrevStep# 0Transactions: {{TTx Moderator# 0 txid# 10000000 AckTo# [20:877:2753]}}TabletsToTransaction: {{tablet# 72057594047365120 txid# 10000000}}}}} marker# M1 2026-01-08T15:30:57.645999Z node 20 :TX_MEDIATOR_EXEC_QUEUE DEBUG: execute_queue.cpp:54: Actor# [20:558:2491] MediatorId# 72057594046382081 SEND Ev to# [20:559:2492] step# 200 forTablet# 72057594047365120 txid# 10000000 marker# M3 2026-01-08T15:30:57.646079Z node 20 :TX_MEDIATOR_EXEC_QUEUE DEBUG: execute_queue.cpp:119: Actor# [20:558:2491] MediatorId# 72057594046382081 SEND TEvStepPlanComplete to# [20:559:2492] bucket.ActiveActor step# 200 2026-01-08T15:30:57.646135Z node 20 :TX_MEDIATOR_EXEC_QUEUE DEBUG: execute_queue.cpp:119: Actor# [20:558:2491] MediatorId# 72057594046382081 SEND TEvStepPlanComplete to# [20:560:2493] bucket.ActiveActor step# 200 2026-01-08T15:30:57.646245Z node 20 :TX_MEDIATOR_TABLETQUEUE DEBUG: tablet_queue.cpp:183: Actor# [20:559:2492] Mediator# 72057594046382081 HANDLE {TEvCommitTabletStep step# 200 TabletId# 72057594047365120 Transactions {{TTx Moderator# 0 txid# 10000000 AckTo# [20:877:2753]}}} marker# M4 2026-01-08T15:30:57.646440Z node 20 :TX_MEDIATOR_TABLETQUEUE DEBUG: tablet_queue.cpp:319: Actor# [20:560:2493] Mediator# 72057594046382081 HANDLE {TEvStepPlanComplete step# 200} 2026-01-08T15:30:57.646592Z node 20 :TX_COORDINATOR DEBUG: coordinator_impl.cpp:183: tablet# 72057594046316545 txid# 10000011 HANDLE EvProposeTransaction marker# C0 2026-01-08T15:30:57.646666Z 
node 20 :TX_COORDINATOR DEBUG: coordinator_impl.cpp:29: tablet# 72057594046316545 txid# 10000011 step# 250 Status# 16 SEND to# [20:830:2725] Proxy marker# C1 2026-01-08T15:30:57.646834Z node 20 :TX_MEDIATOR_TABLETQUEUE DEBUG: tablet_queue.cpp:319: Actor# [20:559:2492] Mediator# 72057594046382081 HANDLE {TEvStepPlanComplete step# 200} 2026-01-08T15:30:57.647412Z node 20 :TX_MEDIATOR_TABLETQUEUE DEBUG: tablet_queue.cpp:248: Actor# [20:559:2492] Mediator# 72057594046382081 HANDLE NKikimr::TEvTabletPipe::TEvClientConnected { TabletId: 72057594047365120 Status: OK ServerId: [20:885:2758] Leader: 1 Dead: 0 Generation: 2 VersionInfo: } 2026-01-08T15:30:57.647511Z node 20 :TX_MEDIATOR_TABLETQUEUE DEBUG: tablet_queue.cpp:120: Actor# [20:559:2492] Mediator# 72057594046382081 SEND to# 72057594047365120 {TEvPlanStep step# 200 MediatorId# 72057594046382081 TabletID 72057594047365120} ... observed tablet step: Transactions { TxId: 10000000 AckTo { RawX1: 0 RawX2: 0 } } Step: 200 MediatorID: 72057594046382081 TabletID: 72057594047365120 ... blocked accept from 72057594047365120 ... coordinator 72057594046316545 gen 3 is planning step 250 2026-01-08T15:30:57.660248Z node 20 :TX_COORDINATOR DEBUG: coordinator__plan_step.cpp:184: Transaction 10000011 has been planned 2026-01-08T15:30:57.660384Z node 20 :TX_COORDINATOR DEBUG: coordinator__plan_step.cpp:197: Planned transaction 10000011 for mediator 72057594046382081 tablet 72057594047365120 2026-01-08T15:30:57.661318Z node 20 :TX_COORDINATOR DEBUG: coordinator_impl.cpp:580: Send from# 72057594046316545 to mediator# 72057594046382081, step# 250, txid# 10000011 marker# C2 2026-01-08T15:30:57.661422Z node 20 :TX_COORDINATOR DEBUG: coordinator_impl.cpp:424: tablet# 72057594046316545 txid# 10000011 stepId# 250 Status# 17 SEND EvProposeTransactionStatus to# [20:830:2725] Proxy ... 
observed step: Transactions { AffectedSet: 72057594047365120 TxId: 10000011 } Step: 250 PrevStep: 200 MediatorID: 72057594046382081 CoordinatorID: 72057594046316545 ActiveCoordinatorGeneration: 3 2026-01-08T15:30:57.661777Z node 20 :TX_MEDIATOR DEBUG: mediator_impl.cpp:280: tablet# 72057594046382081 HANDLE EvCoordinatorStep coordinator# 72057594046316545 step# 250 2026-01-08T15:30:57.661850Z node 20 :TX_MEDIATOR INFO: mediator_impl.cpp:287: Coordinator step: Mediator [72057594046382081], Coordinator [72057594046316545], step# [250] transactions [1] 2026-01-08T15:30:57.662012Z node 20 :TX_MEDIATOR DEBUG: mediator_impl.cpp:205: tablet# 72057594046382081 SEND EvCommitStep to# [20:558:2491] ExecQueue {TMediateStep From 200 To# 250Steps: {{TCoordinatorStep step# 250 PrevStep# 200Transactions: {{TTx Moderator# 0 txid# 10000011 AckTo# [20:877:2753]}}TabletsToTransaction: {{tablet# 72057594047365120 txid# 10000011}}}}} marker# M0 2026-01-08T15:30:57.662172Z node 20 :TX_MEDIATOR_EXEC_QUEUE DEBUG: execute_queue.cpp:72: Actor# [20:558:2491] MediatorId# 72057594046382081 HANDLE TEvCommitStep {TMediateStep From 200 To# 250Steps: {{TCoordinatorStep step# 250 PrevStep# 200Transactions: {{TTx Moderator# 0 txid# 10000011 AckTo# [20:877:2753]}}TabletsToTransaction: {{tablet# 72057594047365120 txid# 10000011}}}}} marker# M1 2026-01-08T15:30:57.662266Z node 20 :TX_MEDIATOR_EXEC_QUEUE DEBUG: execute_queue.cpp:54: Actor# [20:558:2491] MediatorId# 72057594046382081 SEND Ev to# [20:559:2492] step# 250 forTablet# 72057594047365120 txid# 10000011 marker# M3 2026-01-08T15:30:57.662377Z node 20 :TX_MEDIATOR_EXEC_QUEUE DEBUG: execute_queue.cpp:119: Actor# [20:558:2491] MediatorId# 72057594046382081 SEND TEvStepPlanComplete to# [20:559:2492] bucket.ActiveActor step# 250 2026-01-08T15:30:57.662431Z node 20 :TX_MEDIATOR_EXEC_QUEUE DEBUG: execute_queue.cpp:119: Actor# [20:558:2491] MediatorId# 72057594046382081 SEND TEvStepPlanComplete to# [20:560:2493] bucket.ActiveActor step# 250 2026-01-08T15:30:57.662553Z node 20 :TX_MEDIATOR_TABLETQUEUE DEBUG: tablet_queue.cpp:183: Actor# [20:559:2492] Mediator# 72057594046382081 HANDLE {TEvCommitTabletStep step# 250 TabletId# 72057594047365120 Transactions {{TTx Moderator# 0 txid# 10000011 AckTo# [20:877:2753]}}} marker# M4 2026-01-08T15:30:57.662647Z node 20 :TX_MEDIATOR_TABLETQUEUE DEBUG: tablet_queue.cpp:120: Actor# [20:559:2492] Mediator# 72057594046382081 SEND to# 72057594047365120 {TEvPlanStep step# 250 MediatorId# 72057594046382081 TabletID 72057594047365120} 2026-01-08T15:30:57.662751Z node 20 :TX_MEDIATOR_TABLETQUEUE DEBUG: tablet_queue.cpp:319: Actor# [20:560:2493] Mediator# 72057594046382081 HANDLE {TEvStepPlanComplete step# 250} 2026-01-08T15:30:57.662875Z node 20 :TX_MEDIATOR_TABLETQUEUE DEBUG: tablet_queue.cpp:319: Actor# [20:559:2492] Mediator# 72057594046382081 HANDLE {TEvStepPlanComplete step# 250} ... observed tablet step: Transactions { TxId: 10000011 AckTo { RawX1: 0 RawX2: 0 } } Step: 250 MediatorID: 72057594046382081 TabletID: 72057594047365120 ... blocked accept from 72057594047365120 ... coordinator 72057594046316545 gen 3 is planning step 300 ... 
observed step: Step: 300 PrevStep: 250 MediatorID: 72057594046382081 CoordinatorID: 72057594046316545 ActiveCoordinatorGeneration: 3 2026-01-08T15:30:57.674754Z node 20 :TX_MEDIATOR_EXEC_QUEUE DEBUG: execute_queue.cpp:72: Actor# [20:558:2491] MediatorId# 72057594046382081 HANDLE TEvCommitStep {TMediateStep From 250 To# 300Steps: {{TCoordinatorStep step# 300 PrevStep# 250}}} marker# M1 2026-01-08T15:30:57.674835Z node 20 :TX_MEDIATOR_EXEC_QUEUE DEBUG: execute_queue.cpp:119: Actor# [20:558:2491] MediatorId# 72057594046382081 SEND TEvStepPlanComplete to# [20:559:2492] bucket.ActiveActor step# 300 2026-01-08T15:30:57.674889Z node 20 :TX_MEDIATOR_EXEC_QUEUE DEBUG: execute_queue.cpp:119: Actor# [20:558:2491] MediatorId# 72057594046382081 SEND TEvStepPlanComplete to# [20:560:2493] bucket.ActiveActor step# 300 2026-01-08T15:30:57.674941Z node 20 :TX_MEDIATOR_TABLETQUEUE DEBUG: tablet_queue.cpp:319: Actor# [20:559:2492] Mediator# 72057594046382081 HANDLE {TEvStepPlanComplete step# 300} 2026-01-08T15:30:57.674989Z node 20 :TX_MEDIATOR_TABLETQUEUE DEBUG: tablet_queue.cpp:319: Actor# [20:560:2493] Mediator# 72057594046382081 HANDLE {TEvStepPlanComplete step# 300} |86.3%| [TM] {BAZEL_UPLOAD} ydb/core/tx/coordinator/ut/unittest |86.3%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/script_execution/py3test >> test_update_script_tables.py::TestUpdateScriptTablesYdb::test_recreate_tables[ALTER TABLE {} DROP COLUMN syntax, DROP COLUMN ast-`.metadata/script_executions`] [GOOD] |86.3%| [TM] {BAZEL_UPLOAD} ydb/tests/functional/script_execution/py3test >> DataStreams::TestUpdateStorage [GOOD] >> DataStreams::TestStreamTimeRetention >> DataShardVolatile::DistributedWrite [GOOD] >> DataShardVolatile::DistributedWriteBrokenLock >> TSelectFromViewTest::DisabledFeatureFlag [GOOD] >> TSelectFromViewTest::ReadTestCasesFromFiles >> KqpBatchDelete::Large_2 [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/batch_operations/unittest >> KqpBatchUpdate::Large_2 [GOOD] Test command err: Trying to start YDB, gRPC: 22670, MsgBus: 65036 2026-01-08T15:30:15.235922Z node 1 :METADATA_PROVIDER WARN: log.cpp:841: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7593013480497830223:2201];send_to=[0:7307199536658146131:7762515]; 2026-01-08T15:30:15.236134Z node 1 :METADATA_PROVIDER ERROR: log.cpp:841: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2026-01-08T15:30:15.318561Z node 1 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:125: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, got undelivered to scheme cache: ActorUnknown 2026-01-08T15:30:15.892468Z node 1 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: LookupError, path: Root/.metadata/script_executions 2026-01-08T15:30:15.938501Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1374: Notification cookie mismatch for subscription [1:7593013480497830053:2081] 1767886215179444 != 1767886215179447 2026-01-08T15:30:15.970562Z node 1 :IMPORT WARN: schemeshard_import.cpp:440: Table profiles were not loaded 2026-01-08T15:30:15.975883Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2026-01-08T15:30:15.976015Z node 1 :HIVE WARN: node_info.cpp:25: 
HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2026-01-08T15:30:16.023085Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2026-01-08T15:30:16.366134Z node 1 :TX_CONVEYOR ERROR: log.cpp:841: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2026-01-08T15:30:16.386570Z node 1 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: PathErrorUnknown, path: Root/.metadata/script_executions 2026-01-08T15:30:16.387413Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2026-01-08T15:30:16.387428Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2026-01-08T15:30:16.387435Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2026-01-08T15:30:16.387515Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2026-01-08T15:30:17.153651Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2026-01-08T15:30:17.257355Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:30:17.597386Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:30:17.920999Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:30:18.166682Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:30:20.238638Z node 1 :METADATA_PROVIDER ERROR: log.cpp:841: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7593013480497830223:2201];send_to=[0:7307199536658146131:7762515]; 2026-01-08T15:30:20.238723Z node 1 :METADATA_PROVIDER ERROR: log.cpp:841: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2026-01-08T15:30:21.019560Z node 1 :KQP_WORKLOAD_SERVICE WARN: 
scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7593013506267635710:2401], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:30:21.019677Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:263: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:30:21.020664Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7593013506267635720:2402], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:30:21.020732Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:263: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:30:21.421013Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:30:21.460766Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:30:21.499404Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:30:21.561975Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:30:21.612301Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:30:21.831967Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:30:21.919378Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:30:22.027147Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715669:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:30:22.145842Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7593013510562603889:2487], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:30:22.145935Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:263: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:30:22.146374Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7593013510562603894:2490], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:30:22.146455Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7593013510562603895:2491], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:30:22.146526Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:263: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:30:22.151669Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715670:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2026-01-08T15:30:22.304923Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7593013510562603898:2492], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715670 completed, doublechecking } 2026-01-08T15:30:22.408850Z node 1 :TX_PROXY ERROR: schemereq.cpp:624: Actor# [1:7593013510562603953:3775] txid# 281474976715671, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 53], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2026-01-08T15:30:24.377562Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, bu ... e, scheme status: LookupError, path: Root/.metadata/script_executions 2026-01-08T15:30:40.861451Z node 2 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: LookupError, path: Root/.metadata/script_executions 2026-01-08T15:30:40.862838Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2026-01-08T15:30:40.862949Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2026-01-08T15:30:40.866225Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1374: Notification cookie mismatch for subscription [2:7593013586264510568:2081] 1767886240536139 != 1767886240536142 2026-01-08T15:30:40.881348Z node 2 :IMPORT WARN: schemeshard_import.cpp:440: Table profiles were not loaded 2026-01-08T15:30:40.911801Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2026-01-08T15:30:41.198489Z node 2 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: PathErrorUnknown, path: Root/.metadata/script_executions 2026-01-08T15:30:41.199060Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2026-01-08T15:30:41.199071Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2026-01-08T15:30:41.199080Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2026-01-08T15:30:41.199155Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2026-01-08T15:30:41.581102Z node 2 :TX_CONVEYOR ERROR: log.cpp:841: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2026-01-08T15:30:41.926811Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2026-01-08T15:30:41.939320Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:123: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2026-01-08T15:30:41.951358Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at 
ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:30:42.032471Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:30:42.215294Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:30:42.296921Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:30:44.903614Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7593013603444381623:2400], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:30:44.903698Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:263: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:30:44.904240Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7593013603444381633:2401], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:30:44.904296Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:263: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:30:45.081447Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:30:45.147373Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:30:45.227641Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:30:45.334946Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:30:45.418091Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:30:45.516844Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:30:45.550503Z node 2 :METADATA_PROVIDER ERROR: log.cpp:841: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[2:7593013586264510712:2182];send_to=[0:7307199536658146131:7762515]; 2026-01-08T15:30:45.550589Z node 2 :METADATA_PROVIDER ERROR: log.cpp:841: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2026-01-08T15:30:45.624810Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:30:45.752678Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710669:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:30:45.990408Z node 2 :KQP_WORKLOAD_SERVICE WARN: 
scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7593013607739349818:2485], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:30:45.990522Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:263: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:30:45.991019Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7593013607739349823:2488], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:30:45.991071Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7593013607739349824:2489], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:30:45.991182Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:263: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:30:45.996342Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710670:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2026-01-08T15:30:46.021652Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7593013607739349827:2490], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710670 completed, doublechecking } 2026-01-08T15:30:46.126258Z node 2 :TX_PROXY ERROR: schemereq.cpp:624: Actor# [2:7593013612034317174:3774] txid# 281474976710671, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 53], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2026-01-08T15:30:49.201247Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710673:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:30:55.850907Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7747: Cannot get console configs 2026-01-08T15:30:55.850942Z node 2 :IMPORT WARN: schemeshard_import.cpp:440: Table profiles were not loaded |86.3%| [TM] {BAZEL_UPLOAD} ydb/core/kqp/ut/batch_operations/unittest >> DataStreams::TestPutRecordsWithRead [GOOD] >> DataStreams::TestPutRecordsCornerCases >> TColumnShardTestReadWrite::WriteExoticTypes [GOOD] >> DataStreams::TestGetShardIterator >> DataStreams::TestCreateExistingStream [GOOD] >> DataStreams::ListStreamsValidation >> TMLPDLQMoverTests::MoveToDLQ_ManyMessages [GOOD] >> TMLPDLQMoverTests::MoveToDLQ_TopicNotExists |86.3%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/mind/bscontroller/ut/ydb-core-mind-bscontroller-ut |86.3%| [TM] {RESULT} ydb/core/tx/coordinator/ut/unittest |86.3%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/mind/bscontroller/ut/ydb-core-mind-bscontroller-ut |86.3%| [LD] {RESULT} $(B)/ydb/core/mind/bscontroller/ut/ydb-core-mind-bscontroller-ut ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/batch_operations/unittest >> KqpBatchDelete::Large_2 [GOOD] Test command err: Trying to start YDB, gRPC: 2540, MsgBus: 21730 2026-01-08T15:30:26.904216Z node 1 :METADATA_PROVIDER WARN: log.cpp:841: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7593013526506426900:2218];send_to=[0:7307199536658146131:7762515]; 2026-01-08T15:30:26.904267Z node 1 :METADATA_PROVIDER ERROR: log.cpp:841: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2026-01-08T15:30:27.554478Z node 1 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: LookupError, path: Root/.metadata/script_executions 2026-01-08T15:30:27.560932Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2026-01-08T15:30:27.561105Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2026-01-08T15:30:27.631196Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2026-01-08T15:30:27.774885Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1374: Notification cookie mismatch for subscription [1:7593013526506426690:2081] 1767886226817784 != 1767886226817787 2026-01-08T15:30:27.803302Z node 1 :IMPORT WARN: schemeshard_import.cpp:440: Table profiles were not loaded 2026-01-08T15:30:27.810181Z node 1 :KQP_PROXY WARN: 
kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: PathErrorUnknown, path: Root/.metadata/script_executions 2026-01-08T15:30:27.851246Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2026-01-08T15:30:27.851278Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2026-01-08T15:30:27.851285Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2026-01-08T15:30:27.851379Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2026-01-08T15:30:27.920305Z node 1 :TX_CONVEYOR ERROR: log.cpp:841: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2026-01-08T15:30:28.523105Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2026-01-08T15:30:28.602343Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:30:28.842412Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:30:29.153298Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:30:29.246352Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:30:31.614240Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7593013547981265058:2400], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:30:31.614388Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:263: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:30:31.618187Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7593013547981265067:2401], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:30:31.618261Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:263: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:30:31.906284Z node 1 :METADATA_PROVIDER ERROR: log.cpp:841: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7593013526506426900:2218];send_to=[0:7307199536658146131:7762515]; 2026-01-08T15:30:31.906386Z node 1 :METADATA_PROVIDER ERROR: log.cpp:841: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2026-01-08T15:30:32.082907Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:30:32.133970Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:30:32.177333Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:30:32.215162Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:30:32.246245Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:30:32.325612Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:30:32.390570Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:30:32.475612Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710669:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:30:32.614518Z node 1 :KQP_WORKLOAD_SERVICE WARN: 
scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7593013552276233251:2485], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:30:32.614610Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:263: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:30:32.614990Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7593013552276233256:2488], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:30:32.615042Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7593013552276233257:2489], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:30:32.615336Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:263: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:30:32.619345Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710670:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2026-01-08T15:30:32.633439Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7593013552276233260:2490], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710670 completed, doublechecking } 2026-01-08T15:30:32.702311Z node 1 :TX_PROXY ERROR: schemereq.cpp:624: Actor# [1:7593013552276233312:3777] txid# 281474976710671, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 53], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2026-01-08T15:30:34.599824Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710673:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_creat ... ution tables existence, got undelivered to scheme cache: ActorUnknown 2026-01-08T15:30:44.526409Z node 2 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: LookupError, path: Root/.metadata/script_executions 2026-01-08T15:30:44.564562Z node 2 :IMPORT WARN: schemeshard_import.cpp:440: Table profiles were not loaded 2026-01-08T15:30:44.566969Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1374: Notification cookie mismatch for subscription [2:7593013601953294228:2081] 1767886244228003 != 1767886244228006 2026-01-08T15:30:44.637922Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2026-01-08T15:30:44.654480Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2026-01-08T15:30:44.725789Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2026-01-08T15:30:44.931289Z node 2 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: PathErrorUnknown, path: Root/.metadata/script_executions 2026-01-08T15:30:44.966288Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2026-01-08T15:30:44.966340Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2026-01-08T15:30:44.966350Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2026-01-08T15:30:44.966443Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2026-01-08T15:30:45.289953Z node 2 :TX_CONVEYOR ERROR: log.cpp:841: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2026-01-08T15:30:45.847679Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2026-01-08T15:30:45.856738Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:123: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2026-01-08T15:30:45.869973Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose 
itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:30:46.032007Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:30:46.315136Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:30:46.445540Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:30:49.274536Z node 2 :METADATA_PROVIDER ERROR: log.cpp:841: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[2:7593013601953294414:2223];send_to=[0:7307199536658146131:7762515]; 2026-01-08T15:30:49.274616Z node 2 :METADATA_PROVIDER ERROR: log.cpp:841: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2026-01-08T15:30:50.764473Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7593013627723099880:2403], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:30:50.764584Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:263: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:30:50.764895Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7593013627723099890:2404], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:30:50.764939Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:263: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:30:50.898807Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:30:50.971629Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:30:51.040921Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:30:51.112420Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:30:51.211755Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:30:51.380750Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:30:51.469507Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:30:51.592280Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710669:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:30:51.750762Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7593013632018068089:2489], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:30:51.750870Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:263: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:30:51.751283Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7593013632018068094:2492], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:30:51.751328Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7593013632018068095:2493], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:30:51.751436Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:263: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:30:51.755800Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710670:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2026-01-08T15:30:51.782855Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7593013632018068098:2494], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710670 completed, doublechecking } 2026-01-08T15:30:51.834677Z node 2 :TX_PROXY ERROR: schemereq.cpp:624: Actor# [2:7593013632018068149:3773] txid# 281474976710671, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 53], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2026-01-08T15:30:54.637501Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710673:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:30:59.558718Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7747: Cannot get console configs 2026-01-08T15:30:59.558756Z node 2 :IMPORT WARN: schemeshard_import.cpp:440: Table profiles were not loaded |86.3%| [TM] {BAZEL_UPLOAD} ydb/core/kqp/ut/batch_operations/unittest |86.3%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/script_execution/py3test >> test_update_script_tables.py::TestUpdateScriptTablesYdb::test_recreate_tables[ALTER TABLE {} DROP COLUMN syntax, DROP COLUMN ast, DROP COLUMN stats-`.metadata/script_executions`] [GOOD] |86.3%| [TM] {BAZEL_UPLOAD} ydb/tests/functional/script_execution/py3test >> DataStreams::TestDeleteStream >> DataStreams::TestGetRecordsStreamWithSingleShard >> QuoterWithKesusTest::KesusRecreation [GOOD] >> QuoterWithKesusTest::AllocationStatistics >> DataStreams::TestNonChargeableUser >> DataShardVolatile::DistributedWriteThenImmediateUpsert [GOOD] >> DataShardVolatile::DistributedWriteThenSplit >> TTxDataShardPrefixKMeansScan::BadRequest [GOOD] >> TTxDataShardPrefixKMeansScan::BuildToPosting |86.3%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/sys_view/partition_stats/ut/ydb-core-sys_view-partition_stats-ut |86.3%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/sys_view/partition_stats/ut/ydb-core-sys_view-partition_stats-ut |86.3%| [LD] {RESULT} $(B)/ydb/core/sys_view/partition_stats/ut/ydb-core-sys_view-partition_stats-ut ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/columnshard/ut_rw/unittest >> TColumnShardTestReadWrite::WriteExoticTypes [GOOD] Test command err: 2026-01-08T15:30:53.817914Z node 1 :TX_COLUMNSHARD INFO: log.cpp:841: tablet_id=9437184;self_id=[1:128:2159];fline=columnshard.cpp:110;event=initialize_shard;step=OnActivateExecutor; 2026-01-08T15:30:53.891823Z node 1 :TX_COLUMNSHARD INFO: log.cpp:841: tablet_id=9437184;self_id=[1:128:2159];fline=columnshard.cpp:129;event=initialize_shard;step=initialize_tiring_finished; 2026-01-08T15:30:53.892095Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxInitSchema.Execute at tablet 9437184 2026-01-08T15:30:53.899726Z node 1 :TX_COLUMNSHARD WARN: log.cpp:841: tablet_id=9437184;self_id=[1:128:2159];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2026-01-08T15:30:53.900012Z node 1 :TX_COLUMNSHARD WARN: log.cpp:841: tablet_id=9437184;self_id=[1:128:2159];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2026-01-08T15:30:53.900243Z node 1 :TX_COLUMNSHARD WARN: log.cpp:841: 
tablet_id=9437184;self_id=[1:128:2159];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2026-01-08T15:30:53.900368Z node 1 :TX_COLUMNSHARD WARN: log.cpp:841: tablet_id=9437184;self_id=[1:128:2159];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2026-01-08T15:30:53.900473Z node 1 :TX_COLUMNSHARD WARN: log.cpp:841: tablet_id=9437184;self_id=[1:128:2159];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2026-01-08T15:30:53.900575Z node 1 :TX_COLUMNSHARD WARN: log.cpp:841: tablet_id=9437184;self_id=[1:128:2159];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2026-01-08T15:30:53.900713Z node 1 :TX_COLUMNSHARD WARN: log.cpp:841: tablet_id=9437184;self_id=[1:128:2159];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2026-01-08T15:30:53.900857Z node 1 :TX_COLUMNSHARD WARN: log.cpp:841: tablet_id=9437184;self_id=[1:128:2159];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2026-01-08T15:30:53.900962Z node 1 :TX_COLUMNSHARD WARN: log.cpp:841: tablet_id=9437184;self_id=[1:128:2159];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2026-01-08T15:30:53.901083Z node 1 :TX_COLUMNSHARD WARN: log.cpp:841: tablet_id=9437184;self_id=[1:128:2159];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2026-01-08T15:30:53.901222Z node 1 :TX_COLUMNSHARD WARN: log.cpp:841: tablet_id=9437184;self_id=[1:128:2159];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2026-01-08T15:30:53.901358Z node 1 :TX_COLUMNSHARD WARN: log.cpp:841: tablet_id=9437184;self_id=[1:128:2159];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CopyBlobIdsToV2; 2026-01-08T15:30:53.901476Z node 1 :TX_COLUMNSHARD WARN: log.cpp:841: tablet_id=9437184;self_id=[1:128:2159];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreAppearanceSnapshot; 2026-01-08T15:30:53.939170Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxInitSchema.Complete at tablet 9437184 2026-01-08T15:30:53.939904Z node 1 :TX_COLUMNSHARD INFO: log.cpp:841: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=columnshard__init.cpp:131;step=TTxUpdateSchema.Execute_Start;details=normalizers_count=12;current_normalizer=CLASS_NAME=Granules; 2026-01-08T15:30:53.939980Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:841: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=1;type=Granules; 2026-01-08T15:30:53.940161Z node 1 :TX_COLUMNSHARD INFO: log.cpp:841: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=normalizer.cpp:127;normalizer=TGranulesNormalizer;message=0 chunks found; 2026-01-08T15:30:53.940343Z node 1 :TX_COLUMNSHARD WARN: log.cpp:841: 
tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Granules;id=1; 2026-01-08T15:30:53.940417Z node 1 :TX_COLUMNSHARD WARN: log.cpp:841: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=Chunks;id=Chunks; 2026-01-08T15:30:53.940460Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:841: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=2;type=Chunks; 2026-01-08T15:30:53.940600Z node 1 :TX_COLUMNSHARD INFO: log.cpp:841: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=chunks.cpp:138;normalizer=TChunksNormalizer;message=0 chunks found; 2026-01-08T15:30:53.940684Z node 1 :TX_COLUMNSHARD WARN: log.cpp:841: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Chunks;id=2; 2026-01-08T15:30:53.940751Z node 1 :TX_COLUMNSHARD WARN: log.cpp:841: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=TablesCleaner;id=TablesCleaner; 2026-01-08T15:30:53.940785Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:841: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=4;type=TablesCleaner; 2026-01-08T15:30:53.940951Z node 1 :TX_COLUMNSHARD INFO: log.cpp:841: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=normalizer.cpp:123;normalizer=TGranulesNormalizer;message=0 chunks found; 2026-01-08T15:30:53.941038Z node 1 :TX_COLUMNSHARD WARN: log.cpp:841: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=TablesCleaner;id=4; 2026-01-08T15:30:53.941083Z node 1 :TX_COLUMNSHARD WARN: log.cpp:841: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanGranuleId;id=CleanGranuleId; 2026-01-08T15:30:53.941116Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:841: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=6;type=CleanGranuleId; 2026-01-08T15:30:53.941227Z node 1 :TX_COLUMNSHARD INFO: log.cpp:841: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=clean_granule.cpp:133;normalizer=TCleanGranuleIdNormalizer;message=0 chunks found; 2026-01-08T15:30:53.941295Z node 1 :TX_COLUMNSHARD WARN: log.cpp:841: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanGranuleId;id=6; 2026-01-08T15:30:53.941347Z node 1 :TX_COLUMNSHARD WARN: log.cpp:841: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=GCCountersNormalizer;id=GCCountersNormalizer; 2026-01-08T15:30:53.941430Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:841: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=9;type=GCCountersNormalizer; 2026-01-08T15:30:53.941496Z node 1 :TX_COLUMNSHARD WARN: log.cpp:841: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=GCCountersNormalizer;id=9; 2026-01-08T15:30:53.941546Z node 1 :TX_COLUMNSHARD WARN: log.cpp:841: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=SyncPortionFromChunks;id=SyncPortionFromChunks; 
2026-01-08T15:30:53.941581Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:841: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=11;type=SyncPortionFromChunks; 2026-01-08T15:30:53.941622Z node 1 :TX_COLUMNSHARD WARN: log.cpp:841: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=SyncPortionFromChunks;id=11; 2026-01-08T15:30:53.941667Z node 1 :TX_COLUMNSHARD WARN: log.cpp:841: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV1Chunks_V2;id=RestoreV1Chunks_V2; 2026-01-08T15:30:53.941694Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:841: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=15;type=RestoreV1Chunks_V2; 2026-01-08T15:30:53.941922Z node 1 :TX_COLUMNSHARD WARN: log.cpp:841: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV1Chunks_V2;id=15; 2026-01-08T15:30:53.942030Z node 1 :TX_COLUMNSHARD WARN: log.cpp:841: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV2Chunks;id=RestoreV2Chunks; 2026-01-08T15:30:53.942067Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:841: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=16;type=RestoreV2Chunks; 2026-01-08T15:30:53.942194Z node 1 :TX_COLUMNSHARD WARN: log.cpp:841: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV2Chunks;id=16; 2026-01-08T15:30:53.942236Z node 1 :TX_COLUMNSHARD WARN: log.cpp:841: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanDeprecatedSnapshot;id=CleanDeprecatedSnapshot; 2026-01-08T15:30:53.942267Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:841: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=17;type=CleanDeprecatedSnapshot; 2026-01-08T15:30:53.942342Z node 1 :TX_COLUMNSHARD WARN: log.cpp:841: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanDeprecatedSnapshot;id=17; 2026-01-08T15:30:53.942389Z node 1 :TX_COLUMNSHARD WARN: log.cpp:841: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV0ChunksMeta;id=RestoreV0ChunksMeta; 2026-01-08T15:30:53.942422Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:841: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=18;type=RestoreV0ChunksMeta; 2026-01-08T15:30:53.942478Z node 1 :TX_COLUMNSHARD WARN: log.cpp:841: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV0ChunksMeta;id=18; 2026-01-08T15:30:53.942551Z node 1 :TX_COLUMNSHARD WARN: log.cpp:841: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CopyBlobIdsToV2;id=CopyBlobIdsToV2; 2026-01-08T15:30:53.942609Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:841: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=19;type=CopyBlobIdsToV2; 
2026-01-08T15:30:53.942760Z node 1 :TX_COLUMNSHARD WARN: log.cpp:841: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CopyBlobIdsToV2;id=19; 2026-01-08T15:30:53.942822Z node 1 :TX_COLUMNSHARD WARN: log.cpp:841: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLAS ... lude":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"21,21,21,21,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"22,22,22,22,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"23,23,23,23,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"24,24,24,24,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"25,25,25,25,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"26,26,26,26,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"27,27,27,27,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"28,28,28,28,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"29,29,29,29,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"30,30,30,30,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"31,31,31,31,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"32,32,32,32,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"33,33,33,33,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"34,34,34,34,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"35,35,35,35,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"36,36,36,36,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"37,37,37,37,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"38,38,38,38,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"39,39,39,39,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"40,40,40,40,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"41,41,41,41,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"fi
nishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"42,42,42,42,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"43,43,43,43,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"44,44,44,44,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"45,45,45,45,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"46,46,46,46,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"47,47,47,47,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"48,48,48,48,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"49,49,49,49,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"50,50,50,50,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"51,51,51,51,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"52,52,52,52,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"53,53,53,53,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"54,54,54,54,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"55,55,55,55,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"56,56,56,56,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"57,57,57,57,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"58,58,58,58,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"59,59,59,59,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"60,60,60,60,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"61,61,61,61,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"62,62,62,62,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"63,63,63,63,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"64,64,64,64,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"65,65,65,65,"}},{"i":{"txs":[],"starts":[{"inc":{"count_inc
lude":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"66,66,66,66,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"67,67,67,67,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"68,68,68,68,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"69,69,69,69,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"70,70,70,70,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"71,71,71,71,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"72,72,72,72,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"73,73,73,73,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"74,74,74,74,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"75,75,75,75,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"76,76,76,76,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"77,77,77,77,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"78,78,78,78,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"79,79,79,79,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"80,80,80,80,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"81,81,81,81,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"82,82,82,82,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"83,83,83,83,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"84,84,84,84,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"85,85,85,85,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"86,86,86,86,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"87,87,87,87,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"88,88,88,88,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"89,89,89,89,"}},{"i":{"txs":[],"starts
":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"90,90,90,90,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"91,91,91,91,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"92,92,92,92,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"93,93,93,93,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"94,94,94,94,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"95,95,95,95,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"96,96,96,96,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"97,97,97,97,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"98,98,98,98,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"99,99,99,99,"}}]}; 2026-01-08T15:31:02.036178Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:841: tablet_id=9437184;local_tx_no=7;method=complete;tx_info=TTxBlobsWritingFinished;tablet_id=9437184;tx_state=complete;fline=tiling.cpp:710;message=tiling compaction: returning DoGetUsefulMetric zero; |86.3%| [TM] {BAZEL_UPLOAD} ydb/core/tx/columnshard/ut_rw/unittest >> KqpSinkTx::SnapshotRO |86.3%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/s3/py3test >> test_s3.py::TestPgYdbS3TTL::test_s3[table_ttl_pgdate-pk_types6-all_types6-index6-pgdate--] [GOOD] |86.3%| [TM] {BAZEL_UPLOAD} ydb/tests/datashard/s3/py3test >> test_fifo_messaging.py::TestSqsFifoMessagingWithTenant::test_can_read_from_different_groups[tables_format_v1] [GOOD] >> test_fifo_messaging.py::TestSqsFifoMessagingWithTenant::test_crutch_groups_selection_algorithm_selects_second_group_batch[tables_format_v0] >> KqpSinkMvcc::OlapReadWriteTxFailsOnConcurrentWrite3 >> DataStreams::TestReservedStorageMetering [GOOD] >> DataStreams::TestReservedConsumersMetering >> DataStreams::TestShardPagination [GOOD] |86.3%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/s3/py3test >> test_s3.py::TestPgYdbS3TTL::test_s3[table_ttl_pgint4-pk_types4-all_types4-index4-pgint4--] [GOOD] |86.3%| [TM] {BAZEL_UPLOAD} ydb/tests/datashard/s3/py3test >> KeyValueGRPCService::SimpleWriteReadOverrunV2 [GOOD] >> KeyValueGRPCService::SimpleWriteReadRangeV1 ------- [TM] {asan, default-linux-x86_64, release} ydb/services/datastreams/ut/unittest >> DataStreams::TestShardPagination [GOOD] Test command err: 2026-01-08T15:30:46.150677Z node 2 :BS_NODE WARN: {NWDC01@distconf.cpp:423} StateFunc too long Type# 131077 Duration# 0.009064s 2026-01-08T15:30:46.167147Z node 2 :BS_NODE WARN: {NWDC01@distconf.cpp:423} StateFunc too long Type# 131077 Duration# 0.007740s 2026-01-08T15:30:46.458719Z node 1 :METADATA_PROVIDER WARN: log.cpp:841: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7593013612494350669:2249];send_to=[0:7307199536658146131:7762515]; 2026-01-08T15:30:46.458903Z node 1 :METADATA_PROVIDER 
ERROR: log.cpp:841: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2026-01-08T15:30:46.878424Z node 1 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: LookupError, path: Root/.metadata/script_executions 2026-01-08T15:30:46.937122Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2026-01-08T15:30:46.937202Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2026-01-08T15:30:46.959657Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2026-01-08T15:30:47.185106Z node 1 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: PathErrorUnknown, path: Root/.metadata/script_executions 2026-01-08T15:30:47.187838Z node 1 :IMPORT WARN: schemeshard_import.cpp:440: Table profiles were not loaded 2026-01-08T15:30:47.475216Z node 1 :TX_CONVEYOR ERROR: log.cpp:841: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2026-01-08T15:30:47.475917Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2026-01-08T15:30:47.486526Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2026-01-08T15:30:47.486547Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2026-01-08T15:30:47.486658Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2026-01-08T15:30:48.149501Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2026-01-08T15:30:48.329151Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) 2026-01-08T15:30:48.895673Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterUserAttributes, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_alter_user_attrs.cpp:72)
: Error: retention hours and storage megabytes must fit one of: { hours : [0, 24], storage : [0, 0]}, { hours : [0, 168], storage : [51200, 1048576]}, { hours : [0, 24], storage : [0, 0]}, { hours : [0, 168], storage : [51200, 1048576]}, provided values: hours 168, storage 40960, code: 500080 2026-01-08T15:30:49.942831Z node 1 :TX_PROXY ERROR: schemereq.cpp:624: Actor# [1:7593013625379254521:3520] txid# 281474976715661, issues: { message: "Check failed: path: \'/Root/stream_TestStreamStorageRetention\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 38], type: EPathTypePersQueueGroup, state: EPathStateNoChanges)" severity: 1 } 2026-01-08T15:30:53.511962Z node 4 :METADATA_PROVIDER WARN: log.cpp:841: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[4:7593013642903934782:2085];send_to=[0:7307199536658146131:7762515]; 2026-01-08T15:30:53.526824Z node 4 :METADATA_PROVIDER ERROR: log.cpp:841: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2026-01-08T15:30:53.550235Z node 4 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:125: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, got undelivered to scheme cache: ActorUnknown 2026-01-08T15:30:53.610741Z node 5 :BS_NODE WARN: {NWDC01@distconf.cpp:423} StateFunc too long Type# 268639238 Duration# 0.015932s 2026-01-08T15:30:53.627613Z node 4 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: LookupError, path: Root/.metadata/script_executions 2026-01-08T15:30:53.735769Z node 4 :IMPORT WARN: schemeshard_import.cpp:440: Table profiles were not loaded 2026-01-08T15:30:53.753339Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2026-01-08T15:30:53.753413Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2026-01-08T15:30:53.784365Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Connecting -> Connected 2026-01-08T15:30:53.908300Z node 5 :BS_NODE WARN: {NWDC01@distconf.cpp:423} StateFunc too long Type# 268639239 Duration# 0.026589s 2026-01-08T15:30:53.886152Z node 6 :BS_NODE WARN: {NWDC01@distconf.cpp:423} StateFunc too long Type# 268639238 Duration# 0.021890s 2026-01-08T15:30:53.941868Z node 4 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: PathErrorUnknown, path: Root/.metadata/script_executions 2026-01-08T15:30:53.965954Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2026-01-08T15:30:53.965979Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2026-01-08T15:30:53.965986Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2026-01-08T15:30:53.966056Z node 4 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2026-01-08T15:30:54.224302Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, 
opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2026-01-08T15:30:54.299688Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) 2026-01-08T15:30:54.522530Z node 4 :TX_CONVEYOR ERROR: log.cpp:841: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2026-01-08T15:30:54.553800Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterUserAttributes, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_alter_user_attrs.cpp:72) 2026-01-08T15:30:58.516391Z node 4 :METADATA_PROVIDER ERROR: log.cpp:841: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[4:7593013642903934782:2085];send_to=[0:7307199536658146131:7762515]; 2026-01-08T15:30:58.516606Z node 4 :METADATA_PROVIDER ERROR: log.cpp:841: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2026-01-08T15:31:01.647969Z node 8 :BS_NODE WARN: {NWDC01@distconf.cpp:423} StateFunc too long Type# 131077 Duration# 0.035354s 2026-01-08T15:31:01.580052Z node 7 :METADATA_PROVIDER WARN: log.cpp:841: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[7:7593013678159908339:2158];send_to=[0:7307199536658146131:7762515]; 2026-01-08T15:31:01.580163Z node 7 :METADATA_PROVIDER ERROR: log.cpp:841: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2026-01-08T15:31:01.788362Z node 8 :BS_NODE WARN: {NWDC01@distconf.cpp:423} StateFunc too long Type# 131077 Duration# 0.005778s 2026-01-08T15:31:01.817438Z node 7 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: LookupError, path: Root/.metadata/script_executions 2026-01-08T15:31:02.024611Z node 7 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: LookupError, path: Root/.metadata/script_executions 2026-01-08T15:31:02.171716Z node 7 :IMPORT WARN: schemeshard_import.cpp:440: Table profiles were not loaded 2026-01-08T15:31:02.229650Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2026-01-08T15:31:02.229753Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2026-01-08T15:31:02.336484Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Connecting -> Connected 2026-01-08T15:31:02.534586Z node 8 :BS_NODE WARN: {NWDC01@distconf.cpp:423} StateFunc too long Type# 268639239 Duration# 0.019919s 2026-01-08T15:31:02.558460Z node 9 :BS_NODE WARN: {NWDC01@distconf.cpp:423} StateFunc too long Type# 268639238 Duration# 0.026759s 
2026-01-08T15:31:02.578708Z node 9 :BS_NODE WARN: {NWDC01@distconf.cpp:423} StateFunc too long Type# 268639239 Duration# 0.016320s 2026-01-08T15:31:02.578558Z node 7 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: PathErrorUnknown, path: Root/.metadata/script_executions 2026-01-08T15:31:02.653989Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2026-01-08T15:31:02.654017Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2026-01-08T15:31:02.654025Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2026-01-08T15:31:02.654120Z node 7 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2026-01-08T15:31:02.794498Z node 7 :TX_CONVEYOR ERROR: log.cpp:841: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2026-01-08T15:31:03.119901Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2026-01-08T15:31:03.419382Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) 2026-01-08T15:31:03.742883Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterUserAttributes, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_alter_user_attrs.cpp:72) |86.3%| [TM] {BAZEL_UPLOAD} ydb/services/datastreams/ut/unittest >> EscapingBasics::EncloseSecretShouldWork [GOOD] >> EscapingBasics::HideSecretsOverEncloseSecretShouldWork [GOOD] >> EscapingBasics::EscapeStringShouldWork [GOOD] >> SplitterBasic::EqualSplitByMaxRowsLimitPerChunk [GOOD] >> SplitterBasic::LimitExceed [GOOD] |86.3%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/script_execution/py3test >> test_update_script_tables.py::TestUpdateScriptTablesYdb::test_recreate_tables[ALTER TABLE {} DROP COLUMN syntax-`.metadata/script_executions`] [GOOD] |86.3%| [TM] {BAZEL_UPLOAD} ydb/tests/functional/script_execution/py3test >> DataStreams::TestGetShardIterator [GOOD] >> DataStreams::TestGetRecordsWithoutPermission |86.3%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/sqs/messaging/py3test >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_can_read_new_written_data_on_visibility_timeout[tables_format_v0] [GOOD] |86.3%| [TM] {BAZEL_UPLOAD} ydb/tests/functional/sqs/messaging/py3test |86.3%| [TM] {asan, default-linux-x86_64, release} ydb/core/fq/libs/common/ut/unittest >> EscapingBasics::EscapeStringShouldWork [GOOD] |86.3%| [TM] {asan, default-linux-x86_64, release} ydb/core/fq/libs/common/ut/unittest >> SplitterBasic::LimitExceed [GOOD] |86.3%| [TM] 
{BAZEL_UPLOAD} ydb/core/fq/libs/common/ut/unittest |86.3%| [TM] {BAZEL_UPLOAD} ydb/core/fq/libs/common/ut/unittest >> DataStreams::ListStreamsValidation [GOOD] >> RetryPolicy::TWriteSession_RetryOnTargetCluster [GOOD] >> RetryPolicy::TWriteSession_SwitchBackToLocalCluster >> DataStreams::TestDeleteStream [GOOD] >> DataStreams::TestDeleteStreamWithEnforceFlag >> DataShardVolatile::DistributedWriteBrokenLock [GOOD] >> DataShardVolatile::DistributedWriteShardRestartBeforePlan+UseSink >> DataStreams::TestStreamTimeRetention [GOOD] >> DataStreams::TestUnsupported >> THiveTest::TestDrain [GOOD] >> THiveTest::TestDownAfterDrain |86.3%| [TA] $(B)/ydb/tests/functional/script_execution/test-results/py3test/{meta.json ... results_accumulator.log} >> DataStreams::TestPutRecordsCornerCases [GOOD] >> DataStreams::TestPutRecords |86.3%| [TA] {BAZEL_UPLOAD} $(B)/ydb/tests/functional/script_execution/test-results/py3test/{meta.json ... results_accumulator.log} >> IcebergClusterProcessor::ValidateDdlCreationForHadoopWithS3 [GOOD] >> IcebergClusterProcessor::ValidateConfigurationWithoutWarehouse [GOOD] >> TKesusTest::TestAcquireUpgrade >> DataShardStats::Tli [GOOD] >> DataShardStats::HasSchemaChanges_BTreeIndex >> DataStreams::TestGetRecordsStreamWithSingleShard [GOOD] >> DataStreams::TestGetRecords1MBMessagesOneByOneByTS |86.3%| [TM] {asan, default-linux-x86_64, release} ydb/core/fq/libs/common/ut/unittest >> IcebergClusterProcessor::ValidateConfigurationWithoutWarehouse [GOOD] |86.3%| [TM] {BAZEL_UPLOAD} ydb/core/fq/libs/common/ut/unittest >> DataStreams::TestNonChargeableUser [GOOD] >> DataStreams::TestPutEmptyMessage ------- [TM] {asan, default-linux-x86_64, release} ydb/services/datastreams/ut/unittest >> DataStreams::ListStreamsValidation [GOOD] Test command err: 2026-01-08T15:30:44.705840Z node 1 :METADATA_PROVIDER WARN: log.cpp:841: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7593013602094753608:2080];send_to=[0:7307199536658146131:7762515]; 2026-01-08T15:30:44.705903Z node 1 :METADATA_PROVIDER ERROR: log.cpp:841: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2026-01-08T15:30:44.789031Z node 1 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:125: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, got undelivered to scheme cache: ActorUnknown 2026-01-08T15:30:45.321002Z node 1 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: LookupError, path: Root/.metadata/script_executions 2026-01-08T15:30:45.420855Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2026-01-08T15:30:45.420944Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2026-01-08T15:30:45.522146Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2026-01-08T15:30:45.584283Z node 1 :IMPORT WARN: schemeshard_import.cpp:440: Table profiles were not loaded 2026-01-08T15:30:45.640119Z node 1 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: PathErrorUnknown, path: Root/.metadata/script_executions 
2026-01-08T15:30:45.735344Z node 1 :TX_CONVEYOR ERROR: log.cpp:841: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2026-01-08T15:30:45.976526Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2026-01-08T15:30:45.976552Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2026-01-08T15:30:45.976565Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2026-01-08T15:30:45.976664Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2026-01-08T15:30:46.323191Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2026-01-08T15:30:46.484959Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) 2026-01-08T15:30:46.715240Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterUserAttributes, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_alter_user_attrs.cpp:72) 2026-01-08T15:30:49.706563Z node 1 :METADATA_PROVIDER ERROR: log.cpp:841: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7593013602094753608:2080];send_to=[0:7307199536658146131:7762515]; 2026-01-08T15:30:49.706641Z node 1 :METADATA_PROVIDER ERROR: log.cpp:841: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2026-01-08T15:30:50.452048Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:30:50.966562Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterPersQueueGroup, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_alter_pq.cpp:311) 2026-01-08T15:30:51.219254Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpDropPersQueueGroup, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_drop_pq.cpp:422) 2026-01-08T15:30:51.381763Z node 1 :HIVE WARN: hive_impl.cpp:519: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037907 not 
found 2026-01-08T15:30:51.381798Z node 1 :HIVE WARN: hive_impl.cpp:519: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037893 not found 2026-01-08T15:30:51.381811Z node 1 :HIVE WARN: hive_impl.cpp:519: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037898 not found 2026-01-08T15:30:51.381824Z node 1 :HIVE WARN: hive_impl.cpp:519: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037895 not found 2026-01-08T15:30:51.381836Z node 1 :HIVE WARN: hive_impl.cpp:519: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037896 not found 2026-01-08T15:30:51.381847Z node 1 :HIVE WARN: hive_impl.cpp:519: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037894 not found 2026-01-08T15:30:51.381859Z node 1 :HIVE WARN: hive_impl.cpp:519: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037900 not found 2026-01-08T15:30:51.381870Z node 1 :HIVE WARN: hive_impl.cpp:519: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037897 not found 2026-01-08T15:30:51.381881Z node 1 :HIVE WARN: hive_impl.cpp:519: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037908 not found 2026-01-08T15:30:51.381898Z node 1 :HIVE WARN: hive_impl.cpp:519: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037905 not found 2026-01-08T15:30:51.381912Z node 1 :HIVE WARN: hive_impl.cpp:519: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037902 not found 2026-01-08T15:30:51.381924Z node 1 :HIVE WARN: hive_impl.cpp:519: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037904 not found 2026-01-08T15:30:51.381935Z node 1 :HIVE WARN: hive_impl.cpp:519: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037901 not found 2026-01-08T15:30:51.381946Z node 1 :HIVE WARN: hive_impl.cpp:519: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037899 not found 2026-01-08T15:30:51.381960Z node 1 :HIVE WARN: hive_impl.cpp:519: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037906 not found 2026-01-08T15:30:51.386541Z node 1 :HIVE WARN: hive_impl.cpp:519: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037903 not found 2026-01-08T15:30:51.403321Z node 1 :HIVE WARN: tx__delete_tablet.cpp:91: HIVE#72057594037968897 THive::TTxDeleteTablet tablet (72057594046644480,19) wasn't found 2026-01-08T15:30:51.403385Z node 1 :HIVE WARN: tx__delete_tablet.cpp:91: HIVE#72057594037968897 THive::TTxDeleteTablet tablet (72057594046644480,7) wasn't found 2026-01-08T15:30:51.403410Z node 1 :HIVE WARN: tx__delete_tablet.cpp:91: HIVE#72057594037968897 THive::TTxDeleteTablet tablet (72057594046644480,13) wasn't found 2026-01-08T15:30:51.403432Z node 1 :HIVE WARN: tx__delete_tablet.cpp:91: HIVE#72057594037968897 THive::TTxDeleteTablet tablet (72057594046644480,10) wasn't found 2026-01-08T15:30:51.403456Z node 1 :HIVE WARN: tx__delete_tablet.cpp:91: HIVE#72057594037968897 THive::TTxDeleteTablet tablet (72057594046644480,16) wasn't found 2026-01-08T15:30:51.403480Z node 1 :HIVE WARN: tx__delete_tablet.cpp:91: HIVE#72057594037968897 THive::TTxDeleteTablet tablet 
(72057594046644480,21) wasn't found 2026-01-08T15:30:51.403545Z node 1 :HIVE WARN: tx__delete_tablet.cpp:91: HIVE#72057594037968897 THive::TTxDeleteTablet tablet (72057594046644480,15) wasn't found 2026-01-08T15:30:51.403630Z node 1 :HIVE WARN: tx__delete_tablet.cpp:91: HIVE#72057594037968897 THive::TTxDeleteTablet tablet (72057594046644480,20) wasn't found 2026-01-08T15:30:51.403679Z node 1 :HIVE WARN: tx__delete_tablet.cpp:91: HIVE#72057594037968897 THive::TTxDeleteTablet tablet (72057594046644480,14) wasn't found 2026-01-08T15:30:51.403725Z node 1 :HIVE WARN: tx__delete_tablet.cpp:91: HIVE#72057594037968897 THive::TTxDeleteTablet tablet (72057594046644480,8) wasn't found 2026-01-08T15:30:53.352459Z node 4 :METADATA_PROVIDER WARN: log.cpp:841: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[4:7593013643615293232:2078];send_to=[0:7307199536658146131:7762515]; 2026-01-08T15:30:53.352527Z node 4 :METADATA_PROVIDER ERROR: log.cpp:841: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2026-01-08T15:30:53.396677Z node 4 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: LookupError, path: Root/.metadata/script_executions 2026-01-08T15:30:53.492917Z node 4 :IMPORT WARN: schemeshard_import.cpp:440: Table profiles were not loaded 2026-01-08T15:30:53.504249Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2026-01-08T15:30:53.504330Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2026-01-08T15:30:53.527127Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Connecting -> Connected 2026-01-08T15:30:53.554387Z node 4 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: PathErrorUnknown, path: Root/.metadata/script_executions 2026-01-08T15:30:53.623028Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2026-01-08T15:30:53.623053Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2026-01-08T15:30:53.623060Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2026-01-08T15:30:53.623137Z node 4 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2026-01-08T15:30:53.824458Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2026-01-08T15:30:53.904270Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) 2026-01-08T15:30:54.079154Z node 4 :FLAT_TX_SCHEMESHARD WARN: 
schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterUserAttributes, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_alter_user_attrs.cpp:72) 2026-01-08T15:30:54.099471Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:123: NotifyTxCompletion, unknown transaction, txId: 281474976715659, at schemeshard: 72057594046644480 2026-01-08T15:30:54.381508Z node 4 :TX_CONVEYOR ERROR: log.cpp:841: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2026-01-08T15:30:54.401700Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterPersQueueGroup, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_alter_pq.cpp:311) 2026-01-08T15:30:54.484377Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterPersQueueGroup, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_alter_pq.cpp:311) 2026-01-08T15:30:54.555232Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterPersQueueGroup, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_alter_pq.cpp:311) 2026-01-08T15:30:58.093343Z node 7 :METADATA_PROVIDER WARN: log.cpp:841: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[7:7593013662996495208:2185];send_to=[0:7307199536658146131:7762515]; 2026-01-08T15:30:58.093429Z node 7 :METADATA_PROVIDER ERROR: log.cpp:841: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2026-01-08T15:30:58.166161Z node 7 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: LookupError, path: Root/.metadata/script_executions 2026-01-08T15:30:58.289424Z node 7 :IMPORT WARN: schemeshard_import.cpp:440: Table profiles were not loaded 2026-01-08T15:30:58.316686Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2026-01-08T15:30:58.316767Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2026-01-08T15:30:58.348766Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Connecting -> Connected 2026-01-08T15:30:58.468447Z node 7 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: PathErrorUnknown, path: Root/.metadata/script_executions 2026-01-08T15:30:58.493711Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2026-01-08T15:30:58.493741Z node 7 :NET_CLASSIFIER WARN: 
net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2026-01-08T15:30:58.493748Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2026-01-08T15:30:58.493815Z node 7 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2026-01-08T15:30:58.797600Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2026-01-08T15:30:58.926189Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) 2026-01-08T15:30:59.097731Z node 7 :TX_CONVEYOR ERROR: log.cpp:841: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2026-01-08T15:30:59.355443Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterUserAttributes, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_alter_user_attrs.cpp:72) 2026-01-08T15:30:59.688690Z node 7 :TX_PROXY ERROR: schemereq.cpp:624: Actor# [7:7593013667291464523:3508] txid# 281474976715661, issues: { message: "Check failed: path: \'/Root/stream_TestCreateExistingStream\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 38], type: EPathTypePersQueueGroup, state: EPathStateNoChanges)" severity: 1 } 2026-01-08T15:31:03.487463Z node 10 :METADATA_PROVIDER WARN: log.cpp:841: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[10:7593013686700609445:2080];send_to=[0:7307199536658146131:7762515]; 2026-01-08T15:31:03.487519Z node 10 :METADATA_PROVIDER ERROR: log.cpp:841: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2026-01-08T15:31:03.566457Z node 12 :BS_NODE WARN: {NWDC01@distconf.cpp:423} StateFunc too long Type# 268639239 Duration# 0.007807s 2026-01-08T15:31:03.695086Z node 10 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: LookupError, path: Root/.metadata/script_executions 2026-01-08T15:31:03.909713Z node 10 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: LookupError, path: Root/.metadata/script_executions 2026-01-08T15:31:03.976730Z node 10 :IMPORT WARN: schemeshard_import.cpp:440: Table profiles were not loaded 2026-01-08T15:31:04.002102Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2026-01-08T15:31:04.002209Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2026-01-08T15:31:04.061485Z node 10 :HIVE WARN: 
node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Connecting -> Connected 2026-01-08T15:31:04.198385Z node 11 :BS_NODE WARN: {NWDC01@distconf.cpp:423} StateFunc too long Type# 268639238 Duration# 0.036186s 2026-01-08T15:31:04.324503Z node 10 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2026-01-08T15:31:04.324532Z node 10 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2026-01-08T15:31:04.324542Z node 10 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2026-01-08T15:31:04.324658Z node 10 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2026-01-08T15:31:04.421831Z node 10 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: PathErrorUnknown, path: Root/.metadata/script_executions 2026-01-08T15:31:04.566472Z node 10 :TX_CONVEYOR ERROR: log.cpp:841: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2026-01-08T15:31:04.834754Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2026-01-08T15:31:04.918353Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) 2026-01-08T15:31:05.178925Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterUserAttributes, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_alter_user_attrs.cpp:72) |86.3%| [TM] {BAZEL_UPLOAD} ydb/services/datastreams/ut/unittest >> TKesusTest::TestAcquireUpgrade [GOOD] >> TKesusTest::TestAcquireTimeout |86.3%| [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/knn/unittest |86.3%| [TM] {BAZEL_UPLOAD} ydb/core/kqp/ut/knn/unittest >> TSequence::CreateSequence |86.3%| [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/knn/unittest |86.3%| [TM] {BAZEL_UPLOAD} ydb/core/kqp/ut/knn/unittest >> DataShardVolatile::DistributedWriteThenSplit [GOOD] >> DataShardVolatile::DistributedWriteThenReadIterator |86.3%| [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/knn/unittest |86.3%| [TM] {BAZEL_UPLOAD} ydb/core/kqp/ut/knn/unittest >> QuoterWithKesusTest::AllocationStatistics [GOOD] >> QuoterWithKesusTest::UpdatesCountersForParentResources >> TSequence::CreateSequence [GOOD] >> TSequence::CreateDropRecreate >> THiveTest::TestDownAfterDrain [GOOD] >> THiveTest::TestDrainAndReconnect |86.3%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/sqs/messaging/py3test >> test_fifo_messaging.py::TestSqsFifoMessagingWithTenant::test_receive_attempt_reloads_same_messages[tables_format_v0-standard_mode] [GOOD] |86.3%| [TM] {BAZEL_UPLOAD} 
ydb/tests/functional/sqs/messaging/py3test |86.3%| [TM] {asan, default-linux-x86_64, release} ydb/core/sys_view/partition_stats/ut/unittest |86.3%| [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/knn/unittest |86.3%| [TM] {BAZEL_UPLOAD} ydb/core/sys_view/partition_stats/ut/unittest |86.3%| [TM] {BAZEL_UPLOAD} ydb/core/kqp/ut/knn/unittest |86.3%| [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/knn/unittest |86.3%| [TM] {asan, default-linux-x86_64, release} ydb/core/sys_view/partition_stats/ut/unittest |86.3%| [TM] {BAZEL_UPLOAD} ydb/core/kqp/ut/knn/unittest |86.3%| [TM] {asan, default-linux-x86_64, release} ydb/core/sys_view/partition_stats/ut/unittest |86.3%| [TM] {BAZEL_UPLOAD} ydb/core/sys_view/partition_stats/ut/unittest |86.3%| [TM] {BAZEL_UPLOAD} ydb/core/sys_view/partition_stats/ut/unittest >> DataStreams::TestDeleteStreamWithEnforceFlag [GOOD] >> DataStreams::TestDeleteStreamWithEnforceFlagFalse >> DataStreams::TestGetRecordsWithoutPermission [GOOD] >> DataStreams::TestGetRecordsWithCount [GOOD] >> DataStreams::TestInvalidRetentionCombinations >> TSequence::CreateDropRecreate [GOOD] >> TSequence::CreateSequenceInsideSequenceNotAllowed >> KqpSinkTx::SnapshotRO [GOOD] >> KqpSinkTx::SnapshotROInteractive1 >> THiveTest::TestDrainAndReconnect [GOOD] >> THiveTest::TestCreateTabletsWithRaceForStoragePoolsKIKIMR_9659 >> DataStreams::TestUnsupported [GOOD] >> TSequence::CreateSequenceInsideSequenceNotAllowed [GOOD] >> TSequence::CreateSequenceInsideIndexTableNotAllowed |86.3%| [TM] {asan, default-linux-x86_64, release} ydb/core/sys_view/partition_stats/ut/unittest |86.3%| [TM] {BAZEL_UPLOAD} ydb/core/sys_view/partition_stats/ut/unittest |86.3%| [TM] {asan, default-linux-x86_64, release} ydb/core/sys_view/partition_stats/ut/unittest |86.3%| [TM] {BAZEL_UPLOAD} ydb/core/sys_view/partition_stats/ut/unittest >> DataStreams::TestReservedConsumersMetering [GOOD] |86.3%| [TM] {asan, default-linux-x86_64, release} ydb/core/sys_view/partition_stats/ut/unittest |86.3%| [TM] {BAZEL_UPLOAD} ydb/core/sys_view/partition_stats/ut/unittest >> PartitionStats::CollectorOverload [GOOD] >> DataStreams::TestPutEmptyMessage [GOOD] >> DataStreams::TestListStreamConsumers >> KeyValueGRPCService::SimpleWriteReadRangeV1 [GOOD] >> KeyValueGRPCService::SimpleWriteReadRangeV2 |86.3%| [TM] {asan, default-linux-x86_64, release} ydb/core/sys_view/partition_stats/ut/unittest |86.3%| [TM] {BAZEL_UPLOAD} ydb/core/sys_view/partition_stats/ut/unittest |86.3%| [TM] {asan, default-linux-x86_64, release} ydb/core/sys_view/partition_stats/ut/unittest |86.3%| [TM] {BAZEL_UPLOAD} ydb/core/sys_view/partition_stats/ut/unittest >> THiveTest::TestCreateTabletsWithRaceForStoragePoolsKIKIMR_9659 [GOOD] >> THiveTest::TestDeleteTablet >> DataStreams::TestPutRecords [GOOD] |86.3%| [TM] {asan, default-linux-x86_64, release} ydb/core/sys_view/partition_stats/ut/unittest >> PartitionStats::CollectorOverload [GOOD] |86.3%| [TM] {BAZEL_UPLOAD} ydb/core/sys_view/partition_stats/ut/unittest >> DataShardVolatile::DistributedWriteShardRestartBeforePlan+UseSink [GOOD] >> DataShardVolatile::DistributedWriteShardRestartBeforePlan-UseSink |86.3%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/fq/libs/actors/ut/ydb-core-fq-libs-actors-ut |86.3%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/fq/libs/actors/ut/ydb-core-fq-libs-actors-ut |86.3%| [TA] {RESULT} $(B)/ydb/tests/functional/script_execution/test-results/py3test/{meta.json ... 
results_accumulator.log} |86.3%| [LD] {RESULT} $(B)/ydb/core/fq/libs/actors/ut/ydb-core-fq-libs-actors-ut ------- [TM] {asan, default-linux-x86_64, release} ydb/services/datastreams/ut/unittest >> DataStreams::TestUnsupported [GOOD] Test command err: 2026-01-08T15:30:55.595081Z node 1 :METADATA_PROVIDER WARN: log.cpp:841: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7593013652018755254:2079];send_to=[0:7307199536658146131:7762515]; 2026-01-08T15:30:55.596236Z node 1 :METADATA_PROVIDER ERROR: log.cpp:841: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2026-01-08T15:30:56.017385Z node 1 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: LookupError, path: Root/.metadata/script_executions 2026-01-08T15:30:56.045203Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2026-01-08T15:30:56.045284Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2026-01-08T15:30:56.060865Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2026-01-08T15:30:56.213265Z node 1 :IMPORT WARN: schemeshard_import.cpp:440: Table profiles were not loaded 2026-01-08T15:30:56.245709Z node 1 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: PathErrorUnknown, path: Root/.metadata/script_executions 2026-01-08T15:30:56.448071Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2026-01-08T15:30:56.448091Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2026-01-08T15:30:56.448098Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2026-01-08T15:30:56.448275Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2026-01-08T15:30:56.605216Z node 1 :TX_CONVEYOR ERROR: log.cpp:841: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2026-01-08T15:30:56.771796Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2026-01-08T15:30:56.911838Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) 2026-01-08T15:30:57.185856Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterUserAttributes, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at 
ydb/core/tx/schemeshard/schemeshard__operation_alter_user_attrs.cpp:72) 2026-01-08T15:30:57.206909Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:123: NotifyTxCompletion, unknown transaction, txId: 281474976715659, at schemeshard: 72057594046644480 2026-01-08T15:30:57.415846Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterPersQueueGroup, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_alter_pq.cpp:311) 2026-01-08T15:31:01.740036Z node 4 :METADATA_PROVIDER WARN: log.cpp:841: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[4:7593013678065253785:2078];send_to=[0:7307199536658146131:7762515]; 2026-01-08T15:31:01.740080Z node 4 :METADATA_PROVIDER ERROR: log.cpp:841: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2026-01-08T15:31:01.916794Z node 5 :BS_NODE WARN: {NWDC01@distconf.cpp:423} StateFunc too long Type# 268639238 Duration# 0.008809s 2026-01-08T15:31:01.970410Z node 4 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: LookupError, path: Root/.metadata/script_executions 2026-01-08T15:31:02.155575Z node 4 :IMPORT WARN: schemeshard_import.cpp:440: Table profiles were not loaded 2026-01-08T15:31:02.170035Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2026-01-08T15:31:02.170092Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2026-01-08T15:31:02.186517Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Connecting -> Connected 2026-01-08T15:31:02.226472Z node 4 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: LookupError, path: Root/.metadata/script_executions 2026-01-08T15:31:02.278817Z node 5 :BS_NODE WARN: {NWDC01@distconf.cpp:423} StateFunc too long Type# 268639239 Duration# 0.006723s 2026-01-08T15:31:02.278429Z node 6 :BS_NODE WARN: {NWDC01@distconf.cpp:423} StateFunc too long Type# 268639238 Duration# 0.007916s 2026-01-08T15:31:02.411022Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2026-01-08T15:31:02.411045Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2026-01-08T15:31:02.411052Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2026-01-08T15:31:02.411125Z node 4 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2026-01-08T15:31:02.557900Z node 4 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: PathErrorUnknown, path: Root/.metadata/script_executions 2026-01-08T15:31:02.607549Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 
281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2026-01-08T15:31:02.671509Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) 2026-01-08T15:31:02.871002Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterUserAttributes, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_alter_user_attrs.cpp:72) 2026-01-08T15:31:02.890908Z node 4 :TX_CONVEYOR ERROR: log.cpp:841: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2026-01-08T15:31:03.108035Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterPersQueueGroup, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_alter_pq.cpp:311) 2026-01-08T15:31:03.167110Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterPersQueueGroup, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_alter_pq.cpp:311) encryption_type: NONE records { sequence_number: "0" shard_id: "shard-000000" } records { sequence_number: "1" shard_id: "shard-000000" } records { sequence_number: "2" shard_id: "shard-000000" } records { sequence_number: "3" shard_id: "shard-000000" } records { sequence_number: "4" shard_id: "shard-000000" } records { sequence_number: "5" shard_id: "shard-000000" } records { sequence_number: "6" shard_id: "shard-000000" } records { sequence_number: "7" shard_id: "shard-000000" } records { sequence_number: "8" shard_id: "shard-000000" } records { sequence_number: "9" shard_id: "shard-000000" } records { sequence_number: "10" shard_id: "shard-000000" } records { sequence_number: "11" shard_id: "shard-000000" } records { sequence_number: "12" shard_id: "shard-000000" } records { sequence_number: "13" shard_id: "shard-000000" } records { sequence_number: "14" shard_id: "shard-000000" } records { sequence_number: "15" shard_id: "shard-000000" } records { sequence_number: "16" shard_id: "shard-000000" } records { sequence_number: "17" shard_id: "shard-000000" } records { sequence_number: "18" shard_id: "shard-000000" } records { sequence_number: "19" shard_id: "shard-000000" } records { sequence_number: "20" shard_id: "shard-000000" } records { sequence_number: "21" shard_id: "shard-000000" } records { sequence_number: "22" shard_id: "shard-000000" } records { sequence_number: "23" shard_id: "shard-000000" } records { sequence_number: "24" shard_id: "shard-000000" } records { sequence_number: "25" shard_id: "shard-000000" } records { sequence_number: "26" shard_id: "shard-000000" } records { sequence_number: "27" shard_id: "shard-000000" } records { sequence_number: "28" shard_id: 
"shard-000000" } records { sequence_number: "29" shard_id: "shard-000000" } encryption_type: NONE records { sequence_number: "30" shard_id: "shard-000000" } records { sequence_number: "31" shard_id: "shard-000000" } records { sequence_number: "32" shard_id: "shard-000000" } records { sequence_number: "33" shard_id: "shard-000000" } records { sequence_number: "34" shard_id: "shard-000000" } records { sequence_number: "35" shard_id: "shard-000000" } records { sequence_number: "36" shard_id: "shard-000000" } records { sequence_number: "37" shard_id: "shard-000000" } records { sequence_number: "38" shard_id: "shard-000000" } records { sequence_number: "39" shard_id: "shard-000000" } records { sequence_number: "40" shard_id: "shard-000000" } records { sequence_number: "41" shard_id: "shard-000000" } records { sequence_number: "42" shard_id: "shard-000000" } records { sequence_number: "43" shard_id: "shard-000000" } records { sequence_number: "44" shard_id: "shard-000000" } records { sequence_number: "45" shard_id: "shard-000000" } records { sequence_number: "46" shard_id: "shard-000000" } records { sequence_number: "47" shard_id: "shard-000000" } records { sequence_number: "48" shard_id: "shard-000000" } records { sequence_number: "49" shard_id: "shard-000000" } records { sequence_number: "50" shard_id: "shard-000000" } records { sequence_number: "51" shard_id: "shard-000000" } records { sequence_number: "52" shard_id: "shard-000000" } records { sequence_number: "53" shard_id: "shard-000000" } records { sequence_number: "54" shard_id: "shard-000000" } records { sequence_number: "55" shard_id: "shard-000000" } records { sequence_number: "56" shard_id: "shard-000000" } records { sequence_number: "57" shard_id: "shard-000000" } records { sequence_number: "58" shard_id: "shard-000000" } records { sequence_number: "59" shard_id: "shard-000000" } encryption_type: NONE records { sequence_number: "60" shard_id: "shard-000000" } records { sequence_number: "61" shard_id: "shard-000000" } records { sequence_number: "62" shard_id: "shard-000000" } records { sequence_number: "63" shard_id: "shard-000000" } records { sequence_number: "64" shard_id: "shard-000000" } records { sequence_number: "65" shard_id: "shard-000000" } records { sequence_number: "66" shard_id: "shard-000000" } records { sequence_number: "67" shard_id: "shard-000000" } records { sequence_number: "68" shard_id: "shard-000000" } records { sequence_number: "69" shard_id: "shard-000000" } records { sequence_number: "70" shard_id: "shard-000000" } records { sequence_number: "71" shard_id: "shard-000000" } records { sequence_number: "72" shard_id: "shard-000000" } records { sequence_number: "73" shard_id: "shard-000000" } records { sequence_number: "74" shard_id: "shard-000000" } records { sequence_number: "75" shard_id: "shard-000000" } records { sequence_number: "76" shard_id: "shard-000000" } records { sequence_number: "77" shard_id: "shard-000000" } records { sequence_number: "78" shard_id: "shard-000000" } records { sequence_number: "79" shard_id: "shard-000000" } records { sequence_number: "80" shard_id: "shard-000000" } records { sequence_number: "81" shard_id: "shard-000000" } records { sequence_number: "82" shard_id: "shard-000000" } records { sequence_number: "83" shard_id: "shard-000000" } records { sequence_number: "84" shard_id: "shard-000000" } records { sequence_number: "85" shard_id: "shard-000000" } records { sequence_number: "86" shard_id: "shard-000000" } records { sequence_number: "87" shard_id: "shard-000000" } records { 
sequence_number: "88" shard_id: "shard-000000" } records { sequence_number: "89" shard_id: "shard-000000" } encryption_type: NONE records { sequence_number: "90" shard_id: "shard-000000" } records { sequence_number: "91" shard_id: "shard-000000" } records { sequence_number: "92" shard_id: "shard-000000" } records { sequence_number: "93" shard_id: "shard-000000" } records { sequence_number: "94" shard_id: "shard-000000" } records { sequence_number: "95" shard_id: "shard-000000" } records { sequence_number: "96" shard_id: "shard-000000" } records { sequence_number: "97" shard_id: "shard-000000" } records { sequence_number: "98" shard_id: "shard-000000" } records { sequence_number: "99" shard_id: "shard-000000" } records { sequence_number: "100" shard_id: "shard-000000" } records { sequence_number: "101" shard_id: "shard-000000" } records { sequence_number: "102" shard_id: "shard-000000" } records { sequence_number: "103" shard_id: "shard-000000" } records { sequence_number: "104" shard_id: "shard-000000" } records { sequence_number: "105" shard_id: "shard-000000" } records { sequence_number: "106" shard_id: "shard-000000" } records { sequence_number: "107" shard_id: "shard-000000" } records { sequence_number: "108" shard_id: "shard-000000" } records { sequence_number: "109" shard_id: "shard-000000" } records { sequence_number: "110" shard_id: "shard-000000" } records { sequence_number: "111" shard_id: "shard-000000" } records { sequence_number: "112" shard_id: "shard-000000" } records { sequence_number: "113" shard_id: "shard-000000" } records { sequence_number: "114" shard_id: "shard-000000" } records { sequence_number: "115" shard_id: "shard-000000" } records { sequence_number: "116" shard_id: "shard-000000" } records { sequence_number: "117" shard_id: "shard-000000" } records { sequence_number: "118" shard_id: "shard-000000" } records { sequence_number: "119" shard_id: "shard-000000" } 2026-01-08T15:31:06.742900Z node 4 :METADATA_PROVIDER ERROR: log.cpp:841: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[4:7593013678065253785:2078];send_to=[0:7307199536658146131:7762515]; 2026-01-08T15:31:06.742998Z node 4 :METADATA_PROVIDER ERROR: log.cpp:841: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; encryption_type: NONE records { sequence_number: "120" shard_id: "shard-000000" } records { sequence_number: "121" shard_id: "shard-000000" } records { sequence_number: "122" shard_id: "shard-000000" } records { sequence_number: "123" shard_id: "shard-000000" } records { sequence_number: "124" shard_id: "shard-000000" } records { sequence_number: "125" shard_id: "shard-000000" } records { sequence_number: "126" shard_id: "shard-000000" } records { sequence_number: "127" shard_id: "shard-000000" } records { sequence_number: "128" shard_id: "shard-000000" } records { sequence_number: "129" shard_id: "shard-000000" } records { sequence_number: "130" shard_id: "shard-000000" } records { sequence_number: "131" shard_id: "shard-000000" } records { sequence_number: "132" shard_id: "shard-000000" } records { sequence_number: "133" shard_id: "shard-000000" } records { sequence_number: "134" shard_id: "shard-000000" } records { sequence_number: "135" shard_id: "shard-000000" } records { sequence_number: "136" shard_id: "shard-000000" } records { sequence_number: "137" shard_id: "shard-000000" } records { sequence_number: "138" shard_id: "shard-000000" } records { sequence_number: "139" shard_id: 
"shard-000000" } records { sequence_number: "140" shard_id: "shard-000000" } records { sequence_number: "141" shard_id: "shard-000000" } records { sequence_number: "142" shard_id: "shard-000000" } records { sequence_number: "143" shard_id: "shard-000000" } records { sequence_number: "144" shard_id: "shard-000000" } records { sequence_number: "145" shard_id: "shard-000000" } records { sequence_number: "146" shard_id: "shard-000000" } records { sequence_number: "147" shard_id: "shard-000000" } records { sequence_number: "148" shard_id: "shard-000000" } records { sequence_number: "149" shard_id: "shard-000000" } 2026-01-08T15:31:11.547881Z node 7 :METADATA_PROVIDER WARN: log.cpp:841: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[7:7593013720779978698:2080];send_to=[0:7307199536658146131:7762515]; 2026-01-08T15:31:11.548315Z node 7 :METADATA_PROVIDER ERROR: log.cpp:841: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2026-01-08T15:31:11.735467Z node 7 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:125: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, got undelivered to scheme cache: ActorUnknown 2026-01-08T15:31:11.804282Z node 7 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: LookupError, path: Root/.metadata/script_executions 2026-01-08T15:31:12.062435Z node 7 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: LookupError, path: Root/.metadata/script_executions 2026-01-08T15:31:12.104902Z node 7 :IMPORT WARN: schemeshard_import.cpp:440: Table profiles were not loaded 2026-01-08T15:31:12.155981Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2026-01-08T15:31:12.156088Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2026-01-08T15:31:12.169451Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Connecting -> Connected 2026-01-08T15:31:12.370886Z node 9 :BS_NODE WARN: {NWDC01@distconf.cpp:423} StateFunc too long Type# 268639238 Duration# 0.008211s 2026-01-08T15:31:12.396872Z node 9 :BS_NODE WARN: {NWDC01@distconf.cpp:423} StateFunc too long Type# 268639239 Duration# 0.015731s 2026-01-08T15:31:12.530576Z node 7 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: PathErrorUnknown, path: Root/.metadata/script_executions 2026-01-08T15:31:12.595294Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2026-01-08T15:31:12.595321Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2026-01-08T15:31:12.595331Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2026-01-08T15:31:12.595426Z node 7 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2026-01-08T15:31:12.702484Z node 7 :TX_CONVEYOR ERROR: log.cpp:841: fline=service.h:53;problem=unexpected event for task 
executor;ev_type=NActors::TEvents::TEvWakeup; 2026-01-08T15:31:13.073047Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2026-01-08T15:31:13.220474Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) 2026-01-08T15:31:13.611404Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterUserAttributes, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_alter_user_attrs.cpp:72) |86.4%| [TM] {BAZEL_UPLOAD} ydb/services/datastreams/ut/unittest |86.4%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/kqp/ut/pg/ydb-core-kqp-ut-pg |86.4%| [LD] {RESULT} $(B)/ydb/core/kqp/ut/pg/ydb-core-kqp-ut-pg |86.4%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/kqp/ut/pg/ydb-core-kqp-ut-pg >> THiveTest::TestDeleteTablet [GOOD] >> THiveTest::TestDeleteOwnerTablets >> TSequence::CreateSequenceInsideIndexTableNotAllowed [GOOD] >> TSequence::CopyTableWithSequence ------- [TM] {asan, default-linux-x86_64, release} ydb/services/datastreams/ut/unittest >> DataStreams::TestReservedConsumersMetering [GOOD] Test command err: 2026-01-08T15:30:45.147847Z node 1 :METADATA_PROVIDER WARN: log.cpp:841: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7593013607627444769:2216];send_to=[0:7307199536658146131:7762515]; 2026-01-08T15:30:45.147901Z node 1 :METADATA_PROVIDER ERROR: log.cpp:841: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2026-01-08T15:30:45.730510Z node 1 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: LookupError, path: Root/.metadata/script_executions 2026-01-08T15:30:45.766240Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2026-01-08T15:30:45.766350Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2026-01-08T15:30:45.812037Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2026-01-08T15:30:45.931364Z node 1 :IMPORT WARN: schemeshard_import.cpp:440: Table profiles were not loaded 2026-01-08T15:30:45.998429Z node 3 :BS_NODE WARN: {NWDC01@distconf.cpp:423} StateFunc too long Type# 268639239 Duration# 0.006085s 2026-01-08T15:30:46.033721Z node 2 :BS_NODE WARN: {NWDC01@distconf.cpp:423} StateFunc too long Type# 268639238 Duration# 0.014045s 2026-01-08T15:30:46.052343Z node 3 :BS_NODE WARN: {NWDC01@distconf.cpp:423} StateFunc too long Type# 268639238 Duration# 0.008146s 2026-01-08T15:30:46.098904Z node 1 :BS_NODE WARN: {NWDC01@distconf.cpp:423} StateFunc too 
long Type# 268639239 Duration# 0.007144s 2026-01-08T15:30:46.102042Z node 1 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: PathErrorUnknown, path: Root/.metadata/script_executions 2026-01-08T15:30:46.215004Z node 1 :TX_CONVEYOR ERROR: log.cpp:841: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2026-01-08T15:30:46.373710Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2026-01-08T15:30:46.373734Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2026-01-08T15:30:46.373742Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2026-01-08T15:30:46.374995Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2026-01-08T15:30:46.736329Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2026-01-08T15:30:46.863477Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) 2026-01-08T15:30:47.148791Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterUserAttributes, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_alter_user_attrs.cpp:72) 2026-01-08T15:30:47.166754Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:123: NotifyTxCompletion, unknown transaction, txId: 281474976715659, at schemeshard: 72057594046644480 2026-01-08T15:30:47.578819Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterPersQueueGroup, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_alter_pq.cpp:311) encryption_type: NONE records { sequence_number: "0" shard_id: "shard-000001" } records { sequence_number: "0" shard_id: "shard-000009" } records { sequence_number: "0" shard_id: "shard-000004" } records { sequence_number: "0" shard_id: "shard-000005" } records { sequence_number: "0" shard_id: "shard-000008" } records { sequence_number: "1" shard_id: "shard-000004" } records { sequence_number: "2" shard_id: "shard-000004" } records { sequence_number: "1" shard_id: "shard-000005" } records { sequence_number: "1" shard_id: "shard-000001" } records { sequence_number: "1" shard_id: "shard-000009" } records { sequence_number: "0" shard_id: "shard-000006" } records { sequence_number: "2" shard_id: "shard-000001" } records { sequence_number: "0" shard_id: "shard-000007" } records { sequence_number: "1" shard_id: "shard-000007" } records { 
sequence_number: "0" shard_id: "shard-000000" } records { sequence_number: "2" shard_id: "shard-000007" } records { sequence_number: "3" shard_id: "shard-000004" } records { sequence_number: "2" shard_id: "shard-000005" } records { sequence_number: "0" shard_id: "shard-000003" } records { sequence_number: "2" shard_id: "shard-000009" } records { sequence_number: "1" shard_id: "shard-000008" } records { sequence_number: "1" shard_id: "shard-000000" } records { sequence_number: "1" shard_id: "shard-000006" } records { sequence_number: "2" shard_id: "shard-000000" } records { sequence_number: "3" shard_id: "shard-000009" } records { sequence_number: "3" shard_id: "shard-000001" } records { sequence_number: "4" shard_id: "shard-000009" } records { sequence_number: "4" shard_id: "shard-000004" } records { sequence_number: "3" shard_id: "shard-000000" } records { sequence_number: "4" shard_id: "shard-000001" } encryption_type: NONE records { sequence_number: "5" shard_id: "shard-000001" } records { sequence_number: "5" shard_id: "shard-000009" } records { sequence_number: "5" shard_id: "shard-000004" } records { sequence_number: "3" shard_id: "shard-000005" } records { sequence_number: "2" shard_id: "shard-000008" } records { sequence_number: "6" shard_id: "shard-000004" } records { sequence_number: "7" shard_id: "shard-000004" } records { sequence_number: "4" shard_id: "shard-000005" } records { sequence_number: "6" shard_id: "shard-000001" } records { sequence_number: "6" shard_id: "shard-000009" } records { sequence_number: "2" shard_id: "shard-000006" } records { sequence_number: "7" shard_id: "shard-000001" } records { sequence_number: "3" shard_id: "shard-000007" } records { sequence_number: "4" shard_id: "shard-000007" } records { sequence_number: "4" shard_id: "shard-000000" } records { sequence_number: "5" shard_id: "shard-000007" } records { sequence_number: "8" shard_id: "shard-000004" } records { sequence_number: "5" shard_id: "shard-000005" } records { sequence_number: "1" shard_id: "shard-000003" } records { sequence_number: "7" shard_id: "shard-000009" } records { sequence_number: "3" shard_id: "shard-000008" } records { sequence_number: "5" shard_id: "shard-000000" } records { sequence_number: "3" shard_id: "shard-000006" } records { sequence_number: "6" shard_id: "shard-000000" } records { sequence_number: "8" shard_id: "shard-000009" } records { sequence_number: "8" shard_id: "shard-000001" } records { sequence_number: "9" shard_id: "shard-000009" } records { sequence_number: "9" shard_id: "shard-000004" } records { sequence_number: "7" shard_id: "shard-000000" } records { sequence_number: "9" shard_id: "shard-000001" } 2026-01-08T15:30:50.148678Z node 1 :METADATA_PROVIDER ERROR: log.cpp:841: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7593013607627444769:2216];send_to=[0:7307199536658146131:7762515]; 2026-01-08T15:30:50.148775Z node 1 :METADATA_PROVIDER ERROR: log.cpp:841: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; encryption_type: NONE records { sequence_number: "10" shard_id: "shard-000001" } records { sequence_number: "10" shard_id: "shard-000009" } records { sequence_number: "10" shard_id: "shard-000004" } records { sequence_number: "6" shard_id: "shard-000005" } records { sequence_number: "4" shard_id: "shard-000008" } records { sequence_number: "11" shard_id: "shard-000004" } records { sequence_number: "12" shard_id: "shard-000004" } records { 
sequence_number: "7" shard_id: "shard-000005" } records { sequence_number: "11" shard_id: "shard-000001" } records { sequence_number: "11" shard_id: "shard-000009" } records { sequence_number: "4" shard_id: "shard-000006" } records { sequence_number: "12" shard_id: "shard-000001" } records { sequence_number: "6" shard_id: "shard-000007" } records { sequence_number: "7" shard_id: "shard-000007" } records { sequence_number: "8" shard_id: "shard-000000" } records { sequence_number: "8" shard_id: "shard-000007" } records { sequence_number: "13" shard_id: "shard-000004" } records { sequence_number: "8" shard_id: "shard-000005" } records { sequence_number: "2" shard_id: "shard-000003" } records { sequence_number: "12" shard_id: "shard-000009" } records { sequence_number: "5" shard_id: "shard-000008" } records { sequence_number: "9" shard_id: "shard-000000" } records { sequence_number: "5" shard_id: "shard-000006" } records { sequence_number: "10" shard_id: "shard-000000" } records { sequence_number: "13" shard_id: "shard-000009" } records { sequence_number: "13" shard_id: "shard-000001" } records { sequence_number: "14" shard_id: "shard-000009" } records { sequence_number: "14" shard_id: "shard-000004" } records { sequence_number: "11" shard_id: "shard-000000" } records { sequence_number: "14" shard_id: "shard-000001" } encryption_type: NONE records { sequence_number: "15" shard_id: "shard-000001" } records { sequence_number: "15" shard_id: "shard-000009" } records { sequence_number: "15" shard_id: "shard-000004" } records { sequence_number: "9" shard_id: "shard-000005" } records { sequence_number: "6" shard_id: "shard-000008" } records { sequence_number: "16" shard_id: "shard-000004" } records { sequence_number: "17" shard_id: "shard-000004" } records { sequence_number: "10" shard_id: "shard-000005" } records { sequence_number: "16" shard_id: "shard-000001" } records { sequence_number: "16" shard_id: "shard-000009" } records { sequence_number: "6" shard_id: "shard-000006" } records { sequence_number: "17" shard_id: "shard-000001" } records { sequence_number: "9" shard_id: "shard-000007" } records { sequence_number: "10" shard_id: "shard-000007" } records { sequence_number: "12" shard_id: "shard-000000" } records { sequence_number: "11" shard_id: "shard-000007" } records { sequence_number: "18" shard_id: "shard-000004" } records { sequence_number: "11" shard_id: "shard-000005" } records { sequence_number: "3" shard_id: "shard-000003" } records { sequence_number: "17" shard_id: "shard-000009" } records { sequence_number: "7" shard_id: "shard-000008" } records { sequence_number: "13" shard_id: "shard-000000" } records { sequence_number: "7" shard_id: "shard-000006" } r ... 
:"/Root/stream_TestReservedConsumersMetering","id":"yds.reserved_resources-root-72075186224037888-1767886270421-105","schema":"yds.storage.reserved.v1","tags":{},"usage":{"quantity":0,"unit":"mbyte*second","start":1767886270,"finish":1767886270},"labels":{"datastreams_stream_name":"stream_TestReservedConsumersMetering","ydb_database":"root","Category":"Topic"},"version":"v1","source_id":"72075186224037888","source_wt":1767886270}' Got line from metering file data: '{"cloud_id":"somecloud","folder_id":"somefolder","resource_id":"/Root/stream_TestReservedConsumersMetering","id":"yds.reserved_resources-root-72075186224037888-1767886270484-106","schema":"yds.throughput.reserved.v1","tags":{"reserved_throughput_bps":1048576,"reserved_consumers_count":1},"usage":{"quantity":0,"unit":"second","start":1767886270,"finish":1767886270},"labels":{"datastreams_stream_name":"stream_TestReservedConsumersMetering","ydb_database":"root","Category":"Topic"},"version":"v1","source_id":"72075186224037888","source_wt":1767886270}' Got line from metering file data: '{"cloud_id":"somecloud","folder_id":"somefolder","resource_id":"/Root/stream_TestReservedConsumersMetering","id":"yds.reserved_resources-root-72075186224037888-1767886270484-107","schema":"yds.storage.reserved.v1","tags":{},"usage":{"quantity":0,"unit":"mbyte*second","start":1767886270,"finish":1767886270},"labels":{"datastreams_stream_name":"stream_TestReservedConsumersMetering","ydb_database":"root","Category":"Topic"},"version":"v1","source_id":"72075186224037888","source_wt":1767886270}' Got line from metering file data: '{"cloud_id":"somecloud","folder_id":"somefolder","resource_id":"/Root/stream_TestReservedConsumersMetering","id":"yds.reserved_resources-root-72075186224037888-1767886270542-108","schema":"yds.throughput.reserved.v1","tags":{"reserved_throughput_bps":1048576,"reserved_consumers_count":1},"usage":{"quantity":0,"unit":"second","start":1767886270,"finish":1767886270},"labels":{"datastreams_stream_name":"stream_TestReservedConsumersMetering","ydb_database":"root","Category":"Topic"},"version":"v1","source_id":"72075186224037888","source_wt":1767886270}' Got line from metering file data: '{"cloud_id":"somecloud","folder_id":"somefolder","resource_id":"/Root/stream_TestReservedConsumersMetering","id":"yds.reserved_resources-root-72075186224037888-1767886270542-109","schema":"yds.storage.reserved.v1","tags":{},"usage":{"quantity":0,"unit":"mbyte*second","start":1767886270,"finish":1767886270},"labels":{"datastreams_stream_name":"stream_TestReservedConsumersMetering","ydb_database":"root","Category":"Topic"},"version":"v1","source_id":"72075186224037888","source_wt":1767886270}' Got line from metering file data: '{"cloud_id":"somecloud","folder_id":"somefolder","resource_id":"/Root/stream_TestReservedConsumersMetering","id":"yds.reserved_resources-root-72075186224037888-1767886270611-110","schema":"yds.throughput.reserved.v1","tags":{"reserved_throughput_bps":1048576,"reserved_consumers_count":2},"usage":{"quantity":0,"unit":"second","start":1767886270,"finish":1767886270},"labels":{"datastreams_stream_name":"stream_TestReservedConsumersMetering","ydb_database":"root","Category":"Topic"},"version":"v1","source_id":"72075186224037888","source_wt":1767886270}' Got line from metering file data: 
'{"cloud_id":"somecloud","folder_id":"somefolder","resource_id":"/Root/stream_TestReservedConsumersMetering","id":"yds.reserved_resources-root-72075186224037888-1767886270611-111","schema":"yds.storage.reserved.v1","tags":{},"usage":{"quantity":0,"unit":"mbyte*second","start":1767886270,"finish":1767886270},"labels":{"datastreams_stream_name":"stream_TestReservedConsumersMetering","ydb_database":"root","Category":"Topic"},"version":"v1","source_id":"72075186224037888","source_wt":1767886270}' Got line from metering file data: '{"cloud_id":"somecloud","folder_id":"somefolder","resource_id":"/Root/stream_TestReservedConsumersMetering","id":"put_units-root-72075186224037888-1767886270650-112","schema":"yds.events.puts.v1","tags":{},"usage":{"quantity":1,"unit":"put_events","start":1767886270,"finish":1767886271},"labels":{"datastreams_stream_name":"stream_TestReservedConsumersMetering","ydb_database":"root","Category":"Topic"},"version":"v1","source_id":"72075186224037888","source_wt":1767886271}' Got line from metering file data: '{"cloud_id":"somecloud","folder_id":"somefolder","resource_id":"/Root/stream_TestReservedConsumersMetering","id":"yds.reserved_resources-root-72075186224037888-1767886270650-113","schema":"yds.throughput.reserved.v1","tags":{"reserved_throughput_bps":1048576,"reserved_consumers_count":2},"usage":{"quantity":1,"unit":"second","start":1767886270,"finish":1767886271},"labels":{"datastreams_stream_name":"stream_TestReservedConsumersMetering","ydb_database":"root","Category":"Topic"},"version":"v1","source_id":"72075186224037888","source_wt":1767886271}' Got line from metering file data: '{"cloud_id":"somecloud","folder_id":"somefolder","resource_id":"/Root/stream_TestReservedConsumersMetering","id":"yds.reserved_resources-root-72075186224037888-1767886270650-114","schema":"yds.storage.reserved.v1","tags":{},"usage":{"quantity":56320,"unit":"mbyte*second","start":1767886270,"finish":1767886271},"labels":{"datastreams_stream_name":"stream_TestReservedConsumersMetering","ydb_database":"root","Category":"Topic"},"version":"v1","source_id":"72075186224037888","source_wt":1767886271}' Got line from metering file data: '{"cloud_id":"somecloud","folder_id":"somefolder","resource_id":"/Root/stream_TestReservedConsumersMetering","id":"put_units-root-72075186224037888-1767886271706-115","schema":"yds.events.puts.v1","tags":{},"usage":{"quantity":1,"unit":"put_events","start":1767886271,"finish":1767886272},"labels":{"datastreams_stream_name":"stream_TestReservedConsumersMetering","ydb_database":"root","Category":"Topic"},"version":"v1","source_id":"72075186224037888","source_wt":1767886272}' Got line from metering file data: '{"cloud_id":"somecloud","folder_id":"somefolder","resource_id":"/Root/stream_TestReservedConsumersMetering","id":"yds.reserved_resources-root-72075186224037888-1767886271706-116","schema":"yds.throughput.reserved.v1","tags":{"reserved_throughput_bps":1048576,"reserved_consumers_count":2},"usage":{"quantity":1,"unit":"second","start":1767886271,"finish":1767886272},"labels":{"datastreams_stream_name":"stream_TestReservedConsumersMetering","ydb_database":"root","Category":"Topic"},"version":"v1","source_id":"72075186224037888","source_wt":1767886272}' Got line from metering file data: 
'{"cloud_id":"somecloud","folder_id":"somefolder","resource_id":"/Root/stream_TestReservedConsumersMetering","id":"yds.reserved_resources-root-72075186224037888-1767886271706-117","schema":"yds.storage.reserved.v1","tags":{},"usage":{"quantity":56320,"unit":"mbyte*second","start":1767886271,"finish":1767886272},"labels":{"datastreams_stream_name":"stream_TestReservedConsumersMetering","ydb_database":"root","Category":"Topic"},"version":"v1","source_id":"72075186224037888","source_wt":1767886272}' Got line from metering file data: '{"cloud_id":"somecloud","folder_id":"somefolder","resource_id":"/Root/stream_TestReservedConsumersMetering","id":"put_units-root-72075186224037888-1767886272730-118","schema":"yds.events.puts.v1","tags":{},"usage":{"quantity":1,"unit":"put_events","start":1767886272,"finish":1767886273},"labels":{"datastreams_stream_name":"stream_TestReservedConsumersMetering","ydb_database":"root","Category":"Topic"},"version":"v1","source_id":"72075186224037888","source_wt":1767886273}' Got line from metering file data: '{"cloud_id":"somecloud","folder_id":"somefolder","resource_id":"/Root/stream_TestReservedConsumersMetering","id":"yds.reserved_resources-root-72075186224037888-1767886272730-119","schema":"yds.throughput.reserved.v1","tags":{"reserved_throughput_bps":1048576,"reserved_consumers_count":2},"usage":{"quantity":1,"unit":"second","start":1767886272,"finish":1767886273},"labels":{"datastreams_stream_name":"stream_TestReservedConsumersMetering","ydb_database":"root","Category":"Topic"},"version":"v1","source_id":"72075186224037888","source_wt":1767886273}' Got line from metering file data: '{"cloud_id":"somecloud","folder_id":"somefolder","resource_id":"/Root/stream_TestReservedConsumersMetering","id":"yds.reserved_resources-root-72075186224037888-1767886272730-120","schema":"yds.storage.reserved.v1","tags":{},"usage":{"quantity":56320,"unit":"mbyte*second","start":1767886272,"finish":1767886273},"labels":{"datastreams_stream_name":"stream_TestReservedConsumersMetering","ydb_database":"root","Category":"Topic"},"version":"v1","source_id":"72075186224037888","source_wt":1767886273}' Got line from metering file data: '{"cloud_id":"somecloud","folder_id":"somefolder","resource_id":"/Root/stream_TestReservedConsumersMetering","id":"put_units-root-72075186224037888-1767886273758-121","schema":"yds.events.puts.v1","tags":{},"usage":{"quantity":1,"unit":"put_events","start":1767886273,"finish":1767886274},"labels":{"datastreams_stream_name":"stream_TestReservedConsumersMetering","ydb_database":"root","Category":"Topic"},"version":"v1","source_id":"72075186224037888","source_wt":1767886274}' Got line from metering file data: '{"cloud_id":"somecloud","folder_id":"somefolder","resource_id":"/Root/stream_TestReservedConsumersMetering","id":"yds.reserved_resources-root-72075186224037888-1767886273758-122","schema":"yds.throughput.reserved.v1","tags":{"reserved_throughput_bps":1048576,"reserved_consumers_count":2},"usage":{"quantity":1,"unit":"second","start":1767886273,"finish":1767886274},"labels":{"datastreams_stream_name":"stream_TestReservedConsumersMetering","ydb_database":"root","Category":"Topic"},"version":"v1","source_id":"72075186224037888","source_wt":1767886274}' Got line from metering file data: 
'{"cloud_id":"somecloud","folder_id":"somefolder","resource_id":"/Root/stream_TestReservedConsumersMetering","id":"yds.reserved_resources-root-72075186224037888-1767886273758-123","schema":"yds.storage.reserved.v1","tags":{},"usage":{"quantity":56320,"unit":"mbyte*second","start":1767886273,"finish":1767886274},"labels":{"datastreams_stream_name":"stream_TestReservedConsumersMetering","ydb_database":"root","Category":"Topic"},"version":"v1","source_id":"72075186224037888","source_wt":1767886274}' Got line from metering file data: '{"cloud_id":"somecloud","folder_id":"somefolder","resource_id":"/Root/stream_TestReservedConsumersMetering","id":"put_units-root-72075186224037888-1767886274804-124","schema":"yds.events.puts.v1","tags":{},"usage":{"quantity":1,"unit":"put_events","start":1767886274,"finish":1767886276},"labels":{"datastreams_stream_name":"stream_TestReservedConsumersMetering","ydb_database":"root","Category":"Topic"},"version":"v1","source_id":"72075186224037888","source_wt":1767886276}' Got line from metering file data: '{"cloud_id":"somecloud","folder_id":"somefolder","resource_id":"/Root/stream_TestReservedConsumersMetering","id":"yds.reserved_resources-root-72075186224037888-1767886274804-125","schema":"yds.throughput.reserved.v1","tags":{"reserved_throughput_bps":1048576,"reserved_consumers_count":2},"usage":{"quantity":2,"unit":"second","start":1767886274,"finish":1767886276},"labels":{"datastreams_stream_name":"stream_TestReservedConsumersMetering","ydb_database":"root","Category":"Topic"},"version":"v1","source_id":"72075186224037888","source_wt":1767886276}' Got line from metering file data: '{"cloud_id":"somecloud","folder_id":"somefolder","resource_id":"/Root/stream_TestReservedConsumersMetering","id":"yds.reserved_resources-root-72075186224037888-1767886274804-126","schema":"yds.storage.reserved.v1","tags":{},"usage":{"quantity":112640,"unit":"mbyte*second","start":1767886274,"finish":1767886276},"labels":{"datastreams_stream_name":"stream_TestReservedConsumersMetering","ydb_database":"root","Category":"Topic"},"version":"v1","source_id":"72075186224037888","source_wt":1767886276}' |86.4%| [TM] {BAZEL_UPLOAD} ydb/services/datastreams/ut/unittest >> PartitionStats::Collector >> PartitionStats::Collector [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/services/datastreams/ut/unittest >> DataStreams::TestPutRecords [GOOD] Test command err: 2026-01-08T15:30:47.574405Z node 1 :METADATA_PROVIDER WARN: log.cpp:841: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7593013614201013658:2080];send_to=[0:7307199536658146131:7762515]; 2026-01-08T15:30:47.574470Z node 1 :METADATA_PROVIDER ERROR: log.cpp:841: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2026-01-08T15:30:47.786916Z node 1 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:125: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, got undelivered to scheme cache: ActorUnknown 2026-01-08T15:30:48.384428Z node 1 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: LookupError, path: Root/.metadata/script_executions 2026-01-08T15:30:48.492617Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2026-01-08T15:30:48.492697Z node 1 :HIVE WARN: 
node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2026-01-08T15:30:48.603773Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2026-01-08T15:30:48.820991Z node 1 :TX_CONVEYOR ERROR: log.cpp:841: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2026-01-08T15:30:48.822811Z node 1 :IMPORT WARN: schemeshard_import.cpp:440: Table profiles were not loaded 2026-01-08T15:30:48.892952Z node 1 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: PathErrorUnknown, path: Root/.metadata/script_executions 2026-01-08T15:30:49.210791Z node 3 :BS_NODE WARN: {NWDC01@distconf.cpp:423} StateFunc too long Type# 268639239 Duration# 0.017507s 2026-01-08T15:30:49.334485Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2026-01-08T15:30:49.334505Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2026-01-08T15:30:49.334513Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2026-01-08T15:30:49.351083Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2026-01-08T15:30:50.098139Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2026-01-08T15:30:50.278087Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) 2026-01-08T15:30:50.563518Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterUserAttributes, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_alter_user_attrs.cpp:72) 2026-01-08T15:30:55.071084Z node 4 :METADATA_PROVIDER WARN: log.cpp:841: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[4:7593013650226682248:2080];send_to=[0:7307199536658146131:7762515]; 2026-01-08T15:30:55.071140Z node 4 :METADATA_PROVIDER ERROR: log.cpp:841: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2026-01-08T15:30:55.106597Z node 4 :BS_NODE WARN: {NWDC01@distconf.cpp:423} StateFunc too long Type# 268639239 Duration# 0.013420s 2026-01-08T15:30:55.169012Z node 4 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:125: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, got undelivered to scheme cache: ActorUnknown 2026-01-08T15:30:55.264270Z node 4 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check 
script execution tables existence, scheme status: LookupError, path: Root/.metadata/script_executions 2026-01-08T15:30:55.477348Z node 4 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: LookupError, path: Root/.metadata/script_executions 2026-01-08T15:30:55.483424Z node 4 :IMPORT WARN: schemeshard_import.cpp:440: Table profiles were not loaded 2026-01-08T15:30:55.552083Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2026-01-08T15:30:55.552165Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2026-01-08T15:30:55.579072Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Connecting -> Connected 2026-01-08T15:30:55.750798Z node 6 :BS_NODE WARN: {NWDC01@distconf.cpp:423} StateFunc too long Type# 268639238 Duration# 0.012197s 2026-01-08T15:30:55.811252Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2026-01-08T15:30:55.811275Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2026-01-08T15:30:55.811282Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2026-01-08T15:30:55.811392Z node 4 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2026-01-08T15:30:55.897868Z node 4 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: PathErrorUnknown, path: Root/.metadata/script_executions 2026-01-08T15:30:56.092875Z node 4 :TX_CONVEYOR ERROR: log.cpp:841: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2026-01-08T15:30:56.192136Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2026-01-08T15:30:56.351849Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) 2026-01-08T15:30:56.795875Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterUserAttributes, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_alter_user_attrs.cpp:72) 2026-01-08T15:30:56.814056Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:123: NotifyTxCompletion, unknown transaction, txId: 281474976710659, at schemeshard: 72057594046644480 2026-01-08T15:30:57.126428Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterPersQueueGroup, opId: 281474976710661:0, at 
schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_alter_pq.cpp:311) 2026-01-08T15:30:57.233469Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) encryption_type: NONE sequence_number: "0" shard_id: "shard-000000" encryption_type: NONE records { sequence_number: "1" shard_id: "shard-000000" } records { sequence_number: "0" shard_id: "shard-000004" } records { sequence_number: "0" shard_id: "shard-000002" } records { sequence_number: "1" shard_id: "shard-000002" } records { sequence_number: "1" shard_id: "shard-000004" } records { sequence_number: "2" shard_id: "shard-000002" } records { sequence_number: "3" shard_id: "shard-000002" } records { sequence_number: "4" shard_id: "shard-000002" } records { sequence_number: "2" shard_id: "shard-000000" } records { sequence_number: "2" shard_id: "shard-000004" } records { sequence_number: "0" shard_id: "shard-000003" } records { sequence_number: "3" shard_id: "shard-000000" } records { sequence_number: "1" shard_id: "shard-000003" } records { sequence_number: "2" shard_id: "shard-000003" } records { sequence_number: "4" shard_id: "shard-000000" } records { sequence_number: "3" shard_id: "shard-000003" } records { sequence_number: "5" shard_id: "shard-000002" } records { sequence_number: "6" shard_id: "shard-000002" } records { sequence_number: "0" shard_id: "shard-000001" } records { sequence_number: "3" shard_id: "shard-000004" } records { sequence_number: "4" shard_id: "shard-000004" } records { sequence_number: "5" shard_id: "shard-000000" } records { sequence_number: "4" shard_id: "shard-000003" } records { sequence_number: "6" shard_id: "shard-000000" } records { sequence_number: "5" shard_id: "shard-000004" } records { sequence_number: "7" shard_id: "shard-000000" } records { sequence_number: "6" shard_id: "shard-000004" } records { sequence_number: "7" shard_id: "shard-000002" } records { sequence_number: "8" shard_id: "shard-000000" } records { sequence_number: "9" shard_id: "shard-000000" } 2026-01-08T15:30:57.340153Z :INFO: [/Root/] [/Root/] [8a289660-6f253aa7-ae22cd18-f41c6bb8] Starting read session 2026-01-08T15:30:57.340226Z :DEBUG: [/Root/] [/Root/] [8a289660-6f253aa7-ae22cd18-f41c6bb8] Starting session to cluster null (localhost:7192) 2026-01-08T15:30:57.355350Z :DEBUG: [/Root/] [/Root/] [8a289660-6f253aa7-ae22cd18-f41c6bb8] [null] In Reconnect, ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2026-01-08T15:30:57.355398Z :DEBUG: [/Root/] [/Root/] [8a289660-6f253aa7-ae22cd18-f41c6bb8] [null] New values: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2026-01-08T15:30:57.355437Z :DEBUG: [/Root/] [/Root/] [8a289660-6f253aa7-ae22cd18-f41c6bb8] [null] Reconnecting session to cluster null in 0.000000s 2026-01-08T15:30:57.374067Z :DEBUG: [/Root/] [/Root/] [8a289660-6f253aa7-ae22cd18-f41c6bb8] [null] Successfully connected. 
Initializing session 2026-01-08T15:30:57.378020Z node 4 :PQ_READ_PROXY DEBUG: grpc_pq_read.h:109: new grpc connection 2026-01-08T15:30:57.378047Z node 4 :PQ_READ_PROXY DEBUG: grpc_pq_read.h:131: new session created cookie 1 2026-01-08T15:30:57.390996Z node 4 :PQ_READ_PROXY DEBUG: read_session_actor.cpp:122: session cookie 1 consumer session grpc read done: success# 1, data# { init_request { topics_read_settings { topic: "/Root/st ... 1 (4-4) 2026-01-08T15:31:09.882643Z :DEBUG: [/Root/] Decompression task done. Partition/PartitionSessionId: 1 (5-5) 2026-01-08T15:31:09.882675Z :DEBUG: [/Root/] Decompression task done. Partition/PartitionSessionId: 1 (6-6) 2026-01-08T15:31:09.882714Z :DEBUG: [/Root/] Decompression task done. Partition/PartitionSessionId: 1 (7-7) 2026-01-08T15:31:09.882743Z :DEBUG: [/Root/] Decompression task done. Partition/PartitionSessionId: 1 (8-8) 2026-01-08T15:31:09.890449Z :DEBUG: [/Root/] Decompression task done. Partition/PartitionSessionId: 1 (0-1) 2026-01-08T15:31:09.894459Z :DEBUG: [/Root/] Take Data. Partition 1. Read: {0, 0} (0-0) 2026-01-08T15:31:09.895620Z :DEBUG: [/Root/] Take Data. Partition 1. Read: {1, 0} (1-1) 2026-01-08T15:31:09.908648Z :DEBUG: [/Root/] Take Data. Partition 1. Read: {2, 0} (2-2) 2026-01-08T15:31:09.909733Z :DEBUG: [/Root/] Take Data. Partition 1. Read: {3, 0} (3-3) 2026-01-08T15:31:09.915525Z :DEBUG: [/Root/] Take Data. Partition 1. Read: {4, 0} (4-4) 2026-01-08T15:31:09.916582Z :DEBUG: [/Root/] Take Data. Partition 1. Read: {5, 0} (5-5) 2026-01-08T15:31:09.917626Z :DEBUG: [/Root/] Take Data. Partition 1. Read: {6, 0} (6-6) 2026-01-08T15:31:09.926208Z :DEBUG: [/Root/] Take Data. Partition 1. Read: {7, 0} (7-7) 2026-01-08T15:31:09.941300Z :DEBUG: [/Root/] Take Data. Partition 1. Read: {8, 0} (8-8) 2026-01-08T15:31:09.941411Z :DEBUG: [/Root/] [/Root/] [1aa71ec3-d25f0fb-7735fd42-3ede6de6] [null] The application data is transferred to the client. Number of messages 9, size 8388611 bytes 2026-01-08T15:31:09.942538Z :DEBUG: [/Root/] [/Root/] [1aa71ec3-d25f0fb-7735fd42-3ede6de6] [null] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2026-01-08T15:31:09.944502Z :DEBUG: [/Root/] Decompression task done. Partition/PartitionSessionId: 0 (0-1) 2026-01-08T15:31:09.944667Z :DEBUG: [/Root/] Decompression task done. Partition/PartitionSessionId: 4 (0-1) 2026-01-08T15:31:09.944765Z :DEBUG: [/Root/] Decompression task done. Partition/PartitionSessionId: 3 (0-2) 2026-01-08T15:31:09.944883Z :DEBUG: [/Root/] Decompression task done. Partition/PartitionSessionId: 3 (3-3) 2026-01-08T15:31:09.966959Z :DEBUG: [/Root/] Take Data. Partition 0. Read: {0, 0} (0-0) 2026-01-08T15:31:09.967041Z :DEBUG: [/Root/] Take Data. Partition 0. Read: {1, 0} (1-1) 2026-01-08T15:31:09.967082Z :DEBUG: [/Root/] [/Root/] [1aa71ec3-d25f0fb-7735fd42-3ede6de6] [null] The application data is transferred to the client. Number of messages 2, size 0 bytes 2026-01-08T15:31:09.967285Z :DEBUG: [/Root/] Take Data. Partition 4. Read: {0, 0} (0-0) 2026-01-08T15:31:09.967319Z :DEBUG: [/Root/] Take Data. Partition 4. Read: {1, 0} (1-1) 2026-01-08T15:31:09.967341Z :DEBUG: [/Root/] [/Root/] [1aa71ec3-d25f0fb-7735fd42-3ede6de6] [null] The application data is transferred to the client. Number of messages 2, size 0 bytes 2026-01-08T15:31:09.967449Z :DEBUG: [/Root/] Take Data. Partition 3. Read: {0, 0} (0-0) 2026-01-08T15:31:09.967494Z :DEBUG: [/Root/] Take Data. Partition 3. Read: {1, 0} (1-1) 2026-01-08T15:31:09.968501Z :DEBUG: [/Root/] Take Data. Partition 3. 
Read: {2, 0} (2-2) 2026-01-08T15:31:09.968547Z :DEBUG: [/Root/] Take Data. Partition 3. Read: {3, 0} (3-3) 2026-01-08T15:31:09.968586Z :DEBUG: [/Root/] [/Root/] [1aa71ec3-d25f0fb-7735fd42-3ede6de6] [null] The application data is transferred to the client. Number of messages 4, size 1049088 bytes 2026-01-08T15:31:09.969184Z :INFO: [/Root/] [/Root/] [1aa71ec3-d25f0fb-7735fd42-3ede6de6] Closing read session. Close timeout: 0.000000s 2026-01-08T15:31:09.969253Z :INFO: [/Root/] Read/commit by partition streams (cluster:topic:partition:stream-id:read-offset:committed-offset): null:stream_TestPutRecordsCornerCases:0:5:1:0 null:stream_TestPutRecordsCornerCases:3:4:3:0 null:stream_TestPutRecordsCornerCases:4:3:1:0 null:stream_TestPutRecordsCornerCases:2:2:0:0 null:stream_TestPutRecordsCornerCases:1:1:8:0 2026-01-08T15:31:09.969308Z :INFO: [/Root/] [/Root/] [1aa71ec3-d25f0fb-7735fd42-3ede6de6] Counters: { Errors: 0 CurrentSessionLifetimeMs: 262 BytesRead: 9437699 MessagesRead: 17 BytesReadCompressed: 9437699 BytesInflightUncompressed: 0 BytesInflightCompressed: 0 BytesInflightTotal: 0 MessagesInflight: 0 } 2026-01-08T15:31:09.969411Z :NOTICE: [/Root/] [/Root/] [1aa71ec3-d25f0fb-7735fd42-3ede6de6] Aborting read session. Description: SessionClosed { Status: ABORTED Issues: "
: Error: Close with zero timeout " } 2026-01-08T15:31:09.969458Z :DEBUG: [/Root/] [/Root/] [1aa71ec3-d25f0fb-7735fd42-3ede6de6] [null] Abort session to cluster 2026-01-08T15:31:09.970085Z :NOTICE: [/Root/] [/Root/] [1aa71ec3-d25f0fb-7735fd42-3ede6de6] Aborting read session. Description: SessionClosed { Status: ABORTED Issues: "
: Error: Aborted " } 2026-01-08T15:31:09.982474Z node 7 :PQ_READ_PROXY INFO: read_session_actor.cpp:125: session cookie 1 consumer user1 session user1_7_1_15004833762512830671_v1 grpc read failed 2026-01-08T15:31:09.982534Z node 7 :PQ_READ_PROXY INFO: read_session_actor.cpp:92: session cookie 1 consumer user1 session user1_7_1_15004833762512830671_v1 grpc closed 2026-01-08T15:31:09.982589Z node 7 :PQ_READ_PROXY INFO: read_session_actor.cpp:383: session cookie 1 consumer user1 session user1_7_1_15004833762512830671_v1 is DEAD 2026-01-08T15:31:12.396979Z node 10 :METADATA_PROVIDER WARN: log.cpp:841: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[10:7593013723141938973:2154];send_to=[0:7307199536658146131:7762515]; 2026-01-08T15:31:12.397086Z node 10 :METADATA_PROVIDER ERROR: log.cpp:841: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2026-01-08T15:31:12.466391Z node 12 :BS_NODE WARN: {NWDC01@distconf.cpp:423} StateFunc too long Type# 268639239 Duration# 0.007790s 2026-01-08T15:31:12.533876Z node 10 :BS_NODE WARN: {NWDC01@distconf.cpp:423} StateFunc too long Type# 268639238 Duration# 0.035797s 2026-01-08T15:31:12.558856Z node 10 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: LookupError, path: Root/.metadata/script_executions 2026-01-08T15:31:12.819794Z node 10 :IMPORT WARN: schemeshard_import.cpp:440: Table profiles were not loaded 2026-01-08T15:31:12.827857Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2026-01-08T15:31:12.827980Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2026-01-08T15:31:12.846522Z node 10 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: LookupError, path: Root/.metadata/script_executions 2026-01-08T15:31:12.880367Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Connecting -> Connected 2026-01-08T15:31:13.168232Z node 10 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2026-01-08T15:31:13.168260Z node 10 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2026-01-08T15:31:13.168270Z node 10 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2026-01-08T15:31:13.168383Z node 10 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2026-01-08T15:31:13.406461Z node 10 :TX_CONVEYOR ERROR: log.cpp:841: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2026-01-08T15:31:13.446593Z node 10 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: PathErrorUnknown, path: Root/.metadata/script_executions 2026-01-08T15:31:13.632009Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first 
GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2026-01-08T15:31:13.853215Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) 2026-01-08T15:31:14.276247Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterUserAttributes, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_alter_user_attrs.cpp:72) 2026-01-08T15:31:15.003915Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101)
: Error: Access for stream /Root/stream_TestPutRecords is denied for subject user2@builtin, code: 500018 2026-01-08T15:31:15.277326Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) PutRecordsResponse = encryption_type: NONE records { sequence_number: "0" shard_id: "shard-000000" } records { sequence_number: "0" shard_id: "shard-000004" } records { sequence_number: "0" shard_id: "shard-000002" } records { sequence_number: "1" shard_id: "shard-000002" } records { sequence_number: "1" shard_id: "shard-000004" } records { sequence_number: "2" shard_id: "shard-000002" } records { sequence_number: "3" shard_id: "shard-000002" } records { sequence_number: "4" shard_id: "shard-000002" } records { sequence_number: "1" shard_id: "shard-000000" } records { sequence_number: "2" shard_id: "shard-000004" } records { sequence_number: "0" shard_id: "shard-000003" } records { sequence_number: "2" shard_id: "shard-000000" } records { sequence_number: "1" shard_id: "shard-000003" } records { sequence_number: "2" shard_id: "shard-000003" } records { sequence_number: "3" shard_id: "shard-000000" } records { sequence_number: "3" shard_id: "shard-000003" } records { sequence_number: "5" shard_id: "shard-000002" } records { sequence_number: "6" shard_id: "shard-000002" } records { sequence_number: "0" shard_id: "shard-000001" } records { sequence_number: "3" shard_id: "shard-000004" } records { sequence_number: "4" shard_id: "shard-000004" } records { sequence_number: "4" shard_id: "shard-000000" } records { sequence_number: "4" shard_id: "shard-000003" } records { sequence_number: "5" shard_id: "shard-000000" } records { sequence_number: "5" shard_id: "shard-000004" } records { sequence_number: "6" shard_id: "shard-000000" } records { sequence_number: "6" shard_id: "shard-000004" } records { sequence_number: "7" shard_id: "shard-000002" } records { sequence_number: "7" shard_id: "shard-000000" } records { sequence_number: "8" shard_id: "shard-000000" } PutRecord response = encryption_type: NONE sequence_number: "7" shard_id: "shard-000004" |86.4%| [TM] {BAZEL_UPLOAD} ydb/services/datastreams/ut/unittest >> DataStreams::TestDeleteStreamWithEnforceFlagFalse [GOOD] >> DataStreams::TestGetRecords1MBMessagesOneByOneBySeqNo |86.4%| [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/knn/unittest |86.4%| [TM] {BAZEL_UPLOAD} ydb/core/kqp/ut/knn/unittest |86.4%| [TM] {asan, default-linux-x86_64, release} ydb/core/sys_view/partition_stats/ut/unittest >> PartitionStats::Collector [GOOD] |86.4%| [TM] {BAZEL_UPLOAD} ydb/core/sys_view/partition_stats/ut/unittest >> DataShardVolatile::DistributedWriteThenReadIterator [GOOD] >> DataShardVolatile::DistributedWriteThenReadIteratorStream >> THiveTest::TestDeleteOwnerTablets [GOOD] >> THiveTest::TestDeleteOwnerTabletsMany |86.4%| [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/knn/unittest |86.4%| [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/knn/unittest |86.4%| [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/knn/unittest >> DataStreams::TestInvalidRetentionCombinations [GOOD] |86.4%| [TA] $(B)/ydb/core/sys_view/partition_stats/ut/test-results/unittest/{meta.json ... 
results_accumulator.log} |86.4%| [TM] {BAZEL_UPLOAD} ydb/core/kqp/ut/knn/unittest |86.4%| [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/knn/unittest |86.4%| [TM] {BAZEL_UPLOAD} ydb/core/kqp/ut/knn/unittest |86.4%| [TM] {BAZEL_UPLOAD} ydb/core/kqp/ut/knn/unittest |86.4%| [TA] {BAZEL_UPLOAD} $(B)/ydb/core/sys_view/partition_stats/ut/test-results/unittest/{meta.json ... results_accumulator.log} >> TSequence::CopyTableWithSequence [GOOD] >> TSequence::AlterSequence |86.4%| [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/knn/unittest |86.4%| [TM] {BAZEL_UPLOAD} ydb/core/kqp/ut/knn/unittest |86.4%| [TM] {BAZEL_UPLOAD} ydb/core/kqp/ut/knn/unittest |86.4%| [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/knn/unittest |86.4%| [TM] {BAZEL_UPLOAD} ydb/core/kqp/ut/knn/unittest ------- [TM] {asan, default-linux-x86_64, release} ydb/services/datastreams/ut/unittest >> DataStreams::TestInvalidRetentionCombinations [GOOD] Test command err: 2026-01-08T15:31:03.666961Z node 1 :METADATA_PROVIDER WARN: log.cpp:841: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7593013683092062990:2266];send_to=[0:7307199536658146131:7762515]; 2026-01-08T15:31:03.667020Z node 1 :METADATA_PROVIDER ERROR: log.cpp:841: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2026-01-08T15:31:04.227310Z node 1 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: LookupError, path: Root/.metadata/script_executions 2026-01-08T15:31:04.406396Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2026-01-08T15:31:04.422052Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2026-01-08T15:31:04.449939Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2026-01-08T15:31:04.711512Z node 1 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: LookupError, path: Root/.metadata/script_executions 2026-01-08T15:31:04.711620Z node 1 :TX_CONVEYOR ERROR: log.cpp:841: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2026-01-08T15:31:04.711951Z node 1 :IMPORT WARN: schemeshard_import.cpp:440: Table profiles were not loaded 2026-01-08T15:31:05.083183Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2026-01-08T15:31:05.083227Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2026-01-08T15:31:05.083254Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2026-01-08T15:31:05.083342Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2026-01-08T15:31:05.281933Z node 1 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: PathErrorUnknown, path: Root/.metadata/script_executions 2026-01-08T15:31:05.549468Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part 
proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2026-01-08T15:31:05.687945Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) 2026-01-08T15:31:06.050403Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterUserAttributes, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_alter_user_attrs.cpp:72) 2026-01-08T15:31:11.049010Z node 4 :METADATA_PROVIDER ERROR: log.cpp:841: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2026-01-08T15:31:11.054450Z node 4 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: LookupError, path: Root/.metadata/script_executions 2026-01-08T15:31:11.215090Z node 4 :IMPORT WARN: schemeshard_import.cpp:440: Table profiles were not loaded 2026-01-08T15:31:11.274409Z node 4 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: LookupError, path: Root/.metadata/script_executions 2026-01-08T15:31:11.275485Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2026-01-08T15:31:11.275577Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2026-01-08T15:31:11.305974Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Connecting -> Connected 2026-01-08T15:31:11.326643Z node 6 :BS_NODE WARN: {NWDC01@distconf.cpp:423} StateFunc too long Type# 268639238 Duration# 0.008368s 2026-01-08T15:31:11.497026Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2026-01-08T15:31:11.497049Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2026-01-08T15:31:11.497058Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2026-01-08T15:31:11.497136Z node 4 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2026-01-08T15:31:11.757006Z node 4 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: PathErrorUnknown, path: Root/.metadata/script_executions 2026-01-08T15:31:12.030487Z node 4 :TX_CONVEYOR ERROR: log.cpp:841: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2026-01-08T15:31:12.211957Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is 
undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2026-01-08T15:31:12.439942Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) 2026-01-08T15:31:12.814006Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterUserAttributes, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_alter_user_attrs.cpp:72) 2026-01-08T15:31:13.026661Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) 2026-01-08T15:31:13.062391Z node 4 :TX_PROXY_SCHEME_CACHE WARN: cache.cpp:324: Access denied: self# [4:7593013727195050635:3226], for# user2@builtin, access# DescribeSchema 2026-01-08T15:31:13.071884Z node 4 :TX_PROXY_SCHEME_CACHE WARN: cache.cpp:324: Access denied: self# [4:7593013727195050638:3227], for# user2@builtin, access# DescribeSchema 2026-01-08T15:31:13.084288Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) 2026-01-08T15:31:17.010903Z node 7 :METADATA_PROVIDER WARN: log.cpp:841: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[7:7593013744432551244:2082];send_to=[0:7307199536658146131:7762515]; 2026-01-08T15:31:17.010986Z node 7 :METADATA_PROVIDER ERROR: log.cpp:841: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2026-01-08T15:31:17.077913Z node 7 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: LookupError, path: Root/.metadata/script_executions 2026-01-08T15:31:17.199609Z node 7 :IMPORT WARN: schemeshard_import.cpp:440: Table profiles were not loaded 2026-01-08T15:31:17.221965Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2026-01-08T15:31:17.222061Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2026-01-08T15:31:17.245352Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Connecting -> Connected 2026-01-08T15:31:17.278199Z node 7 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: PathErrorUnknown, path: 
Root/.metadata/script_executions 2026-01-08T15:31:17.381423Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2026-01-08T15:31:17.381447Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2026-01-08T15:31:17.381457Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2026-01-08T15:31:17.381560Z node 7 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2026-01-08T15:31:17.744027Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2026-01-08T15:31:17.873130Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) 2026-01-08T15:31:18.022286Z node 7 :TX_CONVEYOR ERROR: log.cpp:841: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2026-01-08T15:31:18.161994Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterUserAttributes, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_alter_user_attrs.cpp:72) 2026-01-08T15:31:18.178448Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:123: NotifyTxCompletion, unknown transaction, txId: 281474976710659, at schemeshard: 72057594046644480
: Error: retention hours and storage megabytes must fit one of: { hours : [0, 24], storage : [0, 0]}, { hours : [0, 168], storage : [51200, 1048576]}, { hours : [0, 24], storage : [0, 0]}, { hours : [0, 168], storage : [51200, 1048576]}, provided values: hours 168, storage 10, code: 500080
: Error: retention hours and storage megabytes must fit one of: { hours : [0, 24], storage : [0, 0]}, { hours : [0, 168], storage : [51200, 1048576]}, { hours : [0, 24], storage : [0, 0]}, { hours : [0, 168], storage : [51200, 1048576]}, provided values: hours 144, storage 0, code: 500080
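The two rejections above quote the permitted (retention hours, storage megabytes) interval pairs verbatim. A minimal Python sketch of the same check, for clarity only — the interval values are copied straight from the error text, and the function name is hypothetical, not YDB's actual validation code:

# Illustrative re-statement of the retention check quoted in the errors above.
# Interval pairs copied verbatim from the error text; this is a sketch, not
# YDB's real implementation.
ALLOWED_COMBINATIONS = [
    ((0, 24), (0, 0)),             # hours in [0, 24] with storage exactly 0 MB
    ((0, 168), (51200, 1048576)),  # hours in [0, 168] with storage in [51200, 1048576] MB
]

def retention_is_valid(hours: int, storage_mb: int) -> bool:
    """True if (hours, storage_mb) fits at least one permitted interval pair."""
    return any(
        lo_h <= hours <= hi_h and lo_s <= storage_mb <= hi_s
        for (lo_h, hi_h), (lo_s, hi_s) in ALLOWED_COMBINATIONS
    )

# The two rejected requests from this test:
assert not retention_is_valid(168, 10)  # storage 10 MB is below the 51200 MB floor
assert not retention_is_valid(144, 0)   # storage 0 MB is only permitted when hours <= 24

Both requests fall outside every permitted pair, which is why the operations fail with code 500080.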
: Error: write_speed per second in partition must have values from set {131072,524288,1048576}, got 130048, code: 500080
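The write_speed errors (this one and the next) enumerate the only accepted per-partition rates. A sketch of the same membership check, with the value set copied verbatim from the error text — the helper is illustrative, not YDB's implementation:

# Accepted per-partition write speeds in bytes/sec (128 KiB, 512 KiB, 1 MiB).
ALLOWED_WRITE_SPEEDS_BPS = {131072, 524288, 1048576}

def write_speed_is_valid(bytes_per_second: int) -> bool:
    return bytes_per_second in ALLOWED_WRITE_SPEEDS_BPS

assert not write_speed_is_valid(130048)   # 1 KiB below the 131072 step -> rejected
assert not write_speed_is_valid(1049600)  # 1 KiB above the 1048576 step -> rejected
assert write_speed_is_valid(524288)       # exact member of the set -> accepted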
: Error: write_speed per second in partition must have values from set {131072,524288,1048576}, got 1049600, code: 500080 >> TMLPDLQMoverTests::MoveToDLQ_TopicNotExists [GOOD] >> TMLPStorageTests::NextFromEmptyStorage [GOOD] >> TMLPStorageTests::CommitToEmptyStorage [GOOD] >> TMLPStorageTests::UnlockToEmptyStorage [GOOD] >> TMLPStorageTests::ChangeDeadlineEmptyStorage [GOOD] >> TMLPStorageTests::AddMessageToEmptyStorage [GOOD] >> TMLPStorageTests::AddNotFirstMessageToEmptyStorage [GOOD] >> TMLPStorageTests::AddMessageWithSkippedMessage [GOOD] >> TMLPStorageTests::AddMessageWithDelay [GOOD] >> TMLPStorageTests::AddMessageWithBigDelay [GOOD] >> TMLPStorageTests::AddMessageWithZeroDelay [GOOD] >> TMLPStorageTests::AddMessageWithDelay_Unlock [GOOD] >> TMLPStorageTests::NextWithoutKeepMessageOrderStorage [GOOD] >> TMLPStorageTests::NextWithKeepMessageOrderStorage [GOOD] >> TMLPStorageTests::NextWithWriteRetentionPeriod [GOOD] >> TMLPStorageTests::NextWithInfinityRetentionPeriod [GOOD] >> TMLPStorageTests::SkipLockedMessage [GOOD] >> TMLPStorageTests::SkipLockedMessageGroups [GOOD] >> TMLPStorageTests::CommitLockedMessage_WithoutKeepMessageOrder [GOOD] >> TMLPStorageTests::CommitLockedMessage_WithKeepMessageOrder [GOOD] >> TMLPStorageTests::CommitUnlockedMessage >> TMLPStorageTests::CommitUnlockedMessage [GOOD] |86.4%| [TM] {BAZEL_UPLOAD} ydb/services/datastreams/ut/unittest >> TMLPStorageTests::CommitCommittedMessage [GOOD] >> TMLPStorageTests::UnlockLockedMessage_WithoutKeepMessageOrder [GOOD] >> TMLPStorageTests::UnlockLockedMessage_WithKeepMessageOrder [GOOD] >> TMLPStorageTests::UnlockUnlockedMessage [GOOD] >> TMLPStorageTests::UnlockCommittedMessage [GOOD] >> TMLPStorageTests::ChangeDeadlineLockedMessage [GOOD] >> TMLPStorageTests::ChangeDeadlineUnlockedMessage [GOOD] >> TMLPStorageTests::EmptyStorageSerialization [GOOD] >> TMLPStorageTests::StorageSerialization [GOOD] >> TMLPStorageTests::StorageSerialization_WAL_Unlocked [GOOD] >> TMLPStorageTests::StorageSerialization_WAL_Locked [GOOD] >> TMLPStorageTests::StorageSerialization_WAL_Committed [GOOD] >> TMLPStorageTests::StorageSerialization_WAL_DLQ [GOOD] >> TMLPStorageTests::StorageSerialization_WAL_DeadLetterPolicy_Delete [GOOD] >> TMLPStorageTests::StorageSerialization_WAL_WithHole [GOOD] >> TMLPStorageTests::StorageSerialization_WAL_WithMoveBaseTime_Deadline [GOOD] >> TMLPStorageTests::CompactStorage_ByCommittedOffset [GOOD] >> TMLPStorageTests::CompactStorage_ByRetention [GOOD] >> TMLPStorageTests::CompactStorage_ByDeadline [GOOD] >> TMLPStorageTests::CompactStorage_WithDLQ [GOOD] >> TMLPStorageTests::ProccessDeadlines [GOOD] >> TMLPStorageTests::MoveBaseDeadline [GOOD] >> TMLPStorageTests::SlowZone_MoveUnprocessedToSlowZone [GOOD] >> TMLPStorageTests::SlowZone_MoveLockedToSlowZone [GOOD] >> TMLPStorageTests::SlowZone_MoveCommittedToSlowZone [GOOD] >> TMLPStorageTests::SlowZone_MoveDLQToSlowZone [GOOD] >> TMLPStorageTests::SlowZone_MoveToSlowZoneAndLock [GOOD] >> TMLPStorageTests::SlowZone_MoveToSlowZoneAndCommit [GOOD] >> TMLPStorageTests::SlowZone_MoveToSlowZoneAndDLQ [GOOD] >> TMLPStorageTests::SlowZone_Lock [GOOD] >> TMLPStorageTests::SlowZone_Commit_First [GOOD] >> TMLPStorageTests::SlowZone_Commit [GOOD] >> TMLPStorageTests::SlowZone_DLQ [GOOD] >> TMLPStorageTests::SlowZone_CommitToFast [GOOD] >> TMLPStorageTests::SlowZone_CommitAndAdd [GOOD] >> TMLPStorageTests::SlowZone_Retention_1message [GOOD] >> TMLPStorageTests::SlowZone_Retention_2message [GOOD] >> TMLPStorageTests::SlowZone_Retention_3message [GOOD] >> 
TMLPStorageTests::ChangeDeadLetterPolicy_Delete [GOOD] >> TMLPStorageTests::ChangeDeadLetterPolicy_Unspecified [GOOD] >> TMLPTests::CreateWithRetentionStorage |86.4%| [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/knn/unittest |86.4%| [TM] {BAZEL_UPLOAD} ydb/core/kqp/ut/knn/unittest |86.4%| [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/knn/unittest |86.4%| [TM] {BAZEL_UPLOAD} ydb/core/kqp/ut/knn/unittest |86.4%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/public/sdk/cpp/src/client/topic/ut/with_direct_read_ut/src-client-topic-ut-with_direct_read_ut |86.4%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/public/sdk/cpp/src/client/topic/ut/with_direct_read_ut/src-client-topic-ut-with_direct_read_ut |86.4%| [TA] {RESULT} $(B)/ydb/core/sys_view/partition_stats/ut/test-results/unittest/{meta.json ... results_accumulator.log} |86.4%| [LD] {RESULT} $(B)/ydb/public/sdk/cpp/src/client/topic/ut/with_direct_read_ut/src-client-topic-ut-with_direct_read_ut |86.4%| [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/knn/unittest |86.4%| [TM] {BAZEL_UPLOAD} ydb/core/kqp/ut/knn/unittest |86.5%| [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/knn/unittest |86.5%| [TM] {BAZEL_UPLOAD} ydb/core/kqp/ut/knn/unittest |86.5%| [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/knn/unittest |86.5%| [TM] {BAZEL_UPLOAD} ydb/core/kqp/ut/knn/unittest |86.5%| [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/knn/unittest |86.5%| [TM] {BAZEL_UPLOAD} ydb/core/kqp/ut/knn/unittest >> DataStreams::TestListStreamConsumers [GOOD] >> DataStreams::TestListShards1Shard |86.5%| [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/knn/unittest |86.5%| [TM] {BAZEL_UPLOAD} ydb/core/kqp/ut/knn/unittest >> KqpSinkTx::SnapshotROInteractive1 [GOOD] >> KqpSinkTx::SnapshotROInteractive2 >> TDatabaseResolverTests::Ydb_Dedicated >> TDatabaseResolverTests::Ydb_Dedicated [GOOD] >> KqpPg::TypeCoercionInsert-useSink |86.5%| [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/knn/unittest |86.5%| [TM] {BAZEL_UPLOAD} ydb/core/kqp/ut/knn/unittest >> TSequence::AlterSequence [GOOD] >> TSequence::AlterTableSetDefaultFromSequence >> QuoterWithKesusTest::UpdatesCountersForParentResources [GOOD] >> QuoterWithKesusTest::CanDeleteResourceWhenUsingIt |86.5%| [TM] {asan, default-linux-x86_64, release} ydb/core/fq/libs/actors/ut/unittest >> TDatabaseResolverTests::Ydb_Dedicated [GOOD] |86.5%| [TM] {BAZEL_UPLOAD} ydb/core/fq/libs/actors/ut/unittest >> KqpPg::InsertNoTargetColumns_Simple+useSink >> KqpPg::ReadPgArray >> KqpPg::ReadPgArray [GOOD] >> KqpPg::TableArrayInsert+useSink >> KqpPg::CreateTableSerialColumns+useSink >> KqpPg::NoTableQuery+useSink >> KqpPg::TypeCoercionBulkUpsert >> KqpPg::EmptyQuery+useSink >> DataShardVolatile::DistributedWriteShardRestartBeforePlan-UseSink [GOOD] >> DataShardVolatile::DistributedWriteShardRestartAfterExpectation+UseSink >> KqpPg::InsertFromSelect_Simple+useSink >> test_canonical_records.py::test_topic [GOOD] >> KeyValueGRPCService::SimpleWriteReadRangeV2 [GOOD] >> KeyValueGRPCService::SimpleWriteListRangeV1 >> KqpSinkMvcc::OlapReadWriteTxFailsOnConcurrentWrite3 [GOOD] >> KqpSinkMvcc::OltpNamedStatement >> KqpPg::JoinWithQueryService+StreamLookup >> DataShardVolatile::DistributedWriteThenReadIteratorStream [GOOD] >> DataShardVolatile::DistributedWriteThenScanQuery |86.5%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/services/persqueue_cluster_discovery/ut/ydb-services-persqueue_cluster_discovery-ut |86.5%| [LD] {BAZEL_UPLOAD, SKIPPED} 
$(B)/ydb/services/persqueue_cluster_discovery/ut/ydb-services-persqueue_cluster_discovery-ut |86.5%| [LD] {RESULT} $(B)/ydb/services/persqueue_cluster_discovery/ut/ydb-services-persqueue_cluster_discovery-ut >> KqpPg::CreateTableBulkUpsertAndRead >> TSequence::AlterTableSetDefaultFromSequence [GOOD] |86.5%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/fq/libs/ydb/ut/ydb-core-fq-libs-ydb-ut |86.5%| [LD] {RESULT} $(B)/ydb/core/fq/libs/ydb/ut/ydb-core-fq-libs-ydb-ut |86.5%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/fq/libs/ydb/ut/ydb-core-fq-libs-ydb-ut >> THiveTest::TestDeleteOwnerTabletsMany [GOOD] >> THiveTest::TestDeleteTabletWithFollowers ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_sequence/unittest >> TSequence::AlterTableSetDefaultFromSequence [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2143] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2143] Leader for TabletID 72057594046678944 is [1:129:2154] sender: [1:130:2058] recipient: [1:112:2143] 2026-01-08T15:31:14.214063Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7938: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2026-01-08T15:31:14.214161Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7966: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2026-01-08T15:31:14.214218Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7852: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2026-01-08T15:31:14.214274Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7868: OperationsProcessing config: using default configuration 2026-01-08T15:31:14.214369Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7874: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2026-01-08T15:31:14.214411Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7874: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2026-01-08T15:31:14.214500Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7998: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2026-01-08T15:31:14.214599Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_shred_manager.cpp:39: [RootShredManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, ShredInterval# 604800.000000s, ShredBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2026-01-08T15:31:14.215463Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:8069: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2026-01-08T15:31:14.215760Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2026-01-08T15:31:14.387314Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7755: Cannot subscribe to console configs 2026-01-08T15:31:14.387382Z node 1 :IMPORT WARN: schemeshard_import.cpp:440: Table profiles were not loaded 2026-01-08T15:31:14.401950Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2026-01-08T15:31:14.402339Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: 
TTxUpgradeSchema.Execute 2026-01-08T15:31:14.402550Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2026-01-08T15:31:14.417568Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2026-01-08T15:31:14.418021Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:464: Clear TempDirsState with owners number: 0 2026-01-08T15:31:14.418815Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1383: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2026-01-08T15:31:14.422550Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2026-01-08T15:31:14.427677Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:160: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2026-01-08T15:31:14.427917Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_shred_manager.cpp:84: [RootShredManager] Stop 2026-01-08T15:31:14.429049Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2026-01-08T15:31:14.429112Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2026-01-08T15:31:14.429222Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2026-01-08T15:31:14.429270Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2026-01-08T15:31:14.429369Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:213: TTxServerlessStorageBilling.Complete 2026-01-08T15:31:14.429515Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:7111: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2026-01-08T15:31:14.604660Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot" OperationType: ESchemeOpMkDir MkDir { Name: ".sys" } Internal: true FailOnExist: false } TxId: 281474976710657 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:31:14.605668Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "auth_permissions" Type: EAuthPermissions } } TxId: 281474976710658 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:31:14.605810Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "auth_effective_permissions" Type: EAuthEffectivePermissions } } TxId: 281474976710659 
Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:31:14.605894Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "auth_users" Type: EAuthUsers } } TxId: 281474976710660 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:31:14.605973Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "pg_tables" Type: EPgTables } } TxId: 281474976710661 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:31:14.606032Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "ds_pdisks" Type: EPDisks } } TxId: 281474976710662 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:31:14.606114Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "auth_group_members" Type: EAuthGroupMembers } } TxId: 281474976710663 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:31:14.606200Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "top_partitions_one_minute" Type: ETopPartitionsByCpuOneMinute } } TxId: 281474976710664 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:31:14.606286Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "top_queries_by_request_units_one_hour" Type: ETopQueriesByRequestUnitsOneHour } } TxId: 281474976710665 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:31:14.606666Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "ds_vslots" Type: EVSlots } } TxId: 281474976710666 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:31:14.606756Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 
Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "top_partitions_by_tli_one_hour" Type: ETopPartitionsByTliOneHour } } TxId: 281474976710667 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:31:14.606833Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "nodes" Type: ENodes } } TxId: 281474976710668 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:31:14.606917Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "top_queries_by_cpu_time_one_hour" Type: ETopQueriesByCpuTimeOneHour } } TxId: 281474976710669 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:31:14.607009Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "top_queries_by_read_bytes_one_hour" Type: ETopQueriesByReadBytesOneHour } } TxId: 281474976710670 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:31:14.607088Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "top_queries_by_read_bytes_one_minute" Type: ETopQueriesByR ... 
Latency: 0 ProposeLatency: 3 DomainCoordinators: 72057594046316545 TxStats { PerShardStats { ShardId: 72075186233409549 CpuTimeUsec: 2096 } } CommitVersion { Step: 5000050 TxId: 114 } debug: NTableState::TProposedWaitParts operationId# 114:0 2026-01-08T15:31:30.865022Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:173: TSideEffects ApplyOnExecute at tablet# 72057594046678944 2026-01-08T15:31:30.866052Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5248: StateWork, received event# 269877761, Sender [7:1461:3396], Recipient [7:128:2153]: NKikimr::TEvTabletPipe::TEvServerConnected 2026-01-08T15:31:30.866098Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5345: StateWork, processing event TEvTabletPipe::TEvServerConnected 2026-01-08T15:31:30.866127Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:6167: Pipe server connected, at tablet: 72057594046678944 2026-01-08T15:31:30.869814Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5248: StateWork, received event# 269551620, Sender [7:1399:3343], Recipient [7:128:2153]: NKikimrTxDataShard.TEvSchemaChanged Source { RawX1: 1399 RawX2: 30064774415 } Origin: 72075186233409549 State: 2 TxId: 114 Step: 0 Generation: 2 2026-01-08T15:31:30.869990Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5280: StateWork, processing event TEvDataShard::TEvSchemaChanged 2026-01-08T15:31:30.870132Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5926: Handle TEvSchemaChanged, tabletId: 72057594046678944, at schemeshard: 72057594046678944, message: Source { RawX1: 1399 RawX2: 30064774415 } Origin: 72075186233409549 State: 2 TxId: 114 Step: 0 Generation: 2 2026-01-08T15:31:30.870198Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1837: TOperation FindRelatedPartByTabletId, TxId: 114, tablet: 72075186233409549, partId: 0 2026-01-08T15:31:30.870433Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:628: TTxOperationReply execute, operationId: 114:0, at schemeshard: 72057594046678944, message: Source { RawX1: 1399 RawX2: 30064774415 } Origin: 72075186233409549 State: 2 TxId: 114 Step: 0 Generation: 2 2026-01-08T15:31:30.870524Z node 7 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:1081: NTableState::TProposedWaitParts operationId# 114:0 HandleReply TEvDataShard::TEvSchemaChanged at tablet: 72057594046678944 2026-01-08T15:31:30.870679Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:1085: NTableState::TProposedWaitParts operationId# 114:0 HandleReply TEvDataShard::TEvSchemaChanged at tablet: 72057594046678944 message: Source { RawX1: 1399 RawX2: 30064774415 } Origin: 72075186233409549 State: 2 TxId: 114 Step: 0 Generation: 2 2026-01-08T15:31:30.870789Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:673: CollectSchemaChanged accept TEvDataShard::TEvSchemaChanged, operationId: 114:0, shardIdx: 72057594046678944:4, shard: 72075186233409549, left await: 0, txState.State: ProposedWaitParts, txState.ReadyForNotifications: 1, at schemeshard: 72057594046678944 2026-01-08T15:31:30.870846Z node 7 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:710: all shard schema changes has been received, operationId: 114:0, at schemeshard: 72057594046678944 2026-01-08T15:31:30.870909Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:722: send schema changes ack message, operation: 114:0, datashard: 72075186233409549, at schemeshard: 72057594046678944 2026-01-08T15:31:30.870991Z node 7 
:FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2700: Change state for txid 114:0 129 -> 240 2026-01-08T15:31:30.871211Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:173: TSideEffects ApplyOnExecute at tablet# 72057594046678944 2026-01-08T15:31:30.872634Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:216: TSideEffects ApplyOnComplete at tablet# 72057594046678944 2026-01-08T15:31:30.872853Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 114 2026-01-08T15:31:30.872922Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:216: TSideEffects ApplyOnComplete at tablet# 72057594046678944 2026-01-08T15:31:30.875801Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 114 2026-01-08T15:31:30.875862Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:216: TSideEffects ApplyOnComplete at tablet# 72057594046678944 2026-01-08T15:31:30.876028Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:656: TTxOperationReply complete, operationId: 114:0, at schemeshard: 72057594046678944 2026-01-08T15:31:30.876064Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:216: TSideEffects ApplyOnComplete at tablet# 72057594046678944 2026-01-08T15:31:30.876204Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:656: TTxOperationReply complete, operationId: 114:0, at schemeshard: 72057594046678944 2026-01-08T15:31:30.876254Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:216: TSideEffects ApplyOnComplete at tablet# 72057594046678944 2026-01-08T15:31:30.876311Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:285: Activate send for 114:0 2026-01-08T15:31:30.876459Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:643: Send to actor: [7:1399:3343] msg type: 269552132 msg: NKikimrTxDataShard.TEvSchemaChangedResult TxId: 114 at schemeshard: 72057594046678944 2026-01-08T15:31:30.876626Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5248: StateWork, received event# 2146435072, Sender [7:128:2153], Recipient [7:128:2153]: NKikimr::NSchemeShard::TEvPrivate::TEvProgressOperation 2026-01-08T15:31:30.876670Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5265: StateWork, processing event TEvPrivate::TEvProgressOperation 2026-01-08T15:31:30.876755Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:494: TTxOperationProgress Execute, operationId: 114:0, at schemeshard: 72057594046678944 2026-01-08T15:31:30.876819Z node 7 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:497: [72057594046678944] TDone opId# 114:0 ProgressState 2026-01-08T15:31:30.876979Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:173: TSideEffects ApplyOnExecute at tablet# 72057594046678944 2026-01-08T15:31:30.877021Z node 7 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:928: Part operation is done id#114:0 progress is 1/1 2026-01-08T15:31:30.877077Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1714: TOperation IsReadyToDone TxId: 114 ready parts: 1/1 2026-01-08T15:31:30.877131Z node 7 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:928: Part operation is done id#114:0 progress is 1/1 
2026-01-08T15:31:30.877178Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1714: TOperation IsReadyToDone TxId: 114 ready parts: 1/1 2026-01-08T15:31:30.877247Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1679: TOperation IsReadyToNotify, TxId: 114, ready parts: 1/1, is published: true 2026-01-08T15:31:30.877328Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1702: TOperation DoNotify send TEvNotifyTxCompletionResult to actorId: [7:800:2767] message: TxId: 114 2026-01-08T15:31:30.877394Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1714: TOperation IsReadyToDone TxId: 114 ready parts: 1/1 2026-01-08T15:31:30.877463Z node 7 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:995: Operation and all the parts is done, operation id: 114:0 2026-01-08T15:31:30.877509Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5577: RemoveTx for txid 114:0 2026-01-08T15:31:30.877694Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:611: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 42] was 3 2026-01-08T15:31:30.880773Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:216: TSideEffects ApplyOnComplete at tablet# 72057594046678944 2026-01-08T15:31:30.880926Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:643: Send to actor: [7:800:2767] msg type: 271124998 msg: NKikimrScheme.TEvNotifyTxCompletionResult TxId: 114 at schemeshard: 72057594046678944 2026-01-08T15:31:30.881183Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:230: tests -- TTxNotificationSubscriber for txId 114: got EvNotifyTxCompletionResult 2026-01-08T15:31:30.881264Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:239: tests -- TTxNotificationSubscriber for txId 114: satisfy waiter [7:1427:3363] 2026-01-08T15:31:30.881514Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5248: StateWork, received event# 269877764, Sender [7:1429:3365], Recipient [7:128:2153]: NKikimr::TEvTabletPipe::TEvServerDisconnected 2026-01-08T15:31:30.881556Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5346: StateWork, processing event TEvTabletPipe::TEvServerDisconnected 2026-01-08T15:31:30.881584Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:6215: Server pipe is reset, at schemeshard: 72057594046678944 TestWaitNotification: OK eventTxId 114 TestModificationResults wait txId: 115 2026-01-08T15:31:30.882963Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5248: StateWork, received event# 271122432, Sender [7:1469:3404], Recipient [7:128:2153]: {TEvModifySchemeTransaction txid# 115 TabletId# 72057594046678944} 2026-01-08T15:31:30.883056Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5259: StateWork, processing event TEvSchemeShard::TEvModifySchemeTransaction 2026-01-08T15:31:30.886659Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:377: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/MyRoot" OperationType: ESchemeOpAlterTable AlterTable { Name: "Table3" Columns { Name: "value" DefaultFromSequence: "/MyRoot/seq1" } } } TxId: 115 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2026-01-08T15:31:30.886986Z node 7 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_table.cpp:522: TAlterTable Propose, path: /MyRoot/Table3, pathId: , opId: 115:0, at schemeshard: 72057594046678944 2026-01-08T15:31:30.888059Z node 7 :FLAT_TX_SCHEMESHARD NOTICE: 
schemeshard__operation.cpp:127: IgniteOperation, opId: 115:1, propose status:StatusInvalidParameter, reason: Column 'value' is of type Bool but default expression is of type Int64, at schemeshard: 72057594046678944 2026-01-08T15:31:30.888372Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:173: TSideEffects ApplyOnExecute at tablet# 72057594046678944 2026-01-08T15:31:30.891763Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:460: TTxOperationPropose Complete, txId: 115, response: Status: StatusInvalidParameter Reason: "Column \'value\' is of type Bool but default expression is of type Int64" TxId: 115 SchemeshardId: 72057594046678944, at schemeshard: 72057594046678944 2026-01-08T15:31:30.892109Z node 7 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 115, database: /MyRoot, subject: , status: StatusInvalidParameter, reason: Column 'value' is of type Bool but default expression is of type Int64, operation: ALTER TABLE, path: /MyRoot/Table3 2026-01-08T15:31:30.892193Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:216: TSideEffects ApplyOnComplete at tablet# 72057594046678944 TestModificationResult got TxId: 115, wait until txId: 115 |86.5%| [TM] {BAZEL_UPLOAD} ydb/core/tx/schemeshard/ut_sequence/unittest >> KqpPg::InsertNoTargetColumns_Simple+useSink [GOOD] >> KqpPg::InsertNoTargetColumns_Simple-useSink >> DataStreams::TestListShards1Shard [GOOD] >> KqpPg::NoTableQuery+useSink [GOOD] >> KqpPg::NoTableQuery-useSink >> KqpPg::EmptyQuery+useSink [GOOD] >> KqpPg::EmptyQuery-useSink >> THiveTest::TestDeleteTabletWithFollowers [GOOD] >> THiveTest::TestCreateTabletBeforeLocal |86.5%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/kqp/ut/yql/ydb-core-kqp-ut-yql |86.5%| [LD] {RESULT} $(B)/ydb/core/kqp/ut/yql/ydb-core-kqp-ut-yql |86.5%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/kqp/ut/yql/ydb-core-kqp-ut-yql ------- [TM] {asan, default-linux-x86_64, release} ydb/services/datastreams/ut/unittest >> DataStreams::TestListShards1Shard [GOOD] Test command err: 2026-01-08T15:31:06.141136Z node 1 :METADATA_PROVIDER WARN: log.cpp:841: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7593013696313572929:2146];send_to=[0:7307199536658146131:7762515]; 2026-01-08T15:31:06.141201Z node 1 :METADATA_PROVIDER ERROR: log.cpp:841: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2026-01-08T15:31:06.799104Z node 1 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: LookupError, path: Root/.metadata/script_executions 2026-01-08T15:31:07.077953Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2026-01-08T15:31:07.078064Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2026-01-08T15:31:07.102023Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2026-01-08T15:31:07.301205Z node 1 :IMPORT WARN: schemeshard_import.cpp:440: Table profiles were not loaded 2026-01-08T15:31:07.303442Z node 1 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: LookupError, 
path: Root/.metadata/script_executions 2026-01-08T15:31:07.303523Z node 1 :TX_CONVEYOR ERROR: log.cpp:841: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2026-01-08T15:31:07.442685Z node 3 :BS_NODE WARN: {NWDC01@distconf.cpp:423} StateFunc too long Type# 268639238 Duration# 0.016674s 2026-01-08T15:31:07.470943Z node 3 :BS_NODE WARN: {NWDC01@distconf.cpp:423} StateFunc too long Type# 268639239 Duration# 0.008152s 2026-01-08T15:31:07.587015Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2026-01-08T15:31:07.587036Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2026-01-08T15:31:07.587048Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2026-01-08T15:31:07.587125Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2026-01-08T15:31:07.795611Z node 1 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: PathErrorUnknown, path: Root/.metadata/script_executions 2026-01-08T15:31:08.046663Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2026-01-08T15:31:08.274369Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) 2026-01-08T15:31:08.509690Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterUserAttributes, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_alter_user_attrs.cpp:72) 2026-01-08T15:31:09.125403Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) encryption_type: NONE records { sequence_number: "0" shard_id: "shard-000000" } records { sequence_number: "1" shard_id: "shard-000000" } records { sequence_number: "2" shard_id: "shard-000000" } records { sequence_number: "3" shard_id: "shard-000000" } records { sequence_number: "4" shard_id: "shard-000000" } records { sequence_number: "5" shard_id: "shard-000000" } records { sequence_number: "6" shard_id: "shard-000000" } records { sequence_number: "7" shard_id: "shard-000000" } records { sequence_number: "8" shard_id: "shard-000000" } records { sequence_number: "9" shard_id: "shard-000000" } records { sequence_number: "10" shard_id: "shard-000000" } records { sequence_number: "11" shard_id: "shard-000000" } records { sequence_number: "12" shard_id: "shard-000000" } records { 
sequence_number: "13" shard_id: "shard-000000" } records { sequence_number: "14" shard_id: "shard-000000" } records { sequence_number: "15" shard_id: "shard-000000" } records { sequence_number: "16" shard_id: "shard-000000" } records { sequence_number: "17" shard_id: "shard-000000" } records { sequence_number: "18" shard_id: "shard-000000" } records { sequence_number: "19" shard_id: "shard-000000" } records { sequence_number: "20" shard_id: "shard-000000" } records { sequence_number: "21" shard_id: "shard-000000" } records { sequence_number: "22" shard_id: "shard-000000" } records { sequence_number: "23" shard_id: "shard-000000" } records { sequence_number: "24" shard_id: "shard-000000" } records { sequence_number: "25" shard_id: "shard-000000" } records { sequence_number: "26" shard_id: "shard-000000" } records { sequence_number: "27" shard_id: "shard-000000" } records { sequence_number: "28" shard_id: "shard-000000" } records { sequence_number: "29" shard_id: "shard-000000" } 2026-01-08T15:31:09.309554Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterPersQueueGroup, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_alter_pq.cpp:311) 2026-01-08T15:31:09.449015Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpDropPersQueueGroup, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_drop_pq.cpp:422) 2026-01-08T15:31:09.516851Z node 1 :HIVE WARN: hive_impl.cpp:519: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037889 not found 2026-01-08T15:31:09.516881Z node 1 :HIVE WARN: hive_impl.cpp:519: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037890 not found 2026-01-08T15:31:09.516898Z node 1 :HIVE WARN: hive_impl.cpp:519: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037888 not found Got line from metering file data: '{"cloud_id":"somecloud","folder_id":"somefolder","resource_id":"/Root/stream_TestNonChargeableUser","id":"reserved_resources-root-72075186224037888-1767886268889-1","schema":"yds.resources.reserved.v1","tags":{"reserved_throughput_bps":1048576,"reserved_consumers_count":0,"reserved_storage_bytes":90596966400},"usage":{"quantity":1,"unit":"second","start":1767886268,"finish":1767886269},"labels":{"datastreams_stream_name":"stream_TestNonChargeableUser","ydb_database":"root","Category":"Topic"},"version":"v1","source_id":"72075186224037888","source_wt":1767886269}' Got line from metering file data: '{"cloud_id":"somecloud","folder_id":"somefolder","resource_id":"/Root/stream_TestNonChargeableUser","id":"reserved_resources-root-72075186224037890-1767886269375-2","schema":"yds.resources.reserved.v1","tags":{"reserved_throughput_bps":1048576,"reserved_consumers_count":0,"reserved_storage_bytes":90596966400},"usage":{"quantity":0,"unit":"second","start":1767886269,"finish":1767886269},"labels":{"datastreams_stream_name":"stream_TestNonChargeableUser","ydb_database":"root","Category":"Topic"},"version":"v1","source_id":"72075186224037890","source_wt":1767886269}' Got line from metering file data: 
'{"cloud_id":"somecloud","folder_id":"somefolder","resource_id":"/Root/stream_TestNonChargeableUser","id":"reserved_resources-root-72075186224037888-1767886269367-3","schema":"yds.resources.reserved.v1","tags":{"reserved_throughput_bps":1048576,"reserved_consumers_count":0,"reserved_storage_bytes":90596966400},"usage":{"quantity":0,"unit":"second","start":1767886269,"finish":1767886269},"labels":{"datastreams_stream_name":"stream_TestNonChargeableUser","ydb_database":"root","Category":"Topic"},"version":"v1","source_id":"72075186224037888","source_wt":1767886269}' Got line from metering file data: '{"cloud_id":"somecloud","folder_id":"somefolder","resource_id":"/Root/stream_TestNonChargeableUser","id":"reserved_resources-root-72075186224037888-1767886268889-1","schema":"yds.resources.reserved.v1","tags":{"reserved_throughput_bps":1048576,"reserved_consumers_count":0,"reserved_storage_bytes":90596966400},"usage":{"quantity":1,"unit":"second","start":1767886268,"finish":1767886269},"labels":{"datastreams_stream_name":"stream_TestNonChargeableUser","ydb_database":"root","Category":"Topic"},"version":"v1","source_id":"72075186224037888","source_wt":1767886269}' Got line from metering file data: '{"cloud_id":"somecloud","folder_id":"somefolder","resource_id":"/Root/stream_TestNonChargeableUser","id":"reserved_resources-root-72075186224037890-1767886269375-2","schema":"yds.resources.reserved.v1","tags":{"reserved_throughput_bps":1048576,"reserved_consumers_count":0,"reserved_storage_bytes":90596966400},"usage":{"quantity":0,"unit":"second","start":1767886269,"finish":1767886269},"labels":{"datastreams_stream_name":"stream_TestNonChargeableUser","ydb_database":"root","Category":"Topic"},"version":"v1","source_id":"72075186224037890","source_wt":1767886269}' Got line from metering file data: '{"cloud_id":"somecloud","folder_id":"somefolder","resource_id":"/Root/stream_TestNonChargeableUser","id":"reserved_resources-root-72075186224037888-1767886269367-3","schema":"yds.resources.reserved.v1","tags":{"reserved_throughput_bps":1048576,"reserved_consumers_count":0,"reserved_storage_bytes":90596966400},"usage":{"quantity":0,"unit":"second","start":1767886269,"finish":1767886269},"labels":{"datastreams_stream_name":"stream_TestNonChargeableUser","ydb_database":"root","Category":"Topic"},"version":"v1","source_id":"72075186224037888","source_wt":1767886269}' 2026-01-08T15:31:13.531196Z node 4 :METADATA_PROVIDER WARN: log.cpp:841: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[4:7593013727486689531:2152];send_to=[0:7307199536658146131:7762515]; 2026-01-08T15:31:13.531419Z node 4 :METADATA_PROVIDER ERROR: log.cpp:841: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2026-01-08T15:31:13.680764Z node 4 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: LookupError, path: Root/.metadata/script_executions 2026-01-08T15:31:13.910553Z node 4 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: LookupError, path: Root/.metadata/script_e ... 
Q.TYdsNextToken" because it is missing required fields: CreationTimestamp, MaxResults, AlreadyRead, StreamArn E0000 00:00:1767886281.603765 152200 message_lite.cc:131] Can't parse message of type "NKikimrPQ.TYdsNextToken" because it is missing required fields: CreationTimestamp, MaxResults, AlreadyRead, StreamArn E0000 00:00:1767886281.617902 152200 message_lite.cc:131] Can't parse message of type "NKikimrPQ.TYdsNextToken" because it is missing required fields: CreationTimestamp, MaxResults, AlreadyRead, StreamArn E0000 00:00:1767886281.618017 152200 message_lite.cc:131] Can't parse message of type "NKikimrPQ.TYdsNextToken" because it is missing required fields: CreationTimestamp, MaxResults, AlreadyRead, StreamArn E0000 00:00:1767886281.629244 152200 message_lite.cc:131] Can't parse message of type "NKikimrPQ.TYdsNextToken" because it is missing required fields: CreationTimestamp, MaxResults, AlreadyRead, StreamArn E0000 00:00:1767886281.629408 152200 message_lite.cc:131] Can't parse message of type "NKikimrPQ.TYdsNextToken" because it is missing required fields: CreationTimestamp, MaxResults, AlreadyRead, StreamArn 2026-01-08T15:31:21.655516Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterPersQueueGroup, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_alter_pq.cpp:311) 2026-01-08T15:31:21.762972Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterPersQueueGroup, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_alter_pq.cpp:311) E0000 00:00:1767886281.852619 152200 message_lite.cc:131] Can't parse message of type "NKikimrPQ.TYdsNextToken" because it is missing required fields: CreationTimestamp, MaxResults, AlreadyRead, StreamArn E0000 00:00:1767886281.853365 152200 message_lite.cc:131] Can't parse message of type "NKikimrPQ.TYdsNextToken" because it is missing required fields: CreationTimestamp, MaxResults, AlreadyRead, StreamArn 2026-01-08T15:31:21.872135Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterPersQueueGroup, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_alter_pq.cpp:311) E0000 00:00:1767886281.949942 152200 message_lite.cc:131] Can't parse message of type "NKikimrPQ.TYdsNextToken" because it is missing required fields: CreationTimestamp, MaxResults, AlreadyRead, StreamArn E0000 00:00:1767886281.950075 152200 message_lite.cc:131] Can't parse message of type "NKikimrPQ.TYdsNextToken" because it is missing required fields: CreationTimestamp, MaxResults, AlreadyRead, StreamArn 2026-01-08T15:31:21.968186Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterPersQueueGroup, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_alter_pq.cpp:311) E0000 00:00:1767886282.080714 152200 message_lite.cc:131] Can't parse message of type 
"NKikimrPQ.TYdsNextToken" because it is missing required fields: CreationTimestamp, MaxResults, AlreadyRead, StreamArn E0000 00:00:1767886282.080858 152200 message_lite.cc:131] Can't parse message of type "NKikimrPQ.TYdsNextToken" because it is missing required fields: CreationTimestamp, MaxResults, AlreadyRead, StreamArn E0000 00:00:1767886282.110182 152200 message_lite.cc:131] Can't parse message of type "NKikimrPQ.TYdsNextToken" because it is missing required fields: CreationTimestamp, MaxResults, AlreadyRead, StreamArn E0000 00:00:1767886282.110362 152200 message_lite.cc:131] Can't parse message of type "NKikimrPQ.TYdsNextToken" because it is missing required fields: CreationTimestamp, MaxResults, AlreadyRead, StreamArn 2026-01-08T15:31:22.186307Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpDropPersQueueGroup, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_drop_pq.cpp:422) 2026-01-08T15:31:22.237468Z node 7 :HIVE WARN: hive_impl.cpp:519: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 7, TabletId: 72075186224037892 not found 2026-01-08T15:31:22.240904Z node 7 :HIVE WARN: hive_impl.cpp:519: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 7, TabletId: 72075186224037888 not found 2026-01-08T15:31:22.242975Z node 7 :HIVE WARN: hive_impl.cpp:519: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 7, TabletId: 72075186224037889 not found 2026-01-08T15:31:22.243003Z node 7 :HIVE WARN: hive_impl.cpp:519: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 7, TabletId: 72075186224037891 not found 2026-01-08T15:31:22.243015Z node 7 :HIVE WARN: hive_impl.cpp:519: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 7, TabletId: 72075186224037890 not found 2026-01-08T15:31:22.243026Z node 7 :HIVE WARN: hive_impl.cpp:519: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 7, TabletId: 72075186224037893 not found 2026-01-08T15:31:22.285632Z node 7 :HIVE WARN: tx__delete_tablet.cpp:91: HIVE#72057594037968897 THive::TTxDeleteTablet tablet (72057594046644480,5) wasn't found 2026-01-08T15:31:22.285700Z node 7 :HIVE WARN: tx__delete_tablet.cpp:91: HIVE#72057594037968897 THive::TTxDeleteTablet tablet (72057594046644480,2) wasn't found 2026-01-08T15:31:22.285770Z node 7 :HIVE WARN: tx__delete_tablet.cpp:91: HIVE#72057594037968897 THive::TTxDeleteTablet tablet (72057594046644480,4) wasn't found 2026-01-08T15:31:22.285802Z node 7 :HIVE WARN: tx__delete_tablet.cpp:91: HIVE#72057594037968897 THive::TTxDeleteTablet tablet (72057594046644480,6) wasn't found 2026-01-08T15:31:22.285827Z node 7 :HIVE WARN: tx__delete_tablet.cpp:91: HIVE#72057594037968897 THive::TTxDeleteTablet tablet (72057594046644480,3) wasn't found 2026-01-08T15:31:22.285856Z node 7 :HIVE WARN: tx__delete_tablet.cpp:91: HIVE#72057594037968897 THive::TTxDeleteTablet tablet (72057594046644480,1) wasn't found E0000 00:00:1767886282.301116 152200 message_lite.cc:131] Can't parse message of type "NKikimrPQ.TYdsNextToken" because it is missing required fields: CreationTimestamp, MaxResults, AlreadyRead, StreamArn E0000 00:00:1767886282.301282 152200 message_lite.cc:131] Can't parse message of type "NKikimrPQ.TYdsNextToken" because it is missing required fields: CreationTimestamp, MaxResults, AlreadyRead, StreamArn 2026-01-08T15:31:26.219416Z 
node 10 :METADATA_PROVIDER WARN: log.cpp:841: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[10:7593013785329733973:2081];send_to=[0:7307199536658146131:7762515]; 2026-01-08T15:31:26.220201Z node 10 :METADATA_PROVIDER ERROR: log.cpp:841: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2026-01-08T15:31:26.249773Z node 10 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:125: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, got undelivered to scheme cache: ActorUnknown 2026-01-08T15:31:26.450576Z node 10 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: LookupError, path: Root/.metadata/script_executions 2026-01-08T15:31:26.465665Z node 10 :IMPORT WARN: schemeshard_import.cpp:440: Table profiles were not loaded 2026-01-08T15:31:26.489961Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2026-01-08T15:31:26.490056Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2026-01-08T15:31:26.507727Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Connecting -> Connected 2026-01-08T15:31:26.602806Z node 12 :BS_NODE WARN: {NWDC01@distconf.cpp:423} StateFunc too long Type# 268639238 Duration# 0.023711s 2026-01-08T15:31:26.614288Z node 12 :BS_NODE WARN: {NWDC01@distconf.cpp:423} StateFunc too long Type# 268639239 Duration# 0.005158s 2026-01-08T15:31:26.669474Z node 10 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: PathErrorUnknown, path: Root/.metadata/script_executions 2026-01-08T15:31:26.731772Z node 10 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2026-01-08T15:31:26.731797Z node 10 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2026-01-08T15:31:26.731809Z node 10 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2026-01-08T15:31:26.731925Z node 10 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2026-01-08T15:31:27.197202Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2026-01-08T15:31:27.229151Z node 10 :TX_CONVEYOR ERROR: log.cpp:841: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2026-01-08T15:31:27.361417Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) 2026-01-08T15:31:27.653797Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: 
Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterUserAttributes, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_alter_user_attrs.cpp:72) E0000 00:00:1767886287.919361 153439 message_lite.cc:131] Can't parse message of type "NKikimrPQ.TYdsNextToken" because it is missing required fields: CreationTimestamp, MaxResults, AlreadyRead, StreamArn E0000 00:00:1767886287.939833 153439 message_lite.cc:131] Can't parse message of type "NKikimrPQ.TYdsNextToken" because it is missing required fields: CreationTimestamp, MaxResults, AlreadyRead, StreamArn E0000 00:00:1767886287.951192 153439 message_lite.cc:131] Can't parse message of type "NKikimrPQ.TYdsNextToken" because it is missing required fields: CreationTimestamp, MaxResults, AlreadyRead, StreamArn E0000 00:00:1767886287.965842 153439 message_lite.cc:131] Can't parse message of type "NKikimrPQ.TYdsNextToken" because it is missing required fields: CreationTimestamp, MaxResults, AlreadyRead, StreamArn E0000 00:00:1767886287.979734 153439 message_lite.cc:131] Can't parse message of type "NKikimrPQ.TYdsNextToken" because it is missing required fields: CreationTimestamp, MaxResults, AlreadyRead, StreamArn |86.5%| [TM] {BAZEL_UPLOAD} ydb/services/datastreams/ut/unittest >> KqpPg::CreateTableSerialColumns+useSink [GOOD] >> KqpPg::CreateTableSerialColumns-useSink >> DataShardStats::HasSchemaChanges_BTreeIndex [GOOD] >> DataShardStats::HasSchemaChanges_ByKeyFilter >> TDatabaseResolverTests::Ydb_Serverless_Timeout >> TDatabaseResolverTests::Ydb_Serverless_Timeout [GOOD] >> THiveTest::TestCreateTabletBeforeLocal [GOOD] >> THiveTest::TestCreateTabletReboots >> KqpSinkTx::SnapshotROInteractive2 [GOOD] >> DataStreams::TestGetRecords1MBMessagesOneByOneBySeqNo [GOOD] >> test_fifo_messaging.py::TestSqsFifoMessagingWithTenant::test_crutch_groups_selection_algorithm_selects_second_group_batch[tables_format_v0] [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/fq/libs/actors/ut/unittest >> TDatabaseResolverTests::Ydb_Serverless_Timeout [GOOD] Test command err: 2026-01-08T15:31:35.853526Z node 1 :FQ_DATABASE_RESOLVER ERROR: database_resolver.cpp:175: TraceId: traceId ResponseProcessor::Handle(HttpIncomingResponse): error=Error while trying to resolve managed Ydb database with id etn021us5r9rhld1vgbh via HTTP request to: endpoint 'ydbc.ydb.cloud.yandex.net:8789', url '/ydbc/cloud-prod/database?databaseId=etn021us5r9rhld1vgbh': Connection timeout |86.5%| [TM] {BAZEL_UPLOAD} ydb/core/fq/libs/actors/ut/unittest >> test_s3.py::TestPgYdbS3TTL::test_s3[table_index_0__ASYNC-pk_types2-all_types2-index2---ASYNC] [GOOD] |86.5%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test >> test_canonical_records.py::test_topic [GOOD] |86.5%| [TM] {BAZEL_UPLOAD} ydb/tests/functional/audit/py3test >> KqpPg::JoinWithQueryService+StreamLookup [GOOD] >> KqpPg::Insert_Serial+useSink >> DataShardVolatile::DistributedWriteShardRestartAfterExpectation+UseSink [GOOD] >> DataShardVolatile::DistributedWriteShardRestartAfterExpectation-UseSink >> TDatabaseResolverTests::DataStreams_Dedicated >> TDatabaseResolverTests::DataStreams_Dedicated [GOOD] >> TDatabaseResolverTests::ClickHouse_PermissionDenied >> TDatabaseResolverTests::ClickHouse_PermissionDenied [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/services/datastreams/ut/unittest >> DataStreams::TestGetRecords1MBMessagesOneByOneBySeqNo 
[GOOD] Test command err: 2026-01-08T15:31:05.222233Z node 1 :METADATA_PROVIDER WARN: log.cpp:841: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7593013694157870745:2263];send_to=[0:7307199536658146131:7762515]; 2026-01-08T15:31:05.222306Z node 1 :METADATA_PROVIDER ERROR: log.cpp:841: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2026-01-08T15:31:05.261060Z node 1 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:125: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, got undelivered to scheme cache: ActorUnknown 2026-01-08T15:31:05.588409Z node 1 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: LookupError, path: Root/.metadata/script_executions 2026-01-08T15:31:05.644943Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2026-01-08T15:31:05.645056Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2026-01-08T15:31:05.693657Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2026-01-08T15:31:05.758350Z node 1 :IMPORT WARN: schemeshard_import.cpp:440: Table profiles were not loaded 2026-01-08T15:31:05.878938Z node 1 :BS_NODE WARN: {NWDC01@distconf.cpp:423} StateFunc too long Type# 268639257 Duration# 0.010905s 2026-01-08T15:31:05.980633Z node 2 :BS_NODE WARN: {NWDC01@distconf.cpp:423} StateFunc too long Type# 268639239 Duration# 0.006675s 2026-01-08T15:31:05.973841Z node 3 :BS_NODE WARN: {NWDC01@distconf.cpp:423} StateFunc too long Type# 268639238 Duration# 0.024601s 2026-01-08T15:31:05.994545Z node 3 :BS_NODE WARN: {NWDC01@distconf.cpp:423} StateFunc too long Type# 268639239 Duration# 0.007647s 2026-01-08T15:31:06.073096Z node 1 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: PathErrorUnknown, path: Root/.metadata/script_executions 2026-01-08T15:31:06.160104Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2026-01-08T15:31:06.160123Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2026-01-08T15:31:06.160129Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2026-01-08T15:31:06.160200Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2026-01-08T15:31:06.210771Z node 1 :TX_CONVEYOR ERROR: log.cpp:841: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2026-01-08T15:31:06.652680Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2026-01-08T15:31:06.855287Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: 
ESchemeOpModifyACL, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) 2026-01-08T15:31:07.143385Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterUserAttributes, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_alter_user_attrs.cpp:72) 2026-01-08T15:31:07.302478Z node 1 :BS_NODE WARN: {NWDC01@distconf.cpp:423} StateFunc too long Type# 268639258 Duration# 0.008315s 2026-01-08T15:31:07.566791Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpDropPersQueueGroup, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_drop_pq.cpp:422) 2026-01-08T15:31:07.657763Z node 1 :HIVE WARN: hive_impl.cpp:519: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037891 not found 2026-01-08T15:31:07.657828Z node 1 :HIVE WARN: hive_impl.cpp:519: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037890 not found 2026-01-08T15:31:07.657846Z node 1 :HIVE WARN: hive_impl.cpp:519: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037888 not found 2026-01-08T15:31:07.657863Z node 1 :HIVE WARN: hive_impl.cpp:519: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037889 not found 2026-01-08T15:31:07.672625Z node 1 :HIVE WARN: tx__delete_tablet.cpp:91: HIVE#72057594037968897 THive::TTxDeleteTablet tablet (72057594046644480,2) wasn't found 2026-01-08T15:31:07.672742Z node 1 :HIVE WARN: tx__delete_tablet.cpp:91: HIVE#72057594037968897 THive::TTxDeleteTablet tablet (72057594046644480,4) wasn't found 2026-01-08T15:31:07.672778Z node 1 :HIVE WARN: tx__delete_tablet.cpp:91: HIVE#72057594037968897 THive::TTxDeleteTablet tablet (72057594046644480,3) wasn't found 2026-01-08T15:31:07.672823Z node 1 :HIVE WARN: tx__delete_tablet.cpp:91: HIVE#72057594037968897 THive::TTxDeleteTablet tablet (72057594046644480,1) wasn't found 2026-01-08T15:31:11.342855Z node 4 :METADATA_PROVIDER WARN: log.cpp:841: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[4:7593013720090334320:2154];send_to=[0:7307199536658146131:7762515]; 2026-01-08T15:31:11.342910Z node 4 :METADATA_PROVIDER ERROR: log.cpp:841: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2026-01-08T15:31:11.422012Z node 4 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: LookupError, path: Root/.metadata/script_executions 2026-01-08T15:31:11.621680Z node 4 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: LookupError, path: Root/.metadata/script_executions 2026-01-08T15:31:11.647005Z node 4 :IMPORT WARN: schemeshard_import.cpp:440: Table profiles were not loaded 2026-01-08T15:31:11.668792Z node 4 :HIVE WARN: node_info.cpp:25: 
HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2026-01-08T15:31:11.668872Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2026-01-08T15:31:11.682707Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Connecting -> Connected 2026-01-08T15:31:11.822942Z node 6 :BS_NODE WARN: {NWDC01@distconf.cpp:423} StateFunc too long Type# 268639239 Duration# 0.007998s 2026-01-08T15:31:11.925974Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2026-01-08T15:31:11.926006Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2026-01-08T15:31:11.926013Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2026-01-08T15:31:11.926090Z node 4 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2026-01-08T15:31:12.088367Z node 4 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: PathErrorUnknown, path: Root/.metadata/script_executions 2026-01-08T15:31:12.398575Z node 4 :TX_CONVEYOR ERROR: log.cpp:841: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2026-01-08T15:31:12.444057Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2026-01-08T15:31:12.586713Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) 2026-01-08T15:31:12.793508Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterUserAttributes, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_alter_user_attrs.cpp:72) 2026-01-08T15:31:12.984304Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterPersQueueGroup, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_alter_pq.cpp:311) 2026-01-08T15:31:13.042131Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpDropPersQueueGroup, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_drop_pq.cpp:422) 2026-01-08T15:31:13.071010Z node 4 :HIVE WARN: hive_impl.cpp:519: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 4, TabletId: 72075186224037889 not found 2026-01-08T15:31:13.071041Z 
node 4 :HIVE WARN: hive_impl.cpp:519: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 4, TabletId: 72075186224037890 not found
2026-01-08T15:31:13.071053Z node 4 :HIVE WARN: hive_impl.cpp:519: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 4, TabletId: 72075186224037888 not found
2026-01-08T15:31:13.071067Z node 4 :HIVE WARN: hive_impl.cpp:519: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 4, TabletId: 72075186224037891 not found
2026-01-08T15:31:13.083710Z node 4 :HIVE WARN: tx__delete_tablet.cpp:91: HIVE#72057594037968897 THive::TTxDeleteTablet tablet (72057594046644480,2) wasn't found
2026-01-08T15:31:13.083847Z node 4 :HIVE WARN: tx__delete_tablet.cpp:91: HIVE#72057594037968897 THive::TTxDeleteTablet tablet (72057594046644480,4) wasn't found
2026-01-08T15:31:13.083883Z node 4 :HIVE WARN: tx__delete_tablet.cpp:91: HIVE#72057594037968897 THive::TTxDeleteTablet tablet (72057594046644480,3) wasn't found
2026-01-08T15:31:16.881476Z node 8 :BS_NODE WARN: {NWDC01@distconf.cpp:423} StateFunc too long Type# 268639238 Duration# 0.020027s
2026-01-08T15:31:16.878911Z node 7 :METADATA_PROVIDER WARN: log.cpp:841: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[7:7593013740367394351:2079];send_to=[0:7307199536658146131:7762515];
2026-01-08T15:31:16.879854Z node 7 :METADATA_PROVIDER ERROR: log.cpp:841: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message;
2026-01-08T15:31:16.908153Z node 7 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: LookupError, path: Root/.metadata/script_executions
2026-01-08T15:31:17.041475Z node 7 :IMPORT WARN: schemeshard_import.cpp:440: Table profiles were not loaded
2026-01-08T15:31:17.063584Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Unknown -> Disconnected
2026-01-08T15:31:17.063668Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Disconnected -> Connecting
2026-01-08T15:31:17.084195Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Connecting -> Connected
2026-01-08T15:31:17.086537Z node 7 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: LookupError, path: Root/.metadata/script_executions
2026-01-08T15:31:17.138903Z node 8 :BS_NODE WARN: {NWDC01@distconf.cpp:423} StateFunc too long Type# 268639238 Duration# 0.012465s
2026-01-08T15:31:17.138903Z node 9 :BS_NODE WARN: {NWDC01@distconf.cpp:423} StateFunc too long Type# 268639238 Duration# 0.007161s
2026-01-08T15:31:17.227086Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe)
2026-01-08T15:31:17.227111Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe)
2026-01-08T15:31:17.227119Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe)
2026-01-08T15:31:17.227191Z node 7 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration
2026-01-08T15:31:17.561487Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311)
2026-01-08T15:31:17.633839Z node 7 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: PathErrorUnknown, path: Root/.metadata/script_executions
2026-01-08T15:31:17.647229Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101)
2026-01-08T15:31:17.867918Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterUserAttributes, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_alter_user_attrs.cpp:72)
2026-01-08T15:31:17.888311Z node 7 :TX_CONVEYOR ERROR: log.cpp:841: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup;
2026-01-08T15:31:18.099612Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterPersQueueGroup, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_alter_pq.cpp:311)
2026-01-08T15:31:18.201064Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterPersQueueGroup, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_alter_pq.cpp:311)
2026-01-08T15:31:18.297995Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpDropPersQueueGroup, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_drop_pq.cpp:422)
2026-01-08T15:31:18.337623Z node 7 :HIVE WARN: hive_impl.cpp:519: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 7, TabletId: 72075186224037888 not found
2026-01-08T15:31:18.342467Z node 7 :HIVE WARN: hive_impl.cpp:519: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 7, TabletId: 72075186224037890 not found
2026-01-08T15:31:18.342498Z node 7 :HIVE WARN: hive_impl.cpp:519: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 7, TabletId: 72075186224037889 not found
2026-01-08T15:31:18.342538Z node 7 :HIVE WARN: hive_impl.cpp:519: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 7, TabletId: 72075186224037891 not found
2026-01-08T15:31:21.966641Z node 10 :BS_NODE WARN: {NWDC01@distconf.cpp:423} StateFunc too long Type# 268639239 Duration# 0.005681s
2026-01-08T15:31:21.999322Z node 10 :METADATA_PROVIDER WARN: log.cpp:841: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[10:7593013763857916945:2089];send_to=[0:7307199536658146131:7762515];
2026-01-08T15:31:21.999394Z node 10 :METADATA_PROVIDER ERROR: log.cpp:841: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message;
2026-01-08T15:31:22.284394Z node 10 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: LookupError, path: Root/.metadata/script_executions
2026-01-08T15:31:22.522202Z node 10 :IMPORT WARN: schemeshard_import.cpp:440: Table profiles were not loaded
2026-01-08T15:31:22.548235Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Unknown -> Disconnected
2026-01-08T15:31:22.548325Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Disconnected -> Connecting
2026-01-08T15:31:22.579803Z node 10 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: LookupError, path: Root/.metadata/script_executions
2026-01-08T15:31:22.580803Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Connecting -> Connected
2026-01-08T15:31:22.806446Z node 10 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe)
2026-01-08T15:31:22.806469Z node 10 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe)
2026-01-08T15:31:22.806476Z node 10 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe)
2026-01-08T15:31:22.806616Z node 10 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration
2026-01-08T15:31:23.058696Z node 10 :TX_CONVEYOR ERROR: log.cpp:841: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup;
2026-01-08T15:31:23.067516Z node 10 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: PathErrorUnknown, path: Root/.metadata/script_executions
2026-01-08T15:31:23.115603Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311)
2026-01-08T15:31:23.227115Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101)
2026-01-08T15:31:23.445942Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterUserAttributes, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_alter_user_attrs.cpp:72)
2026-01-08T15:31:23.466606Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:123: NotifyTxCompletion, unknown transaction, txId: 281474976715659, at schemeshard: 72057594046644480
2026-01-08T15:31:27.000481Z node 10 :METADATA_PROVIDER ERROR: log.cpp:841: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[10:7593013763857916945:2089];send_to=[0:7307199536658146131:7762515];
2026-01-08T15:31:27.000568Z node 10 :METADATA_PROVIDER ERROR: log.cpp:841: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout;
|86.5%| [TM] {BAZEL_UPLOAD} ydb/services/datastreams/ut/unittest
>> TTxDataShardPrefixKMeansScan::BuildToPosting [GOOD]
>> TTxDataShardPrefixKMeansScan::BuildToPostingWithOverlap
------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/tx/unittest >> KqpSinkTx::SnapshotROInteractive2 [GOOD]
Test command err:
Trying to start YDB, gRPC: 25442, MsgBus: 26452
2026-01-08T15:31:07.121992Z node 1 :METADATA_PROVIDER WARN: log.cpp:841: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7593013699982301465:2068];send_to=[0:7307199536658146131:7762515];
2026-01-08T15:31:07.122093Z node 1 :METADATA_PROVIDER ERROR: log.cpp:841: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message;
2026-01-08T15:31:07.522272Z node 1 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: LookupError, path: Root/.metadata/script_executions
2026-01-08T15:31:07.607254Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected
2026-01-08T15:31:07.607363Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting
2026-01-08T15:31:07.658981Z node 1 :IMPORT WARN: schemeshard_import.cpp:440: Table profiles were not loaded
2026-01-08T15:31:07.664740Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1374: Notification cookie mismatch for subscription [1:7593013699982301437:2081] 1767886267120042 != 1767886267120045
2026-01-08T15:31:07.683442Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected
2026-01-08T15:31:07.752313Z node 1 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: PathErrorUnknown, path: Root/.metadata/script_executions
2026-01-08T15:31:07.812978Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe)
2026-01-08T15:31:07.813010Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe)
2026-01-08T15:31:07.813027Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe)
2026-01-08T15:31:07.813166Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration
2026-01-08T15:31:08.134619Z node 1 :TX_CONVEYOR ERROR: log.cpp:841: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup;
2026-01-08T15:31:08.455961Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311)
2026-01-08T15:31:08.467428Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:123: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480
2026-01-08T15:31:11.178949Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7593013717162171512:2324], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions }
2026-01-08T15:31:11.179122Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:263: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions }
2026-01-08T15:31:11.180061Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7593013717162171524:2327], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions }
2026-01-08T15:31:11.180103Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7593013717162171525:2328], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions }
2026-01-08T15:31:11.180139Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:263: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions }
2026-01-08T15:31:11.183996Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179)
2026-01-08T15:31:11.208687Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7593013717162171528:2329], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710658 completed, doublechecking }
2026-01-08T15:31:11.364474Z node 1 :TX_PROXY ERROR: schemereq.cpp:624: Actor# [1:7593013717162171579:2539] txid# 281474976710659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 41], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 }
2026-01-08T15:31:11.688838Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689)
2026-01-08T15:31:11.973873Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689)
2026-01-08T15:31:12.128776Z node 1 :METADATA_PROVIDER ERROR: log.cpp:841: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7593013699982301465:2068];send_to=[0:7307199536658146131:7762515];
2026-01-08T15:31:12.128895Z node 1 :METADATA_PROVIDER ERROR: log.cpp:841: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout;
2026-01-08T15:31:13.326970Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689)
2026-01-08T15:31:16.088852Z node 1 :KQP_SESSION WARN: {KQPSA@kqp_session_actor.cpp:3171} SessionId: ydb://session/3?node_id=1&id=NGQ3NTJhZi1lZWZjNjYzZi03NWU3MDBmOS1jNThlNjFlNg==, ActorId: [1:7593013734342048699:2966], ActorState: ExecuteState, LegacyTraceId: 01kef3qhcmbbc2nvex87w47t6s, Create QueryResponse for error on request, msg: status# GENERIC_ERROR issues# { position { row: 3 column: 29 } message: "Operation \'Upsert\' can\'t be performed in read only transaction" end_position { row: 3 column: 29 } issue_code: 2008 severity: 1 } trace_id#
:3:29: Error: Operation 'Upsert' can't be performed in read only transaction, code: 2008
Trying to start YDB, gRPC: 5808, MsgBus: 18278
2026-01-08T15:31:17.676516Z node 2 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: LookupError, path: Root/.metadata/script_executions
2026-01-08T15:31:17.676737Z node 2 :METADATA_PROVIDER ERROR: log.cpp:841: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError;
2026-01-08T15:31:17.749855Z node 2 :IMPORT WARN: schemeshard_import.cpp:440: Table profiles were not loaded
2026-01-08T15:31:17.754442Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1374: Notification cookie mismatch for subscription [2:7593013743272958418:2081] 1767886277606904 != 1767886277606907
2026-01-08T15:31:17.758989Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected
2026-01-08T15:31:17.759069Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting
2026-01-08T15:31:17.822217Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected
2026-01-08T15:31:17.873670Z node 2 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: PathErrorUnknown, path: Root/.metadata/script_executions
2026-01-08T15:31:18.030257Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe)
2026-01-08T15:31:18.030283Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe)
2026-01-08T15:31:18.030290Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe)
2026-01-08T15:31:18.030398Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration
2026-01-08T15:31:18.647641Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311)
2026-01-08T15:31:18.654179Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:123: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480
2026-01-08T15:31:18.672758Z node 2 :TX_CONVEYOR ERROR: log.cpp:841: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup;
2026-01-08T15:31:21.412436Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7593013760452828475:2323], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions }
2026-01-08T15:31:21.412521Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:263: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions }
2026-01-08T15:31:21.412979Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7593013760452828494:2325], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions }
2026-01-08T15:31:21.413053Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:263: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions }
2026-01-08T15:31:21.413274Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7593013760452828497:2327], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions }
2026-01-08T15:31:21.417707Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179)
2026-01-08T15:31:21.430382Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:123: NotifyTxCompletion, unknown transaction, txId: 281474976710658, at schemeshard: 72057594046644480
2026-01-08T15:31:21.430657Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7593013760452828499:2328], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710658 completed, doublechecking }
2026-01-08T15:31:21.501626Z node 2 :TX_PROXY ERROR: schemereq.cpp:624: Actor# [2:7593013760452828550:2537] txid# 281474976710659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 41], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 }
2026-01-08T15:31:21.576378Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689)
2026-01-08T15:31:21.641362Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689)
2026-01-08T15:31:22.912281Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689)
Trying to start YDB, gRPC: 17521, MsgBus: 62616
2026-01-08T15:31:26.111759Z node 3 :METADATA_PROVIDER WARN: log.cpp:841: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[3:7593013781658799242:2071];send_to=[0:7307199536658146131:7762515];
2026-01-08T15:31:26.111838Z node 3 :METADATA_PROVIDER ERROR: log.cpp:841: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message;
2026-01-08T15:31:26.143958Z node 3 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: LookupError, path: Root/.metadata/script_executions
2026-01-08T15:31:26.248563Z node 3 :IMPORT WARN: schemeshard_import.cpp:440: Table profiles were not loaded
2026-01-08T15:31:26.249837Z node 3 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1374: Notification cookie mismatch for subscription [3:7593013781658799202:2081] 1767886286110040 != 1767886286110043
2026-01-08T15:31:26.275441Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Unknown -> Disconnected
2026-01-08T15:31:26.275528Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Disconnected -> Connecting
2026-01-08T15:31:26.317542Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Connecting -> Connected
2026-01-08T15:31:26.334736Z node 3 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: PathErrorUnknown, path: Root/.metadata/script_executions
2026-01-08T15:31:26.415467Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe)
2026-01-08T15:31:26.415500Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe)
2026-01-08T15:31:26.415510Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe)
2026-01-08T15:31:26.415598Z node 3 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration
2026-01-08T15:31:26.951028Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311)
2026-01-08T15:31:27.118888Z node 3 :TX_CONVEYOR ERROR: log.cpp:841: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup;
2026-01-08T15:31:30.481791Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7593013798838669273:2324], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions }
2026-01-08T15:31:30.481909Z node 3 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:263: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions }
2026-01-08T15:31:30.482469Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7593013798838669285:2327], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions }
2026-01-08T15:31:30.482521Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7593013798838669286:2328], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions }
2026-01-08T15:31:30.482829Z node 3 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:263: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions }
2026-01-08T15:31:30.488097Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179)
2026-01-08T15:31:30.508280Z node 3 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [3:7593013798838669289:2329], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710658 completed, doublechecking }
2026-01-08T15:31:30.557901Z node 3 :TX_PROXY ERROR: schemereq.cpp:624: Actor# [3:7593013798838669340:2536] txid# 281474976710659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 41], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 }
2026-01-08T15:31:30.637812Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689)
2026-01-08T15:31:30.709181Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689)
2026-01-08T15:31:31.497329Z node 3 :METADATA_PROVIDER ERROR: log.cpp:841: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[3:7593013781658799242:2071];send_to=[0:7307199536658146131:7762515];
2026-01-08T15:31:31.503866Z node 3 :METADATA_PROVIDER ERROR: log.cpp:841: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout;
2026-01-08T15:31:32.013798Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689)
|86.5%| [TM] {BAZEL_UPLOAD} ydb/core/kqp/ut/tx/unittest
------- [TM] {asan, default-linux-x86_64, release} ydb/core/fq/libs/actors/ut/unittest >> TDatabaseResolverTests::ClickHouse_PermissionDenied [GOOD]
Test command err:
2026-01-08T15:31:37.784461Z node 2 :FQ_DATABASE_RESOLVER ERROR: database_resolver.cpp:175: TraceId: traceId ResponseProcessor::Handle(HttpIncomingResponse): error=Error while trying to resolve managed ClickHouse database with id etn021us5r9rhld1vgbh via HTTP request to: endpoint 'mdb.api.cloud.yandex.net:443', url '/managed-clickhouse/v1/clusters/etn021us5r9rhld1vgbh/hosts': you have no permission to resolve database id into database endpoint. Please check that your service account has role `managed-clickhouse.viewer`.
|86.5%| [TM] {BAZEL_UPLOAD} ydb/core/fq/libs/actors/ut/unittest
>> KqpPg::InsertNoTargetColumns_Simple-useSink [GOOD]
>> KqpPg::InsertNoTargetColumns_Serial-useSink
>> DataShardVolatile::DistributedWriteThenScanQuery [GOOD]
>> DataShardVolatile::DistributedWriteWithAsyncIndex
>> KeyValueGRPCService::SimpleWriteListRangeV1 [GOOD]
>> KeyValueGRPCService::SimpleWriteListRangeV2
>> TDatabaseResolverTests::DataStreams_Serverless
>> TDatabaseResolverTests::DataStreams_Serverless [GOOD]
>> TDatabaseResolverTests::DataStreams_PermissionDenied
>> TDatabaseResolverTests::DataStreams_PermissionDenied [GOOD]
>> KqpPg::NoTableQuery-useSink [GOOD]
>> KqpPg::PgCreateTable
>> QuoterWithKesusTest::CanDeleteResourceWhenUsingIt [GOOD]
>> QuoterWithKesusTest::CanKillKesusWhenUsingIt
>> KqpPg::EmptyQuery-useSink [GOOD]
>> KqpPg::DuplicatedColumns+useSink
>> TxUsage::WriteToTopic_Demo_41_Table
>> TMLPTests::CreateWithRetentionStorage [GOOD]
>> TMLPTests::AddWithRetentionStorage
>> TKesusTest::TestAcquireTimeout [GOOD]
>> TKesusTest::TestAcquireSharedBlocked
------- [TM] {asan, default-linux-x86_64, release} ydb/core/fq/libs/actors/ut/unittest >> TDatabaseResolverTests::DataStreams_PermissionDenied [GOOD]
Test command err:
2026-01-08T15:31:39.570874Z node 2 :FQ_DATABASE_RESOLVER ERROR: database_resolver.cpp:175: TraceId: traceId ResponseProcessor::Handle(HttpIncomingResponse): error=Error while trying to resolve managed DataStreams database with id etn021us5r9rhld1vgbh via HTTP request to: endpoint 'ydbc.ydb.cloud.yandex.net:8789', url '/ydbc/cloud-prod/database?databaseId=etn021us5r9rhld1vgbh': you have no permission to resolve database id into database endpoint.
|86.5%| [TM] {BAZEL_UPLOAD} ydb/core/fq/libs/actors/ut/unittest
>> TxUsage::Sinks_Oltp_WriteToTopicAndTable_1_Table
>> KqpPg::CreateTableSerialColumns-useSink [GOOD]
>> KqpPg::DropIndex
>> TxUsage::WriteToTopic_Demo_12_Table
>> TKesusTest::TestAcquireSharedBlocked [GOOD]
>> TKesusTest::TestAcquireTimeoutAfterReboot
>> BasicUsage::CreateTopicWithCustomName
>> KqpSinkMvcc::OltpNamedStatement [GOOD]
>> KqpSinkMvcc::OltpMultiSinksNoSinks
>> DataStreams::TestGetRecords1MBMessagesOneByOneByTS [GOOD]
>> DataStreams::TestGetRecordsStreamWithMultipleShards
>> THiveTest::TestCreateTabletReboots [GOOD]
>> THiveTest::TestCreateTabletWithWrongSPoolsAndReassignGroupsFailButDeletionIsOk
|86.5%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/schemeshard/ut_external_table_reboots/ydb-core-tx-schemeshard-ut_external_table_reboots
|86.5%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/schemeshard/ut_external_table_reboots/ydb-core-tx-schemeshard-ut_external_table_reboots
|86.5%| [LD] {RESULT} $(B)/ydb/core/tx/schemeshard/ut_external_table_reboots/ydb-core-tx-schemeshard-ut_external_table_reboots
|86.5%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/library/table_creator/ut/ydb-library-table_creator-ut
|86.6%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/library/table_creator/ut/ydb-library-table_creator-ut
|86.6%| [LD] {RESULT} $(B)/ydb/library/table_creator/ut/ydb-library-table_creator-ut
>> KqpPg::Insert_Serial+useSink [GOOD]
>> KqpPg::Insert_Serial-useSink
>> THiveTest::TestCreateTabletWithWrongSPoolsAndReassignGroupsFailButDeletionIsOk [GOOD]
>> THiveTest::TestCreateTabletAndReassignGroups3
>> TxUsage::WriteToTopic_Demo_11_Table
>> THiveTest::TestCreateTabletAndReassignGroups3 [GOOD]
>> THiveTest::TestCreateTabletAndReassignGroupsWithReboots
|86.5%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/kqp/ut/olap/indexes/ydb-core-kqp-ut-olap-indexes
|86.6%| [LD] {RESULT} $(B)/ydb/core/kqp/ut/olap/indexes/ydb-core-kqp-ut-olap-indexes
|86.6%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/kqp/ut/olap/indexes/ydb-core-kqp-ut-olap-indexes
>> DataShardVolatile::DistributedWriteShardRestartAfterExpectation-UseSink [GOOD]
>> DataShardVolatile::DistributedWriteEarlierSnapshotNotBlocked
>> KqpPg::DropIndex [GOOD]
>> KqpPg::CreateUniqPgColumn+useSink
>> KqpPg::DuplicatedColumns+useSink [GOOD]
>> KqpPg::DuplicatedColumns-useSink
>> KqpPg::InsertNoTargetColumns_Serial-useSink [GOOD]
>> KqpPg::InsertValuesFromTableWithDefault+useSink
>> KqpPg::TypeCoercionBulkUpsert [GOOD]
>> KqpPg::TypeCoercionInsert+useSink
|86.6%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/viewer/ut/ydb-core-viewer-ut
|86.6%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/viewer/ut/ydb-core-viewer-ut
|86.6%| [LD] {RESULT} $(B)/ydb/core/viewer/ut/ydb-core-viewer-ut
>> DataStreams::TestGetRecordsStreamWithMultipleShards [GOOD]
>> DataStreams::TestGetRecordsWithBigSeqno
|86.6%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/schemeshard/ut_backup_collection/ydb-core-tx-schemeshard-ut_backup_collection
|86.6%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/schemeshard/ut_backup_collection/ydb-core-tx-schemeshard-ut_backup_collection
|86.6%| [LD] {RESULT} $(B)/ydb/core/tx/schemeshard/ut_backup_collection/ydb-core-tx-schemeshard-ut_backup_collection
>> KqpPg::Insert_Serial-useSink [GOOD]
>> KqpPg::InsertValuesFromTableWithDefaultText+useSink
|86.6%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/datashard/ut_write/ydb-core-tx-datashard-ut_write
|86.6%| [LD] {RESULT} $(B)/ydb/core/tx/datashard/ut_write/ydb-core-tx-datashard-ut_write
|86.6%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/datashard/ut_write/ydb-core-tx-datashard-ut_write
>> TTxDataShardPrefixKMeansScan::BuildToPostingWithOverlap [GOOD]
>> TTxDataShardPrefixKMeansScan::BuildToBuild
>> DataShardVolatile::DistributedWriteWithAsyncIndex [GOOD]
>> DataShardVolatile::DistributedWriteThenLateWriteReadCommit
>> THiveTest::TestCreateTabletAndReassignGroupsWithReboots [GOOD]
>> THiveTest::TestDeleteTabletError
>> KeyValueGRPCService::SimpleWriteListRangeV2 [GOOD]
>> KeyValueGRPCService::SimpleGetStorageChannelStatusV1
|86.6%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/s3/py3test >> test_s3.py::TestPgYdbS3TTL::test_s3[table_index_0__ASYNC-pk_types2-all_types2-index2---ASYNC] [GOOD]
|86.6%| [TM] {BAZEL_UPLOAD} ydb/tests/datashard/s3/py3test
>> KqpPg::DuplicatedColumns-useSink [GOOD]
>> KqpPg::InsertFromSelect_NoReorder+useSink
>> BasicUsage::CreateTopicWithCustomName [GOOD]
>> BasicUsage::CreateTopicWithSharedConsumer_MoveDeadLetterPolicy
>> KqpSinkMvcc::OltpMultiSinksNoSinks [GOOD]
>> KqpSinkMvcc::OltpMultiSinks
>> TxUsage::WriteToTopic_Demo_12_Table [GOOD]
>> QuoterWithKesusTest::CanKillKesusWhenUsingIt [GOOD]
>> KqpPg::InsertValuesFromTableWithDefault+useSink [GOOD]
>> KqpPg::InsertValuesFromTableWithDefault-useSink
>> THiveTest::TestDeleteTabletError [GOOD]
>> THiveTest::TestDeleteTabletWithRestartAndRetry
|86.6%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/sqs/messaging/py3test >> test_fifo_messaging.py::TestSqsFifoMessagingWithTenant::test_crutch_groups_selection_algorithm_selects_second_group_batch[tables_format_v0] [GOOD]
|86.6%| [TM] {BAZEL_UPLOAD} ydb/tests/functional/sqs/messaging/py3test
>> TxUsage::Sinks_Oltp_WriteToTopicAndTable_1_Table [GOOD]
>> DataStreams::TestGetRecordsWithBigSeqno [GOOD]
>> TxUsage::WriteToTopic_Demo_12_Query
>> KqpPg::InsertValuesFromTableWithDefaultText+useSink [GOOD]
>> KqpPg::InsertValuesFromTableWithDefaultText-useSink
>> TxUsage::WriteToTopic_Demo_19_RestartNo_Table
>> TxUsage::Sinks_Oltp_WriteToTopicAndTable_1_Query
>> DataShardVolatile::DistributedWriteEarlierSnapshotNotBlocked [GOOD]
>> DataShardVolatile::DistributedWriteLaterSnapshotBlockedThenCommit+UseSink
>> THiveTest::TestDeleteTabletWithRestartAndRetry [GOOD]
>> THiveTest::TestCreateTabletChangeToExternal
------- [TM] {asan, default-linux-x86_64, release} ydb/services/datastreams/ut/unittest >> DataStreams::TestGetRecordsWithBigSeqno [GOOD]
Test command err:
2026-01-08T15:31:05.615669Z node 1 :METADATA_PROVIDER WARN: log.cpp:841: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7593013691588145597:2079];send_to=[0:7307199536658146131:7762515];
2026-01-08T15:31:05.834604Z node 2 :BS_NODE WARN: {NWDC01@distconf.cpp:423} StateFunc too long Type# 268639238 Duration# 0.035961s
2026-01-08T15:31:05.874396Z node 3 :BS_NODE WARN: {NWDC01@distconf.cpp:423} StateFunc too long Type# 268639239 Duration# 0.008007s
2026-01-08T15:31:06.466835Z node 1 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: LookupError, path: Root/.metadata/script_executions
2026-01-08T15:31:06.631510Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected
2026-01-08T15:31:06.631610Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting
2026-01-08T15:31:06.658653Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected
2026-01-08T15:31:06.786168Z node 1 :TX_CONVEYOR ERROR: log.cpp:841: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup;
2026-01-08T15:31:06.945535Z node 1 :IMPORT WARN: schemeshard_import.cpp:440: Table profiles were not loaded
2026-01-08T15:31:06.961306Z node 1 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: PathErrorUnknown, path: Root/.metadata/script_executions
2026-01-08T15:31:07.006521Z node 3 :BS_NODE WARN: {NWDC01@distconf.cpp:423} StateFunc too long Type# 268639239 Duration# 0.008911s
2026-01-08T15:31:07.318656Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe)
2026-01-08T15:31:07.318688Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe)
2026-01-08T15:31:07.318695Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe)
2026-01-08T15:31:07.318794Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration
2026-01-08T15:31:07.773172Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311)
2026-01-08T15:31:07.889290Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101)
2026-01-08T15:31:08.210935Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterUserAttributes, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_alter_user_attrs.cpp:72)
2026-01-08T15:31:08.746762Z node 1 :PERSQUEUE ERROR: partition_read.cpp:831: [72075186224037888][Partition][0][StateIdle] reading from too big offset - topic stream_TestGetRecordsStreamWithSingleShard partition 0 client $without_consumer EndOffset 30 offset 100000
2026-01-08T15:31:08.746849Z node 1 :PERSQUEUE ERROR: pq_impl.cpp:203: Answer error topic: 'stream_TestGetRecordsStreamWithSingleShard' partition: 0 messageNo: 0 requestId: error: trying to read from future. ReadOffset 100000, 0 EndOffset 30
2026-01-08T15:31:13.098703Z node 4 :METADATA_PROVIDER WARN: log.cpp:841: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[4:7593013728728737514:2084];send_to=[0:7307199536658146131:7762515];
2026-01-08T15:31:13.098769Z node 4 :METADATA_PROVIDER ERROR: log.cpp:841: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message;
2026-01-08T15:31:13.161250Z node 4 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: LookupError, path: Root/.metadata/script_executions
2026-01-08T15:31:13.327412Z node 4 :IMPORT WARN: schemeshard_import.cpp:440: Table profiles were not loaded
2026-01-08T15:31:13.338663Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Unknown -> Disconnected
2026-01-08T15:31:13.338754Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Disconnected -> Connecting
2026-01-08T15:31:13.383951Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Connecting -> Connected
2026-01-08T15:31:13.399154Z node 4 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: PathErrorUnknown, path: Root/.metadata/script_executions
2026-01-08T15:31:13.729226Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe)
2026-01-08T15:31:13.729250Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe)
2026-01-08T15:31:13.729259Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe)
2026-01-08T15:31:13.729348Z node 4 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration
2026-01-08T15:31:14.129851Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311)
2026-01-08T15:31:14.142727Z node 4 :TX_CONVEYOR ERROR: log.cpp:841: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup;
2026-01-08T15:31:14.297815Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101)
2026-01-08T15:31:14.536641Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterUserAttributes, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_alter_user_attrs.cpp:72)
2026-01-08T15:31:18.099858Z node 4 :METADATA_PROVIDER ERROR: log.cpp:841: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[4:7593013728728737514:2084];send_to=[0:7307199536658146131:7762515];
2026-01-08T15:31:18.099968Z node 4 :METADATA_PROVIDER ERROR: log.cpp:841: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout;
2026-01-08T15:31:28.262482Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7747: Cannot get console configs
2026-01-08T15:31:28.262515Z node 4 :IMPORT WARN: schemeshard_import.cpp:440: Table profiles were not loaded
2026-01-08T15:31:42.324587Z node 7 :METADATA_PROVIDER WARN: log.cpp:841: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[7:7593013850493088841:2080];send_to=[0:7307199536658146131:7762515];
2026-01-08T15:31:42.324665Z node 7 :METADATA_PROVIDER ERROR: log.cpp:841: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message;
2026-01-08T15:31:42.376685Z node 7 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: LookupError, path: Root/.metadata/script_executions
2026-01-08T15:31:42.649978Z node 7 :IMPORT WARN: schemeshard_import.cpp:440: Table profiles were not loaded
2026-01-08T15:31:42.657908Z node 7 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: LookupError, path: Root/.metadata/script_executions
2026-01-08T15:31:42.669032Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Unknown -> Disconnected
2026-01-08T15:31:42.669106Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Disconnected -> Connecting
2026-01-08T15:31:42.694958Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Connecting -> Connected
2026-01-08T15:31:42.827015Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe)
2026-01-08T15:31:42.827034Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe)
2026-01-08T15:31:42.827040Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe)
2026-01-08T15:31:42.827138Z node 7 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration
2026-01-08T15:31:43.168576Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311)
2026-01-08T15:31:43.263147Z node 7 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: PathErrorUnknown, path: Root/.metadata/script_executions
2026-01-08T15:31:43.336021Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101)
2026-01-08T15:31:43.338397Z node 7 :TX_CONVEYOR ERROR: log.cpp:841: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup;
2026-01-08T15:31:43.655563Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterUserAttributes, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_alter_user_attrs.cpp:72)
2026-01-08T15:31:49.999532Z node 10 :METADATA_PROVIDER WARN: log.cpp:841: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[10:7593013883616790040:2163];send_to=[0:7307199536658146131:7762515];
2026-01-08T15:31:49.999581Z node 10 :METADATA_PROVIDER ERROR: log.cpp:841: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message;
2026-01-08T15:31:50.109730Z node 10 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: LookupError, path: Root/.metadata/script_executions
2026-01-08T15:31:50.286632Z node 10 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: LookupError, path: Root/.metadata/script_executions
2026-01-08T15:31:50.296892Z node 10 :IMPORT WARN: schemeshard_import.cpp:440: Table profiles were not loaded
2026-01-08T15:31:50.320842Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Unknown -> Disconnected
2026-01-08T15:31:50.320943Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Disconnected -> Connecting
2026-01-08T15:31:50.329577Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Connecting -> Connected
2026-01-08T15:31:50.515449Z node 10 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe)
2026-01-08T15:31:50.515476Z node 10 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe)
2026-01-08T15:31:50.515487Z node 10 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe)
2026-01-08T15:31:50.515606Z node 10 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration
2026-01-08T15:31:50.821444Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311)
2026-01-08T15:31:50.832942Z node 10 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: PathErrorUnknown, path: Root/.metadata/script_executions
2026-01-08T15:31:50.947741Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101)
2026-01-08T15:31:51.033325Z node 10 :TX_CONVEYOR ERROR: log.cpp:841: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup;
2026-01-08T15:31:51.299860Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterUserAttributes, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_alter_user_attrs.cpp:72)
|86.6%| [TM] {BAZEL_UPLOAD} ydb/services/datastreams/ut/unittest
------- [TM] {asan, default-linux-x86_64, release} ydb/core/quoter/ut/unittest >> QuoterWithKesusTest::CanKillKesusWhenUsingIt [GOOD]
Test command err:
2026-01-08T15:29:01.422977Z node 1 :QUOTER_PROXY INFO: kesus_quoter_proxy.cpp:1091: [/Path/KesusName]: Created kesus quoter proxy. Tablet id: 100500
2026-01-08T15:29:01.423114Z node 1 :QUOTER_PROXY DEBUG: kesus_quoter_proxy.cpp:1150: [/Path/KesusName]: Connecting to kesus
2026-01-08T15:29:01.425011Z node 1 :QUOTER_PROXY WARN: kesus_quoter_proxy.cpp:809: [/Path/KesusName]: Failed to connect to tablet. Status: ERROR
2026-01-08T15:29:01.425078Z node 1 :QUOTER_PROXY INFO: kesus_quoter_proxy.cpp:1148: [/Path/KesusName]: Reconnecting to kesus
2026-01-08T15:29:01.445757Z node 2 :QUOTER_PROXY INFO: kesus_quoter_proxy.cpp:1091: [/Path/KesusName]: Created kesus quoter proxy. Tablet id: 100500
2026-01-08T15:29:01.445986Z node 2 :QUOTER_PROXY DEBUG: kesus_quoter_proxy.cpp:1150: [/Path/KesusName]: Connecting to kesus
2026-01-08T15:29:01.446135Z node 2 :QUOTER_PROXY DEBUG: kesus_quoter_proxy.cpp:800: [/Path/KesusName]: Successfully connected to tablet
2026-01-08T15:29:01.446215Z node 2 :QUOTER_PROXY WARN: kesus_quoter_proxy.cpp:824: [/Path/KesusName]: Disconnected from tablet
2026-01-08T15:29:01.446240Z node 2 :QUOTER_PROXY INFO: kesus_quoter_proxy.cpp:1148: [/Path/KesusName]: Reconnecting to kesus
2026-01-08T15:29:01.446559Z node 2 :QUOTER_PROXY DEBUG: kesus_quoter_proxy.cpp:800: [/Path/KesusName]: Successfully connected to tablet
2026-01-08T15:29:01.468191Z node 3 :QUOTER_PROXY INFO: kesus_quoter_proxy.cpp:1091: [/Path/KesusName]: Created kesus quoter proxy. Tablet id: 100500
2026-01-08T15:29:01.468319Z node 3 :QUOTER_PROXY DEBUG: kesus_quoter_proxy.cpp:1150: [/Path/KesusName]: Connecting to kesus
2026-01-08T15:29:01.468681Z node 3 :QUOTER_PROXY WARN: kesus_quoter_proxy.cpp:809: [/Path/KesusName]: Failed to connect to tablet. Status: ERROR
2026-01-08T15:29:01.468718Z node 3 :QUOTER_PROXY INFO: kesus_quoter_proxy.cpp:1148: [/Path/KesusName]: Reconnecting to kesus
2026-01-08T15:29:01.469020Z node 3 :QUOTER_PROXY DEBUG: kesus_quoter_proxy.cpp:800: [/Path/KesusName]: Successfully connected to tablet
2026-01-08T15:29:01.469108Z node 3 :QUOTER_PROXY WARN: kesus_quoter_proxy.cpp:809: [/Path/KesusName]: Failed to connect to tablet. Status: ERROR
2026-01-08T15:29:01.469131Z node 3 :QUOTER_PROXY INFO: kesus_quoter_proxy.cpp:1148: [/Path/KesusName]: Reconnecting to kesus
2026-01-08T15:29:01.469301Z node 3 :QUOTER_PROXY DEBUG: kesus_quoter_proxy.cpp:800: [/Path/KesusName]: Successfully connected to tablet
2026-01-08T15:29:01.469343Z node 3 :QUOTER_PROXY WARN: kesus_quoter_proxy.cpp:809: [/Path/KesusName]: Failed to connect to tablet. Status: ERROR
2026-01-08T15:29:01.469360Z node 3 :QUOTER_PROXY INFO: kesus_quoter_proxy.cpp:1148: [/Path/KesusName]: Reconnecting to kesus
2026-01-08T15:29:01.469494Z node 3 :QUOTER_PROXY DEBUG: kesus_quoter_proxy.cpp:800: [/Path/KesusName]: Successfully connected to tablet
2026-01-08T15:29:01.469539Z node 3 :QUOTER_PROXY WARN: kesus_quoter_proxy.cpp:809: [/Path/KesusName]: Failed to connect to tablet. Status: ERROR
2026-01-08T15:29:01.469565Z node 3 :QUOTER_PROXY INFO: kesus_quoter_proxy.cpp:1148: [/Path/KesusName]: Reconnecting to kesus
2026-01-08T15:29:01.469711Z node 3 :QUOTER_PROXY DEBUG: kesus_quoter_proxy.cpp:800: [/Path/KesusName]: Successfully connected to tablet
2026-01-08T15:29:01.469759Z node 3 :QUOTER_PROXY WARN: kesus_quoter_proxy.cpp:809: [/Path/KesusName]: Failed to connect to tablet. Status: ERROR
2026-01-08T15:29:01.469781Z node 3 :QUOTER_PROXY INFO: kesus_quoter_proxy.cpp:1148: [/Path/KesusName]: Reconnecting to kesus
2026-01-08T15:29:01.470004Z node 3 :QUOTER_PROXY DEBUG: kesus_quoter_proxy.cpp:800: [/Path/KesusName]: Successfully connected to tablet
2026-01-08T15:29:01.470064Z node 3 :QUOTER_PROXY WARN: kesus_quoter_proxy.cpp:824: [/Path/KesusName]: Disconnected from tablet
2026-01-08T15:29:01.470117Z node 3 :QUOTER_PROXY INFO: kesus_quoter_proxy.cpp:1148: [/Path/KesusName]: Reconnecting to kesus
2026-01-08T15:29:01.470361Z node 3 :QUOTER_PROXY DEBUG: kesus_quoter_proxy.cpp:800: [/Path/KesusName]: Successfully connected to tablet
2026-01-08T15:29:01.490768Z node 4 :QUOTER_PROXY INFO: kesus_quoter_proxy.cpp:1091: [/Path/KesusName]: Created kesus quoter proxy. Tablet id: 100500
2026-01-08T15:29:01.490883Z node 4 :QUOTER_PROXY DEBUG: kesus_quoter_proxy.cpp:1150: [/Path/KesusName]: Connecting to kesus
2026-01-08T15:29:01.491304Z node 4 :QUOTER_PROXY INFO: kesus_quoter_proxy.cpp:486: [/Path/KesusName]: ProxyRequest "/resource"
2026-01-08T15:29:01.491382Z node 4 :QUOTER_PROXY WARN: kesus_quoter_proxy.cpp:493: [/Path/KesusName]: Resource "/resource" has incorrect name. Maybe this was some error on client side.
2026-01-08T15:29:01.491423Z node 4 :QUOTER_PROXY TRACE: kesus_quoter_proxy.cpp:356: [/Path/KesusName]: ProxySession("/resource", Error: GenericError)
2026-01-08T15:29:01.491496Z node 4 :QUOTER_PROXY DEBUG: kesus_quoter_proxy.cpp:800: [/Path/KesusName]: Successfully connected to tablet
2026-01-08T15:29:01.491593Z node 4 :QUOTER_PROXY INFO: kesus_quoter_proxy.cpp:486: [/Path/KesusName]: ProxyRequest "resource//resource"
2026-01-08T15:29:01.491639Z node 4 :QUOTER_PROXY WARN: kesus_quoter_proxy.cpp:493: [/Path/KesusName]: Resource "resource//resource" has incorrect name. Maybe this was some error on client side.
2026-01-08T15:29:01.491677Z node 4 :QUOTER_PROXY TRACE: kesus_quoter_proxy.cpp:356: [/Path/KesusName]: ProxySession("resource//resource", Error: GenericError)
2026-01-08T15:29:01.503825Z node 5 :QUOTER_PROXY INFO: kesus_quoter_proxy.cpp:1091: [/Path/KesusName]: Created kesus quoter proxy. Tablet id: 100500
2026-01-08T15:29:01.503943Z node 5 :QUOTER_PROXY DEBUG: kesus_quoter_proxy.cpp:1150: [/Path/KesusName]: Connecting to kesus
2026-01-08T15:29:01.504074Z node 5 :QUOTER_PROXY INFO: kesus_quoter_proxy.cpp:486: [/Path/KesusName]: ProxyRequest "res"
2026-01-08T15:29:01.504432Z node 5 :QUOTER_PROXY DEBUG: kesus_quoter_proxy.cpp:800: [/Path/KesusName]: Successfully connected to tablet
2026-01-08T15:29:01.514143Z node 5 :QUOTER_PROXY TRACE: kesus_quoter_proxy.cpp:839: [/Path/KesusName]: SubscribeOnResourceResult({ Results { ResourceId: 42 Error { Status: SUCCESS } EffectiveProps { ResourceId: 42 HierarchicalDRRResourceConfig { MaxUnitsPerSecond: 100 } } } })
2026-01-08T15:29:01.514227Z node 5 :QUOTER_PROXY INFO: kesus_quoter_proxy.cpp:848: [/Path/KesusName]: Initialized new session with resource "res"
2026-01-08T15:29:01.514278Z node 5 :QUOTER_PROXY TRACE: kesus_quoter_proxy.cpp:382: [/Path/KesusName]: ProxySession("res", 42)
2026-01-08T15:29:01.514369Z node 5 :QUOTER_PROXY TRACE: kesus_quoter_proxy.cpp:1027: [/Path/KesusName]: ProxyUpdate(Normal, [{ "res", Normal, {0: Front(20, 2)} }])
2026-01-08T15:29:01.522590Z node 6 :QUOTER_PROXY INFO: kesus_quoter_proxy.cpp:1091: [/Path/KesusName]: Created kesus quoter proxy. Tablet id: 100500
2026-01-08T15:29:01.522707Z node 6 :QUOTER_PROXY DEBUG: kesus_quoter_proxy.cpp:1150: [/Path/KesusName]: Connecting to kesus
2026-01-08T15:29:01.522994Z node 6 :QUOTER_PROXY INFO: kesus_quoter_proxy.cpp:486: [/Path/KesusName]: ProxyRequest "res0"
2026-01-08T15:29:01.523220Z node 6 :QUOTER_PROXY DEBUG: kesus_quoter_proxy.cpp:800: [/Path/KesusName]: Successfully connected to tablet
2026-01-08T15:29:01.523547Z node 6 :QUOTER_PROXY TRACE: kesus_quoter_proxy.cpp:839: [/Path/KesusName]: SubscribeOnResourceResult({ Results { ResourceId: 42 Error { Status: SUCCESS } EffectiveProps { ResourceId: 42 HierarchicalDRRResourceConfig { MaxUnitsPerSecond: 5 } } } })
2026-01-08T15:29:01.523593Z node 6 :QUOTER_PROXY INFO: kesus_quoter_proxy.cpp:848: [/Path/KesusName]: Initialized new session with resource "res0"
2026-01-08T15:29:01.523652Z node 6 :QUOTER_PROXY TRACE: kesus_quoter_proxy.cpp:382: [/Path/KesusName]: ProxySession("res0", 42)
2026-01-08T15:29:01.523781Z node 6 :QUOTER_PROXY TRACE: kesus_quoter_proxy.cpp:1027: [/Path/KesusName]: ProxyUpdate(Normal, [{ "res0", Normal, {0: Front(1, 2)} }])
2026-01-08T15:29:01.523907Z node 6 :QUOTER_PROXY INFO: kesus_quoter_proxy.cpp:486: [/Path/KesusName]: ProxyRequest "res1"
2026-01-08T15:29:01.523997Z node 6 :QUOTER_PROXY DEBUG: kesus_quoter_proxy.cpp:517: [/Path/KesusName]: Subscribe on resource "res1"
2026-01-08T15:29:01.524224Z node 6 :QUOTER_PROXY TRACE: kesus_quoter_proxy.cpp:839: [/Path/KesusName]: SubscribeOnResourceResult({ Results { ResourceId: 43 Error { Status: SUCCESS } EffectiveProps { ResourceId: 43 HierarchicalDRRResourceConfig { MaxUnitsPerSecond: 5 } } } })
2026-01-08T15:29:01.524263Z node 6 :QUOTER_PROXY INFO: kesus_quoter_proxy.cpp:848: [/Path/KesusName]: Initialized new session with resource "res1"
2026-01-08T15:29:01.524299Z node 6 :QUOTER_PROXY TRACE: kesus_quoter_proxy.cpp:382: [/Path/KesusName]: ProxySession("res1", 43)
2026-01-08T15:29:01.524346Z node 6 :QUOTER_PROXY TRACE: kesus_quoter_proxy.cpp:1027: [/Path/KesusName]: ProxyUpdate(Normal, [{ "res1", Normal, {0: Front(1, 2)} }])
2026-01-08T15:29:01.524454Z node 6 :QUOTER_PROXY INFO: kesus_quoter_proxy.cpp:486: [/Path/KesusName]: ProxyRequest "res2"
2026-01-08T15:29:01.524539Z node 6 :QUOTER_PROXY DEBUG: kesus_quoter_proxy.cpp:517: [/Path/KesusName]: Subscribe on resource "res2"
2026-01-08T15:29:01.524721Z node 6 :QUOTER_PROXY TRACE: kesus_quoter_proxy.cpp:839: [/Path/KesusName]: SubscribeOnResourceResult({ Results { ResourceId: 44 Error { Status: SUCCESS } EffectiveProps { ResourceId: 44 HierarchicalDRRResourceConfig { MaxUnitsPerSecond: 5 } } } })
2026-01-08T15:29:01.524758Z node 6 :QUOTER_PROXY INFO: kesus_quoter_proxy.cpp:848: [/Path/KesusName]: Initialized new session with resource "res2"
2026-01-08T15:29:01.524793Z node 6 :QUOTER_PROXY TRACE: kesus_quoter_proxy.cpp:382: [/Path/KesusName]: ProxySession("res2", 44)
2026-01-08T15:29:01.524858Z node 6 :QUOTER_PROXY TRACE: kesus_quoter_proxy.cpp:1027: [/Path/KesusName]: ProxyUpdate(Normal, [{ "res2", Normal, {0: Front(1, 2)} }])
2026-01-08T15:29:01.525086Z node 6 :QUOTER_PROXY TRACE: kesus_quoter_proxy.cpp:732: [/Path/KesusName]: ProxyStats([{"res1", Consumed: 0, Queue: 5}])
2026-01-08T15:29:01.525128Z node 6 :QUOTER_PROXY TRACE: kesus_quoter_proxy.cpp:753: [/Path/KesusName]: Set info for resource "res1": { Available: 1, QueueWeight: 5 }
2026-01-08T15:29:01.525152Z node 6 :QUOTER_PROXY INFO: kesus_quoter_proxy.cpp:662: [/Path/KesusName]: Activate session to "res1". Connected: 1
2026-01-08T15:29:01.528472Z node 6 :QUOTER_PROXY TRACE: kesus_quoter_proxy.cpp:585: [/Path/KesusName]: UpdateConsumptionState({ ResourcesInfo { ResourceId: 43 ConsumeResource: true Amount: inf } ActorID { RawX1: 5 RawX2: 25769805828 } })
2026-01-08T15:29:01.528604Z node 6 :QUOTER_PROXY TRACE: kesus_quoter_proxy.cpp:1027: [/Path/KesusName]: ProxyUpdate(Normal, [{ "res1", Normal, {0: Front(1, 2)} }])
2026-01-08T15:29:01.528978Z node 6 :QUOTER_PROXY WARN: kesus_quoter_proxy.cpp:824: [/Path/KesusName]: Disconnected from tablet
2026-01-08T15:29:01.529017Z node 6 :QUOTER_PROXY INFO: kesus_quoter_proxy.cpp:1148: [/Path/KesusName]: Reconnecting to kesus
2026-01-08T15:29:01.529170Z node 6 :QUOTER_PROXY TRACE: kesus_quoter_proxy.cpp:640: [/Path/KesusName]: Mark "res1" for offline allocation. Connected: 0, SessionIsActive: 1, AverageDuration: 0.100000s, AverageAmount: 0.5
2026-01-08T15:29:01.529223Z node 6 :QUOTER_PROXY TRACE: kesus_quoter_proxy.cpp:614: [/Path/KesusName]: Schedule offline allocation in 0.000000s: [{ "res1", 0.5 }]
2026-01-08T15:29:01.529454Z node 6 :QUOTER_PROXY DEBUG: kesus_quoter_proxy.cpp:800: [/Path/KesusName]: Successfully connected to tablet
2026-01-08T15:29:01.529794Z node 6 :QUOTER_PROXY TRACE: kesus_quoter_proxy.cpp:839: [/Path/KesusName]: SubscribeOnResourceResult({ Results { ResourceId: 42 Error { Status: SUCCESS } EffectiveProps { ResourceId: 42 HierarchicalDRRResourceConfig { MaxUnitsPerSecond: 5 } } } Results { ResourceId: 43 Error { Status: SUCCESS } EffectiveProps { ResourceId: 43 HierarchicalDRRResourceConfig { MaxUnitsPerSecond: ...
pp:753: [/dc-1/KesusQuoter]: Set info for resource "Resource": { Available: -1.000316619, QueueWeight: 5 }
2026-01-08T15:31:44.619374Z node 50 :QUOTER_PROXY TRACE: kesus_quoter_proxy.cpp:1027: [/dc-1/KesusQuoter]: ProxyUpdate(Normal, [{ "Resource", Normal, {0: Sustained(0, 0)} }])
2026-01-08T15:31:44.619819Z node 50 :QUOTER_SERVICE DEBUG: quoter_service.cpp:1087: ProxyUpdate for quoter /dc-1/KesusQuoter
2026-01-08T15:31:44.700887Z node 51 :KESUS_TABLET TRACE: quoter_runtime.cpp:93: [72075186224037888] Send TEvResourcesAllocated to [50:7593013856000236924:2292]. Cookie: 0. Data: { ResourcesInfo { ResourceId: 1 Amount: 1 StateNotification { Status: SUCCESS } } }
2026-01-08T15:31:44.702559Z node 50 :QUOTER_PROXY TRACE: kesus_quoter_proxy.cpp:873: [/dc-1/KesusQuoter]: ResourcesAllocated({ ResourcesInfo { ResourceId: 1 Amount: 1 StateNotification { Status: SUCCESS } } })
2026-01-08T15:31:44.702621Z node 50 :QUOTER_PROXY TRACE: kesus_quoter_proxy.cpp:882: [/dc-1/KesusQuoter]: Kesus allocated {"Resource", 1}
2026-01-08T15:31:44.702668Z node 50 :QUOTER_PROXY TRACE: kesus_quoter_proxy.cpp:1027: [/dc-1/KesusQuoter]: ProxyUpdate(Normal, [{ "Resource", Normal, {0: Sustained(0, 0)} }])
2026-01-08T15:31:44.702724Z node 50 :QUOTER_SERVICE DEBUG: quoter_service.cpp:1087: ProxyUpdate for quoter /dc-1/KesusQuoter
2026-01-08T15:31:44.714535Z node 50 :QUOTER_SERVICE TRACE: quoter_service.cpp:1303: Feed resource "Resource". Balance: 0. FreeBalance: 0
2026-01-08T15:31:44.714596Z node 50 :QUOTER_SERVICE TRACE: quoter_service.cpp:346: Schedule next tick for "Resource". Tick size: 0.100000s.
Time: 2026-01-08T15:31:44.814000Z 2026-01-08T15:31:44.714617Z node 50 :QUOTER_SERVICE TRACE: quoter_service.cpp:1338: Allocate resource "Resource" 2026-01-08T15:31:44.718442Z node 50 :QUOTER_PROXY TRACE: kesus_quoter_proxy.cpp:732: [/dc-1/KesusQuoter]: ProxyStats([{"Resource", Consumed: 0, Queue: 5}]) 2026-01-08T15:31:44.718494Z node 50 :QUOTER_PROXY TRACE: kesus_quoter_proxy.cpp:753: [/dc-1/KesusQuoter]: Set info for resource "Resource": { Available: -0.0003166186659, QueueWeight: 5 } 2026-01-08T15:31:44.718540Z node 50 :QUOTER_PROXY TRACE: kesus_quoter_proxy.cpp:1027: [/dc-1/KesusQuoter]: ProxyUpdate(Normal, [{ "Resource", Normal, {0: Sustained(0, 0)} }]) 2026-01-08T15:31:44.718886Z node 50 :QUOTER_SERVICE DEBUG: quoter_service.cpp:1087: ProxyUpdate for quoter /dc-1/KesusQuoter 2026-01-08T15:31:44.802654Z node 51 :KESUS_TABLET TRACE: quoter_runtime.cpp:93: [72075186224037888] Send TEvResourcesAllocated to [50:7593013856000236924:2292]. Cookie: 0. Data: { ResourcesInfo { ResourceId: 1 Amount: 1 StateNotification { Status: SUCCESS } } } 2026-01-08T15:31:44.810524Z node 50 :QUOTER_PROXY TRACE: kesus_quoter_proxy.cpp:873: [/dc-1/KesusQuoter]: ResourcesAllocated({ ResourcesInfo { ResourceId: 1 Amount: 1 StateNotification { Status: SUCCESS } } }) 2026-01-08T15:31:44.810574Z node 50 :QUOTER_PROXY TRACE: kesus_quoter_proxy.cpp:882: [/dc-1/KesusQuoter]: Kesus allocated {"Resource", 1} 2026-01-08T15:31:44.810636Z node 50 :QUOTER_PROXY TRACE: kesus_quoter_proxy.cpp:1027: [/dc-1/KesusQuoter]: ProxyUpdate(Normal, [{ "Resource", Normal, {0: Front(0.9996833813, 2)} }]) 2026-01-08T15:31:44.810895Z node 50 :QUOTER_SERVICE DEBUG: quoter_service.cpp:1087: ProxyUpdate for quoter /dc-1/KesusQuoter 2026-01-08T15:31:44.814411Z node 50 :QUOTER_SERVICE TRACE: quoter_service.cpp:1303: Feed resource "Resource". Balance: 0.9996833813. FreeBalance: 0.9996833813 2026-01-08T15:31:44.814458Z node 50 :QUOTER_SERVICE TRACE: quoter_service.cpp:346: Schedule next tick for "Resource". Tick size: 0.100000s. Time: 2026-01-08T15:31:44.914000Z 2026-01-08T15:31:44.814480Z node 50 :QUOTER_SERVICE TRACE: quoter_service.cpp:1338: Allocate resource "Resource" 2026-01-08T15:31:44.814559Z node 50 :QUOTER_SERVICE TRACE: quoter_service.cpp:275: Charge "Resource" for 5. Balance: 0.9996833813. FreeBalance: 0.9996833813. TicksToFullfill: 5.001583595. DurationToFullfillInUs: 500158.3595. TimeToFullfill: 2026-01-08T15:31:44.411705Z. Now: 2026-01-08T15:31:44.814190Z. LastAllocated: 2026-01-08T15:31:43.911547Z 2026-01-08T15:31:44.815072Z node 50 :QUOTER_PROXY TRACE: kesus_quoter_proxy.cpp:732: [/dc-1/KesusQuoter]: ProxyStats([{"Resource", Consumed: 5, Queue: 0}]) 2026-01-08T15:31:44.815111Z node 50 :QUOTER_PROXY TRACE: kesus_quoter_proxy.cpp:753: [/dc-1/KesusQuoter]: Set info for resource "Resource": { Available: -4.000316619, QueueWeight: 0 } 2026-01-08T15:31:44.815151Z node 50 :QUOTER_PROXY TRACE: kesus_quoter_proxy.cpp:1027: [/dc-1/KesusQuoter]: ProxyUpdate(Normal, [{ "Resource", Normal, {0: Sustained(0, 0)} }]) 2026-01-08T15:31:44.817870Z node 50 :QUOTER_SERVICE DEBUG: quoter_service.cpp:1087: ProxyUpdate for quoter /dc-1/KesusQuoter 2026-01-08T15:31:44.907446Z node 51 :KESUS_TABLET TRACE: quoter_runtime.cpp:93: [72075186224037888] Send TEvResourcesAllocated to [50:7593013856000236924:2292]. Cookie: 0. 
Data: { ResourcesInfo { ResourceId: 1 Amount: 1 StateNotification { Status: SUCCESS } } } 2026-01-08T15:31:44.908017Z node 50 :QUOTER_PROXY TRACE: kesus_quoter_proxy.cpp:873: [/dc-1/KesusQuoter]: ResourcesAllocated({ ResourcesInfo { ResourceId: 1 Amount: 1 StateNotification { Status: SUCCESS } } }) 2026-01-08T15:31:44.908065Z node 50 :QUOTER_PROXY TRACE: kesus_quoter_proxy.cpp:882: [/dc-1/KesusQuoter]: Kesus allocated {"Resource", 1} 2026-01-08T15:31:44.908126Z node 50 :QUOTER_PROXY TRACE: kesus_quoter_proxy.cpp:1027: [/dc-1/KesusQuoter]: ProxyUpdate(Normal, [{ "Resource", Normal, {0: Sustained(0, 0)} }]) 2026-01-08T15:31:44.908379Z node 50 :QUOTER_SERVICE DEBUG: quoter_service.cpp:1087: ProxyUpdate for quoter /dc-1/KesusQuoter 2026-01-08T15:31:44.916441Z node 50 :QUOTER_SERVICE TRACE: quoter_service.cpp:1303: Feed resource "Resource". Balance: 0. FreeBalance: 0 2026-01-08T15:31:44.959004Z node 50 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:154: TClient[72057594046382081] send [50:7593013851705269165:2446] 2026-01-08T15:31:44.959049Z node 50 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:645: TClient[72057594046382081] push event to server [50:7593013851705269165:2446] 2026-01-08T15:31:45.002850Z node 51 :KESUS_TABLET TRACE: quoter_runtime.cpp:93: [72075186224037888] Send TEvResourcesAllocated to [50:7593013856000236924:2292]. Cookie: 0. Data: { ResourcesInfo { ResourceId: 1 Amount: 1 StateNotification { Status: SUCCESS } } } 2026-01-08T15:31:45.003462Z node 50 :QUOTER_PROXY TRACE: kesus_quoter_proxy.cpp:873: [/dc-1/KesusQuoter]: ResourcesAllocated({ ResourcesInfo { ResourceId: 1 Amount: 1 StateNotification { Status: SUCCESS } } }) 2026-01-08T15:31:45.003513Z node 50 :QUOTER_PROXY TRACE: kesus_quoter_proxy.cpp:882: [/dc-1/KesusQuoter]: Kesus allocated {"Resource", 1} 2026-01-08T15:31:45.003562Z node 50 :QUOTER_PROXY TRACE: kesus_quoter_proxy.cpp:1027: [/dc-1/KesusQuoter]: ProxyUpdate(Normal, [{ "Resource", Normal, {0: Sustained(0, 0)} }]) 2026-01-08T15:31:45.003754Z node 50 :QUOTER_SERVICE DEBUG: quoter_service.cpp:1087: ProxyUpdate for quoter /dc-1/KesusQuoter 2026-01-08T15:31:45.003806Z node 50 :QUOTER_SERVICE TRACE: quoter_service.cpp:1303: Feed resource "Resource". Balance: 0. FreeBalance: 0 2026-01-08T15:31:45.118811Z node 51 :KESUS_TABLET TRACE: quoter_runtime.cpp:93: [72075186224037888] Send TEvResourcesAllocated to [50:7593013856000236924:2292]. Cookie: 0. Data: { ResourcesInfo { ResourceId: 1 Amount: 1 StateNotification { Status: SUCCESS } } } 2026-01-08T15:31:45.122991Z node 50 :QUOTER_PROXY TRACE: kesus_quoter_proxy.cpp:873: [/dc-1/KesusQuoter]: ResourcesAllocated({ ResourcesInfo { ResourceId: 1 Amount: 1 StateNotification { Status: SUCCESS } } }) 2026-01-08T15:31:45.123067Z node 50 :QUOTER_PROXY TRACE: kesus_quoter_proxy.cpp:882: [/dc-1/KesusQuoter]: Kesus allocated {"Resource", 1} 2026-01-08T15:31:45.123126Z node 50 :QUOTER_PROXY TRACE: kesus_quoter_proxy.cpp:1027: [/dc-1/KesusQuoter]: ProxyUpdate(Normal, [{ "Resource", Normal, {0: Sustained(0, 0)} }]) 2026-01-08T15:31:45.123552Z node 50 :QUOTER_SERVICE DEBUG: quoter_service.cpp:1087: ProxyUpdate for quoter /dc-1/KesusQuoter 2026-01-08T15:31:45.123585Z node 50 :QUOTER_SERVICE TRACE: quoter_service.cpp:1303: Feed resource "Resource". Balance: 0. 
FreeBalance: 0 2026-01-08T15:31:45.957648Z node 50 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:154: TClient[72057594046382081] send [50:7593013851705269165:2446] 2026-01-08T15:31:45.957696Z node 50 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:645: TClient[72057594046382081] push event to server [50:7593013851705269165:2446] 2026-01-08T15:31:46.594553Z node 50 :METADATA_PROVIDER ERROR: log.cpp:841: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[50:7593013847410301175:2080];send_to=[0:7307199536658146131:7762515]; 2026-01-08T15:31:46.594661Z node 50 :METADATA_PROVIDER ERROR: log.cpp:841: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=timeout; 2026-01-08T15:31:46.627707Z node 50 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:154: TClient[72057594037968897] send [50:7593013847410301184:2111] 2026-01-08T15:31:46.627756Z node 50 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:645: TClient[72057594037968897] push event to server [50:7593013847410301184:2111] 2026-01-08T15:31:46.959747Z node 50 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:154: TClient[72057594046382081] send [50:7593013851705269165:2446] 2026-01-08T15:31:46.959790Z node 50 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:645: TClient[72057594046382081] push event to server [50:7593013851705269165:2446] 2026-01-08T15:31:47.882516Z node 50 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:154: TClient[72057594037968897] send [50:7593013847410301184:2111] 2026-01-08T15:31:47.882558Z node 50 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:645: TClient[72057594037968897] push event to server [50:7593013847410301184:2111] 2026-01-08T15:31:47.959830Z node 50 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:154: TClient[72057594046382081] send [50:7593013851705269165:2446] 2026-01-08T15:31:47.959902Z node 50 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:645: TClient[72057594046382081] push event to server [50:7593013851705269165:2446] 2026-01-08T15:31:48.961221Z node 50 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:154: TClient[72057594046382081] send [50:7593013851705269165:2446] 2026-01-08T15:31:48.961266Z node 50 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:645: TClient[72057594046382081] push event to server [50:7593013851705269165:2446] 2026-01-08T15:31:49.963843Z node 50 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:154: TClient[72057594046382081] send [50:7593013851705269165:2446] 2026-01-08T15:31:49.963900Z node 50 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:645: TClient[72057594046382081] push event to server [50:7593013851705269165:2446] 2026-01-08T15:31:50.955552Z node 50 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:154: TClient[72057594046382081] send [50:7593013851705269165:2446] 2026-01-08T15:31:50.955590Z node 50 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:645: TClient[72057594046382081] push event to server [50:7593013851705269165:2446] 2026-01-08T15:31:51.480729Z node 50 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:154: TClient[72057594037932033] send [50:7593013847410301140:2094] 2026-01-08T15:31:51.480760Z node 50 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:645: TClient[72057594037932033] push event to server [50:7593013847410301140:2094] 2026-01-08T15:31:51.694013Z node 50 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:154: TClient[72057594037968897] send [50:7593013847410301184:2111] 2026-01-08T15:31:51.694059Z node 50 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:645: TClient[72057594037968897] push event to server [50:7593013847410301184:2111] 2026-01-08T15:31:51.957630Z 
node 50 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:154: TClient[72057594046382081] send [50:7593013851705269165:2446] 2026-01-08T15:31:51.957685Z node 50 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:645: TClient[72057594046382081] push event to server [50:7593013851705269165:2446]
|86.6%| [TM] {BAZEL_UPLOAD} ydb/core/quoter/ut/unittest
>> KqpPg::CreateUniqPgColumn+useSink [GOOD]
>> KqpPg::CreateUniqPgColumn-useSink
|86.6%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/datashard/ut_snapshot/ydb-core-tx-datashard-ut_snapshot
|86.6%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/datashard/ut_snapshot/ydb-core-tx-datashard-ut_snapshot
|86.6%| [TM] {RESULT} ydb/core/quoter/ut/unittest
|86.6%| [LD] {RESULT} $(B)/ydb/core/tx/datashard/ut_snapshot/ydb-core-tx-datashard-ut_snapshot
>> TMLPTests::AddWithRetentionStorage [GOOD]
>> TMLPTests::SetRetentionStorage
|86.6%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/replication/controller/ut_target_discoverer/replication-controller-ut_target_discoverer
>> TxUsage::Sinks_Oltp_WriteToTopic_1_Table
|86.6%| [LD] {RESULT} $(B)/ydb/core/tx/replication/controller/ut_target_discoverer/replication-controller-ut_target_discoverer
|86.6%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/replication/controller/ut_target_discoverer/replication-controller-ut_target_discoverer
>> TSelectFromViewTest::ReadTestCasesFromFiles [GOOD]
>> TSelectFromViewTest::QueryCacheIsUpdated
>> TxUsage::WriteToTopic_Demo_41_Table [GOOD]
>> THiveTest::TestCreateTabletChangeToExternal [GOOD]
>> THiveTest::TestDownCounterDeleteNode
>> TColumnShardTestReadWrite::CompactionInGranule_PKInt32_Reboot [GOOD]
>> DataShardStats::HasSchemaChanges_ByKeyFilter [GOOD]
>> DataShardStats::HasSchemaChanges_Columns
>> TxUsage::WriteToTopic_Demo_42_Table
>> RetryPolicy::TWriteSession_SwitchBackToLocalCluster [GOOD]
>> RetryPolicy::TWriteSession_SeqNoShift
>> TControlPlaneProxyShouldPassHids::ShouldCheckScenario [GOOD]
>> TControlPlaneProxyTest::ShouldSendCreateQuery
>> KqpPg::InsertFromSelect_NoReorder+useSink [GOOD]
>> KqpPg::DropTablePg
>> THiveTest::TestDownCounterDeleteNode [GOOD]
|86.6%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/tx_proxy/ut_schemereq/ydb-core-tx-tx_proxy-ut_schemereq
|86.6%| [LD] {RESULT} $(B)/ydb/core/tx/tx_proxy/ut_schemereq/ydb-core-tx-tx_proxy-ut_schemereq
|86.6%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/tx_proxy/ut_schemereq/ydb-core-tx-tx_proxy-ut_schemereq
>> KqpScripting::StreamExecuteYqlScriptScanWriteCancelAfterBruteForced
>> DataShardVolatile::DistributedWriteThenLateWriteReadCommit [GOOD]
>> DataShardVolatile::TwoAppendsMustBeVolatile+UseSink
>> TControlPlaneProxyTest::ShouldSendCreateQuery [GOOD]
>> TControlPlaneProxyTest::FailsOnCreateQueryWhenRateLimiterResourceNotCreated
>> KqpPg::InsertValuesFromTableWithDefaultText-useSink [GOOD]
>> KqpPg::InsertValuesFromTableWithDefaultTextNotNull+useSink
>> KqpPg::InsertValuesFromTableWithDefault-useSink [GOOD]
>> KqpPg::InsertValuesFromTableWithDefaultAndCast+useSink
------- [TM] {asan, default-linux-x86_64, release} ydb/core/mind/hive/ut/unittest >> THiveTest::TestDownCounterDeleteNode [GOOD]
Test command err:
2026-01-08T15:30:55.049641Z node 3 :BS_NODE DEBUG: {NW26@node_warden_impl.cpp:348} Bootstrap 2026-01-08T15:30:55.116787Z node 3 :BS_NODE DEBUG: {NW18@node_warden_resource.cpp:51} ApplyServiceSet IsStatic# true Comprehensive# false Origin# initial ServiceSet# {PDisks { NodeID: 1 PDiskID: 1 Path: "SectorMap:0:3200" PDiskGuid: 1 } PDisks { NodeID: 2 PDiskID: 1 Path:
"SectorMap:1:3200" PDiskGuid: 2 } PDisks { NodeID: 3 PDiskID: 1 Path: "SectorMap:2:3200" PDiskGuid: 3 } VDisks { VDiskID { GroupID: 0 GroupGeneration: 1 Ring: 0 Domain: 0 VDisk: 0 } VDiskLocation { NodeID: 1 PDiskID: 1 VDiskSlotID: 0 PDiskGuid: 1 } } Groups { GroupID: 0 GroupGeneration: 1 ErasureSpecies: 0 Rings { FailDomains { VDiskLocations { NodeID: 1 PDiskID: 1 VDiskSlotID: 0 PDiskGuid: 1 } } } } AvailabilityDomains: 0 } 2026-01-08T15:30:55.117126Z node 3 :BS_NODE DEBUG: {NW04@node_warden_pdisk.cpp:235} StartLocalPDisk NodeId# 3 PDiskId# 1 Path# "SectorMap:2:3200" PDiskCategory# {Type# DEVICE_TYPE_ROT Kind# 0} Temporary# false 2026-01-08T15:30:55.118146Z node 3 :BS_NODE WARN: {NW89@node_warden_pdisk.cpp:124} Can't write new MockDevicesConfig to file Path# /Berkanavt/kikimr/testing/mock_devices.txt 2026-01-08T15:30:55.118588Z node 3 :BS_NODE DEBUG: {NW12@node_warden_proxy.cpp:36} StartLocalProxy GroupId# 0 HasGroupInfo# true GroupInfoGeneration# 1 2026-01-08T15:30:55.119673Z node 3 :BS_NODE DEBUG: {NW21@node_warden_pipe.cpp:23} EstablishPipe AvailDomainId# 0 PipeClientId# [3:73:2076] ControllerId# 72057594037932033 2026-01-08T15:30:55.119721Z node 3 :BS_NODE DEBUG: {NW20@node_warden_pipe.cpp:73} SendRegisterNode 2026-01-08T15:30:55.119833Z node 3 :BS_NODE DEBUG: {NW11@node_warden_impl.cpp:323} StartInvalidGroupProxy GroupId# 4294967295 2026-01-08T15:30:55.120001Z node 3 :BS_NODE DEBUG: {NW62@node_warden_impl.cpp:335} StartRequestReportingThrottler 2026-01-08T15:30:55.139948Z node 3 :BS_PROXY INFO: dsproxy_state.cpp:184: Group# 0 TEvConfigureProxy received GroupGeneration# 1 IsLimitedKeyless# false Marker# DSP02 2026-01-08T15:30:55.140035Z node 3 :BS_PROXY NOTICE: dsproxy_state.cpp:344: EnsureMonitoring Group# 0 IsLimitedKeyless# 0 fullIfPossible# 0 Marker# DSP58 2026-01-08T15:30:55.144532Z node 3 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 0 Actor# [3:72:2075] Create Queue# [3:80:2080] targetNodeId# 1 Marker# DSP01 2026-01-08T15:30:55.144741Z node 3 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 0 Actor# [3:72:2075] Create Queue# [3:81:2081] targetNodeId# 1 Marker# DSP01 2026-01-08T15:30:55.144912Z node 3 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 0 Actor# [3:72:2075] Create Queue# [3:82:2082] targetNodeId# 1 Marker# DSP01 2026-01-08T15:30:55.145055Z node 3 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 0 Actor# [3:72:2075] Create Queue# [3:83:2083] targetNodeId# 1 Marker# DSP01 2026-01-08T15:30:55.145209Z node 3 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 0 Actor# [3:72:2075] Create Queue# [3:84:2084] targetNodeId# 1 Marker# DSP01 2026-01-08T15:30:55.145352Z node 3 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 0 Actor# [3:72:2075] Create Queue# [3:85:2085] targetNodeId# 1 Marker# DSP01 2026-01-08T15:30:55.145485Z node 3 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 0 Actor# [3:72:2075] Create Queue# [3:86:2086] targetNodeId# 1 Marker# DSP01 2026-01-08T15:30:55.145515Z node 3 :BS_PROXY INFO: dsproxy_state.cpp:31: Group# 0 SetStateEstablishingSessions Marker# DSP03 2026-01-08T15:30:55.145623Z node 3 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:49: TClient[72057594037932033] ::Bootstrap [3:73:2076] 2026-01-08T15:30:55.145662Z node 3 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:542: TClient[72057594037932033] lookup [3:73:2076] 2026-01-08T15:30:55.145714Z node 3 :BS_PROXY NOTICE: dsproxy_state.cpp:284: Group# 4294967295 HasInvalidGroupId# 1 Bootstrap -> StateEjected Marker# DSP42 2026-01-08T15:30:55.145784Z node 3 :BS_NODE DEBUG: {NWDC00@distconf.cpp:27} Bootstrap 
2026-01-08T15:30:55.146473Z node 3 :BS_NODE DEBUG: {NWDC40@distconf_persistent_storage.cpp:25} TReaderActor bootstrap Paths# [] 2026-01-08T15:30:55.146645Z node 1 :BS_NODE DEBUG: {NW26@node_warden_impl.cpp:348} Bootstrap 2026-01-08T15:30:55.162126Z node 1 :BS_NODE DEBUG: {NW18@node_warden_resource.cpp:51} ApplyServiceSet IsStatic# true Comprehensive# false Origin# initial ServiceSet# {PDisks { NodeID: 1 PDiskID: 1 Path: "SectorMap:0:3200" PDiskGuid: 1 } PDisks { NodeID: 2 PDiskID: 1 Path: "SectorMap:1:3200" PDiskGuid: 2 } PDisks { NodeID: 3 PDiskID: 1 Path: "SectorMap:2:3200" PDiskGuid: 3 } VDisks { VDiskID { GroupID: 0 GroupGeneration: 1 Ring: 0 Domain: 0 VDisk: 0 } VDiskLocation { NodeID: 1 PDiskID: 1 VDiskSlotID: 0 PDiskGuid: 1 } } Groups { GroupID: 0 GroupGeneration: 1 ErasureSpecies: 0 Rings { FailDomains { VDiskLocations { NodeID: 1 PDiskID: 1 VDiskSlotID: 0 PDiskGuid: 1 } } } } AvailabilityDomains: 0 } 2026-01-08T15:30:55.162385Z node 1 :BS_NODE DEBUG: {NW04@node_warden_pdisk.cpp:235} StartLocalPDisk NodeId# 1 PDiskId# 1 Path# "SectorMap:0:3200" PDiskCategory# {Type# DEVICE_TYPE_ROT Kind# 0} Temporary# false 2026-01-08T15:30:55.162774Z node 1 :BS_NODE WARN: {NW89@node_warden_pdisk.cpp:124} Can't write new MockDevicesConfig to file Path# /Berkanavt/kikimr/testing/mock_devices.txt 2026-01-08T15:30:55.163089Z node 1 :BS_NODE DEBUG: {NW23@node_warden_vdisk.cpp:69} StartLocalVDiskActor SlayInFlight# false VDiskId# [0:1:0:0:0] VSlotId# 1:1:0 PDiskGuid# 1 DonorMode# false PDiskRestartInFlight# false PDisksWaitingToStart# false 2026-01-08T15:30:55.164858Z node 1 :BS_NODE DEBUG: {NW24@node_warden_vdisk.cpp:287} StartLocalVDiskActor done VDiskId# [0:1:0:0:0] VSlotId# 1:1:0 PDiskGuid# 1 2026-01-08T15:30:55.164928Z node 1 :BS_NODE DEBUG: {NW12@node_warden_proxy.cpp:36} StartLocalProxy GroupId# 0 HasGroupInfo# true GroupInfoGeneration# 1 2026-01-08T15:30:55.165913Z node 1 :BS_NODE DEBUG: {NW21@node_warden_pipe.cpp:23} EstablishPipe AvailDomainId# 0 PipeClientId# [1:97:2078] ControllerId# 72057594037932033 2026-01-08T15:30:55.165956Z node 1 :BS_NODE DEBUG: {NW20@node_warden_pipe.cpp:73} SendRegisterNode 2026-01-08T15:30:55.166043Z node 1 :BS_NODE DEBUG: {NW11@node_warden_impl.cpp:323} StartInvalidGroupProxy GroupId# 4294967295 2026-01-08T15:30:55.166196Z node 1 :BS_NODE DEBUG: {NW62@node_warden_impl.cpp:335} StartRequestReportingThrottler 2026-01-08T15:30:55.168302Z node 2 :BS_NODE DEBUG: {NW26@node_warden_impl.cpp:348} Bootstrap 2026-01-08T15:30:55.171647Z node 2 :BS_NODE DEBUG: {NW18@node_warden_resource.cpp:51} ApplyServiceSet IsStatic# true Comprehensive# false Origin# initial ServiceSet# {PDisks { NodeID: 1 PDiskID: 1 Path: "SectorMap:0:3200" PDiskGuid: 1 } PDisks { NodeID: 2 PDiskID: 1 Path: "SectorMap:1:3200" PDiskGuid: 2 } PDisks { NodeID: 3 PDiskID: 1 Path: "SectorMap:2:3200" PDiskGuid: 3 } VDisks { VDiskID { GroupID: 0 GroupGeneration: 1 Ring: 0 Domain: 0 VDisk: 0 } VDiskLocation { NodeID: 1 PDiskID: 1 VDiskSlotID: 0 PDiskGuid: 1 } } Groups { GroupID: 0 GroupGeneration: 1 ErasureSpecies: 0 Rings { FailDomains { VDiskLocations { NodeID: 1 PDiskID: 1 VDiskSlotID: 0 PDiskGuid: 1 } } } } AvailabilityDomains: 0 } 2026-01-08T15:30:55.171840Z node 2 :BS_NODE DEBUG: {NW04@node_warden_pdisk.cpp:235} StartLocalPDisk NodeId# 2 PDiskId# 1 Path# "SectorMap:1:3200" PDiskCategory# {Type# DEVICE_TYPE_ROT Kind# 0} Temporary# false 2026-01-08T15:30:55.172228Z node 2 :BS_NODE WARN: {NW89@node_warden_pdisk.cpp:124} Can't write new MockDevicesConfig to file Path# /Berkanavt/kikimr/testing/mock_devices.txt 
2026-01-08T15:30:55.172468Z node 2 :BS_NODE DEBUG: {NW12@node_warden_proxy.cpp:36} StartLocalProxy GroupId# 0 HasGroupInfo# true GroupInfoGeneration# 1 2026-01-08T15:30:55.173430Z node 2 :BS_NODE DEBUG: {NW21@node_warden_pipe.cpp:23} EstablishPipe AvailDomainId# 0 PipeClientId# [2:108:2076] ControllerId# 72057594037932033 2026-01-08T15:30:55.173470Z node 2 :BS_NODE DEBUG: {NW20@node_warden_pipe.cpp:73} SendRegisterNode 2026-01-08T15:30:55.173546Z node 2 :BS_NODE DEBUG: {NW11@node_warden_impl.cpp:323} StartInvalidGroupProxy GroupId# 4294967295 2026-01-08T15:30:55.173656Z node 2 :BS_NODE DEBUG: {NW62@node_warden_impl.cpp:335} StartRequestReportingThrottler 2026-01-08T15:30:55.188699Z node 2 :BS_PROXY INFO: dsproxy_state.cpp:184: Group# 0 TEvConfigureProxy received GroupGeneration# 1 IsLimitedKeyless# false Marker# DSP02 2026-01-08T15:30:55.188776Z node 2 :BS_PROXY NOTICE: dsproxy_state.cpp:344: EnsureMonitoring Group# 0 IsLimitedKeyless# 0 fullIfPossible# 0 Marker# DSP58 2026-01-08T15:30:55.192493Z node 2 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 0 Actor# [2:107:2075] Create Queue# [2:115:2080] targetNodeId# 1 Marker# DSP01 2026-01-08T15:30:55.192733Z node 2 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 0 Actor# [2:107:2075] Create Queue# [2:116:2081] targetNodeId# 1 Marker# DSP01 2026-01-08T15:30:55.192933Z node 2 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 0 Actor# [2:107:2075] Create Queue# [2:117:2082] targetNodeId# 1 Marker# DSP01 2026-01-08T15:30:55.193080Z node 2 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 0 Actor# [2:107:2075] Create Queue# [2:118:2083] targetNodeId# 1 Marker# DSP01 2026-01-08T15:30:55.193226Z node 2 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 0 Actor# [2:107:2075] Create Queue# [2:119:2084] targetNodeId# 1 Marker# DSP01 2026-01-08T15:30:55.193362Z node 2 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 0 Actor# [2:107:2075] Create Queue# [2:120:2085] targetNodeId# 1 Marker# DSP01 2026-01-08T15:30:55.193546Z node 2 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 0 Actor# [2:107:2075] Create Queue# [2:121:2086] targetNodeId# 1 Marker# DSP01 2026-01-08T15:30:55.193582Z node 2 :BS_PROXY INFO: dsproxy_state.cpp:31: Group# 0 SetStateEstablishingSessions Marker# DSP03 2026-01-08T15:30:55.193677Z node 2 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:49: TClient[72057594037932033] ::Bootstrap [2:108:2076] 2026-01-08T15:30:55.193709Z node 2 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:542: TClient[72057594037932033] lookup [2:108:2076] 2026-01-08T15:30:55.193776Z node 2 :BS_PROXY NOTICE: dsproxy_state.cpp:284: Group# 4294967295 HasInvalidGroupId# 1 Bootstrap -> StateEjected Marker# DSP42 2026-01-08T15:30:55.193823Z node 2 :BS_NODE DEBUG: {NWDC00@distconf.cpp:27} Bootstrap 2026-01-08T15:30:55.194552Z node 2 :BS_NODE DEBUG: {NWDC40@distconf_persistent_storage.cpp:25} TReaderActor bootstrap Paths# [] 2026-01-08T15:30:55.194741Z node 3 :TABLET_RESOLVER DEBUG: tablet_resolver.cpp:882: Handle TEvForward tabletId: 72057594037932033 entry.State: StResolve leader: [0:0:0] followers: 0 ev: {EvForward TabletID: 72057594037932033 Ev: nullptr Flags: 1:2:0} 2026-01-08T15:30:55.208962Z node 3 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:148: TClient[72057594037932033] queue send [3:73:2076] 2026-01-08T15:30:55.209063Z node 3 :BS_NODE DEBUG: {NWDC53@distconf.cpp:358} StateWaitForInit event Type# 131082 StorageConfigLoaded# false NodeListObtained# false PendingEvents.size# 0 2026-01-08T15:30:55.219368Z node 1 :BS_PROXY INFO: dsproxy_state.cpp:184: Group# 0 TEvConfigureProxy received 
GroupGeneration# 1 IsLimitedKeyless# false Marker# DSP02 2026-01-08T15:30:55.219437Z node 1 :BS_PROXY NOTICE: dsproxy_state.cpp:344: EnsureMonitoring Group# 0 IsLimitedKeyless# 0 fullIfPossible# 0 Marker# DSP58 2026-01-08T15:30:55.221460Z node 1 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 0 Actor# [1:96:2077] Create Queue# [1:129:2083] targetNodeId# 1 Marker# DSP01 2026-01-08T15:30:55.221649Z node 1 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 0 Actor# [1:96:2077] Create Queue# [1:130:2084] targetNodeId# 1 Marker# DSP01 2026-01-08T15:30:55.221795Z node 1 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 0 Actor# [1:96:2077] Create Queue# [1:131:2085] targetNodeId# 1 Marker# DSP01 2026-01-08T15:30:55.221938Z node 1 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 0 Actor# [1:96:2077] Create Queue# [1:132:2086] targetNodeId# 1 Marker# DSP01 2026-01-08T15:30:55.222103Z node 1 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 0 Actor# [1:96:2077] Create Queue# [1:133:2087] targetNodeId# ... ID: 72057594037936131 Cookie: 2} 2026-01-08T15:32:00.514104Z node 30 :STATESTORAGE DEBUG: statestorage_proxy.cpp:399: ProxyRequest::HandleLookup ringGroup:0 ev: {EvReplicaInfo Status: 1 TabletID: 72057594037936131 ClusterStateGeneration: 0 ClusterStateGuid: 0} 2026-01-08T15:32:00.514180Z node 30 :STATESTORAGE DEBUG: statestorage_proxy.cpp:399: ProxyRequest::HandleLookup ringGroup:0 ev: {EvReplicaInfo Status: 1 TabletID: 72057594037936131 ClusterStateGeneration: 0 ClusterStateGuid: 0} 2026-01-08T15:32:00.514220Z node 30 :STATESTORAGE DEBUG: statestorage_proxy.cpp:399: ProxyRequest::HandleLookup ringGroup:0 ev: {EvReplicaInfo Status: 1 TabletID: 72057594037936131 ClusterStateGeneration: 0 ClusterStateGuid: 0} 2026-01-08T15:32:00.514289Z node 30 :TABLET_RESOLVER DEBUG: tablet_resolver.cpp:781: ApplyEntry tabletId: 72057594037936131 leader: [0:0:0] followers: 0 2026-01-08T15:32:00.514383Z node 30 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:170: TClient[72057594037936131] forward result error, check reconnect [30:199:2162] 2026-01-08T15:32:00.514421Z node 30 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:569: TClient[72057594037936131] schedule retry [30:199:2162] 2026-01-08T15:32:00.561592Z node 31 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:426: TClient[72057594037936129] client retry [31:56:2064] 2026-01-08T15:32:00.561661Z node 31 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:542: TClient[72057594037936129] lookup [31:56:2064] 2026-01-08T15:32:00.561770Z node 31 :TABLET_RESOLVER DEBUG: tablet_resolver.cpp:882: Handle TEvForward tabletId: 72057594037936129 entry.State: StNormal leader: [0:0:0] (known problem) followers: 0 ev: {EvForward TabletID: 72057594037936129 Ev: nullptr Flags: 1:2:0} 2026-01-08T15:32:00.561818Z node 31 :TABLET_RESOLVER INFO: tablet_resolver.cpp:696: No candidates for SelectForward, node 31 selfDC 2 leaderDC 1:2:0 local 0 localDc 0 other 0 disallowed 0 2026-01-08T15:32:00.561966Z node 31 :STATESTORAGE DEBUG: statestorage_proxy.cpp:287: ProxyRequest::HandleInit ringGroup:0 ev: {EvLookup TabletID: 72057594037936129 Cookie: 0 ProxyOptions: SigNone} 2026-01-08T15:32:00.562334Z node 30 :STATESTORAGE DEBUG: statestorage_replica.cpp:185: Replica::Handle ev: {EvReplicaLookup TabletID: 72057594037936129 Cookie: 0} 2026-01-08T15:32:00.562396Z node 30 :STATESTORAGE DEBUG: statestorage_replica.cpp:185: Replica::Handle ev: {EvReplicaLookup TabletID: 72057594037936129 Cookie: 1} 2026-01-08T15:32:00.562435Z node 30 :STATESTORAGE DEBUG: statestorage_replica.cpp:185: Replica::Handle ev: {EvReplicaLookup TabletID: 
72057594037936129 Cookie: 2} 2026-01-08T15:32:00.562639Z node 31 :STATESTORAGE DEBUG: statestorage_proxy.cpp:399: ProxyRequest::HandleLookup ringGroup:0 ev: {EvReplicaInfo Status: 1 TabletID: 72057594037936129 ClusterStateGeneration: 0 ClusterStateGuid: 0} 2026-01-08T15:32:00.562714Z node 31 :STATESTORAGE DEBUG: statestorage_proxy.cpp:399: ProxyRequest::HandleLookup ringGroup:0 ev: {EvReplicaInfo Status: 1 TabletID: 72057594037936129 ClusterStateGeneration: 0 ClusterStateGuid: 0} 2026-01-08T15:32:00.562757Z node 31 :STATESTORAGE DEBUG: statestorage_proxy.cpp:399: ProxyRequest::HandleLookup ringGroup:0 ev: {EvReplicaInfo Status: 1 TabletID: 72057594037936129 ClusterStateGeneration: 0 ClusterStateGuid: 0} 2026-01-08T15:32:00.562846Z node 31 :TABLET_RESOLVER DEBUG: tablet_resolver.cpp:781: ApplyEntry tabletId: 72057594037936129 leader: [0:0:0] followers: 0 2026-01-08T15:32:00.562905Z node 31 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:170: TClient[72057594037936129] forward result error, check reconnect [31:56:2064] 2026-01-08T15:32:00.562940Z node 31 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:569: TClient[72057594037936129] schedule retry [31:56:2064] 2026-01-08T15:32:00.574057Z node 30 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:426: TClient[72057594037936129] client retry [30:43:2064] 2026-01-08T15:32:00.574125Z node 30 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:542: TClient[72057594037936129] lookup [30:43:2064] 2026-01-08T15:32:00.574229Z node 30 :TABLET_RESOLVER DEBUG: tablet_resolver.cpp:882: Handle TEvForward tabletId: 72057594037936129 entry.State: StNormal leader: [0:0:0] (known problem) followers: 0 ev: {EvForward TabletID: 72057594037936129 Ev: nullptr Flags: 1:2:0} 2026-01-08T15:32:00.574276Z node 30 :TABLET_RESOLVER INFO: tablet_resolver.cpp:696: No candidates for SelectForward, node 30 selfDC 1 leaderDC 1:2:0 local 0 localDc 0 other 0 disallowed 0 2026-01-08T15:32:00.574438Z node 30 :STATESTORAGE DEBUG: statestorage_proxy.cpp:287: ProxyRequest::HandleInit ringGroup:0 ev: {EvLookup TabletID: 72057594037936129 Cookie: 0 ProxyOptions: SigNone} 2026-01-08T15:32:00.574558Z node 30 :STATESTORAGE DEBUG: statestorage_replica.cpp:185: Replica::Handle ev: {EvReplicaLookup TabletID: 72057594037936129 Cookie: 0} 2026-01-08T15:32:00.574621Z node 30 :STATESTORAGE DEBUG: statestorage_replica.cpp:185: Replica::Handle ev: {EvReplicaLookup TabletID: 72057594037936129 Cookie: 1} 2026-01-08T15:32:00.574662Z node 30 :STATESTORAGE DEBUG: statestorage_replica.cpp:185: Replica::Handle ev: {EvReplicaLookup TabletID: 72057594037936129 Cookie: 2} 2026-01-08T15:32:00.574710Z node 30 :STATESTORAGE DEBUG: statestorage_proxy.cpp:399: ProxyRequest::HandleLookup ringGroup:0 ev: {EvReplicaInfo Status: 1 TabletID: 72057594037936129 ClusterStateGeneration: 0 ClusterStateGuid: 0} 2026-01-08T15:32:00.574794Z node 30 :STATESTORAGE DEBUG: statestorage_proxy.cpp:399: ProxyRequest::HandleLookup ringGroup:0 ev: {EvReplicaInfo Status: 1 TabletID: 72057594037936129 ClusterStateGeneration: 0 ClusterStateGuid: 0} 2026-01-08T15:32:00.574835Z node 30 :STATESTORAGE DEBUG: statestorage_proxy.cpp:399: ProxyRequest::HandleLookup ringGroup:0 ev: {EvReplicaInfo Status: 1 TabletID: 72057594037936129 ClusterStateGeneration: 0 ClusterStateGuid: 0} 2026-01-08T15:32:00.574903Z node 30 :TABLET_RESOLVER DEBUG: tablet_resolver.cpp:781: ApplyEntry tabletId: 72057594037936129 leader: [0:0:0] followers: 0 2026-01-08T15:32:00.574959Z node 30 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:170: TClient[72057594037936129] forward result error, check 
reconnect [30:43:2064] 2026-01-08T15:32:00.574991Z node 30 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:569: TClient[72057594037936129] schedule retry [30:43:2064] 2026-01-08T15:32:00.657095Z node 30 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:426: TClient[72057594037936131] client retry [30:199:2162] 2026-01-08T15:32:00.657166Z node 30 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:542: TClient[72057594037936131] lookup [30:199:2162] 2026-01-08T15:32:00.657254Z node 30 :TABLET_RESOLVER DEBUG: tablet_resolver.cpp:882: Handle TEvForward tabletId: 72057594037936131 entry.State: StNormal leader: [0:0:0] (known problem) followers: 0 ev: {EvForward TabletID: 72057594037936131 Ev: nullptr Flags: 1:2:0} 2026-01-08T15:32:00.657299Z node 30 :TABLET_RESOLVER INFO: tablet_resolver.cpp:696: No candidates for SelectForward, node 30 selfDC 1 leaderDC 1:2:0 local 0 localDc 0 other 0 disallowed 0 2026-01-08T15:32:00.657444Z node 30 :STATESTORAGE DEBUG: statestorage_proxy.cpp:287: ProxyRequest::HandleInit ringGroup:0 ev: {EvLookup TabletID: 72057594037936131 Cookie: 0 ProxyOptions: SigNone} 2026-01-08T15:32:00.657574Z node 30 :STATESTORAGE DEBUG: statestorage_replica.cpp:185: Replica::Handle ev: {EvReplicaLookup TabletID: 72057594037936131 Cookie: 0} 2026-01-08T15:32:00.657629Z node 30 :STATESTORAGE DEBUG: statestorage_replica.cpp:185: Replica::Handle ev: {EvReplicaLookup TabletID: 72057594037936131 Cookie: 1} 2026-01-08T15:32:00.657668Z node 30 :STATESTORAGE DEBUG: statestorage_replica.cpp:185: Replica::Handle ev: {EvReplicaLookup TabletID: 72057594037936131 Cookie: 2} 2026-01-08T15:32:00.657713Z node 30 :STATESTORAGE DEBUG: statestorage_proxy.cpp:399: ProxyRequest::HandleLookup ringGroup:0 ev: {EvReplicaInfo Status: 1 TabletID: 72057594037936131 ClusterStateGeneration: 0 ClusterStateGuid: 0} 2026-01-08T15:32:00.657789Z node 30 :STATESTORAGE DEBUG: statestorage_proxy.cpp:399: ProxyRequest::HandleLookup ringGroup:0 ev: {EvReplicaInfo Status: 1 TabletID: 72057594037936131 ClusterStateGeneration: 0 ClusterStateGuid: 0} 2026-01-08T15:32:00.657840Z node 30 :STATESTORAGE DEBUG: statestorage_proxy.cpp:399: ProxyRequest::HandleLookup ringGroup:0 ev: {EvReplicaInfo Status: 1 TabletID: 72057594037936131 ClusterStateGeneration: 0 ClusterStateGuid: 0} 2026-01-08T15:32:00.657919Z node 30 :TABLET_RESOLVER DEBUG: tablet_resolver.cpp:781: ApplyEntry tabletId: 72057594037936131 leader: [0:0:0] followers: 0 2026-01-08T15:32:00.657980Z node 30 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:170: TClient[72057594037936131] forward result error, check reconnect [30:199:2162] 2026-01-08T15:32:00.658014Z node 30 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:569: TClient[72057594037936131] schedule retry [30:199:2162] 2026-01-08T15:32:00.731096Z node 30 :TABLET_EXECUTOR DEBUG: Leader{72057594037927937:2:9} Tx{18, NKikimr::NHive::TTxDeleteNode} queued, type NKikimr::NHive::TTxDeleteNode 2026-01-08T15:32:00.731212Z node 30 :TABLET_EXECUTOR DEBUG: Leader{72057594037927937:2:9} Tx{18, NKikimr::NHive::TTxDeleteNode} took 4194304b of static mem, Memory{4194304 dyn 0} 2026-01-08T15:32:00.731441Z node 30 :TABLET_EXECUTOR DEBUG: Leader{72057594037927937:2:9} Tx{18, NKikimr::NHive::TTxDeleteNode} hope 1 -> done Change{10, redo 64b alter 0b annex 0, ~{ 4 } -{ }, 0 gb} 2026-01-08T15:32:00.731533Z node 30 :TABLET_EXECUTOR DEBUG: Leader{72057594037927937:2:9} Tx{18, NKikimr::NHive::TTxDeleteNode} release 4194304b of static, Memory{0 dyn 0} 2026-01-08T15:32:00.732128Z node 30 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:49: TClient[72057594037927937] 
::Bootstrap [30:541:2322] 2026-01-08T15:32:00.732197Z node 30 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:542: TClient[72057594037927937] lookup [30:541:2322] 2026-01-08T15:32:00.732307Z node 30 :TABLET_RESOLVER DEBUG: tablet_resolver.cpp:882: Handle TEvForward tabletId: 72057594037927937 entry.State: StNormal leader: [30:452:2266] followers: 0 ev: {EvForward TabletID: 72057594037927937 Ev: nullptr Flags: 1:2:0} 2026-01-08T15:32:00.732387Z node 30 :TABLET_RESOLVER DEBUG: tablet_resolver.cpp:667: SelectForward node 30 selfDC 1 leaderDC 1 1:2:0 local 1 localDc 1 other 0 disallowed 0 tabletId: 72057594037927937 followers: 0 countLeader 1 allowFollowers 0 winner: [30:452:2266] 2026-01-08T15:32:00.732495Z node 30 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:148: TClient[72057594037927937] queue send [30:541:2322] 2026-01-08T15:32:00.732577Z node 30 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:420: TClient[72057594037927937] received pending shutdown [30:541:2322] 2026-01-08T15:32:00.732647Z node 30 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:188: TClient[72057594037927937] forward result local node, try to connect [30:541:2322] 2026-01-08T15:32:00.732723Z node 30 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:687: TClient[72057594037927937]::SendEvent [30:541:2322] 2026-01-08T15:32:00.732855Z node 30 :PIPE_SERVER DEBUG: tablet_pipe_server.cpp:291: [72057594037927937] Accept Connect Originator# [30:541:2322] 2026-01-08T15:32:00.733104Z node 30 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:308: TClient[72057594037927937] connected with status OK role: Leader [30:541:2322] 2026-01-08T15:32:00.733171Z node 30 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:323: TClient[72057594037927937] send queued [30:541:2322] 2026-01-08T15:32:00.733224Z node 30 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:645: TClient[72057594037927937] push event to server [30:541:2322] 2026-01-08T15:32:00.733310Z node 30 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:331: TClient[72057594037927937] shutdown pipe due to pending shutdown request [30:541:2322] 2026-01-08T15:32:00.733368Z node 30 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:514: TClient[72057594037927937] notify reset [30:541:2322] 2026-01-08T15:32:00.733449Z node 30 :PIPE_SERVER DEBUG: tablet_pipe_server.cpp:141: [72057594037927937] HandleSend Sender# [30:540:2321] EventType# 268830214
|86.6%| [TM] {BAZEL_UPLOAD} ydb/core/mind/hive/ut/unittest
------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/columnshard/ut_rw/unittest >> TColumnShardTestReadWrite::CompactionInGranule_PKInt32_Reboot [GOOD]
Test command err:
2026-01-08T15:30:51.133456Z node 1 :TX_COLUMNSHARD INFO: log.cpp:841: tablet_id=9437184;self_id=[1:129:2159];fline=columnshard.cpp:110;event=initialize_shard;step=OnActivateExecutor; 2026-01-08T15:30:51.307983Z node 1 :TX_COLUMNSHARD INFO: log.cpp:841: tablet_id=9437184;self_id=[1:129:2159];fline=columnshard.cpp:129;event=initialize_shard;step=initialize_tiring_finished; 2026-01-08T15:30:51.308214Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxInitSchema.Execute at tablet 9437184 2026-01-08T15:30:51.330258Z node 1 :TX_COLUMNSHARD WARN: log.cpp:841: tablet_id=9437184;self_id=[1:129:2159];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2026-01-08T15:30:51.334729Z node 1 :TX_COLUMNSHARD WARN: log.cpp:841: tablet_id=9437184;self_id=[1:129:2159];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2026-01-08T15:30:51.335018Z node 1
:TX_COLUMNSHARD WARN: log.cpp:841: tablet_id=9437184;self_id=[1:129:2159];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2026-01-08T15:30:51.335175Z node 1 :TX_COLUMNSHARD WARN: log.cpp:841: tablet_id=9437184;self_id=[1:129:2159];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2026-01-08T15:30:51.335313Z node 1 :TX_COLUMNSHARD WARN: log.cpp:841: tablet_id=9437184;self_id=[1:129:2159];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2026-01-08T15:30:51.335421Z node 1 :TX_COLUMNSHARD WARN: log.cpp:841: tablet_id=9437184;self_id=[1:129:2159];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2026-01-08T15:30:51.335532Z node 1 :TX_COLUMNSHARD WARN: log.cpp:841: tablet_id=9437184;self_id=[1:129:2159];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2026-01-08T15:30:51.336852Z node 1 :TX_COLUMNSHARD WARN: log.cpp:841: tablet_id=9437184;self_id=[1:129:2159];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2026-01-08T15:30:51.336977Z node 1 :TX_COLUMNSHARD WARN: log.cpp:841: tablet_id=9437184;self_id=[1:129:2159];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2026-01-08T15:30:51.337148Z node 1 :TX_COLUMNSHARD WARN: log.cpp:841: tablet_id=9437184;self_id=[1:129:2159];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2026-01-08T15:30:51.337263Z node 1 :TX_COLUMNSHARD WARN: log.cpp:841: tablet_id=9437184;self_id=[1:129:2159];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2026-01-08T15:30:51.337366Z node 1 :TX_COLUMNSHARD WARN: log.cpp:841: tablet_id=9437184;self_id=[1:129:2159];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CopyBlobIdsToV2; 2026-01-08T15:30:51.337501Z node 1 :TX_COLUMNSHARD WARN: log.cpp:841: tablet_id=9437184;self_id=[1:129:2159];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreAppearanceSnapshot; 2026-01-08T15:30:51.391675Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxInitSchema.Complete at tablet 9437184 2026-01-08T15:30:51.394523Z node 1 :TX_COLUMNSHARD INFO: log.cpp:841: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=columnshard__init.cpp:131;step=TTxUpdateSchema.Execute_Start;details=normalizers_count=12;current_normalizer=CLASS_NAME=Granules; 2026-01-08T15:30:51.394611Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:841: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=1;type=Granules; 2026-01-08T15:30:51.394820Z node 1 :TX_COLUMNSHARD INFO: log.cpp:841: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=normalizer.cpp:127;normalizer=TGranulesNormalizer;message=0 chunks found; 2026-01-08T15:30:51.394980Z node 1 :TX_COLUMNSHARD WARN: log.cpp:841: 
tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Granules;id=1; 2026-01-08T15:30:51.395085Z node 1 :TX_COLUMNSHARD WARN: log.cpp:841: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=Chunks;id=Chunks; 2026-01-08T15:30:51.395138Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:841: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=2;type=Chunks; 2026-01-08T15:30:51.395251Z node 1 :TX_COLUMNSHARD INFO: log.cpp:841: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=chunks.cpp:138;normalizer=TChunksNormalizer;message=0 chunks found; 2026-01-08T15:30:51.395313Z node 1 :TX_COLUMNSHARD WARN: log.cpp:841: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Chunks;id=2; 2026-01-08T15:30:51.395364Z node 1 :TX_COLUMNSHARD WARN: log.cpp:841: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=TablesCleaner;id=TablesCleaner; 2026-01-08T15:30:51.395395Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:841: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=4;type=TablesCleaner; 2026-01-08T15:30:51.395568Z node 1 :TX_COLUMNSHARD INFO: log.cpp:841: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=normalizer.cpp:123;normalizer=TGranulesNormalizer;message=0 chunks found; 2026-01-08T15:30:51.395631Z node 1 :TX_COLUMNSHARD WARN: log.cpp:841: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=TablesCleaner;id=4; 2026-01-08T15:30:51.395672Z node 1 :TX_COLUMNSHARD WARN: log.cpp:841: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanGranuleId;id=CleanGranuleId; 2026-01-08T15:30:51.395705Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:841: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=6;type=CleanGranuleId; 2026-01-08T15:30:51.395790Z node 1 :TX_COLUMNSHARD INFO: log.cpp:841: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=clean_granule.cpp:133;normalizer=TCleanGranuleIdNormalizer;message=0 chunks found; 2026-01-08T15:30:51.395849Z node 1 :TX_COLUMNSHARD WARN: log.cpp:841: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanGranuleId;id=6; 2026-01-08T15:30:51.395895Z node 1 :TX_COLUMNSHARD WARN: log.cpp:841: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=GCCountersNormalizer;id=GCCountersNormalizer; 2026-01-08T15:30:51.411030Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:841: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=9;type=GCCountersNormalizer; 2026-01-08T15:30:51.411204Z node 1 :TX_COLUMNSHARD WARN: log.cpp:841: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=GCCountersNormalizer;id=9; 2026-01-08T15:30:51.411294Z node 1 :TX_COLUMNSHARD WARN: log.cpp:841: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=SyncPortionFromChunks;id=SyncPortionFromChunks; 
2026-01-08T15:30:51.411333Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:841: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=11;type=SyncPortionFromChunks; 2026-01-08T15:30:51.411392Z node 1 :TX_COLUMNSHARD WARN: log.cpp:841: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=SyncPortionFromChunks;id=11; 2026-01-08T15:30:51.411510Z node 1 :TX_COLUMNSHARD WARN: log.cpp:841: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV1Chunks_V2;id=RestoreV1Chunks_V2; 2026-01-08T15:30:51.411549Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:841: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=15;type=RestoreV1Chunks_V2; 2026-01-08T15:30:51.411829Z node 1 :TX_COLUMNSHARD WARN: log.cpp:841: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV1Chunks_V2;id=15; 2026-01-08T15:30:51.411901Z node 1 :TX_COLUMNSHARD WARN: log.cpp:841: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV2Chunks;id=RestoreV2Chunks; 2026-01-08T15:30:51.411955Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:841: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=16;type=RestoreV2Chunks; 2026-01-08T15:30:51.412084Z node 1 :TX_COLUMNSHARD WARN: log.cpp:841: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV2Chunks;id=16; 2026-01-08T15:30:51.412129Z node 1 :TX_COLUMNSHARD WARN: log.cpp:841: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanDeprecatedSnapshot;id=CleanDeprecatedSnapshot; 2026-01-08T15:30:51.412157Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:841: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=17;type=CleanDeprecatedSnapshot; 2026-01-08T15:30:51.412203Z node 1 :TX_COLUMNSHARD WARN: log.cpp:841: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanDeprecatedSnapshot;id=17; 2026-01-08T15:30:51.412267Z node 1 :TX_COLUMNSHARD WARN: log.cpp:841: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV0ChunksMeta;id=RestoreV0ChunksMeta; 2026-01-08T15:30:51.412324Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:841: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=18;type=RestoreV0ChunksMeta; 2026-01-08T15:30:51.412372Z node 1 :TX_COLUMNSHARD WARN: log.cpp:841: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV0ChunksMeta;id=18; 2026-01-08T15:30:51.412409Z node 1 :TX_COLUMNSHARD WARN: log.cpp:841: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CopyBlobIdsToV2;id=CopyBlobIdsToV2; 2026-01-08T15:30:51.412456Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:841: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=19;type=CopyBlobIdsToV2; 
2026-01-08T15:30:51.412644Z node 1 :TX_COLUMNSHARD WARN: log.cpp:841: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CopyBlobIdsToV2;id=19; 2026-01-08T15:30:51.412693Z node 1 :TX_COLUMNSHARD WARN: log.cpp:841: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLAS ... ge_name=EXECUTE:column_engines/granules;load_stage_name=EXECUTE:granules/granule;fline=common_data.cpp:29;PRECHARGE:granule_finished_commonLoadingTime=11; 2026-01-08T15:31:57.737665Z node 1 :TX_COLUMNSHARD INFO: log.cpp:841: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;load_stage_name=EXECUTE:column_engines;load_stage_name=EXECUTE:column_engines/granules;load_stage_name=EXECUTE:granules/granule;fline=common_data.cpp:29;EXECUTE:granule_finished_commonLoadingTime=951; 2026-01-08T15:31:57.737717Z node 1 :TX_COLUMNSHARD INFO: log.cpp:841: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;load_stage_name=EXECUTE:column_engines;load_stage_name=EXECUTE:column_engines/granules;fline=common_data.cpp:29;EXECUTE:granuleLoadingTime=58866; 2026-01-08T15:31:57.737762Z node 1 :TX_COLUMNSHARD INFO: log.cpp:841: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;load_stage_name=EXECUTE:column_engines;fline=common_data.cpp:29;EXECUTE:granulesLoadingTime=58977; 2026-01-08T15:31:57.737836Z node 1 :TX_COLUMNSHARD INFO: log.cpp:841: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;load_stage_name=EXECUTE:column_engines;fline=common_data.cpp:29;PRECHARGE:finishLoadingTime=12; 2026-01-08T15:31:57.738224Z node 1 :TX_COLUMNSHARD INFO: log.cpp:841: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;load_stage_name=EXECUTE:column_engines;fline=common_data.cpp:29;EXECUTE:finishLoadingTime=332; 2026-01-08T15:31:57.738271Z node 1 :TX_COLUMNSHARD INFO: log.cpp:841: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:column_enginesLoadingTime=59985; 2026-01-08T15:31:57.738668Z node 1 :TX_COLUMNSHARD INFO: log.cpp:841: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;PRECHARGE:tx_controllerLoadingTime=338; 2026-01-08T15:31:57.738800Z node 1 :TX_COLUMNSHARD INFO: log.cpp:841: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:tx_controllerLoadingTime=72; 2026-01-08T15:31:57.739245Z node 1 :TX_COLUMNSHARD INFO: log.cpp:841: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;PRECHARGE:operations_managerLoadingTime=395; 2026-01-08T15:31:57.739619Z node 1 :TX_COLUMNSHARD INFO: log.cpp:841: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:operations_managerLoadingTime=322; 2026-01-08T15:31:57.761266Z node 1 :TX_COLUMNSHARD INFO: log.cpp:841: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;PRECHARGE:storages_managerLoadingTime=21569; 2026-01-08T15:31:57.778450Z node 1 :TX_COLUMNSHARD INFO: log.cpp:841: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:storages_managerLoadingTime=16937; 2026-01-08T15:31:57.778560Z node 1 :TX_COLUMNSHARD INFO: log.cpp:841: 
tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;PRECHARGE:db_locksLoadingTime=13; 2026-01-08T15:31:57.778615Z node 1 :TX_COLUMNSHARD INFO: log.cpp:841: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:db_locksLoadingTime=11; 2026-01-08T15:31:57.778654Z node 1 :TX_COLUMNSHARD INFO: log.cpp:841: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;PRECHARGE:bg_sessionsLoadingTime=6; 2026-01-08T15:31:57.778726Z node 1 :TX_COLUMNSHARD INFO: log.cpp:841: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:bg_sessionsLoadingTime=42; 2026-01-08T15:31:57.778769Z node 1 :TX_COLUMNSHARD INFO: log.cpp:841: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;PRECHARGE:sharing_sessionsLoadingTime=7; 2026-01-08T15:31:57.778855Z node 1 :TX_COLUMNSHARD INFO: log.cpp:841: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:sharing_sessionsLoadingTime=46; 2026-01-08T15:31:57.778916Z node 1 :TX_COLUMNSHARD INFO: log.cpp:841: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;PRECHARGE:in_flight_readsLoadingTime=9; 2026-01-08T15:31:57.778991Z node 1 :TX_COLUMNSHARD INFO: log.cpp:841: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:in_flight_readsLoadingTime=36; 2026-01-08T15:31:57.779074Z node 1 :TX_COLUMNSHARD INFO: log.cpp:841: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;PRECHARGE:tiers_managerLoadingTime=51; 2026-01-08T15:31:57.779143Z node 1 :TX_COLUMNSHARD INFO: log.cpp:841: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:tiers_managerLoadingTime=39; 2026-01-08T15:31:57.779174Z node 1 :TX_COLUMNSHARD INFO: log.cpp:841: tablet_id=9437184;event=initialize_shard;fline=common_data.cpp:29;EXECUTE:composite_initLoadingTime=109589; 2026-01-08T15:31:57.779300Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: Index: tables 1 inserted {blob_bytes=54584568;raw_bytes=52825050;count=9;records=675000} compacted {blob_bytes=0;raw_bytes=0;count=0;records=0} s-compacted {blob_bytes=22512;raw_bytes=19000;count=1;records=200} inactive {blob_bytes=344000;raw_bytes=15800;count=200;records=200} evicted {blob_bytes=0;raw_bytes=0;count=0;records=0} at tablet 9437184 2026-01-08T15:31:57.779403Z node 1 :TX_COLUMNSHARD INFO: log.cpp:841: tablet_id=9437184;self_id=[1:5443:7075];process=SwitchToWork;fline=columnshard.cpp:78;event=initialize_shard;step=SwitchToWork; 2026-01-08T15:31:57.779459Z node 1 :TX_COLUMNSHARD INFO: log.cpp:841: tablet_id=9437184;self_id=[1:5443:7075];process=SwitchToWork;fline=columnshard.cpp:81;event=initialize_shard;step=SignalTabletActive; 2026-01-08T15:31:57.779546Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:841: tablet_id=9437184;self_id=[1:5443:7075];process=SwitchToWork;fline=columnshard_impl.cpp:1532;event=OnTieringModified;path_id=NO_VALUE_OPTIONAL; 2026-01-08T15:31:57.779589Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:841: tablet_id=9437184;self_id=[1:5443:7075];process=SwitchToWork;fline=column_engine_logs.cpp:543;event=OnTieringModified;new_count_tierings=0; 2026-01-08T15:31:57.779780Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:841: 
tablet_id=9437184;fline=columnshard_impl.cpp:448;event=EnqueueBackgroundActivities;periodic=0; 2026-01-08T15:31:57.779851Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:841: tablet_id=9437184;fline=tiling.cpp:693;message=tiling compaction: actualize called; 2026-01-08T15:31:57.779884Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:841: tablet_id=9437184;fline=tiling.cpp:710;message=tiling compaction: returning DoGetUsefulMetric zero; 2026-01-08T15:31:57.779928Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:841: tablet_id=9437184;fline=column_engine_logs.cpp:260;event=StartCleanup;portions_count=1; 2026-01-08T15:31:57.779983Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:841: tablet_id=9437184;fline=column_engine_logs.cpp:273;event=StartCleanupStop;snapshot=plan_step=1767885955099;tx_id=18446744073709551615;;current_snapshot_ts=1767886252555; 2026-01-08T15:31:57.780028Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:841: tablet_id=9437184;fline=column_engine_logs.cpp:341;event=StartCleanup;portions_count=1;portions_prepared=0;drop=0;skip=0;portions_counter=0;chunks=0;limit=0;max_portions=1000;max_chunks=500000; 2026-01-08T15:31:57.780064Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:841: tablet_id=9437184;fline=columnshard_impl.cpp:803;background=cleanup;skip_reason=no_changes; 2026-01-08T15:31:57.780095Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:841: tablet_id=9437184;fline=columnshard_impl.cpp:835;background=cleanup;skip_reason=no_changes; 2026-01-08T15:31:57.780189Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:841: tablet_id=9437184;fline=columnshard_impl.cpp:757;background=ttl;skip_reason=no_changes; 2026-01-08T15:31:57.780370Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:841: fline=columnshard.cpp:505;event=TEvReportBaseStatistics;ReportBaseStatisticsPeriodMs=60000;scheduleDuration=60.184000s; 2026-01-08T15:31:57.782893Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:841: tablet_id=9437184;self_id=[1:5443:7075];ev=NActors::TEvents::TEvWakeup;fline=columnshard.cpp:269;event=TEvPrivate::TEvPeriodicWakeup::MANUAL;tablet_id=9437184; 2026-01-08T15:31:57.783161Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:841: tablet_id=9437184;self_id=[1:5443:7075];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;fline=columnshard.cpp:260;event=TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184; 2026-01-08T15:31:57.783212Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:841: tablet_id=9437184;self_id=[1:5443:7075];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=columnshard_impl.cpp:448;event=EnqueueBackgroundActivities;periodic=0; 2026-01-08T15:31:57.783278Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:841: tablet_id=9437184;self_id=[1:5443:7075];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=tiling.cpp:710;message=tiling compaction: returning DoGetUsefulMetric zero; 2026-01-08T15:31:57.783323Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:841: tablet_id=9437184;self_id=[1:5443:7075];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=column_engine_logs.cpp:260;event=StartCleanup;portions_count=1; 2026-01-08T15:31:57.783382Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:841: tablet_id=9437184;self_id=[1:5443:7075];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=column_engine_logs.cpp:273;event=StartCleanupStop;snapshot=plan_step=1767885955099;tx_id=18446744073709551615;;current_snapshot_ts=1767886252555; 2026-01-08T15:31:57.783427Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:841: 
tablet_id=9437184;self_id=[1:5443:7075];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=column_engine_logs.cpp:341;event=StartCleanup;portions_count=1;portions_prepared=0;drop=0;skip=0;portions_counter=0;chunks=0;limit=0;max_portions=1000;max_chunks=500000; 2026-01-08T15:31:57.783487Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:841: tablet_id=9437184;self_id=[1:5443:7075];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=columnshard_impl.cpp:803;background=cleanup;skip_reason=no_changes; 2026-01-08T15:31:57.783528Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:841: tablet_id=9437184;self_id=[1:5443:7075];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=columnshard_impl.cpp:835;background=cleanup;skip_reason=no_changes; 2026-01-08T15:31:57.783599Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:841: tablet_id=9437184;self_id=[1:5443:7075];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;queue=ttl;external_count=0;fline=granule.cpp:172;event=skip_actualization;waiting=1.000000s; 2026-01-08T15:31:57.783646Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:841: tablet_id=9437184;self_id=[1:5443:7075];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=columnshard_impl.cpp:757;background=ttl;skip_reason=no_changes; 2026-01-08T15:31:57.784170Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:841: tablet_id=9437184;self_id=[1:5443:7075];ev=NKikimr::NColumnShard::TEvPrivate::TEvReportExecutorStatistics;fline=columnshard.cpp:515;event=TEvReportExecutorStatistics;ReportExecutorStatisticsPeriodMs=60000;scheduleDuration=60.055000s; 2026-01-08T15:31:57.784211Z node 1 :TX_COLUMNSHARD INFO: log.cpp:841: tablet_id=9437184;self_id=[1:5443:7075];ev=NKikimr::NColumnShard::TEvPrivate::TEvReportExecutorStatistics;fline=columnshard.cpp:447;No CurrentSchemeShardId=9437184; |86.6%| [TM] {BAZEL_UPLOAD} ydb/core/tx/columnshard/ut_rw/unittest >> test_s3.py::TestYdbS3TTL::test_s3[table_ttl_Datetime-pk_types16-all_types16-index16-Datetime--] [GOOD] >> KqpPg::InsertFromSelect_Simple+useSink [GOOD] >> KqpPg::InsertFromSelect_Simple-useSink >> KeyValueGRPCService::SimpleGetStorageChannelStatusV1 [GOOD] >> KeyValueGRPCService::SimpleGetStorageChannelStatusV2 >> TControlPlaneProxyTest::FailsOnCreateQueryWhenRateLimiterResourceNotCreated [GOOD] >> TControlPlaneProxyTest::ShouldSendListQueries >> BasicUsage::CreateTopicWithSharedConsumer_MoveDeadLetterPolicy [GOOD] >> BasicUsage::CreateTopicWithSharedConsumer_DeleteDeadLetterPolicy >> TControlPlaneProxyTest::ShouldSendListQueries [GOOD] >> TControlPlaneProxyTest::ShouldSendDescribeQuery >> KqpScripting::StreamExecuteYqlScriptMixed |86.6%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/grpc_streaming/ut/ydb-core-grpc_streaming-ut |86.6%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/grpc_streaming/ut/ydb-core-grpc_streaming-ut |86.6%| [LD] {RESULT} $(B)/ydb/core/grpc_streaming/ut/ydb-core-grpc_streaming-ut >> test_s3.py::TestYdbS3TTL::test_s3[table_ttl_Date-pk_types18-all_types18-index18-Date--] [GOOD] >> KqpScripting::ScanQuery >> TxUsage::WriteToTopic_Demo_12_Query [GOOD] >> KqpSinkMvcc::OltpMultiSinks [GOOD] >> KqpPg::TableArrayInsert+useSink [GOOD] >> KqpPg::TableArrayInsert-useSink >> TxUsage::WriteToTopic_Demo_13_Table >> TControlPlaneProxyTest::ShouldSendDescribeQuery [GOOD] >> TControlPlaneProxyTest::ShouldSendGetQueryStatus >> TKesusTest::TestAcquireTimeoutAfterReboot [GOOD] >> TKesusTest::TestAcquireSemaphoreViaRelease >> 
DataShardVolatile::DistributedWriteLaterSnapshotBlockedThenCommit+UseSink [GOOD] >> DataShardVolatile::DistributedWriteLaterSnapshotBlockedThenCommit-UseSink >> TKesusTest::TestAcquireSemaphoreViaRelease [GOOD] >> KqpPg::CreateUniqPgColumn-useSink [GOOD] >> KqpPg::CreateUniqComplexPgColumn+useSink >> TControlPlaneProxyTest::ShouldSendGetQueryStatus [GOOD] >> TControlPlaneProxyTest::ShouldSendModifyQuery >> KqpPg::InsertValuesFromTableWithDefaultTextNotNull+useSink [GOOD] >> KqpPg::InsertValuesFromTableWithDefaultTextNotNull-useSink >> KqpPg::CreateTableBulkUpsertAndRead [GOOD] >> KqpPg::CopyTableSerialColumns+useSink ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kesus/tablet/ut/unittest >> TKesusTest::TestAcquireSemaphoreViaRelease [GOOD] Test command err: 2026-01-08T15:31:12.922781Z node 1 :KESUS_TABLET INFO: tablet_impl.cpp:71: OnActivateExecutor: 72057594037927937 2026-01-08T15:31:12.922931Z node 1 :KESUS_TABLET DEBUG: tx_init_schema.cpp:14: [72057594037927937] TTxInitSchema::Execute 2026-01-08T15:31:12.943765Z node 1 :KESUS_TABLET DEBUG: tx_init_schema.cpp:21: [72057594037927937] TTxInitSchema::Complete 2026-01-08T15:31:12.944286Z node 1 :KESUS_TABLET DEBUG: tx_init.cpp:30: [72057594037927937] TTxInit::Execute 2026-01-08T15:31:12.972220Z node 1 :KESUS_TABLET DEBUG: tx_init.cpp:242: [72057594037927937] TTxInit::Complete 2026-01-08T15:31:12.972870Z node 1 :KESUS_TABLET DEBUG: tx_session_attach.cpp:35: [72057594037927937] TTxSessionAttach::Execute (sender=[1:137:2161], cookie=16943602355256132468, session=0, seqNo=0) 2026-01-08T15:31:12.973063Z node 1 :KESUS_TABLET DEBUG: tx_session_attach.cpp:118: [72057594037927937] Created new session 1 2026-01-08T15:31:12.986814Z node 1 :KESUS_TABLET DEBUG: tx_session_attach.cpp:140: [72057594037927937] TTxSessionAttach::Complete (sender=[1:137:2161], cookie=16943602355256132468, session=1) 2026-01-08T15:31:12.987561Z node 1 :KESUS_TABLET DEBUG: tx_semaphore_acquire.cpp:48: [72057594037927937] TTxSemaphoreAcquire::Execute (sender=[1:137:2161], cookie=111, session=1, semaphore="Lock1" count=1) 2026-01-08T15:31:12.987714Z node 1 :KESUS_TABLET DEBUG: tx_semaphore_acquire.cpp:126: [72057594037927937] Created new ephemeral semaphore 1 "Lock1" 2026-01-08T15:31:12.987805Z node 1 :KESUS_TABLET DEBUG: tablet_db.cpp:152: [72057594037927937] Processing semaphore 1 "Lock1" queue: next order #1 session 1 2026-01-08T15:31:13.000499Z node 1 :KESUS_TABLET DEBUG: tx_semaphore_acquire.cpp:263: [72057594037927937] TTxSemaphoreAcquire::Complete (sender=[1:137:2161], cookie=111) 2026-01-08T15:31:13.000859Z node 1 :KESUS_TABLET DEBUG: tx_semaphore_acquire.cpp:48: [72057594037927937] TTxSemaphoreAcquire::Execute (sender=[1:137:2161], cookie=222, session=1, semaphore="Lock1" count=18446744073709551615) 2026-01-08T15:31:13.019106Z node 1 :KESUS_TABLET DEBUG: tx_semaphore_acquire.cpp:263: [72057594037927937] TTxSemaphoreAcquire::Complete (sender=[1:137:2161], cookie=222) 2026-01-08T15:31:13.019718Z node 1 :KESUS_TABLET DEBUG: tx_semaphore_describe.cpp:29: [72057594037927937] TTxSemaphoreDescribe::Execute (sender=[1:153:2175], cookie=7156840230091194271, name="Lock1") 2026-01-08T15:31:13.019835Z node 1 :KESUS_TABLET DEBUG: tx_semaphore_describe.cpp:134: [72057594037927937] TTxSemaphoreDescribe::Complete (sender=[1:153:2175], cookie=7156840230091194271) 2026-01-08T15:31:13.804066Z node 2 :KESUS_TABLET INFO: tablet_impl.cpp:71: OnActivateExecutor: 72057594037927937 2026-01-08T15:31:13.804180Z node 2 :KESUS_TABLET DEBUG: tx_init_schema.cpp:14: [72057594037927937] 
TTxInitSchema::Execute 2026-01-08T15:31:13.838678Z node 2 :KESUS_TABLET DEBUG: tx_init_schema.cpp:21: [72057594037927937] TTxInitSchema::Complete 2026-01-08T15:31:13.838794Z node 2 :KESUS_TABLET DEBUG: tx_init.cpp:30: [72057594037927937] TTxInit::Execute 2026-01-08T15:31:13.865167Z node 2 :KESUS_TABLET DEBUG: tx_init.cpp:242: [72057594037927937] TTxInit::Complete 2026-01-08T15:31:13.866051Z node 2 :KESUS_TABLET DEBUG: tx_session_attach.cpp:35: [72057594037927937] TTxSessionAttach::Execute (sender=[2:135:2159], cookie=16571462389654236229, session=0, seqNo=0) 2026-01-08T15:31:13.866203Z node 2 :KESUS_TABLET DEBUG: tx_session_attach.cpp:118: [72057594037927937] Created new session 1 2026-01-08T15:31:13.880424Z node 2 :KESUS_TABLET DEBUG: tx_session_attach.cpp:140: [72057594037927937] TTxSessionAttach::Complete (sender=[2:135:2159], cookie=16571462389654236229, session=1) 2026-01-08T15:31:13.880747Z node 2 :KESUS_TABLET DEBUG: tx_session_attach.cpp:35: [72057594037927937] TTxSessionAttach::Execute (sender=[2:136:2160], cookie=17667397003601635518, session=0, seqNo=0) 2026-01-08T15:31:13.880875Z node 2 :KESUS_TABLET DEBUG: tx_session_attach.cpp:118: [72057594037927937] Created new session 2 2026-01-08T15:31:13.894243Z node 2 :KESUS_TABLET DEBUG: tx_session_attach.cpp:140: [72057594037927937] TTxSessionAttach::Complete (sender=[2:136:2160], cookie=17667397003601635518, session=2) 2026-01-08T15:31:13.895424Z node 2 :KESUS_TABLET DEBUG: tx_semaphore_acquire.cpp:48: [72057594037927937] TTxSemaphoreAcquire::Execute (sender=[2:135:2159], cookie=111, session=1, semaphore="Lock1" count=18446744073709551615) 2026-01-08T15:31:13.895589Z node 2 :KESUS_TABLET DEBUG: tx_semaphore_acquire.cpp:126: [72057594037927937] Created new ephemeral semaphore 1 "Lock1" 2026-01-08T15:31:13.895679Z node 2 :KESUS_TABLET DEBUG: tablet_db.cpp:152: [72057594037927937] Processing semaphore 1 "Lock1" queue: next order #1 session 1 2026-01-08T15:31:13.908276Z node 2 :KESUS_TABLET DEBUG: tx_semaphore_acquire.cpp:263: [72057594037927937] TTxSemaphoreAcquire::Complete (sender=[2:135:2159], cookie=111) 2026-01-08T15:31:13.908650Z node 2 :KESUS_TABLET DEBUG: tx_semaphore_acquire.cpp:48: [72057594037927937] TTxSemaphoreAcquire::Execute (sender=[2:135:2159], cookie=112, session=1, semaphore="Lock2" count=1) 2026-01-08T15:31:13.908788Z node 2 :KESUS_TABLET DEBUG: tx_semaphore_acquire.cpp:126: [72057594037927937] Created new ephemeral semaphore 2 "Lock2" 2026-01-08T15:31:13.908882Z node 2 :KESUS_TABLET DEBUG: tablet_db.cpp:152: [72057594037927937] Processing semaphore 2 "Lock2" queue: next order #2 session 1 2026-01-08T15:31:13.924145Z node 2 :KESUS_TABLET DEBUG: tx_semaphore_acquire.cpp:263: [72057594037927937] TTxSemaphoreAcquire::Complete (sender=[2:135:2159], cookie=112) 2026-01-08T15:31:13.924444Z node 2 :KESUS_TABLET DEBUG: tx_semaphore_acquire.cpp:48: [72057594037927937] TTxSemaphoreAcquire::Execute (sender=[2:136:2160], cookie=222, session=2, semaphore="Lock1" count=1) 2026-01-08T15:31:13.924636Z node 2 :KESUS_TABLET DEBUG: tx_semaphore_acquire.cpp:48: [72057594037927937] TTxSemaphoreAcquire::Execute (sender=[2:136:2160], cookie=223, session=2, semaphore="Lock2" count=18446744073709551615) 2026-01-08T15:31:13.947168Z node 2 :KESUS_TABLET DEBUG: tx_semaphore_acquire.cpp:263: [72057594037927937] TTxSemaphoreAcquire::Complete (sender=[2:136:2160], cookie=222) 2026-01-08T15:31:13.947259Z node 2 :KESUS_TABLET DEBUG: tx_semaphore_acquire.cpp:263: [72057594037927937] TTxSemaphoreAcquire::Complete (sender=[2:136:2160], cookie=223) 
2026-01-08T15:31:13.947674Z node 2 :KESUS_TABLET DEBUG: tx_semaphore_acquire.cpp:48: [72057594037927937] TTxSemaphoreAcquire::Execute (sender=[2:136:2160], cookie=333, session=2, semaphore="Lock1" count=1) 2026-01-08T15:31:13.948034Z node 2 :KESUS_TABLET DEBUG: tx_semaphore_acquire.cpp:48: [72057594037927937] TTxSemaphoreAcquire::Execute (sender=[2:136:2160], cookie=334, session=2, semaphore="Lock2" count=18446744073709551615) 2026-01-08T15:31:13.961771Z node 2 :KESUS_TABLET DEBUG: tx_semaphore_acquire.cpp:263: [72057594037927937] TTxSemaphoreAcquire::Complete (sender=[2:136:2160], cookie=333) 2026-01-08T15:31:13.961859Z node 2 :KESUS_TABLET DEBUG: tx_semaphore_acquire.cpp:263: [72057594037927937] TTxSemaphoreAcquire::Complete (sender=[2:136:2160], cookie=334) 2026-01-08T15:31:14.475888Z node 2 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 2026-01-08T15:31:14.489244Z node 2 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2026-01-08T15:31:14.910560Z node 2 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 2026-01-08T15:31:14.924467Z node 2 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2026-01-08T15:31:15.310803Z node 2 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 2026-01-08T15:31:15.330864Z node 2 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2026-01-08T15:31:15.710727Z node 2 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 2026-01-08T15:31:15.727443Z node 2 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2026-01-08T15:31:16.170586Z node 2 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 2026-01-08T15:31:16.188155Z node 2 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2026-01-08T15:31:16.594876Z node 2 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 2026-01-08T15:31:16.612692Z node 2 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2026-01-08T15:31:17.006868Z node 2 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 2026-01-08T15:31:17.023130Z node 2 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2026-01-08T15:31:17.418916Z node 2 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 2026-01-08T15:31:17.432762Z node 2 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2026-01-08T15:31:17.822810Z node 2 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 2026-01-08T15:31:17.834921Z node 2 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2026-01-08T15:31:18.282661Z node 2 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 2026-01-08T15:31:18.297221Z node 2 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2026-01-08T15:31:18.738669Z node 2 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 2026-01-08T15:31:18.752494Z node 2 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2026-01-08T15:31:19.188181Z node 2 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 
2026-01-08T15:31:19.211200Z node 2 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2026-01-08T15:31:19.597437Z node 2 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 2026-01-08T15:31:19.619617Z node 2 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2026-01-08T15:31:20.022778Z node 2 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 2026-01-08T15:31:20.036858Z node 2 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2026-01-08T15:31:20.502599Z node 2 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 2026-01-08T15:31:20.521195Z node 2 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2026-01-08T15:31:20.921272Z node 2 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 2026-01-08T15:31:20.935215Z node 2 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2026-01-08T15:31:21.346061Z node 2 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 2026-01-08T15:31:21.375058Z node 2 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2026-01-08T15:31:21.789049Z node 2 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 2026-01-08T15:31:21.803152Z node 2 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2026-01-08T15:31:22.202684Z node 2 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 2026-01-08T15:31:22.230980Z node 2 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2026-01-08T15:31:22.707388Z node 2 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 2026-01-08T15:31:22.723651Z node 2 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2026-01-08T15:31:23.130236Z node 2 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [7205759403 ... 
026-01-08T15:32:01.347682Z node 4 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2026-01-08T15:32:01.790733Z node 4 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 2026-01-08T15:32:01.807308Z node 4 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2026-01-08T15:32:02.214959Z node 4 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 2026-01-08T15:32:02.235055Z node 4 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2026-01-08T15:32:02.617582Z node 4 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 2026-01-08T15:32:02.632702Z node 4 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2026-01-08T15:32:02.941649Z node 4 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 2026-01-08T15:32:02.959228Z node 4 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2026-01-08T15:32:03.379905Z node 4 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 2026-01-08T15:32:03.406225Z node 4 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2026-01-08T15:32:03.870750Z node 4 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 2026-01-08T15:32:03.882965Z node 4 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2026-01-08T15:32:04.288972Z node 4 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 2026-01-08T15:32:04.310112Z node 4 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2026-01-08T15:32:04.731602Z node 4 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 2026-01-08T15:32:04.743766Z node 4 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2026-01-08T15:32:05.142739Z node 4 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 2026-01-08T15:32:05.158895Z node 4 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2026-01-08T15:32:05.550694Z node 4 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 2026-01-08T15:32:05.566845Z node 4 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2026-01-08T15:32:05.990641Z node 4 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 2026-01-08T15:32:06.009729Z node 4 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2026-01-08T15:32:06.414812Z node 4 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 2026-01-08T15:32:06.431142Z node 4 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2026-01-08T15:32:06.834922Z node 4 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 2026-01-08T15:32:06.850970Z node 4 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2026-01-08T15:32:07.240962Z node 4 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 2026-01-08T15:32:07.257949Z node 4 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2026-01-08T15:32:07.676026Z node 4 :KESUS_TABLET DEBUG: 
tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 2026-01-08T15:32:07.688820Z node 4 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2026-01-08T15:32:08.230646Z node 4 :KESUS_TABLET DEBUG: tx_semaphore_timeout.cpp:30: [72057594037927937] TTxSemaphoreTimeout::Execute (session=2, semaphore=1) 2026-01-08T15:32:08.230747Z node 4 :KESUS_TABLET DEBUG: tablet_db.cpp:124: [72057594037927937] Deleting session 2 / semaphore 1 "Lock1" waiter link 2026-01-08T15:32:08.252466Z node 4 :KESUS_TABLET DEBUG: tx_semaphore_timeout.cpp:71: [72057594037927937] TTxSemaphoreTimeout::Complete (session=2, semaphore=1) 2026-01-08T15:32:08.275125Z node 4 :KESUS_TABLET DEBUG: tx_sessions_describe.cpp:23: [72057594037927937] TTxSessionsDescribe::Execute (sender=[4:598:2535], cookie=11741508310746256117) 2026-01-08T15:32:08.275262Z node 4 :KESUS_TABLET DEBUG: tx_sessions_describe.cpp:48: [72057594037927937] TTxSessionsDescribe::Complete (sender=[4:598:2535], cookie=11741508310746256117) 2026-01-08T15:32:08.275840Z node 4 :KESUS_TABLET DEBUG: tx_sessions_describe.cpp:23: [72057594037927937] TTxSessionsDescribe::Execute (sender=[4:601:2538], cookie=3034385633954728824) 2026-01-08T15:32:08.275912Z node 4 :KESUS_TABLET DEBUG: tx_sessions_describe.cpp:48: [72057594037927937] TTxSessionsDescribe::Complete (sender=[4:601:2538], cookie=3034385633954728824) 2026-01-08T15:32:08.276429Z node 4 :KESUS_TABLET DEBUG: tx_semaphore_describe.cpp:29: [72057594037927937] TTxSemaphoreDescribe::Execute (sender=[4:604:2541], cookie=5941330987996641303, name="Lock1") 2026-01-08T15:32:08.276504Z node 4 :KESUS_TABLET DEBUG: tx_semaphore_describe.cpp:134: [72057594037927937] TTxSemaphoreDescribe::Complete (sender=[4:604:2541], cookie=5941330987996641303) 2026-01-08T15:32:08.277018Z node 4 :KESUS_TABLET DEBUG: tx_semaphore_describe.cpp:29: [72057594037927937] TTxSemaphoreDescribe::Execute (sender=[4:607:2544], cookie=2766442129791538168, name="Lock1") 2026-01-08T15:32:08.277090Z node 4 :KESUS_TABLET DEBUG: tx_semaphore_describe.cpp:134: [72057594037927937] TTxSemaphoreDescribe::Complete (sender=[4:607:2544], cookie=2766442129791538168) 2026-01-08T15:32:09.010036Z node 5 :KESUS_TABLET INFO: tablet_impl.cpp:71: OnActivateExecutor: 72057594037927937 2026-01-08T15:32:09.010140Z node 5 :KESUS_TABLET DEBUG: tx_init_schema.cpp:14: [72057594037927937] TTxInitSchema::Execute 2026-01-08T15:32:09.049543Z node 5 :KESUS_TABLET DEBUG: tx_init_schema.cpp:21: [72057594037927937] TTxInitSchema::Complete 2026-01-08T15:32:09.050114Z node 5 :KESUS_TABLET DEBUG: tx_init.cpp:30: [72057594037927937] TTxInit::Execute 2026-01-08T15:32:09.089659Z node 5 :KESUS_TABLET DEBUG: tx_init.cpp:242: [72057594037927937] TTxInit::Complete 2026-01-08T15:32:09.090276Z node 5 :KESUS_TABLET DEBUG: tx_session_attach.cpp:35: [72057594037927937] TTxSessionAttach::Execute (sender=[5:136:2161], cookie=10907377477315181087, session=0, seqNo=0) 2026-01-08T15:32:09.090455Z node 5 :KESUS_TABLET DEBUG: tx_session_attach.cpp:118: [72057594037927937] Created new session 1 2026-01-08T15:32:09.109338Z node 5 :KESUS_TABLET DEBUG: tx_session_attach.cpp:140: [72057594037927937] TTxSessionAttach::Complete (sender=[5:136:2161], cookie=10907377477315181087, session=1) 2026-01-08T15:32:09.109701Z node 5 :KESUS_TABLET DEBUG: tx_session_attach.cpp:35: [72057594037927937] TTxSessionAttach::Execute (sender=[5:136:2161], cookie=26108964545415113, session=0, seqNo=0) 2026-01-08T15:32:09.109845Z node 5 :KESUS_TABLET DEBUG: tx_session_attach.cpp:118: 
[72057594037927937] Created new session 2 2026-01-08T15:32:09.126021Z node 5 :KESUS_TABLET DEBUG: tx_session_attach.cpp:140: [72057594037927937] TTxSessionAttach::Complete (sender=[5:136:2161], cookie=26108964545415113, session=2) 2026-01-08T15:32:09.126400Z node 5 :KESUS_TABLET DEBUG: tx_session_attach.cpp:35: [72057594037927937] TTxSessionAttach::Execute (sender=[5:136:2161], cookie=15514330551242667524, session=0, seqNo=0) 2026-01-08T15:32:09.126541Z node 5 :KESUS_TABLET DEBUG: tx_session_attach.cpp:118: [72057594037927937] Created new session 3 2026-01-08T15:32:09.151716Z node 5 :KESUS_TABLET DEBUG: tx_session_attach.cpp:140: [72057594037927937] TTxSessionAttach::Complete (sender=[5:136:2161], cookie=15514330551242667524, session=3) 2026-01-08T15:32:09.152366Z node 5 :KESUS_TABLET DEBUG: tx_semaphore_create.cpp:32: [72057594037927937] TTxSemaphoreCreate::Execute (sender=[5:150:2172], cookie=4270246311825946218, name="Sem1", limit=3) 2026-01-08T15:32:09.152523Z node 5 :KESUS_TABLET DEBUG: tx_semaphore_create.cpp:104: [72057594037927937] Created new semaphore 1 "Sem1" 2026-01-08T15:32:09.179134Z node 5 :KESUS_TABLET DEBUG: tx_semaphore_create.cpp:112: [72057594037927937] TTxSemaphoreCreate::Complete (sender=[5:150:2172], cookie=4270246311825946218) 2026-01-08T15:32:09.179507Z node 5 :KESUS_TABLET DEBUG: tx_semaphore_acquire.cpp:48: [72057594037927937] TTxSemaphoreAcquire::Execute (sender=[5:136:2161], cookie=111, session=1, semaphore="Sem1" count=2) 2026-01-08T15:32:09.179659Z node 5 :KESUS_TABLET DEBUG: tablet_db.cpp:152: [72057594037927937] Processing semaphore 1 "Sem1" queue: next order #1 session 1 2026-01-08T15:32:09.179888Z node 5 :KESUS_TABLET DEBUG: tx_semaphore_acquire.cpp:48: [72057594037927937] TTxSemaphoreAcquire::Execute (sender=[5:136:2161], cookie=222, session=2, semaphore="Sem1" count=2) 2026-01-08T15:32:09.180097Z node 5 :KESUS_TABLET DEBUG: tx_semaphore_acquire.cpp:48: [72057594037927937] TTxSemaphoreAcquire::Execute (sender=[5:136:2161], cookie=333, session=3, semaphore="Sem1" count=1) 2026-01-08T15:32:09.196099Z node 5 :KESUS_TABLET DEBUG: tx_semaphore_acquire.cpp:263: [72057594037927937] TTxSemaphoreAcquire::Complete (sender=[5:136:2161], cookie=111) 2026-01-08T15:32:09.196215Z node 5 :KESUS_TABLET DEBUG: tx_semaphore_acquire.cpp:263: [72057594037927937] TTxSemaphoreAcquire::Complete (sender=[5:136:2161], cookie=222) 2026-01-08T15:32:09.196251Z node 5 :KESUS_TABLET DEBUG: tx_semaphore_acquire.cpp:263: [72057594037927937] TTxSemaphoreAcquire::Complete (sender=[5:136:2161], cookie=333) 2026-01-08T15:32:09.196936Z node 5 :KESUS_TABLET DEBUG: tx_semaphore_describe.cpp:29: [72057594037927937] TTxSemaphoreDescribe::Execute (sender=[5:159:2181], cookie=4748429342659224284, name="Sem1") 2026-01-08T15:32:09.197036Z node 5 :KESUS_TABLET DEBUG: tx_semaphore_describe.cpp:134: [72057594037927937] TTxSemaphoreDescribe::Complete (sender=[5:159:2181], cookie=4748429342659224284) 2026-01-08T15:32:09.197537Z node 5 :KESUS_TABLET DEBUG: tx_semaphore_describe.cpp:29: [72057594037927937] TTxSemaphoreDescribe::Execute (sender=[5:162:2184], cookie=1588686779836941114, name="Sem1") 2026-01-08T15:32:09.197622Z node 5 :KESUS_TABLET DEBUG: tx_semaphore_describe.cpp:134: [72057594037927937] TTxSemaphoreDescribe::Complete (sender=[5:162:2184], cookie=1588686779836941114) 2026-01-08T15:32:09.197871Z node 5 :KESUS_TABLET DEBUG: tx_semaphore_release.cpp:37: [72057594037927937] TTxSemaphoreRelease::Execute (sender=[5:136:2161], cookie=444, name="Sem1") 2026-01-08T15:32:09.197974Z node 5 
:KESUS_TABLET DEBUG: tablet_db.cpp:98: [72057594037927937] Deleting session 1 / semaphore 1 "Sem1" owner link 2026-01-08T15:32:09.198045Z node 5 :KESUS_TABLET DEBUG: tablet_db.cpp:152: [72057594037927937] Processing semaphore 1 "Sem1" queue: next order #2 session 2 2026-01-08T15:32:09.198100Z node 5 :KESUS_TABLET DEBUG: tablet_db.cpp:152: [72057594037927937] Processing semaphore 1 "Sem1" queue: next order #3 session 3 2026-01-08T15:32:09.215798Z node 5 :KESUS_TABLET DEBUG: tx_semaphore_release.cpp:93: [72057594037927937] TTxSemaphoreRelease::Complete (sender=[5:136:2161], cookie=444) 2026-01-08T15:32:09.216567Z node 5 :KESUS_TABLET DEBUG: tx_semaphore_describe.cpp:29: [72057594037927937] TTxSemaphoreDescribe::Execute (sender=[5:167:2189], cookie=16622983986494862058, name="Sem1") 2026-01-08T15:32:09.216673Z node 5 :KESUS_TABLET DEBUG: tx_semaphore_describe.cpp:134: [72057594037927937] TTxSemaphoreDescribe::Complete (sender=[5:167:2189], cookie=16622983986494862058) 2026-01-08T15:32:09.217195Z node 5 :KESUS_TABLET DEBUG: tx_semaphore_describe.cpp:29: [72057594037927937] TTxSemaphoreDescribe::Execute (sender=[5:170:2192], cookie=7806817838446984254, name="Sem1") 2026-01-08T15:32:09.217273Z node 5 :KESUS_TABLET DEBUG: tx_semaphore_describe.cpp:134: [72057594037927937] TTxSemaphoreDescribe::Complete (sender=[5:170:2192], cookie=7806817838446984254) |86.6%| [TM] {BAZEL_UPLOAD} ydb/core/kesus/tablet/ut/unittest >> TxUsage::WriteToTopic_Demo_19_RestartNo_Table [GOOD] >> KqpPg::InsertValuesFromTableWithDefaultAndCast+useSink [GOOD] >> KqpPg::InsertValuesFromTableWithDefaultAndCast-useSink |86.6%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/replication/service/ut_topic_reader/ydb-core-tx-replication-service-ut_topic_reader ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/tx/unittest >> KqpSinkMvcc::OltpMultiSinks [GOOD] Test command err: Trying to start YDB, gRPC: 11571, MsgBus: 4140 2026-01-08T15:31:07.568566Z node 1 :METADATA_PROVIDER WARN: log.cpp:841: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7593013701842839218:2198];send_to=[0:7307199536658146131:7762515]; 2026-01-08T15:31:07.571040Z node 1 :METADATA_PROVIDER ERROR: log.cpp:841: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2026-01-08T15:31:07.604788Z node 1 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:125: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, got undelivered to scheme cache: ActorUnknown 2026-01-08T15:31:08.055150Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2026-01-08T15:31:08.055268Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2026-01-08T15:31:08.057633Z node 1 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: LookupError, path: Root/.metadata/script_executions 2026-01-08T15:31:08.108538Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2026-01-08T15:31:08.201921Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1374: Notification cookie mismatch for subscription [1:7593013701842839057:2081] 1767886267546053 != 1767886267546056 2026-01-08T15:31:08.231682Z 
node 1 :IMPORT WARN: schemeshard_import.cpp:440: Table profiles were not loaded 2026-01-08T15:31:08.285098Z node 1 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: PathErrorUnknown, path: Root/.metadata/script_executions 2026-01-08T15:31:08.583459Z node 1 :TX_CONVEYOR ERROR: log.cpp:841: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2026-01-08T15:31:08.644783Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2026-01-08T15:31:08.644805Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2026-01-08T15:31:08.644811Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2026-01-08T15:31:08.644891Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2026-01-08T15:31:09.338861Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2026-01-08T15:31:09.351990Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:123: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2026-01-08T15:31:12.566996Z node 1 :METADATA_PROVIDER ERROR: log.cpp:841: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7593013701842839218:2198];send_to=[0:7307199536658146131:7762515]; 2026-01-08T15:31:12.567081Z node 1 :METADATA_PROVIDER ERROR: log.cpp:841: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2026-01-08T15:31:12.746867Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7593013723317676429:2325], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:31:12.747039Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:263: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:31:12.747981Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7593013723317676441:2328], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:31:12.748041Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7593013723317676442:2329], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:31:12.748399Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:263: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:31:12.752479Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2026-01-08T15:31:12.773564Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7593013723317676445:2330], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710658 completed, doublechecking } 2026-01-08T15:31:12.944444Z node 1 :TX_PROXY ERROR: schemereq.cpp:624: Actor# [1:7593013723317676496:2540] txid# 281474976710659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 41], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2026-01-08T15:31:13.326005Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateColumnTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/olap/operations/create_table.cpp:814) 2026-01-08T15:31:13.544073Z node 1 :TX_COLUMNSHARD WARN: log.cpp:841: tablet_id=72075186224037893;self_id=[1:7593013727612643936:2343];tablet_id=72075186224037893;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2026-01-08T15:31:13.544309Z node 1 :TX_COLUMNSHARD WARN: log.cpp:841: tablet_id=72075186224037893;self_id=[1:7593013727612643936:2343];tablet_id=72075186224037893;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2026-01-08T15:31:13.544516Z node 1 :TX_COLUMNSHARD WARN: log.cpp:841: tablet_id=72075186224037893;self_id=[1:7593013727612643936:2343];tablet_id=72075186224037893;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2026-01-08T15:31:13.544631Z node 1 :TX_COLUMNSHARD WARN: log.cpp:841: tablet_id=72075186224037893;self_id=[1:7593013727612643936:2343];tablet_id=72075186224037893;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2026-01-08T15:31:13.544718Z node 1 :TX_COLUMNSHARD WARN: log.cpp:841: tablet_id=72075186224037893;self_id=[1:7593013727612643936:2343];tablet_id=72075186224037893;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2026-01-08T15:31:13.544823Z node 1 :TX_COLUMNSHARD WARN: log.cpp:841: tablet_id=72075186224037893;self_id=[1:7593013727612643936:2343];tablet_id=72075186224037893;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2026-01-08T15:31:13.544914Z node 1 :TX_COLUMNSHARD WARN: log.cpp:841: tablet_id=72075186224037893;self_id=[1:7593013727612643936:2343];tablet_id=72075186224037893;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2026-01-08T15:31:13.545020Z node 1 :TX_COLUMNSHARD WARN: log.cpp:841: tablet_id=72075186224037893;self_id=[1:7593013727612643936:2343];tablet_id=72075186224037893;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2026-01-08T15:31:13.545106Z node 1 :TX_COLUMNSHARD WARN: log.cpp:841: tablet_id=72075186224037893;self_id=[1:7593013727612643936:2343];tablet_id=72075186224037893;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2026-01-08T15:31:13.545215Z node 1 :TX_COLUMNSHARD WARN: log.cpp:841: 
tablet_id=72075186224037893;self_id=[1:7593013727612643936:2343];tablet_id=72075186224037893;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2026-01-08T15:31:13.545310Z node 1 :TX_COLUMNSHARD WARN: log.cpp:841: tablet_id=72075186224037893;self_id=[1:7593013727612643936:2343];tablet_id=72075186224037893;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2026-01-08T15:31:13.545393Z node 1 :TX_COLUMNSHARD WARN: log.cpp:841: tablet_id=72075186224037893;self_id=[1:7593013727612643936:2343];tablet_id=72075186224037893;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CopyBlobIdsToV2; 2026-01-08T15:31:13.545498Z node 1 :TX_COLUMNSHARD WARN: log.cpp:841: tablet_id=72075186224037893;self_id=[1:7593013727612643936:2343];tablet_id=72075186224037893;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreAppearanceSnapshot; 2026-01-08T15:31:13.549677Z node 1 :TX_COLUMNSHARD WARN: log.cpp:841: tablet_id=72075186224037895;self_id=[1:7593013727612643935:2342];tablet_id=72075186224037895;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2026-01-08T15:31:13.549740Z node 1 :TX_COLUMNSHARD WARN: log.cpp:841: tablet_id=72075186224037895;self_id=[1:7593013727612643935:2342];tablet_id=72075186224037895;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2026-01-08T15:31:13.549917Z node 1 :TX_COLUMNSHARD WARN: log.cpp:841: tablet_id=72075186224037895;self_id=[1:7593013727612643935:2342];tablet_id=72075186224037895;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2026-01-08T15:31:13.550022Z node 1 :TX_COLUMNSHARD WARN: log.cpp:841: tablet_id=72075186224037895;self_id=[1:7593013727612643935:2342];tablet_id=72075186224037895;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2026-01-08T15:31:13.550122Z node 1 :TX_COLUMNSHARD WARN: log.cpp:841: tablet_id=72075186224037895;self_id=[1:7593013727612643935:2342];tablet_id=72075186224037895;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2026-01-08T15:31:13.550205Z node 1 :TX_COLUMNSHARD WARN: log.cpp:841: tablet_id=72075186224037895;self_id=[1:7593013727612643935:2342];tablet_id=72075186224037895;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCou ... : fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[3:7593013852905506072:2247];send_to=[0:7307199536658146131:7762515]; 2026-01-08T15:31:47.107478Z node 3 :METADATA_PROVIDER ERROR: log.cpp:841: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2026-01-08T15:31:48.274172Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7593013878675310516:2326], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:31:48.274364Z node 3 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:263: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:31:48.274670Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7593013878675310552:2330], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:31:48.275840Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7593013878675310554:2331], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:31:48.275959Z node 3 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:263: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:31:48.279872Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2026-01-08T15:31:48.294364Z node 3 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [3:7593013878675310555:2332], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710658 completed, doublechecking } 2026-01-08T15:31:48.358183Z node 3 :TX_PROXY ERROR: schemereq.cpp:624: Actor# [3:7593013878675310607:2548] txid# 281474976710659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 41], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2026-01-08T15:31:48.437977Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:31:48.564046Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:31:50.227694Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) Trying to start YDB, gRPC: 12703, MsgBus: 3194 2026-01-08T15:31:54.386137Z node 4 :METADATA_PROVIDER WARN: log.cpp:841: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[4:7593013902851940707:2144];send_to=[0:7307199536658146131:7762515]; 2026-01-08T15:31:54.387884Z node 4 :METADATA_PROVIDER ERROR: log.cpp:841: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2026-01-08T15:31:54.414426Z node 4 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: LookupError, path: Root/.metadata/script_executions 2026-01-08T15:31:54.630421Z node 4 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: LookupError, path: Root/.metadata/script_executions 2026-01-08T15:31:54.698652Z node 4 :IMPORT WARN: schemeshard_import.cpp:440: Table profiles were not loaded 2026-01-08T15:31:54.700700Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2026-01-08T15:31:54.700796Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2026-01-08T15:31:54.706279Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Connecting -> Connected 2026-01-08T15:31:55.003470Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2026-01-08T15:31:55.003496Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2026-01-08T15:31:55.003506Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 
2026-01-08T15:31:55.003623Z node 4 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2026-01-08T15:31:55.117394Z node 4 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: PathErrorUnknown, path: Root/.metadata/script_executions 2026-01-08T15:31:55.386608Z node 4 :TX_CONVEYOR ERROR: log.cpp:841: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2026-01-08T15:31:56.199344Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2026-01-08T15:31:59.375342Z node 4 :METADATA_PROVIDER ERROR: log.cpp:841: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[4:7593013902851940707:2144];send_to=[0:7307199536658146131:7762515]; 2026-01-08T15:31:59.375433Z node 4 :METADATA_PROVIDER ERROR: log.cpp:841: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2026-01-08T15:32:00.772911Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7593013928621745245:2325], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:32:00.773110Z node 4 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:263: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:32:00.773697Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7593013928621745281:2330], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:32:00.779117Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2026-01-08T15:32:00.793549Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7593013928621745317:2332], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:32:00.793656Z node 4 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:263: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:32:00.799860Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:123: NotifyTxCompletion, unknown transaction, txId: 281474976710658, at schemeshard: 72057594046644480 2026-01-08T15:32:00.801674Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7593013928621745326:2334], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:32:00.801815Z node 4 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [4:7593013928621745283:2331], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710658 completed, doublechecking } 2026-01-08T15:32:00.801876Z node 4 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:263: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:32:00.865338Z node 4 :TX_PROXY ERROR: schemereq.cpp:624: Actor# [4:7593013928621745339:2545] txid# 281474976710659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 41], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2026-01-08T15:32:00.937518Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:32:01.046507Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:32:02.912588Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) |86.6%| [LD] {RESULT} $(B)/ydb/core/tx/replication/service/ut_topic_reader/ydb-core-tx-replication-service-ut_topic_reader |86.6%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/replication/service/ut_topic_reader/ydb-core-tx-replication-service-ut_topic_reader |86.6%| [TM] {BAZEL_UPLOAD} ydb/core/kqp/ut/tx/unittest |86.6%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/kqp/ut/data_integrity/ydb-core-kqp-ut-data_integrity |86.6%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/kqp/ut/data_integrity/ydb-core-kqp-ut-data_integrity |86.6%| [LD] {RESULT} $(B)/ydb/core/kqp/ut/data_integrity/ydb-core-kqp-ut-data_integrity >> TxUsage::WriteToTopic_Demo_19_RestartNo_Query >> TControlPlaneProxyTest::ShouldSendModifyQuery [GOOD] >> TControlPlaneProxyTest::ShouldSendDeleteQuery >> TxUsage::Sinks_Oltp_WriteToTopicAndTable_1_Query [GOOD] >> TxUsage::Sinks_Oltp_WriteToTopicAndTable_2_Table >> KqpPg::DropTablePg [GOOD] >> KqpPg::DropTablePgMultiple >> TControlPlaneProxyTest::ShouldSendDeleteQuery [GOOD] >> TControlPlaneProxyTest::ShouldSendControlQuery >> KqpYql::UuidPrimaryKey |86.6%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/schemeshard/ut_sysview/ydb-core-tx-schemeshard-ut_sysview |86.6%| [LD] {RESULT} $(B)/ydb/core/tx/schemeshard/ut_sysview/ydb-core-tx-schemeshard-ut_sysview |86.6%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/schemeshard/ut_sysview/ydb-core-tx-schemeshard-ut_sysview >> KqpYql::EvaluateExpr1 >> TSelectFromViewTest::QueryCacheIsUpdated [GOOD] >> TControlPlaneProxyTest::ShouldSendControlQuery [GOOD] >> TControlPlaneProxyTest::ShouldSendGetResultData >> TxUsage::Sinks_Oltp_WriteToTopic_1_Table [GOOD] >> KqpPg::TypeCoercionInsert-useSink [GOOD] >> KqpPg::V1CreateTable >> NFq::TRegisterCheckTestBase::ShouldRegisterCheckNextGeneration >> TControlPlaneProxyTest::ShouldSendGetResultData [GOOD] >> TControlPlaneProxyTest::ShouldSendListJobs >> TMLPTests::SetRetentionStorage [GOOD] >> KqpScripting::ScanQuery [GOOD] >> 
KqpScripting::ScanQueryDisable >> KqpPg::InsertValuesFromTableWithDefaultAndCast-useSink [GOOD] >> KqpPg::InsertValuesFromTableWithDefaultBool+useSink >> KqpPg::InsertValuesFromTableWithDefaultTextNotNull-useSink [GOOD] >> KqpPg::InsertValuesFromTableWithDefaultTextNotNullButNull+useSink >> BasicUsage::CreateTopicWithSharedConsumer_DeleteDeadLetterPolicy [GOOD] >> BasicUsage::CreateTopicWithSharedConsumer_DisabledDeadLetterPolicy >> KeyValueGRPCService::SimpleGetStorageChannelStatusV2 [GOOD] >> KeyValueGRPCService::SimpleCreateAlterDropVolume >> KqpScripting::StreamExecuteYqlScriptMixed [GOOD] >> KqpScripting::StreamExecuteYqlScriptLeadingEmptyScan >> TControlPlaneProxyTest::ShouldSendListJobs [GOOD] >> TControlPlaneProxyTest::ShouldSendDescribeJob >> KqpPg::CopyTableSerialColumns+useSink [GOOD] >> KqpPg::CopyTableSerialColumns-useSink |86.6%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/s3/py3test >> test_s3.py::TestYdbS3TTL::test_s3[table_ttl_Datetime-pk_types16-all_types16-index16-Datetime--] [GOOD] |86.6%| [TM] {BAZEL_UPLOAD} ydb/tests/datashard/s3/py3test >> DataShardVolatile::DistributedWriteLaterSnapshotBlockedThenCommit-UseSink [GOOD] >> DataShardVolatile::DistributedWriteLaterSnapshotBlockedThenAbort+UseSink >> TxUsage::Sinks_Oltp_WriteToTopic_1_Query >> NFq::TRegisterCheckTestBase::ShouldRegisterCheckNextGeneration [GOOD] >> NFq::TRegisterCheckTestBase::ShouldRegisterCheckNewGenerationAndTransact >> TControlPlaneProxyTest::ShouldSendDescribeJob [GOOD] >> TControlPlaneProxyTest::ShouldSendCreateConnection |86.6%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/schemeshard/ut_user_attributes/ydb-core-tx-schemeshard-ut_user_attributes |86.7%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/schemeshard/ut_user_attributes/ydb-core-tx-schemeshard-ut_user_attributes |86.7%| [LD] {RESULT} $(B)/ydb/core/tx/schemeshard/ut_user_attributes/ydb-core-tx-schemeshard-ut_user_attributes |86.7%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/tx_proxy/ut_ext_tenant/ydb-core-tx-tx_proxy-ut_ext_tenant |86.7%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/tx_proxy/ut_ext_tenant/ydb-core-tx-tx_proxy-ut_ext_tenant |86.7%| [LD] {RESULT} $(B)/ydb/core/tx/tx_proxy/ut_ext_tenant/ydb-core-tx-tx_proxy-ut_ext_tenant >> NFq::TRegisterCheckTestBase::ShouldRegisterCheckNewGenerationAndTransact [GOOD] >> TxUsage::WriteToTopic_Demo_42_Table [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/view/unittest >> TSelectFromViewTest::QueryCacheIsUpdated [GOOD] Test command err: Trying to start YDB, gRPC: 27746, MsgBus: 7255 2026-01-08T15:27:23.224616Z node 1 :METADATA_PROVIDER WARN: log.cpp:841: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7593012740612353071:2146];send_to=[0:7307199536658146131:7762515]; 2026-01-08T15:27:23.225224Z node 1 :METADATA_PROVIDER ERROR: log.cpp:841: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2026-01-08T15:27:24.640235Z node 1 :TX_CONVEYOR ERROR: log.cpp:841: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2026-01-08T15:27:25.876499Z node 1 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: LookupError, path: Root/.metadata/script_executions 2026-01-08T15:27:26.722395Z node 1 :HIVE WARN: node_info.cpp:25: 
HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2026-01-08T15:27:26.724349Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2026-01-08T15:27:26.983185Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2026-01-08T15:27:27.011521Z node 1 :IMPORT WARN: schemeshard_import.cpp:440: Table profiles were not loaded 2026-01-08T15:27:27.015941Z node 1 :METADATA_PROVIDER ERROR: log.cpp:841: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2026-01-08T15:27:27.021444Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1374: Notification cookie mismatch for subscription [1:7593012740612352961:2081] 1767886043182512 != 1767886043182515 2026-01-08T15:27:27.149996Z node 1 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: PathErrorUnknown, path: Root/.metadata/script_executions 2026-01-08T15:27:28.338877Z node 1 :METADATA_PROVIDER ERROR: log.cpp:841: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7593012740612353071:2146];send_to=[0:7307199536658146131:7762515]; 2026-01-08T15:27:28.339387Z node 1 :METADATA_PROVIDER ERROR: log.cpp:841: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2026-01-08T15:27:28.973338Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2026-01-08T15:27:28.973357Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2026-01-08T15:27:28.973363Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2026-01-08T15:27:28.975953Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2026-01-08T15:27:34.424926Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2026-01-08T15:27:35.049915Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:123: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2026-01-08T15:27:40.218937Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7747: Cannot get console configs 2026-01-08T15:27:40.218960Z node 1 :IMPORT WARN: schemeshard_import.cpp:440: Table profiles were not loaded 2026-01-08T15:27:46.670811Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7593012839396601776:2358], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:27:46.678654Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:263: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:27:46.690486Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7593012839396601785:2359], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:27:46.695270Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:263: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } Trying to start YDB, gRPC: 4206, MsgBus: 28518 2026-01-08T15:27:54.772076Z node 2 :METADATA_PROVIDER WARN: log.cpp:841: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7593012874705237714:2064];send_to=[0:7307199536658146131:7762515]; 2026-01-08T15:27:54.779938Z node 2 :METADATA_PROVIDER ERROR: log.cpp:841: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2026-01-08T15:27:55.258688Z node 2 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:125: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, got undelivered to scheme cache: ActorUnknown 2026-01-08T15:27:55.545735Z node 2 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: LookupError, path: Root/.metadata/script_executions 2026-01-08T15:27:55.795767Z node 2 :METADATA_PROVIDER ERROR: log.cpp:841: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2026-01-08T15:27:56.051476Z node 2 :TX_CONVEYOR ERROR: log.cpp:841: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2026-01-08T15:27:56.186518Z node 2 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: LookupError, path: Root/.metadata/script_executions 2026-01-08T15:27:56.774800Z node 2 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: LookupError, path: Root/.metadata/script_executions 2026-01-08T15:27:56.866123Z node 2 :METADATA_PROVIDER ERROR: log.cpp:841: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2026-01-08T15:27:56.923980Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2026-01-08T15:27:56.924271Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2026-01-08T15:27:57.001301Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2026-01-08T15:27:57.215069Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1374: Notification cookie mismatch for subscription [2:7593012874705237690:2081] 1767886074746981 != 1767886074746984 2026-01-08T15:27:57.247472Z node 2 :IMPORT WARN: schemeshard_import.cpp:440: Table profiles were not loaded 2026-01-08T15:27:58.268434Z node 2 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: PathErrorUnknown, path: Root/.metadata/script_executions 2026-01-08T15:27:59.100227Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2026-01-08T15:27:59.100244Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2026-01-08T15:27:59.100251Z node 2 
:NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2026-01-08T15:27:59.100768Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2026-01-08T15:27:59.767041Z node 2 :METADATA_PROVIDER ERROR: log.cpp:841: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[2:7593012874705237714:2064];send_to=[0:7307199536658146131:7762515]; 2026-01-08T15:27:59.768916Z node 2 :METADATA_PROVIDER ERROR: log.cpp:841: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2026-01-08T15:28:05.927307Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2026-01-08T15:28:05.933611Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:123: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2026-01-08T15:28:08.010489Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7593012934834780781:2340], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:28:08.010631Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:263: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:28:08.011283Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7593012934834780791:2341], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:28:08.011341Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:263: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } Trying to start YDB, gRPC: 23932, MsgBus: 12807 2026-01-08T15:28:09.292642Z node 3 :METADATA_PROVIDER WARN: log.cpp:841: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[3:7593012937381806021:2258];send_to=[0:7307199536658146131:7762515]; 2026-01-08T15:28:09.292911Z node 3 :METADATA_PROVIDER ERROR: log.cpp:841: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2026-01-08T15:28:09.330264Z node 3 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: LookupError, path: Root/.metadata/script_executions 2026-01-08T15:28:09.430733Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2026-01-08T15:28:09.430827Z no ... tagesKqpPhysical-PushFlatmapToStageKqpPhysical-BuildStreamLookupTableStagesKqpPhysical-BuildStreamIdxLookupJoinStagesKqpPhysical-PushExtractMembersToStageKqpPhysical-PushFlatmapToStageKqpPhysical-BuildSortStageKqpPhysical-RewriteKqpReadTableKqpPeepholeFinal-SetCombinerMemoryLimitKqpPeepholeNewOperator-RewriteWideCombinerToDqHashCombinerCompiled query: ( (let $1 (Read! world (DataSource '"kikimr" '"db") (Key '('table (String '"/Root/count_episodes_with_titles"))) (Void) '())) (let $2 (DataSink 'result)) (let $3 (Right! $1)) (let $4 (Write! (Left! $1) $2 (Key) (RemoveSystemMembers (Sort (PersistableRepr (SqlProject $3 '((SqlProjectStarItem (TypeOf $3) '"" (lambda '($5) $5) '())))) '((Bool 'true) (Bool 'true)) (lambda '($6) '((PersistableRepr (Member $6 '"series")) (PersistableRepr (Member $6 '"season")))))) '('('type) '('autoref)))) (return (Commit! $4 $2)) ) Optimized expr: ( (let $1 (DataSink '"kikimr" '"db")) (let $2 (DataSource '"kikimr" '"db")) (let $3 '('"season_id" '"series_id")) (let $4 (AggApply 'count_all (StructType) (lambda '($31) (Void)))) (let $5 (Aggregate (ExtractMembers (Right! (KiReadTable! world $2 (Key '('table (String '"/Root/episodes"))) (Void) '())) $3) $3 '('('Count0 $4)) '())) (let $6 '('"season_id" '"series_id" '"title")) (let $7 '('Inner '"episodes" '"series" '('"episodes" '"series_id") '('"series" '"series_id") '())) (let $8 '('"episodes" '"series_id" '"episodes" '"season_id")) (let $9 '('"seasons" '"series_id" '"seasons" '"season_id")) (let $10 '('Inner $7 '"seasons" $8 $9 '())) (let $11 '('"rename" '"episodes.Count0" '"episode_count")) (let $12 '('"rename" '"episodes.season_id" '"")) (let $13 '('"rename" '"episodes.series_id" '"")) (let $14 '('"rename" '"seasons.season_id" '"")) (let $15 '('"rename" '"seasons.series_id" '"")) (let $16 '('"rename" '"seasons.title" '"season")) (let $17 '('"rename" '"series.series_id" '"")) (let $18 '('"rename" '"series.title" '"series")) (let $19 '($11 $12 $13 $14 $15 $16 $17 $18)) (let $20 (EquiJoin '($5 '"episodes") '((ExtractMembers (Right! (KiReadTable! world $2 (Key '('table (String '"/Root/series"))) (Void) '())) '('"series_id" '"title")) '"series") '((ExtractMembers (Right! (KiReadTable! 
world $2 (Key '('table (String '"/Root/seasons"))) (Void) '())) $6) '"seasons") $10 $19)) (let $21 (Bool 'true)) (let $22 (Sort $20 '($21 $21) (lambda '($32) '((Member $32 '"series") (Member $32 '"season"))))) (let $23 '('"db" '"/Root/episodes" '"Select")) (let $24 '('"db" '"/Root/series" '"Select")) (let $25 '('"db" '"/Root/seasons" '"Select")) (let $26 '($23 $24 $25)) (let $27 '('('"mode" '"flush"))) (let $28 (KiExecDataQuery! world $1 (DataQueryBlocks (TKiDataQueryBlock '('($22 '() '0 '0)) (KiEffects) $26 '())) $27 (Void))) (let $29 (DataSink 'result)) (let $30 (ResPull! (Left! $28) $29 (Key) (Nth (Right! $28) '0) '('('type) '('autoref)) '"kikimr")) (return (Commit! (Commit! $30 $29) $1 $27)) ) KqpLogical-ApplyExtractMembersToReadTableKqpLogical-ApplyExtractMembersToReadTableKqpLogical-ApplyExtractMembersToReadTableKqpLogical-RewriteAggregateKqpLogical-RewriteEquiJoinKqpLogical-JoinToIndexLookupKqpLogical-JoinToIndexLookupKqpPhysical-BuildReadTableRangesStageKqpPhysical-PushAggregateCombineToStageKqpPhysical-ExpandAggregatePhaseKqpPhysical-ExpandAggregatePhaseKqpPhysical-ExpandAggregatePhaseKqpPhysical-ExpandAggregatePhaseKqpPhysical-BuildShuffleStageKqpPhysical-BuildStreamLookupTableStagesKqpPhysical-BuildStreamIdxLookupJoinStagesKqpPhysical-PushFlatmapToStageKqpPhysical-BuildStreamLookupTableStagesKqpPhysical-BuildStreamIdxLookupJoinStagesKqpPhysical-PushExtractMembersToStageKqpPhysical-PushFlatmapToStageKqpPhysical-BuildSortStageKqpPhysical-RewriteKqpReadTableKqpPeepholeFinal-SetCombinerMemoryLimitKqpPeepholeNewOperator-RewriteWideCombinerToDqHashCombinerCompiled query: ( (return (Write! world (DataSink '"kikimr" '"db") (Key '('objectId (String '"/Root/count_episodes_with_titles")) '('typeId (String '"VIEW"))) (Void) '('('mode 'dropObject)))) ) Optimized expr: ( (let $1 (DataSink '"kikimr" '"db")) (let $2 (KiDropObject! world $1 '"/Root/count_episodes_with_titles" '"VIEW" '() '0)) (return (Commit! 
$2 $1 '('('"mode" '"flush")))) ) Trying to start YDB, gRPC: 30069, MsgBus: 1719 2026-01-08T15:31:59.154444Z node 23 :METADATA_PROVIDER WARN: log.cpp:841: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[23:7593013923839596566:2064];send_to=[0:7307199536658146131:7762515]; 2026-01-08T15:31:59.183941Z node 23 :METADATA_PROVIDER ERROR: log.cpp:841: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2026-01-08T15:31:59.306512Z node 23 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: LookupError, path: Root/.metadata/script_executions 2026-01-08T15:31:59.460283Z node 23 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1374: Notification cookie mismatch for subscription [23:7593013923839596542:2081] 1767886319123044 != 1767886319123047 2026-01-08T15:31:59.460781Z node 23 :IMPORT WARN: schemeshard_import.cpp:440: Table profiles were not loaded 2026-01-08T15:31:59.464816Z node 23 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(23, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2026-01-08T15:31:59.464975Z node 23 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(23, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2026-01-08T15:31:59.514450Z node 23 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(23, (0,0,0,0)) VolatileState: Connecting -> Connected 2026-01-08T15:31:59.600349Z node 23 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: PathErrorUnknown, path: Root/.metadata/script_executions 2026-01-08T15:31:59.657018Z node 23 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2026-01-08T15:31:59.657060Z node 23 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2026-01-08T15:31:59.657091Z node 23 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2026-01-08T15:31:59.657262Z node 23 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2026-01-08T15:32:00.225645Z node 23 :TX_CONVEYOR ERROR: log.cpp:841: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2026-01-08T15:32:01.135857Z node 23 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2026-01-08T15:32:01.148601Z node 23 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:123: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2026-01-08T15:32:04.134631Z node 23 :METADATA_PROVIDER ERROR: log.cpp:841: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[23:7593013923839596566:2064];send_to=[0:7307199536658146131:7762515]; 2026-01-08T15:32:04.134774Z node 23 :METADATA_PROVIDER ERROR: log.cpp:841: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2026-01-08T15:32:11.371067Z node 23 
:KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [23:7593013975379205024:2340], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:32:11.371376Z node 23 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:263: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:32:11.372034Z node 23 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [23:7593013975379205034:2341], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:32:11.372170Z node 23 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:263: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:32:11.444051Z node 23 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:123: NotifyTxCompletion, unknown transaction, txId: 281474976710658, at schemeshard: 72057594046644480 2026-01-08T15:32:11.481109Z node 23 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [23:7593013975379205054:2343], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:32:11.481282Z node 23 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [23:7593013975379205059:2346], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:32:11.481314Z node 23 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:263: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:32:11.485826Z node 23 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [23:7593013975379205061:2347], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:32:11.486097Z node 23 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:263: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:32:11.489252Z node 23 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710659:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2026-01-08T15:32:11.515239Z node 23 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [23:7593013975379205062:2348], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710659 completed, doublechecking } 2026-01-08T15:32:11.620552Z node 23 :TX_PROXY ERROR: schemereq.cpp:624: Actor# [23:7593013975379205114:2581] txid# 281474976710660, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 42], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2026-01-08T15:32:12.196961Z node 23 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:123: NotifyTxCompletion, unknown transaction, txId: 281474976710662, at schemeshard: 72057594046644480 |86.7%| [TM] {BAZEL_UPLOAD} ydb/core/kqp/ut/view/unittest >> KqpPg::DropTablePgMultiple [GOOD] >> KqpPg::DropTableIfExists >> TxUsage::WriteToTopic_Demo_13_Table [GOOD] >> TControlPlaneProxyTest::ShouldSendCreateConnection [GOOD] >> TControlPlaneProxyTest::ShouldSendListConnections >> KqpPg::CreateUniqComplexPgColumn+useSink [GOOD] >> KqpPg::CreateUniqComplexPgColumn-useSink >> KqpScripting::StreamExecuteYqlScriptSeveralQueries >> DataShardVolatile::TwoAppendsMustBeVolatile+UseSink [GOOD] >> DataShardVolatile::TwoAppendsMustBeVolatile-UseSink >> TxUsage::WriteToTopic_Demo_13_Query >> TxUsage::WriteToTopic_Demo_42_Query >> KqpYql::UuidPrimaryKey [GOOD] >> DataShardStats::HasSchemaChanges_Columns [GOOD] >> DataShardStats::HasSchemaChanges_Families >> KqpYql::EvaluateExpr1 [GOOD] >> KqpYql::EvaluateExpr2 >> TControlPlaneProxyTest::ShouldSendListConnections [GOOD] >> TControlPlaneProxyTest::ShouldSendDescribeConnection ------- [TM] {asan, default-linux-x86_64, release} ydb/core/persqueue/pqtablet/partition/mlp/ut/unittest >> TMLPTests::SetRetentionStorage [GOOD] Test command err: 2026-01-08T15:27:08.885487Z node 1 :METADATA_PROVIDER WARN: log.cpp:841: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7593012673997793401:2175];send_to=[0:7307199536658146131:7762515]; 2026-01-08T15:27:08.898176Z node 1 :METADATA_PROVIDER ERROR: log.cpp:841: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2026-01-08T15:27:09.100831Z node 1 :PQ_READ_PROXY DEBUG: caching_service.cpp:44: Direct read cache: : Created 2026-01-08T15:27:09.348452Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2026-01-08T15:27:09.348563Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2026-01-08T15:27:09.386913Z node 1 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: LookupError, path: Root/.metadata/script_executions 2026-01-08T15:27:09.420671Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1374: Notification cookie mismatch for subscription [1:7593012673997793262:2081] 1767886028644507 != 1767886028644510 2026-01-08T15:27:09.422490Z node 1 :IMPORT WARN: schemeshard_import.cpp:440: Table profiles were not loaded 2026-01-08T15:27:09.423192Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2026-01-08T15:27:09.559275Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: /home/runner/.ya/build/build_root/663n/0000d8/r3tmp/yandexR8YBBE.tmp 2026-01-08T15:27:09.559298Z node 1 
:NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: /home/runner/.ya/build/build_root/663n/0000d8/r3tmp/yandexR8YBBE.tmp 2026-01-08T15:27:09.559423Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:202: successfully initialized from file: /home/runner/.ya/build/build_root/663n/0000d8/r3tmp/yandexR8YBBE.tmp 2026-01-08T15:27:09.559515Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2026-01-08T15:27:09.600905Z INFO: TTestServer started on Port 30739 GrpcPort 7553 2026-01-08T15:27:09.660682Z node 1 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: PathErrorUnknown, path: Root/.metadata/script_executions PQClient connected to localhost:7553 2026-01-08T15:27:09.841698Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2026-01-08T15:27:09.919399Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:27:10.014197Z node 1 :TX_CONVEYOR ERROR: log.cpp:841: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2026-01-08T15:27:10.094759Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:123: NotifyTxCompletion, unknown transaction, txId: 281474976710661, at schemeshard: 72057594046644480 2026-01-08T15:27:11.904051Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7593012686882696198:2333], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:27:11.904129Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7593012686882696190:2330], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:27:11.904244Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:263: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:27:11.904664Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7593012686882696205:2335], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:27:11.904705Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:263: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:27:11.908298Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710662:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2026-01-08T15:27:11.918649Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7593012686882696204:2334], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710662 completed, doublechecking } 2026-01-08T15:27:12.110386Z node 1 :TX_PROXY ERROR: schemereq.cpp:624: Actor# [1:7593012691177663569:2646] txid# 281474976710663, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 45], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2026-01-08T15:27:12.130050Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:27:12.162595Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:27:12.272802Z node 1 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:625: Compilation failed, self: [1:7593012691177663595:2345], status: SCHEME_ERROR, issues:
: Error: Type annotation, code: 1030
:5:17: Error: At function: KiReadTable!
:5:17: Error: Cannot find table 'db.[/Root/PQ/Config/V2/Versions]' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions., code: 2003 2026-01-08T15:27:12.274761Z node 1 :KQP_SESSION WARN: {KQPSA@kqp_session_actor.cpp:2679} SessionId: ydb://session/3?node_id=1&id=ODc3OWJhNjgtNjg0NTA1N2QtNmU5Njc1YjYtOWRlZjViMTE=, ActorId: [1:7593012686882696188:2329], ActorState: ExecuteState, LegacyTraceId: 01kef3g30y66dccj52b17jxmb1, ReplyQueryCompileError, remove tx status# SCHEME_ERROR issues# { message: "Type annotation" issue_code: 1030 severity: 1 issues { position { row: 5 column: 17 } message: "At function: KiReadTable!" end_position { row: 5 column: 17 } severity: 1 issues { position { row: 5 column: 17 } message: "Cannot find table \'db.[/Root/PQ/Config/V2/Versions]\' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions." end_position { row: 5 column: 17 } issue_code: 2003 severity: 1 } } } tx_id# trace_id# 2026-01-08T15:27:12.277010Z node 1 :PERSQUEUE_CLUSTER_TRACKER ERROR: cluster_tracker.cpp:167: failed to list clusters: { Response { QueryIssues { message: "Type annotation" issue_code: 1030 severity: 1 issues { position { row: 5 column: 17 } message: "At function: KiReadTable!" end_position { row: 5 column: 17 } severity: 1 issues { position { row: 5 column: 17 } message: "Cannot find table \'db.[/Root/PQ/Config/V2/Versions]\' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions." end_position { row: 5 column: 17 } issue_code: 2003 severity: 1 } } } TxMeta { } } YdbStatus: SCHEME_ERROR ConsumedRu: 1 } 2026-01-08T15:27:12.299199Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) === Init DC: UPSERT INTO `/Root/PQ/Config/V2/Cluster` (name, balancer, local, enabled, weight) VALUES ("dc1", "localhost", true, true, 1000), ("dc2", "dc2.logbroker.yandex.net", false, true, 1000); === CheckClustersList. Subcribe to ClusterTracker from [1:7593012691177663860:2823] 2026-01-08T15:27:13.880805Z node 1 :METADATA_PROVIDER ERROR: log.cpp:841: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7593012673997793401:2175];send_to=[0:7307199536658146131:7762515]; 2026-01-08T15:27:13.880845Z node 1 :METADATA_PROVIDER ERROR: log.cpp:841: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; === CheckClustersList. Ok 2026-01-08T15:27:17.974528Z :TODO INFO: TTopicSdkTestSetup started 2026-01-08T15:27:18.297326Z node 1 :PQ_READ_PROXY DEBUG: grpc_pq_schema.cpp:133: new create topic request 2026-01-08T15:27:18.348162Z node 1 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1657: [72075186224037893][] pipe [1:7593012716947467762:2888] connected; active server actors: 1 2026-01-08T15:27:18.349506Z node 1 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer__balancing.cpp:1521: [72075186224037893][test-topic] updating configuration. Deleted partitions []. 
Added partitions [0] 2026-01-08T15:27:18.358193Z node 1 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer.cpp:756: [72075186224037893][test-topic] Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at RB 72075186224037893 2026-01-08T15:27:18.358298Z node 1 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer.cpp:142: [72075186224037893][test-topic] BALANCER INIT DONE for test-topic: (0, 72075186224037892) 2026-01-08T15:27:18.390604Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:3023: [PQ: 72075186224037892] Handle TEvInterconnect::TEvNodeInfo 2026-01-08T15:27:18.394135Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:3055: [PQ: 72075186224037892] Transactions request. From tx_00000000000000000000, To tx_18446744073709551615 2026-01-08T15:27:18.395400Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:788: [PQ: 72075186224037892] no config, start with empty partitions and default config 2026-01-08T15:27:18.395435Z node 1 :PERSQUEUE NOTICE: pq_impl.cpp:927: [PQ: 72075186224037892] disable metering: reason# billing is not enabled in BillingMeteringConfig 2026-01-08T15:27:18.395466Z node 1 :PERSQUEUE INFO: pq_impl.cpp:613: [PQ: 72075186224037892] doesn't have tx writes i ... NFO: ownerinfo.cpp:30: new Cookie src|475636c0-f5cd7786-e1d7bba3-59b530c_0 generated for partition 0 topic 'test-topic' owner src 2026-01-08T15:32:14.951270Z node 15 :PERSQUEUE DEBUG: partition.cpp:2329: [72075186224037892][Partition][0][StateIdle] Process user action and tx events 2026-01-08T15:32:14.951306Z node 15 :PERSQUEUE DEBUG: partition.cpp:2423: [72075186224037892][Partition][0][StateIdle] TPartition::ProcessUserActionAndTxEvent(TMessage) 2026-01-08T15:32:14.951355Z node 15 :PERSQUEUE DEBUG: partition.cpp:2388: [72075186224037892][Partition][0][StateIdle] Events: 0, PendingCommits: 1, PendingWrites: 0 2026-01-08T15:32:14.951380Z node 15 :PERSQUEUE DEBUG: partition.cpp:2337: [72075186224037892][Partition][0][StateIdle] Process user action and tx pending commits 2026-01-08T15:32:14.951423Z node 15 :PERSQUEUE DEBUG: partition.cpp:2487: [72075186224037892][Partition][0][StateIdle] TPartition::ProcessUserActionAndTxPendingCommit(TMessage) 2026-01-08T15:32:14.951475Z node 15 :PERSQUEUE DEBUG: partition.cpp:2388: [72075186224037892][Partition][0][StateIdle] Events: 0, PendingCommits: 0, PendingWrites: 1 2026-01-08T15:32:14.951502Z node 15 :PERSQUEUE DEBUG: partition.cpp:2342: [72075186224037892][Partition][0][StateIdle] Batch completed (1) 2026-01-08T15:32:14.951533Z node 15 :PERSQUEUE DEBUG: partition.cpp:2347: [72075186224037892][Partition][0][StateIdle] Try persist 2026-01-08T15:32:14.951601Z node 15 :PERSQUEUE DEBUG: partition_write.cpp:37: [72075186224037892][Partition][0][StateIdle] TPartition::ReplyOwnerOk. 
Partition: 0 2026-01-08T15:32:14.951683Z node 15 :PERSQUEUE DEBUG: pq_impl.cpp:182: Answer ok topic: 'test-topic' partition: 0 messageNo: 0 requestId: cookie: 0 2026-01-08T15:32:14.952007Z node 15 :PERSQUEUE DEBUG: pq_impl.cpp:147: Handle TEvRequest topic: 'test-topic' requestId: 2026-01-08T15:32:14.952040Z node 15 :PERSQUEUE DEBUG: pq_impl.cpp:2698: [PQ: 72075186224037892] got client message batch for topic 'test-topic' partition 0 2026-01-08T15:32:14.952167Z node 15 :PERSQUEUE DEBUG: pq_impl.cpp:182: Answer ok topic: 'test-topic' partition: 0 messageNo: 0 requestId: cookie: 0 2026-01-08T15:32:14.952315Z node 15 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:884: session inited cookie: 1 partition: 0 MaxSeqNo: 0 sessionId: src|475636c0-f5cd7786-e1d7bba3-59b530c_0 2026-01-08T15:32:14.960805Z node 15 :PQ_WRITE_PROXY DEBUG: write_session_actor.cpp:254: session v1 cookie: 1 sessionId: src|475636c0-f5cd7786-e1d7bba3-59b530c_0 grpc read done: success: 0 data: 2026-01-08T15:32:14.960855Z node 15 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:256: session v1 cookie: 1 sessionId: src|475636c0-f5cd7786-e1d7bba3-59b530c_0 grpc read failed 2026-01-08T15:32:14.961087Z node 15 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:837: session v1 closed cookie: 1 sessionId: src|475636c0-f5cd7786-e1d7bba3-59b530c_0 2026-01-08T15:32:14.961117Z node 15 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:304: session v1 cookie: 1 sessionId: src|475636c0-f5cd7786-e1d7bba3-59b530c_0 is DEAD 2026-01-08T15:32:14.961628Z node 15 :PQ_WRITE_PROXY DEBUG: writer.cpp:562: TPartitionWriter 72075186224037892 (partition=0) Received event: NActors::TEvents::TEvPoison 2026-01-08T15:32:14.962674Z node 15 :PERSQUEUE DEBUG: pq_impl.cpp:2809: [PQ: 72075186224037892] server disconnected, pipe [15:7593013989516379545:2467] destroyed 2026-01-08T15:32:14.962731Z node 15 :PERSQUEUE DEBUG: partition_write.cpp:140: [72075186224037892][Partition][0][StateIdle] TPartition::DropOwner. 
2026-01-08T15:32:14.962776Z node 15 :PERSQUEUE DEBUG: partition.cpp:2329: [72075186224037892][Partition][0][StateIdle] Process user action and tx events 2026-01-08T15:32:14.962806Z node 15 :PERSQUEUE DEBUG: partition.cpp:2388: [72075186224037892][Partition][0][StateIdle] Events: 0, PendingCommits: 0, PendingWrites: 0 2026-01-08T15:32:14.962835Z node 15 :PERSQUEUE DEBUG: partition.cpp:2337: [72075186224037892][Partition][0][StateIdle] Process user action and tx pending commits 2026-01-08T15:32:14.962867Z node 15 :PERSQUEUE DEBUG: partition.cpp:2388: [72075186224037892][Partition][0][StateIdle] Events: 0, PendingCommits: 0, PendingWrites: 0 2026-01-08T15:32:14.962892Z node 15 :PERSQUEUE DEBUG: partition.cpp:2347: [72075186224037892][Partition][0][StateIdle] Try persist 2026-01-08T15:32:14.967220Z :TODO INFO: Topic created 2026-01-08T15:32:15.034795Z node 15 :PQ_READ_PROXY DEBUG: grpc_pq_schema.cpp:133: new create topic request 2026-01-08T15:32:15.084205Z node 15 :PERSQUEUE NOTICE: pq_impl.cpp:927: [PQ: 72075186224037895] disable metering: reason# billing is not enabled in BillingMeteringConfig 2026-01-08T15:32:15.084290Z node 15 :PERSQUEUE INFO: pq_impl.cpp:613: [PQ: 72075186224037895] doesn't have tx writes info 2026-01-08T15:32:15.086445Z node 15 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1657: [72075186224037896][] pipe [15:7593013993811346903:3016] connected; active server actors: 1 2026-01-08T15:32:15.092651Z node 15 :PERSQUEUE NOTICE: pq_impl.cpp:927: [PQ: 72075186224037894] disable metering: reason# billing is not enabled in BillingMeteringConfig 2026-01-08T15:32:15.092722Z node 15 :PERSQUEUE INFO: pq_impl.cpp:613: [PQ: 72075186224037894] doesn't have tx writes info 2026-01-08T15:32:15.103407Z node 15 :PERSQUEUE INFO: partition_init.cpp:1155: [72075186224037894][Partition][1][StateInit] bootstrapping 1 [15:7593013993811346967:2482] 2026-01-08T15:32:15.105268Z node 15 :PERSQUEUE INFO: partition.cpp:712: [72075186224037894][Partition][1][StateInit] init complete for topic 'topic1' partition 1 generation 1 [15:7593013993811346967:2482] 2026-01-08T15:32:15.105399Z node 15 :PERSQUEUE INFO: partition_mlp.cpp:146: [72075186224037894][Partition][1][StateIdle] Creating MLP consumer 'mlp-consumer' 2026-01-08T15:32:15.109386Z node 15 :PERSQUEUE INFO: partition_init.cpp:1155: [72075186224037895][Partition][0][StateInit] bootstrapping 0 [15:7593013993811346971:2480] 2026-01-08T15:32:15.111222Z node 15 :PERSQUEUE INFO: partition.cpp:712: [72075186224037895][Partition][0][StateInit] init complete for topic 'topic1' partition 0 generation 1 [15:7593013993811346971:2480] 2026-01-08T15:32:15.111345Z node 15 :PERSQUEUE INFO: partition_mlp.cpp:146: [72075186224037895][Partition][0][StateIdle] Creating MLP consumer 'mlp-consumer' 2026-01-08T15:32:15.112430Z node 15 :PQ_MLP_CONSUMER DEBUG: mlp_consumer.cpp:105: [72075186224037895][0][MLP][mlp-consumer] Start MLP consumer mlp-consumer 2026-01-08T15:32:15.112630Z node 15 :PQ_MLP_CONSUMER DEBUG: mlp_consumer.cpp:356: [72075186224037895][0][MLP][mlp-consumer] Update config: RetentionPeriod: 86400.000000s Name: "mlp-consumer" ReadFromTimestampsMs: 0 FormatVersion: 0 Codec { } ServiceType: "data-streams" Version: 0 Generation: 0 Type: CONSUMER_TYPE_MLP KeepMessageOrder: false DeadLetterPolicyEnabled: false DeadLetterPolicy: DEAD_LETTER_POLICY_UNSPECIFIED MaxProcessingAttempts: 0 DefaultProcessingTimeoutSeconds: 0 2026-01-08T15:32:15.116379Z node 15 :PQ_MLP_CONSUMER DEBUG: mlp_consumer.cpp:105: [72075186224037894][1][MLP][mlp-consumer] Start MLP 
consumer mlp-consumer 2026-01-08T15:32:15.116614Z node 15 :PQ_MLP_CONSUMER DEBUG: mlp_consumer.cpp:356: [72075186224037894][1][MLP][mlp-consumer] Update config: RetentionPeriod: 86400.000000s Name: "mlp-consumer" ReadFromTimestampsMs: 0 FormatVersion: 0 Codec { } ServiceType: "data-streams" Version: 0 Generation: 0 Type: CONSUMER_TYPE_MLP KeepMessageOrder: false DeadLetterPolicyEnabled: false DeadLetterPolicy: DEAD_LETTER_POLICY_UNSPECIFIED MaxProcessingAttempts: 0 DefaultProcessingTimeoutSeconds: 0 2026-01-08T15:32:15.119333Z node 15 :PQ_MLP_CONSUMER DEBUG: mlp_consumer.cpp:185: [72075186224037894][1][MLP][mlp-consumer] HandleOnInit TEvKeyValue::TEvResponse 2026-01-08T15:32:15.119375Z node 15 :PQ_MLP_CONSUMER DEBUG: mlp_consumer.cpp:225: [72075186224037894][1][MLP][mlp-consumer] Initializing new consumer 2026-01-08T15:32:15.119407Z node 15 :PQ_MLP_CONSUMER DEBUG: mlp_consumer.cpp:278: [72075186224037894][1][MLP][mlp-consumer] Initializing new consumer 2026-01-08T15:32:15.119442Z node 15 :PQ_MLP_CONSUMER DEBUG: mlp_consumer.cpp:347: [72075186224037894][1][MLP][mlp-consumer] Try commit offset: 0 vs 0 2026-01-08T15:32:15.119475Z node 15 :PQ_MLP_CONSUMER DEBUG: mlp_consumer.cpp:675: [72075186224037894][1][MLP][mlp-consumer] Skip fetch: partition end offset is reached: 0 vs 0 2026-01-08T15:32:15.119507Z node 15 :PQ_MLP_CONSUMER DEBUG: mlp_consumer.cpp:295: [72075186224037894][1][MLP][mlp-consumer] Initialized 2026-01-08T15:32:15.119536Z node 15 :PQ_MLP_CONSUMER DEBUG: mlp_consumer.cpp:499: [72075186224037894][1][MLP][mlp-consumer] ProcessEventQueue 2026-01-08T15:32:15.119580Z node 15 :PQ_MLP_CONSUMER DEBUG: mlp_consumer.cpp:574: [72075186224037894][1][MLP][mlp-consumer] Persist 2026-01-08T15:32:15.119610Z node 15 :PQ_MLP_CONSUMER DEBUG: mlp_consumer.cpp:580: [72075186224037894][1][MLP][mlp-consumer] Batch is empty 2026-01-08T15:32:15.122036Z node 15 :PQ_MLP_CONSUMER DEBUG: mlp_consumer.cpp:185: [72075186224037895][0][MLP][mlp-consumer] HandleOnInit TEvKeyValue::TEvResponse 2026-01-08T15:32:15.122081Z node 15 :PQ_MLP_CONSUMER DEBUG: mlp_consumer.cpp:225: [72075186224037895][0][MLP][mlp-consumer] Initializing new consumer 2026-01-08T15:32:15.122108Z node 15 :PQ_MLP_CONSUMER DEBUG: mlp_consumer.cpp:278: [72075186224037895][0][MLP][mlp-consumer] Initializing new consumer 2026-01-08T15:32:15.122139Z node 15 :PQ_MLP_CONSUMER DEBUG: mlp_consumer.cpp:347: [72075186224037895][0][MLP][mlp-consumer] Try commit offset: 0 vs 0 2026-01-08T15:32:15.122168Z node 15 :PQ_MLP_CONSUMER DEBUG: mlp_consumer.cpp:675: [72075186224037895][0][MLP][mlp-consumer] Skip fetch: partition end offset is reached: 0 vs 0 2026-01-08T15:32:15.122197Z node 15 :PQ_MLP_CONSUMER DEBUG: mlp_consumer.cpp:295: [72075186224037895][0][MLP][mlp-consumer] Initialized 2026-01-08T15:32:15.122222Z node 15 :PQ_MLP_CONSUMER DEBUG: mlp_consumer.cpp:499: [72075186224037895][0][MLP][mlp-consumer] ProcessEventQueue 2026-01-08T15:32:15.122256Z node 15 :PQ_MLP_CONSUMER DEBUG: mlp_consumer.cpp:574: [72075186224037895][0][MLP][mlp-consumer] Persist 2026-01-08T15:32:15.122282Z node 15 :PQ_MLP_CONSUMER DEBUG: mlp_consumer.cpp:580: [72075186224037895][0][MLP][mlp-consumer] Batch is empty 2026-01-08T15:32:15.124559Z node 15 :PERSQUEUE INFO: partition_mlp.cpp:127: [72075186224037895][Partition][0][StateIdle] Updateing MLP consumer 'mlp-consumer' config 2026-01-08T15:32:15.125157Z node 15 :PQ_MLP_CONSUMER DEBUG: mlp_consumer.cpp:356: [72075186224037895][0][MLP][mlp-consumer] Update config: RetentionPeriod: 86400.000000s Name: "mlp-consumer" 
ReadFromTimestampsMs: 0 FormatVersion: 0 Codec { } ServiceType: "data-streams" Version: 0 Generation: 0 Type: CONSUMER_TYPE_MLP KeepMessageOrder: false DeadLetterPolicyEnabled: false DeadLetterPolicy: DEAD_LETTER_POLICY_UNSPECIFIED MaxProcessingAttempts: 0 DefaultProcessingTimeoutSeconds: 0 2026-01-08T15:32:15.125450Z node 15 :PERSQUEUE NOTICE: pq_impl.cpp:927: [PQ: 72075186224037895] disable metering: reason# billing is not enabled in BillingMeteringConfig 2026-01-08T15:32:15.128644Z node 15 :PERSQUEUE INFO: partition_mlp.cpp:127: [72075186224037894][Partition][1][StateIdle] Updateing MLP consumer 'mlp-consumer' config 2026-01-08T15:32:15.129229Z node 15 :PQ_MLP_CONSUMER DEBUG: mlp_consumer.cpp:356: [72075186224037894][1][MLP][mlp-consumer] Update config: RetentionPeriod: 86400.000000s Name: "mlp-consumer" ReadFromTimestampsMs: 0 FormatVersion: 0 Codec { } ServiceType: "data-streams" Version: 0 Generation: 0 Type: CONSUMER_TYPE_MLP KeepMessageOrder: false DeadLetterPolicyEnabled: false DeadLetterPolicy: DEAD_LETTER_POLICY_UNSPECIFIED MaxProcessingAttempts: 0 DefaultProcessingTimeoutSeconds: 0 2026-01-08T15:32:15.129535Z node 15 :PERSQUEUE NOTICE: pq_impl.cpp:927: [PQ: 72075186224037894] disable metering: reason# billing is not enabled in BillingMeteringConfig 2026-01-08T15:32:15.212867Z node 15 :PQ_READ_PROXY DEBUG: grpc_pq_schema.cpp:141: new alter topic request |86.7%| [TM] {BAZEL_UPLOAD} ydb/core/persqueue/pqtablet/partition/mlp/ut/unittest >> KqpScripting::SelectNullType >> KqpPg::InsertValuesFromTableWithDefaultBool+useSink [GOOD] >> KqpPg::InsertValuesFromTableWithDefaultBool-useSink >> test_s3.py::TestYdbS3TTL::test_s3[table_ttl_Timestamp-pk_types17-all_types17-index17-Timestamp--] [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/yql/unittest >> KqpYql::UuidPrimaryKey [GOOD] Test command err: Trying to start YDB, gRPC: 20689, MsgBus: 11497 2026-01-08T15:32:13.918022Z node 1 :METADATA_PROVIDER WARN: log.cpp:841: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7593013987507786612:2206];send_to=[0:7307199536658146131:7762515]; 2026-01-08T15:32:13.918852Z node 1 :METADATA_PROVIDER ERROR: log.cpp:841: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2026-01-08T15:32:14.429605Z node 1 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: LookupError, path: Root/.metadata/script_executions 2026-01-08T15:32:14.493100Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2026-01-08T15:32:14.493242Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2026-01-08T15:32:14.587265Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2026-01-08T15:32:14.606651Z node 1 :IMPORT WARN: schemeshard_import.cpp:440: Table profiles were not loaded 2026-01-08T15:32:14.618709Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1374: Notification cookie mismatch for subscription [1:7593013987507786424:2081] 1767886333847027 != 1767886333847030 2026-01-08T15:32:14.700562Z node 1 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, 
scheme status: PathErrorUnknown, path: Root/.metadata/script_executions 2026-01-08T15:32:14.935532Z node 1 :TX_CONVEYOR ERROR: log.cpp:841: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2026-01-08T15:32:15.060757Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2026-01-08T15:32:15.060803Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2026-01-08T15:32:15.060817Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2026-01-08T15:32:15.060934Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2026-01-08T15:32:15.624657Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2026-01-08T15:32:15.633165Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:123: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2026-01-08T15:32:18.376752Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7593014008982623800:2325], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:32:18.376885Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:263: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:32:18.378396Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7593014008982623810:2326], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:32:18.378510Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:263: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:32:18.741939Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:32:18.919526Z node 1 :METADATA_PROVIDER ERROR: log.cpp:841: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7593013987507786612:2206];send_to=[0:7307199536658146131:7762515]; 2026-01-08T15:32:18.919635Z node 1 :METADATA_PROVIDER ERROR: log.cpp:841: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2026-01-08T15:32:18.997425Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7593014008982623906:2335], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:32:18.997550Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:263: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:32:18.997922Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7593014008982623911:2338], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:32:18.997955Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7593014008982623912:2339], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:32:18.997994Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:263: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:32:19.013808Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715659:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2026-01-08T15:32:19.032109Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7593014008982623915:2340], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715659 completed, doublechecking } 2026-01-08T15:32:19.160429Z node 1 :TX_PROXY ERROR: schemereq.cpp:624: Actor# [1:7593014013277591264:2605] txid# 281474976715660, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 42], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2026-01-08T15:32:20.128189Z node 1 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:625: Compilation failed, self: [1:7593014017572558660:2367], status: GENERIC_ERROR, issues:
:3:25: Error: Invalid value "invalid-uuid" for type Uuid 2026-01-08T15:32:20.130326Z node 1 :KQP_SESSION WARN: {KQPSA@kqp_session_actor.cpp:2679} SessionId: ydb://session/3?node_id=1&id=ZmJhYjY0ZmUtYWU4MzU4MTUtYTgwNmUzZjktM2ZlZGQzNjY=, ActorId: [1:7593014008982623797:2323], ActorState: ExecuteState, LegacyTraceId: 01kef3sg0jc7bg2ywt0g66ecxb, ReplyQueryCompileError, remove tx status# GENERIC_ERROR issues# { position { row: 3 column: 25 } message: "Invalid value \"invalid-uuid\" for type Uuid" end_position { row: 3 column: 25 } severity: 1 } tx_id# trace_id# |86.7%| [TM] {BAZEL_UPLOAD} ydb/core/kqp/ut/yql/unittest >> TControlPlaneProxyTest::ShouldSendDescribeConnection [GOOD] >> TControlPlaneProxyTest::ShouldSendModifyConnection >> TTxDataShardPrefixKMeansScan::BuildToBuild [GOOD] >> TTxDataShardPrefixKMeansScan::BuildToBuildWithOverlap >> TxUsage::WriteToTopic_Demo_19_RestartNo_Query [GOOD] >> KqpPg::TypeCoercionInsert+useSink [GOOD] >> KqpPg::TableSelect+useSink >> KqpPg::InsertValuesFromTableWithDefaultTextNotNullButNull+useSink [GOOD] >> KqpPg::InsertValuesFromTableWithDefaultTextNotNullButNull-useSink >> TControlPlaneProxyTest::ShouldSendModifyConnection [GOOD] >> TControlPlaneProxyTest::ShouldSendDeleteConnection >> TxUsage::WriteToTopic_Demo_19_RestartBeforeCommit_Table >> DataShardWrite::ReplaceImmediate |86.7%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/s3/py3test >> test_s3.py::TestYdbS3TTL::test_s3[table_ttl_Date-pk_types18-all_types18-index18-Date--] [GOOD] |86.7%| [TM] {BAZEL_UPLOAD} ydb/tests/datashard/s3/py3test |86.7%| [TS] {asan, default-linux-x86_64, release} ydb/core/fq/libs/ydb/ut/unittest >> NFq::TRegisterCheckTestBase::ShouldRegisterCheckNewGenerationAndTransact [GOOD] |86.7%| [TS] {BAZEL_UPLOAD} ydb/core/fq/libs/ydb/ut/unittest >> KqpPg::CopyTableSerialColumns-useSink [GOOD] >> KqpPg::CreateIndex |86.7%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/fq/libs/result_formatter/ut/ydb-core-fq-libs-result_formatter-ut |86.7%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/fq/libs/result_formatter/ut/ydb-core-fq-libs-result_formatter-ut |86.7%| [TM] {RESULT} ydb/core/kqp/ut/view/unittest >> TControlPlaneProxyTest::ShouldSendDeleteConnection [GOOD] >> TControlPlaneProxyTest::ShouldSendTestConnection >> KqpScripting::StreamExecuteYqlScriptLeadingEmptyScan [GOOD] >> test_s3.py::TestYdbS3TTL::test_s3[table_ttl_Uint32-pk_types14-all_types14-index14-Uint32--] [GOOD] >> DataShardWrite::UpsertWithDefaults >> KqpScripting::ScanQueryDisable [GOOD] |86.7%| [TM] {RESULT} ydb/core/persqueue/pqtablet/partition/mlp/ut/unittest |86.7%| [LD] {RESULT} $(B)/ydb/core/fq/libs/result_formatter/ut/ydb-core-fq-libs-result_formatter-ut >> KeyValueGRPCService::SimpleCreateAlterDropVolume [GOOD] >> KeyValueGRPCService::SimpleListPartitions [GOOD] >> KqpPg::DropTableIfExists [GOOD] >> KqpPg::DropTableIfExists_GenericQuery >> TxUsage::Sinks_Oltp_WriteToTopicAndTable_2_Table [GOOD] >> BasicUsage::CreateTopicWithSharedConsumer_DisabledDeadLetterPolicy [GOOD] >> BasicUsage::CreateTopicWithSharedConsumer_KeepMessagesOrder_False >> KqpScripting::StreamExecuteYqlScriptSeveralQueries [GOOD] >> KqpScripting::StreamExecuteYqlScriptSeveralQueriesComplex ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/yql/unittest >> KqpScripting::StreamExecuteYqlScriptLeadingEmptyScan [GOOD] Test command err: Trying to start YDB, gRPC: 13284, MsgBus: 23277 2026-01-08T15:32:06.651148Z node 1 :METADATA_PROVIDER WARN: log.cpp:841: 
fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7593013953536738698:2193];send_to=[0:7307199536658146131:7762515]; 2026-01-08T15:32:06.651244Z node 1 :METADATA_PROVIDER ERROR: log.cpp:841: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2026-01-08T15:32:06.723809Z node 1 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:125: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, got undelivered to scheme cache: ActorUnknown 2026-01-08T15:32:07.290436Z node 1 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: LookupError, path: Root/.metadata/script_executions 2026-01-08T15:32:07.309715Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2026-01-08T15:32:07.309822Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2026-01-08T15:32:07.377753Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2026-01-08T15:32:07.438115Z node 1 :IMPORT WARN: schemeshard_import.cpp:440: Table profiles were not loaded 2026-01-08T15:32:07.474454Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1374: Notification cookie mismatch for subscription [1:7593013953536738533:2081] 1767886326612297 != 1767886326612300 2026-01-08T15:32:07.492344Z node 1 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: PathErrorUnknown, path: Root/.metadata/script_executions 2026-01-08T15:32:07.660845Z node 1 :TX_CONVEYOR ERROR: log.cpp:841: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2026-01-08T15:32:07.670478Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2026-01-08T15:32:07.670507Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2026-01-08T15:32:07.670515Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2026-01-08T15:32:07.670648Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2026-01-08T15:32:08.158429Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2026-01-08T15:32:08.229365Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:32:08.407227Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, 
at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:32:08.691567Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:32:08.868641Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:32:11.632840Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7593013975011576899:2400], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:32:11.632959Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:263: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:32:11.633492Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7593013975011576909:2401], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:32:11.633548Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:263: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:32:11.654722Z node 1 :METADATA_PROVIDER ERROR: log.cpp:841: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7593013953536738698:2193];send_to=[0:7307199536658146131:7762515]; 2026-01-08T15:32:11.660917Z node 1 :METADATA_PROVIDER ERROR: log.cpp:841: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2026-01-08T15:32:12.185230Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:32:12.242089Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:32:12.280176Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:32:12.328585Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:32:12.368855Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:32:12.422483Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:32:12.476630Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:32:12.523710Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710669:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:32:12.661721Z node 1 :KQP_WORKLOAD_SERVICE WARN: 
scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7593013979306545083:2485], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:32:12.661841Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:263: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:32:12.662206Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7593013979306545090:2490], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:32:12.662269Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7593013979306545089:2489], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:32:12.662305Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:263: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:32:12.668312Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710670:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2026-01-08T15:32:12.698785Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7593013979306545094:2491], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710670 completed, doublechecking } 2026-01-08T15:32:12.786295Z node 1 :TX_PROXY ERROR: schemereq.cpp:624: Actor# [1:7593013979306545147:3777] txid# 281474976710671, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 53], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2026-01-08T15:32:15.539115Z node 1 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:239: KqpSnapshotManager: discarding ... 6658146131:7762515]; 2026-01-08T15:32:18.137575Z node 2 :METADATA_PROVIDER ERROR: log.cpp:841: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2026-01-08T15:32:18.169827Z node 2 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: LookupError, path: Root/.metadata/script_executions 2026-01-08T15:32:18.247756Z node 2 :IMPORT WARN: schemeshard_import.cpp:440: Table profiles were not loaded 2026-01-08T15:32:18.249339Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1374: Notification cookie mismatch for subscription [2:7593014007874377395:2081] 1767886338124983 != 1767886338124986 2026-01-08T15:32:18.285775Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2026-01-08T15:32:18.285857Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2026-01-08T15:32:18.289353Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2026-01-08T15:32:18.398553Z node 2 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: PathErrorUnknown, path: Root/.metadata/script_executions 2026-01-08T15:32:18.450922Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2026-01-08T15:32:18.450944Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2026-01-08T15:32:18.450953Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2026-01-08T15:32:18.451051Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2026-01-08T15:32:19.029600Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2026-01-08T15:32:19.038081Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:123: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2026-01-08T15:32:19.048962Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at 
ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:32:19.113938Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:32:19.222069Z node 2 :TX_CONVEYOR ERROR: log.cpp:841: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2026-01-08T15:32:19.334918Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:32:19.413363Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:32:22.575517Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7593014025054248440:2400], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:32:22.575619Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:263: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:32:22.576491Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7593014025054248450:2401], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:32:22.576565Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:263: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:32:22.651232Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:32:22.706130Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:32:22.748564Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:32:22.804389Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:32:22.869644Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:32:23.055898Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:32:23.129641Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:32:23.140759Z node 2 :METADATA_PROVIDER ERROR: log.cpp:841: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[2:7593014007874377511:2146];send_to=[0:7307199536658146131:7762515]; 2026-01-08T15:32:23.142883Z node 2 :METADATA_PROVIDER ERROR: log.cpp:841: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2026-01-08T15:32:23.220695Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710669:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:32:23.361765Z node 2 :KQP_WORKLOAD_SERVICE WARN: 
scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7593014029349216615:2484], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:32:23.361861Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:263: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:32:23.362175Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7593014029349216620:2487], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:32:23.362220Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7593014029349216621:2488], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:32:23.362502Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:263: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:32:23.366783Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710670:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2026-01-08T15:32:23.397464Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7593014029349216624:2489], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710670 completed, doublechecking } 2026-01-08T15:32:23.482123Z node 2 :TX_PROXY ERROR: schemereq.cpp:624: Actor# [2:7593014029349216675:3761] txid# 281474976710671, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 53], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2026-01-08T15:32:25.833435Z node 2 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:239: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1767886345862, txId: 281474976710673] shutting down 2026-01-08T15:32:26.252108Z node 2 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:239: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1767886346268, txId: 281474976710675] shutting down |86.7%| [TM] {BAZEL_UPLOAD} ydb/core/kqp/ut/yql/unittest >> DataShardVolatile::DistributedWriteLaterSnapshotBlockedThenAbort+UseSink [GOOD] >> DataShardVolatile::DistributedWriteLaterSnapshotBlockedThenAbort-UseSink >> TControlPlaneProxyTest::ShouldSendTestConnection [GOOD] >> TControlPlaneProxyTest::ShouldSendCreateBinding >> DataShardWrite::ExecSQLUpsertImmediate+EvWrite >> TColumnShardTestReadWrite::CompactionSplitGranule_PKInt64 [GOOD] >> test_s3.py::TestPgYdbS3TTL::test_s3[table_index_0__SYNC-pk_types1-all_types1-index1---SYNC] [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/yql/unittest >> KqpScripting::ScanQueryDisable [GOOD] Test command err: Trying to start YDB, gRPC: 21912, MsgBus: 24241 2026-01-08T15:32:07.247955Z node 1 :METADATA_PROVIDER WARN: log.cpp:841: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7593013959422032029:2086];send_to=[0:7307199536658146131:7762515]; 2026-01-08T15:32:07.273688Z node 1 :METADATA_PROVIDER ERROR: log.cpp:841: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2026-01-08T15:32:07.770444Z node 1 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: LookupError, path: Root/.metadata/script_executions 2026-01-08T15:32:07.901866Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2026-01-08T15:32:07.901983Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2026-01-08T15:32:07.907944Z node 1 :IMPORT WARN: schemeshard_import.cpp:440: Table profiles were not loaded 2026-01-08T15:32:07.909425Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2026-01-08T15:32:07.910503Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1374: Notification cookie mismatch for subscription [1:7593013959422031968:2081] 1767886327211591 != 1767886327211594 2026-01-08T15:32:07.938530Z node 1 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: PathErrorUnknown, path: Root/.metadata/script_executions 2026-01-08T15:32:08.126860Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2026-01-08T15:32:08.126887Z node 1 :NET_CLASSIFIER WARN: 
net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2026-01-08T15:32:08.126895Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2026-01-08T15:32:08.126975Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2026-01-08T15:32:08.210683Z node 1 :TX_CONVEYOR ERROR: log.cpp:841: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2026-01-08T15:32:08.840388Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2026-01-08T15:32:08.994440Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:32:09.268990Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:32:09.626537Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:32:09.815951Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:32:12.250495Z node 1 :METADATA_PROVIDER ERROR: log.cpp:841: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7593013959422032029:2086];send_to=[0:7307199536658146131:7762515]; 2026-01-08T15:32:12.254545Z node 1 :METADATA_PROVIDER ERROR: log.cpp:841: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2026-01-08T15:32:12.523102Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7593013980896870323:2401], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:32:12.523230Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:263: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:32:12.527145Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7593013980896870333:2402], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:32:12.527232Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:263: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:32:12.888645Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:32:12.938669Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:32:12.979222Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:32:13.021752Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:32:13.062886Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:32:13.098596Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:32:13.150021Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:32:13.240915Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710669:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:32:13.450519Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7593013985191838508:2486], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:32:13.450650Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:263: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:32:13.451122Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7593013985191838512:2488], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:32:13.451165Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7593013985191838514:2490], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:32:13.451211Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:263: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:32:13.456245Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710670:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2026-01-08T15:32:13.473091Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7593013985191838517:2491], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710670 completed, doublechecking } 2026-01-08T15:32:13.553597Z node 1 :TX_PROXY ERROR: schemereq.cpp:624: Actor# [1:7593013985191838570:3778] txid# 281474976710671, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 53], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2026-01-08T15:32:15.977444Z node 1 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:239: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1767886335971, txId: 281474976710673] shutting down Trying to start YDB, gRPC: 7863, MsgBus: 17728 2026-01-08T15:32:17.181988Z node 2 :METADATA_PROVIDER WARN: log.cpp:841: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7593014004243641912:2091];send_to=[0:7307199536658146131:7762515]; 2026-01-08T15:32:17.182470Z node 2 :METADATA_PROVIDER ERROR: log.cpp:841: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2026-01-08T15:32:17.319570Z node 2 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: LookupError, path: Root/.metadata/script_executions 2026-01-08T15:32:17.371993Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2026-01-08T15:32:17.372076Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2026-01-08T15:32:17.375167Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2026-01-08T15:32:17.400280Z node 2 :GRPC_SERVER WARN: grpc_request_proxy.cpp:575: SchemeBoardDelete /Root Strong=0 2026-01-08T15:32:17.400504Z node 2 :GRPC_SERVER WARN: grpc_request_proxy.cpp:575: SchemeBoardDelete /Root Strong=0 2026-01-08T15:32:17.460069Z node 2 :IMPORT WARN: schemeshard_import.cpp:440: Table profiles were not loaded 2026-01-08T15:32:17.566406Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2026-01-08T15:32:17.566429Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2026-01-08T15:32:17.566438Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2026-01-08T15:32:17.566526Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2026-01-08T15:32:17.593160Z node 2 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: PathErrorUnknown, path: Root/.metadata/script_executions 2026-01-08T15:32:18.219298Z node 2 :TX_CONVEYOR ERROR: log.cpp:841: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2026-01-08T15:32:18.267401Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 
2026-01-08T15:32:18.304897Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:32:18.469574Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:32:18.754952Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:32:18.873583Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:32:21.712141Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7593014021423512902:2400], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:32:21.712219Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:263: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:32:21.712641Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7593014021423512912:2401], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:32:21.712671Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:263: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:32:21.833959Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:32:21.890170Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:32:21.963574Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:32:22.014569Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:32:22.106759Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:32:22.182938Z node 2 :METADATA_PROVIDER ERROR: log.cpp:841: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[2:7593014004243641912:2091];send_to=[0:7307199536658146131:7762515]; 2026-01-08T15:32:22.183004Z node 2 :METADATA_PROVIDER ERROR: log.cpp:841: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2026-01-08T15:32:22.220476Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:32:22.292749Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:32:22.396274Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710669:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:32:22.560429Z node 2 :KQP_WORKLOAD_SERVICE WARN: 
scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7593014025718481088:2484], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:32:22.560597Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:263: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:32:22.561199Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7593014025718481093:2487], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:32:22.561250Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7593014025718481094:2488], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:32:22.561506Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:263: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:32:22.565999Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710670:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2026-01-08T15:32:22.587767Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7593014025718481097:2489], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710670 completed, doublechecking } 2026-01-08T15:32:22.693103Z node 2 :TX_PROXY ERROR: schemereq.cpp:624: Actor# [2:7593014025718481148:3779] txid# 281474976710671, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 53], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2026-01-08T15:32:26.026739Z node 2 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:239: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1767886346037, txId: 281474976710673] shutting down |86.7%| [TM] {BAZEL_UPLOAD} ydb/core/kqp/ut/yql/unittest >> DataShardWrite::AsyncIndexKeySizeConstraint >> KqpYql::EvaluateExpr2 [GOOD] >> KqpPg::InsertValuesFromTableWithDefaultBool-useSink [GOOD] >> KqpPg::InsertNoTargetColumns_SerialNotNull+useSink ------- [TM] {asan, default-linux-x86_64, release} ydb/services/keyvalue/ut/unittest >> KeyValueGRPCService::SimpleListPartitions [GOOD] Test command err: 2026-01-08T15:27:11.096290Z node 1 :METADATA_PROVIDER WARN: log.cpp:841: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7593012690262338230:2080];send_to=[0:7307199536658146131:7762515]; 2026-01-08T15:27:11.097042Z node 1 :METADATA_PROVIDER ERROR: log.cpp:841: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2026-01-08T15:27:11.337041Z node 1 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: LookupError, path: Root/.metadata/script_executions 2026-01-08T15:27:11.359993Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2026-01-08T15:27:11.360088Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2026-01-08T15:27:11.381678Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2026-01-08T15:27:11.438667Z node 1 :IMPORT WARN: schemeshard_import.cpp:440: Table profiles were not loaded 2026-01-08T15:27:11.453308Z node 1 :GRPC_SERVER NOTICE: grpc_request_proxy.cpp:372: Grpc request proxy started, nodeid# 1, serve as static node 2026-01-08T15:27:11.453677Z node 1 :GRPC_SERVER DEBUG: grpc_request_proxy.cpp:603: Subscribe to /Root 2026-01-08T15:27:11.453751Z node 1 :GRPC_SERVER DEBUG: grpc_request_proxy.cpp:425: Got proxy service configuration 2026-01-08T15:27:11.454012Z node 1 :GRPC_SERVER INFO: grpc_request_proxy.cpp:408: Subscribed for config changes 2026-01-08T15:27:11.454053Z node 1 :GRPC_SERVER INFO: grpc_request_proxy.cpp:416: Updated app config 2026-01-08T15:27:11.454096Z node 1 :GRPC_SERVER NOTICE: grpc_request_proxy.cpp:372: Grpc request proxy started, nodeid# 1, serve as static node 2026-01-08T15:27:11.454169Z node 1 :GRPC_SERVER DEBUG: grpc_request_proxy.cpp:603: Subscribe to /Root 2026-01-08T15:27:11.454922Z node 1 :GRPC_SERVER INFO: grpc_request_proxy.cpp:408: Subscribed for config changes 2026-01-08T15:27:11.454935Z node 1 :GRPC_SERVER INFO: grpc_request_proxy.cpp:416: Updated app config 2026-01-08T15:27:11.458652Z node 1 :GRPC_SERVER DEBUG: grpc_request_proxy.cpp:425: Got proxy service configuration 2026-01-08T15:27:11.459145Z node 1 :GRPC_SERVER DEBUG: grpc_request_proxy.cpp:535: 
SchemeBoardUpdate /Root 2026-01-08T15:27:11.459175Z node 1 :GRPC_SERVER DEBUG: grpc_request_proxy.cpp:564: Can't update SecurityState for /Root - no PublicKeys 2026-01-08T15:27:11.459188Z node 1 :GRPC_SERVER DEBUG: grpc_request_proxy.cpp:535: SchemeBoardUpdate /Root 2026-01-08T15:27:11.459262Z node 1 :GRPC_SERVER DEBUG: grpc_request_proxy.cpp:564: Can't update SecurityState for /Root - no PublicKeys 2026-01-08T15:27:11.486518Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2026-01-08T15:27:11.486542Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2026-01-08T15:27:11.486555Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2026-01-08T15:27:11.486663Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2026-01-08T15:27:11.540502Z node 1 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: PathErrorUnknown, path: Root/.metadata/script_executions 2026-01-08T15:27:11.678894Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2026-01-08T15:27:11.688984Z node 1 :GRPC_SERVER DEBUG: grpc_request_proxy.cpp:535: SchemeBoardUpdate /Root 2026-01-08T15:27:11.689082Z node 1 :GRPC_SERVER DEBUG: grpc_request_proxy.cpp:564: Can't update SecurityState for /Root - no PublicKeys 2026-01-08T15:27:11.689105Z node 1 :GRPC_SERVER DEBUG: grpc_request_proxy.cpp:535: SchemeBoardUpdate /Root 2026-01-08T15:27:11.689142Z node 1 :GRPC_SERVER DEBUG: grpc_request_proxy.cpp:564: Can't update SecurityState for /Root - no PublicKeys 2026-01-08T15:27:11.717696Z node 1 :GRPC_SERVER DEBUG: grpc_request_proxy.cpp:641: Got grpc request# MakeDirectoryRequest, traceId# 01kef3g2v45cvqswtghvgaqejh, sdkBuildInfo# undef, state# AS_NOT_PERFORMED, database# undef, peer# ipv6:[::1]:50532, grpcInfo# grpc-c++/1.54.3 grpc-c/31.0.0 (linux; chttp2), timeout# undef 2026-01-08T15:27:11.722915Z node 1 :GRPC_SERVER DEBUG: grpc_request_proxy.cpp:535: SchemeBoardUpdate /Root 2026-01-08T15:27:11.722999Z node 1 :GRPC_SERVER DEBUG: grpc_request_proxy.cpp:564: Can't update SecurityState for /Root - no PublicKeys 2026-01-08T15:27:11.723008Z node 1 :GRPC_SERVER DEBUG: grpc_request_proxy.cpp:535: SchemeBoardUpdate /Root 2026-01-08T15:27:11.723073Z node 1 :GRPC_SERVER DEBUG: grpc_request_proxy.cpp:564: Can't update SecurityState for /Root - no PublicKeys 2026-01-08T15:27:11.727768Z node 1 :GRPC_SERVER DEBUG: grpc_request_proxy.cpp:535: SchemeBoardUpdate /Root 2026-01-08T15:27:11.727835Z node 1 :GRPC_SERVER DEBUG: grpc_request_proxy.cpp:564: Can't update SecurityState for /Root - no PublicKeys 2026-01-08T15:27:11.727851Z node 1 :GRPC_SERVER DEBUG: grpc_request_proxy.cpp:535: SchemeBoardUpdate /Root 2026-01-08T15:27:11.727900Z node 1 :GRPC_SERVER DEBUG: grpc_request_proxy.cpp:564: Can't update SecurityState for /Root - no PublicKeys 2026-01-08T15:27:11.738004Z node 1 :GRPC_SERVER DEBUG: grpc_request_proxy.cpp:641: Got grpc request# CreateVolumeRequest, traceId# 01kef3g2vkffazpgkk95bv6326, sdkBuildInfo# undef, state# 
AS_NOT_PERFORMED, database# undef, peer# ipv6:[::1]:50532, grpcInfo# grpc-c++/1.54.3 grpc-c/31.0.0 (linux; chttp2), timeout# undef 2026-01-08T15:27:11.767417Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateSolomonVolume, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_solomon.cpp:380) 2026-01-08T15:27:11.770455Z node 1 :GRPC_SERVER DEBUG: grpc_request_proxy.cpp:535: SchemeBoardUpdate /Root 2026-01-08T15:27:11.770523Z node 1 :GRPC_SERVER DEBUG: grpc_request_proxy.cpp:564: Can't update SecurityState for /Root - no PublicKeys 2026-01-08T15:27:11.770585Z node 1 :GRPC_SERVER DEBUG: grpc_request_proxy.cpp:535: SchemeBoardUpdate /Root 2026-01-08T15:27:11.770621Z node 1 :GRPC_SERVER DEBUG: grpc_request_proxy.cpp:564: Can't update SecurityState for /Root - no PublicKeys 2026-01-08T15:27:11.786790Z node 1 :KEYVALUE DEBUG: keyvalue_flat_impl.h:616: KeyValue# 72075186224037888 StateInit flat event type# 268828672 event# NKikimr::TEvTablet::TEvBoot 2026-01-08T15:27:11.788189Z node 1 :GRPC_SERVER DEBUG: grpc_request_proxy.cpp:641: Got grpc request# ListDirectoryRequest, traceId# 01kef3g2xc6qmg9mj3j91akyxk, sdkBuildInfo# undef, state# AS_NOT_PERFORMED, database# undef, peer# ipv6:[::1]:50532, grpcInfo# grpc-c++/1.54.3 grpc-c/31.0.0 (linux; chttp2), timeout# undef 2026-01-08T15:27:11.789665Z node 1 :KEYVALUE DEBUG: keyvalue_flat_impl.h:616: KeyValue# 72075186224037888 StateInit flat event type# 268828673 event# NKikimr::TEvTablet::TEvRestored Wait iteration# 0 2026-01-08T15:27:11.790048Z node 1 :KEYVALUE DEBUG: keyvalue_flat_impl.h:65: KeyValue# 72075186224037888 TTxInit flat Execute 2026-01-08T15:27:11.790063Z node 1 :KEYVALUE DEBUG: keyvalue_flat_impl.h:68: KeyValue# 72075186224037888 TTxInit flat BuildScheme 2026-01-08T15:27:11.790395Z node 1 :KEYVALUE DEBUG: keyvalue_flat_impl.h:93: KeyValue# 72075186224037888 TTxInit flat Execute returns true 2026-01-08T15:27:11.790915Z node 1 :KEYVALUE DEBUG: keyvalue_flat_impl.h:616: KeyValue# 72075186224037888 StateInit flat event type# 268828684 event# NKikimr::TEvTablet::TEvFollowerSyncComplete 2026-01-08T15:27:11.791744Z node 1 :KEYVALUE DEBUG: keyvalue_flat_impl.h:616: KeyValue# 72075186224037888 StateInit flat event type# 268632582 event# TEvCollectGarbageResult {TabletId# 72075186224037888 RecordGeneration# 1 PerGenerationCounter# 0 Channel# 2 Status# OK} 2026-01-08T15:27:11.791777Z node 1 :KEYVALUE DEBUG: keyvalue_flat_impl.h:384: KeyValue# 72075186224037888 Enqueue, event type# 268632582 event# TEvCollectGarbageResult {TabletId# 72075186224037888 RecordGeneration# 1 PerGenerationCounter# 0 Channel# 2 Status# OK} 2026-01-08T15:27:11.791796Z node 1 :KEYVALUE DEBUG: keyvalue_flat_impl.h:616: KeyValue# 72075186224037888 StateInit flat event type# 268632582 event# TEvCollectGarbageResult {TabletId# 72075186224037888 RecordGeneration# 1 PerGenerationCounter# 1 Channel# 2 Status# OK} 2026-01-08T15:27:11.791810Z node 1 :KEYVALUE DEBUG: keyvalue_flat_impl.h:384: KeyValue# 72075186224037888 Enqueue, event type# 268632582 event# TEvCollectGarbageResult {TabletId# 72075186224037888 RecordGeneration# 1 PerGenerationCounter# 1 Channel# 2 Status# OK} 2026-01-08T15:27:11.792104Z node 1 :GRPC_SERVER DEBUG: grpc_request_proxy.cpp:641: Got grpc request# ExecuteTransactionRequest, traceId# 01kef3g2xganffsgq8wtnr1fz1, sdkBuildInfo# undef, state# 
AS_NOT_PERFORMED, database# undef, peer# ipv6:[::1]:50532, grpcInfo# grpc-c++/1.54.3 grpc-c/31.0.0 (linux; chttp2), timeout# undef 2026-01-08T15:27:11.792435Z node 1 :KEYVALUE DEBUG: keyvalue_flat_impl.h:672: KeyValue# 72075186224037888 Dequeue, event type# 268632582 event# TEvCollectGarbageResult {TabletId# 72075186224037888 RecordGeneration# 1 PerGenerationCounter# 0 Channel# 2 Status# OK} 2026-01-08T15:27:11.792489Z node 1 :KEYVALUE DEBUG: keyvalue_flat_impl.h:672: KeyValue# 72075186224037888 Dequeue, event type# 268632582 event# TEvCollectGarbageResult {TabletId# 72075186224037888 RecordGeneration# 1 PerGenerationCounter# 1 Channel# 2 Status# OK} 2026-01-08T15:27:11.792528Z node 1 :KEYVALUE DEBUG: keyvalue_flat_impl.h:482: KeyValue# 72075186224037888 Handle TEvCollectGarbageResult Cookie# 1 Marker# KV52 2026-01-08T15:27:11.792537Z node 1 :KEYVALUE DEBUG: keyvalue_state.cpp:696: KeyValue# 72075186224037888 InitialCollectsSent# 2 Marker# KV50 2026-01-08T15:27:11.792550Z node 1 :KEYVALUE DEBUG: keyvalue_flat_impl.h:482: KeyValue# 72075186224037888 Handle TEvCollectGarbageResult Cookie# 2 Marker# KV52 2026-01-08T15:27:11.792558Z node 1 :KEYVALUE DEBUG: keyvalue_state.cpp:696: KeyValue# 72075186224037888 InitialCollectsSent# 1 Marker# KV50 2026-01-08T15:27:11.792583Z node 1 :KEYVALUE DEBUG: keyvalue_state.cpp:719: KeyValue# 72075186224037888 SendCutHistory Marker# KV51 2026-01-08T15:27:11.792606Z node 1 :KEYVALUE DEBUG: keyvalue_flat_impl.h:324: KeyValue# 72075186224037888 TTxRegisterInitialGCCompletion Execute 2026-01-08T15:27:11.793937Z node 1 :KEYVALUE DEBUG: keyvalue_flat_impl.h:646: KeyValue# 72075186224037888 StateWork unexpected event type# 269877761 event# NKikimr::TEvTabletPipe::TEvServerConnected 2026-01-08T15:27:11.793979Z node 1 :KEYVALUE DEBUG: keyvalue_state.cpp:2994: KeyValue# 72075186224037888 PrepareExecuteTransactionRequest Marker# KV72 2026-01-08T15:27:11.794017Z node 1 :KEYVALUE DEBUG: keyvalue_state.cpp:3267: KeyValue# 72075186224037888 Create storage request for WO, Marker# KV67 2026-01-08T15:27:11.794060Z node 1 :KEYVALUE DEBUG: keyvalue_storage_request.cpp:406: KeyValue# 72075186224037888 UpdateRequest ReadRequestsReplied# 0 ReadRequestsSent# 0 WriteRequestsReplied # 0 WriteRequestsSent# 0 GetStatusRequestsReplied # 0 GetStatusRequestsSent# 0 PatchRequestSent# 0 PatchRequestReplied# 0 Marker# KV45 2026-01-08T15:27:11.794122Z node 1 :KEYVALUE DEBUG: keyvalue ... 
8310Z node 65 :KEYVALUE DEBUG: keyvalue_flat_impl.h:78: KeyValue# 72075186224037889 TTxInit flat ReadDb Tree 2026-01-08T15:32:20.228728Z node 65 :KEYVALUE DEBUG: keyvalue_flat_impl.h:93: KeyValue# 72075186224037889 TTxInit flat Execute returns true 2026-01-08T15:32:20.230058Z node 65 :KEYVALUE DEBUG: keyvalue_flat_impl.h:65: KeyValue# 72075186224037888 TTxInit flat Execute 2026-01-08T15:32:20.230067Z node 65 :KEYVALUE DEBUG: keyvalue_flat_impl.h:78: KeyValue# 72075186224037888 TTxInit flat ReadDb Tree 2026-01-08T15:32:20.230210Z node 65 :KEYVALUE DEBUG: keyvalue_flat_impl.h:93: KeyValue# 72075186224037888 TTxInit flat Execute returns true 2026-01-08T15:32:20.231310Z node 65 :GRPC_SERVER DEBUG: grpc_request_proxy.cpp:641: Got grpc request# DropVolumeRequest, traceId# 01kef3sg3ebrx0qvx9v5e3z6vv, sdkBuildInfo# undef, state# AS_NOT_PERFORMED, database# undef, peer# ipv6:[::1]:56712, grpcInfo# grpc-c++/1.54.3 grpc-c/31.0.0 (linux; chttp2), timeout# undef 2026-01-08T15:32:20.240637Z node 65 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpDropSolomonVolume, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_drop_solomon.cpp:184) 2026-01-08T15:32:20.261226Z node 65 :KEYVALUE DEBUG: keyvalue_flat_impl.h:616: KeyValue# 72075186224037888 StateInit flat event type# 268632582 event# TEvCollectGarbageResult {TabletId# 72075186224037888 RecordGeneration# 2 PerGenerationCounter# 0 Channel# 2 Status# OK} 2026-01-08T15:32:20.261331Z node 65 :KEYVALUE DEBUG: keyvalue_flat_impl.h:384: KeyValue# 72075186224037888 Enqueue, event type# 268632582 event# TEvCollectGarbageResult {TabletId# 72075186224037888 RecordGeneration# 2 PerGenerationCounter# 0 Channel# 2 Status# OK} 2026-01-08T15:32:20.261377Z node 65 :KEYVALUE DEBUG: keyvalue_flat_impl.h:616: KeyValue# 72075186224037888 StateInit flat event type# 268632582 event# TEvCollectGarbageResult {TabletId# 72075186224037888 RecordGeneration# 2 PerGenerationCounter# 1 Channel# 3 Status# OK} 2026-01-08T15:32:20.261402Z node 65 :KEYVALUE DEBUG: keyvalue_flat_impl.h:384: KeyValue# 72075186224037888 Enqueue, event type# 268632582 event# TEvCollectGarbageResult {TabletId# 72075186224037888 RecordGeneration# 2 PerGenerationCounter# 1 Channel# 3 Status# OK} 2026-01-08T15:32:20.261425Z node 65 :KEYVALUE DEBUG: keyvalue_flat_impl.h:616: KeyValue# 72075186224037888 StateInit flat event type# 268632582 event# TEvCollectGarbageResult {TabletId# 72075186224037888 RecordGeneration# 2 PerGenerationCounter# 2 Channel# 2 Status# OK} 2026-01-08T15:32:20.261447Z node 65 :KEYVALUE DEBUG: keyvalue_flat_impl.h:384: KeyValue# 72075186224037888 Enqueue, event type# 268632582 event# TEvCollectGarbageResult {TabletId# 72075186224037888 RecordGeneration# 2 PerGenerationCounter# 2 Channel# 2 Status# OK} 2026-01-08T15:32:20.261472Z node 65 :KEYVALUE DEBUG: keyvalue_flat_impl.h:616: KeyValue# 72075186224037888 StateInit flat event type# 268632582 event# TEvCollectGarbageResult {TabletId# 72075186224037888 RecordGeneration# 2 PerGenerationCounter# 3 Channel# 3 Status# OK} 2026-01-08T15:32:20.261494Z node 65 :KEYVALUE DEBUG: keyvalue_flat_impl.h:384: KeyValue# 72075186224037888 Enqueue, event type# 268632582 event# TEvCollectGarbageResult {TabletId# 72075186224037888 RecordGeneration# 2 PerGenerationCounter# 3 Channel# 3 Status# OK} 2026-01-08T15:32:20.261751Z node 65 :KEYVALUE DEBUG: 
keyvalue_flat_impl.h:672: KeyValue# 72075186224037888 Dequeue, event type# 268632582 event# TEvCollectGarbageResult {TabletId# 72075186224037888 RecordGeneration# 2 PerGenerationCounter# 0 Channel# 2 Status# OK} 2026-01-08T15:32:20.261778Z node 65 :KEYVALUE DEBUG: keyvalue_flat_impl.h:672: KeyValue# 72075186224037888 Dequeue, event type# 268632582 event# TEvCollectGarbageResult {TabletId# 72075186224037888 RecordGeneration# 2 PerGenerationCounter# 1 Channel# 3 Status# OK} 2026-01-08T15:32:20.261804Z node 65 :KEYVALUE DEBUG: keyvalue_flat_impl.h:672: KeyValue# 72075186224037888 Dequeue, event type# 268632582 event# TEvCollectGarbageResult {TabletId# 72075186224037888 RecordGeneration# 2 PerGenerationCounter# 2 Channel# 2 Status# OK} 2026-01-08T15:32:20.261827Z node 65 :KEYVALUE DEBUG: keyvalue_flat_impl.h:672: KeyValue# 72075186224037888 Dequeue, event type# 268632582 event# TEvCollectGarbageResult {TabletId# 72075186224037888 RecordGeneration# 2 PerGenerationCounter# 3 Channel# 3 Status# OK} 2026-01-08T15:32:20.261901Z node 65 :KEYVALUE DEBUG: keyvalue_flat_impl.h:482: KeyValue# 72075186224037888 Handle TEvCollectGarbageResult Cookie# 1 Marker# KV52 2026-01-08T15:32:20.261929Z node 65 :KEYVALUE DEBUG: keyvalue_state.cpp:696: KeyValue# 72075186224037888 InitialCollectsSent# 4 Marker# KV50 2026-01-08T15:32:20.261939Z node 65 :KEYVALUE DEBUG: keyvalue_flat_impl.h:482: KeyValue# 72075186224037888 Handle TEvCollectGarbageResult Cookie# 1 Marker# KV52 2026-01-08T15:32:20.261947Z node 65 :KEYVALUE DEBUG: keyvalue_state.cpp:696: KeyValue# 72075186224037888 InitialCollectsSent# 3 Marker# KV50 2026-01-08T15:32:20.261958Z node 65 :KEYVALUE DEBUG: keyvalue_flat_impl.h:482: KeyValue# 72075186224037888 Handle TEvCollectGarbageResult Cookie# 2 Marker# KV52 2026-01-08T15:32:20.261965Z node 65 :KEYVALUE DEBUG: keyvalue_state.cpp:696: KeyValue# 72075186224037888 InitialCollectsSent# 2 Marker# KV50 2026-01-08T15:32:20.261976Z node 65 :KEYVALUE DEBUG: keyvalue_flat_impl.h:482: KeyValue# 72075186224037888 Handle TEvCollectGarbageResult Cookie# 2 Marker# KV52 2026-01-08T15:32:20.261987Z node 65 :KEYVALUE DEBUG: keyvalue_state.cpp:696: KeyValue# 72075186224037888 InitialCollectsSent# 1 Marker# KV50 2026-01-08T15:32:20.262017Z node 65 :KEYVALUE DEBUG: keyvalue_state.cpp:719: KeyValue# 72075186224037888 SendCutHistory Marker# KV51 2026-01-08T15:32:20.262080Z node 65 :KEYVALUE DEBUG: keyvalue_flat_impl.h:324: KeyValue# 72075186224037888 TTxRegisterInitialGCCompletion Execute 2026-01-08T15:32:20.262539Z node 65 :KEYVALUE DEBUG: keyvalue_flat_impl.h:616: KeyValue# 72075186224037889 StateInit flat event type# 268632582 event# TEvCollectGarbageResult {TabletId# 72075186224037889 RecordGeneration# 2 PerGenerationCounter# 0 Channel# 2 Status# OK} 2026-01-08T15:32:20.262608Z node 65 :KEYVALUE DEBUG: keyvalue_flat_impl.h:384: KeyValue# 72075186224037889 Enqueue, event type# 268632582 event# TEvCollectGarbageResult {TabletId# 72075186224037889 RecordGeneration# 2 PerGenerationCounter# 0 Channel# 2 Status# OK} 2026-01-08T15:32:20.262649Z node 65 :KEYVALUE DEBUG: keyvalue_flat_impl.h:616: KeyValue# 72075186224037889 StateInit flat event type# 268632582 event# TEvCollectGarbageResult {TabletId# 72075186224037889 RecordGeneration# 2 PerGenerationCounter# 1 Channel# 3 Status# OK} 2026-01-08T15:32:20.262679Z node 65 :KEYVALUE DEBUG: keyvalue_flat_impl.h:384: KeyValue# 72075186224037889 Enqueue, event type# 268632582 event# TEvCollectGarbageResult {TabletId# 72075186224037889 RecordGeneration# 2 PerGenerationCounter# 1 Channel# 3 
Status# OK} 2026-01-08T15:32:20.262732Z node 65 :KEYVALUE DEBUG: keyvalue_flat_impl.h:616: KeyValue# 72075186224037889 StateInit flat event type# 268632582 event# TEvCollectGarbageResult {TabletId# 72075186224037889 RecordGeneration# 2 PerGenerationCounter# 2 Channel# 2 Status# OK} 2026-01-08T15:32:20.262788Z node 65 :KEYVALUE DEBUG: keyvalue_flat_impl.h:384: KeyValue# 72075186224037889 Enqueue, event type# 268632582 event# TEvCollectGarbageResult {TabletId# 72075186224037889 RecordGeneration# 2 PerGenerationCounter# 2 Channel# 2 Status# OK} 2026-01-08T15:32:20.262836Z node 65 :KEYVALUE DEBUG: keyvalue_flat_impl.h:616: KeyValue# 72075186224037889 StateInit flat event type# 268632582 event# TEvCollectGarbageResult {TabletId# 72075186224037889 RecordGeneration# 2 PerGenerationCounter# 3 Channel# 3 Status# OK} 2026-01-08T15:32:20.262865Z node 65 :KEYVALUE DEBUG: keyvalue_flat_impl.h:384: KeyValue# 72075186224037889 Enqueue, event type# 268632582 event# TEvCollectGarbageResult {TabletId# 72075186224037889 RecordGeneration# 2 PerGenerationCounter# 3 Channel# 3 Status# OK} 2026-01-08T15:32:20.263046Z node 65 :KEYVALUE DEBUG: keyvalue_flat_impl.h:672: KeyValue# 72075186224037889 Dequeue, event type# 268632582 event# TEvCollectGarbageResult {TabletId# 72075186224037889 RecordGeneration# 2 PerGenerationCounter# 0 Channel# 2 Status# OK} 2026-01-08T15:32:20.263091Z node 65 :KEYVALUE DEBUG: keyvalue_flat_impl.h:672: KeyValue# 72075186224037889 Dequeue, event type# 268632582 event# TEvCollectGarbageResult {TabletId# 72075186224037889 RecordGeneration# 2 PerGenerationCounter# 1 Channel# 3 Status# OK} 2026-01-08T15:32:20.263126Z node 65 :KEYVALUE DEBUG: keyvalue_flat_impl.h:672: KeyValue# 72075186224037889 Dequeue, event type# 268632582 event# TEvCollectGarbageResult {TabletId# 72075186224037889 RecordGeneration# 2 PerGenerationCounter# 2 Channel# 2 Status# OK} 2026-01-08T15:32:20.263306Z node 65 :KEYVALUE DEBUG: keyvalue_flat_impl.h:672: KeyValue# 72075186224037889 Dequeue, event type# 268632582 event# TEvCollectGarbageResult {TabletId# 72075186224037889 RecordGeneration# 2 PerGenerationCounter# 3 Channel# 3 Status# OK} 2026-01-08T15:32:20.263371Z node 65 :KEYVALUE DEBUG: keyvalue_flat_impl.h:482: KeyValue# 72075186224037889 Handle TEvCollectGarbageResult Cookie# 1 Marker# KV52 2026-01-08T15:32:20.263391Z node 65 :KEYVALUE DEBUG: keyvalue_state.cpp:696: KeyValue# 72075186224037889 InitialCollectsSent# 4 Marker# KV50 2026-01-08T15:32:20.263412Z node 65 :KEYVALUE DEBUG: keyvalue_flat_impl.h:482: KeyValue# 72075186224037889 Handle TEvCollectGarbageResult Cookie# 1 Marker# KV52 2026-01-08T15:32:20.263420Z node 65 :KEYVALUE DEBUG: keyvalue_state.cpp:696: KeyValue# 72075186224037889 InitialCollectsSent# 3 Marker# KV50 2026-01-08T15:32:20.263430Z node 65 :KEYVALUE DEBUG: keyvalue_flat_impl.h:482: KeyValue# 72075186224037889 Handle TEvCollectGarbageResult Cookie# 2 Marker# KV52 2026-01-08T15:32:20.263437Z node 65 :KEYVALUE DEBUG: keyvalue_state.cpp:696: KeyValue# 72075186224037889 InitialCollectsSent# 2 Marker# KV50 2026-01-08T15:32:20.263453Z node 65 :KEYVALUE DEBUG: keyvalue_flat_impl.h:482: KeyValue# 72075186224037889 Handle TEvCollectGarbageResult Cookie# 2 Marker# KV52 2026-01-08T15:32:20.263464Z node 65 :KEYVALUE DEBUG: keyvalue_state.cpp:696: KeyValue# 72075186224037889 InitialCollectsSent# 1 Marker# KV50 2026-01-08T15:32:20.263488Z node 65 :KEYVALUE DEBUG: keyvalue_state.cpp:719: KeyValue# 72075186224037889 SendCutHistory Marker# KV51 2026-01-08T15:32:20.263563Z node 65 :KEYVALUE DEBUG: 
keyvalue_flat_impl.h:324: KeyValue# 72075186224037889 TTxRegisterInitialGCCompletion Execute 2026-01-08T15:32:20.289397Z node 65 :KEYVALUE DEBUG: keyvalue_flat_impl.h:332: KeyValue# 72075186224037889 TTxRegisterInitialGCCompletion Complete 2026-01-08T15:32:20.289542Z node 65 :KEYVALUE DEBUG: keyvalue_flat_impl.h:332: KeyValue# 72075186224037888 TTxRegisterInitialGCCompletion Complete 2026-01-08T15:32:20.463039Z node 65 :GRPC_SERVER DEBUG: grpc_request_proxy.cpp:641: Got grpc request# ListDirectoryRequest, traceId# 01kef3sgbeapcrdxqc45afn5ye, sdkBuildInfo# undef, state# AS_NOT_PERFORMED, database# undef, peer# ipv6:[::1]:56712, grpcInfo# grpc-c++/1.54.3 grpc-c/31.0.0 (linux; chttp2), timeout# undef 2026-01-08T15:32:20.474538Z node 65 :HIVE WARN: hive_impl.cpp:519: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 65, TabletId: 72075186224037889 not found 2026-01-08T15:32:20.474609Z node 65 :HIVE WARN: hive_impl.cpp:519: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 65, TabletId: 72075186224037888 not found 2026-01-08T15:32:20.475460Z node 65 :KEYVALUE DEBUG: keyvalue_flat_impl.h:365: KeyValue# 72075186224037889 OnTabletDead NKikimr::TEvTablet::TEvTabletDead 2026-01-08T15:32:20.476135Z node 65 :KEYVALUE DEBUG: keyvalue_flat_impl.h:365: KeyValue# 72075186224037888 OnTabletDead NKikimr::TEvTablet::TEvTabletDead >> TxUsage::WriteToTopic_Demo_11_Table [GOOD] |86.7%| [TM] {BAZEL_UPLOAD} ydb/services/keyvalue/ut/unittest >> DataShardWrite::ReplaceImmediate [GOOD] >> DataShardWrite::ReplaceImmediate_DefaultValue >> TControlPlaneProxyTest::ShouldSendCreateBinding [GOOD] >> TControlPlaneProxyTest::ShouldSendListBindings >> TxUsage::Sinks_Oltp_WriteToTopicAndTable_2_Query >> KqpPg::InsertValuesFromTableWithDefaultTextNotNullButNull-useSink [GOOD] >> KqpPg::InsertValuesFromTableWithDefaultNegativeCase+useSink >> TxUsage::WriteToTopic_Demo_11_Query >> KqpPg::CreateUniqComplexPgColumn-useSink [GOOD] >> KqpPg::CreateTempTable >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-LocalUser-CreateUser-4 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/yql/unittest >> KqpYql::EvaluateExpr2 [GOOD] Test command err: Trying to start YDB, gRPC: 30214, MsgBus: 7104 2026-01-08T15:32:14.421261Z node 1 :METADATA_PROVIDER WARN: log.cpp:841: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7593013990554465606:2228];send_to=[0:7307199536658146131:7762515]; 2026-01-08T15:32:14.421365Z node 1 :METADATA_PROVIDER ERROR: log.cpp:841: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2026-01-08T15:32:14.702405Z node 1 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: LookupError, path: Root/.metadata/script_executions 2026-01-08T15:32:14.712863Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2026-01-08T15:32:14.712991Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2026-01-08T15:32:14.741168Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2026-01-08T15:32:14.995881Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1374: Notification cookie mismatch for subscription [1:7593013990554465405:2081] 
1767886334406927 != 1767886334406930 2026-01-08T15:32:15.032515Z node 1 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: PathErrorUnknown, path: Root/.metadata/script_executions 2026-01-08T15:32:15.037334Z node 1 :IMPORT WARN: schemeshard_import.cpp:440: Table profiles were not loaded 2026-01-08T15:32:15.078406Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2026-01-08T15:32:15.078430Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2026-01-08T15:32:15.078438Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2026-01-08T15:32:15.078542Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2026-01-08T15:32:15.426847Z node 1 :TX_CONVEYOR ERROR: log.cpp:841: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2026-01-08T15:32:15.640195Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2026-01-08T15:32:15.656227Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:123: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2026-01-08T15:32:15.719831Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:32:15.925139Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:32:16.102487Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:32:16.182811Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:32:18.767153Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7593014007734336457:2400], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:32:18.767317Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:263: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:32:18.773998Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7593014007734336467:2401], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:32:18.774163Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:263: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:32:19.139166Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:32:19.199325Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:32:19.248500Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:32:19.334683Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:32:19.378846Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:32:19.429154Z node 1 :METADATA_PROVIDER ERROR: log.cpp:841: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7593013990554465606:2228];send_to=[0:7307199536658146131:7762515]; 2026-01-08T15:32:19.435124Z node 1 :METADATA_PROVIDER ERROR: log.cpp:841: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2026-01-08T15:32:19.446010Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:32:19.521023Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:32:19.571490Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710669:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:32:19.654293Z node 1 :KQP_WORKLOAD_SERVICE WARN: 
scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7593014012029304633:2484], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:32:19.654395Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:263: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:32:19.654613Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7593014012029304638:2487], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:32:19.654656Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7593014012029304639:2488], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:32:19.654695Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:263: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:32:19.659502Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710670:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2026-01-08T15:32:19.677280Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7593014012029304642:2489], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710670 completed, doublechecking } 2026-01-08T15:32:19.732638Z node 1 :TX_PROXY ERROR: schemereq.cpp:624: Actor# [1:7593014012029304695:3766] txid# 281474976710671, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 53], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } Trying to start YDB, gRPC: 4168, MsgBus: 63163 2026-01-08T15:32:22.864736Z node 2 :METADATA_PROVIDER WARN: log.cpp:841: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7593014025316829866:2086];send_to=[0:7307199536658146131:7762515]; 2026-01-08T15:32:22.864801Z node 2 :METADATA_PROVIDER ERROR: log.cpp:841: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2026-01-08T15:32:22.901299Z node 2 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: LookupError, path: Root/.metadata/script_executions 2026-01-08T15:32:22.993225Z node 2 :IMPORT WARN: schemeshard_import.cpp:440: Table profiles were not loaded 2026-01-08T15:32:22.995098Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1374: Notification cookie mismatch for subscription [2:7593014025316829792:2081] 1767886342838546 != 1767886342838549 2026-01-08T15:32:23.003419Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2026-01-08T15:32:23.003499Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2026-01-08T15:32:23.031447Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2026-01-08T15:32:23.202424Z node 2 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: PathErrorUnknown, path: Root/.metadata/script_executions 2026-01-08T15:32:23.262986Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2026-01-08T15:32:23.263021Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2026-01-08T15:32:23.263028Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2026-01-08T15:32:23.263122Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2026-01-08T15:32:23.676226Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2026-01-08T15:32:23.683897Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:123: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2026-01-08T15:32:23.694152Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at 
schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:32:23.795802Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:32:23.945217Z node 2 :TX_CONVEYOR ERROR: log.cpp:841: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2026-01-08T15:32:24.041548Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:32:24.180168Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:32:27.502448Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7593014046791668167:2400], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:32:27.502573Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:263: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:32:27.504165Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7593014046791668177:2401], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:32:27.504269Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:263: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:32:27.594141Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:32:27.645675Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:32:27.689940Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:32:27.728495Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:32:27.785251Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:32:27.831275Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:32:27.864290Z node 2 :METADATA_PROVIDER ERROR: log.cpp:841: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[2:7593014025316829866:2086];send_to=[0:7307199536658146131:7762515]; 2026-01-08T15:32:27.864440Z node 2 :METADATA_PROVIDER ERROR: log.cpp:841: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2026-01-08T15:32:27.879039Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:32:27.932146Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715669:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:32:28.039743Z node 2 :KQP_WORKLOAD_SERVICE WARN: 
scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7593014051086636346:2484], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:32:28.039867Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:263: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:32:28.040784Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7593014051086636351:2487], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:32:28.040870Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7593014051086636352:2488], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:32:28.041105Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:263: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:32:28.044421Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715670:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2026-01-08T15:32:28.064289Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7593014051086636355:2489], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715670 completed, doublechecking } 2026-01-08T15:32:28.141567Z node 2 :TX_PROXY ERROR: schemereq.cpp:624: Actor# [2:7593014051086636406:3773] txid# 281474976715671, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 53], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } |86.7%| [TM] {BAZEL_UPLOAD} ydb/core/kqp/ut/yql/unittest >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-BuiltinUser-ModifyUser-43 >> TControlPlaneProxyTest::ShouldSendListBindings [GOOD] >> TControlPlaneProxyTest::ShouldSendDescribeBinding |86.7%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/columnshard/ut_schema/ydb-core-tx-columnshard-ut_schema |86.7%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/columnshard/ut_schema/ydb-core-tx-columnshard-ut_schema |86.7%| [TM] {RESULT} ydb/services/keyvalue/ut/unittest |86.7%| [LD] {RESULT} $(B)/ydb/core/tx/columnshard/ut_schema/ydb-core-tx-columnshard-ut_schema >> KqpPg::CreateIndex [GOOD] >> KqpPg::CreateNotNullPgColumn >> KqpScripting::SelectNullType [GOOD] >> KqpScripting::StreamDdlAndDml >> TxUsage::Sinks_Oltp_WriteToTopic_1_Query [GOOD] >> TxUsage::WriteToTopic_Demo_13_Query [GOOD] >> DataShardWrite::UpsertWithDefaults [GOOD] >> DataShardWrite::WriteImmediateBadRequest ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/columnshard/ut_rw/unittest >> TColumnShardTestReadWrite::CompactionSplitGranule_PKInt64 [GOOD] Test command err: 2026-01-08T15:30:51.545757Z node 1 :TX_COLUMNSHARD INFO: log.cpp:841: tablet_id=9437184;self_id=[1:128:2159];fline=columnshard.cpp:110;event=initialize_shard;step=OnActivateExecutor; 2026-01-08T15:30:51.582130Z node 1 :TX_COLUMNSHARD INFO: log.cpp:841: tablet_id=9437184;self_id=[1:128:2159];fline=columnshard.cpp:129;event=initialize_shard;step=initialize_tiring_finished; 2026-01-08T15:30:51.582435Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxInitSchema.Execute at tablet 9437184 2026-01-08T15:30:51.590912Z node 1 :TX_COLUMNSHARD WARN: log.cpp:841: tablet_id=9437184;self_id=[1:128:2159];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2026-01-08T15:30:51.591239Z node 1 :TX_COLUMNSHARD WARN: log.cpp:841: tablet_id=9437184;self_id=[1:128:2159];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2026-01-08T15:30:51.591531Z node 1 :TX_COLUMNSHARD WARN: log.cpp:841: tablet_id=9437184;self_id=[1:128:2159];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2026-01-08T15:30:51.591718Z node 1 :TX_COLUMNSHARD WARN: log.cpp:841: tablet_id=9437184;self_id=[1:128:2159];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2026-01-08T15:30:51.591845Z node 1 :TX_COLUMNSHARD WARN: log.cpp:841: tablet_id=9437184;self_id=[1:128:2159];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2026-01-08T15:30:51.591988Z node 1 :TX_COLUMNSHARD WARN: log.cpp:841: tablet_id=9437184;self_id=[1:128:2159];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2026-01-08T15:30:51.592147Z node 1 :TX_COLUMNSHARD 
WARN: log.cpp:841: tablet_id=9437184;self_id=[1:128:2159];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2026-01-08T15:30:51.592281Z node 1 :TX_COLUMNSHARD WARN: log.cpp:841: tablet_id=9437184;self_id=[1:128:2159];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2026-01-08T15:30:51.592425Z node 1 :TX_COLUMNSHARD WARN: log.cpp:841: tablet_id=9437184;self_id=[1:128:2159];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2026-01-08T15:30:51.592597Z node 1 :TX_COLUMNSHARD WARN: log.cpp:841: tablet_id=9437184;self_id=[1:128:2159];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2026-01-08T15:30:51.592729Z node 1 :TX_COLUMNSHARD WARN: log.cpp:841: tablet_id=9437184;self_id=[1:128:2159];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2026-01-08T15:30:51.592842Z node 1 :TX_COLUMNSHARD WARN: log.cpp:841: tablet_id=9437184;self_id=[1:128:2159];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CopyBlobIdsToV2; 2026-01-08T15:30:51.592984Z node 1 :TX_COLUMNSHARD WARN: log.cpp:841: tablet_id=9437184;self_id=[1:128:2159];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreAppearanceSnapshot; 2026-01-08T15:30:51.629698Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxInitSchema.Complete at tablet 9437184 2026-01-08T15:30:51.630365Z node 1 :TX_COLUMNSHARD INFO: log.cpp:841: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=columnshard__init.cpp:131;step=TTxUpdateSchema.Execute_Start;details=normalizers_count=12;current_normalizer=CLASS_NAME=Granules; 2026-01-08T15:30:51.630433Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:841: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=1;type=Granules; 2026-01-08T15:30:51.630652Z node 1 :TX_COLUMNSHARD INFO: log.cpp:841: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=normalizer.cpp:127;normalizer=TGranulesNormalizer;message=0 chunks found; 2026-01-08T15:30:51.630830Z node 1 :TX_COLUMNSHARD WARN: log.cpp:841: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Granules;id=1; 2026-01-08T15:30:51.630905Z node 1 :TX_COLUMNSHARD WARN: log.cpp:841: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=Chunks;id=Chunks; 2026-01-08T15:30:51.630949Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:841: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=2;type=Chunks; 2026-01-08T15:30:51.631065Z node 1 :TX_COLUMNSHARD INFO: log.cpp:841: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=chunks.cpp:138;normalizer=TChunksNormalizer;message=0 chunks found; 2026-01-08T15:30:51.631143Z node 1 :TX_COLUMNSHARD WARN: log.cpp:841: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Chunks;id=2; 2026-01-08T15:30:51.631217Z node 1 :TX_COLUMNSHARD WARN: 
log.cpp:841: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=TablesCleaner;id=TablesCleaner; 2026-01-08T15:30:51.631252Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:841: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=4;type=TablesCleaner; 2026-01-08T15:30:51.631451Z node 1 :TX_COLUMNSHARD INFO: log.cpp:841: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=normalizer.cpp:123;normalizer=TGranulesNormalizer;message=0 chunks found; 2026-01-08T15:30:51.631545Z node 1 :TX_COLUMNSHARD WARN: log.cpp:841: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=TablesCleaner;id=4; 2026-01-08T15:30:51.631606Z node 1 :TX_COLUMNSHARD WARN: log.cpp:841: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanGranuleId;id=CleanGranuleId; 2026-01-08T15:30:51.631641Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:841: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=6;type=CleanGranuleId; 2026-01-08T15:30:51.631741Z node 1 :TX_COLUMNSHARD INFO: log.cpp:841: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=clean_granule.cpp:133;normalizer=TCleanGranuleIdNormalizer;message=0 chunks found; 2026-01-08T15:30:51.631799Z node 1 :TX_COLUMNSHARD WARN: log.cpp:841: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanGranuleId;id=6; 2026-01-08T15:30:51.631857Z node 1 :TX_COLUMNSHARD WARN: log.cpp:841: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=GCCountersNormalizer;id=GCCountersNormalizer; 2026-01-08T15:30:51.631905Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:841: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=9;type=GCCountersNormalizer; 2026-01-08T15:30:51.632012Z node 1 :TX_COLUMNSHARD WARN: log.cpp:841: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=GCCountersNormalizer;id=9; 2026-01-08T15:30:51.632061Z node 1 :TX_COLUMNSHARD WARN: log.cpp:841: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=SyncPortionFromChunks;id=SyncPortionFromChunks; 2026-01-08T15:30:51.632097Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:841: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=11;type=SyncPortionFromChunks; 2026-01-08T15:30:51.632143Z node 1 :TX_COLUMNSHARD WARN: log.cpp:841: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=SyncPortionFromChunks;id=11; 2026-01-08T15:30:51.632186Z node 1 :TX_COLUMNSHARD WARN: log.cpp:841: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV1Chunks_V2;id=RestoreV1Chunks_V2; 2026-01-08T15:30:51.632213Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:841: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=15;type=RestoreV1Chunks_V2; 2026-01-08T15:30:51.632460Z node 1 :TX_COLUMNSHARD WARN: log.cpp:841: 
tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV1Chunks_V2;id=15; 2026-01-08T15:30:51.632578Z node 1 :TX_COLUMNSHARD WARN: log.cpp:841: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV2Chunks;id=RestoreV2Chunks; 2026-01-08T15:30:51.632612Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:841: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=16;type=RestoreV2Chunks; 2026-01-08T15:30:51.632759Z node 1 :TX_COLUMNSHARD WARN: log.cpp:841: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV2Chunks;id=16; 2026-01-08T15:30:51.632807Z node 1 :TX_COLUMNSHARD WARN: log.cpp:841: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanDeprecatedSnapshot;id=CleanDeprecatedSnapshot; 2026-01-08T15:30:51.632842Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:841: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=17;type=CleanDeprecatedSnapshot; 2026-01-08T15:30:51.632891Z node 1 :TX_COLUMNSHARD WARN: log.cpp:841: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanDeprecatedSnapshot;id=17; 2026-01-08T15:30:51.632943Z node 1 :TX_COLUMNSHARD WARN: log.cpp:841: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV0ChunksMeta;id=RestoreV0ChunksMeta; 2026-01-08T15:30:51.632984Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:841: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=18;type=RestoreV0ChunksMeta; 2026-01-08T15:30:51.633044Z node 1 :TX_COLUMNSHARD WARN: log.cpp:841: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV0ChunksMeta;id=18; 2026-01-08T15:30:51.633106Z node 1 :TX_COLUMNSHARD WARN: log.cpp:841: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CopyBlobIdsToV2;id=CopyBlobIdsToV2; 2026-01-08T15:30:51.633140Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:841: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=19;type=CopyBlobIdsToV2; 2026-01-08T15:30:51.633271Z node 1 :TX_COLUMNSHARD WARN: log.cpp:841: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CopyBlobIdsToV2;id=19; 2026-01-08T15:30:51.633341Z node 1 :TX_COLUMNSHARD WARN: log.cpp:841: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLAS ... 
y_size=286;data_size=278;sum=26280;count=92;size_of_portion=192; 2026-01-08T15:32:29.377964Z node 1 :TX_COLUMNSHARD INFO: log.cpp:841: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;load_stage_name=EXECUTE:column_engines;load_stage_name=EXECUTE:column_engines/granules;load_stage_name=EXECUTE:granules/granule;fline=common_data.cpp:29;EXECUTE:portionsLoadingTime=9404; 2026-01-08T15:32:29.378044Z node 1 :TX_COLUMNSHARD INFO: log.cpp:841: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;load_stage_name=EXECUTE:column_engines;load_stage_name=EXECUTE:column_engines/granules;load_stage_name=EXECUTE:granules/granule;fline=common_data.cpp:29;PRECHARGE:granule_finished_commonLoadingTime=12; 2026-01-08T15:32:29.379189Z node 1 :TX_COLUMNSHARD INFO: log.cpp:841: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;load_stage_name=EXECUTE:column_engines;load_stage_name=EXECUTE:column_engines/granules;load_stage_name=EXECUTE:granules/granule;fline=common_data.cpp:29;EXECUTE:granule_finished_commonLoadingTime=1073; 2026-01-08T15:32:29.379264Z node 1 :TX_COLUMNSHARD INFO: log.cpp:841: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;load_stage_name=EXECUTE:column_engines;load_stage_name=EXECUTE:column_engines/granules;fline=common_data.cpp:29;EXECUTE:granuleLoadingTime=10873; 2026-01-08T15:32:29.379336Z node 1 :TX_COLUMNSHARD INFO: log.cpp:841: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;load_stage_name=EXECUTE:column_engines;fline=common_data.cpp:29;EXECUTE:granulesLoadingTime=11051; 2026-01-08T15:32:29.379413Z node 1 :TX_COLUMNSHARD INFO: log.cpp:841: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;load_stage_name=EXECUTE:column_engines;fline=common_data.cpp:29;PRECHARGE:finishLoadingTime=15; 2026-01-08T15:32:29.379527Z node 1 :TX_COLUMNSHARD INFO: log.cpp:841: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;load_stage_name=EXECUTE:column_engines;fline=common_data.cpp:29;EXECUTE:finishLoadingTime=66; 2026-01-08T15:32:29.379572Z node 1 :TX_COLUMNSHARD INFO: log.cpp:841: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:column_enginesLoadingTime=11898; 2026-01-08T15:32:29.379789Z node 1 :TX_COLUMNSHARD INFO: log.cpp:841: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;PRECHARGE:tx_controllerLoadingTime=151; 2026-01-08T15:32:29.379956Z node 1 :TX_COLUMNSHARD INFO: log.cpp:841: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:tx_controllerLoadingTime=100; 2026-01-08T15:32:29.380152Z node 1 :TX_COLUMNSHARD INFO: log.cpp:841: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;PRECHARGE:operations_managerLoadingTime=153; 2026-01-08T15:32:29.380362Z node 1 :TX_COLUMNSHARD INFO: log.cpp:841: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:operations_managerLoadingTime=145; 2026-01-08T15:32:29.383106Z node 1 :TX_COLUMNSHARD INFO: log.cpp:841: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;PRECHARGE:storages_managerLoadingTime=2682; 2026-01-08T15:32:29.385730Z node 1 :TX_COLUMNSHARD INFO: log.cpp:841: 
tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:storages_managerLoadingTime=2537; 2026-01-08T15:32:29.385826Z node 1 :TX_COLUMNSHARD INFO: log.cpp:841: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;PRECHARGE:db_locksLoadingTime=13; 2026-01-08T15:32:29.385885Z node 1 :TX_COLUMNSHARD INFO: log.cpp:841: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:db_locksLoadingTime=11; 2026-01-08T15:32:29.385933Z node 1 :TX_COLUMNSHARD INFO: log.cpp:841: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;PRECHARGE:bg_sessionsLoadingTime=7; 2026-01-08T15:32:29.386030Z node 1 :TX_COLUMNSHARD INFO: log.cpp:841: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:bg_sessionsLoadingTime=55; 2026-01-08T15:32:29.386080Z node 1 :TX_COLUMNSHARD INFO: log.cpp:841: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;PRECHARGE:sharing_sessionsLoadingTime=6; 2026-01-08T15:32:29.386213Z node 1 :TX_COLUMNSHARD INFO: log.cpp:841: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:sharing_sessionsLoadingTime=91; 2026-01-08T15:32:29.386268Z node 1 :TX_COLUMNSHARD INFO: log.cpp:841: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;PRECHARGE:in_flight_readsLoadingTime=6; 2026-01-08T15:32:29.386361Z node 1 :TX_COLUMNSHARD INFO: log.cpp:841: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:in_flight_readsLoadingTime=53; 2026-01-08T15:32:29.386455Z node 1 :TX_COLUMNSHARD INFO: log.cpp:841: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;PRECHARGE:tiers_managerLoadingTime=55; 2026-01-08T15:32:29.386553Z node 1 :TX_COLUMNSHARD INFO: log.cpp:841: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:tiers_managerLoadingTime=59; 2026-01-08T15:32:29.386634Z node 1 :TX_COLUMNSHARD INFO: log.cpp:841: tablet_id=9437184;event=initialize_shard;fline=common_data.cpp:29;EXECUTE:composite_initLoadingTime=31667; 2026-01-08T15:32:29.386818Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: Index: tables 1 inserted {blob_bytes=110437896;raw_bytes=171489958;count=23;records=1955000} compacted {blob_bytes=0;raw_bytes=0;count=0;records=0} s-compacted {blob_bytes=0;raw_bytes=0;count=0;records=0} inactive {blob_bytes=0;raw_bytes=0;count=0;records=0} evicted {blob_bytes=0;raw_bytes=0;count=0;records=0} at tablet 9437184 2026-01-08T15:32:29.386957Z node 1 :TX_COLUMNSHARD INFO: log.cpp:841: tablet_id=9437184;self_id=[1:2370:4350];process=SwitchToWork;fline=columnshard.cpp:78;event=initialize_shard;step=SwitchToWork; 2026-01-08T15:32:29.387032Z node 1 :TX_COLUMNSHARD INFO: log.cpp:841: tablet_id=9437184;self_id=[1:2370:4350];process=SwitchToWork;fline=columnshard.cpp:81;event=initialize_shard;step=SignalTabletActive; 2026-01-08T15:32:29.387116Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:841: tablet_id=9437184;self_id=[1:2370:4350];process=SwitchToWork;fline=columnshard_impl.cpp:1532;event=OnTieringModified;path_id=NO_VALUE_OPTIONAL; 2026-01-08T15:32:29.387186Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:841: 
tablet_id=9437184;self_id=[1:2370:4350];process=SwitchToWork;fline=column_engine_logs.cpp:543;event=OnTieringModified;new_count_tierings=0; 2026-01-08T15:32:29.387356Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:841: tablet_id=9437184;fline=columnshard_impl.cpp:448;event=EnqueueBackgroundActivities;periodic=0; 2026-01-08T15:32:29.387435Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:841: tablet_id=9437184;fline=tiling.cpp:693;message=tiling compaction: actualize called; 2026-01-08T15:32:29.387480Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:841: tablet_id=9437184;fline=tiling.cpp:710;message=tiling compaction: returning DoGetUsefulMetric zero; 2026-01-08T15:32:29.387532Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:841: tablet_id=9437184;fline=column_engine_logs.cpp:260;event=StartCleanup;portions_count=0; 2026-01-08T15:32:29.387606Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:841: tablet_id=9437184;fline=column_engine_logs.cpp:341;event=StartCleanup;portions_count=0;portions_prepared=0;drop=0;skip=0;portions_counter=0;chunks=0;limit=0;max_portions=1000;max_chunks=500000; 2026-01-08T15:32:29.387667Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:841: tablet_id=9437184;fline=columnshard_impl.cpp:803;background=cleanup;skip_reason=no_changes; 2026-01-08T15:32:29.387714Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:841: tablet_id=9437184;fline=columnshard_impl.cpp:835;background=cleanup;skip_reason=no_changes; 2026-01-08T15:32:29.387850Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:841: tablet_id=9437184;fline=columnshard_impl.cpp:757;background=ttl;skip_reason=no_changes; 2026-01-08T15:32:29.388082Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:841: fline=columnshard.cpp:505;event=TEvReportBaseStatistics;ReportBaseStatisticsPeriodMs=60000;scheduleDuration=60.135000s; 2026-01-08T15:32:29.390672Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:841: tablet_id=9437184;self_id=[1:2370:4350];ev=NActors::TEvents::TEvWakeup;fline=columnshard.cpp:269;event=TEvPrivate::TEvPeriodicWakeup::MANUAL;tablet_id=9437184; 2026-01-08T15:32:29.390950Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:841: tablet_id=9437184;self_id=[1:2370:4350];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;fline=columnshard.cpp:260;event=TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184; 2026-01-08T15:32:29.391020Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:841: tablet_id=9437184;self_id=[1:2370:4350];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=columnshard_impl.cpp:448;event=EnqueueBackgroundActivities;periodic=0; 2026-01-08T15:32:29.391108Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:841: tablet_id=9437184;self_id=[1:2370:4350];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=tiling.cpp:710;message=tiling compaction: returning DoGetUsefulMetric zero; 2026-01-08T15:32:29.391177Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:841: tablet_id=9437184;self_id=[1:2370:4350];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=column_engine_logs.cpp:260;event=StartCleanup;portions_count=0; 2026-01-08T15:32:29.391249Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:841: tablet_id=9437184;self_id=[1:2370:4350];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=column_engine_logs.cpp:341;event=StartCleanup;portions_count=0;portions_prepared=0;drop=0;skip=0;portions_counter=0;chunks=0;limit=0;max_portions=1000;max_chunks=500000; 2026-01-08T15:32:29.391314Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:841: 
tablet_id=9437184;self_id=[1:2370:4350];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=columnshard_impl.cpp:803;background=cleanup;skip_reason=no_changes; 2026-01-08T15:32:29.391373Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:841: tablet_id=9437184;self_id=[1:2370:4350];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=columnshard_impl.cpp:835;background=cleanup;skip_reason=no_changes; 2026-01-08T15:32:29.391476Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:841: tablet_id=9437184;self_id=[1:2370:4350];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;queue=ttl;external_count=0;fline=granule.cpp:172;event=skip_actualization;waiting=1.000000s; 2026-01-08T15:32:29.391546Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:841: tablet_id=9437184;self_id=[1:2370:4350];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=columnshard_impl.cpp:757;background=ttl;skip_reason=no_changes; 2026-01-08T15:32:29.399368Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:841: tablet_id=9437184;self_id=[1:2370:4350];ev=NKikimr::NColumnShard::TEvPrivate::TEvReportExecutorStatistics;fline=columnshard.cpp:515;event=TEvReportExecutorStatistics;ReportExecutorStatisticsPeriodMs=60000;scheduleDuration=60.194000s; 2026-01-08T15:32:29.399455Z node 1 :TX_COLUMNSHARD INFO: log.cpp:841: tablet_id=9437184;self_id=[1:2370:4350];ev=NKikimr::NColumnShard::TEvPrivate::TEvReportExecutorStatistics;fline=columnshard.cpp:447;No CurrentSchemeShardId=9437184; |86.7%| [TM] {BAZEL_UPLOAD} ydb/core/tx/columnshard/ut_rw/unittest >> TxUsage::Sinks_Oltp_WriteToTopic_2_Table >> TControlPlaneProxyTest::ShouldSendDescribeBinding [GOOD] >> TControlPlaneProxyTest::ShouldSendModifyBinding >> TxUsage::WriteToTopic_Demo_14_Table >> TPQTest::TestSourceIdDropByUserWrites [GOOD] >> TPQTest::TestSourceIdDropBySourceIdCount |86.7%| [TM] {asan, default-linux-x86_64, release} ydb/core/grpc_streaming/ut/unittest |86.7%| [TM] {BAZEL_UPLOAD} ydb/core/grpc_streaming/ut/unittest >> TControlPlaneProxyTest::ShouldSendModifyBinding [GOOD] >> TControlPlaneProxyTest::ShouldSendDeleteBinding >> DataShardWrite::ExecSQLUpsertImmediate+EvWrite [GOOD] >> DataShardWrite::ExecSQLUpsertImmediate-EvWrite |86.7%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/replication/service/ut_topic_reader/unittest |86.7%| [TM] {BAZEL_UPLOAD} ydb/core/tx/replication/service/ut_topic_reader/unittest >> TSchemeShardSysViewsUpdateTest::RestoreAbsentSysViews |86.7%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/schemeshard/ut_topic_splitmerge/ydb-core-tx-schemeshard-ut_topic_splitmerge |86.7%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/schemeshard/ut_topic_splitmerge/ydb-core-tx-schemeshard-ut_topic_splitmerge |86.7%| [LD] {RESULT} $(B)/ydb/core/tx/schemeshard/ut_topic_splitmerge/ydb-core-tx-schemeshard-ut_topic_splitmerge >> KqpScripting::StreamExecuteYqlScriptSeveralQueriesComplex [GOOD] >> DataShardWrite::ReplaceImmediate_DefaultValue [GOOD] >> DataShardWrite::UpdateImmediate >> KqpPg::DropTableIfExists_GenericQuery [GOOD] >> KqpPg::EquiJoin+useSink >> DataShardWrite::AsyncIndexKeySizeConstraint [GOOD] >> DataShardWrite::DeleteImmediate >> TTxDataShardPrefixKMeansScan::BuildToBuildWithOverlap [GOOD] >> TTxDataShardRecomputeKMeansScan::BadRequest >> TControlPlaneProxyTest::ShouldSendDeleteBinding [GOOD] |86.7%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/http_proxy/ut/sqs_topic_ut/ydb-core-http_proxy-ut-sqs_topic_ut |86.7%| [LD] {BAZEL_UPLOAD, SKIPPED} 
$(B)/ydb/core/http_proxy/ut/sqs_topic_ut/ydb-core-http_proxy-ut-sqs_topic_ut |86.7%| [LD] {RESULT} $(B)/ydb/core/http_proxy/ut/sqs_topic_ut/ydb-core-http_proxy-ut-sqs_topic_ut ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/yql/unittest >> KqpScripting::StreamExecuteYqlScriptSeveralQueriesComplex [GOOD] Test command err: Trying to start YDB, gRPC: 14701, MsgBus: 30092 2026-01-08T15:32:21.763653Z node 1 :METADATA_PROVIDER WARN: log.cpp:841: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7593014020128885243:2079];send_to=[0:7307199536658146131:7762515]; 2026-01-08T15:32:21.766618Z node 1 :METADATA_PROVIDER ERROR: log.cpp:841: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2026-01-08T15:32:22.065717Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2026-01-08T15:32:22.065820Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2026-01-08T15:32:22.070555Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2026-01-08T15:32:22.102869Z node 1 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: LookupError, path: Root/.metadata/script_executions 2026-01-08T15:32:22.131596Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1374: Notification cookie mismatch for subscription [1:7593014020128885197:2081] 1767886341739470 != 1767886341739473 2026-01-08T15:32:22.147741Z node 1 :IMPORT WARN: schemeshard_import.cpp:440: Table profiles were not loaded 2026-01-08T15:32:22.258807Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2026-01-08T15:32:22.258830Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2026-01-08T15:32:22.258841Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2026-01-08T15:32:22.258911Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2026-01-08T15:32:22.297197Z node 1 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: PathErrorUnknown, path: Root/.metadata/script_executions 2026-01-08T15:32:22.769343Z node 1 :TX_CONVEYOR ERROR: log.cpp:841: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2026-01-08T15:32:22.894620Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2026-01-08T15:32:22.912388Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:123: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2026-01-08T15:32:22.996494Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: 
ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:32:23.199968Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:32:23.384459Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:32:23.462068Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:32:25.321270Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7593014037308756265:2399], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:32:25.321395Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:263: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:32:25.321766Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7593014037308756275:2400], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:32:25.321841Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:263: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:32:25.781069Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:32:25.833083Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:32:25.881728Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:32:25.918946Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:32:25.957559Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:32:26.011257Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:32:26.076056Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:32:26.130368Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710669:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:32:26.214309Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7593014041603724447:2483], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:32:26.214426Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:263: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:32:26.214721Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7593014041603724452:2486], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:32:26.214756Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7593014041603724453:2487], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:32:26.215012Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:263: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:32:26.219169Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710670:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2026-01-08T15:32:26.235208Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:123: NotifyTxCompletion, unknown transaction, txId: 281474976710670, at schemeshard: 72057594046644480 2026-01-08T15:32:26.235479Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7593014041603724456:2488], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710670 completed, doublechecking } 2026-01-08T15:32:26.350701Z node 1 :TX_PROXY ERROR: schemereq.cpp:624: Actor# [1:7593014041603724509:3769] txid# 281474976710671, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 53], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2026-01-08T15:32:26.750044Z node 1 :METADATA_PROVIDER ERROR: log.cpp:841: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7593014020128885243:2079];send_to=[0:7307199536658146131:7762515]; 2026-01-08T15:32:26.750123Z node 1 :METADATA_PROVIDER ERROR: log.cpp:841: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migra ... r=TTableExistsActor;event=undelivered;self_id=[2:7593014055748246209:2064];send_to=[0:7307199536658146131:7762515]; 2026-01-08T15:32:29.340274Z node 2 :METADATA_PROVIDER ERROR: log.cpp:841: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2026-01-08T15:32:29.446639Z node 2 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: LookupError, path: Root/.metadata/script_executions 2026-01-08T15:32:29.667181Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2026-01-08T15:32:29.667254Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2026-01-08T15:32:29.673865Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1374: Notification cookie mismatch for subscription [2:7593014055748246185:2081] 1767886349330431 != 1767886349330434 2026-01-08T15:32:29.703627Z node 2 :IMPORT WARN: schemeshard_import.cpp:440: Table profiles were not loaded 2026-01-08T15:32:29.718483Z node 2 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: LookupError, path: Root/.metadata/script_executions 2026-01-08T15:32:29.735284Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2026-01-08T15:32:29.865801Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2026-01-08T15:32:29.865822Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2026-01-08T15:32:29.865829Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2026-01-08T15:32:29.865897Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2026-01-08T15:32:30.060946Z node 2 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: PathErrorUnknown, path: Root/.metadata/script_executions 2026-01-08T15:32:30.275466Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB 
first called at ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2026-01-08T15:32:30.282663Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:123: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2026-01-08T15:32:30.296382Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:32:30.360016Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:32:30.453174Z node 2 :TX_CONVEYOR ERROR: log.cpp:841: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2026-01-08T15:32:30.526544Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:32:30.601952Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:32:33.155679Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7593014072928117245:2399], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:32:33.155773Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:263: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:32:33.156394Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7593014072928117255:2400], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:32:33.156466Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:263: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:32:33.219415Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:32:33.254595Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:32:33.294785Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:32:33.335454Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:32:33.373746Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:32:33.412750Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:32:33.464252Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:32:33.522054Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715669:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:32:33.638713Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7593014072928118130:2483], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:32:33.638800Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:263: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:32:33.639010Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7593014072928118135:2486], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:32:33.639060Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7593014072928118136:2487], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:32:33.639099Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:263: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:32:33.643487Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715670:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2026-01-08T15:32:33.660329Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7593014072928118139:2488], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715670 completed, doublechecking } 2026-01-08T15:32:33.729823Z node 2 :TX_PROXY ERROR: schemereq.cpp:624: Actor# [2:7593014072928118190:3772] txid# 281474976715671, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 53], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2026-01-08T15:32:34.338105Z node 2 :METADATA_PROVIDER ERROR: log.cpp:841: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[2:7593014055748246209:2064];send_to=[0:7307199536658146131:7762515]; 2026-01-08T15:32:34.338212Z node 2 :METADATA_PROVIDER ERROR: log.cpp:841: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; Result: [[[[101u]]];[[[102u]]];[[[103u]]];[[[104u]]];[[[105u]]]] |86.7%| [TM] {BAZEL_UPLOAD} ydb/core/kqp/ut/yql/unittest >> TExtSubDomainTest::CreateTableInsideAndAlterDomainAndTable-AlterDatabaseCreateHiveFirst-false >> KqpPg::CreateTempTable [GOOD] >> KqpPg::CreateTempTableSerial |86.7%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/sys_view/ut/ydb-core-sys_view-ut |86.7%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/sys_view/ut/ydb-core-sys_view-ut |86.7%| [LD] {RESULT} $(B)/ydb/core/sys_view/ut/ydb-core-sys_view-ut >> KqpPg::InsertNoTargetColumns_SerialNotNull+useSink [GOOD] >> KqpPg::InsertNoTargetColumns_SerialNotNull-useSink >> TSchemeShardSysViewsUpdateTest::RestoreAbsentSysViews [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-BuiltinUser-ModifyUser-43 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-BuiltinUser-ModifyUser-44 >> DataShardWrite::WriteImmediateBadRequest [GOOD] >> DataShardWrite::WriteImmediateSeveralOperations >> TExtSubDomainTest::DeclareAndLs-EnableRealSystemViewPaths-false >> KqpPg::CreateNotNullPgColumn [GOOD] >> KqpPg::CreateSequence >> KqpPg::InsertValuesFromTableWithDefaultNegativeCase+useSink [GOOD] >> KqpPg::InsertValuesFromTableWithDefaultNegativeCase-useSink >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-LocalUser-CreateUser-4 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-LocalUser-CreateUser-5 >> DataShardVolatile::DistributedWriteLaterSnapshotBlockedThenAbort-UseSink [GOOD] >> DataShardVolatile::DistributedWriteAsymmetricExecute >> DataShardVolatile::TwoAppendsMustBeVolatile-UseSink [GOOD] >> DataShardVolatile::VolatileCommitOnBlobStorageFailure+UseSink ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_sysview/unittest >> TSchemeShardSysViewsUpdateTest::RestoreAbsentSysViews [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2143] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2143] Leader for TabletID 72057594046678944 is [1:129:2154] sender: [1:130:2058] recipient: [1:112:2143] 2026-01-08T15:32:37.363181Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7938: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2026-01-08T15:32:37.363284Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7966: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 
2026-01-08T15:32:37.363354Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7852: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2026-01-08T15:32:37.363405Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7868: OperationsProcessing config: using default configuration 2026-01-08T15:32:37.363461Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7874: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2026-01-08T15:32:37.363505Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7874: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2026-01-08T15:32:37.363567Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7998: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2026-01-08T15:32:37.363670Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_shred_manager.cpp:39: [RootShredManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, ShredInterval# 604800.000000s, ShredBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2026-01-08T15:32:37.364627Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:8069: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2026-01-08T15:32:37.364992Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2026-01-08T15:32:37.463184Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7755: Cannot subscribe to console configs 2026-01-08T15:32:37.463262Z node 1 :IMPORT WARN: schemeshard_import.cpp:440: Table profiles were not loaded 2026-01-08T15:32:37.477035Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2026-01-08T15:32:37.477457Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2026-01-08T15:32:37.477669Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2026-01-08T15:32:37.493312Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2026-01-08T15:32:37.493855Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:464: Clear TempDirsState with owners number: 0 2026-01-08T15:32:37.494713Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1383: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2026-01-08T15:32:37.495780Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2026-01-08T15:32:37.502278Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:160: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2026-01-08T15:32:37.502565Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_shred_manager.cpp:84: [RootShredManager] Stop 2026-01-08T15:32:37.504040Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2026-01-08T15:32:37.504119Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2026-01-08T15:32:37.504277Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 
2026-01-08T15:32:37.504333Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2026-01-08T15:32:37.506141Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:213: TTxServerlessStorageBilling.Complete 2026-01-08T15:32:37.506392Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:7111: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2026-01-08T15:32:37.771279Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot" OperationType: ESchemeOpMkDir MkDir { Name: ".sys" } Internal: true FailOnExist: false } TxId: 281474976710657 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:32:37.772516Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "auth_permissions" Type: EAuthPermissions } } TxId: 281474976710658 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:32:37.772692Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "auth_effective_permissions" Type: EAuthEffectivePermissions } } TxId: 281474976710659 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:32:37.772776Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "auth_users" Type: EAuthUsers } } TxId: 281474976710660 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:32:37.772845Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "pg_tables" Type: EPgTables } } TxId: 281474976710661 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:32:37.772917Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "ds_pdisks" Type: EPDisks } } TxId: 281474976710662 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:32:37.773042Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: 
Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "auth_group_members" Type: EAuthGroupMembers } } TxId: 281474976710663 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:32:37.773137Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "top_partitions_one_minute" Type: ETopPartitionsByCpuOneMinute } } TxId: 281474976710664 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:32:37.773236Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "top_queries_by_request_units_one_hour" Type: ETopQueriesByRequestUnitsOneHour } } TxId: 281474976710665 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:32:37.773308Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "ds_vslots" Type: EVSlots } } TxId: 281474976710666 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:32:37.773380Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "top_partitions_by_tli_one_hour" Type: ETopPartitionsByTliOneHour } } TxId: 281474976710667 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:32:37.773481Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "nodes" Type: ENodes } } TxId: 281474976710668 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:32:37.773574Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "top_queries_by_cpu_time_one_hour" Type: ETopQueriesByCpuTimeOneHour } } TxId: 281474976710669 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:32:37.773664Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: 
"top_queries_by_read_bytes_one_hour" Type: ETopQueriesByReadBytesOneHour } } TxId: 281474976710670 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:32:37.773745Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "top_queries_by_read_bytes_one_minute" Type: ETopQueriesByR ... ard DescribePath, at schemeshard: 72057594046678944, txId: 281474976720657, path id: [OwnerId: 72057594046678944, LocalPathId: 38] 2026-01-08T15:32:39.243850Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2026-01-08T15:32:39.243919Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:763:2739], at schemeshard: 72057594046678944, txId: 281474976720657, path id: 2 2026-01-08T15:32:39.243969Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:763:2739], at schemeshard: 72057594046678944, txId: 281474976720657, path id: 38 2026-01-08T15:32:39.244189Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:494: TTxOperationProgress Execute, operationId: 281474976720657:0, at schemeshard: 72057594046678944 2026-01-08T15:32:39.244269Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:497: [72057594046678944] TDone opId# 281474976720657:0 ProgressState 2026-01-08T15:32:39.244438Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:928: Part operation is done id#281474976720657:0 progress is 1/1 2026-01-08T15:32:39.244481Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1714: TOperation IsReadyToDone TxId: 281474976720657 ready parts: 1/1 2026-01-08T15:32:39.244577Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:928: Part operation is done id#281474976720657:0 progress is 1/1 2026-01-08T15:32:39.244618Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1714: TOperation IsReadyToDone TxId: 281474976720657 ready parts: 1/1 2026-01-08T15:32:39.244670Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1679: TOperation IsReadyToNotify, TxId: 281474976720657, ready parts: 1/1, is published: false 2026-01-08T15:32:39.244722Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1714: TOperation IsReadyToDone TxId: 281474976720657 ready parts: 1/1 2026-01-08T15:32:39.244777Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:995: Operation and all the parts is done, operation id: 281474976720657:0 2026-01-08T15:32:39.244825Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5577: RemoveTx for txid 281474976720657:0 2026-01-08T15:32:39.244949Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:611: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 38] was 2 2026-01-08T15:32:39.245021Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:1004: Publication still in progress, tx: 281474976720657, publications: 2, subscribers: 1 2026-01-08T15:32:39.245082Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:1011: Publication details: tx: 
281474976720657, [OwnerId: 72057594046678944, LocalPathId: 2], 40 2026-01-08T15:32:39.245119Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:1011: Publication details: tx: 281474976720657, [OwnerId: 72057594046678944, LocalPathId: 38], 2 2026-01-08T15:32:39.248252Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6274: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 3 LocalPathId: 2 Version: 40 PathOwnerId: 72057594046678944, cookie: 281474976720657 2026-01-08T15:32:39.248394Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 3 LocalPathId: 2 Version: 40 PathOwnerId: 72057594046678944, cookie: 281474976720657 2026-01-08T15:32:39.248494Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 2, at schemeshard: 72057594046678944, txId: 281474976720657 2026-01-08T15:32:39.248544Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 281474976720657, pathId: [OwnerId: 72057594046678944, LocalPathId: 2], version: 40 2026-01-08T15:32:39.248596Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:611: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 36 2026-01-08T15:32:39.249897Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6274: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 3 LocalPathId: 38 Version: 2 PathOwnerId: 72057594046678944, cookie: 281474976720657 2026-01-08T15:32:39.249987Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 3 LocalPathId: 38 Version: 2 PathOwnerId: 72057594046678944, cookie: 281474976720657 2026-01-08T15:32:39.250026Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046678944, txId: 281474976720657 2026-01-08T15:32:39.250071Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 281474976720657, pathId: [OwnerId: 72057594046678944, LocalPathId: 38], version: 2 2026-01-08T15:32:39.250101Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:611: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 38] was 1 2026-01-08T15:32:39.250171Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 281474976720657, subscribers: 1 2026-01-08T15:32:39.250213Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:212: TTxAckPublishToSchemeBoard Notify send TEvNotifyTxCompletionResult, at schemeshard: 72057594046678944, to actorId: [1:767:2743] 2026-01-08T15:32:39.254332Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 281474976720657 2026-01-08T15:32:39.254955Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, 
cookie: 281474976720657 2026-01-08T15:32:39.255172Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:213: SysViewsRosterUpdate# [1:767:2743] at schemeshard: 72057594046678944 Handle TEvNotifyTxCompletionResult, create sys view '/MyRoot/.sys/ds_pdisks' 2026-01-08T15:32:39.255242Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:221: SysViewsRosterUpdate# [1:767:2743] at schemeshard: 72057594046678944 Send TEvRosterUpdateFinished Leader for TabletID 72057594046678944 is [1:711:2698] sender: [1:795:2058] recipient: [1:15:2062] 2026-01-08T15:32:39.324982Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/.sys/partition_stats" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2026-01-08T15:32:39.325317Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/.sys/partition_stats" took 377us result status StatusSuccess 2026-01-08T15:32:39.325724Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/.sys/partition_stats" PathDescription { Self { Name: "partition_stats" PathId: 30 SchemeshardId: 72057594046678944 PathType: EPathTypeSysView CreateFinished: true CreateTxId: 281474976710685 CreateStep: 5000021 ParentPathId: 2 PathState: EPathStateNoChanges Owner: "metadata@system" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 SysViewVersion: 1 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 36 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } SysViewDescription { Name: "partition_stats" Type: EPartitionStats SourceObject { OwnerId: 72057594046678944 LocalId: 1 } } } PathId: 30 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 2026-01-08T15:32:39.326592Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/.sys/ds_pdisks" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2026-01-08T15:32:39.326845Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/.sys/ds_pdisks" took 250us 
result status StatusSuccess 2026-01-08T15:32:39.327944Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/.sys/ds_pdisks" PathDescription { Self { Name: "ds_pdisks" PathId: 38 SchemeshardId: 72057594046678944 PathType: EPathTypeSysView CreateFinished: true CreateTxId: 281474976720657 CreateStep: 5000039 ParentPathId: 2 PathState: EPathStateNoChanges Owner: "metadata@system" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 SysViewVersion: 1 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 36 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } SysViewDescription { Name: "ds_pdisks" Type: EPDisks SourceObject { OwnerId: 72057594046678944 LocalId: 1 } } } PathId: 38 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 |86.7%| [TM] {BAZEL_UPLOAD} ydb/core/tx/schemeshard/ut_sysview/unittest >> TExtSubDomainTest::DeclareAndLs-EnableRealSystemViewPaths-true >> TxUsage::WriteToTopic_Demo_19_RestartBeforeCommit_Table [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/fq/libs/control_plane_proxy/ut/unittest >> TControlPlaneProxyTest::ShouldSendDeleteBinding [GOOD] Test command err: 2026-01-08T15:28:22.943305Z node 1 :YQ_CONTROL_PLANE_STORAGE ERROR: control_plane_proxy.cpp:682: CreateQueryRequest, validation failed: test_user@staff **** (00000000) content { name: "my_query_name" } error:
: Error: No permission yq.queries.create@as in a given scope , code: 1000 2026-01-08T15:28:23.361123Z node 2 :YQ_CONTROL_PLANE_STORAGE ERROR: control_plane_proxy.cpp:752: ListQueriesRequest, validation failed: yandexcloud://my_folder test_user@staff **** (00000000) error:
: Error: No permission yq.queries.get@as in a given scope yandexcloud://my_folder, code: 1000 2026-01-08T15:28:24.139397Z node 3 :YQ_CONTROL_PLANE_STORAGE ERROR: control_plane_proxy.cpp:817: DescribeQueryRequest, validation failed: yandexcloud://my_folder test_user@staff **** (00000000) error:
: Error: No permission yq.queries.get@as in a given scope yandexcloud://my_folder, code: 1000 2026-01-08T15:28:24.934540Z node 4 :YQ_CONTROL_PLANE_STORAGE ERROR: control_plane_proxy.cpp:884: GetQueryStatusRequest, validation failed: yandexcloud://my_folder test_user@staff **** (00000000) error:
: Error: No permission yq.queries.getStatus@as in a given scope yandexcloud://my_folder, code: 1000 2026-01-08T15:28:25.527957Z node 5 :YQ_CONTROL_PLANE_STORAGE ERROR: control_plane_proxy.cpp:950: ModifyQueryRequest, validation failed: yandexcloud://my_folder test_user@staff **** (00000000) error:
: Error: No permission yq.queries.update@as in a given scope yandexcloud://my_folder, code: 1000 2026-01-08T15:28:26.013328Z node 6 :YQ_CONTROL_PLANE_STORAGE ERROR: control_plane_proxy.cpp:1025: DeleteQueryRequest, validation failed: yandexcloud://my_folder test_user@staff **** (00000000) error:
: Error: No permission yq.queries.delete@as in a given scope yandexcloud://my_folder, code: 1000 2026-01-08T15:28:26.468447Z node 7 :YQ_CONTROL_PLANE_STORAGE ERROR: control_plane_proxy.cpp:1090: ControlQueryRequest, validation failed: yandexcloud://my_folder test_user@staff **** (00000000) error:
: Error: No permission yq.queries.control@as in a given scope yandexcloud://my_folder, code: 1000 2026-01-08T15:28:26.995792Z node 8 :YQ_CONTROL_PLANE_STORAGE ERROR: control_plane_proxy.cpp:1158: GetResultDataRequest, validation failed: yandexcloud://my_folder test_user@staff **** (00000000) error:
: Error: No permission yq.queries.getData@as in a given scope yandexcloud://my_folder, code: 1000 2026-01-08T15:28:27.450688Z node 9 :YQ_CONTROL_PLANE_STORAGE ERROR: control_plane_proxy.cpp:1223: ListJobsRequest, validation failed: test_user@staff **** (00000000) query_id: "my_query_id" error:
: Error: No permission yq.jobs.get@as in a given scope , code: 1000 2026-01-08T15:28:27.902652Z node 10 :YQ_CONTROL_PLANE_STORAGE ERROR: control_plane_proxy.cpp:1288: DescribeJobRequest, validation failed: yandexcloud://my_folder test_user@staff **** (00000000) error:
: Error: No permission yq.jobs.get@as in a given scope yandexcloud://my_folder, code: 1000 2026-01-08T15:28:28.576335Z node 11 :YQ_CONTROL_PLANE_STORAGE ERROR: control_plane_proxy.cpp:1359: CreateConnectionRequest, validation failed: yandexcloud://my_folder test_user@staff **** (00000000) error:
: Error: No permission yq.connections.create@as in a given scope yandexcloud://my_folder, code: 1000 2026-01-08T15:28:29.019915Z node 12 :YQ_CONTROL_PLANE_STORAGE ERROR: control_plane_proxy.cpp:1359: CreateConnectionRequest, validation failed: yandexcloud://my_folder test_user@staff **** (00000000) content { setting { ydb_database { auth { service_account { id: "my_sa_id" } } } } } error:
: Error: No permission iam.serviceAccounts.use@as in a given scope yandexcloud://my_folder, code: 1000 2026-01-08T15:28:29.658486Z node 13 :YQ_CONTROL_PLANE_STORAGE ERROR: control_plane_proxy.cpp:1507: ListConnectionsRequest, validation failed: yandexcloud://my_folder test_user@staff **** (00000000) error:
: Error: No permission yq.connections.get@as in a given scope yandexcloud://my_folder, code: 1000 2026-01-08T15:28:30.372460Z node 14 :YQ_CONTROL_PLANE_STORAGE ERROR: control_plane_proxy.cpp:1572: DescribeConnectionRequest, validation failed: yandexcloud://my_folder test_user@staff **** (00000000) error:
: Error: No permission yq.connections.get@as in a given scope yandexcloud://my_folder, code: 1000 2026-01-08T15:28:31.052466Z node 15 :YQ_CONTROL_PLANE_STORAGE ERROR: control_plane_proxy.cpp:1642: ModifyConnectionRequest, validation failed: yandexcloud://my_folder test_user@staff **** (00000000) error:
: Error: No permission yq.connections.update@as in a given scope yandexcloud://my_folder, code: 1000 2026-01-08T15:28:31.493761Z node 16 :YQ_CONTROL_PLANE_STORAGE ERROR: control_plane_proxy.cpp:1642: ModifyConnectionRequest, validation failed: yandexcloud://my_folder test_user@staff **** (00000000) content { setting { ydb_database { auth { service_account { id: "my_sa_id" } } } } } error:
: Error: No permission iam.serviceAccounts.use@as in a given scope yandexcloud://my_folder, code: 1000 2026-01-08T15:28:31.987149Z node 17 :YQ_CONTROL_PLANE_STORAGE ERROR: control_plane_proxy.cpp:1793: DeleteConnectionRequest, validation failed: yandexcloud://my_folder test_user@staff **** (00000000) error:
: Error: No permission yq.connections.delete@as in a given scope yandexcloud://my_folder, code: 1000 2026-01-08T15:28:32.467043Z node 18 :YQ_CONTROL_PLANE_STORAGE ERROR: control_plane_proxy.cpp:1914: TestConnectionRequest, validation failed: yandexcloud://my_folder test_user@staff **** (00000000) error:
: Error: No permission yq.connections.create@as in a given scope yandexcloud://my_folder, code: 1000 2026-01-08T15:28:33.546358Z node 19 :YQ_CONTROL_PLANE_STORAGE ERROR: control_plane_proxy.cpp:1914: TestConnectionRequest, validation failed: yandexcloud://my_folder test_user@staff **** (00000000) setting { ydb_database { auth { service_account { id: "my_sa_id" } } } } error:
: Error: No permission iam.serviceAccounts.use@as in a given scope yandexcloud://my_folder, code: 1000 2026-01-08T15:28:34.018245Z node 20 :YQ_CONTROL_PLANE_STORAGE ERROR: control_plane_proxy.cpp:1972: CreateBindingRequest, validation failed: yandexcloud://my_folder test_user@staff **** (00000000) error:
: Error: No permission yq.bindings.create@as in a given scope yandexcloud://my_folder, code: 1000 2026-01-08T15:28:34.574814Z node 21 :YQ_CONTROL_PLANE_STORAGE ERROR: control_plane_proxy.cpp:2126: ListBindingsRequest, validation failed: yandexcloud://my_folder test_user@staff **** (00000000) error:
: Error: No permission yq.bindings.get@as in a given scope yandexcloud://my_folder, code: 1000 2026-01-08T15:28:35.224340Z node 22 :YQ_CONTROL_PLANE_STORAGE ERROR: control_plane_proxy.cpp:2191: DescribeBindingRequest, validation failed: yandexcloud://my_folder test_user@staff **** (00000000) error:
: Error: No permission yq.bindings.get@as in a given scope yandexcloud://my_folder, code: 1000 2026-01-08T15:28:35.985344Z node 23 :YQ_CONTROL_PLANE_STORAGE ERROR: control_plane_proxy.cpp:2256: ModifyBindingRequest, validation failed: yandexcloud://my_folder test_user@staff **** (00000000) error:
: Error: No permission yq.bindings.update@as in a given scope yandexcloud://my_folder, code: 1000 2026-01-08T15:28:36.560798Z node 24 :YQ_CONTROL_PLANE_STORAGE ERROR: control_plane_proxy.cpp:2402: DeleteBindingRequest, validation failed: yandexcloud://my_folder test_user@staff **** (00000000) error:
: Error: No permission yq.bindings.delete@as in a given scope yandexcloud://my_folder, code: 1000 2026-01-08T15:29:07.849449Z node 72 :YQ_CONTROL_PLANE_STORAGE ERROR: control_plane_proxy.cpp:682: CreateQueryRequest, validation failed: test_user@staff **** (00000000) content { name: "my_query_name" } error:
: Error: No permission yq.queries.create@as in a given scope , code: 1000 2026-01-08T15:29:08.611590Z node 73 :YQ_CONTROL_PLANE_STORAGE ERROR: control_plane_proxy.cpp:752: ListQueriesRequest, validation failed: yandexcloud://my_folder test_user@staff **** (00000000) error:
: Error: No permission yq.queries.get@as in a given scope yandexcloud://my_folder, code: 1000 2026-01-08T15:29:09.480863Z node 74 :YQ_CONTROL_PLANE_STORAGE ERROR: control_plane_proxy.cpp:817: DescribeQueryRequest, validation failed: yandexcloud://my_folder test_user@staff **** (00000000) error:
: Error: No permission yq.queries.get@as in a given scope yandexcloud://my_folder, code: 1000 2026-01-08T15:29:10.298585Z node 75 :YQ_CONTROL_PLANE_STORAGE ERROR: control_plane_proxy.cpp:884: GetQueryStatusRequest, validation failed: yandexcloud://my_folder test_user@staff **** (00000000) error:
: Error: No permission yq.queries.getStatus@as in a given scope yandexcloud://my_folder, code: 1000 2026-01-08T15:29:10.897518Z node 76 :YQ_CONTROL_PLANE_STORAGE ERROR: control_plane_proxy.cpp:950: ModifyQueryRequest, validation failed: yandexcloud://my_folder test_user@staff **** (00000000) error:
: Error: No permission yq.queries.update@as in a given scope yandexcloud://my_folder, code: 1000 2026-01-08T15:29:11.599636Z node 77 :YQ_CONTROL_PLANE_STORAGE ERROR: control_plane_proxy.cpp:1025: DeleteQueryRequest, validation failed: yandexcloud://my_folder test_user@staff **** (00000000) error:
: Error: No permission yq.queries.delete@as in a given scope yandexcloud://my_folder, code: 1000 2026-01-08T15:29:12.472603Z node 78 :YQ_CONTROL_PLANE_STORAGE ERROR: control_plane_proxy.cpp:1090: ControlQueryRequest, validation failed: yandexcloud://my_folder test_user@staff **** (00000000) error:
: Error: No permission yq.queries.control@as in a given scope yandexcloud://my_folder, code: 1000 2026-01-08T15:29:13.051123Z node 79 :YQ_CONTROL_PLANE_STORAGE ERROR: control_plane_proxy.cpp:1158: GetResultDataRequest, validation failed: yandexcloud://my_folder test_user@staff **** (00000000) error:
: Error: No permission yq.queries.getData@as in a given scope yandexcloud://my_folder, code: 1000 2026-01-08T15:29:13.732527Z node 80 :YQ_CONTROL_PLANE_STORAGE ERROR: control_plane_proxy.cpp:1223: ListJobsRequest, validation failed: test_user@staff **** (00000000) query_id: "my_query_id" error:
: Error: No permission yq.jobs.get@as in a given scope , code: 1000 2026-01-08T15:29:14.595709Z node 81 :YQ_CONTROL_PLANE_STORAGE ERROR: control_plane_proxy.cpp:1288: DescribeJobRequest, validation failed: yandexcloud://my_folder test_user@staff **** (00000000) error:
: Error: No permission yq.jobs.get@as in a given scope yandexcloud://my_folder, code: 1000 2026-01-08T15:29:15.506872Z node 82 :YQ_CONTROL_PLANE_STORAGE ERROR: control_plane_proxy.cpp:1359: CreateConnectionRequest, validation failed: yandexcloud://my_folder test_user@staff **** (00000000) error:
: Error: No permission yq.connections.create@as in a given scope yandexcloud://my_folder, code: 1000 2026-01-08T15:29:16.224296Z node 83 :YQ_CONTROL_PLANE_STORAGE ERROR: control_plane_proxy.cpp:1359: CreateConnectionRequest, validation failed: yandexcloud://my_folder test_user@staff **** (00000000) content { setting { ydb_database { auth { service_account { id: "my_sa_id" } } } } } error:
: Error: No permission iam.serviceAccounts.use@as in a given scope yandexcloud://my_folder, code: 1000 2026-01-08T15:29:17.047490Z node 84 :YQ_CONTROL_PLANE_STORAGE ERROR: control_plane_proxy.cpp:1507: ListConnectionsRequest, validation failed: yandexcloud://my_folder test_user@staff **** (00000000) error:
: Error: No permission yq.connections.get@as in a given scope yandexcloud://my_folder, code: 1000 2026-01-08T15:29:17.886192Z node 85 :YQ_CONTROL_PLANE_STORAGE ERROR: control_plane_proxy.cpp:1572: DescribeConnectionRequest, validation failed: yandexcloud://my_folder test_user@staff **** (00000000) error:
: Error: No permission yq.connections.get@as in a given scope yandexcloud://my_folder, code: 1000 2026-01-08T15:29:18.743948Z node 86 :YQ_CONTROL_PLANE_STORAG ... L_PLANE_STORAGE ERROR: control_plane_proxy.cpp:1914: TestConnectionRequest, validation failed: yandexcloud://my_folder test_user@staff **** (00000000) error:
: Error: No permission yq.connections.create@as in a given scope yandexcloud://my_folder, code: 1000 2026-01-08T15:29:21.706437Z node 90 :YQ_CONTROL_PLANE_STORAGE ERROR: control_plane_proxy.cpp:1914: TestConnectionRequest, validation failed: yandexcloud://my_folder test_user@staff **** (00000000) setting { ydb_database { auth { service_account { id: "my_sa_id" } } } } error:
: Error: No permission iam.serviceAccounts.use@as in a given scope yandexcloud://my_folder, code: 1000 2026-01-08T15:29:22.887014Z node 91 :YQ_CONTROL_PLANE_STORAGE ERROR: control_plane_proxy.cpp:1972: CreateBindingRequest, validation failed: yandexcloud://my_folder test_user@staff **** (00000000) error:
: Error: No permission yq.bindings.create@as in a given scope yandexcloud://my_folder, code: 1000
: Error: No permission yq.connections.get@as in a given scope yandexcloud://my_folder, code: 1000 2026-01-08T15:29:24.136939Z node 92 :YQ_CONTROL_PLANE_STORAGE ERROR: control_plane_proxy.cpp:2126: ListBindingsRequest, validation failed: yandexcloud://my_folder test_user@staff **** (00000000) error:
: Error: No permission yq.bindings.get@as in a given scope yandexcloud://my_folder, code: 1000 2026-01-08T15:29:25.178273Z node 93 :YQ_CONTROL_PLANE_STORAGE ERROR: control_plane_proxy.cpp:2191: DescribeBindingRequest, validation failed: yandexcloud://my_folder test_user@staff **** (00000000) error:
: Error: No permission yq.bindings.get@as in a given scope yandexcloud://my_folder, code: 1000 2026-01-08T15:29:26.195512Z node 94 :YQ_CONTROL_PLANE_STORAGE ERROR: control_plane_proxy.cpp:2256: ModifyBindingRequest, validation failed: yandexcloud://my_folder test_user@staff **** (00000000) error:
: Error: No permission yq.bindings.update@as in a given scope yandexcloud://my_folder, code: 1000 2026-01-08T15:29:27.111785Z node 95 :YQ_CONTROL_PLANE_STORAGE ERROR: control_plane_proxy.cpp:2402: DeleteBindingRequest, validation failed: yandexcloud://my_folder test_user@staff **** (00000000) error:
: Error: No permission yq.bindings.delete@as in a given scope yandexcloud://my_folder, code: 1000 2026-01-08T15:30:56.984214Z node 163 :YQ_CONTROL_PLANE_STORAGE ERROR: control_plane_proxy.cpp:682: CreateQueryRequest, validation failed: test_user_3@staff **** (00000000) content { name: "my_query_name" } error:
: Error: No permission yq.queries.create@as in a given scope , code: 1000 2026-01-08T15:31:02.622156Z node 166 :YQ_CONTROL_PLANE_STORAGE ERROR: control_plane_proxy.cpp:884: GetQueryStatusRequest, validation failed: yandexcloud://my_folder test_user_3@staff **** (00000000) error:
: Error: No permission yq.queries.getStatus@as in a given scope yandexcloud://my_folder, code: 1000 2026-01-08T15:31:04.442067Z node 167 :YQ_CONTROL_PLANE_STORAGE ERROR: control_plane_proxy.cpp:950: ModifyQueryRequest, validation failed: yandexcloud://my_folder test_user_3@staff **** (00000000) error:
: Error: No permission yq.queries.update@as in a given scope yandexcloud://my_folder, code: 1000 2026-01-08T15:31:05.762875Z node 168 :YQ_CONTROL_PLANE_STORAGE ERROR: control_plane_proxy.cpp:1025: DeleteQueryRequest, validation failed: yandexcloud://my_folder test_user_3@staff **** (00000000) error:
: Error: No permission yq.queries.delete@as in a given scope yandexcloud://my_folder, code: 1000 2026-01-08T15:31:06.938617Z node 169 :YQ_CONTROL_PLANE_STORAGE ERROR: control_plane_proxy.cpp:1090: ControlQueryRequest, validation failed: yandexcloud://my_folder test_user_3@staff **** (00000000) error:
: Error: No permission yq.queries.control@as in a given scope yandexcloud://my_folder, code: 1000 2026-01-08T15:31:08.067709Z node 170 :YQ_CONTROL_PLANE_STORAGE ERROR: control_plane_proxy.cpp:1158: GetResultDataRequest, validation failed: yandexcloud://my_folder test_user_3@staff **** (00000000) error:
: Error: No permission yq.queries.getData@as in a given scope yandexcloud://my_folder, code: 1000 2026-01-08T15:31:12.985461Z node 173 :YQ_CONTROL_PLANE_STORAGE ERROR: control_plane_proxy.cpp:1359: CreateConnectionRequest, validation failed: yandexcloud://my_folder test_user_3@staff **** (00000000) error:
: Error: No permission yq.connections.create@as in a given scope yandexcloud://my_folder, code: 1000 2026-01-08T15:31:18.205940Z node 176 :YQ_CONTROL_PLANE_STORAGE ERROR: control_plane_proxy.cpp:1642: ModifyConnectionRequest, validation failed: yandexcloud://my_folder test_user_3@staff **** (00000000) error:
: Error: No permission yq.connections.update@as in a given scope yandexcloud://my_folder, code: 1000 2026-01-08T15:31:19.631820Z node 177 :YQ_CONTROL_PLANE_STORAGE ERROR: control_plane_proxy.cpp:1793: DeleteConnectionRequest, validation failed: yandexcloud://my_folder test_user_3@staff **** (00000000) error:
: Error: No permission yq.connections.delete@as in a given scope yandexcloud://my_folder, code: 1000 2026-01-08T15:31:21.016233Z node 178 :YQ_CONTROL_PLANE_STORAGE ERROR: control_plane_proxy.cpp:1914: TestConnectionRequest, validation failed: yandexcloud://my_folder test_user_3@staff **** (00000000) error:
: Error: No permission yq.connections.create@as in a given scope yandexcloud://my_folder, code: 1000 2026-01-08T15:31:22.231855Z node 179 :YQ_CONTROL_PLANE_STORAGE ERROR: control_plane_proxy.cpp:1972: CreateBindingRequest, validation failed: yandexcloud://my_folder test_user_3@staff **** (00000000) error:
: Error: No permission yq.bindings.create@as in a given scope yandexcloud://my_folder, code: 1000 2026-01-08T15:31:26.025447Z node 182 :YQ_CONTROL_PLANE_STORAGE ERROR: control_plane_proxy.cpp:2256: ModifyBindingRequest, validation failed: yandexcloud://my_folder test_user_3@staff **** (00000000) error:
: Error: No permission yq.bindings.update@as in a given scope yandexcloud://my_folder, code: 1000 2026-01-08T15:31:27.219640Z node 183 :YQ_CONTROL_PLANE_STORAGE ERROR: control_plane_proxy.cpp:2402: DeleteBindingRequest, validation failed: yandexcloud://my_folder test_user_3@staff **** (00000000) error:
: Error: No permission yq.bindings.delete@as in a given scope yandexcloud://my_folder, code: 1000 2026-01-08T15:31:30.038710Z node 185 :YQ_CONTROL_PLANE_STORAGE ERROR: control_plane_proxy.cpp:752: ListQueriesRequest, validation failed: yandexcloud://my_folder test_user_4@staff **** (00000000) error:
: Error: No permission yq.queries.get@as in a given scope yandexcloud://my_folder, code: 1000 2026-01-08T15:31:31.400774Z node 186 :YQ_CONTROL_PLANE_STORAGE ERROR: control_plane_proxy.cpp:817: DescribeQueryRequest, validation failed: yandexcloud://my_folder test_user_4@staff **** (00000000) error:
: Error: No permission yq.queries.get@as in a given scope yandexcloud://my_folder, code: 1000 2026-01-08T15:31:34.622679Z node 188 :YQ_CONTROL_PLANE_STORAGE ERROR: control_plane_proxy.cpp:950: ModifyQueryRequest, validation failed: yandexcloud://my_folder test_user_4@staff **** (00000000) error:
: Error: No permission yq.queries.update@as in a given scope yandexcloud://my_folder, code: 1000 2026-01-08T15:31:36.080223Z node 189 :YQ_CONTROL_PLANE_STORAGE ERROR: control_plane_proxy.cpp:1025: DeleteQueryRequest, validation failed: yandexcloud://my_folder test_user_4@staff **** (00000000) error:
: Error: No permission yq.queries.delete@as in a given scope yandexcloud://my_folder, code: 1000 2026-01-08T15:31:37.313245Z node 190 :YQ_CONTROL_PLANE_STORAGE ERROR: control_plane_proxy.cpp:1090: ControlQueryRequest, validation failed: yandexcloud://my_folder test_user_4@staff **** (00000000) error:
: Error: No permission yq.queries.control@as in a given scope yandexcloud://my_folder, code: 1000 2026-01-08T15:31:40.775610Z node 192 :YQ_CONTROL_PLANE_STORAGE ERROR: control_plane_proxy.cpp:1223: ListJobsRequest, validation failed: test_user_4@staff **** (00000000) query_id: "my_query_id" error:
: Error: No permission yq.jobs.get@as in a given scope , code: 1000 2026-01-08T15:31:42.973938Z node 193 :YQ_CONTROL_PLANE_STORAGE ERROR: control_plane_proxy.cpp:1288: DescribeJobRequest, validation failed: yandexcloud://my_folder test_user_4@staff **** (00000000) error:
: Error: No permission yq.jobs.get@as in a given scope yandexcloud://my_folder, code: 1000 2026-01-08T15:31:44.528576Z node 194 :YQ_CONTROL_PLANE_STORAGE ERROR: control_plane_proxy.cpp:1359: CreateConnectionRequest, validation failed: yandexcloud://my_folder test_user_4@staff **** (00000000) error:
: Error: No permission yq.connections.create@as in a given scope yandexcloud://my_folder, code: 1000 2026-01-08T15:31:46.197363Z node 195 :YQ_CONTROL_PLANE_STORAGE ERROR: control_plane_proxy.cpp:1507: ListConnectionsRequest, validation failed: yandexcloud://my_folder test_user_4@staff **** (00000000) error:
: Error: No permission yq.connections.get@as in a given scope yandexcloud://my_folder, code: 1000 2026-01-08T15:31:48.209803Z node 196 :YQ_CONTROL_PLANE_STORAGE ERROR: control_plane_proxy.cpp:1572: DescribeConnectionRequest, validation failed: yandexcloud://my_folder test_user_4@staff **** (00000000) error:
: Error: No permission yq.connections.get@as in a given scope yandexcloud://my_folder, code: 1000 2026-01-08T15:31:49.824733Z node 197 :YQ_CONTROL_PLANE_STORAGE ERROR: control_plane_proxy.cpp:1642: ModifyConnectionRequest, validation failed: yandexcloud://my_folder test_user_4@staff **** (00000000) error:
: Error: No permission yq.connections.update@as in a given scope yandexcloud://my_folder, code: 1000 2026-01-08T15:31:51.074136Z node 198 :YQ_CONTROL_PLANE_STORAGE ERROR: control_plane_proxy.cpp:1793: DeleteConnectionRequest, validation failed: yandexcloud://my_folder test_user_4@staff **** (00000000) error:
: Error: No permission yq.connections.delete@as in a given scope yandexcloud://my_folder, code: 1000 2026-01-08T15:31:52.281254Z node 199 :YQ_CONTROL_PLANE_STORAGE ERROR: control_plane_proxy.cpp:1914: TestConnectionRequest, validation failed: yandexcloud://my_folder test_user_4@staff **** (00000000) error:
: Error: No permission yq.connections.create@as in a given scope yandexcloud://my_folder, code: 1000 2026-01-08T15:31:53.453030Z node 200 :YQ_CONTROL_PLANE_STORAGE ERROR: control_plane_proxy.cpp:1972: CreateBindingRequest, validation failed: yandexcloud://my_folder test_user_4@staff **** (00000000) error:
: Error: No permission yq.bindings.create@as in a given scope yandexcloud://my_folder, code: 1000
: Error: No permission yq.connections.get@as in a given scope yandexcloud://my_folder, code: 1000 2026-01-08T15:31:55.216544Z node 201 :YQ_CONTROL_PLANE_STORAGE ERROR: control_plane_proxy.cpp:2126: ListBindingsRequest, validation failed: yandexcloud://my_folder test_user_4@staff **** (00000000) error:
: Error: No permission yq.bindings.get@as in a given scope yandexcloud://my_folder, code: 1000 2026-01-08T15:31:56.714950Z node 202 :YQ_CONTROL_PLANE_STORAGE ERROR: control_plane_proxy.cpp:2191: DescribeBindingRequest, validation failed: yandexcloud://my_folder test_user_4@staff **** (00000000) error:
: Error: No permission yq.bindings.get@as in a given scope yandexcloud://my_folder, code: 1000 2026-01-08T15:31:58.623509Z node 203 :YQ_CONTROL_PLANE_STORAGE ERROR: control_plane_proxy.cpp:2256: ModifyBindingRequest, validation failed: yandexcloud://my_folder test_user_4@staff **** (00000000) error:
: Error: No permission yq.bindings.update@as in a given scope yandexcloud://my_folder, code: 1000 2026-01-08T15:32:00.057641Z node 204 :YQ_CONTROL_PLANE_STORAGE ERROR: control_plane_proxy.cpp:2402: DeleteBindingRequest, validation failed: yandexcloud://my_folder test_user_4@staff **** (00000000) error:
: Error: No permission yq.bindings.delete@as in a given scope yandexcloud://my_folder, code: 1000 |86.7%| [TM] {BAZEL_UPLOAD} ydb/core/fq/libs/control_plane_proxy/ut/unittest >> BasicUsage::CreateTopicWithSharedConsumer_KeepMessagesOrder_False [GOOD] >> BasicUsage::CreateTopicWithSharedConsumer_KeepMessagesOrder_True >> DataShardWrite::UpdateImmediate [GOOD] >> DataShardWrite::RejectOnChangeQueueOverflow >> TxUsage::WriteToTopic_Demo_42_Query [GOOD] >> TxUsage::WriteToTopic_Demo_19_RestartBeforeCommit_Query >> DataShardWrite::ExecSQLUpsertImmediate-EvWrite [GOOD] >> DataShardWrite::IncrementImmediate >> KqpScripting::StreamDdlAndDml [GOOD] >> TxUsage::WriteToTopic_Demo_43_Table >> DataShardWrite::DeleteImmediate [GOOD] >> DataShardWrite::CancelImmediate >> TExtSubDomainTest::DeclareAndDrop-EnableRealSystemViewPaths-false |86.7%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/services/cms/ut/ydb-services-cms-ut |86.7%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/services/cms/ut/ydb-services-cms-ut |86.7%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/s3/py3test >> test_s3.py::TestYdbS3TTL::test_s3[table_ttl_Timestamp-pk_types17-all_types17-index17-Timestamp--] [GOOD] |86.7%| [TM] {BAZEL_UPLOAD} ydb/tests/datashard/s3/py3test |86.7%| [TM] {RESULT} ydb/core/fq/libs/control_plane_proxy/ut/unittest |86.7%| [LD] {RESULT} $(B)/ydb/services/cms/ut/ydb-services-cms-ut >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-BuiltinUser-ModifyUser-44 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-BuiltinUser-ModifyUser-45 >> TExtSubDomainTest::DeclareAndLs-EnableRealSystemViewPaths-false [GOOD] >> TExtSubDomainTest::DeclareAndDefineWithoutNodes-AlterDatabaseCreateHiveFirst-false ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/yql/unittest >> KqpScripting::StreamDdlAndDml [GOOD] Test command err: Trying to start YDB, gRPC: 20212, MsgBus: 17078 2026-01-08T15:32:23.963743Z node 1 :METADATA_PROVIDER WARN: log.cpp:841: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7593014030218469547:2204];send_to=[0:7307199536658146131:7762515]; 2026-01-08T15:32:23.963934Z node 1 :METADATA_PROVIDER ERROR: log.cpp:841: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2026-01-08T15:32:24.458690Z node 1 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: LookupError, path: Root/.metadata/script_executions 2026-01-08T15:32:24.499680Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2026-01-08T15:32:24.499821Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2026-01-08T15:32:24.572481Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2026-01-08T15:32:24.628125Z node 1 :IMPORT WARN: schemeshard_import.cpp:440: Table profiles were not loaded 2026-01-08T15:32:24.722054Z node 1 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: PathErrorUnknown, path: Root/.metadata/script_executions 2026-01-08T15:32:24.818932Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, 
will use file: (empty maybe) 2026-01-08T15:32:24.818961Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2026-01-08T15:32:24.818968Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2026-01-08T15:32:24.819060Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2026-01-08T15:32:24.970798Z node 1 :TX_CONVEYOR ERROR: log.cpp:841: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2026-01-08T15:32:25.344828Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2026-01-08T15:32:25.379653Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:123: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2026-01-08T15:32:25.520744Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:32:25.848168Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:32:26.140832Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:32:26.250418Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:32:28.810557Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7593014051693307709:2400], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:32:28.810684Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:263: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:32:28.813768Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7593014051693307719:2401], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:32:28.813855Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:263: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:32:28.964362Z node 1 :METADATA_PROVIDER ERROR: log.cpp:841: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7593014030218469547:2204];send_to=[0:7307199536658146131:7762515]; 2026-01-08T15:32:28.964468Z node 1 :METADATA_PROVIDER ERROR: log.cpp:841: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2026-01-08T15:32:29.205268Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:32:29.269566Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:32:29.324346Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:32:29.391547Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:32:29.484177Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:32:29.592827Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:32:29.676639Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:32:29.785954Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710669:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:32:29.959779Z node 1 :KQP_WORKLOAD_SERVICE WARN: 
scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7593014055988275888:2485], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:32:29.959874Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:263: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:32:29.960267Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7593014055988275894:2489], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:32:29.960267Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7593014055988275893:2488], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:32:29.960306Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:263: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:32:29.966100Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710670:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2026-01-08T15:32:29.981411Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7593014055988275897:2490], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710670 completed, doublechecking } 2026-01-08T15:32:30.063297Z node 1 :TX_PROXY ERROR: schemereq.cpp:624: Actor# [1:7593014060283243246:3766] txid# 281474976710671, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 53], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2026-01-08T15:32:32.026776Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710673:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:6 ... ;send_to=[0:7307199536658146131:7762515]; 2026-01-08T15:32:33.631149Z node 2 :METADATA_PROVIDER ERROR: log.cpp:841: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2026-01-08T15:32:33.841932Z node 2 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: LookupError, path: Root/.metadata/script_executions 2026-01-08T15:32:33.869243Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2026-01-08T15:32:33.878486Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2026-01-08T15:32:33.894363Z node 2 :IMPORT WARN: schemeshard_import.cpp:440: Table profiles were not loaded 2026-01-08T15:32:33.898555Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1374: Notification cookie mismatch for subscription [2:7593014072925453103:2081] 1767886353606727 != 1767886353606730 2026-01-08T15:32:33.943362Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2026-01-08T15:32:34.057737Z node 2 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: PathErrorUnknown, path: Root/.metadata/script_executions 2026-01-08T15:32:34.231004Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2026-01-08T15:32:34.231030Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2026-01-08T15:32:34.231038Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2026-01-08T15:32:34.231115Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2026-01-08T15:32:34.617814Z node 2 :TX_CONVEYOR ERROR: log.cpp:841: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2026-01-08T15:32:35.018930Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2026-01-08T15:32:35.040250Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part 
proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:32:35.169828Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:32:35.453153Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:32:35.539599Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:32:38.320183Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7593014094400291450:2400], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:32:38.320287Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:263: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:32:38.320699Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7593014094400291460:2401], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:32:38.320750Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:263: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:32:38.466154Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:32:38.501911Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:32:38.542586Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:32:38.579846Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:32:38.609561Z node 2 :METADATA_PROVIDER ERROR: log.cpp:841: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[2:7593014072925453205:2140];send_to=[0:7307199536658146131:7762515]; 2026-01-08T15:32:38.609632Z node 2 :METADATA_PROVIDER ERROR: log.cpp:841: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2026-01-08T15:32:38.619837Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:32:38.676685Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:32:38.737938Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:32:38.824032Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715669:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:32:38.933492Z node 2 :KQP_WORKLOAD_SERVICE WARN: 
scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7593014094400292334:2484], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:32:38.933594Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:263: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:32:38.933624Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7593014094400292339:2487], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:32:38.933829Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7593014094400292341:2488], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:32:38.933872Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:263: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:32:38.936839Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715670:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2026-01-08T15:32:38.956466Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7593014094400292342:2489], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715670 completed, doublechecking } 2026-01-08T15:32:39.033636Z node 2 :TX_PROXY ERROR: schemereq.cpp:624: Actor# [2:7593014098695259691:3767] txid# 281474976715671, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 53], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2026-01-08T15:32:41.064469Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715673:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:32:41.901796Z node 2 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:239: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1767886361927, txId: 281474976715675] shutting down |86.8%| [TM] {BAZEL_UPLOAD} ydb/core/kqp/ut/yql/unittest |86.8%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/s3/py3test >> test_s3.py::TestPgYdbS3TTL::test_s3[table_index_0__SYNC-pk_types1-all_types1-index1---SYNC] [GOOD] >> TExtSubDomainTest::DeclareAndLs-EnableRealSystemViewPaths-true [GOOD] |86.8%| [TM] {BAZEL_UPLOAD} ydb/tests/datashard/s3/py3test >> DataShardWrite::WriteImmediateSeveralOperations [GOOD] >> DataShardWrite::UpsertPreparedManyTables-Volatile ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/tx_proxy/ut_ext_tenant/unittest >> TExtSubDomainTest::DeclareAndLs-EnableRealSystemViewPaths-false [GOOD] Test command err: 2026-01-08T15:32:40.018832Z node 1 :METADATA_PROVIDER WARN: log.cpp:841: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7593014100181449369:2069];send_to=[0:7307199536658146131:7762515]; 2026-01-08T15:32:40.018885Z node 1 :METADATA_PROVIDER ERROR: log.cpp:841: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2026-01-08T15:32:40.614426Z node 1 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: LookupError, path: dc-1/.metadata/script_executions 2026-01-08T15:32:40.627116Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2026-01-08T15:32:40.627210Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2026-01-08T15:32:40.631503Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2026-01-08T15:32:40.706439Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1374: Notification cookie mismatch for subscription [1:7593014095886482042:2081] 1767886360011182 != 1767886360011185 2026-01-08T15:32:40.760459Z node 1 :IMPORT WARN: schemeshard_import.cpp:440: Table profiles were not loaded 2026-01-08T15:32:40.796795Z node 1 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: PathErrorUnknown, path: dc-1/.metadata/script_executions 2026-01-08T15:32:41.050724Z node 1 :TX_CONVEYOR ERROR: log.cpp:841: fline=service.h:53;problem=unexpected event for task 
executor;ev_type=NActors::TEvents::TEvWakeup; 2026-01-08T15:32:41.105437Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:402: actor# [1:7593014100181449602:2105] Handle TEvNavigate describe path dc-1 2026-01-08T15:32:41.105472Z node 1 :TX_PROXY DEBUG: describe.cpp:270: Actor# [1:7593014104476417216:2270] HANDLE EvNavigateScheme dc-1 2026-01-08T15:32:41.105570Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2810: Handle TEvTxProxySchemeCache::TEvNavigateKeySet: self# [1:7593014100181449609:2108], request# { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1 TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: true SyncVersion: true Status: Unknown Kind: KindUnknown DomainInfo }] } 2026-01-08T15:32:41.105660Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:889: [main][1:7593014100181449800:2206][/dc-1] Handle NKikimr::NSchemeBoard::NInternalEvents::TEvSyncRequest: sender# [1:7593014100181449609:2108], cookie# 1 2026-01-08T15:32:41.118700Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:375: [replica][1:7593014100181449813:2206][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7593014100181449810:2206], cookie# 1 2026-01-08T15:32:41.118806Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:375: [replica][1:7593014100181449814:2206][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7593014100181449811:2206], cookie# 1 2026-01-08T15:32:41.118824Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:375: [replica][1:7593014100181449816:2206][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7593014100181449812:2206], cookie# 1 2026-01-08T15:32:41.118835Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1137: [1:7593014095886482010:2049] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7593014100181449813:2206], cookie# 1 2026-01-08T15:32:41.118846Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1137: [1:7593014095886482013:2052] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7593014100181449814:2206], cookie# 1 2026-01-08T15:32:41.118871Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1137: [1:7593014095886482016:2055] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7593014100181449816:2206], cookie# 1 2026-01-08T15:32:41.118914Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:384: [replica][1:7593014100181449813:2206][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 Cluster State: { } }: sender# [1:7593014095886482010:2049], cookie# 1 2026-01-08T15:32:41.118933Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:384: [replica][1:7593014100181449814:2206][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 Cluster State: { } }: sender# [1:7593014095886482013:2052], cookie# 1 2026-01-08T15:32:41.118950Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:384: [replica][1:7593014100181449816:2206][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 Cluster State: { } }: sender# [1:7593014095886482016:2055], cookie# 1 2026-01-08T15:32:41.119013Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:919: [main][1:7593014100181449800:2206][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 Cluster State: { } }: sender# [1:7593014100181449810:2206], cookie# 1 
2026-01-08T15:32:41.119037Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:986: [main][1:7593014100181449800:2206][/dc-1] Sync is in progress: cookie# 1, ring group# 0, size# 3, half# 1, successes# 1, failures# 0 2026-01-08T15:32:41.119062Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:919: [main][1:7593014100181449800:2206][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 Cluster State: { } }: sender# [1:7593014100181449811:2206], cookie# 1 2026-01-08T15:32:41.119088Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:999: [main][1:7593014100181449800:2206][/dc-1] Sync is done in the ring group: cookie# 1, ring group# 0, size# 3, half# 1, successes# 2, failures# 0, partial# 0 2026-01-08T15:32:41.119126Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:919: [main][1:7593014100181449800:2206][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 Cluster State: { } }: sender# [1:7593014100181449812:2206], cookie# 1 2026-01-08T15:32:41.119148Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:925: [main][1:7593014100181449800:2206][/dc-1] Sync cookie mismatch: sender# [1:7593014100181449812:2206], cookie# 1, current cookie# 0 2026-01-08T15:32:41.119260Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2651: HandleNotify: self# [1:7593014100181449609:2108], notify# NKikimr::NSchemeBoard::NInternalEvents::TEvSyncResponse { Path: /dc-1 PathId: Partial: 0 } 2026-01-08T15:32:41.125434Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2526: ResolveCacheItem: self# [1:7593014100181449609:2108], notify# NKikimr::NSchemeBoard::NInternalEvents::TEvSyncResponse { Path: /dc-1 PathId: Partial: 0 }, by path# { Subscriber: { Subscriber: [1:7593014100181449800:2206] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 1 } Filled: 1 Status: StatusSuccess Kind: 2 TableKind: 0 Created: 1 CreateStep: 0 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] DomainId: [OwnerId: 72057594046644480, LocalPathId: 1] IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, by pathId# nullptr 2026-01-08T15:32:41.125552Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:1892: FillEntry for TNavigate: self# [1:7593014100181449609:2108], cacheItem# { Subscriber: { Subscriber: [1:7593014100181449800:2206] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 1 } Filled: 1 Status: StatusSuccess Kind: 2 TableKind: 0 Created: 1 CreateStep: 0 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] DomainId: [OwnerId: 72057594046644480, LocalPathId: 1] IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, entry# { Path: dc-1 TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: true SyncVersion: true Status: Unknown Kind: KindUnknown DomainInfo }, props# { Cookie: 1 IsSync: true Partial: 0 } 2026-01-08T15:32:41.136639Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:268: Send result: self# [1:7593014104476417217:2271], recipient# [1:7593014104476417216:2270], result# { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1 TableId: [72057594046644480:1:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: true SyncVersion: true Status: Ok Kind: KindPath DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] Params { Version: 0 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } 
ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } }] } 2026-01-08T15:32:41.136731Z node 1 :TX_PROXY DEBUG: describe.cpp:354: Actor# [1:7593014104476417216:2270] HANDLE EvNavigateKeySetResult TDescribeReq marker# P5 ErrorCount# 0 2026-01-08T15:32:41.226603Z node 1 :TX_PROXY DEBUG: describe.cpp:433: Actor# [1:7593014104476417216:2270] SEND to# 72057594046644480 shardToRequest NKikimrSchemeOp.TDescribePath Path: "dc-1" Options { ShowPrivateTable: true } 2026-01-08T15:32:41.230101Z node 1 :TX_PROXY DEBUG: describe.cpp:446: Actor# [1:7593014104476417216:2270] Handle TEvDescribeSchemeResult Forward to# [1:7593014104476417215:2269] Cookie: 0 TEvDescribeSchemeResult: NKikimrScheme.TEvDescribeSchemeResult PreSerializedData size# 2 Record# Status: StatusSuccess Path: "dc-1" PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } DomainKey { SchemeShard: 72057594046644480 PathId: 1 } PathsInside: 0 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046644480 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SecurityState { Audience: "/dc-1" } SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 1 PathOwnerId: 72057594046644480 2026-01-08T15:32:41.235523Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:314: actor# [1:7593014100181449602:2105] Handle TEvProposeTransaction 2026-01-08T15:32:41.235554Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:237: actor# [1:7593014100181449602:2105] TxId# 281474976710657 ProcessProposeTransaction 2026-01-08T15:32:41.235608Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:256: actor# [1:7593014100181449602:2105] Cookie# 0 userReqId# "" txid# 281474976710657 SEND to# [1:7593014104476417222:2275] 2026-01-08T15:32:41.519679Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1822: Actor# [1:7593014104476417222:2275] txid# 281474976710657 Bootstrap EvSchemeRequest record: Transaction ... 
fy# NKikimr::NSchemeBoard::NInternalEvents::TEvSyncResponse { Path: /dc-1/USER_0 PathId: Partial: 0 } 2026-01-08T15:32:41.601119Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2526: ResolveCacheItem: self# [1:7593014100181449609:2108], notify# NKikimr::NSchemeBoard::NInternalEvents::TEvSyncResponse { Path: /dc-1/USER_0 PathId: Partial: 0 }, by path# { Subscriber: { Subscriber: [1:7593014104476417264:2310] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 1 } Filled: 1 Status: StatusSuccess Kind: 9 TableKind: 0 Created: 1 CreateStep: 1767886361633 PathId: [OwnerId: 72057594046644480, LocalPathId: 2] DomainId: [OwnerId: 72057594046644480, LocalPathId: 2] IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, by pathId# nullptr 2026-01-08T15:32:41.601219Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:1892: FillEntry for TNavigate: self# [1:7593014100181449609:2108], cacheItem# { Subscriber: { Subscriber: [1:7593014104476417264:2310] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 1 } Filled: 1 Status: StatusSuccess Kind: 9 TableKind: 0 Created: 1 CreateStep: 1767886361633 PathId: [OwnerId: 72057594046644480, LocalPathId: 2] DomainId: [OwnerId: 72057594046644480, LocalPathId: 2] IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, entry# { Path: dc-1/USER_0 TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: true SyncVersion: true Status: Unknown Kind: KindUnknown DomainInfo }, props# { Cookie: 1 IsSync: true Partial: 0 } 2026-01-08T15:32:41.601369Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:268: Send result: self# [1:7593014104476417271:2311], recipient# [1:7593014104476417263:2309], result# { ErrorCount: 1 DatabaseName: DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/USER_0 TableId: [72057594046644480:2:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: true SyncVersion: true Status: RedirectLookupError Kind: KindExtSubdomain DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 2] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 2] Params { Version: 1 PlanResolution: 0 TimeCastBucketsPerMediator: 0 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } }] } 2026-01-08T15:32:41.601403Z node 1 :TX_PROXY INFO: describe.cpp:354: Actor# [1:7593014104476417263:2309] HANDLE EvNavigateKeySetResult TDescribeReq marker# P5 ErrorCount# 1 2026-01-08T15:32:41.622237Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:402: actor# [1:7593014100181449602:2105] Handle TEvNavigate describe path /dc-1 2026-01-08T15:32:41.622279Z node 1 :TX_PROXY DEBUG: describe.cpp:270: Actor# [1:7593014104476417273:2313] HANDLE EvNavigateScheme /dc-1 2026-01-08T15:32:41.622761Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2810: Handle TEvTxProxySchemeCache::TEvNavigateKeySet: self# [1:7593014100181449609:2108], request# { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1 TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: true SyncVersion: true Status: Unknown Kind: KindUnknown DomainInfo }] } 2026-01-08T15:32:41.622868Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:889: [main][1:7593014100181449800:2206][/dc-1] Handle NKikimr::NSchemeBoard::NInternalEvents::TEvSyncRequest: sender# [1:7593014100181449609:2108], cookie# 4 2026-01-08T15:32:41.622928Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:375: [replica][1:7593014100181449813:2206][/dc-1] Handle 
NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7593014100181449810:2206], cookie# 4 2026-01-08T15:32:41.622944Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:375: [replica][1:7593014100181449814:2206][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7593014100181449811:2206], cookie# 4 2026-01-08T15:32:41.622960Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:375: [replica][1:7593014100181449816:2206][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7593014100181449812:2206], cookie# 4 2026-01-08T15:32:41.622997Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1137: [1:7593014095886482010:2049] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7593014100181449813:2206], cookie# 4 2026-01-08T15:32:41.623043Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1137: [1:7593014095886482013:2052] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7593014100181449814:2206], cookie# 4 2026-01-08T15:32:41.623085Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1137: [1:7593014095886482016:2055] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7593014100181449816:2206], cookie# 4 2026-01-08T15:32:41.623130Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:384: [replica][1:7593014100181449813:2206][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 5 Partial: 0 Cluster State: { } }: sender# [1:7593014095886482010:2049], cookie# 4 2026-01-08T15:32:41.623155Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:384: [replica][1:7593014100181449814:2206][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 5 Partial: 0 Cluster State: { } }: sender# [1:7593014095886482013:2052], cookie# 4 2026-01-08T15:32:41.623190Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:384: [replica][1:7593014100181449816:2206][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 5 Partial: 0 Cluster State: { } }: sender# [1:7593014095886482016:2055], cookie# 4 2026-01-08T15:32:41.623235Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:919: [main][1:7593014100181449800:2206][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 5 Partial: 0 Cluster State: { } }: sender# [1:7593014100181449810:2206], cookie# 4 2026-01-08T15:32:41.623264Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:986: [main][1:7593014100181449800:2206][/dc-1] Sync is in progress: cookie# 4, ring group# 0, size# 3, half# 1, successes# 1, failures# 0 2026-01-08T15:32:41.623285Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:919: [main][1:7593014100181449800:2206][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 5 Partial: 0 Cluster State: { } }: sender# [1:7593014100181449811:2206], cookie# 4 2026-01-08T15:32:41.623305Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:999: [main][1:7593014100181449800:2206][/dc-1] Sync is done in the ring group: cookie# 4, ring group# 0, size# 3, half# 1, successes# 2, failures# 0, partial# 0 2026-01-08T15:32:41.623329Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:919: [main][1:7593014100181449800:2206][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 5 Partial: 0 Cluster State: { } }: sender# [1:7593014100181449812:2206], cookie# 4 2026-01-08T15:32:41.623341Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:925: [main][1:7593014100181449800:2206][/dc-1] Sync cookie mismatch: sender# 
[1:7593014100181449812:2206], cookie# 4, current cookie# 0 2026-01-08T15:32:41.623383Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2651: HandleNotify: self# [1:7593014100181449609:2108], notify# NKikimr::NSchemeBoard::NInternalEvents::TEvSyncResponse { Path: /dc-1 PathId: Partial: 0 } 2026-01-08T15:32:41.623474Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2526: ResolveCacheItem: self# [1:7593014100181449609:2108], notify# NKikimr::NSchemeBoard::NInternalEvents::TEvSyncResponse { Path: /dc-1 PathId: Partial: 0 }, by path# { Subscriber: { Subscriber: [1:7593014100181449800:2206] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 4 } Filled: 1 Status: StatusSuccess Kind: 2 TableKind: 0 Created: 1 CreateStep: 1767886361605 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] DomainId: [OwnerId: 72057594046644480, LocalPathId: 1] IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, by pathId# nullptr 2026-01-08T15:32:41.623552Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:1892: FillEntry for TNavigate: self# [1:7593014100181449609:2108], cacheItem# { Subscriber: { Subscriber: [1:7593014100181449800:2206] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 4 } Filled: 1 Status: StatusSuccess Kind: 2 TableKind: 0 Created: 1 CreateStep: 1767886361605 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] DomainId: [OwnerId: 72057594046644480, LocalPathId: 1] IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, entry# { Path: dc-1 TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: true SyncVersion: true Status: Unknown Kind: KindUnknown DomainInfo }, props# { Cookie: 4 IsSync: true Partial: 0 } 2026-01-08T15:32:41.623722Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:268: Send result: self# [1:7593014104476417274:2314], recipient# [1:7593014104476417273:2313], result# { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1 TableId: [72057594046644480:1:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: true SyncVersion: true Status: Ok Kind: KindPath DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] Params { Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } }] } 2026-01-08T15:32:41.623758Z node 1 :TX_PROXY DEBUG: describe.cpp:354: Actor# [1:7593014104476417273:2313] HANDLE EvNavigateKeySetResult TDescribeReq marker# P5 ErrorCount# 0 2026-01-08T15:32:41.623829Z node 1 :TX_PROXY DEBUG: describe.cpp:433: Actor# [1:7593014104476417273:2313] SEND to# 72057594046644480 shardToRequest NKikimrSchemeOp.TDescribePath Path: "/dc-1" Options { ShowPrivateTable: true } 2026-01-08T15:32:41.624514Z node 1 :TX_PROXY DEBUG: describe.cpp:446: Actor# [1:7593014104476417273:2313] Handle TEvDescribeSchemeResult Forward to# [1:7593014104476417272:2312] Cookie: 0 TEvDescribeSchemeResult: NKikimrScheme.TEvDescribeSchemeResult PreSerializedData size# 63 Record# Status: StatusSuccess Path: "/dc-1" PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 1767886361605 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 5 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 5 ACLVersion: 0 EffectiveACLVersion: 0 
UserAttrsVersion: 1 ChildrenVersion: 3 SubDomainVersion: 1 SecurityStateVersion: 0 } ChildrenExist: true } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } DomainKey { SchemeShard: 72057594046644480 PathId: 1 } StoragePools { Name: "/dc-1:test" Kind: "test" } PathsInside: 1 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046644480 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SecurityState { Audience: "/dc-1" } SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 1 PathOwnerId: 72057594046644480 |86.8%| [TM] {BAZEL_UPLOAD} ydb/core/tx/tx_proxy/ut_ext_tenant/unittest >> TxUsage::WriteToTopic_Demo_14_Table [GOOD] >> TxUsage::Sinks_Oltp_WriteToTopic_2_Table [GOOD] >> TTxDataShardRecomputeKMeansScan::BadRequest [GOOD] >> TTxDataShardRecomputeKMeansScan::MainTable ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/tx_proxy/ut_ext_tenant/unittest >> TExtSubDomainTest::DeclareAndLs-EnableRealSystemViewPaths-true [GOOD] Test command err: 2026-01-08T15:32:41.423615Z node 1 :METADATA_PROVIDER WARN: log.cpp:841: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7593014104164031700:2065];send_to=[0:7307199536658146131:7762515]; 2026-01-08T15:32:41.423669Z node 1 :METADATA_PROVIDER ERROR: log.cpp:841: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2026-01-08T15:32:41.731897Z node 1 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: LookupError, path: dc-1/.metadata/script_executions 2026-01-08T15:32:41.733474Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2026-01-08T15:32:41.733583Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2026-01-08T15:32:41.777258Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2026-01-08T15:32:41.868681Z node 1 :IMPORT WARN: schemeshard_import.cpp:440: Table profiles were not loaded 2026-01-08T15:32:41.871181Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1374: Notification cookie mismatch for subscription [1:7593014104164031675:2081] 1767886361413081 != 1767886361413084 2026-01-08T15:32:41.968691Z node 1 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: PathErrorUnknown, path: dc-1/.metadata/script_executions 2026-01-08T15:32:42.110521Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:402: actor# 
[1:7593014104164031937:2105] Handle TEvNavigate describe path dc-1 2026-01-08T15:32:42.110570Z node 1 :TX_PROXY DEBUG: describe.cpp:270: Actor# [1:7593014108458999739:2464] HANDLE EvNavigateScheme dc-1 2026-01-08T15:32:42.110694Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2810: Handle TEvTxProxySchemeCache::TEvNavigateKeySet: self# [1:7593014104164031943:2107], request# { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1 TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: true SyncVersion: true Status: Unknown Kind: KindUnknown DomainInfo }] } 2026-01-08T15:32:42.110781Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:889: [main][1:7593014104164032097:2187][/dc-1] Handle NKikimr::NSchemeBoard::NInternalEvents::TEvSyncRequest: sender# [1:7593014104164031943:2107], cookie# 1 2026-01-08T15:32:42.112534Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:375: [replica][1:7593014104164032144:2187][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7593014104164032141:2187], cookie# 1 2026-01-08T15:32:42.112572Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:375: [replica][1:7593014104164032145:2187][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7593014104164032142:2187], cookie# 1 2026-01-08T15:32:42.112589Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:375: [replica][1:7593014104164032146:2187][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7593014104164032143:2187], cookie# 1 2026-01-08T15:32:42.112623Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1137: [1:7593014104164031643:2049] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7593014104164032144:2187], cookie# 1 2026-01-08T15:32:42.112650Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1137: [1:7593014104164031646:2052] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7593014104164032145:2187], cookie# 1 2026-01-08T15:32:42.112689Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1137: [1:7593014104164031649:2055] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7593014104164032146:2187], cookie# 1 2026-01-08T15:32:42.112725Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:384: [replica][1:7593014104164032144:2187][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 5 Partial: 0 Cluster State: { } }: sender# [1:7593014104164031643:2049], cookie# 1 2026-01-08T15:32:42.112740Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:384: [replica][1:7593014104164032145:2187][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 5 Partial: 0 Cluster State: { } }: sender# [1:7593014104164031646:2052], cookie# 1 2026-01-08T15:32:42.112768Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:384: [replica][1:7593014104164032146:2187][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 5 Partial: 0 Cluster State: { } }: sender# [1:7593014104164031649:2055], cookie# 1 2026-01-08T15:32:42.112818Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:919: [main][1:7593014104164032097:2187][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 5 Partial: 0 Cluster State: { } }: sender# [1:7593014104164032141:2187], cookie# 1 2026-01-08T15:32:42.112855Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:986: [main][1:7593014104164032097:2187][/dc-1] Sync is in 
progress: cookie# 1, ring group# 0, size# 3, half# 1, successes# 1, failures# 0 2026-01-08T15:32:42.112877Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:919: [main][1:7593014104164032097:2187][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 5 Partial: 0 Cluster State: { } }: sender# [1:7593014104164032142:2187], cookie# 1 2026-01-08T15:32:42.112918Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:999: [main][1:7593014104164032097:2187][/dc-1] Sync is done in the ring group: cookie# 1, ring group# 0, size# 3, half# 1, successes# 2, failures# 0, partial# 0 2026-01-08T15:32:42.112951Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:919: [main][1:7593014104164032097:2187][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 5 Partial: 0 Cluster State: { } }: sender# [1:7593014104164032143:2187], cookie# 1 2026-01-08T15:32:42.112971Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:925: [main][1:7593014104164032097:2187][/dc-1] Sync cookie mismatch: sender# [1:7593014104164032143:2187], cookie# 1, current cookie# 0 2026-01-08T15:32:42.113042Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2651: HandleNotify: self# [1:7593014104164031943:2107], notify# NKikimr::NSchemeBoard::NInternalEvents::TEvSyncResponse { Path: /dc-1 PathId: Partial: 0 } 2026-01-08T15:32:42.113142Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2526: ResolveCacheItem: self# [1:7593014104164031943:2107], notify# NKikimr::NSchemeBoard::NInternalEvents::TEvSyncResponse { Path: /dc-1 PathId: Partial: 0 }, by path# { Subscriber: { Subscriber: [1:7593014104164032097:2187] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 1 } Filled: 1 Status: StatusSuccess Kind: 2 TableKind: 0 Created: 1 CreateStep: 0 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] DomainId: [OwnerId: 72057594046644480, LocalPathId: 1] IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, by pathId# nullptr 2026-01-08T15:32:42.113231Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:1892: FillEntry for TNavigate: self# [1:7593014104164031943:2107], cacheItem# { Subscriber: { Subscriber: [1:7593014104164032097:2187] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 1 } Filled: 1 Status: StatusSuccess Kind: 2 TableKind: 0 Created: 1 CreateStep: 0 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] DomainId: [OwnerId: 72057594046644480, LocalPathId: 1] IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, entry# { Path: dc-1 TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: true SyncVersion: true Status: Unknown Kind: KindUnknown DomainInfo }, props# { Cookie: 1 IsSync: true Partial: 0 } 2026-01-08T15:32:42.129138Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:268: Send result: self# [1:7593014108458999740:2465], recipient# [1:7593014108458999739:2464], result# { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1 TableId: [72057594046644480:1:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: true SyncVersion: true Status: Ok Kind: KindPath DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] Params { Version: 0 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } }] } 2026-01-08T15:32:42.129220Z node 1 :TX_PROXY DEBUG: describe.cpp:354: Actor# 
[1:7593014108458999739:2464] HANDLE EvNavigateKeySetResult TDescribeReq marker# P5 ErrorCount# 0 2026-01-08T15:32:42.195260Z node 1 :TX_PROXY DEBUG: describe.cpp:433: Actor# [1:7593014108458999739:2464] SEND to# 72057594046644480 shardToRequest NKikimrSchemeOp.TDescribePath Path: "dc-1" Options { ShowPrivateTable: true } 2026-01-08T15:32:42.208447Z node 1 :TX_PROXY DEBUG: describe.cpp:446: Actor# [1:7593014108458999739:2464] Handle TEvDescribeSchemeResult Forward to# [1:7593014108458999738:2463] Cookie: 0 TEvDescribeSchemeResult: NKikimrScheme.TEvDescribeSchemeResult PreSerializedData size# 67 Record# Status: StatusSuccess Path: "dc-1" PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 5 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 5 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 4 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: true } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } DomainKey { SchemeShard: 72057594046644480 PathId: 1 } PathsInside: 36 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046644480 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SecurityState { Audience: "/dc-1" } SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 1 PathOwnerId: 72057594046644480 2026-01-08T15:32:42.214812Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:314: actor# [1:7593014104164031937:2105] Handle TEvProposeTransaction 2026-01-08T15:32:42.214838Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:237: actor# [1:7593014104164031937:2105] TxId# 281474976715657 ProcessProposeTransaction 2026-01-08T15:32:42.214893Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:256: actor# [1:7593014104164031937:2105] Cookie# 0 userReqId# "" txid# 281474976715657 SEND to# [1:7593014108458999744:2469] 2026-01-08T15:32:42.359104Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1822: Actor# [1:7593014108458999744:2469] txid# 281474976715657 Bootstrap EvSchemeRequest record: Transaction { ModifyScheme { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "dc-1" StoragePools { Name: "/dc-1:test" Kind: "test" } } } } UserToken: "" ... 
cache.cpp:2526: ResolveCacheItem: self# [1:7593014104164031943:2107], notify# NKikimr::NSchemeBoard::NInternalEvents::TEvSyncResponse { Path: /dc-1/USER_0 PathId: Partial: 0 }, by path# { Subscriber: { Subscriber: [1:7593014108458999790:2502] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 1 } Filled: 1 Status: StatusSuccess Kind: 9 TableKind: 0 Created: 1 CreateStep: 1767886362445 PathId: [OwnerId: 72057594046644480, LocalPathId: 38] DomainId: [OwnerId: 72057594046644480, LocalPathId: 38] IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, by pathId# nullptr 2026-01-08T15:32:42.423506Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:1892: FillEntry for TNavigate: self# [1:7593014104164031943:2107], cacheItem# { Subscriber: { Subscriber: [1:7593014108458999790:2502] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 1 } Filled: 1 Status: StatusSuccess Kind: 9 TableKind: 0 Created: 1 CreateStep: 1767886362445 PathId: [OwnerId: 72057594046644480, LocalPathId: 38] DomainId: [OwnerId: 72057594046644480, LocalPathId: 38] IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, entry# { Path: dc-1/USER_0 TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: true SyncVersion: true Status: Unknown Kind: KindUnknown DomainInfo }, props# { Cookie: 1 IsSync: true Partial: 0 } 2026-01-08T15:32:42.423687Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:268: Send result: self# [1:7593014108458999797:2503], recipient# [1:7593014108458999789:2501], result# { ErrorCount: 1 DatabaseName: DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/USER_0 TableId: [72057594046644480:38:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: true SyncVersion: true Status: RedirectLookupError Kind: KindExtSubdomain DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 38] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 38] Params { Version: 1 PlanResolution: 0 TimeCastBucketsPerMediator: 0 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } }] } 2026-01-08T15:32:42.423721Z node 1 :TX_PROXY INFO: describe.cpp:354: Actor# [1:7593014108458999789:2501] HANDLE EvNavigateKeySetResult TDescribeReq marker# P5 ErrorCount# 1 2026-01-08T15:32:42.443003Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:402: actor# [1:7593014104164031937:2105] Handle TEvNavigate describe path /dc-1 2026-01-08T15:32:42.443047Z node 1 :TX_PROXY DEBUG: describe.cpp:270: Actor# [1:7593014108458999799:2505] HANDLE EvNavigateScheme /dc-1 2026-01-08T15:32:42.443147Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2810: Handle TEvTxProxySchemeCache::TEvNavigateKeySet: self# [1:7593014104164031943:2107], request# { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1 TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: true SyncVersion: true Status: Unknown Kind: KindUnknown DomainInfo }] } 2026-01-08T15:32:42.443248Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:889: [main][1:7593014104164032097:2187][/dc-1] Handle NKikimr::NSchemeBoard::NInternalEvents::TEvSyncRequest: sender# [1:7593014104164031943:2107], cookie# 4 2026-01-08T15:32:42.443296Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:375: [replica][1:7593014104164032144:2187][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7593014104164032141:2187], cookie# 4 2026-01-08T15:32:42.443309Z node 1 
:SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:375: [replica][1:7593014104164032145:2187][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7593014104164032142:2187], cookie# 4 2026-01-08T15:32:42.443324Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:375: [replica][1:7593014104164032146:2187][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7593014104164032143:2187], cookie# 4 2026-01-08T15:32:42.443347Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1137: [1:7593014104164031643:2049] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7593014104164032144:2187], cookie# 4 2026-01-08T15:32:42.443381Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1137: [1:7593014104164031646:2052] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7593014104164032145:2187], cookie# 4 2026-01-08T15:32:42.443409Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1137: [1:7593014104164031649:2055] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7593014104164032146:2187], cookie# 4 2026-01-08T15:32:42.443448Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:384: [replica][1:7593014104164032144:2187][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 8 Partial: 0 Cluster State: { } }: sender# [1:7593014104164031643:2049], cookie# 4 2026-01-08T15:32:42.443485Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:384: [replica][1:7593014104164032145:2187][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 8 Partial: 0 Cluster State: { } }: sender# [1:7593014104164031646:2052], cookie# 4 2026-01-08T15:32:42.443518Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:384: [replica][1:7593014104164032146:2187][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 8 Partial: 0 Cluster State: { } }: sender# [1:7593014104164031649:2055], cookie# 4 2026-01-08T15:32:42.443546Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:919: [main][1:7593014104164032097:2187][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 8 Partial: 0 Cluster State: { } }: sender# [1:7593014104164032141:2187], cookie# 4 2026-01-08T15:32:42.443565Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:986: [main][1:7593014104164032097:2187][/dc-1] Sync is in progress: cookie# 4, ring group# 0, size# 3, half# 1, successes# 1, failures# 0 2026-01-08T15:32:42.443579Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:919: [main][1:7593014104164032097:2187][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 8 Partial: 0 Cluster State: { } }: sender# [1:7593014104164032142:2187], cookie# 4 2026-01-08T15:32:42.443596Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:999: [main][1:7593014104164032097:2187][/dc-1] Sync is done in the ring group: cookie# 4, ring group# 0, size# 3, half# 1, successes# 2, failures# 0, partial# 0 2026-01-08T15:32:42.443621Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:919: [main][1:7593014104164032097:2187][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 8 Partial: 0 Cluster State: { } }: sender# [1:7593014104164032143:2187], cookie# 4 2026-01-08T15:32:42.443631Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:925: [main][1:7593014104164032097:2187][/dc-1] Sync cookie mismatch: sender# [1:7593014104164032143:2187], cookie# 4, current cookie# 0 2026-01-08T15:32:42.443667Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2651: HandleNotify: self# 
[1:7593014104164031943:2107], notify# NKikimr::NSchemeBoard::NInternalEvents::TEvSyncResponse { Path: /dc-1 PathId: Partial: 0 } 2026-01-08T15:32:42.443743Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2526: ResolveCacheItem: self# [1:7593014104164031943:2107], notify# NKikimr::NSchemeBoard::NInternalEvents::TEvSyncResponse { Path: /dc-1 PathId: Partial: 0 }, by path# { Subscriber: { Subscriber: [1:7593014104164032097:2187] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 4 } Filled: 1 Status: StatusSuccess Kind: 2 TableKind: 0 Created: 1 CreateStep: 1767886362424 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] DomainId: [OwnerId: 72057594046644480, LocalPathId: 1] IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, by pathId# nullptr 2026-01-08T15:32:42.443825Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:1892: FillEntry for TNavigate: self# [1:7593014104164031943:2107], cacheItem# { Subscriber: { Subscriber: [1:7593014104164032097:2187] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 4 } Filled: 1 Status: StatusSuccess Kind: 2 TableKind: 0 Created: 1 CreateStep: 1767886362424 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] DomainId: [OwnerId: 72057594046644480, LocalPathId: 1] IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, entry# { Path: dc-1 TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: true SyncVersion: true Status: Unknown Kind: KindUnknown DomainInfo }, props# { Cookie: 4 IsSync: true Partial: 0 } 2026-01-08T15:32:42.443994Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:268: Send result: self# [1:7593014108458999800:2506], recipient# [1:7593014108458999799:2505], result# { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1 TableId: [72057594046644480:1:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: true SyncVersion: true Status: Ok Kind: KindPath DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] Params { Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } }] } 2026-01-08T15:32:42.444038Z node 1 :TX_PROXY DEBUG: describe.cpp:354: Actor# [1:7593014108458999799:2505] HANDLE EvNavigateKeySetResult TDescribeReq marker# P5 ErrorCount# 0 2026-01-08T15:32:42.444091Z node 1 :TX_PROXY DEBUG: describe.cpp:433: Actor# [1:7593014108458999799:2505] SEND to# 72057594046644480 shardToRequest NKikimrSchemeOp.TDescribePath Path: "/dc-1" Options { ShowPrivateTable: true } 2026-01-08T15:32:42.444556Z node 1 :TX_CONVEYOR ERROR: log.cpp:841: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2026-01-08T15:32:42.444738Z node 1 :TX_PROXY DEBUG: describe.cpp:446: Actor# [1:7593014108458999799:2505] Handle TEvDescribeSchemeResult Forward to# [1:7593014108458999798:2504] Cookie: 0 TEvDescribeSchemeResult: NKikimrScheme.TEvDescribeSchemeResult PreSerializedData size# 128 Record# Status: StatusSuccess Path: "/dc-1" PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 1767886362424 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 8 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 8 ACLVersion: 0 
EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 6 SubDomainVersion: 1 SecurityStateVersion: 0 } ChildrenExist: true } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } DomainKey { SchemeShard: 72057594046644480 PathId: 1 } StoragePools { Name: "/dc-1:test" Kind: "test" } PathsInside: 37 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046644480 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SecurityState { Audience: "/dc-1" } SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 1 PathOwnerId: 72057594046644480 |86.8%| [TM] {BAZEL_UPLOAD} ydb/core/tx/tx_proxy/ut_ext_tenant/unittest >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-LocalUser-CreateUser-5 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-LocalUser-CreateUser-6 >> KqpPg::CreateTempTableSerial [GOOD] >> KqpPg::DropSequence >> ResultFormatter::Void [GOOD] >> ResultFormatter::VariantTuple [GOOD] |86.8%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/s3/py3test >> test_s3.py::TestYdbS3TTL::test_s3[table_ttl_Uint32-pk_types14-all_types14-index14-Uint32--] [GOOD] |86.8%| [TM] {BAZEL_UPLOAD} ydb/tests/datashard/s3/py3test >> KqpPg::EquiJoin+useSink [GOOD] >> KqpPg::EquiJoin-useSink >> TxUsage::WriteToTopic_Demo_14_Query >> TExtSubDomainTest::DeclareAndDrop-EnableRealSystemViewPaths-false [GOOD] >> TExtSubDomainTest::DeclareAndDrop-EnableRealSystemViewPaths-true >> ResultFormatter::List [GOOD] >> ResultFormatter::Null [GOOD] >> KqpPg::InsertNoTargetColumns_SerialNotNull-useSink [GOOD] >> DataShardWrite::IncrementImmediate [GOOD] >> DataShardWrite::ExecSQLUpsertPrepared-EvWrite-Volatile |86.8%| [TM] {asan, default-linux-x86_64, release} ydb/core/fq/libs/result_formatter/ut/unittest >> ResultFormatter::VariantTuple [GOOD] |86.8%| [TM] {BAZEL_UPLOAD} ydb/core/fq/libs/result_formatter/ut/unittest >> KqpPg::InsertValuesFromTableWithDefaultNegativeCase-useSink [GOOD] >> TxUsage::Sinks_Oltp_WriteToTopic_2_Query >> KqpPg::CreateSequence [FAIL] >> KqpPg::AlterSequence >> ResultFormatter::Tuple [GOOD] >> ResultFormatter::Tagged [GOOD] >> TxUsage::Sinks_Oltp_WriteToTopicAndTable_2_Query [GOOD] |86.8%| [TM] {asan, default-linux-x86_64, release} ydb/core/fq/libs/result_formatter/ut/unittest >> ResultFormatter::Null [GOOD] |86.8%| [TM] {BAZEL_UPLOAD} ydb/core/fq/libs/result_formatter/ut/unittest |86.8%| [TM] {asan, default-linux-x86_64, release} ydb/core/fq/libs/result_formatter/ut/unittest >> ResultFormatter::Tagged [GOOD] |86.8%| [TM] {BAZEL_UPLOAD} ydb/core/fq/libs/result_formatter/ut/unittest >> TxUsage::Sinks_Oltp_WriteToTopicAndTable_3_Table |86.8%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/columnshard/ut_schema/unittest |86.8%| [TM] {BAZEL_UPLOAD} ydb/core/tx/columnshard/ut_schema/unittest >> MoveTable::WithData-Reboot 
>> DataShardWrite::RejectOnChangeQueueOverflow [GOOD] >> DataShardWrite::PreparedDistributedWritePageFault ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/pg/unittest >> KqpPg::InsertNoTargetColumns_SerialNotNull-useSink [GOOD] Test command err: Trying to start YDB, gRPC: 27421, MsgBus: 13076 2026-01-08T15:31:27.507714Z node 1 :METADATA_PROVIDER WARN: log.cpp:841: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7593013786110241806:2082];send_to=[0:7307199536658146131:7762515]; 2026-01-08T15:31:27.508089Z node 1 :METADATA_PROVIDER ERROR: log.cpp:841: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2026-01-08T15:31:27.584894Z node 1 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:125: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, got undelivered to scheme cache: ActorUnknown 2026-01-08T15:31:27.956302Z node 1 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: LookupError, path: Root/.metadata/script_executions 2026-01-08T15:31:27.975012Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2026-01-08T15:31:27.975127Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2026-01-08T15:31:28.025685Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2026-01-08T15:31:28.052952Z node 1 :IMPORT WARN: schemeshard_import.cpp:440: Table profiles were not loaded 2026-01-08T15:31:28.262416Z node 1 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: PathErrorUnknown, path: Root/.metadata/script_executions 2026-01-08T15:31:28.274963Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2026-01-08T15:31:28.275011Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2026-01-08T15:31:28.275028Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2026-01-08T15:31:28.275155Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2026-01-08T15:31:28.537689Z node 1 :TX_CONVEYOR ERROR: log.cpp:841: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2026-01-08T15:31:28.895032Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2026-01-08T15:31:28.907709Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:123: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2026-01-08T15:31:31.528980Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7593013803290111845:2327], DatabaseId: /Root, PoolId: default, Failed to fetch pool 
info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:31:31.529081Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7593013803290111810:2321], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:31:31.529405Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:263: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:31:31.536142Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7593013803290111852:2329], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:31:31.536280Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:263: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:31:31.538547Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2026-01-08T15:31:31.572616Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7593013803290111851:2328], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710658 completed, doublechecking } 2026-01-08T15:31:31.764650Z node 1 :TX_PROXY ERROR: schemereq.cpp:624: Actor# [1:7593013803290111904:2544] txid# 281474976710659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 41], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2026-01-08T15:31:31.857534Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:31:32.511028Z node 1 :METADATA_PROVIDER ERROR: log.cpp:841: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7593013786110241806:2082];send_to=[0:7307199536658146131:7762515]; 2026-01-08T15:31:32.511101Z node 1 :METADATA_PROVIDER ERROR: log.cpp:841: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; Trying to start YDB, gRPC: 25550, MsgBus: 24985 2026-01-08T15:31:33.522723Z node 2 :METADATA_PROVIDER WARN: log.cpp:841: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7593013812513495489:2252];send_to=[0:7307199536658146131:7762515]; 2026-01-08T15:31:33.522796Z node 2 :METADATA_PROVIDER ERROR: log.cpp:841: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2026-01-08T15:31:33.566588Z node 2 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: LookupError, path: Root/.metadata/script_executions 2026-01-08T15:31:33.709738Z node 2 :IMPORT WARN: schemeshard_import.cpp:440: Table profiles were not loaded 2026-01-08T15:31:33.714479Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1374: Notification cookie mismatch for subscription [2:7593013812513495275:2081] 1767886293492898 != 1767886293492901 2026-01-08T15:31:33.736564Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2026-01-08T15:31:33.736652Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2026-01-08T15:31:33.798010Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2026-01-08T15:31:33.858554Z node 2 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: PathErrorUnknown, path: Root/.metadata/script_executions 2026-01-08T15:31:34.017231Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2026-01-08T15:31:34.017254Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2026-01-08T15:31:34.017261Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2026-01-08T15:31:34.017342Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 
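A note on the warnings above: they trace the lazy bootstrap of the default workload-manager resource pool. The first queries find no pool (NOT_FOUND), a pool-creator actor issues the create, a concurrent creator loses the race and the scheme request reports "path exist, request accepts it", and the creator doublechecks once the transaction completes. A minimal sketch of that create-if-missing pattern follows, with invented names; this is not YDB code, only a mirror of the logged sequence:

```python
# Sketch of the fetch -> NOT_FOUND -> create -> tolerate-"path exist" -> doublecheck
# sequence visible in the log. All names here are hypothetical.

class AlreadyExists(Exception):
    """Stand-in for the 'path exist, request accepts it' scheme response."""

def ensure_default_pool(fetch, create):
    # fetch() returns the pool description or None; create() may race with
    # another session's creator actor.
    pool = fetch()
    if pool is not None:
        return pool
    try:
        create()
    except AlreadyExists:
        pass  # a concurrent creator won the race; treat as success
    pool = fetch()  # "doublechecking" after the create transaction completes
    if pool is None:
        raise RuntimeError("pool still missing; schedule a retry")
    return pool
```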
2026-01-08T15:31:34.518748Z node 2 :TX_CONVEYOR ERROR: log.cpp:841: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2026-01-08T15:31:34.601042Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2026-01-08T15:31:37.710621Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7593013829693365355:2327], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:31:37.710628Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7593013829693365345:2324], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:31:37.710688Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:263: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:31:37.710899Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7593013829693365360:2329], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:31:37.710949Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:263: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:31:37.713632Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2026-01-08T15:31:37.726308Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7593013829693365359:2328], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710658 completed, doublechecking } 2026-01-08T15:31:37.783987Z node 2 :TX_PROXY ERROR: schemereq.cpp:624: Actor# [2:7593013829693365412:2537] txid# 281474976710659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 41], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2026-01-08T15:31: ... 426066292:2069];send_to=[0:7307199536658146131:7762515]; 2026-01-08T15:32:36.487396Z node 10 :METADATA_PROVIDER ERROR: log.cpp:841: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2026-01-08T15:32:37.590794Z node 10 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [10:7593014089195870934:2330], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:32:37.590893Z node 10 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [10:7593014089195870926:2327], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:32:37.591115Z node 10 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:263: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:32:37.591975Z node 10 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [10:7593014089195870941:2332], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:32:37.592050Z node 10 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:263: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:32:37.596553Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2026-01-08T15:32:37.616025Z node 10 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [10:7593014089195870940:2331], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710658 completed, doublechecking } 2026-01-08T15:32:37.701060Z node 10 :TX_PROXY ERROR: schemereq.cpp:624: Actor# [10:7593014089195870993:2542] txid# 281474976710659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 41], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2026-01-08T15:32:37.854889Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:32:38.060113Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:32:38.169319Z node 10 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:625: Compilation failed, self: [10:7593014093490838523:2358], status: BAD_REQUEST, issues:
: Error: Type annotation, code: 1030
:1:1: Error: At function: KiWriteTable!
:1:1: Error: Missing not null column in input: c. All not null columns should be initialized, code: 2032 2026-01-08T15:32:38.170907Z node 10 :KQP_SESSION WARN: {KQPSA@kqp_session_actor.cpp:2679} SessionId: ydb://session/3?node_id=10&id=YjJhODk2NmQtOWUyYWY5My1kM2FkN2VlYi0yZjFhMzRhZA==, ActorId: [10:7593014093490838521:2357], ActorState: ExecuteState, LegacyTraceId: 01kef3t1kbekf6j8167d14sfvx, ReplyQueryCompileError, remove tx status# BAD_REQUEST issues# { message: "Type annotation" issue_code: 1030 severity: 1 issues { position { row: 1 column: 1 } message: "At function: KiWriteTable!" end_position { row: 1 column: 1 } severity: 1 issues { position { row: 1 column: 1 } message: "Missing not null column in input: c. All not null columns should be initialized" end_position { row: 1 column: 1 } issue_code: 2032 severity: 1 } } } tx_id# trace_id# Trying to start YDB, gRPC: 24642, MsgBus: 31385 2026-01-08T15:32:39.801596Z node 11 :METADATA_PROVIDER WARN: log.cpp:841: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[11:7593014096391280541:2079];send_to=[0:7307199536658146131:7762515]; 2026-01-08T15:32:39.801726Z node 11 :METADATA_PROVIDER ERROR: log.cpp:841: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2026-01-08T15:32:39.847958Z node 11 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: LookupError, path: Root/.metadata/script_executions 2026-01-08T15:32:39.946931Z node 11 :IMPORT WARN: schemeshard_import.cpp:440: Table profiles were not loaded 2026-01-08T15:32:39.978259Z node 11 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(11, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2026-01-08T15:32:39.978393Z node 11 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(11, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2026-01-08T15:32:40.001958Z node 11 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(11, (0,0,0,0)) VolatileState: Connecting -> Connected 2026-01-08T15:32:40.014211Z node 11 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: PathErrorUnknown, path: Root/.metadata/script_executions 2026-01-08T15:32:40.126760Z node 11 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2026-01-08T15:32:40.126787Z node 11 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2026-01-08T15:32:40.126798Z node 11 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2026-01-08T15:32:40.126933Z node 11 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2026-01-08T15:32:40.814723Z node 11 :TX_CONVEYOR ERROR: log.cpp:841: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2026-01-08T15:32:41.018887Z node 11 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2026-01-08T15:32:41.039320Z node 11 
:FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:123: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2026-01-08T15:32:44.806456Z node 11 :METADATA_PROVIDER ERROR: log.cpp:841: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[11:7593014096391280541:2079];send_to=[0:7307199536658146131:7762515]; 2026-01-08T15:32:44.806569Z node 11 :METADATA_PROVIDER ERROR: log.cpp:841: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2026-01-08T15:32:45.059764Z node 11 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [11:7593014122161085126:2323], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:32:45.059905Z node 11 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [11:7593014122161085155:2328], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:32:45.060073Z node 11 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:263: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:32:45.084405Z node 11 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2026-01-08T15:32:45.158367Z node 11 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [11:7593014122161085165:2329], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710658 completed, doublechecking } 2026-01-08T15:32:45.224183Z node 11 :TX_PROXY ERROR: schemereq.cpp:624: Actor# [11:7593014122161085227:2542] txid# 281474976710659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 41], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2026-01-08T15:32:45.313385Z node 11 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:32:45.606914Z node 11 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:32:45.759430Z node 11 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:625: Compilation failed, self: [11:7593014122161085460:2356], status: BAD_REQUEST, issues:
: Error: Type annotation, code: 1030
:1:1: Error: At function: KiWriteTable!
:1:1: Error: Missing not null column in input: c. All not null columns should be initialized, code: 2032 2026-01-08T15:32:45.762916Z node 11 :KQP_SESSION WARN: {KQPSA@kqp_session_actor.cpp:2679} SessionId: ydb://session/3?node_id=11&id=MjE0Njg3ZjMtMTA2ZDQxNzgtMTI2YzBhMzItNDViNzQzOWU=, ActorId: [11:7593014122161085458:2355], ActorState: ExecuteState, LegacyTraceId: 01kef3t90h3zpbq8s4t1twrb06, ReplyQueryCompileError, remove tx status# BAD_REQUEST issues# { message: "Type annotation" issue_code: 1030 severity: 1 issues { position { row: 1 column: 1 } message: "At function: KiWriteTable!" end_position { row: 1 column: 1 } severity: 1 issues { position { row: 1 column: 1 } message: "Missing not null column in input: c. All not null columns should be initialized" end_position { row: 1 column: 1 } issue_code: 2032 severity: 1 } } } tx_id# trace_id# |86.8%| [TM] {BAZEL_UPLOAD} ydb/core/kqp/ut/pg/unittest >> DataShardWrite::CancelImmediate [GOOD] >> DataShardWrite::DeletePrepared+Volatile |86.8%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/datashard/ut_order/ydb-core-tx-datashard-ut_order |86.8%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/datashard/ut_order/ydb-core-tx-datashard-ut_order |86.8%| [LD] {RESULT} $(B)/ydb/core/tx/datashard/ut_order/ydb-core-tx-datashard-ut_order >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-BuiltinUser-ModifyUser-45 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-BuiltinUser-ModifyUser-46 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/pg/unittest >> KqpPg::InsertValuesFromTableWithDefaultNegativeCase-useSink [GOOD] Test command err: Trying to start YDB, gRPC: 64708, MsgBus: 13521 2026-01-08T15:31:30.143048Z node 1 :METADATA_PROVIDER WARN: log.cpp:841: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7593013802451832872:2176];send_to=[0:7307199536658146131:7762515]; 2026-01-08T15:31:30.143144Z node 1 :METADATA_PROVIDER ERROR: log.cpp:841: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2026-01-08T15:31:30.184847Z node 1 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:125: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, got undelivered to scheme cache: ActorUnknown 2026-01-08T15:31:30.602407Z node 1 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: LookupError, path: Root/.metadata/script_executions 2026-01-08T15:31:30.688252Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2026-01-08T15:31:30.688386Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2026-01-08T15:31:30.754306Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2026-01-08T15:31:30.774475Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1374: Notification cookie mismatch for subscription [1:7593013802451832719:2081] 1767886290049304 != 1767886290049307 2026-01-08T15:31:30.786051Z node 1 :IMPORT WARN: schemeshard_import.cpp:440: Table profiles were not loaded 2026-01-08T15:31:30.822509Z node 1 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables 
existence, scheme status: PathErrorUnknown, path: Root/.metadata/script_executions 2026-01-08T15:31:31.029249Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2026-01-08T15:31:31.029273Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2026-01-08T15:31:31.029279Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2026-01-08T15:31:31.029345Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2026-01-08T15:31:31.157797Z node 1 :TX_CONVEYOR ERROR: log.cpp:841: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2026-01-08T15:31:31.684095Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2026-01-08T15:31:31.703819Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:123: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2026-01-08T15:31:34.987447Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7593013819631702791:2325], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:31:34.987591Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:263: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:31:34.988079Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7593013819631702801:2326], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:31:34.988131Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:263: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:31:35.036415Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:31:35.146436Z node 1 :METADATA_PROVIDER ERROR: log.cpp:841: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7593013802451832872:2176];send_to=[0:7307199536658146131:7762515]; 2026-01-08T15:31:35.146526Z node 1 :METADATA_PROVIDER ERROR: log.cpp:841: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2026-01-08T15:31:35.301619Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7593013823926670196:2339], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:31:35.301721Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:263: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:31:35.302815Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7593013823926670199:2340], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:31:35.302883Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:263: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:31:35.329062Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:31:35.413002Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7593013823926670275:2351], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:31:35.413129Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:263: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:31:35.414740Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7593013823926670281:2354], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:31:35.414826Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7593013823926670282:2355], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:31:35.415167Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:263: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:31:35.420077Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710660:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2026-01-08T15:31:35.449493Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7593013823926670285:2356], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710660 completed, doublechecking } 2026-01-08T15:31:35.580466Z node 1 :TX_PROXY ERROR: schemereq.cpp:624: Actor# [1:7593013823926670336:2646] txid# 281474976710661, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 43], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } Trying to start YDB, gRPC: 16946, MsgBus: 2386 2026-01-08T15:31:37.531615Z node 2 :METADATA_PROVIDER WARN: log.cpp:841: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7593013830509127261:2256];send_to=[0:7307199536658146131:7762515]; 2026-01-08T15:31:37.531669Z node 2 :METADATA_PROVIDER ERROR: log.cpp:841: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2026-01-08T15:31:37.630888Z node 2 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: LookupError, path: Root/.metadata/script_executions 2026-01-08T15:31:37.741324Z node 2 :IMPORT WARN: schemeshard_import.cpp:440: Table profiles were not loaded 2026-01-08T15:31:37.742517Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1374: Notification cookie mismatch for subscription [2:7593013830509127033:2081] 1767886297468399 != 1767886297468402 2026-01-08T15:31:37.750738Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2026-01-08T15:31:37.750790Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2026-01-08T15:31:37.776794Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2026-01-08T15:31:37.920217Z node 2 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: PathErrorUnknown, path: Root/.metadata/script_executions 2026-01-08T15:31:37.961427Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2026-01-08T15:31:37.961450Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2026-01-08T15:31:37.961456Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2026-01-08T15:31:37.961531Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2026-01-08T15:31:38.464516Z node 2 :TX_CONVEYOR ERROR: log.cpp:841: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TE ... kload_service.cpp:263: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:32:38.730173Z node 10 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [10:7593014091764292311:2331], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:32:38.730247Z node 10 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [10:7593014091764292310:2330], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:32:38.730466Z node 10 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:263: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:32:38.737591Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2026-01-08T15:32:38.756555Z node 10 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [10:7593014091764292314:2332], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710658 completed, doublechecking } 2026-01-08T15:32:38.842950Z node 10 :TX_PROXY ERROR: schemereq.cpp:624: Actor# [10:7593014091764292365:2546] txid# 281474976710659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 41], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2026-01-08T15:32:38.913132Z node 10 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:625: Compilation failed, self: [10:7593014091764292374:2336], status: GENERIC_ERROR, issues:
: Error: Type annotation, code: 1030
:1:1: Error: At function: KiCreateTable!
:1:1: Error: Failed to parse default expr for typename int4, error reason: Error while converting text to binary: yql/essentials/minikql/mkql_terminator.cpp:47: ERROR: invalid input syntax for type integer: "text" 2026-01-08T15:32:38.919346Z node 10 :KQP_SESSION WARN: {KQPSA@kqp_session_actor.cpp:2679} SessionId: ydb://session/3?node_id=10&id=YWI4OGM4NmQtZWM2N2U5Y2EtYTQ3ZWU2MzQtNzc2YTI4ZjM=, ActorId: [10:7593014091764292296:2326], ActorState: ExecuteState, LegacyTraceId: 01kef3sx78ftfp8qwj8zqqdb1x, ReplyQueryCompileError, remove tx status# GENERIC_ERROR issues# { message: "Type annotation" issue_code: 1030 severity: 1 issues { position { row: 1 column: 1 } message: "At function: KiCreateTable!" end_position { row: 1 column: 1 } severity: 1 issues { position { row: 1 column: 1 } message: "Failed to parse default expr for typename int4, error reason: Error while converting text to binary: yql/essentials/minikql/mkql_terminator.cpp:47: ERROR: invalid input syntax for type integer: \"text\"\n" end_position { row: 1 column: 1 } severity: 1 } } } tx_id# trace_id#
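The two compile failures recorded in this run are negative-path checks rather than regressions: KiWriteTable rejects an INSERT that leaves the NOT NULL column "c" uninitialized (issue code 2032, under KqpPg::InsertNoTargetColumns_SerialNotNull above), and KiCreateTable rejects a pg-type DEFAULT whose literal cannot be converted to int4. Statements of roughly the following shape would trigger these errors; the tests' actual queries are not shown in the log:

```python
# Illustrative query shapes only; table and column names (other than "c")
# are invented, since the log does not include the original statements.

# KiCreateTable: DEFAULT literal that cannot be parsed as pg int4.
create_bad_default = """
CREATE TABLE t (
    key   int4 PRIMARY KEY,
    value int4 DEFAULT 'text'  -- invalid input syntax for type integer: "text"
);
"""

# KiWriteTable: INSERT that omits NOT NULL column "c" (issue code 2032).
insert_missing_not_null = "INSERT INTO t2 (a, b) VALUES (1, 2);"
```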
: Error: Type annotation, code: 1030
:1:1: Error: At function: KiCreateTable!
:1:1: Error: Failed to parse default expr for typename int4, error reason: Error while converting text to binary: yql/essentials/minikql/mkql_terminator.cpp:47: ERROR: invalid input syntax for type integer: "text" Trying to start YDB, gRPC: 18869, MsgBus: 30480 2026-01-08T15:32:40.499859Z node 11 :METADATA_PROVIDER WARN: log.cpp:841: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[11:7593014102352755004:2253];send_to=[0:7307199536658146131:7762515]; 2026-01-08T15:32:40.500173Z node 11 :METADATA_PROVIDER ERROR: log.cpp:841: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2026-01-08T15:32:40.546712Z node 11 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: LookupError, path: Root/.metadata/script_executions 2026-01-08T15:32:40.712162Z node 11 :IMPORT WARN: schemeshard_import.cpp:440: Table profiles were not loaded 2026-01-08T15:32:40.738507Z node 11 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1374: Notification cookie mismatch for subscription [11:7593014102352754781:2081] 1767886360422887 != 1767886360422890 2026-01-08T15:32:40.743918Z node 11 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(11, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2026-01-08T15:32:40.744042Z node 11 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(11, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2026-01-08T15:32:40.782012Z node 11 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(11, (0,0,0,0)) VolatileState: Connecting -> Connected 2026-01-08T15:32:40.782296Z node 11 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: LookupError, path: Root/.metadata/script_executions 2026-01-08T15:32:40.983163Z node 11 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2026-01-08T15:32:40.983193Z node 11 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2026-01-08T15:32:40.983203Z node 11 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2026-01-08T15:32:40.983329Z node 11 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2026-01-08T15:32:41.289781Z node 11 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: PathErrorUnknown, path: Root/.metadata/script_executions 2026-01-08T15:32:41.478520Z node 11 :TX_CONVEYOR ERROR: log.cpp:841: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2026-01-08T15:32:42.004827Z node 11 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2026-01-08T15:32:42.017898Z node 11 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:123: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2026-01-08T15:32:45.482443Z 
node 11 :METADATA_PROVIDER ERROR: log.cpp:841: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[11:7593014102352755004:2253];send_to=[0:7307199536658146131:7762515]; 2026-01-08T15:32:45.482544Z node 11 :METADATA_PROVIDER ERROR: log.cpp:841: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2026-01-08T15:32:46.946115Z node 11 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [11:7593014128122559437:2326], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:32:46.946418Z node 11 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:263: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:32:46.947292Z node 11 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [11:7593014128122559463:2329], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:32:46.947425Z node 11 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:263: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:32:46.947846Z node 11 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [11:7593014128122559467:2331], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:32:46.956669Z node 11 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2026-01-08T15:32:46.980434Z node 11 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [11:7593014128122559469:2332], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715658 completed, doublechecking } 2026-01-08T15:32:47.047212Z node 11 :TX_PROXY ERROR: schemereq.cpp:624: Actor# [11:7593014132417526817:2543] txid# 281474976715659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 41], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2026-01-08T15:32:47.083581Z node 11 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:625: Compilation failed, self: [11:7593014132417526826:2336], status: GENERIC_ERROR, issues:
: Error: Type annotation, code: 1030
:1:1: Error: At function: KiCreateTable!
:1:1: Error: Failed to parse default expr for typename int4, error reason: Error while converting text to binary: yql/essentials/minikql/mkql_terminator.cpp:47: ERROR: invalid input syntax for type integer: "text" 2026-01-08T15:32:47.087403Z node 11 :KQP_SESSION WARN: {KQPSA@kqp_session_actor.cpp:2679} SessionId: ydb://session/3?node_id=11&id=OTM2YTBhODUtYTgxYmVhYjAtNTIzNjEyYzktZmJhNmYzMWY=, ActorId: [11:7593014128122559435:2325], ActorState: ExecuteState, LegacyTraceId: 01kef3t5dp7m522d1nq0grngec, ReplyQueryCompileError, remove tx status# GENERIC_ERROR issues# { message: "Type annotation" issue_code: 1030 severity: 1 issues { position { row: 1 column: 1 } message: "At function: KiCreateTable!" end_position { row: 1 column: 1 } severity: 1 issues { position { row: 1 column: 1 } message: "Failed to parse default expr for typename int4, error reason: Error while converting text to binary: yql/essentials/minikql/mkql_terminator.cpp:47: ERROR: invalid input syntax for type integer: \"text\"\n" end_position { row: 1 column: 1 } severity: 1 } } } tx_id# trace_id#
: Error: Type annotation, code: 1030
:1:1: Error: At function: KiCreateTable!
:1:1: Error: Failed to parse default expr for typename int4, error reason: Error while converting text to binary: yql/essentials/minikql/mkql_terminator.cpp:47: ERROR: invalid input syntax for type integer: "text" |86.8%| [TM] {BAZEL_UPLOAD} ydb/core/kqp/ut/pg/unittest >> DataShardVolatile::DistributedWriteAsymmetricExecute [GOOD] >> DataShardVolatile::DistributedWriteThenCopyTable >> TExtSubDomainTest::DeclareAndDefineWithoutNodes-AlterDatabaseCreateHiveFirst-false [GOOD] >> TExtSubDomainTest::DeclareAndDefineWithoutNodes-AlterDatabaseCreateHiveFirst-true >> TColumnShardTestSchema::Drop-Reboots+GenerateInternalPathId >> MoveTable::WithData-Reboot [GOOD] >> TestSqsTopicHttpProxy::TestGetQueueUrlWithConsumer >> DataShardVolatile::VolatileCommitOnBlobStorageFailure+UseSink [GOOD] >> DataShardVolatile::VolatileCommitOnBlobStorageFailure-UseSink >> TestSqsTopicHttpProxy::TestSendMessageTooBig >> DataShardStats::HasSchemaChanges_Families [GOOD] >> DataShardStats::BackupTableStatsReportInterval >> TExtSubDomainTest::DeclareAndDrop-EnableRealSystemViewPaths-true [GOOD] >> TGRpcCmsTest::SimpleTenantsTestSyncOperation >> TExtSubDomainTest::CreateTableInsideAndAlterDomainAndTable-AlterDatabaseCreateHiveFirst-false [GOOD] >> TExtSubDomainTest::CreateTableInsideAndAlterDomainAndTable-AlterDatabaseCreateHiveFirst-true ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/columnshard/ut_schema/unittest >> MoveTable::WithData-Reboot [GOOD] Test command err: 2026-01-08T15:32:50.080255Z node 1 :TX_COLUMNSHARD INFO: log.cpp:841: tablet_id=9437184;self_id=[1:129:2159];fline=columnshard.cpp:110;event=initialize_shard;step=OnActivateExecutor; 2026-01-08T15:32:50.109840Z node 1 :TX_COLUMNSHARD INFO: log.cpp:841: tablet_id=9437184;self_id=[1:129:2159];fline=columnshard.cpp:129;event=initialize_shard;step=initialize_tiring_finished; 2026-01-08T15:32:50.110054Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxInitSchema.Execute at tablet 9437184 2026-01-08T15:32:50.117157Z node 1 :TX_COLUMNSHARD WARN: log.cpp:841: tablet_id=9437184;self_id=[1:129:2159];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2026-01-08T15:32:50.117417Z node 1 :TX_COLUMNSHARD WARN: log.cpp:841: tablet_id=9437184;self_id=[1:129:2159];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2026-01-08T15:32:50.117650Z node 1 :TX_COLUMNSHARD WARN: log.cpp:841: tablet_id=9437184;self_id=[1:129:2159];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2026-01-08T15:32:50.117787Z node 1 :TX_COLUMNSHARD WARN: log.cpp:841: tablet_id=9437184;self_id=[1:129:2159];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2026-01-08T15:32:50.117889Z node 1 :TX_COLUMNSHARD WARN: log.cpp:841: tablet_id=9437184;self_id=[1:129:2159];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2026-01-08T15:32:50.117999Z node 1 :TX_COLUMNSHARD WARN: log.cpp:841: tablet_id=9437184;self_id=[1:129:2159];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2026-01-08T15:32:50.118127Z node 1 :TX_COLUMNSHARD WARN: log.cpp:841: 
tablet_id=9437184;self_id=[1:129:2159];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2026-01-08T15:32:50.118258Z node 1 :TX_COLUMNSHARD WARN: log.cpp:841: tablet_id=9437184;self_id=[1:129:2159];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2026-01-08T15:32:50.118400Z node 1 :TX_COLUMNSHARD WARN: log.cpp:841: tablet_id=9437184;self_id=[1:129:2159];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2026-01-08T15:32:50.118517Z node 1 :TX_COLUMNSHARD WARN: log.cpp:841: tablet_id=9437184;self_id=[1:129:2159];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2026-01-08T15:32:50.118620Z node 1 :TX_COLUMNSHARD WARN: log.cpp:841: tablet_id=9437184;self_id=[1:129:2159];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2026-01-08T15:32:50.118730Z node 1 :TX_COLUMNSHARD WARN: log.cpp:841: tablet_id=9437184;self_id=[1:129:2159];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CopyBlobIdsToV2; 2026-01-08T15:32:50.118847Z node 1 :TX_COLUMNSHARD WARN: log.cpp:841: tablet_id=9437184;self_id=[1:129:2159];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreAppearanceSnapshot; 2026-01-08T15:32:50.147040Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxInitSchema.Complete at tablet 9437184 2026-01-08T15:32:50.147687Z node 1 :TX_COLUMNSHARD INFO: log.cpp:841: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=columnshard__init.cpp:131;step=TTxUpdateSchema.Execute_Start;details=normalizers_count=12;current_normalizer=CLASS_NAME=Granules; 2026-01-08T15:32:50.147744Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:841: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=1;type=Granules; 2026-01-08T15:32:50.147965Z node 1 :TX_COLUMNSHARD INFO: log.cpp:841: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=normalizer.cpp:127;normalizer=TGranulesNormalizer;message=0 chunks found; 2026-01-08T15:32:50.148155Z node 1 :TX_COLUMNSHARD WARN: log.cpp:841: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Granules;id=1; 2026-01-08T15:32:50.148247Z node 1 :TX_COLUMNSHARD WARN: log.cpp:841: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=Chunks;id=Chunks; 2026-01-08T15:32:50.148306Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:841: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=2;type=Chunks; 2026-01-08T15:32:50.148400Z node 1 :TX_COLUMNSHARD INFO: log.cpp:841: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=chunks.cpp:138;normalizer=TChunksNormalizer;message=0 chunks found; 2026-01-08T15:32:50.148460Z node 1 :TX_COLUMNSHARD WARN: log.cpp:841: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Chunks;id=2; 2026-01-08T15:32:50.148507Z node 1 :TX_COLUMNSHARD WARN: log.cpp:841: 
tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=TablesCleaner;id=TablesCleaner; 2026-01-08T15:32:50.148528Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:841: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=4;type=TablesCleaner; 2026-01-08T15:32:50.148704Z node 1 :TX_COLUMNSHARD INFO: log.cpp:841: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=normalizer.cpp:123;normalizer=TGranulesNormalizer;message=0 chunks found; 2026-01-08T15:32:50.148787Z node 1 :TX_COLUMNSHARD WARN: log.cpp:841: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=TablesCleaner;id=4; 2026-01-08T15:32:50.148836Z node 1 :TX_COLUMNSHARD WARN: log.cpp:841: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanGranuleId;id=CleanGranuleId; 2026-01-08T15:32:50.148865Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:841: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=6;type=CleanGranuleId; 2026-01-08T15:32:50.148953Z node 1 :TX_COLUMNSHARD INFO: log.cpp:841: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=clean_granule.cpp:133;normalizer=TCleanGranuleIdNormalizer;message=0 chunks found; 2026-01-08T15:32:50.149002Z node 1 :TX_COLUMNSHARD WARN: log.cpp:841: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanGranuleId;id=6; 2026-01-08T15:32:50.149041Z node 1 :TX_COLUMNSHARD WARN: log.cpp:841: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=GCCountersNormalizer;id=GCCountersNormalizer; 2026-01-08T15:32:50.149074Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:841: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=9;type=GCCountersNormalizer; 2026-01-08T15:32:50.149121Z node 1 :TX_COLUMNSHARD WARN: log.cpp:841: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=GCCountersNormalizer;id=9; 2026-01-08T15:32:50.149166Z node 1 :TX_COLUMNSHARD WARN: log.cpp:841: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=SyncPortionFromChunks;id=SyncPortionFromChunks; 2026-01-08T15:32:50.149199Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:841: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=11;type=SyncPortionFromChunks; 2026-01-08T15:32:50.149250Z node 1 :TX_COLUMNSHARD WARN: log.cpp:841: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=SyncPortionFromChunks;id=11; 2026-01-08T15:32:50.149305Z node 1 :TX_COLUMNSHARD WARN: log.cpp:841: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV1Chunks_V2;id=RestoreV1Chunks_V2; 2026-01-08T15:32:50.149333Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:841: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=15;type=RestoreV1Chunks_V2; 2026-01-08T15:32:50.149612Z node 1 :TX_COLUMNSHARD WARN: log.cpp:841: 
tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV1Chunks_V2;id=15; 2026-01-08T15:32:50.149663Z node 1 :TX_COLUMNSHARD WARN: log.cpp:841: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV2Chunks;id=RestoreV2Chunks; 2026-01-08T15:32:50.149692Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:841: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=16;type=RestoreV2Chunks; 2026-01-08T15:32:50.149826Z node 1 :TX_COLUMNSHARD WARN: log.cpp:841: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV2Chunks;id=16; 2026-01-08T15:32:50.149877Z node 1 :TX_COLUMNSHARD WARN: log.cpp:841: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanDeprecatedSnapshot;id=CleanDeprecatedSnapshot; 2026-01-08T15:32:50.149923Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:841: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=17;type=CleanDeprecatedSnapshot; 2026-01-08T15:32:50.149977Z node 1 :TX_COLUMNSHARD WARN: log.cpp:841: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanDeprecatedSnapshot;id=17; 2026-01-08T15:32:50.150032Z node 1 :TX_COLUMNSHARD WARN: log.cpp:841: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV0ChunksMeta;id=RestoreV0ChunksMeta; 2026-01-08T15:32:50.150062Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:841: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=18;type=RestoreV0ChunksMeta; 2026-01-08T15:32:50.150103Z node 1 :TX_COLUMNSHARD WARN: log.cpp:841: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV0ChunksMeta;id=18; 2026-01-08T15:32:50.150143Z node 1 :TX_COLUMNSHARD WARN: log.cpp:841: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CopyBlobIdsToV2;id=CopyBlobIdsToV2; 2026-01-08T15:32:50.150176Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:841: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=19;type=CopyBlobIdsToV2; 2026-01-08T15:32:50.150351Z node 1 :TX_COLUMNSHARD WARN: log.cpp:841: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CopyBlobIdsToV2;id=19; 2026-01-08T15:32:50.150396Z node 1 :TX_COLUMNSHARD WARN: log.cpp:841: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLAS ... 
line=actor.cpp:276;stage=data_format;batch_size=0;num_rows=100;batch_columns=timestamp,resource_type,resource_id,uid,level,message,json_payload,ingested_at,saved_at,request_id; 2026-01-08T15:32:51.117946Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:841: SelfId=[1:337:2349];TabletId=9437184;ScanId=0;TxId=12;ScanGen=0;task_identifier=;fline=actor.cpp:394;event=send_data;compute_actor_id=[1:331:2343];bytes=6350;rows=100;faults=0;finished=0;fault=0;schema=timestamp: timestamp[us] resource_type: string resource_id: string uid: string level: int32 message: string json_payload: string ingested_at: timestamp[us] saved_at: timestamp[us] request_id: string; 2026-01-08T15:32:51.118230Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:841: SelfId=[1:337:2349];TabletId=9437184;ScanId=0;TxId=12;ScanGen=0;task_identifier=;fline=actor.cpp:298;stage=finished;iterator=ready_results:(count:0;records_count:0;);indexed_data:(CTX:{ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1;column_names=timestamp;);;ff=(column_ids=1,2,3,4,5,6,7,8,9,10;column_names=ingested_at,json_payload,level,message,request_id,resource_id,resource_type,saved_at,timestamp,uid;);;program_input=(column_ids=1,2,3,4,5,6,7,8,9,10;column_names=ingested_at,json_payload,level,message,request_id,resource_id,resource_type,saved_at,timestamp,uid;);;};SCANNER:{S:{{finished:{1};internal:{};constructor:{{class_name=GENERAL_ORDERING::ascending;internal={{CC:0}};}};in_fly=0;type=FULL_SORTED;}};SP:[{{RESULT;IDX=0;FIN=1;}};]};SF:1;PR:0;); 2026-01-08T15:32:51.118449Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:841: SelfId=[1:337:2349];TabletId=9437184;ScanId=0;TxId=12;ScanGen=0;task_identifier=;fline=actor.cpp:213;stage=start;iterator=ready_results:(count:0;records_count:0;);indexed_data:(CTX:{ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1;column_names=timestamp;);;ff=(column_ids=1,2,3,4,5,6,7,8,9,10;column_names=ingested_at,json_payload,level,message,request_id,resource_id,resource_type,saved_at,timestamp,uid;);;program_input=(column_ids=1,2,3,4,5,6,7,8,9,10;column_names=ingested_at,json_payload,level,message,request_id,resource_id,resource_type,saved_at,timestamp,uid;);;};SCANNER:{S:{{finished:{1};internal:{};constructor:{{class_name=GENERAL_ORDERING::ascending;internal={{CC:0}};}};in_fly=0;type=FULL_SORTED;}};SP:[{{RESULT;IDX=0;FIN=1;}};]};SF:1;PR:0;); 2026-01-08T15:32:51.118621Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:841: SelfId=[1:337:2349];TabletId=9437184;ScanId=0;TxId=12;ScanGen=0;task_identifier=;fline=actor.cpp:218;stage=scan iterator is finished;iterator=ready_results:(count:0;records_count:0;);indexed_data:(CTX:{ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1;column_names=timestamp;);;ff=(column_ids=1,2,3,4,5,6,7,8,9,10;column_names=ingested_at,json_payload,level,message,request_id,resource_id,resource_type,saved_at,timestamp,uid;);;program_input=(column_ids=1,2,3,4,5,6,7,8,9,10;column_names=ingested_at,json_payload,level,message,request_id,resource_id,resource_type,saved_at,timestamp,uid;);;};SCANNER:{S:{{finished:{1};internal:{};constructor:{{class_name=GENERAL_ORDERING::ascending;internal={{CC:0}};}};in_fly=0;type=FULL_SORTED;}};SP:[{{RESULT;IDX=0;FIN=1;}};]};SF:1;PR:0;); 2026-01-08T15:32:51.118866Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:841: SelfId=[1:337:2349];TabletId=9437184;ScanId=0;TxId=12;ScanGen=0;task_identifier=;fline=actor.cpp:122;event=TEvScanDataAck;info=limits:(bytes=8388608;chunks=1);; 
2026-01-08T15:32:51.119070Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:841: SelfId=[1:337:2349];TabletId=9437184;ScanId=0;TxId=12;ScanGen=0;task_identifier=;fline=actor.cpp:213;stage=start;iterator=ready_results:(count:0;records_count:0;);indexed_data:(CTX:{ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1;column_names=timestamp;);;ff=(column_ids=1,2,3,4,5,6,7,8,9,10;column_names=ingested_at,json_payload,level,message,request_id,resource_id,resource_type,saved_at,timestamp,uid;);;program_input=(column_ids=1,2,3,4,5,6,7,8,9,10;column_names=ingested_at,json_payload,level,message,request_id,resource_id,resource_type,saved_at,timestamp,uid;);;};SCANNER:{S:{{finished:{1};internal:{};constructor:{{class_name=GENERAL_ORDERING::ascending;internal={{CC:0}};}};in_fly=0;type=FULL_SORTED;}};SP:[{{RESULT;IDX=0;FIN=1;}};]};SF:1;PR:0;); 2026-01-08T15:32:51.119283Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:841: SelfId=[1:337:2349];TabletId=9437184;ScanId=0;TxId=12;ScanGen=0;task_identifier=;fline=actor.cpp:218;stage=scan iterator is finished;iterator=ready_results:(count:0;records_count:0;);indexed_data:(CTX:{ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1;column_names=timestamp;);;ff=(column_ids=1,2,3,4,5,6,7,8,9,10;column_names=ingested_at,json_payload,level,message,request_id,resource_id,resource_type,saved_at,timestamp,uid;);;program_input=(column_ids=1,2,3,4,5,6,7,8,9,10;column_names=ingested_at,json_payload,level,message,request_id,resource_id,resource_type,saved_at,timestamp,uid;);;};SCANNER:{S:{{finished:{1};internal:{};constructor:{{class_name=GENERAL_ORDERING::ascending;internal={{CC:0}};}};in_fly=0;type=FULL_SORTED;}};SP:[{{RESULT;IDX=0;FIN=1;}};]};SF:1;PR:0;); 2026-01-08T15:32:51.119580Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: actor.cpp:457: Scan [1:337:2349] finished for tablet 9437184 2026-01-08T15:32:51.120034Z node 1 :TX_COLUMNSHARD_SCAN INFO: log.cpp:841: 
SelfId=[1:337:2349];TabletId=9437184;ScanId=0;TxId=12;ScanGen=0;task_identifier=;fline=actor.cpp:463;event=scan_finish;compute_actor_id=[1:331:2343];stats={"p":[{"events":["f_bootstrap"],"t":0},{"events":["l_bootstrap","f_ProduceResults"],"t":0.002},{"events":["f_processing","f_task_result"],"t":0.003},{"events":["f_ack","l_task_result"],"t":0.013},{"events":["l_ack","l_processing","l_ProduceResults","f_Finish","l_Finish"],"t":0.016}],"full":{"a":1649438,"name":"_full_task","f":1649438,"d_finished":0,"c":0,"l":1665977,"d":16539},"events":[{"name":"bootstrap","f":1649780,"d_finished":2431,"c":1,"l":1652211,"d":2431},{"a":1665153,"name":"ack","f":1663038,"d_finished":1947,"c":1,"l":1664985,"d":2771},{"a":1665138,"name":"processing","f":1652479,"d_finished":5956,"c":3,"l":1664988,"d":6795},{"name":"ProduceResults","f":1651558,"d_finished":3172,"c":6,"l":1665620,"d":3172},{"a":1665627,"name":"Finish","f":1665627,"d_finished":0,"c":0,"l":1665977,"d":350},{"name":"task_result","f":1652516,"d_finished":3929,"c":2,"l":1662904,"d":3929}],"id":"9437184::1"};iterator=ready_results:(count:0;records_count:0;);indexed_data:(CTX:{ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1;column_names=timestamp;);;ff=(column_ids=1,2,3,4,5,6,7,8,9,10;column_names=ingested_at,json_payload,level,message,request_id,resource_id,resource_type,saved_at,timestamp,uid;);;program_input=(column_ids=1,2,3,4,5,6,7,8,9,10;column_names=ingested_at,json_payload,level,message,request_id,resource_id,resource_type,saved_at,timestamp,uid;);;};SCANNER:{S:{{finished:{1};internal:{};constructor:{{class_name=GENERAL_ORDERING::ascending;internal={{CC:0}};}};in_fly=0;type=FULL_SORTED;}};SP:[{{RESULT;IDX=0;FIN=1;}};]};SF:1;PR:0;); 2026-01-08T15:32:51.120164Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:841: SelfId=[1:337:2349];TabletId=9437184;ScanId=0;TxId=12;ScanGen=0;task_identifier=;fline=actor.cpp:394;event=send_data;compute_actor_id=[1:331:2343];bytes=0;rows=0;faults=0;finished=1;fault=0;schema=; 2026-01-08T15:32:51.120692Z node 1 :TX_COLUMNSHARD_SCAN INFO: log.cpp:841: 
SelfId=[1:337:2349];TabletId=9437184;ScanId=0;TxId=12;ScanGen=0;task_identifier=;fline=actor.cpp:399;event=scan_finished;compute_actor_id=[1:331:2343];packs_sum=0;bytes=0;bytes_sum=0;rows=0;rows_sum=0;faults=0;finished=1;fault=0;stats={"p":[{"events":["f_bootstrap"],"t":0},{"events":["l_bootstrap","f_ProduceResults"],"t":0.002},{"events":["f_processing","f_task_result"],"t":0.003},{"events":["f_ack","l_task_result"],"t":0.013},{"events":["l_ProduceResults","f_Finish"],"t":0.016},{"events":["l_ack","l_processing","l_Finish"],"t":0.017}],"full":{"a":1649438,"name":"_full_task","f":1649438,"d_finished":0,"c":0,"l":1666527,"d":17089},"events":[{"name":"bootstrap","f":1649780,"d_finished":2431,"c":1,"l":1652211,"d":2431},{"a":1665153,"name":"ack","f":1663038,"d_finished":1947,"c":1,"l":1664985,"d":3321},{"a":1665138,"name":"processing","f":1652479,"d_finished":5956,"c":3,"l":1664988,"d":7345},{"name":"ProduceResults","f":1651558,"d_finished":3172,"c":6,"l":1665620,"d":3172},{"a":1665627,"name":"Finish","f":1665627,"d_finished":0,"c":0,"l":1666527,"d":900},{"name":"task_result","f":1652516,"d_finished":3929,"c":2,"l":1662904,"d":3929}],"id":"9437184::1"};iterator=ready_results:(count:0;records_count:0;);indexed_data:(CTX:{ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1;column_names=timestamp;);;ff=(column_ids=1,2,3,4,5,6,7,8,9,10;column_names=ingested_at,json_payload,level,message,request_id,resource_id,resource_type,saved_at,timestamp,uid;);;program_input=(column_ids=1,2,3,4,5,6,7,8,9,10;column_names=ingested_at,json_payload,level,message,request_id,resource_id,resource_type,saved_at,timestamp,uid;);;};SCANNER:{S:{{finished:{1};internal:{};constructor:{{class_name=GENERAL_ORDERING::ascending;internal={{CC:0}};}};in_fly=0;type=FULL_SORTED;}};SP:[{{RESULT;IDX=0;FIN=1;}};]};SF:1;PR:0;); 2026-01-08T15:32:51.120782Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:841: SelfId=[1:337:2349];TabletId=9437184;ScanId=0;TxId=12;ScanGen=0;task_identifier=;fline=stats.cpp:8;event=statistic;begin=2026-01-08T15:32:51.033354Z;index_granules=0;index_portions=1;index_batches=0;schema_columns=10;filter_columns=0;additional_columns=0;compacted_portions_bytes=0;inserted_portions_bytes=8392;committed_portions_bytes=0;data_filter_bytes=0;data_additional_bytes=0;delta_bytes=8392;selected_rows=0; 2026-01-08T15:32:51.120824Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:841: SelfId=[1:337:2349];TabletId=9437184;ScanId=0;TxId=12;ScanGen=0;task_identifier=;fline=read_context.h:196;event=scan_aborted;reason=unexpected on destructor; 2026-01-08T15:32:51.121009Z node 1 :TX_COLUMNSHARD_SCAN INFO: log.cpp:841: SelfId=[1:337:2349];TabletId=9437184;ScanId=0;TxId=12;ScanGen=0;task_identifier=;fline=context.h:99;fetching=ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1;column_names=timestamp;);;ff=(column_ids=1,2,3,4,5,6,7,8,9,10;column_names=ingested_at,json_payload,level,message,request_id,resource_id,resource_type,saved_at,timestamp,uid;);;program_input=(column_ids=1,2,3,4,5,6,7,8,9,10;column_names=ingested_at,json_payload,level,message,request_id,resource_id,resource_type,saved_at,timestamp,uid;);;; 2026-01-08T15:32:51.121902Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: Finished read cookie: 1 at tablet 9437184 2026-01-08T15:32:51.122608Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: EvScan txId: 12 scanId: 0 version: {1767886371059:12} readable: {1767886371059:max} at tablet 9437184 2026-01-08T15:32:51.122775Z node 1 :TX_COLUMNSHARD DEBUG: 
ctor_logger.h:56: TTxScan prepare txId: 12 scanId: 0 at tablet 9437184 2026-01-08T15:32:51.122857Z node 1 :TX_COLUMNSHARD WARN: log.cpp:841: tablet_id=9437184;self_id=[1:129:2159];ev=NKikimr::TEvDataShard::TEvKqpScan;tx_id=12;scan_id=0;gen=0;table=;snapshot={1767886371059:12};tablet=9437184;timeout=0.000000s;cpu_limits=Disabled;;fline=tables_manager.cpp:57;method=resolve_internal_path_id;ss_local=1;result=not_found; 2026-01-08T15:32:51.122957Z node 1 :TX_COLUMNSHARD_SCAN WARN: log.cpp:841: tablet_id=9437184;self_id=[1:129:2159];ev=NKikimr::TEvDataShard::TEvKqpScan;tx_id=12;scan_id=0;gen=0;table=;snapshot={1767886371059:12};tablet=9437184;timeout=0.000000s;cpu_limits=Disabled;;fline=tx_scan.cpp:12;event=TTxScan failed;problem=cannot build table metadata accessor for request: incorrect table name and table id for scan start: undefined::1;details=; |86.8%| [TM] {BAZEL_UPLOAD} ydb/core/tx/columnshard/ut_schema/unittest >> KqpPg::InsertFromSelect_Simple-useSink [GOOD] >> KqpPg::InsertFromSelect_NoReorder-useSink >> DataShardWrite::UpsertPreparedManyTables-Volatile [GOOD] >> DataShardWrite::UpsertPreparedNoTxCache+Volatile >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-LocalUser-CreateUser-6 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-LocalUser-CreateUser-7 >> TestSqsTopicHttpProxy::TestGetQueueUrlEmpty |86.8%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/schemeshard/ut_column_build_reboots/ydb-core-tx-schemeshard-ut_column_build_reboots |86.8%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/schemeshard/ut_column_build_reboots/ydb-core-tx-schemeshard-ut_column_build_reboots |86.8%| [LD] {RESULT} $(B)/ydb/core/tx/schemeshard/ut_column_build_reboots/ydb-core-tx-schemeshard-ut_column_build_reboots ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/tx_proxy/ut_ext_tenant/unittest >> TExtSubDomainTest::DeclareAndDrop-EnableRealSystemViewPaths-true [GOOD] Test command err: 2026-01-08T15:32:44.100874Z node 1 :METADATA_PROVIDER WARN: log.cpp:841: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7593014116444417046:2069];send_to=[0:7307199536658146131:7762515]; 2026-01-08T15:32:44.100931Z node 1 :METADATA_PROVIDER ERROR: log.cpp:841: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2026-01-08T15:32:44.475337Z node 1 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: LookupError, path: dc-1/.metadata/script_executions 2026-01-08T15:32:44.488102Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2026-01-08T15:32:44.488215Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2026-01-08T15:32:44.600133Z node 1 :IMPORT WARN: schemeshard_import.cpp:440: Table profiles were not loaded 2026-01-08T15:32:44.613289Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1374: Notification cookie mismatch for subscription [1:7593014116444417016:2081] 1767886364085735 != 1767886364085738 2026-01-08T15:32:44.627393Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2026-01-08T15:32:44.677445Z node 1 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script 
execution tables existence, scheme status: PathErrorUnknown, path: dc-1/.metadata/script_executions 2026-01-08T15:32:44.837206Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:402: actor# [1:7593014116444417279:2105] Handle TEvNavigate describe path dc-1 2026-01-08T15:32:44.837249Z node 1 :TX_PROXY DEBUG: describe.cpp:270: Actor# [1:7593014116444417585:2268] HANDLE EvNavigateScheme dc-1 2026-01-08T15:32:44.837344Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2810: Handle TEvTxProxySchemeCache::TEvNavigateKeySet: self# [1:7593014116444417299:2115], request# { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1 TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: true SyncVersion: true Status: Unknown Kind: KindUnknown DomainInfo }] } 2026-01-08T15:32:44.837434Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:889: [main][1:7593014116444417455:2199][/dc-1] Handle NKikimr::NSchemeBoard::NInternalEvents::TEvSyncRequest: sender# [1:7593014116444417299:2115], cookie# 1 2026-01-08T15:32:44.839535Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:375: [replica][1:7593014116444417493:2199][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7593014116444417490:2199], cookie# 1 2026-01-08T15:32:44.839583Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:375: [replica][1:7593014116444417494:2199][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7593014116444417491:2199], cookie# 1 2026-01-08T15:32:44.839596Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:375: [replica][1:7593014116444417495:2199][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7593014116444417492:2199], cookie# 1 2026-01-08T15:32:44.839638Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1137: [1:7593014116444416984:2049] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7593014116444417493:2199], cookie# 1 2026-01-08T15:32:44.839689Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1137: [1:7593014116444416987:2052] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7593014116444417494:2199], cookie# 1 2026-01-08T15:32:44.839708Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1137: [1:7593014116444416990:2055] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7593014116444417495:2199], cookie# 1 2026-01-08T15:32:44.839757Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:384: [replica][1:7593014116444417493:2199][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 Cluster State: { } }: sender# [1:7593014116444416984:2049], cookie# 1 2026-01-08T15:32:44.839772Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:384: [replica][1:7593014116444417494:2199][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 Cluster State: { } }: sender# [1:7593014116444416987:2052], cookie# 1 2026-01-08T15:32:44.839802Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:384: [replica][1:7593014116444417495:2199][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 Cluster State: { } }: sender# [1:7593014116444416990:2055], cookie# 1 2026-01-08T15:32:44.839849Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:919: [main][1:7593014116444417455:2199][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 Cluster State: { } }: sender# 
[1:7593014116444417490:2199], cookie# 1 2026-01-08T15:32:44.839875Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:986: [main][1:7593014116444417455:2199][/dc-1] Sync is in progress: cookie# 1, ring group# 0, size# 3, half# 1, successes# 1, failures# 0 2026-01-08T15:32:44.839893Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:919: [main][1:7593014116444417455:2199][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 Cluster State: { } }: sender# [1:7593014116444417491:2199], cookie# 1 2026-01-08T15:32:44.839913Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:999: [main][1:7593014116444417455:2199][/dc-1] Sync is done in the ring group: cookie# 1, ring group# 0, size# 3, half# 1, successes# 2, failures# 0, partial# 0 2026-01-08T15:32:44.839995Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:919: [main][1:7593014116444417455:2199][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 Cluster State: { } }: sender# [1:7593014116444417492:2199], cookie# 1 2026-01-08T15:32:44.840021Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:925: [main][1:7593014116444417455:2199][/dc-1] Sync cookie mismatch: sender# [1:7593014116444417492:2199], cookie# 1, current cookie# 0 2026-01-08T15:32:44.840088Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2651: HandleNotify: self# [1:7593014116444417299:2115], notify# NKikimr::NSchemeBoard::NInternalEvents::TEvSyncResponse { Path: /dc-1 PathId: Partial: 0 } 2026-01-08T15:32:44.863307Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2526: ResolveCacheItem: self# [1:7593014116444417299:2115], notify# NKikimr::NSchemeBoard::NInternalEvents::TEvSyncResponse { Path: /dc-1 PathId: Partial: 0 }, by path# { Subscriber: { Subscriber: [1:7593014116444417455:2199] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 1 } Filled: 1 Status: StatusSuccess Kind: 2 TableKind: 0 Created: 1 CreateStep: 0 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] DomainId: [OwnerId: 72057594046644480, LocalPathId: 1] IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, by pathId# nullptr 2026-01-08T15:32:44.863448Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:1892: FillEntry for TNavigate: self# [1:7593014116444417299:2115], cacheItem# { Subscriber: { Subscriber: [1:7593014116444417455:2199] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 1 } Filled: 1 Status: StatusSuccess Kind: 2 TableKind: 0 Created: 1 CreateStep: 0 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] DomainId: [OwnerId: 72057594046644480, LocalPathId: 1] IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, entry# { Path: dc-1 TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: true SyncVersion: true Status: Unknown Kind: KindUnknown DomainInfo }, props# { Cookie: 1 IsSync: true Partial: 0 } 2026-01-08T15:32:44.866088Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:268: Send result: self# [1:7593014116444417586:2269], recipient# [1:7593014116444417585:2268], result# { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1 TableId: [72057594046644480:1:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: true SyncVersion: true Status: Ok Kind: KindPath DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] Params { Version: 0 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 
72057594046382081 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } }] } 2026-01-08T15:32:44.866154Z node 1 :TX_PROXY DEBUG: describe.cpp:354: Actor# [1:7593014116444417585:2268] HANDLE EvNavigateKeySetResult TDescribeReq marker# P5 ErrorCount# 0 2026-01-08T15:32:44.940879Z node 1 :TX_PROXY DEBUG: describe.cpp:433: Actor# [1:7593014116444417585:2268] SEND to# 72057594046644480 shardToRequest NKikimrSchemeOp.TDescribePath Path: "dc-1" Options { ShowPrivateTable: true } 2026-01-08T15:32:44.948511Z node 1 :TX_PROXY DEBUG: describe.cpp:446: Actor# [1:7593014116444417585:2268] Handle TEvDescribeSchemeResult Forward to# [1:7593014116444417584:2267] Cookie: 0 TEvDescribeSchemeResult: NKikimrScheme.TEvDescribeSchemeResult PreSerializedData size# 2 Record# Status: StatusSuccess Path: "dc-1" PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } DomainKey { SchemeShard: 72057594046644480 PathId: 1 } PathsInside: 0 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046644480 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SecurityState { Audience: "/dc-1" } SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 1 PathOwnerId: 72057594046644480 2026-01-08T15:32:44.954801Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:314: actor# [1:7593014116444417279:2105] Handle TEvProposeTransaction 2026-01-08T15:32:44.954833Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:237: actor# [1:7593014116444417279:2105] TxId# 281474976710657 ProcessProposeTransaction 2026-01-08T15:32:44.954888Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:256: actor# [1:7593014116444417279:2105] Cookie# 0 userReqId# "" txid# 281474976710657 SEND to# [1:7593014116444417592:2274] 2026-01-08T15:32:45.108281Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2810: Handle TEvTxProxySchemeCache::TEvNavigateKeySet: self# [1:7593014116444417299:2115], request# { ErrorCount: 0 DatabaseName: /dc-1 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/.metadata/initialization/migrations TableId: [18446744073709551615:184467440737 ... 
schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046644480, cookie: 281474976715659 2026-01-08T15:32:48.712089Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:205: TTxCleanDroppedSubDomains Execute, 1 paths in candidate queue, at schemeshard: 72057594046644480 2026-01-08T15:32:48.712102Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:224: TTxCleanDroppedPaths: PersistRemoveSubDomain for PathId# [OwnerId: 72057594046644480, LocalPathId: 38], at schemeshard: 72057594046644480 2026-01-08T15:32:48.712132Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:611: DecrementPathDbRefCount reason for pathId [OwnerId: 72057594046644480, LocalPathId: 38] was 1 2026-01-08T15:32:48.712209Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046644480, cookie: 281474976715659 2026-01-08T15:32:48.712244Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:123: TTxCleanDroppedPaths Execute, 1 paths in candidate queue, at schemeshard: 72057594046644480 2026-01-08T15:32:48.712254Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:137: TTxCleanDroppedPaths: PersistRemovePath for PathId# [OwnerId: 72057594046644480, LocalPathId: 38], at schemeshard: 72057594046644480 2026-01-08T15:32:48.712288Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:611: DecrementPathDbRefCount reason remove path for pathId [OwnerId: 72057594046644480, LocalPathId: 1] was 2 2026-01-08T15:32:48.712608Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046644480, cookie: 281474976715659 2026-01-08T15:32:48.712649Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:252: TTxCleanDroppedSubDomains Complete, done PersistRemoveSubDomain for 1 subdomains, skipped 0, left 0 candidates, at schemeshard: 72057594046644480 2026-01-08T15:32:48.712674Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:160: TTxCleanDroppedPaths Complete, done PersistRemovePath for 1 paths, skipped 0, left 0 candidates, at schemeshard: 72057594046644480 2026-01-08T15:32:48.718659Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:123: NotifyTxCompletion, unknown transaction, txId: 281474976715659, at schemeshard: 72057594046644480 2026-01-08T15:32:48.719416Z node 2 :TX_PROXY DEBUG: proxy_impl.cpp:402: actor# [2:7593014136946651419:2106] Handle TEvNavigate describe path /dc-1 2026-01-08T15:32:48.719448Z node 2 :TX_PROXY DEBUG: describe.cpp:270: Actor# [2:7593014136946651995:2519] HANDLE EvNavigateScheme /dc-1 2026-01-08T15:32:48.719527Z node 2 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2810: Handle TEvTxProxySchemeCache::TEvNavigateKeySet: self# [2:7593014136946651428:2109], request# { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1 TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: true SyncVersion: true Status: Unknown Kind: KindUnknown DomainInfo }] } 2026-01-08T15:32:48.719602Z node 2 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:889: [main][2:7593014136946651647:2222][/dc-1] Handle NKikimr::NSchemeBoard::NInternalEvents::TEvSyncRequest: sender# [2:7593014136946651428:2109], cookie# 4 2026-01-08T15:32:48.719668Z node 2 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:375: 
[replica][2:7593014136946651662:2222][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [2:7593014136946651659:2222], cookie# 4 2026-01-08T15:32:48.719684Z node 2 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:375: [replica][2:7593014136946651663:2222][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [2:7593014136946651660:2222], cookie# 4 2026-01-08T15:32:48.719697Z node 2 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:375: [replica][2:7593014136946651664:2222][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [2:7593014136946651661:2222], cookie# 4 2026-01-08T15:32:48.719730Z node 2 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1137: [2:7593014136946651125:2049] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [2:7593014136946651662:2222], cookie# 4 2026-01-08T15:32:48.719763Z node 2 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1137: [2:7593014136946651128:2052] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [2:7593014136946651663:2222], cookie# 4 2026-01-08T15:32:48.719803Z node 2 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1137: [2:7593014136946651131:2055] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [2:7593014136946651664:2222], cookie# 4 2026-01-08T15:32:48.719857Z node 2 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:384: [replica][2:7593014136946651662:2222][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 10 Partial: 0 Cluster State: { } }: sender# [2:7593014136946651125:2049], cookie# 4 2026-01-08T15:32:48.719876Z node 2 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:384: [replica][2:7593014136946651663:2222][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 10 Partial: 0 Cluster State: { } }: sender# [2:7593014136946651128:2052], cookie# 4 2026-01-08T15:32:48.719891Z node 2 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:384: [replica][2:7593014136946651664:2222][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 10 Partial: 0 Cluster State: { } }: sender# [2:7593014136946651131:2055], cookie# 4 2026-01-08T15:32:48.719922Z node 2 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:919: [main][2:7593014136946651647:2222][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 10 Partial: 0 Cluster State: { } }: sender# [2:7593014136946651659:2222], cookie# 4 2026-01-08T15:32:48.719943Z node 2 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:986: [main][2:7593014136946651647:2222][/dc-1] Sync is in progress: cookie# 4, ring group# 0, size# 3, half# 1, successes# 1, failures# 0 2026-01-08T15:32:48.719960Z node 2 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:919: [main][2:7593014136946651647:2222][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 10 Partial: 0 Cluster State: { } }: sender# [2:7593014136946651660:2222], cookie# 4 2026-01-08T15:32:48.719978Z node 2 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:999: [main][2:7593014136946651647:2222][/dc-1] Sync is done in the ring group: cookie# 4, ring group# 0, size# 3, half# 1, successes# 2, failures# 0, partial# 0 2026-01-08T15:32:48.719999Z node 2 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:919: [main][2:7593014136946651647:2222][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 10 Partial: 0 Cluster State: { } }: sender# [2:7593014136946651661:2222], cookie# 4 2026-01-08T15:32:48.720009Z node 2 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:925: 
[main][2:7593014136946651647:2222][/dc-1] Sync cookie mismatch: sender# [2:7593014136946651661:2222], cookie# 4, current cookie# 0 2026-01-08T15:32:48.720043Z node 2 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2651: HandleNotify: self# [2:7593014136946651428:2109], notify# NKikimr::NSchemeBoard::NInternalEvents::TEvSyncResponse { Path: /dc-1 PathId: Partial: 0 } 2026-01-08T15:32:48.720101Z node 2 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2526: ResolveCacheItem: self# [2:7593014136946651428:2109], notify# NKikimr::NSchemeBoard::NInternalEvents::TEvSyncResponse { Path: /dc-1 PathId: Partial: 0 }, by path# { Subscriber: { Subscriber: [2:7593014136946651647:2222] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 4 } Filled: 1 Status: StatusSuccess Kind: 2 TableKind: 0 Created: 1 CreateStep: 1767886368661 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] DomainId: [OwnerId: 72057594046644480, LocalPathId: 1] IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, by pathId# nullptr 2026-01-08T15:32:48.720204Z node 2 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:1892: FillEntry for TNavigate: self# [2:7593014136946651428:2109], cacheItem# { Subscriber: { Subscriber: [2:7593014136946651647:2222] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 4 } Filled: 1 Status: StatusSuccess Kind: 2 TableKind: 0 Created: 1 CreateStep: 1767886368661 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] DomainId: [OwnerId: 72057594046644480, LocalPathId: 1] IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, entry# { Path: dc-1 TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: true SyncVersion: true Status: Unknown Kind: KindUnknown DomainInfo }, props# { Cookie: 4 IsSync: true Partial: 0 } 2026-01-08T15:32:48.720362Z node 2 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:268: Send result: self# [2:7593014136946651996:2520], recipient# [2:7593014136946651995:2519], result# { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1 TableId: [72057594046644480:1:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: true SyncVersion: true Status: Ok Kind: KindPath DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] Params { Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } }] } 2026-01-08T15:32:48.720388Z node 2 :TX_PROXY DEBUG: describe.cpp:354: Actor# [2:7593014136946651995:2519] HANDLE EvNavigateKeySetResult TDescribeReq marker# P5 ErrorCount# 0 2026-01-08T15:32:48.720446Z node 2 :TX_PROXY DEBUG: describe.cpp:433: Actor# [2:7593014136946651995:2519] SEND to# 72057594046644480 shardToRequest NKikimrSchemeOp.TDescribePath Path: "/dc-1" Options { ShowPrivateTable: true } 2026-01-08T15:32:48.721017Z node 2 :TX_PROXY DEBUG: describe.cpp:446: Actor# [2:7593014136946651995:2519] Handle TEvDescribeSchemeResult Forward to# [2:7593014136946651994:2518] Cookie: 0 TEvDescribeSchemeResult: NKikimrScheme.TEvDescribeSchemeResult PreSerializedData size# 67 Record# Status: StatusSuccess Path: "/dc-1" PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 1767886368661 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 10 PathSubType: 
EPathSubTypeEmpty Version { GeneralVersion: 10 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 8 SubDomainVersion: 1 SecurityStateVersion: 0 } ChildrenExist: true } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } DomainKey { SchemeShard: 72057594046644480 PathId: 1 } StoragePools { Name: "/dc-1:test" Kind: "test" } PathsInside: 36 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046644480 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SecurityState { Audience: "/dc-1" } SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 1 PathOwnerId: 72057594046644480 |86.8%| [TM] {BAZEL_UPLOAD} ydb/core/tx/tx_proxy/ut_ext_tenant/unittest |86.8%| [TM] {asan, default-linux-x86_64, release} ydb/services/cms/ut/unittest |86.8%| [TM] {BAZEL_UPLOAD} ydb/services/cms/ut/unittest >> TGRpcCmsTest::RemoveWithAnotherTokenTest >> DataShardWrite::ExecSQLUpsertPrepared-EvWrite-Volatile [GOOD] >> DataShardWrite::ExecSQLUpsertPrepared+EvWrite-Volatile |86.8%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/services/fq/ut_integration/ydb-services-fq-ut_integration |86.8%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/services/fq/ut_integration/ydb-services-fq-ut_integration |86.8%| [LD] {RESULT} $(B)/ydb/services/fq/ut_integration/ydb-services-fq-ut_integration >> BasicUsage::CreateTopicWithSharedConsumer_KeepMessagesOrder_True [GOOD] >> BasicUsage::AlterTopicWithSharedConsumer_MoveDeadLetterPolicy >> KqpPg::DropSequence [GOOD] >> KqpPg::DeleteWithQueryService+useSink >> TGRpcCmsTest::AlterRemoveTest >> TColumnShardTestSchema::Drop-Reboots+GenerateInternalPathId [GOOD] >> TExtSubDomainTest::DeclareAndDefineWithoutNodes-AlterDatabaseCreateHiveFirst-true [GOOD] >> DataShardWrite::DeletePrepared+Volatile [GOOD] >> DataShardWrite::DeletePrepared-Volatile >> KqpPg::PgCreateTable [GOOD] >> KqpPg::PgUpdate+useSink >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-BuiltinUser-ModifyUser-46 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-BuiltinUser-ModifyUser-47 >> KqpPg::AlterSequence [GOOD] >> KqpPg::AlterColumnSetDefaultFromSequence |86.8%| [TM] {asan, default-linux-x86_64, release} ydb/services/cms/ut/unittest |86.8%| [TM] {BAZEL_UPLOAD} ydb/services/cms/ut/unittest >> TTxDataShardRecomputeKMeansScan::MainTable [GOOD] >> TTxDataShardRecomputeKMeansScan::BuildTable+WithForeign ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/columnshard/ut_schema/unittest >> TColumnShardTestSchema::Drop-Reboots+GenerateInternalPathId [GOOD] Test command err: 2026-01-08T15:32:51.627268Z node 1 :TX_COLUMNSHARD INFO: log.cpp:841: tablet_id=9437184;self_id=[1:129:2159];fline=columnshard.cpp:110;event=initialize_shard;step=OnActivateExecutor; 2026-01-08T15:32:51.677098Z node 1 :TX_COLUMNSHARD INFO: log.cpp:841: 
tablet_id=9437184;self_id=[1:129:2159];fline=columnshard.cpp:129;event=initialize_shard;step=initialize_tiring_finished; 2026-01-08T15:32:51.677436Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxInitSchema.Execute at tablet 9437184 2026-01-08T15:32:51.688011Z node 1 :TX_COLUMNSHARD WARN: log.cpp:841: tablet_id=9437184;self_id=[1:129:2159];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2026-01-08T15:32:51.688325Z node 1 :TX_COLUMNSHARD WARN: log.cpp:841: tablet_id=9437184;self_id=[1:129:2159];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2026-01-08T15:32:51.688602Z node 1 :TX_COLUMNSHARD WARN: log.cpp:841: tablet_id=9437184;self_id=[1:129:2159];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2026-01-08T15:32:51.688724Z node 1 :TX_COLUMNSHARD WARN: log.cpp:841: tablet_id=9437184;self_id=[1:129:2159];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2026-01-08T15:32:51.688839Z node 1 :TX_COLUMNSHARD WARN: log.cpp:841: tablet_id=9437184;self_id=[1:129:2159];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2026-01-08T15:32:51.688946Z node 1 :TX_COLUMNSHARD WARN: log.cpp:841: tablet_id=9437184;self_id=[1:129:2159];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2026-01-08T15:32:51.689075Z node 1 :TX_COLUMNSHARD WARN: log.cpp:841: tablet_id=9437184;self_id=[1:129:2159];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2026-01-08T15:32:51.689197Z node 1 :TX_COLUMNSHARD WARN: log.cpp:841: tablet_id=9437184;self_id=[1:129:2159];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2026-01-08T15:32:51.689352Z node 1 :TX_COLUMNSHARD WARN: log.cpp:841: tablet_id=9437184;self_id=[1:129:2159];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2026-01-08T15:32:51.689470Z node 1 :TX_COLUMNSHARD WARN: log.cpp:841: tablet_id=9437184;self_id=[1:129:2159];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2026-01-08T15:32:51.689603Z node 1 :TX_COLUMNSHARD WARN: log.cpp:841: tablet_id=9437184;self_id=[1:129:2159];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2026-01-08T15:32:51.689741Z node 1 :TX_COLUMNSHARD WARN: log.cpp:841: tablet_id=9437184;self_id=[1:129:2159];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CopyBlobIdsToV2; 2026-01-08T15:32:51.689857Z node 1 :TX_COLUMNSHARD WARN: log.cpp:841: tablet_id=9437184;self_id=[1:129:2159];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreAppearanceSnapshot; 2026-01-08T15:32:51.730737Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxInitSchema.Complete at tablet 9437184 
2026-01-08T15:32:51.731764Z node 1 :TX_COLUMNSHARD INFO: log.cpp:841: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=columnshard__init.cpp:131;step=TTxUpdateSchema.Execute_Start;details=normalizers_count=12;current_normalizer=CLASS_NAME=Granules; 2026-01-08T15:32:51.731873Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:841: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=1;type=Granules; 2026-01-08T15:32:51.732091Z node 1 :TX_COLUMNSHARD INFO: log.cpp:841: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=normalizer.cpp:127;normalizer=TGranulesNormalizer;message=0 chunks found; 2026-01-08T15:32:51.732301Z node 1 :TX_COLUMNSHARD WARN: log.cpp:841: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Granules;id=1; 2026-01-08T15:32:51.732399Z node 1 :TX_COLUMNSHARD WARN: log.cpp:841: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=Chunks;id=Chunks; 2026-01-08T15:32:51.732472Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:841: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=2;type=Chunks; 2026-01-08T15:32:51.732593Z node 1 :TX_COLUMNSHARD INFO: log.cpp:841: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=chunks.cpp:138;normalizer=TChunksNormalizer;message=0 chunks found; 2026-01-08T15:32:51.732684Z node 1 :TX_COLUMNSHARD WARN: log.cpp:841: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Chunks;id=2; 2026-01-08T15:32:51.732734Z node 1 :TX_COLUMNSHARD WARN: log.cpp:841: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=TablesCleaner;id=TablesCleaner; 2026-01-08T15:32:51.732764Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:841: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=4;type=TablesCleaner; 2026-01-08T15:32:51.732958Z node 1 :TX_COLUMNSHARD INFO: log.cpp:841: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=normalizer.cpp:123;normalizer=TGranulesNormalizer;message=0 chunks found; 2026-01-08T15:32:51.733030Z node 1 :TX_COLUMNSHARD WARN: log.cpp:841: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=TablesCleaner;id=4; 2026-01-08T15:32:51.733080Z node 1 :TX_COLUMNSHARD WARN: log.cpp:841: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanGranuleId;id=CleanGranuleId; 2026-01-08T15:32:51.733113Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:841: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=6;type=CleanGranuleId; 2026-01-08T15:32:51.733221Z node 1 :TX_COLUMNSHARD INFO: log.cpp:841: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=clean_granule.cpp:133;normalizer=TCleanGranuleIdNormalizer;message=0 chunks found; 2026-01-08T15:32:51.733284Z node 1 :TX_COLUMNSHARD WARN: log.cpp:841: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanGranuleId;id=6; 2026-01-08T15:32:51.733396Z node 1 :TX_COLUMNSHARD WARN: log.cpp:841: 
tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=GCCountersNormalizer;id=GCCountersNormalizer; 2026-01-08T15:32:51.733431Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:841: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=9;type=GCCountersNormalizer; 2026-01-08T15:32:51.733495Z node 1 :TX_COLUMNSHARD WARN: log.cpp:841: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=GCCountersNormalizer;id=9; 2026-01-08T15:32:51.733544Z node 1 :TX_COLUMNSHARD WARN: log.cpp:841: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=SyncPortionFromChunks;id=SyncPortionFromChunks; 2026-01-08T15:32:51.733576Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:841: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=11;type=SyncPortionFromChunks; 2026-01-08T15:32:51.733629Z node 1 :TX_COLUMNSHARD WARN: log.cpp:841: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=SyncPortionFromChunks;id=11; 2026-01-08T15:32:51.733682Z node 1 :TX_COLUMNSHARD WARN: log.cpp:841: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV1Chunks_V2;id=RestoreV1Chunks_V2; 2026-01-08T15:32:51.733720Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:841: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=15;type=RestoreV1Chunks_V2; 2026-01-08T15:32:51.734009Z node 1 :TX_COLUMNSHARD WARN: log.cpp:841: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV1Chunks_V2;id=15; 2026-01-08T15:32:51.734063Z node 1 :TX_COLUMNSHARD WARN: log.cpp:841: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV2Chunks;id=RestoreV2Chunks; 2026-01-08T15:32:51.734096Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:841: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=16;type=RestoreV2Chunks; 2026-01-08T15:32:51.734233Z node 1 :TX_COLUMNSHARD WARN: log.cpp:841: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV2Chunks;id=16; 2026-01-08T15:32:51.734294Z node 1 :TX_COLUMNSHARD WARN: log.cpp:841: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanDeprecatedSnapshot;id=CleanDeprecatedSnapshot; 2026-01-08T15:32:51.734346Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:841: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=17;type=CleanDeprecatedSnapshot; 2026-01-08T15:32:51.734412Z node 1 :TX_COLUMNSHARD WARN: log.cpp:841: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanDeprecatedSnapshot;id=17; 2026-01-08T15:32:51.734462Z node 1 :TX_COLUMNSHARD WARN: log.cpp:841: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV0ChunksMeta;id=RestoreV0ChunksMeta; 2026-01-08T15:32:51.734514Z node 1 :TX_COLUMNSHARD 
NOTICE: log.cpp:841: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=18;type=RestoreV0ChunksMeta; 2026-01-08T15:32:51.734576Z node 1 :TX_COLUMNSHARD WARN: log.cpp:841: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV0ChunksMeta;id=18; 2026-01-08T15:32:51.734621Z node 1 :TX_COLUMNSHARD WARN: log.cpp:841: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CopyBlobIdsToV2;id=CopyBlobIdsToV2; 2026-01-08T15:32:51.734652Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:841: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=19;type=CopyBlobIdsToV2; 2026-01-08T15:32:51.734835Z node 1 :TX_COLUMNSHARD WARN: log.cpp:841: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CopyBlobIdsToV2;id=19; 2026-01-08T15:32:51.734899Z node 1 :TX_COLUMNSHARD WARN: log.cpp:841: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLAS ... 7184;request_tx=104:TX_KIND_SCHEMA;min=1767886372713;max=18446744073709551615;plan=0;src=[1:104:2137];cookie=00:2;;this=137002094873632;op_tx=104:TX_KIND_SCHEMA;min=1767886372713;max=18446744073709551615;plan=0;src=[1:104:2137];cookie=00:2;;int_op_tx=104:TX_KIND_SCHEMA;min=1767886372713;max=18446744073709551615;plan=0;src=[1:104:2137];cookie=00:2;;int_this=137208235128000;method=TTxController::FinishProposeOnComplete;tx_id=104;fline=propose_tx.cpp:32;message=;tablet_id=9437184;tx_id=104; 2026-01-08T15:32:55.404077Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TTxNotifyTxCompletion.Execute at tablet 9437184 2026-01-08T15:32:55.404244Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: PlanStep 1767886372713 at tablet 9437184, mediator 0 2026-01-08T15:32:55.404309Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxPlanStep[12] execute at tablet 9437184 2026-01-08T15:32:55.404624Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: DropTable for pathId: {internal: 1000000185, ss: 1} at tablet 9437184 2026-01-08T15:32:55.424096Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxPlanStep[12] complete at tablet 9437184 2026-01-08T15:32:55.424829Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: EvScan txId: 18446744073709551615 scanId: 0 version: {1767886372713:max} readable: {1767886372713:max} at tablet 9437184 2026-01-08T15:32:55.425006Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TTxScan prepare txId: 18446744073709551615 scanId: 0 at tablet 9437184 2026-01-08T15:32:55.437972Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:841: tablet_id=9437184;self_id=[1:129:2159];ev=NKikimr::TEvDataShard::TEvKqpScan;tx_id=18446744073709551615;scan_id=0;gen=0;table=;snapshot={1767886372713:max};tablet=9437184;timeout=0.000000s;cpu_limits=Disabled;;fline=program.cpp:34;event=parse_program;program=Command { Projection { Columns { Id: 1 } } } ; 2026-01-08T15:32:55.438097Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:841: tablet_id=9437184;self_id=[1:129:2159];ev=NKikimr::TEvDataShard::TEvKqpScan;tx_id=18446744073709551615;scan_id=0;gen=0;table=;snapshot={1767886372713:max};tablet=9437184;timeout=0.000000s;cpu_limits=Disabled;;fline=program.cpp:103;parse_proto_program=Command { Projection { Columns { Id: 1 } } } ; 2026-01-08T15:32:55.444226Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:841: 
tablet_id=9437184;self_id=[1:129:2159];ev=NKikimr::TEvDataShard::TEvKqpScan;tx_id=18446744073709551615;scan_id=0;gen=0;table=;snapshot={1767886372713:max};tablet=9437184;timeout=0.000000s;cpu_limits=Disabled;;fline=program.cpp:52;event=program_parsed;result={"edges":[{"owner_id":0,"inputs":[{"from":2}]},{"owner_id":1,"inputs":[{"from":3}]},{"owner_id":2,"inputs":[{"from":1}]},{"owner_id":3,"inputs":[]}],"nodes":{"1":{"p":{"i":"0","p":{"data":[{"name":"timestamp","id":1}]},"o":"1","t":"FetchOriginalData"},"w":2,"id":1},"3":{"p":{"p":{"data":[{"name":"timestamp","id":1}]},"o":"0","t":"ReserveMemory"},"w":0,"id":3},"2":{"p":{"i":"1","p":{"address":{"name":"timestamp","id":1}},"o":"1","t":"AssembleOriginalData"},"w":7,"id":2},"0":{"p":{"i":"1","t":"Projection"},"w":7,"id":0}}}; 2026-01-08T15:32:55.446545Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:841: tablet_id=9437184;self_id=[1:129:2159];ev=NKikimr::TEvDataShard::TEvKqpScan;tx_id=18446744073709551615;scan_id=0;gen=0;table=;snapshot={1767886372713:max};tablet=9437184;timeout=0.000000s;cpu_limits=Disabled;;fline=read_metadata.h:140;filter_limit_not_detected=no_ranges; 2026-01-08T15:32:55.545272Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:841: tablet_id=9437184;self_id=[1:129:2159];ev=NKikimr::TEvDataShard::TEvKqpScan;tx_id=18446744073709551615;scan_id=0;gen=0;table=;snapshot={1767886372713:max};tablet=9437184;timeout=0.000000s;cpu_limits=Disabled;;fline=tx_scan.cpp:214;event=TTxScan started;actor_id=[1:563:2575];trace_detailed=; 2026-01-08T15:32:55.546911Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:841: fline=context.cpp:92;ff_first=(column_ids=1;column_names=timestamp;);; 2026-01-08T15:32:55.547238Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:841: fline=context.cpp:107;columns_context_info=ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1;column_names=timestamp;);;program_input=(column_ids=1;column_names=timestamp;);;; 2026-01-08T15:32:55.547675Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:841: fline=actor.cpp:213;stage=start;iterator=ready_results:(count:0;records_count:0;);indexed_data:(CTX:{ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1;column_names=timestamp;);;program_input=(column_ids=1;column_names=timestamp;);;};SCANNER:{S:{{finished:{1};internal:{};constructor:{{class_name=GENERAL_ORDERING::not_sorted;internal={{CC:0}};}};in_fly=0;type=FULL_SORTED;}};SP:[{{RESULT;IDX=0;FIN=1;}};]};SF:1;PR:0;); 2026-01-08T15:32:55.547907Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:841: fline=actor.cpp:218;stage=scan iterator is finished;iterator=ready_results:(count:0;records_count:0;);indexed_data:(CTX:{ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1;column_names=timestamp;);;program_input=(column_ids=1;column_names=timestamp;);;};SCANNER:{S:{{finished:{1};internal:{};constructor:{{class_name=GENERAL_ORDERING::not_sorted;internal={{CC:0}};}};in_fly=0;type=FULL_SORTED;}};SP:[{{RESULT;IDX=0;FIN=1;}};]};SF:1;PR:0;); 2026-01-08T15:32:55.548279Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:841: SelfId=[1:563:2575];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=actor.cpp:122;event=TEvScanDataAck;info=limits:(bytes=8388608;chunks=1);; 2026-01-08T15:32:55.548498Z node 1 
:TX_COLUMNSHARD_SCAN DEBUG: log.cpp:841: SelfId=[1:563:2575];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=actor.cpp:213;stage=start;iterator=ready_results:(count:0;records_count:0;);indexed_data:(CTX:{ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1;column_names=timestamp;);;program_input=(column_ids=1;column_names=timestamp;);;};SCANNER:{S:{{finished:{1};internal:{};constructor:{{class_name=GENERAL_ORDERING::not_sorted;internal={{CC:0}};}};in_fly=0;type=FULL_SORTED;}};SP:[{{RESULT;IDX=0;FIN=1;}};]};SF:1;PR:0;); 2026-01-08T15:32:55.548741Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:841: SelfId=[1:563:2575];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=actor.cpp:218;stage=scan iterator is finished;iterator=ready_results:(count:0;records_count:0;);indexed_data:(CTX:{ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1;column_names=timestamp;);;program_input=(column_ids=1;column_names=timestamp;);;};SCANNER:{S:{{finished:{1};internal:{};constructor:{{class_name=GENERAL_ORDERING::not_sorted;internal={{CC:0}};}};in_fly=0;type=FULL_SORTED;}};SP:[{{RESULT;IDX=0;FIN=1;}};]};SF:1;PR:0;); 2026-01-08T15:32:55.549033Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: actor.cpp:457: Scan [1:563:2575] finished for tablet 9437184 2026-01-08T15:32:55.549563Z node 1 :TX_COLUMNSHARD_SCAN INFO: log.cpp:841: SelfId=[1:563:2575];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=actor.cpp:463;event=scan_finish;compute_actor_id=[1:557:2569];stats={"p":[{"events":["f_bootstrap"],"t":0},{"events":["l_bootstrap","f_ProduceResults"],"t":0.002},{"events":["f_ack","l_ack","f_processing","l_processing","l_ProduceResults","f_Finish","l_Finish"],"t":0.003}],"full":{"a":4694307,"name":"_full_task","f":4694307,"d_finished":0,"c":0,"l":4698238,"d":3931},"events":[{"name":"bootstrap","f":4694697,"d_finished":2387,"c":1,"l":4697084,"d":2387},{"a":4697358,"name":"ack","f":4697358,"d_finished":0,"c":0,"l":4698238,"d":880},{"a":4697333,"name":"processing","f":4697333,"d_finished":0,"c":0,"l":4698238,"d":905},{"name":"ProduceResults","f":4696616,"d_finished":923,"c":2,"l":4697920,"d":923},{"a":4697933,"name":"Finish","f":4697933,"d_finished":0,"c":0,"l":4698238,"d":305}],"id":"9437184::1"};iterator=ready_results:(count:0;records_count:0;);indexed_data:(CTX:{ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1;column_names=timestamp;);;program_input=(column_ids=1;column_names=timestamp;);;};SCANNER:{S:{{finished:{1};internal:{};constructor:{{class_name=GENERAL_ORDERING::not_sorted;internal={{CC:0}};}};in_fly=0;type=FULL_SORTED;}};SP:[{{RESULT;IDX=0;FIN=1;}};]};SF:1;PR:0;); 2026-01-08T15:32:55.549678Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:841: SelfId=[1:563:2575];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=actor.cpp:394;event=send_data;compute_actor_id=[1:557:2569];bytes=0;rows=0;faults=0;finished=1;fault=0;schema=; 2026-01-08T15:32:55.550160Z node 1 :TX_COLUMNSHARD_SCAN INFO: log.cpp:841: 
SelfId=[1:563:2575];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=actor.cpp:399;event=scan_finished;compute_actor_id=[1:557:2569];packs_sum=0;bytes=0;bytes_sum=0;rows=0;rows_sum=0;faults=0;finished=1;fault=0;stats={"p":[{"events":["f_bootstrap"],"t":0},{"events":["l_bootstrap","f_ProduceResults"],"t":0.002},{"events":["f_ack","f_processing","l_ProduceResults","f_Finish"],"t":0.003},{"events":["l_ack","l_processing","l_Finish"],"t":0.004}],"full":{"a":4694307,"name":"_full_task","f":4694307,"d_finished":0,"c":0,"l":4698857,"d":4550},"events":[{"name":"bootstrap","f":4694697,"d_finished":2387,"c":1,"l":4697084,"d":2387},{"a":4697358,"name":"ack","f":4697358,"d_finished":0,"c":0,"l":4698857,"d":1499},{"a":4697333,"name":"processing","f":4697333,"d_finished":0,"c":0,"l":4698857,"d":1524},{"name":"ProduceResults","f":4696616,"d_finished":923,"c":2,"l":4697920,"d":923},{"a":4697933,"name":"Finish","f":4697933,"d_finished":0,"c":0,"l":4698857,"d":924}],"id":"9437184::1"};iterator=ready_results:(count:0;records_count:0;);indexed_data:(CTX:{ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1;column_names=timestamp;);;program_input=(column_ids=1;column_names=timestamp;);;};SCANNER:{S:{{finished:{1};internal:{};constructor:{{class_name=GENERAL_ORDERING::not_sorted;internal={{CC:0}};}};in_fly=0;type=FULL_SORTED;}};SP:[{{RESULT;IDX=0;FIN=1;}};]};SF:1;PR:0;); 2026-01-08T15:32:55.550251Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:841: SelfId=[1:563:2575];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=stats.cpp:8;event=statistic;begin=2026-01-08T15:32:55.446472Z;index_granules=0;index_portions=0;index_batches=0;schema_columns=1;filter_columns=0;additional_columns=0;compacted_portions_bytes=0;inserted_portions_bytes=0;committed_portions_bytes=0;data_filter_bytes=0;data_additional_bytes=0;delta_bytes=0;selected_rows=0; 2026-01-08T15:32:55.550309Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:841: SelfId=[1:563:2575];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=read_context.h:196;event=scan_aborted;reason=unexpected on destructor; 2026-01-08T15:32:55.563024Z node 1 :TX_COLUMNSHARD_SCAN INFO: log.cpp:841: SelfId=[1:563:2575];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=context.h:99;fetching=ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1;column_names=timestamp;);;program_input=(column_ids=1;column_names=timestamp;);;; |86.8%| [TM] {BAZEL_UPLOAD} ydb/core/tx/columnshard/ut_schema/unittest >> DataShardWrite::PreparedDistributedWritePageFault [GOOD] >> DataShardWrite::UncommittedUpdateLockMissingRow >> TGRpcCmsTest::DisabledTxTest ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/tx_proxy/ut_ext_tenant/unittest >> TExtSubDomainTest::DeclareAndDefineWithoutNodes-AlterDatabaseCreateHiveFirst-true [GOOD] Test command err: 2026-01-08T15:32:45.434439Z node 1 :METADATA_PROVIDER WARN: log.cpp:841: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7593014124197555550:2082];send_to=[0:7307199536658146131:7762515]; 2026-01-08T15:32:45.435015Z node 1 :METADATA_PROVIDER ERROR: log.cpp:841: fline=accessor_snapshot_base.cpp:71;action=cannot detect path 
existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2026-01-08T15:32:45.609719Z node 1 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:125: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, got undelivered to scheme cache: ActorUnknown 2026-01-08T15:32:46.258400Z node 1 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: LookupError, path: dc-1/.metadata/script_executions 2026-01-08T15:32:46.326238Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2026-01-08T15:32:46.326369Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2026-01-08T15:32:46.337532Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2026-01-08T15:32:46.479678Z node 1 :TX_CONVEYOR ERROR: log.cpp:841: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2026-01-08T15:32:46.533482Z node 1 :IMPORT WARN: schemeshard_import.cpp:440: Table profiles were not loaded 2026-01-08T15:32:46.537265Z node 1 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: PathErrorUnknown, path: dc-1/.metadata/script_executions 2026-01-08T15:32:46.550465Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1374: Notification cookie mismatch for subscription [1:7593014124197555505:2081] 1767886365354860 != 1767886365354863 2026-01-08T15:32:46.954038Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:402: actor# [1:7593014124197555784:2117] Handle TEvNavigate describe path dc-1 2026-01-08T15:32:46.954113Z node 1 :TX_PROXY DEBUG: describe.cpp:270: Actor# [1:7593014128492523599:2455] HANDLE EvNavigateScheme dc-1 2026-01-08T15:32:46.954207Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2810: Handle TEvTxProxySchemeCache::TEvNavigateKeySet: self# [1:7593014124197555793:2120], request# { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1 TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: true SyncVersion: true Status: Unknown Kind: KindUnknown DomainInfo }] } 2026-01-08T15:32:46.956472Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:889: [main][1:7593014128492523351:2291][/dc-1] Handle NKikimr::NSchemeBoard::NInternalEvents::TEvSyncRequest: sender# [1:7593014124197555793:2120], cookie# 1 2026-01-08T15:32:46.958164Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:375: [replica][1:7593014128492523366:2291][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7593014128492523363:2291], cookie# 1 2026-01-08T15:32:46.958218Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:375: [replica][1:7593014128492523367:2291][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7593014128492523364:2291], cookie# 1 2026-01-08T15:32:46.958329Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:375: [replica][1:7593014128492523368:2291][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7593014128492523365:2291], cookie# 1 2026-01-08T15:32:46.958482Z node 1 :SCHEME_BOARD_REPLICA DEBUG: 
replica.cpp:1137: [1:7593014124197555473:2049] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7593014128492523366:2291], cookie# 1 2026-01-08T15:32:46.958510Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1137: [1:7593014124197555476:2052] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7593014128492523367:2291], cookie# 1 2026-01-08T15:32:46.958527Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1137: [1:7593014124197555479:2055] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7593014128492523368:2291], cookie# 1 2026-01-08T15:32:46.958662Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:384: [replica][1:7593014128492523366:2291][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 Cluster State: { } }: sender# [1:7593014124197555473:2049], cookie# 1 2026-01-08T15:32:46.958680Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:384: [replica][1:7593014128492523367:2291][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 Cluster State: { } }: sender# [1:7593014124197555476:2052], cookie# 1 2026-01-08T15:32:46.958704Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:384: [replica][1:7593014128492523368:2291][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 Cluster State: { } }: sender# [1:7593014124197555479:2055], cookie# 1 2026-01-08T15:32:46.958769Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:919: [main][1:7593014128492523351:2291][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 Cluster State: { } }: sender# [1:7593014128492523363:2291], cookie# 1 2026-01-08T15:32:46.958796Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:986: [main][1:7593014128492523351:2291][/dc-1] Sync is in progress: cookie# 1, ring group# 0, size# 3, half# 1, successes# 1, failures# 0 2026-01-08T15:32:46.958834Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:919: [main][1:7593014128492523351:2291][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 Cluster State: { } }: sender# [1:7593014128492523364:2291], cookie# 1 2026-01-08T15:32:46.958864Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:999: [main][1:7593014128492523351:2291][/dc-1] Sync is done in the ring group: cookie# 1, ring group# 0, size# 3, half# 1, successes# 2, failures# 0, partial# 0 2026-01-08T15:32:46.958896Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:919: [main][1:7593014128492523351:2291][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 Cluster State: { } }: sender# [1:7593014128492523365:2291], cookie# 1 2026-01-08T15:32:46.958918Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:925: [main][1:7593014128492523351:2291][/dc-1] Sync cookie mismatch: sender# [1:7593014128492523365:2291], cookie# 1, current cookie# 0 2026-01-08T15:32:46.958995Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2651: HandleNotify: self# [1:7593014124197555793:2120], notify# NKikimr::NSchemeBoard::NInternalEvents::TEvSyncResponse { Path: /dc-1 PathId: Partial: 0 } 2026-01-08T15:32:46.985102Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2526: ResolveCacheItem: self# [1:7593014124197555793:2120], notify# NKikimr::NSchemeBoard::NInternalEvents::TEvSyncResponse { Path: /dc-1 PathId: Partial: 0 }, by path# { Subscriber: { Subscriber: [1:7593014128492523351:2291] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 1 } Filled: 1 Status: StatusSuccess 
Kind: 2 TableKind: 0 Created: 1 CreateStep: 0 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] DomainId: [OwnerId: 72057594046644480, LocalPathId: 1] IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, by pathId# nullptr 2026-01-08T15:32:46.985240Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:1892: FillEntry for TNavigate: self# [1:7593014124197555793:2120], cacheItem# { Subscriber: { Subscriber: [1:7593014128492523351:2291] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 1 } Filled: 1 Status: StatusSuccess Kind: 2 TableKind: 0 Created: 1 CreateStep: 0 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] DomainId: [OwnerId: 72057594046644480, LocalPathId: 1] IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, entry# { Path: dc-1 TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: true SyncVersion: true Status: Unknown Kind: KindUnknown DomainInfo }, props# { Cookie: 1 IsSync: true Partial: 0 } 2026-01-08T15:32:46.988459Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:268: Send result: self# [1:7593014128492523602:2458], recipient# [1:7593014128492523599:2455], result# { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1 TableId: [72057594046644480:1:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: true SyncVersion: true Status: Ok Kind: KindPath DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] Params { Version: 0 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } }] } 2026-01-08T15:32:46.988563Z node 1 :TX_PROXY DEBUG: describe.cpp:354: Actor# [1:7593014128492523599:2455] HANDLE EvNavigateKeySetResult TDescribeReq marker# P5 ErrorCount# 0 2026-01-08T15:32:47.025287Z node 1 :TX_PROXY DEBUG: describe.cpp:433: Actor# [1:7593014128492523599:2455] SEND to# 72057594046644480 shardToRequest NKikimrSchemeOp.TDescribePath Path: "dc-1" Options { ShowPrivateTable: true } 2026-01-08T15:32:47.028009Z node 1 :TX_PROXY DEBUG: describe.cpp:446: Actor# [1:7593014128492523599:2455] Handle TEvDescribeSchemeResult Forward to# [1:7593014128492523597:2453] Cookie: 0 TEvDescribeSchemeResult: NKikimrScheme.TEvDescribeSchemeResult PreSerializedData size# 2 Record# Status: StatusSuccess Path: "dc-1" PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } DomainKey { SchemeShard: 72057594046644480 PathId: 1 } PathsInside: 0 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046644480 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 
SecurityState { Audience: "/dc-1" } SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 1 PathOwnerId: 72057594046644480 2026-01-08T15:32:47.047193Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:314: actor# [1:7593014124197555784:2117] Handle TEvProposeTransaction 2026-01-08T15:32:47.047223Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:237: actor# [1:7593014124197555784:2117] TxId# 281474976710657 ProcessProposeTransaction 2026-01-08T15:32:47.047312Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:256: actor# [1:7593014124197555784:2117] Cookie# 0 userReqId# "" tx ... true ShowPrivatePath: true SyncVersion: true Status: RedirectLookupError Kind: KindExtSubdomain DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 2] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 2] Params { Version: 1 PlanResolution: 0 TimeCastBucketsPerMediator: 0 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } }] } 2026-01-08T15:32:53.512445Z node 2 :TX_PROXY INFO: describe.cpp:354: Actor# [2:7593014155221487904:2687] HANDLE EvNavigateKeySetResult TDescribeReq marker# P5 ErrorCount# 1 2026-01-08T15:32:53.513871Z node 2 :TX_PROXY DEBUG: proxy_impl.cpp:402: actor# [2:7593014146631552424:2102] Handle TEvNavigate describe path /dc-1 2026-01-08T15:32:53.513904Z node 2 :TX_PROXY DEBUG: describe.cpp:270: Actor# [2:7593014155221487907:2690] HANDLE EvNavigateScheme /dc-1 2026-01-08T15:32:53.513971Z node 2 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2810: Handle TEvTxProxySchemeCache::TEvNavigateKeySet: self# [2:7593014146631552454:2114], request# { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1 TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: true SyncVersion: true Status: Unknown Kind: KindUnknown DomainInfo }] } 2026-01-08T15:32:53.514038Z node 2 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:889: [main][2:7593014146631552546:2156][/dc-1] Handle NKikimr::NSchemeBoard::NInternalEvents::TEvSyncRequest: sender# [2:7593014146631552454:2114], cookie# 4 2026-01-08T15:32:53.514125Z node 2 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:375: [replica][2:7593014146631552550:2156][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [2:7593014146631552547:2156], cookie# 4 2026-01-08T15:32:53.514142Z node 2 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:375: [replica][2:7593014146631552551:2156][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [2:7593014146631552548:2156], cookie# 4 2026-01-08T15:32:53.514158Z node 2 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:375: [replica][2:7593014146631552552:2156][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [2:7593014146631552549:2156], cookie# 4 2026-01-08T15:32:53.514184Z node 2 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1137: [2:7593014146631552151:2049] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [2:7593014146631552550:2156], cookie# 4 2026-01-08T15:32:53.514211Z node 2 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1137: 
[2:7593014146631552154:2052] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [2:7593014146631552551:2156], cookie# 4 2026-01-08T15:32:53.514229Z node 2 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1137: [2:7593014146631552157:2055] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [2:7593014146631552552:2156], cookie# 4 2026-01-08T15:32:53.514266Z node 2 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:384: [replica][2:7593014146631552550:2156][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 5 Partial: 0 Cluster State: { } }: sender# [2:7593014146631552151:2049], cookie# 4 2026-01-08T15:32:53.514286Z node 2 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:384: [replica][2:7593014146631552551:2156][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 5 Partial: 0 Cluster State: { } }: sender# [2:7593014146631552154:2052], cookie# 4 2026-01-08T15:32:53.514336Z node 2 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:384: [replica][2:7593014146631552552:2156][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 5 Partial: 0 Cluster State: { } }: sender# [2:7593014146631552157:2055], cookie# 4 2026-01-08T15:32:53.514386Z node 2 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:919: [main][2:7593014146631552546:2156][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 5 Partial: 0 Cluster State: { } }: sender# [2:7593014146631552547:2156], cookie# 4 2026-01-08T15:32:53.514406Z node 2 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:986: [main][2:7593014146631552546:2156][/dc-1] Sync is in progress: cookie# 4, ring group# 0, size# 3, half# 1, successes# 1, failures# 0 2026-01-08T15:32:53.514425Z node 2 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:919: [main][2:7593014146631552546:2156][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 5 Partial: 0 Cluster State: { } }: sender# [2:7593014146631552548:2156], cookie# 4 2026-01-08T15:32:53.514448Z node 2 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:999: [main][2:7593014146631552546:2156][/dc-1] Sync is done in the ring group: cookie# 4, ring group# 0, size# 3, half# 1, successes# 2, failures# 0, partial# 0 2026-01-08T15:32:53.514473Z node 2 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:919: [main][2:7593014146631552546:2156][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 5 Partial: 0 Cluster State: { } }: sender# [2:7593014146631552549:2156], cookie# 4 2026-01-08T15:32:53.514484Z node 2 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:925: [main][2:7593014146631552546:2156][/dc-1] Sync cookie mismatch: sender# [2:7593014146631552549:2156], cookie# 4, current cookie# 0 2026-01-08T15:32:53.514523Z node 2 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2651: HandleNotify: self# [2:7593014146631552454:2114], notify# NKikimr::NSchemeBoard::NInternalEvents::TEvSyncResponse { Path: /dc-1 PathId: Partial: 0 } 2026-01-08T15:32:53.514582Z node 2 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2526: ResolveCacheItem: self# [2:7593014146631552454:2114], notify# NKikimr::NSchemeBoard::NInternalEvents::TEvSyncResponse { Path: /dc-1 PathId: Partial: 0 }, by path# { Subscriber: { Subscriber: [2:7593014146631552546:2156] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 4 } Filled: 1 Status: StatusSuccess Kind: 2 TableKind: 0 Created: 1 CreateStep: 1767886372371 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] DomainId: [OwnerId: 72057594046644480, LocalPathId: 1] IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, by pathId# nullptr 
2026-01-08T15:32:53.514646Z node 2 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:1892: FillEntry for TNavigate: self# [2:7593014146631552454:2114], cacheItem# { Subscriber: { Subscriber: [2:7593014146631552546:2156] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 4 } Filled: 1 Status: StatusSuccess Kind: 2 TableKind: 0 Created: 1 CreateStep: 1767886372371 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] DomainId: [OwnerId: 72057594046644480, LocalPathId: 1] IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, entry# { Path: dc-1 TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: true SyncVersion: true Status: Unknown Kind: KindUnknown DomainInfo }, props# { Cookie: 4 IsSync: true Partial: 0 } 2026-01-08T15:32:53.514777Z node 2 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:268: Send result: self# [2:7593014155221487908:2691], recipient# [2:7593014155221487907:2690], result# { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1 TableId: [72057594046644480:1:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: true SyncVersion: true Status: Ok Kind: KindPath DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] Params { Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } }] } 2026-01-08T15:32:53.514841Z node 2 :TX_PROXY DEBUG: describe.cpp:354: Actor# [2:7593014155221487907:2690] HANDLE EvNavigateKeySetResult TDescribeReq marker# P5 ErrorCount# 0 2026-01-08T15:32:53.514901Z node 2 :TX_PROXY DEBUG: describe.cpp:433: Actor# [2:7593014155221487907:2690] SEND to# 72057594046644480 shardToRequest NKikimrSchemeOp.TDescribePath Path: "/dc-1" Options { ShowPrivateTable: true } 2026-01-08T15:32:53.515520Z node 2 :TX_PROXY DEBUG: describe.cpp:446: Actor# [2:7593014155221487907:2690] Handle TEvDescribeSchemeResult Forward to# [2:7593014155221487906:2689] Cookie: 0 TEvDescribeSchemeResult: NKikimrScheme.TEvDescribeSchemeResult PreSerializedData size# 63 Record# Status: StatusSuccess Path: "/dc-1" PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 1767886372371 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 5 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 5 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 3 SubDomainVersion: 1 SecurityStateVersion: 0 } ChildrenExist: true } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } DomainKey { SchemeShard: 72057594046644480 PathId: 1 } StoragePools { Name: "" Kind: "storage-pool-number-1" } StoragePools { Name: "" Kind: "storage-pool-number-2" } StoragePools { Name: "/dc-1:test" Kind: "test" } PathsInside: 1 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046644480 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SecurityState { Audience: "/dc-1" } 
SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 1 PathOwnerId: 72057594046644480 2026-01-08T15:32:53.588401Z node 2 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2810: Handle TEvTxProxySchemeCache::TEvNavigateKeySet: self# [2:7593014146631552454:2114], request# { ErrorCount: 0 DatabaseName: /dc-1 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/.metadata/initialization/migrations TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }] } 2026-01-08T15:32:53.588529Z node 2 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:1892: FillEntry for TNavigate: self# [2:7593014146631552454:2114], cacheItem# { Subscriber: { Subscriber: [2:7593014150926520555:2676] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 0 } Filled: 1 Status: StatusPathDoesNotExist Kind: 0 TableKind: 0 Created: 0 CreateStep: 0 PathId: DomainId: IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, entry# { Path: dc-1/.metadata/initialization/migrations TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }, props# { Cookie: 0 IsSync: false Partial: 0 } 2026-01-08T15:32:53.588614Z node 2 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:268: Send result: self# [2:7593014155221487911:2692], recipient# [2:7593014155221487910:2298], result# { ErrorCount: 1 DatabaseName: /dc-1 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/.metadata/initialization/migrations TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: PathErrorUnknown Kind: KindUnknown DomainInfo }] } |86.8%| [TM] {BAZEL_UPLOAD} ydb/core/tx/tx_proxy/ut_ext_tenant/unittest >> TxUsage::WriteToTopic_Demo_19_RestartBeforeCommit_Query [GOOD] >> KqpPg::EquiJoin-useSink [GOOD] >> KqpPg::ExplainColumnsReorder >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-LocalUser-CreateUser-7 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-LocalUser-CreateUser-8 >> TxUsage::WriteToTopic_Demo_19_RestartAfterCommit_Table >> TGRpcCmsTest::AuthTokenTest >> DataShardWrite::UpsertPreparedNoTxCache+Volatile [GOOD] >> DataShardWrite::UpsertPreparedNoTxCache-Volatile |86.8%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/tests/tools/fqrun/fqrun |86.8%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/tests/tools/fqrun/fqrun |86.8%| [LD] {RESULT} $(B)/ydb/tests/tools/fqrun/fqrun >> TxUsage::WriteToTopic_Demo_14_Query [GOOD] |86.8%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/statistics/database/ut/ydb-core-statistics-database-ut |86.8%| [LD] {RESULT} $(B)/ydb/core/statistics/database/ut/ydb-core-statistics-database-ut |86.9%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/statistics/database/ut/ydb-core-statistics-database-ut >> TestSqsTopicHttpProxy::TestGetQueueUrlWithConsumer [GOOD] >> TGRpcCmsTest::SimpleTenantsTest >> TxUsage::WriteToTopic_Demo_16_Table >> 
KqpPg::InsertFromSelect_NoReorder-useSink [GOOD] >> KqpPg::InsertFromSelect_Serial+useSink >> TestSqsTopicHttpProxy::TestSendMessageTooBig [GOOD] >> TGRpcCmsTest::DescribeOptionsTest >> TestSqsTopicHttpProxy::TestReceiveMessage >> TGRpcCmsTest::AlterRemoveTest [GOOD] >> DataShardVolatile::DistributedWriteThenCopyTable [GOOD] >> DataShardVolatile::DistributedWriteThenBulkUpsert >> TestSqsTopicHttpProxy::TestGetQueueUrlEmpty [GOOD] |86.8%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/keyvalue/ut/ydb-core-keyvalue-ut |86.9%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/keyvalue/ut/ydb-core-keyvalue-ut |86.9%| [LD] {RESULT} $(B)/ydb/core/keyvalue/ut/ydb-core-keyvalue-ut >> TestSqsTopicHttpProxy::TestSendMessageBatchLong >> DataShardWrite::ExecSQLUpsertPrepared+EvWrite-Volatile [GOOD] >> DataShardWrite::ExecSQLUpsertPrepared-EvWrite+Volatile >> TGRpcCmsTest::SimpleTenantsTestSyncOperation [GOOD] >> TxUsage::Sinks_Oltp_WriteToTopic_2_Query [GOOD] >> TestSqsTopicHttpProxy::TestGetQueueUrlOfNotExistingQueue ------- [TM] {asan, default-linux-x86_64, release} ydb/services/cms/ut/unittest >> TGRpcCmsTest::AlterRemoveTest [GOOD] Test command err: 2026-01-08T15:32:56.544404Z node 1 :METADATA_PROVIDER WARN: log.cpp:841: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7593014168696128945:2076];send_to=[0:7307199536658146131:7762515]; 2026-01-08T15:32:56.544460Z node 1 :METADATA_PROVIDER ERROR: log.cpp:841: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2026-01-08T15:32:56.676410Z node 1 :BS_NODE WARN: {NWDC01@distconf.cpp:423} StateFunc too long Type# 268639239 Duration# 0.009132s 2026-01-08T15:32:56.989108Z node 1 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: LookupError, path: Root/.metadata/script_executions 2026-01-08T15:32:57.186398Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2026-01-08T15:32:57.186498Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2026-01-08T15:32:57.194454Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2026-01-08T15:32:57.293764Z node 1 :IMPORT WARN: schemeshard_import.cpp:440: Table profiles were not loaded 2026-01-08T15:32:57.451769Z node 1 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: PathErrorUnknown, path: Root/.metadata/script_executions 2026-01-08T15:32:57.619092Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2026-01-08T15:32:57.619115Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2026-01-08T15:32:57.619122Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2026-01-08T15:32:57.619242Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2026-01-08T15:32:57.677825Z node 1 :TX_CONVEYOR ERROR: log.cpp:841: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2026-01-08T15:32:57.977742Z node 1 
:FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2026-01-08T15:32:58.035195Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.h:960: StateWork, received event# 273285120, Sender [1:7593014177286064645:2302], Recipient [1:7593014172991096746:2211]: NKikimr::NConsole::TEvConsole::TEvCreateTenantRequest { Request { path: "/Root/users/user-1" resources { storage_units { unit_kind: "hdd" count: 1 } } } UserToken: "" PeerName: "ipv6:[::1]:50580" } 2026-01-08T15:32:58.035233Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.h:963: StateWork, processing event TEvConsole::TEvCreateTenantRequest 2026-01-08T15:32:58.035259Z node 1 :CMS_TENANTS TRACE: tx_processor.cpp:51: TTxProcessor(tenants) enqueue tx 2026-01-08T15:32:58.035267Z node 1 :CMS_TENANTS TRACE: tx_processor.cpp:245: TTxProcessor(tenants) starts new tx 2026-01-08T15:32:58.035351Z node 1 :CMS_TENANTS DEBUG: console__create_tenant.cpp:71: TTxCreateTenant: Request { path: "/Root/users/user-1" resources { storage_units { unit_kind: "hdd" count: 1 } } } UserToken: "" PeerName: "ipv6:[::1]:50580" 2026-01-08T15:32:58.035447Z node 1 :CMS_TENANTS DEBUG: console__create_tenant.cpp:365: Add tenant /Root/users/user-1 (txid = 1767886378035425) 2026-01-08T15:32:58.095895Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.cpp:2568: Add tenant /Root/users/user-1 to database state=CREATING_POOLS coordinators=3 mediators=3 planresolution=10 timecastbucketspermediator=2 issue= txid=1767886378035425 subdomainversion=1 confirmedsubdomain=0 attrs= generation=1 errorcode=STATUS_CODE_UNSPECIFIED isExternalSubDomain=1 isExternalHive=1 isExternalSysViewProcessor=1 isExternalStatisticsAggregator=1 areResourcesShared=0 sharedDomainId= 2026-01-08T15:32:58.096136Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.cpp:2628: Add tenant pool /Root/users/user-1:hdd to database kind=hdd config=BoxId: 999 StoragePoolId: 4 Name: "/Root/users/user-1:hdd" ErasureSpecies: "none" VDiskKind: "Default" Kind: "hdd" NumGroups: 1 PDiskFilter { Property { Type: ROT } } allocatednumgroups=0 state=NOT_ALLOCATED 2026-01-08T15:32:58.099953Z node 1 :CMS_TENANTS DEBUG: console__create_tenant.cpp:375: TTxCreateTenant Complete 2026-01-08T15:32:58.101068Z node 1 :CMS_TENANTS TRACE: console__create_tenant.cpp:383: Send: NKikimr::NConsole::TEvConsole::TEvCreateTenantResponse { Response { operation { id: "ydb://cmsrequest/5?tenant=/Root/users/user-1&cmstid=72057594037936131&txid=1767886378035425&action=1" } } } 2026-01-08T15:32:58.101204Z node 1 :CMS_TENANTS TRACE: tx_processor.cpp:60: TTxProcessor(tenants) completed tx 2026-01-08T15:32:58.101264Z node 1 :CMS_TENANTS DEBUG: console_tenants_manager.cpp:158: TPoolManip(/Root/users/user-1:hdd) Bootstrap 2026-01-08T15:32:58.101411Z node 1 :CMS_TENANTS DEBUG: console_tenants_manager.cpp:117: TPoolManip(/Root/users/user-1:hdd) read pool state: Request { Command { ReadStoragePool { BoxId: 999 Name: "/Root/users/user-1:hdd" } } } 2026-01-08T15:32:58.101807Z node 1 :CMS_TENANTS DEBUG: console_tenants_manager.cpp:198: TPoolManip(/Root/users/user-1:hdd) got read response: Status { Success: true } Success: true ConfigTxSeqNo: 5 2026-01-08T15:32:58.101926Z node 1 :CMS_TENANTS DEBUG: console_tenants_manager.cpp:131: TPoolManip(/Root/users/user-1:hdd) send pool 
request: Request { Command { DefineStoragePool { BoxId: 999 StoragePoolId: 4 Name: "/Root/users/user-1:hdd" ErasureSpecies: "none" VDiskKind: "Default" Kind: "hdd" NumGroups: 1 PDiskFilter { Property { Type: ROT } } } } } 2026-01-08T15:32:58.104807Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.h:960: StateWork, received event# 273285131, Sender [1:7593014177286064653:2303], Recipient [1:7593014172991096746:2211]: NKikimr::NConsole::TEvConsole::TEvGetOperationRequest { Request { id: "ydb://cmsrequest/5?tenant=/Root/users/user-1&cmstid=72057594037936131&txid=1767886378035425&action=1" } UserToken: "" } 2026-01-08T15:32:58.104837Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.h:965: StateWork, processing event TEvConsole::TEvGetOperationRequest 2026-01-08T15:32:58.105012Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.cpp:3344: Send TEvConsole::TEvGetOperationResponse: Response { operation { id: "ydb://cmsrequest/5?tenant=/Root/users/user-1&cmstid=72057594037936131&txid=1767886378035425&action=1" } } 2026-01-08T15:32:58.112759Z node 1 :CMS_TENANTS DEBUG: console_tenants_manager.cpp:244: TPoolManip(/Root/users/user-1:hdd) got config response: Status { Success: true } Success: true ConfigTxSeqNo: 6 2026-01-08T15:32:58.112828Z node 1 :CMS_TENANTS DEBUG: console_tenants_manager.cpp:168: TPoolManip(/Root/users/user-1:hdd) reply with NKikimr::NConsole::TTenantsManager::TEvPrivate::TEvPoolAllocated 2026-01-08T15:32:58.112903Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.h:960: StateWork, received event# 2146435079, Sender [1:7593014177286064650:2211], Recipient [1:7593014172991096746:2211]: NKikimr::NConsole::TTenantsManager::TEvPrivate::TEvPoolAllocated 2026-01-08T15:32:58.112920Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.h:972: StateWork, processing event TEvPrivate::TEvPoolAllocated 2026-01-08T15:32:58.112941Z node 1 :CMS_TENANTS TRACE: tx_processor.cpp:51: TTxProcessor(tenants) enqueue tx 2026-01-08T15:32:58.112949Z node 1 :CMS_TENANTS TRACE: tx_processor.cpp:245: TTxProcessor(tenants) starts new tx 2026-01-08T15:32:58.113000Z node 1 :CMS_TENANTS DEBUG: console__update_pool_state.cpp:28: TTxUpdatePoolState for pool /Root/users/user-1:hdd of /Root/users/user-1 state=ALLOCATED 2026-01-08T15:32:58.113028Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.cpp:3038: Update pool state in database for /Root/users/user-1:hdd state=ALLOCATED allocatednumgroups=1 2026-01-08T15:32:58.113088Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.cpp:3197: Update subdomain version in database for /Root/users/user-1 subdomainversion=2 2026-01-08T15:32:58.120484Z node 1 :CMS_TENANTS DEBUG: console__update_pool_state.cpp:73: TTxUpdatePoolState complete for /Root/users/user-1:hdd 2026-01-08T15:32:58.120511Z node 1 :CMS_TENANTS TRACE: tx_processor.cpp:51: TTxProcessor(tenants) enqueue tx 2026-01-08T15:32:58.120519Z node 1 :CMS_TENANTS TRACE: tx_processor.cpp:60: TTxProcessor(tenants) completed tx 2026-01-08T15:32:58.120527Z node 1 :CMS_TENANTS TRACE: tx_processor.cpp:245: TTxProcessor(tenants) starts new tx 2026-01-08T15:32:58.120578Z node 1 :CMS_TENANTS DEBUG: console__update_tenant_state.cpp:23: TTxUpdateTenantState for tenant /Root/users/user-1 to CREATING_SUBDOMAIN 2026-01-08T15:32:58.120603Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.cpp:3137: Update tenant state in database for /Root/users/user-1 state=CREATING_SUBDOMAIN txid=1767886378035425 errorcode=STATUS_CODE_UNSPECIFIED issue= 2026-01-08T15:32:58.124962Z node 1 :CMS_TENANTS DEBUG: 
console__update_tenant_state.cpp:45: TTxUpdateTenantState complete for /Root/users/user-1 2026-01-08T15:32:58.140210Z node 1 :CMS_TENANTS TRACE: tx_processor.cpp:60: TTxProcessor(tenants) completed tx 2026-01-08T15:32:58.141348Z node 1 :CMS_TENANTS DEBUG: console_tenants_manager.cpp:775: TSubdomainManip(/Root/users/user-1)::Bootstrap 2026-01-08T15:32:58.141364Z node 1 :CMS_TENANTS DEBUG: console_tenants_manager.cpp:597: TSubDomainManip(/Root/users/user-1) create subdomain 2026-01-08T15:32:58.160976Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.cpp:615: TSubdomainManip(/Root/users/user-1) send subdomain creation cmd: NKikimrTxUserProxy.TEvProposeTransaction Transaction { ModifyScheme { WorkingDir: "/Root" OperationType: ESchemeOpCreateExtSubDomain SubDomain { Name: "users/user-1" ExternalSchemeShard: true ExternalHive: true ExternalSysViewProcessor: true ExternalStatisticsAggregator: true GraphShard: true } } } ExecTimeoutPeriod: 18446744073709551615 DatabaseName: "Root" PeerName: "ipv6:[::1]:50580" 2026-01-08T15:32:58.162250Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.h:960: StateWork, received event# 273285131, Sender [1:7593014177286064711:2306], Recipient [1:7593014172991096746:2211]: NKikimr::NConsole::TEvConsole::TEvGetOperationRequest { Request { id: "ydb://cmsrequest/5?tenant=/Root/users/user-1&cmstid=72057594037936131&txid=1767886378035425&action=1" } UserToken: "" } 2026-01-08T15:32:58.162279Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.h:965: StateWork, processing event TEvConsole::TEvGetOperationRequest 2026-01-08T15:32:58.162457Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.cpp:3344: Send TEvConsole::TEvGetOperationResponse: Response { operation { id: "ydb://cmsrequest/5?tenant=/Root/users/user-1&cmstid=72057594037936131&txid=1767886378035425&action=1" } } 2026-01-08T15:32:58.162656Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateExtSubDomain, opId: 281474976715658:1, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_extsubdomain.cpp:194) 2026-01-08T15:32:58.166553Z node 1 :CMS_TENANTS DEBUG: console_tenants_manager.cpp:823: TSubdomainManip(/Root/users/user-1) got propose result: Status: 53 TxId: 281474976715658 SchemeShardStatus: 1 SchemeShardTabletId: 72057594046644480 PathId: 39 2026-01-08T15:32:58.166616Z node 1 :CMS_TENANTS TRACE: console_tenants_man ... 
onsole_tenants_manager.cpp:800: TSubdomainManip(/Root/users/user-1) got TEvNotifyTxCompletionResult: TxId: 281474976715659 2026-01-08T15:32:58.274337Z node 1 :CMS_TENANTS DEBUG: console_tenants_manager.cpp:685: TSubdomainManip(/Root/users/user-1) done 2026-01-08T15:32:58.274367Z node 1 :CMS_TENANTS DEBUG: console_tenants_manager.cpp:701: TSubdomainManip(/Root/users/user-1) reply with NKikimr::NConsole::TTenantsManager::TEvPrivate::TEvSubdomainReady 2026-01-08T15:32:58.274458Z node 1 :CMS_TENANTS DEBUG: console_tenants_manager.cpp:823: TSubdomainManip(/Root/users/user-1) got propose result: Status: 53 TxId: 281474976715660 SchemeShardStatus: 1 SchemeShardTabletId: 72057594046644480 2026-01-08T15:32:58.274508Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.cpp:759: TSubdomainManip(/Root/users/user-1) send notification request: NKikimrScheme.TEvNotifyTxCompletion TxId: 281474976715660 2026-01-08T15:32:58.274570Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.h:960: StateWork, received event# 2146435076, Sender [1:7593014177286064754:2211], Recipient [1:7593014172991096746:2211]: NKikimr::NConsole::TTenantsManager::TEvPrivate::TEvSubdomainReady 2026-01-08T15:32:58.274589Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.h:979: StateWork, processing event TEvPrivate::TEvSubdomainReady 2026-01-08T15:32:58.274600Z node 1 :CMS_TENANTS DEBUG: console_tenants_manager.cpp:3652: Ignoring ready subdomain for tenant /Root/users/user-1 in REMOVING_SUBDOMAIN state 2026-01-08T15:32:58.277187Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.h:960: StateWork, received event# 273285131, Sender [1:7593014177286064872:2313], Recipient [1:7593014172991096746:2211]: NKikimr::NConsole::TEvConsole::TEvGetOperationRequest { Request { id: "ydb://cmsrequest/5?tenant=/Root/users/user-1&cmstid=72057594037936131&txid=1767886378250099&action=2" } UserToken: "" } 2026-01-08T15:32:58.277220Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.h:965: StateWork, processing event TEvConsole::TEvGetOperationRequest 2026-01-08T15:32:58.277371Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.cpp:3344: Send TEvConsole::TEvGetOperationResponse: Response { operation { id: "ydb://cmsrequest/5?tenant=/Root/users/user-1&cmstid=72057594037936131&txid=1767886378250099&action=2" } } 2026-01-08T15:32:58.281325Z node 1 :CMS_TENANTS DEBUG: console_tenants_manager.cpp:795: TSubdomainManip(/Root/users/user-1) got TEvNotifyTxCompletionRegistered: TxId: 281474976715660 2026-01-08T15:32:58.289029Z node 1 :HIVE WARN: tx__delete_tablet.cpp:91: HIVE#72057594037968897 THive::TTxDeleteTablet tablet (72057594046644480,2) wasn't found 2026-01-08T15:32:58.289173Z node 1 :HIVE WARN: tx__delete_tablet.cpp:91: HIVE#72057594037968897 THive::TTxDeleteTablet tablet (72057594046644480,7) wasn't found 2026-01-08T15:32:58.289217Z node 1 :HIVE WARN: tx__delete_tablet.cpp:91: HIVE#72057594037968897 THive::TTxDeleteTablet tablet (72057594046644480,4) wasn't found 2026-01-08T15:32:58.289254Z node 1 :HIVE WARN: tx__delete_tablet.cpp:91: HIVE#72057594037968897 THive::TTxDeleteTablet tablet (72057594046644480,10) wasn't found 2026-01-08T15:32:58.289411Z node 1 :HIVE WARN: tx__delete_tablet.cpp:91: HIVE#72057594037968897 THive::TTxDeleteTablet tablet (72057594046644480,9) wasn't found 2026-01-08T15:32:58.289454Z node 1 :HIVE WARN: tx__delete_tablet.cpp:91: HIVE#72057594037968897 THive::TTxDeleteTablet tablet (72057594046644480,6) wasn't found 2026-01-08T15:32:58.289488Z node 1 :HIVE WARN: tx__delete_tablet.cpp:91: HIVE#72057594037968897 
THive::TTxDeleteTablet tablet (72057594046644480,3) wasn't found 2026-01-08T15:32:58.290117Z node 1 :HIVE WARN: tx__delete_tablet.cpp:91: HIVE#72057594037968897 THive::TTxDeleteTablet tablet (72057594046644480,8) wasn't found 2026-01-08T15:32:58.290215Z node 1 :HIVE WARN: tx__delete_tablet.cpp:91: HIVE#72057594037968897 THive::TTxDeleteTablet tablet (72057594046644480,5) wasn't found 2026-01-08T15:32:58.292310Z node 1 :CMS_TENANTS DEBUG: console_tenants_manager.cpp:800: TSubdomainManip(/Root/users/user-1) got TEvNotifyTxCompletionResult: TxId: 281474976715660 2026-01-08T15:32:58.292319Z node 1 :CMS_TENANTS DEBUG: console_tenants_manager.cpp:685: TSubdomainManip(/Root/users/user-1) done 2026-01-08T15:32:58.292358Z node 1 :CMS_TENANTS DEBUG: console_tenants_manager.cpp:701: TSubdomainManip(/Root/users/user-1) reply with NKikimr::NConsole::TTenantsManager::TEvPrivate::TEvSubdomainRemoved 2026-01-08T15:32:58.292491Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.h:960: StateWork, received event# 2146435077, Sender [1:7593014177286064859:2211], Recipient [1:7593014172991096746:2211]: NKikimr::NConsole::TTenantsManager::TEvPrivate::TEvSubdomainRemoved 2026-01-08T15:32:58.292516Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.h:980: StateWork, processing event TEvPrivate::TEvSubdomainRemoved 2026-01-08T15:32:58.292529Z node 1 :CMS_TENANTS TRACE: tx_processor.cpp:51: TTxProcessor(tenants) enqueue tx 2026-01-08T15:32:58.292537Z node 1 :CMS_TENANTS TRACE: tx_processor.cpp:245: TTxProcessor(tenants) starts new tx 2026-01-08T15:32:58.292565Z node 1 :CMS_TENANTS DEBUG: console__remove_computational_units.cpp:20: TTxRemoveComputationalUnits Execute /Root/users/user-1 2026-01-08T15:32:58.292581Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.cpp:3137: Update tenant state in database for /Root/users/user-1 state=REMOVING_UNITS txid=1767886378250099 errorcode=STATUS_CODE_UNSPECIFIED issue= 2026-01-08T15:32:58.292633Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.cpp:2918: Remove computational units of /Root/users/user-1 from database txid=1767886378250099 issue= 2026-01-08T15:32:58.298445Z node 1 :CMS_TENANTS DEBUG: console__remove_computational_units.cpp:34: TTxRemoveComputationalUnits Complete /Root/users/user-1 2026-01-08T15:32:58.298524Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.cpp:2105: Send TEvTenantSlotBroker::TEvAlterTenant: TenantName: "/Root/users/user-1" 2026-01-08T15:32:58.298537Z node 1 :CMS_TENANTS TRACE: tx_processor.cpp:60: TTxProcessor(tenants) completed tx 2026-01-08T15:32:58.298857Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.h:960: StateWork, received event# 273154052, Sender [1:7593014168696129327:2213], Recipient [1:7593014172991096746:2211]: NKikimrTenantSlotBroker.TTenantState TenantName: "/Root/users/user-1" 2026-01-08T15:32:58.298873Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.h:984: StateWork, processing event TEvTenantSlotBroker::TEvTenantState 2026-01-08T15:32:58.298895Z node 1 :CMS_TENANTS TRACE: tx_processor.cpp:51: TTxProcessor(tenants) enqueue tx 2026-01-08T15:32:58.298903Z node 1 :CMS_TENANTS TRACE: tx_processor.cpp:245: TTxProcessor(tenants) starts new tx 2026-01-08T15:32:58.298929Z node 1 :CMS_TENANTS DEBUG: console__update_tenant_state.cpp:23: TTxUpdateTenantState for tenant /Root/users/user-1 to REMOVING_POOLS 2026-01-08T15:32:58.298951Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.cpp:3137: Update tenant state in database for /Root/users/user-1 state=REMOVING_POOLS txid=1767886378250099 errorcode=STATUS_CODE_UNSPECIFIED 
issue= 2026-01-08T15:32:58.303259Z node 1 :CMS_TENANTS DEBUG: console__update_tenant_state.cpp:45: TTxUpdateTenantState complete for /Root/users/user-1 2026-01-08T15:32:58.303302Z node 1 :CMS_TENANTS TRACE: tx_processor.cpp:60: TTxProcessor(tenants) completed tx 2026-01-08T15:32:58.303332Z node 1 :CMS_TENANTS DEBUG: console_tenants_manager.cpp:158: TPoolManip(/Root/users/user-1:hdd) Bootstrap 2026-01-08T15:32:58.303430Z node 1 :CMS_TENANTS DEBUG: console_tenants_manager.cpp:117: TPoolManip(/Root/users/user-1:hdd) read pool state: Request { Command { ReadStoragePool { BoxId: 999 Name: "/Root/users/user-1:hdd" } } } 2026-01-08T15:32:58.303913Z node 1 :CMS_TENANTS DEBUG: console_tenants_manager.cpp:198: TPoolManip(/Root/users/user-1:hdd) got read response: Status { Success: true StoragePool { BoxId: 999 StoragePoolId: 4 Name: "/Root/users/user-1:hdd" ErasureSpecies: "none" Geometry { } VDiskKind: "Default" Kind: "hdd" NumGroups: 2 PDiskFilter { Property { Type: ROT } } ScopeId { X1: 72057594046644480 X2: 39 } ItemConfigGeneration: 3 } } Success: true ConfigTxSeqNo: 13 2026-01-08T15:32:58.304022Z node 1 :CMS_TENANTS DEBUG: console_tenants_manager.cpp:151: TPoolManip(/Root/users/user-1:hdd) send pool request: Request { Command { DeleteStoragePool { BoxId: 999 StoragePoolId: 4 ItemConfigGeneration: 3 } } } 2026-01-08T15:32:58.308645Z node 1 :CMS_TENANTS DEBUG: console_tenants_manager.cpp:306: TPoolManip(/Root/users/user-1:hdd) got config response: Status { Success: true } Success: true ConfigTxSeqNo: 14 2026-01-08T15:32:58.308768Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.h:960: StateWork, received event# 2146435081, Sender [1:7593014177286064964:2211], Recipient [1:7593014172991096746:2211]: NKikimr::NConsole::TTenantsManager::TEvPrivate::TEvPoolDeleted 2026-01-08T15:32:58.308801Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.h:973: StateWork, processing event TEvPrivate::TEvPoolDeleted 2026-01-08T15:32:58.308817Z node 1 :CMS_TENANTS TRACE: tx_processor.cpp:51: TTxProcessor(tenants) enqueue tx 2026-01-08T15:32:58.308827Z node 1 :CMS_TENANTS TRACE: tx_processor.cpp:245: TTxProcessor(tenants) starts new tx 2026-01-08T15:32:58.308875Z node 1 :CMS_TENANTS DEBUG: console__update_pool_state.cpp:28: TTxUpdatePoolState for pool /Root/users/user-1:hdd of /Root/users/user-1 state=DELETED 2026-01-08T15:32:58.308897Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.cpp:3038: Update pool state in database for /Root/users/user-1:hdd state=DELETED allocatednumgroups=0 2026-01-08T15:32:58.317744Z node 1 :CMS_TENANTS DEBUG: console__update_pool_state.cpp:73: TTxUpdatePoolState complete for /Root/users/user-1:hdd 2026-01-08T15:32:58.317774Z node 1 :CMS_TENANTS TRACE: tx_processor.cpp:51: TTxProcessor(tenants) enqueue tx 2026-01-08T15:32:58.317782Z node 1 :CMS_TENANTS TRACE: tx_processor.cpp:60: TTxProcessor(tenants) completed tx 2026-01-08T15:32:58.317788Z node 1 :CMS_TENANTS TRACE: tx_processor.cpp:245: TTxProcessor(tenants) starts new tx 2026-01-08T15:32:58.317839Z node 1 :CMS_TENANTS DEBUG: console__remove_tenant_done.cpp:22: TTxRemoveTenantDone for tenant /Root/users/user-1 txid=1767886378250099 2026-01-08T15:32:58.317857Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.cpp:2918: Remove computational units of /Root/users/user-1 from database txid=1767886378250099 issue= 2026-01-08T15:32:58.317868Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.cpp:2949: Remove tenant /Root/users/user-1 from database txid=1767886378250099 issue= 2026-01-08T15:32:58.317875Z node 1 :CMS_TENANTS TRACE: 
console_tenants_manager.cpp:2954: Remove pool /Root/users/user-1:hdd from database 2026-01-08T15:32:58.317943Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.cpp:3074: Add tenant removal info for /Root/users/user-1 txid=1767886378250099 code=SUCCESS errorcode=STATUS_CODE_UNSPECIFIED issue= 2026-01-08T15:32:58.328335Z node 1 :CMS_TENANTS DEBUG: console__remove_tenant_done.cpp:34: TTxRemoveTenantDone Complete 2026-01-08T15:32:58.328419Z node 1 :CMS_TENANTS TRACE: tx_processor.cpp:60: TTxProcessor(tenants) completed tx 2026-01-08T15:32:58.331722Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.h:960: StateWork, received event# 273285131, Sender [1:7593014177286064983:2316], Recipient [1:7593014172991096746:2211]: NKikimr::NConsole::TEvConsole::TEvGetOperationRequest { Request { id: "ydb://cmsrequest/5?tenant=/Root/users/user-1&cmstid=72057594037936131&txid=1767886378250099&action=2" } UserToken: "" } 2026-01-08T15:32:58.331748Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.h:965: StateWork, processing event TEvConsole::TEvGetOperationRequest 2026-01-08T15:32:58.331928Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.cpp:3344: Send TEvConsole::TEvGetOperationResponse: Response { operation { id: "ydb://cmsrequest/5?tenant=/Root/users/user-1&cmstid=72057594037936131&txid=1767886378250099&action=2" ready: true status: SUCCESS } } |86.9%| [TM] {BAZEL_UPLOAD} ydb/services/cms/ut/unittest >> DataShardWrite::DeletePrepared-Volatile [GOOD] >> DataShardWrite::DelayedVolatileTxAndEvWrite >> TGRpcCmsTest::DisabledTxTest [GOOD] >> TGRpcCmsTest::RemoveWithAnotherTokenTest [GOOD] >> TxUsage::Sinks_Oltp_WriteToTopic_3_Table |86.9%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/sharding/ut/ydb-core-tx-sharding-ut |86.9%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/sharding/ut/ydb-core-tx-sharding-ut |86.9%| [LD] {RESULT} $(B)/ydb/core/tx/sharding/ut/ydb-core-tx-sharding-ut >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-BuiltinUser-ModifyUser-47 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-BuiltinUser-ModifyUser-48 ------- [TM] {asan, default-linux-x86_64, release} ydb/services/cms/ut/unittest >> TGRpcCmsTest::SimpleTenantsTestSyncOperation [GOOD] Test command err: 2026-01-08T15:32:53.001918Z node 1 :METADATA_PROVIDER WARN: log.cpp:841: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7593014155873098214:2079];send_to=[0:7307199536658146131:7762515]; 2026-01-08T15:32:53.001971Z node 1 :METADATA_PROVIDER ERROR: log.cpp:841: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2026-01-08T15:32:53.898406Z node 1 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: LookupError, path: Root/.metadata/script_executions 2026-01-08T15:32:53.991477Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2026-01-08T15:32:53.991604Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2026-01-08T15:32:54.225425Z node 1 :IMPORT WARN: schemeshard_import.cpp:440: Table profiles were not loaded 2026-01-08T15:32:54.266091Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2026-01-08T15:32:54.281114Z node 1 :KQP_PROXY WARN: 
kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: PathErrorUnknown, path: Root/.metadata/script_executions 2026-01-08T15:32:54.309827Z node 1 :TX_CONVEYOR ERROR: log.cpp:841: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2026-01-08T15:32:54.631094Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2026-01-08T15:32:54.631120Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2026-01-08T15:32:54.631127Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2026-01-08T15:32:54.631197Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2026-01-08T15:32:55.173008Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2026-01-08T15:32:55.290397Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.h:960: StateWork, received event# 273285120, Sender [1:7593014164463033890:2303], Recipient [1:7593014155873098705:2208]: NKikimr::NConsole::TEvConsole::TEvCreateTenantRequest { Request { operation_params { operation_mode: SYNC } path: "/Root/users/user-1" resources { storage_units { unit_kind: "hdd" count: 1 } } } UserToken: "" PeerName: "ipv6:[::1]:51490" } 2026-01-08T15:32:55.290447Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.h:963: StateWork, processing event TEvConsole::TEvCreateTenantRequest 2026-01-08T15:32:55.290476Z node 1 :CMS_TENANTS TRACE: tx_processor.cpp:51: TTxProcessor(tenants) enqueue tx 2026-01-08T15:32:55.290506Z node 1 :CMS_TENANTS TRACE: tx_processor.cpp:245: TTxProcessor(tenants) starts new tx 2026-01-08T15:32:55.290659Z node 1 :CMS_TENANTS DEBUG: console__create_tenant.cpp:71: TTxCreateTenant: Request { operation_params { operation_mode: SYNC } path: "/Root/users/user-1" resources { storage_units { unit_kind: "hdd" count: 1 } } } UserToken: "" PeerName: "ipv6:[::1]:51490" 2026-01-08T15:32:55.290804Z node 1 :CMS_TENANTS DEBUG: console__create_tenant.cpp:365: Add tenant /Root/users/user-1 (txid = 1767886375290512) 2026-01-08T15:32:55.324633Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.cpp:2568: Add tenant /Root/users/user-1 to database state=CREATING_POOLS coordinators=3 mediators=3 planresolution=10 timecastbucketspermediator=2 issue= txid=1767886375290512 subdomainversion=1 confirmedsubdomain=0 attrs= generation=1 errorcode=STATUS_CODE_UNSPECIFIED isExternalSubDomain=1 isExternalHive=1 isExternalSysViewProcessor=1 isExternalStatisticsAggregator=1 areResourcesShared=0 sharedDomainId= 2026-01-08T15:32:55.324901Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.cpp:2628: Add tenant pool /Root/users/user-1:hdd to database kind=hdd config=BoxId: 999 StoragePoolId: 4 Name: "/Root/users/user-1:hdd" ErasureSpecies: "none" VDiskKind: "Default" Kind: "hdd" NumGroups: 1 PDiskFilter { Property { Type: ROT } } allocatednumgroups=0 state=NOT_ALLOCATED 2026-01-08T15:32:55.328433Z node 1 :CMS_TENANTS DEBUG: console__create_tenant.cpp:375: TTxCreateTenant Complete 2026-01-08T15:32:55.330532Z node 1 :CMS_TENANTS TRACE: 
console__create_tenant.cpp:383: Send: NKikimr::NConsole::TEvConsole::TEvCreateTenantResponse { Response { operation { id: "ydb://cmsrequest/5?tenant=/Root/users/user-1&cmstid=72057594037936131&txid=1767886375290512&action=1" } } } 2026-01-08T15:32:55.330691Z node 1 :CMS_TENANTS TRACE: tx_processor.cpp:60: TTxProcessor(tenants) completed tx 2026-01-08T15:32:55.330761Z node 1 :CMS_TENANTS DEBUG: console_tenants_manager.cpp:158: TPoolManip(/Root/users/user-1:hdd) Bootstrap 2026-01-08T15:32:55.330885Z node 1 :CMS_TENANTS DEBUG: console_tenants_manager.cpp:117: TPoolManip(/Root/users/user-1:hdd) read pool state: Request { Command { ReadStoragePool { BoxId: 999 Name: "/Root/users/user-1:hdd" } } } 2026-01-08T15:32:55.331366Z node 1 :CMS_TENANTS DEBUG: console_tenants_manager.cpp:198: TPoolManip(/Root/users/user-1:hdd) got read response: Status { Success: true } Success: true ConfigTxSeqNo: 5 2026-01-08T15:32:55.331482Z node 1 :CMS_TENANTS DEBUG: console_tenants_manager.cpp:131: TPoolManip(/Root/users/user-1:hdd) send pool request: Request { Command { DefineStoragePool { BoxId: 999 StoragePoolId: 4 Name: "/Root/users/user-1:hdd" ErasureSpecies: "none" VDiskKind: "Default" Kind: "hdd" NumGroups: 1 PDiskFilter { Property { Type: ROT } } } } } 2026-01-08T15:32:55.333480Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.h:960: StateWork, received event# 273285139, Sender [1:7593014164463033890:2303], Recipient [1:7593014155873098705:2208]: NKikimr::NConsole::TEvConsole::TEvNotifyOperationCompletionRequest { Request { id: "ydb://cmsrequest/5?tenant=/Root/users/user-1&cmstid=72057594037936131&txid=1767886375290512&action=1" } UserToken: "" PeerName: "ipv6:[::1]:51490" } 2026-01-08T15:32:55.333498Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.h:968: StateWork, processing event TEvConsole::TEvNotifyOperationCompletionRequest 2026-01-08T15:32:55.333718Z node 1 :CMS_TENANTS DEBUG: console_tenants_manager.cpp:3434: Add subscription to /Root/users/user-1 for [1:7593014164463033890:2303] 2026-01-08T15:32:55.333822Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.cpp:3442: Send TEvConsole::TEvNotifyOperationCompletionResponse: Response { operation { id: "ydb://cmsrequest/5?tenant=/Root/users/user-1&cmstid=72057594037936131&txid=1767886375290512&action=1" } } 2026-01-08T15:32:55.339043Z node 1 :CMS_TENANTS DEBUG: console_tenants_manager.cpp:244: TPoolManip(/Root/users/user-1:hdd) got config response: Status { Success: true } Success: true ConfigTxSeqNo: 6 2026-01-08T15:32:55.339112Z node 1 :CMS_TENANTS DEBUG: console_tenants_manager.cpp:168: TPoolManip(/Root/users/user-1:hdd) reply with NKikimr::NConsole::TTenantsManager::TEvPrivate::TEvPoolAllocated 2026-01-08T15:32:55.339190Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.h:960: StateWork, received event# 2146435079, Sender [1:7593014164463033895:2208], Recipient [1:7593014155873098705:2208]: NKikimr::NConsole::TTenantsManager::TEvPrivate::TEvPoolAllocated 2026-01-08T15:32:55.339213Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.h:972: StateWork, processing event TEvPrivate::TEvPoolAllocated 2026-01-08T15:32:55.339226Z node 1 :CMS_TENANTS TRACE: tx_processor.cpp:51: TTxProcessor(tenants) enqueue tx 2026-01-08T15:32:55.339233Z node 1 :CMS_TENANTS TRACE: tx_processor.cpp:245: TTxProcessor(tenants) starts new tx 2026-01-08T15:32:55.339385Z node 1 :CMS_TENANTS DEBUG: console__update_pool_state.cpp:28: TTxUpdatePoolState for pool /Root/users/user-1:hdd of /Root/users/user-1 state=ALLOCATED 2026-01-08T15:32:55.339417Z node 1 :CMS_TENANTS TRACE: 
console_tenants_manager.cpp:3038: Update pool state in database for /Root/users/user-1:hdd state=ALLOCATED allocatednumgroups=1 2026-01-08T15:32:55.339485Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.cpp:3197: Update subdomain version in database for /Root/users/user-1 subdomainversion=2 2026-01-08T15:32:55.345722Z node 1 :CMS_TENANTS DEBUG: console__update_pool_state.cpp:73: TTxUpdatePoolState complete for /Root/users/user-1:hdd 2026-01-08T15:32:55.345766Z node 1 :CMS_TENANTS TRACE: tx_processor.cpp:51: TTxProcessor(tenants) enqueue tx 2026-01-08T15:32:55.345775Z node 1 :CMS_TENANTS TRACE: tx_processor.cpp:60: TTxProcessor(tenants) completed tx 2026-01-08T15:32:55.345783Z node 1 :CMS_TENANTS TRACE: tx_processor.cpp:245: TTxProcessor(tenants) starts new tx 2026-01-08T15:32:55.345845Z node 1 :CMS_TENANTS DEBUG: console__update_tenant_state.cpp:23: TTxUpdateTenantState for tenant /Root/users/user-1 to CREATING_SUBDOMAIN 2026-01-08T15:32:55.345869Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.cpp:3137: Update tenant state in database for /Root/users/user-1 state=CREATING_SUBDOMAIN txid=1767886375290512 errorcode=STATUS_CODE_UNSPECIFIED issue= 2026-01-08T15:32:55.350277Z node 1 :CMS_TENANTS DEBUG: console__update_tenant_state.cpp:45: TTxUpdateTenantState complete for /Root/users/user-1 2026-01-08T15:32:55.374129Z node 1 :CMS_TENANTS TRACE: tx_processor.cpp:60: TTxProcessor(tenants) completed tx 2026-01-08T15:32:55.374454Z node 1 :CMS_TENANTS DEBUG: console_tenants_manager.cpp:775: TSubdomainManip(/Root/users/user-1)::Bootstrap 2026-01-08T15:32:55.374467Z node 1 :CMS_TENANTS DEBUG: console_tenants_manager.cpp:597: TSubDomainManip(/Root/users/user-1) create subdomain 2026-01-08T15:32:55.386241Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.cpp:615: TSubdomainManip(/Root/users/user-1) send subdomain creation cmd: NKikimrTxUserProxy.TEvProposeTransaction Transaction { ModifyScheme { WorkingDir: "/Root" OperationType: ESchemeOpCreateExtSubDomain SubDomain { Name: "users/user-1" ExternalSchemeShard: true ExternalHive: true ExternalSysViewProcessor: true ExternalStatisticsAggregator: true GraphShard: true } } } ExecTimeoutPeriod: 18446744073709551615 DatabaseName: "Root" PeerName: "ipv6:[::1]:51490" 2026-01-08T15:32:55.388975Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateExtSubDomain, opId: 281474976715658:1, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_extsubdomain.cpp:194) 2026-01-08T15:32:55.393315Z node 1 :CMS_TENANTS DEBUG: console_tenants_manager.cpp:823: TSubdomainManip(/Root/users/user-1) got propose result: Status: 53 TxId: 281474976715658 SchemeShardStatus: 1 SchemeShardTabletId: 72057594046644480 PathId: 39 2026-01-08T15:32:55.393398Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.cpp:759: TSubdomainManip(/Root/users/user-1) send notification request: NKikimrScheme.TEvNotifyTxCompletion TxId: 281474976715658 2026-01-08T15:32:55.403935Z node 1 :CMS_TENANTS DEBUG: console_tenants_manager.cpp:795: TSubdomainManip(/Root/users/user-1) got TEvNotifyTxCompletionRegistered: TxId: 281474976715658 2026-01-08T15:32:55.418721Z node 1 :CMS_TENANTS DEBUG: console_tenants_manager.cpp:800: TSubdomainManip(/Root/users/user-1) got TEvNotifyTxCompletionResult: TxId: 281474976715658 2026-01-08T15:32:55.421266Z node 1 :CMS_TENANTS DEBUG: console_tenants_manager.cpp:872: 
TSubdomainManip(/Root/users/user-1) got ... Hive::TTxDeleteTablet tablet (72057594046644480,8) wasn't found - using supplied 72075186224037889 2026-01-08T15:32:56.850244Z node 1 :HIVE WARN: tx__delete_tablet.cpp:88: HIVE#72057594037968897 THive::TTxDeleteTablet tablet (72057594046644480,5) wasn't found - using supplied 72075186224037896 2026-01-08T15:32:56.852543Z node 1 :CMS_TENANTS DEBUG: console_tenants_manager.cpp:800: TSubdomainManip(/Root/users/user-1) got TEvNotifyTxCompletionResult: TxId: 281474976715660 2026-01-08T15:32:56.852567Z node 1 :CMS_TENANTS DEBUG: console_tenants_manager.cpp:685: TSubdomainManip(/Root/users/user-1) done 2026-01-08T15:32:56.852612Z node 1 :CMS_TENANTS DEBUG: console_tenants_manager.cpp:701: TSubdomainManip(/Root/users/user-1) reply with NKikimr::NConsole::TTenantsManager::TEvPrivate::TEvSubdomainRemoved 2026-01-08T15:32:56.852732Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.h:960: StateWork, received event# 2146435077, Sender [1:7593014168758001847:2208], Recipient [1:7593014155873098705:2208]: NKikimr::NConsole::TTenantsManager::TEvPrivate::TEvSubdomainRemoved 2026-01-08T15:32:56.852749Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.h:980: StateWork, processing event TEvPrivate::TEvSubdomainRemoved 2026-01-08T15:32:56.852769Z node 1 :CMS_TENANTS TRACE: tx_processor.cpp:51: TTxProcessor(tenants) enqueue tx 2026-01-08T15:32:56.852780Z node 1 :CMS_TENANTS TRACE: tx_processor.cpp:245: TTxProcessor(tenants) starts new tx 2026-01-08T15:32:56.852814Z node 1 :CMS_TENANTS DEBUG: console__remove_computational_units.cpp:20: TTxRemoveComputationalUnits Execute /Root/users/user-1 2026-01-08T15:32:56.852836Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.cpp:3137: Update tenant state in database for /Root/users/user-1 state=REMOVING_UNITS txid=1767886376694346 errorcode=STATUS_CODE_UNSPECIFIED issue= 2026-01-08T15:32:56.852900Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.cpp:2918: Remove computational units of /Root/users/user-1 from database txid=1767886376694346 issue= 2026-01-08T15:32:56.855544Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__delete_tablet_reply.cpp:41: Got DeleteTabletReply with Forward response from Hive 72057594037968897 to Hive 72075186224037888 shardIdx 72057594046644480:9 2026-01-08T15:32:56.855592Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__delete_tablet_reply.cpp:41: Got DeleteTabletReply with Forward response from Hive 72057594037968897 to Hive 72075186224037888 shardIdx 72057594046644480:6 2026-01-08T15:32:56.855632Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__delete_tablet_reply.cpp:41: Got DeleteTabletReply with Forward response from Hive 72057594037968897 to Hive 72075186224037888 shardIdx 72057594046644480:3 2026-01-08T15:32:56.855666Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__delete_tablet_reply.cpp:41: Got DeleteTabletReply with Forward response from Hive 72057594037968897 to Hive 72075186224037888 shardIdx 72057594046644480:8 2026-01-08T15:32:56.855697Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__delete_tablet_reply.cpp:41: Got DeleteTabletReply with Forward response from Hive 72057594037968897 to Hive 72075186224037888 shardIdx 72057594046644480:5 2026-01-08T15:32:56.862474Z node 1 :CMS_TENANTS DEBUG: console__remove_computational_units.cpp:34: TTxRemoveComputationalUnits Complete /Root/users/user-1 2026-01-08T15:32:56.862576Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.cpp:2105: Send TEvTenantSlotBroker::TEvAlterTenant: TenantName: "/Root/users/user-1" 2026-01-08T15:32:56.862594Z node 1 
:CMS_TENANTS TRACE: tx_processor.cpp:60: TTxProcessor(tenants) completed tx 2026-01-08T15:32:56.862977Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.h:960: StateWork, received event# 273154052, Sender [1:7593014155873098583:2209], Recipient [1:7593014155873098705:2208]: NKikimrTenantSlotBroker.TTenantState TenantName: "/Root/users/user-1" 2026-01-08T15:32:56.862994Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.h:984: StateWork, processing event TEvTenantSlotBroker::TEvTenantState 2026-01-08T15:32:56.863019Z node 1 :CMS_TENANTS TRACE: tx_processor.cpp:51: TTxProcessor(tenants) enqueue tx 2026-01-08T15:32:56.863028Z node 1 :CMS_TENANTS TRACE: tx_processor.cpp:245: TTxProcessor(tenants) starts new tx 2026-01-08T15:32:56.863053Z node 1 :CMS_TENANTS DEBUG: console__update_tenant_state.cpp:23: TTxUpdateTenantState for tenant /Root/users/user-1 to REMOVING_POOLS 2026-01-08T15:32:56.863073Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.cpp:3137: Update tenant state in database for /Root/users/user-1 state=REMOVING_POOLS txid=1767886376694346 errorcode=STATUS_CODE_UNSPECIFIED issue= 2026-01-08T15:32:56.876314Z node 1 :HIVE WARN: hive_impl.cpp:519: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 3, TabletId: 72075186224037888 not found 2026-01-08T15:32:56.877167Z node 1 :CMS_TENANTS DEBUG: console__update_tenant_state.cpp:45: TTxUpdateTenantState complete for /Root/users/user-1 2026-01-08T15:32:56.877213Z node 1 :CMS_TENANTS TRACE: tx_processor.cpp:60: TTxProcessor(tenants) completed tx 2026-01-08T15:32:56.877248Z node 1 :CMS_TENANTS DEBUG: console_tenants_manager.cpp:158: TPoolManip(/Root/users/user-1:hdd) Bootstrap 2026-01-08T15:32:56.877365Z node 1 :CMS_TENANTS DEBUG: console_tenants_manager.cpp:117: TPoolManip(/Root/users/user-1:hdd) read pool state: Request { Command { ReadStoragePool { BoxId: 999 Name: "/Root/users/user-1:hdd" } } } 2026-01-08T15:32:56.877801Z node 1 :CMS_TENANTS DEBUG: console_tenants_manager.cpp:198: TPoolManip(/Root/users/user-1:hdd) got read response: Status { Success: true StoragePool { BoxId: 999 StoragePoolId: 4 Name: "/Root/users/user-1:hdd" ErasureSpecies: "none" Geometry { } VDiskKind: "Default" Kind: "hdd" NumGroups: 2 PDiskFilter { Property { Type: ROT } } ScopeId { X1: 72057594046644480 X2: 39 } ItemConfigGeneration: 3 } } Success: true ConfigTxSeqNo: 13 2026-01-08T15:32:56.877889Z node 1 :CMS_TENANTS DEBUG: console_tenants_manager.cpp:151: TPoolManip(/Root/users/user-1:hdd) send pool request: Request { Command { DeleteStoragePool { BoxId: 999 StoragePoolId: 4 ItemConfigGeneration: 3 } } } 2026-01-08T15:32:56.887076Z node 1 :CMS_TENANTS DEBUG: console_tenants_manager.cpp:306: TPoolManip(/Root/users/user-1:hdd) got config response: Status { Success: true } Success: true ConfigTxSeqNo: 14 2026-01-08T15:32:56.887176Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.h:960: StateWork, received event# 2146435081, Sender [1:7593014168758001937:2208], Recipient [1:7593014155873098705:2208]: NKikimr::NConsole::TTenantsManager::TEvPrivate::TEvPoolDeleted 2026-01-08T15:32:56.887211Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.h:973: StateWork, processing event TEvPrivate::TEvPoolDeleted 2026-01-08T15:32:56.887225Z node 1 :CMS_TENANTS TRACE: tx_processor.cpp:51: TTxProcessor(tenants) enqueue tx 2026-01-08T15:32:56.887237Z node 1 :CMS_TENANTS TRACE: tx_processor.cpp:245: TTxProcessor(tenants) starts new tx 2026-01-08T15:32:56.887287Z node 1 :CMS_TENANTS DEBUG: console__update_pool_state.cpp:28: TTxUpdatePoolState for pool 
/Root/users/user-1:hdd of /Root/users/user-1 state=DELETED 2026-01-08T15:32:56.887309Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.cpp:3038: Update pool state in database for /Root/users/user-1:hdd state=DELETED allocatednumgroups=0 2026-01-08T15:32:56.898471Z node 3 :BS_PROXY_PUT ERROR: dsproxy_put_impl.cpp:72: [d55df13aa165b88f] Result# TEvPutResult {Id# [72075186224037888:1:17:0:0:105:0] Status# ERROR StatusFlags# { } ErrorReason# "Request got Poison" ApproximateFreeSpaceShare# 0} GroupId# 2181038081 Marker# BPP12 2026-01-08T15:32:56.898631Z node 3 :BS_PROXY_PUT ERROR: {BPP72@dsproxy_put.cpp:483} Query history GroupId# 2181038081 HandleClass# TabletLog Tactic# MinLatency History# THistory { Entries# [ TEvVPut{ TimestampMs# 0.169 sample PartId# [72075186224037888:1:17:0:0:105:1] QueryCount# 1 VDiskId# [82000001:2:0:0:0] NodeId# 1 } ] } 2026-01-08T15:32:56.919908Z node 1 :CMS_TENANTS DEBUG: console__update_pool_state.cpp:73: TTxUpdatePoolState complete for /Root/users/user-1:hdd 2026-01-08T15:32:56.919955Z node 1 :CMS_TENANTS TRACE: tx_processor.cpp:51: TTxProcessor(tenants) enqueue tx 2026-01-08T15:32:56.919963Z node 1 :CMS_TENANTS TRACE: tx_processor.cpp:60: TTxProcessor(tenants) completed tx 2026-01-08T15:32:56.919970Z node 1 :CMS_TENANTS TRACE: tx_processor.cpp:245: TTxProcessor(tenants) starts new tx 2026-01-08T15:32:56.920045Z node 1 :CMS_TENANTS DEBUG: console__remove_tenant_done.cpp:22: TTxRemoveTenantDone for tenant /Root/users/user-1 txid=1767886376694346 2026-01-08T15:32:56.920057Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.cpp:2918: Remove computational units of /Root/users/user-1 from database txid=1767886376694346 issue= 2026-01-08T15:32:56.920068Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.cpp:2949: Remove tenant /Root/users/user-1 from database txid=1767886376694346 issue= 2026-01-08T15:32:56.920075Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.cpp:2954: Remove pool /Root/users/user-1:hdd from database 2026-01-08T15:32:56.920188Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.cpp:3074: Add tenant removal info for /Root/users/user-1 txid=1767886376694346 code=SUCCESS errorcode=STATUS_CODE_UNSPECIFIED issue= 2026-01-08T15:32:56.940620Z node 1 :CMS_TENANTS DEBUG: console__remove_tenant_done.cpp:34: TTxRemoveTenantDone Complete 2026-01-08T15:32:56.940872Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.cpp:2422: Send /Root/users/user-1 notification to [1:7593014168758001819:2374]: Response { operation { id: "ydb://cmsrequest/5?tenant=/Root/users/user-1&cmstid=72057594037936131&txid=1767886376694346&action=2" ready: true status: SUCCESS } } 2026-01-08T15:32:56.940990Z node 1 :CMS_TENANTS TRACE: tx_processor.cpp:60: TTxProcessor(tenants) completed tx 2026-01-08T15:32:56.946379Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.h:960: StateWork, received event# 273285122, Sender [1:7593014168758001964:2376], Recipient [1:7593014155873098705:2208]: NKikimr::NConsole::TEvConsole::TEvGetTenantStatusRequest { Request { path: "/Root/users/user-1" } UserToken: "" PeerName: "ipv6:[::1]:51490" } 2026-01-08T15:32:56.946410Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.h:966: StateWork, processing event TEvConsole::TEvGetTenantStatusRequest 2026-01-08T15:32:56.946564Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.cpp:3368: Send TEvConsole::TEvGetTenantStatusResponse: Response { operation { ready: true status: NOT_FOUND issues { message: "Unknown tenant /Root/users/user-1" severity: 1 } } } 2026-01-08T15:32:56.949076Z node 1 :CMS_TENANTS 
TRACE: console_tenants_manager.h:960: StateWork, received event# 273285123, Sender [1:7593014168758001967:2377], Recipient [1:7593014155873098705:2208]: NKikimr::NConsole::TEvConsole::TEvListTenantsRequest { Request { } UserToken: "" PeerName: "ipv6:[::1]:51490" } 2026-01-08T15:32:56.949110Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.h:967: StateWork, processing event TEvConsole::TEvListTenantsRequest 2026-01-08T15:32:56.949314Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.cpp:3412: Send TEvConsole::TEvListTenantsResponse: Response { operation { ready: true status: SUCCESS result { [type.googleapis.com/Ydb.Cms.ListDatabasesResult] { } } } } 2026-01-08T15:32:56.957519Z node 1 :HIVE WARN: tx__status.cpp:58: HIVE#72057594037968897 THive::TTxStatus(status=2 node=Connected) - killing node 3 2026-01-08T15:32:56.957677Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Connected -> Disconnected 2026-01-08T15:33:00.454873Z node 3 :METADATA_PROVIDER ERROR: log.cpp:841: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[3:7593014165705046906:2199];send_to=[0:7307199536658146131:7762515]; 2026-01-08T15:33:00.454950Z node 3 :METADATA_PROVIDER ERROR: log.cpp:841: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/users/user-1/.metadata/initialization/migrations;error=timeout; |86.9%| [TM] {BAZEL_UPLOAD} ydb/services/cms/ut/unittest >> TExtSubDomainTest::CreateTableInsideAndAlterDomainAndTable-AlterDatabaseCreateHiveFirst-true [GOOD] >> KqpPg::PgUpdate+useSink [GOOD] >> KqpPg::PgUpdate-useSink |86.9%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/schemeshard/ut_stats/ydb-core-tx-schemeshard-ut_stats |86.9%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/schemeshard/ut_stats/ydb-core-tx-schemeshard-ut_stats |86.9%| [LD] {RESULT} $(B)/ydb/core/tx/schemeshard/ut_stats/ydb-core-tx-schemeshard-ut_stats >> DataShardVolatile::VolatileCommitOnBlobStorageFailure-UseSink [GOOD] >> DataShardVolatile::VolatileTxAbortedOnSplit >> KqpPg::TableSelect+useSink [GOOD] >> KqpPg::TableSelect-useSink ------- [TM] {asan, default-linux-x86_64, release} ydb/services/cms/ut/unittest >> TGRpcCmsTest::DisabledTxTest [GOOD] Test command err: 2026-01-08T15:32:58.478444Z node 1 :METADATA_PROVIDER WARN: log.cpp:841: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7593014176739013667:2202];send_to=[0:7307199536658146131:7762515]; 2026-01-08T15:32:58.480767Z node 1 :METADATA_PROVIDER ERROR: log.cpp:841: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2026-01-08T15:32:58.599330Z node 1 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:125: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, got undelivered to scheme cache: ActorUnknown 2026-01-08T15:32:58.894102Z node 1 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: LookupError, path: Root/.metadata/script_executions 2026-01-08T15:32:58.951912Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2026-01-08T15:32:58.952033Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2026-01-08T15:32:58.990521Z node 1 :HIVE WARN: 
node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2026-01-08T15:32:59.085310Z node 1 :IMPORT WARN: schemeshard_import.cpp:440: Table profiles were not loaded 2026-01-08T15:32:59.100912Z node 1 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: PathErrorUnknown, path: Root/.metadata/script_executions 2026-01-08T15:32:59.278917Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2026-01-08T15:32:59.278954Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2026-01-08T15:32:59.278969Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2026-01-08T15:32:59.279051Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2026-01-08T15:32:59.481997Z node 1 :TX_CONVEYOR ERROR: log.cpp:841: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2026-01-08T15:32:59.663252Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2026-01-08T15:32:59.786383Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateSubDomain, opId: 281474976715658:1, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_subdomain.cpp:259) 2026-01-08T15:32:59.844760Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) |86.9%| [TM] {BAZEL_UPLOAD} ydb/services/cms/ut/unittest >> KqpPg::DeleteWithQueryService+useSink [GOOD] >> KqpPg::DeleteWithQueryService-useSink >> RetryPolicy::TWriteSession_SeqNoShift [GOOD] >> RetryPolicy::RetryWithBatching >> DataShardWrite::UncommittedUpdateLockMissingRow [GOOD] >> DataShardWrite::UncommittedUpdateLockNewRowAboveSnapshot >> TxUsage::WriteToTopic_Demo_43_Table [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/services/cms/ut/unittest >> TGRpcCmsTest::RemoveWithAnotherTokenTest [GOOD] Test command err: 2026-01-08T15:32:54.939638Z node 1 :METADATA_PROVIDER WARN: log.cpp:841: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7593014163259179484:2078];send_to=[0:7307199536658146131:7762515]; 2026-01-08T15:32:54.939708Z node 1 :METADATA_PROVIDER ERROR: log.cpp:841: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2026-01-08T15:32:55.053671Z node 1 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:125: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, got undelivered to scheme cache: ActorUnknown 
2026-01-08T15:32:55.477489Z node 1 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: LookupError, path: Root/.metadata/script_executions 2026-01-08T15:32:55.592271Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2026-01-08T15:32:55.592428Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2026-01-08T15:32:55.693950Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2026-01-08T15:32:55.790787Z node 1 :IMPORT WARN: schemeshard_import.cpp:440: Table profiles were not loaded 2026-01-08T15:32:55.808582Z node 1 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: PathErrorUnknown, path: Root/.metadata/script_executions 2026-01-08T15:32:55.966803Z node 1 :TX_CONVEYOR ERROR: log.cpp:841: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2026-01-08T15:32:56.043320Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2026-01-08T15:32:56.043338Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2026-01-08T15:32:56.043344Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2026-01-08T15:32:56.058370Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2026-01-08T15:32:56.530755Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2026-01-08T15:32:56.842920Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) 2026-01-08T15:32:57.011768Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.h:960: StateWork, received event# 273285120, Sender [1:7593014171849115205:2302], Recipient [1:7593014167554147223:2217]: NKikimr::NConsole::TEvConsole::TEvCreateTenantRequest { Request { path: "/Root/users/user-1" resources { storage_units { unit_kind: "hdd" count: 1 } } } UserToken: "\n\016user-1@builtin\022\030\022\026\n\024all-users@well-known\032\016user-1@builtin\"\007Builtin*\017**** (E3DE7296)0\000" PeerName: "ipv6:[::1]:53278" } 2026-01-08T15:32:57.011829Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.h:963: StateWork, processing event TEvConsole::TEvCreateTenantRequest 2026-01-08T15:32:57.011857Z node 1 :CMS_TENANTS TRACE: tx_processor.cpp:51: TTxProcessor(tenants) enqueue tx 2026-01-08T15:32:57.011873Z node 1 :CMS_TENANTS TRACE: tx_processor.cpp:245: TTxProcessor(tenants) starts new tx 2026-01-08T15:32:57.012002Z node 1 :CMS_TENANTS DEBUG: console__create_tenant.cpp:71: TTxCreateTenant: Request { path: "/Root/users/user-1" resources 
{ storage_units { unit_kind: "hdd" count: 1 } } } UserToken: "\n\016user-1@builtin\022\030\022\026\n\024all-users@well-known\032\016user-1@builtin\"\007Builtin*\017**** (E3DE7296)0\000" PeerName: "ipv6:[::1]:53278" 2026-01-08T15:32:57.012225Z node 1 :CMS_TENANTS DEBUG: console__create_tenant.cpp:365: Add tenant /Root/users/user-1 (txid = 1767886377010772) 2026-01-08T15:32:57.053507Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.cpp:2568: Add tenant /Root/users/user-1 to database state=CREATING_POOLS coordinators=3 mediators=3 planresolution=10 timecastbucketspermediator=2 issue= txid=1767886377010772 subdomainversion=1 confirmedsubdomain=0 attrs= generation=1 errorcode=STATUS_CODE_UNSPECIFIED isExternalSubDomain=1 isExternalHive=1 isExternalSysViewProcessor=1 isExternalStatisticsAggregator=1 areResourcesShared=0 sharedDomainId= 2026-01-08T15:32:57.053752Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.cpp:2628: Add tenant pool /Root/users/user-1:hdd to database kind=hdd config=BoxId: 999 StoragePoolId: 4 Name: "/Root/users/user-1:hdd" ErasureSpecies: "none" VDiskKind: "Default" Kind: "hdd" NumGroups: 1 PDiskFilter { Property { Type: ROT } } allocatednumgroups=0 state=NOT_ALLOCATED 2026-01-08T15:32:57.060087Z node 1 :CMS_TENANTS DEBUG: console__create_tenant.cpp:375: TTxCreateTenant Complete 2026-01-08T15:32:57.061261Z node 1 :CMS_TENANTS TRACE: console__create_tenant.cpp:383: Send: NKikimr::NConsole::TEvConsole::TEvCreateTenantResponse { Response { operation { id: "ydb://cmsrequest/5?tenant=/Root/users/user-1&cmstid=72057594037936131&txid=1767886377010772&action=1" } } } 2026-01-08T15:32:57.061404Z node 1 :CMS_TENANTS TRACE: tx_processor.cpp:60: TTxProcessor(tenants) completed tx 2026-01-08T15:32:57.061475Z node 1 :CMS_TENANTS DEBUG: console_tenants_manager.cpp:158: TPoolManip(/Root/users/user-1:hdd) Bootstrap 2026-01-08T15:32:57.064328Z node 1 :CMS_TENANTS DEBUG: console_tenants_manager.cpp:117: TPoolManip(/Root/users/user-1:hdd) read pool state: Request { Command { ReadStoragePool { BoxId: 999 Name: "/Root/users/user-1:hdd" } } } 2026-01-08T15:32:57.064848Z node 1 :CMS_TENANTS DEBUG: console_tenants_manager.cpp:198: TPoolManip(/Root/users/user-1:hdd) got read response: Status { Success: true } Success: true ConfigTxSeqNo: 5 2026-01-08T15:32:57.064968Z node 1 :CMS_TENANTS DEBUG: console_tenants_manager.cpp:131: TPoolManip(/Root/users/user-1:hdd) send pool request: Request { Command { DefineStoragePool { BoxId: 999 StoragePoolId: 4 Name: "/Root/users/user-1:hdd" ErasureSpecies: "none" VDiskKind: "Default" Kind: "hdd" NumGroups: 1 PDiskFilter { Property { Type: ROT } } } } } 2026-01-08T15:32:57.068226Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.h:960: StateWork, received event# 273285131, Sender [1:7593014176144082512:2304], Recipient [1:7593014167554147223:2217]: NKikimr::NConsole::TEvConsole::TEvGetOperationRequest { Request { id: "ydb://cmsrequest/5?tenant=/Root/users/user-1&cmstid=72057594037936131&txid=1767886377010772&action=1" } UserToken: "\n\016user-1@builtin\022\030\022\026\n\024all-users@well-known\032\016user-1@builtin\"\007Builtin*\017**** (E3DE7296)0\000" } 2026-01-08T15:32:57.068256Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.h:965: StateWork, processing event TEvConsole::TEvGetOperationRequest 2026-01-08T15:32:57.068656Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.cpp:3344: Send TEvConsole::TEvGetOperationResponse: Response { operation { id: "ydb://cmsrequest/5?tenant=/Root/users/user-1&cmstid=72057594037936131&txid=1767886377010772&action=1" 
} } 2026-01-08T15:32:57.072930Z node 1 :CMS_TENANTS DEBUG: console_tenants_manager.cpp:244: TPoolManip(/Root/users/user-1:hdd) got config response: Status { Success: true } Success: true ConfigTxSeqNo: 6 2026-01-08T15:32:57.072976Z node 1 :CMS_TENANTS DEBUG: console_tenants_manager.cpp:168: TPoolManip(/Root/users/user-1:hdd) reply with NKikimr::NConsole::TTenantsManager::TEvPrivate::TEvPoolAllocated 2026-01-08T15:32:57.073038Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.h:960: StateWork, received event# 2146435079, Sender [1:7593014176144082508:2217], Recipient [1:7593014167554147223:2217]: NKikimr::NConsole::TTenantsManager::TEvPrivate::TEvPoolAllocated 2026-01-08T15:32:57.073055Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.h:972: StateWork, processing event TEvPrivate::TEvPoolAllocated 2026-01-08T15:32:57.073074Z node 1 :CMS_TENANTS TRACE: tx_processor.cpp:51: TTxProcessor(tenants) enqueue tx 2026-01-08T15:32:57.073083Z node 1 :CMS_TENANTS TRACE: tx_processor.cpp:245: TTxProcessor(tenants) starts new tx 2026-01-08T15:32:57.073125Z node 1 :CMS_TENANTS DEBUG: console__update_pool_state.cpp:28: TTxUpdatePoolState for pool /Root/users/user-1:hdd of /Root/users/user-1 state=ALLOCATED 2026-01-08T15:32:57.073153Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.cpp:3038: Update pool state in database for /Root/users/user-1:hdd state=ALLOCATED allocatednumgroups=1 2026-01-08T15:32:57.073212Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.cpp:3197: Update subdomain version in database for /Root/users/user-1 subdomainversion=2 2026-01-08T15:32:57.080617Z node 1 :CMS_TENANTS DEBUG: console__update_pool_state.cpp:73: TTxUpdatePoolState complete for /Root/users/user-1:hdd 2026-01-08T15:32:57.080649Z node 1 :CMS_TENANTS TRACE: tx_processor.cpp:51: TTxProcessor(tenants) enqueue tx 2026-01-08T15:32:57.080658Z node 1 :CMS_TENANTS TRACE: tx_processor.cpp:60: TTxProcessor(tenants) completed tx 2026-01-08T15:32:57.080666Z node 1 :CMS_TENANTS TRACE: tx_processor.cpp:245: TTxProcessor(tenants) starts new tx 2026-01-08T15:32:57.080734Z node 1 :CMS_TENANTS DEBUG: console__update_tenant_state.cpp:23: TTxUpdateTenantState for tenant /Root/users/user-1 to CREATING_SUBDOMAIN 2026-01-08T15:32:57.080758Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.cpp:3137: Update tenant state in database for /Root/users/user-1 state=CREATING_SUBDOMAIN txid=1767886377010772 errorcode=STATUS_CODE_UNSPECIFIED issue= 2026-01-08T15:32:57.088751Z node 1 :CMS_TENANTS DEBUG: console__update_tenant_state.cpp:45: TTxUpdateTenantState complete for /Root/users/user-1 2026-01-08T15:32:57.120595Z node 1 :CMS_TENANTS TRACE: tx_processor.cpp:60: TTxProcessor(tenants) completed tx 2026-01-08T15:32:57.120682Z node 1 :CMS_TENANTS DEBUG: console_tenants_manager.cpp:775: TSubdomainManip(/Root/users/user-1)::Bootstrap 2026-01-08T15:32:57.120692Z node 1 :CMS_TENANTS DEBUG: console_tenants_manager.cpp:597: TSubDomainManip(/Root/users/user-1) create subdomain 2026-01-08T15:32:57.138525Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.cpp:615: TSubdomainManip(/Root/users/user-1) send subdomain creation cmd: NKikimrTxUserProxy.TEvProposeTransaction Transaction { ModifyScheme { WorkingDir: "/Root" OperationType: ESchemeOpCreateExtSubDomain SubDomain { Name: "users/user-1" ExternalSchemeShard: true ExternalHive: true ExternalSysViewProcessor: true ExternalStatisticsAggregator: true GraphShard: true } } } ExecTimeoutPeriod: 18446744073709551615 UserToken: 
"\n\016user-1@builtin\022\030\022\026\n\024all-users@well-known\032\016user-1@builtin\"\007Builtin*\017**** (E3DE7296)0\000" DatabaseName: "Root" PeerName: "ipv6:[::1]:53278" 2026-01-08T15:32:57.138761Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.h:960: StateWork, received event# 273285131, Sender [1:7593014176144082572:2307], Recipient [1:7593014167554147223:2217]: NKikimr::NConsole::TEvConsole::TEvGetOperationRequest { Request { id: "ydb://cmsrequest/5?tenant=/Root/users/user-1&cmstid=72057594037936131&txid=1767886377010772&action=1" } UserToken: "\n\016user-1@builtin\022\030\022\026\n\024all-users@well-known\032\016user-1@builtin\"\007Builtin*\017**** (E3DE7296)0\000" } 2026-01-08T15:32:57.138776Z ... 9.423457Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__delete_tablet_reply.cpp:41: Got DeleteTabletReply with Forward response from Hive 72057594037968897 to Hive 72075186224037888 shardIdx 72057594046644480:7 2026-01-08T15:32:59.423490Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__delete_tablet_reply.cpp:41: Got DeleteTabletReply with Forward response from Hive 72057594037968897 to Hive 72075186224037888 shardIdx 72057594046644480:4 2026-01-08T15:32:59.423521Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__delete_tablet_reply.cpp:41: Got DeleteTabletReply with Forward response from Hive 72057594037968897 to Hive 72075186224037888 shardIdx 72057594046644480:10 2026-01-08T15:32:59.425639Z node 1 :CMS_TENANTS DEBUG: console_tenants_manager.cpp:800: TSubdomainManip(/Root/users/user-1) got TEvNotifyTxCompletionResult: TxId: 281474976715663 2026-01-08T15:32:59.425665Z node 1 :CMS_TENANTS DEBUG: console_tenants_manager.cpp:685: TSubdomainManip(/Root/users/user-1) done 2026-01-08T15:32:59.425714Z node 1 :CMS_TENANTS DEBUG: console_tenants_manager.cpp:701: TSubdomainManip(/Root/users/user-1) reply with NKikimr::NConsole::TTenantsManager::TEvPrivate::TEvSubdomainRemoved 2026-01-08T15:32:59.425853Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.h:960: StateWork, received event# 2146435077, Sender [1:7593014184734017839:2217], Recipient [1:7593014167554147223:2217]: NKikimr::NConsole::TTenantsManager::TEvPrivate::TEvSubdomainRemoved 2026-01-08T15:32:59.425876Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.h:980: StateWork, processing event TEvPrivate::TEvSubdomainRemoved 2026-01-08T15:32:59.425891Z node 1 :CMS_TENANTS TRACE: tx_processor.cpp:51: TTxProcessor(tenants) enqueue tx 2026-01-08T15:32:59.425901Z node 1 :CMS_TENANTS TRACE: tx_processor.cpp:245: TTxProcessor(tenants) starts new tx 2026-01-08T15:32:59.425944Z node 1 :CMS_TENANTS DEBUG: console__remove_computational_units.cpp:20: TTxRemoveComputationalUnits Execute /Root/users/user-1 2026-01-08T15:32:59.425971Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.cpp:3137: Update tenant state in database for /Root/users/user-1 state=REMOVING_UNITS txid=1767886379364650 errorcode=UNAUTHORIZED issue=AccessDenied: Access denied for request 2026-01-08T15:32:59.426055Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.cpp:2918: Remove computational units of /Root/users/user-1 from database txid=1767886379364650 issue=AccessDenied: Access denied for request 2026-01-08T15:32:59.434873Z node 1 :CMS_TENANTS DEBUG: console__remove_computational_units.cpp:34: TTxRemoveComputationalUnits Complete /Root/users/user-1 2026-01-08T15:32:59.434975Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.cpp:2105: Send TEvTenantSlotBroker::TEvAlterTenant: TenantName: "/Root/users/user-1" 2026-01-08T15:32:59.434990Z node 1 :CMS_TENANTS TRACE: 
tx_processor.cpp:60: TTxProcessor(tenants) completed tx 2026-01-08T15:32:59.436665Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.h:960: StateWork, received event# 273154052, Sender [1:7593014167554147087:2195], Recipient [1:7593014167554147223:2217]: NKikimrTenantSlotBroker.TTenantState TenantName: "/Root/users/user-1" 2026-01-08T15:32:59.436701Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.h:984: StateWork, processing event TEvTenantSlotBroker::TEvTenantState 2026-01-08T15:32:59.436722Z node 1 :CMS_TENANTS TRACE: tx_processor.cpp:51: TTxProcessor(tenants) enqueue tx 2026-01-08T15:32:59.436737Z node 1 :CMS_TENANTS TRACE: tx_processor.cpp:245: TTxProcessor(tenants) starts new tx 2026-01-08T15:32:59.436762Z node 1 :CMS_TENANTS DEBUG: console__update_tenant_state.cpp:23: TTxUpdateTenantState for tenant /Root/users/user-1 to REMOVING_POOLS 2026-01-08T15:32:59.436807Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.cpp:3137: Update tenant state in database for /Root/users/user-1 state=REMOVING_POOLS txid=1767886379364650 errorcode=UNAUTHORIZED issue=AccessDenied: Access denied for request 2026-01-08T15:32:59.440744Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__delete_tablet_reply.cpp:41: Got DeleteTabletReply with Forward response from Hive 72057594037968897 to Hive 72075186224037888 shardIdx 72057594046644480:9 2026-01-08T15:32:59.440820Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__delete_tablet_reply.cpp:41: Got DeleteTabletReply with Forward response from Hive 72057594037968897 to Hive 72075186224037888 shardIdx 72057594046644480:6 2026-01-08T15:32:59.440855Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__delete_tablet_reply.cpp:41: Got DeleteTabletReply with Forward response from Hive 72057594037968897 to Hive 72075186224037888 shardIdx 72057594046644480:3 2026-01-08T15:32:59.440899Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__delete_tablet_reply.cpp:41: Got DeleteTabletReply with Forward response from Hive 72057594037968897 to Hive 72075186224037888 shardIdx 72057594046644480:8 2026-01-08T15:32:59.440954Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__delete_tablet_reply.cpp:41: Got DeleteTabletReply with Forward response from Hive 72057594037968897 to Hive 72075186224037888 shardIdx 72057594046644480:5 2026-01-08T15:32:59.442532Z node 1 :HIVE WARN: hive_impl.cpp:519: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 3, TabletId: 72075186224037888 not found 2026-01-08T15:32:59.447371Z node 1 :CMS_TENANTS DEBUG: console__update_tenant_state.cpp:45: TTxUpdateTenantState complete for /Root/users/user-1 2026-01-08T15:32:59.447425Z node 1 :CMS_TENANTS TRACE: tx_processor.cpp:60: TTxProcessor(tenants) completed tx 2026-01-08T15:32:59.447464Z node 1 :CMS_TENANTS DEBUG: console_tenants_manager.cpp:158: TPoolManip(/Root/users/user-1:hdd) Bootstrap 2026-01-08T15:32:59.447629Z node 1 :CMS_TENANTS DEBUG: console_tenants_manager.cpp:117: TPoolManip(/Root/users/user-1:hdd) read pool state: Request { Command { ReadStoragePool { BoxId: 999 Name: "/Root/users/user-1:hdd" } } } 2026-01-08T15:32:59.448489Z node 1 :CMS_TENANTS DEBUG: console_tenants_manager.cpp:198: TPoolManip(/Root/users/user-1:hdd) got read response: Status { Success: true StoragePool { BoxId: 999 StoragePoolId: 4 Name: "/Root/users/user-1:hdd" ErasureSpecies: "none" Geometry { } VDiskKind: "Default" Kind: "hdd" NumGroups: 1 PDiskFilter { Property { Type: ROT } } ScopeId { X1: 72057594046644480 X2: 39 } ItemConfigGeneration: 2 } } Success: true ConfigTxSeqNo: 10 2026-01-08T15:32:59.448581Z node 1 
:CMS_TENANTS DEBUG: console_tenants_manager.cpp:151: TPoolManip(/Root/users/user-1:hdd) send pool request: Request { Command { DeleteStoragePool { BoxId: 999 StoragePoolId: 4 ItemConfigGeneration: 2 } } } 2026-01-08T15:32:59.481285Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.h:960: StateWork, received event# 273285131, Sender [1:7593014184734017937:2390], Recipient [1:7593014167554147223:2217]: NKikimr::NConsole::TEvConsole::TEvGetOperationRequest { Request { id: "ydb://cmsrequest/5?tenant=/Root/users/user-1&cmstid=72057594037936131&txid=1767886379364650&action=2" } UserToken: "" } 2026-01-08T15:32:59.481326Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.h:965: StateWork, processing event TEvConsole::TEvGetOperationRequest 2026-01-08T15:32:59.481571Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.cpp:3344: Send TEvConsole::TEvGetOperationResponse: Response { operation { id: "ydb://cmsrequest/5?tenant=/Root/users/user-1&cmstid=72057594037936131&txid=1767886379364650&action=2" } } 2026-01-08T15:32:59.492217Z node 1 :CMS_TENANTS DEBUG: console_tenants_manager.cpp:306: TPoolManip(/Root/users/user-1:hdd) got config response: Status { Success: true } Success: true ConfigTxSeqNo: 11 2026-01-08T15:32:59.492326Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.h:960: StateWork, received event# 2146435081, Sender [1:7593014184734017933:2217], Recipient [1:7593014167554147223:2217]: NKikimr::NConsole::TTenantsManager::TEvPrivate::TEvPoolDeleted 2026-01-08T15:32:59.492360Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.h:973: StateWork, processing event TEvPrivate::TEvPoolDeleted 2026-01-08T15:32:59.492372Z node 1 :CMS_TENANTS TRACE: tx_processor.cpp:51: TTxProcessor(tenants) enqueue tx 2026-01-08T15:32:59.492391Z node 1 :CMS_TENANTS TRACE: tx_processor.cpp:245: TTxProcessor(tenants) starts new tx 2026-01-08T15:32:59.492446Z node 1 :CMS_TENANTS DEBUG: console__update_pool_state.cpp:28: TTxUpdatePoolState for pool /Root/users/user-1:hdd of /Root/users/user-1 state=DELETED 2026-01-08T15:32:59.492465Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.cpp:3038: Update pool state in database for /Root/users/user-1:hdd state=DELETED allocatednumgroups=0 2026-01-08T15:32:59.494040Z node 3 :BS_PROXY_PUT ERROR: dsproxy_put_impl.cpp:72: [12cc0858ceebfea9] Result# TEvPutResult {Id# [72075186224037895:1:15:1:24576:95:0] Status# ERROR StatusFlags# { } ErrorReason# "Request got Poison" ApproximateFreeSpaceShare# 0} GroupId# 2181038081 Marker# BPP12 2026-01-08T15:32:59.494226Z node 3 :BS_PROXY_PUT ERROR: {BPP72@dsproxy_put.cpp:483} Query history GroupId# 2181038081 HandleClass# TabletLog Tactic# MinLatency History# THistory { Entries# [ TEvVPut{ TimestampMs# 0.412 sample PartId# [72075186224037895:1:15:1:24576:95:1] QueryCount# 4 VDiskId# [82000001:2:0:0:0] NodeId# 1 } ] } 2026-01-08T15:32:59.504199Z node 1 :CMS_TENANTS DEBUG: console__update_pool_state.cpp:73: TTxUpdatePoolState complete for /Root/users/user-1:hdd 2026-01-08T15:32:59.504244Z node 1 :CMS_TENANTS TRACE: tx_processor.cpp:51: TTxProcessor(tenants) enqueue tx 2026-01-08T15:32:59.504253Z node 1 :CMS_TENANTS TRACE: tx_processor.cpp:60: TTxProcessor(tenants) completed tx 2026-01-08T15:32:59.504261Z node 1 :CMS_TENANTS TRACE: tx_processor.cpp:245: TTxProcessor(tenants) starts new tx 2026-01-08T15:32:59.504352Z node 1 :CMS_TENANTS DEBUG: console__remove_tenant_done.cpp:22: TTxRemoveTenantDone for tenant /Root/users/user-1 txid=1767886379364650 2026-01-08T15:32:59.504371Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.cpp:2918: 
Remove computational units of /Root/users/user-1 from database txid=1767886379364650 issue=AccessDenied: Access denied for request 2026-01-08T15:32:59.504407Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.cpp:2949: Remove tenant /Root/users/user-1 from database txid=1767886379364650 issue=AccessDenied: Access denied for request 2026-01-08T15:32:59.504431Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.cpp:2954: Remove pool /Root/users/user-1:hdd from database 2026-01-08T15:32:59.504540Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.cpp:3074: Add tenant removal info for /Root/users/user-1 txid=1767886379364650 code=SUCCESS errorcode=UNAUTHORIZED issue=AccessDenied: Access denied for request 2026-01-08T15:32:59.538720Z node 1 :CMS_TENANTS DEBUG: console__remove_tenant_done.cpp:34: TTxRemoveTenantDone Complete 2026-01-08T15:32:59.538787Z node 1 :CMS_TENANTS TRACE: tx_processor.cpp:60: TTxProcessor(tenants) completed tx 2026-01-08T15:32:59.540472Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.h:960: StateWork, received event# 273285131, Sender [1:7593014184734017957:2392], Recipient [1:7593014167554147223:2217]: NKikimr::NConsole::TEvConsole::TEvGetOperationRequest { Request { id: "ydb://cmsrequest/5?tenant=/Root/users/user-1&cmstid=72057594037936131&txid=1767886379364650&action=2" } UserToken: "" } 2026-01-08T15:32:59.540493Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.h:965: StateWork, processing event TEvConsole::TEvGetOperationRequest 2026-01-08T15:32:59.540678Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.cpp:3344: Send TEvConsole::TEvGetOperationResponse: Response { operation { id: "ydb://cmsrequest/5?tenant=/Root/users/user-1&cmstid=72057594037936131&txid=1767886379364650&action=2" ready: true status: SUCCESS } } 2026-01-08T15:32:59.651088Z node 1 :HIVE WARN: tx__status.cpp:58: HIVE#72057594037968897 THive::TTxStatus(status=2 node=Connected) - killing node 3 2026-01-08T15:32:59.651311Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Connected -> Disconnected |86.9%| [TM] {BAZEL_UPLOAD} ydb/services/cms/ut/unittest >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-LocalUser-CreateUser-8 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-LocalUser-CreateUser-9 |86.9%| [TM] {asan, default-linux-x86_64, release} ydb/services/cms/ut/unittest |86.9%| [TM] {BAZEL_UPLOAD} ydb/services/cms/ut/unittest >> DataShardWrite::UpsertPreparedNoTxCache-Volatile [GOOD] >> DataShardWrite::WriteCommitVersion >> TxUsage::WriteToTopic_Demo_43_Query >> TTxDataShardRecomputeKMeansScan::BuildTable+WithForeign [GOOD] >> TTxDataShardRecomputeKMeansScan::BuildTable-WithForeign ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/tx_proxy/ut_ext_tenant/unittest >> TExtSubDomainTest::CreateTableInsideAndAlterDomainAndTable-AlterDatabaseCreateHiveFirst-true [GOOD] Test command err: 2026-01-08T15:32:39.300452Z node 1 :METADATA_PROVIDER WARN: log.cpp:841: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7593014098739155673:2080];send_to=[0:7307199536658146131:7762515]; 2026-01-08T15:32:39.300537Z node 1 :METADATA_PROVIDER ERROR: log.cpp:841: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2026-01-08T15:32:39.586081Z node 1 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: 
LookupError, path: dc-1/.metadata/script_executions 2026-01-08T15:32:39.637461Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2026-01-08T15:32:39.637582Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2026-01-08T15:32:39.650902Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2026-01-08T15:32:39.723582Z node 1 :IMPORT WARN: schemeshard_import.cpp:440: Table profiles were not loaded 2026-01-08T15:32:39.797062Z node 1 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: PathErrorUnknown, path: dc-1/.metadata/script_executions 2026-01-08T15:32:39.987923Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:402: actor# [1:7593014098739155910:2137] Handle TEvNavigate describe path dc-1 2026-01-08T15:32:39.987973Z node 1 :TX_PROXY DEBUG: describe.cpp:270: Actor# [1:7593014098739156385:2454] HANDLE EvNavigateScheme dc-1 2026-01-08T15:32:39.988078Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2810: Handle TEvTxProxySchemeCache::TEvNavigateKeySet: self# [1:7593014098739155917:2140], request# { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1 TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: true SyncVersion: true Status: Unknown Kind: KindUnknown DomainInfo }] } 2026-01-08T15:32:39.988187Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:889: [main][1:7593014098739156146:2289][/dc-1] Handle NKikimr::NSchemeBoard::NInternalEvents::TEvSyncRequest: sender# [1:7593014098739155917:2140], cookie# 1 2026-01-08T15:32:39.990023Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:375: [replica][1:7593014098739156164:2289][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7593014098739156161:2289], cookie# 1 2026-01-08T15:32:39.990085Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:375: [replica][1:7593014098739156165:2289][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7593014098739156162:2289], cookie# 1 2026-01-08T15:32:39.990101Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:375: [replica][1:7593014098739156166:2289][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7593014098739156163:2289], cookie# 1 2026-01-08T15:32:39.990144Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1137: [1:7593014098739155567:2050] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7593014098739156164:2289], cookie# 1 2026-01-08T15:32:39.990174Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1137: [1:7593014098739155570:2053] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7593014098739156165:2289], cookie# 1 2026-01-08T15:32:39.990191Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1137: [1:7593014098739155573:2056] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7593014098739156166:2289], cookie# 1 2026-01-08T15:32:39.990271Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:384: [replica][1:7593014098739156164:2289][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 Cluster State: { } }: sender# [1:7593014098739155567:2050], cookie# 1 2026-01-08T15:32:39.990307Z 
node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:384: [replica][1:7593014098739156165:2289][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 Cluster State: { } }: sender# [1:7593014098739155570:2053], cookie# 1 2026-01-08T15:32:39.990350Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:384: [replica][1:7593014098739156166:2289][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 Cluster State: { } }: sender# [1:7593014098739155573:2056], cookie# 1 2026-01-08T15:32:39.990410Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:919: [main][1:7593014098739156146:2289][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 Cluster State: { } }: sender# [1:7593014098739156161:2289], cookie# 1 2026-01-08T15:32:39.990439Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:986: [main][1:7593014098739156146:2289][/dc-1] Sync is in progress: cookie# 1, ring group# 0, size# 3, half# 1, successes# 1, failures# 0 2026-01-08T15:32:39.990459Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:919: [main][1:7593014098739156146:2289][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 Cluster State: { } }: sender# [1:7593014098739156162:2289], cookie# 1 2026-01-08T15:32:39.990492Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:999: [main][1:7593014098739156146:2289][/dc-1] Sync is done in the ring group: cookie# 1, ring group# 0, size# 3, half# 1, successes# 2, failures# 0, partial# 0 2026-01-08T15:32:39.990548Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:919: [main][1:7593014098739156146:2289][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 Cluster State: { } }: sender# [1:7593014098739156163:2289], cookie# 1 2026-01-08T15:32:39.990596Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:925: [main][1:7593014098739156146:2289][/dc-1] Sync cookie mismatch: sender# [1:7593014098739156163:2289], cookie# 1, current cookie# 0 2026-01-08T15:32:39.990662Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2651: HandleNotify: self# [1:7593014098739155917:2140], notify# NKikimr::NSchemeBoard::NInternalEvents::TEvSyncResponse { Path: /dc-1 PathId: Partial: 0 } 2026-01-08T15:32:40.066870Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2526: ResolveCacheItem: self# [1:7593014098739155917:2140], notify# NKikimr::NSchemeBoard::NInternalEvents::TEvSyncResponse { Path: /dc-1 PathId: Partial: 0 }, by path# { Subscriber: { Subscriber: [1:7593014098739156146:2289] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 1 } Filled: 1 Status: StatusSuccess Kind: 2 TableKind: 0 Created: 1 CreateStep: 0 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] DomainId: [OwnerId: 72057594046644480, LocalPathId: 1] IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, by pathId# nullptr 2026-01-08T15:32:40.067008Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:1892: FillEntry for TNavigate: self# [1:7593014098739155917:2140], cacheItem# { Subscriber: { Subscriber: [1:7593014098739156146:2289] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 1 } Filled: 1 Status: StatusSuccess Kind: 2 TableKind: 0 Created: 1 CreateStep: 0 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] DomainId: [OwnerId: 72057594046644480, LocalPathId: 1] IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, entry# { Path: dc-1 TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: true SyncVersion: true Status: 
Unknown Kind: KindUnknown DomainInfo }, props# { Cookie: 1 IsSync: true Partial: 0 } 2026-01-08T15:32:40.079551Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:268: Send result: self# [1:7593014103034123682:2455], recipient# [1:7593014098739156385:2454], result# { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1 TableId: [72057594046644480:1:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: true SyncVersion: true Status: Ok Kind: KindPath DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] Params { Version: 0 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } }] } 2026-01-08T15:32:40.079659Z node 1 :TX_PROXY DEBUG: describe.cpp:354: Actor# [1:7593014098739156385:2454] HANDLE EvNavigateKeySetResult TDescribeReq marker# P5 ErrorCount# 0 2026-01-08T15:32:40.226130Z node 1 :TX_PROXY DEBUG: describe.cpp:433: Actor# [1:7593014098739156385:2454] SEND to# 72057594046644480 shardToRequest NKikimrSchemeOp.TDescribePath Path: "dc-1" Options { ShowPrivateTable: true } 2026-01-08T15:32:40.246294Z node 1 :TX_PROXY DEBUG: describe.cpp:446: Actor# [1:7593014098739156385:2454] Handle TEvDescribeSchemeResult Forward to# [1:7593014098739156384:2453] Cookie: 0 TEvDescribeSchemeResult: NKikimrScheme.TEvDescribeSchemeResult PreSerializedData size# 2 Record# Status: StatusSuccess Path: "dc-1" PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } DomainKey { SchemeShard: 72057594046644480 PathId: 1 } PathsInside: 0 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046644480 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SecurityState { Audience: "/dc-1" } SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 1 PathOwnerId: 72057594046644480 2026-01-08T15:32:40.264751Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:314: actor# [1:7593014098739155910:2137] Handle TEvProposeTransaction 2026-01-08T15:32:40.264803Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:237: actor# [1:7593014098739155910:2137] TxId# 281474976710657 ProcessProposeTransaction 2026-01-08T15:32:40.264911Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:256: actor# 
[1:7593014098739155910:2137] Cookie# 0 userReqId# "" txid# 281474976710657 SEND to# [1:7593014103034123689:2460] 2026-01-08T15:32:40.301489Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2810: Handle TEvTxProxySchemeCache::TEvNavigateKeySet: self# [1:7593014098739155917:2140], request# { ErrorCount: 0 DatabaseName: /dc-1 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/.metadata/initialization/migrations TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }] } 2026-01-08T15:32:40.301574Z nod ... 480 Type: 2 SyncCookie: 0 } Filled: 1 Status: StatusPathDoesNotExist Kind: 0 TableKind: 0 Created: 0 CreateStep: 0 PathId: DomainId: IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, entry# { Path: dc-1/.metadata/initialization/migrations TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }, props# { Cookie: 0 IsSync: false Partial: 0 } 2026-01-08T15:33:00.671767Z node 3 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:268: Send result: self# [3:7593014187796219913:4017], recipient# [3:7593014187796219912:2343], result# { ErrorCount: 1 DatabaseName: /dc-1 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/.metadata/initialization/migrations TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: PathErrorUnknown Kind: KindUnknown DomainInfo }] } 2026-01-08T15:33:01.534352Z node 3 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2810: Handle TEvTxProxySchemeCache::TEvNavigateKeySet: self# [3:7593014153436479114:2131], request# { ErrorCount: 0 DatabaseName: /dc-1 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/.metadata/workload_manager/classifiers/resource_pool_classifiers TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }] } 2026-01-08T15:33:01.534548Z node 3 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:1892: FillEntry for TNavigate: self# [3:7593014153436479114:2131], cacheItem# { Subscriber: { Subscriber: [3:7593014170616349690:3178] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 0 } Filled: 1 Status: StatusPathDoesNotExist Kind: 0 TableKind: 0 Created: 0 CreateStep: 0 PathId: DomainId: IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, entry# { Path: dc-1/.metadata/workload_manager/classifiers/resource_pool_classifiers TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }, props# { Cookie: 0 IsSync: false Partial: 0 } 2026-01-08T15:33:01.534672Z node 3 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:268: Send result: self# [3:7593014192091187229:4021], recipient# [3:7593014192091187228:2344], result# { ErrorCount: 1 DatabaseName: /dc-1 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/.metadata/workload_manager/classifiers/resource_pool_classifiers TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: PathErrorUnknown Kind: KindUnknown DomainInfo }] } 2026-01-08T15:33:01.642922Z node 3 
:TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2810: Handle TEvTxProxySchemeCache::TEvNavigateKeySet: self# [3:7593014153436479114:2131], request# { ErrorCount: 0 DatabaseName: /dc-1 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/.metadata/initialization/migrations TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }] } 2026-01-08T15:33:01.643101Z node 3 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:1892: FillEntry for TNavigate: self# [3:7593014153436479114:2131], cacheItem# { Subscriber: { Subscriber: [3:7593014157731447300:2779] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 0 } Filled: 1 Status: StatusPathDoesNotExist Kind: 0 TableKind: 0 Created: 0 CreateStep: 0 PathId: DomainId: IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, entry# { Path: dc-1/.metadata/initialization/migrations TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }, props# { Cookie: 0 IsSync: false Partial: 0 } 2026-01-08T15:33:01.643200Z node 3 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:268: Send result: self# [3:7593014192091187231:4022], recipient# [3:7593014192091187230:2345], result# { ErrorCount: 1 DatabaseName: /dc-1 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/.metadata/initialization/migrations TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: PathErrorUnknown Kind: KindUnknown DomainInfo }] } 2026-01-08T15:33:01.674725Z node 3 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2810: Handle TEvTxProxySchemeCache::TEvNavigateKeySet: self# [3:7593014153436479114:2131], request# { ErrorCount: 0 DatabaseName: /dc-1 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/.metadata/initialization/migrations TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }] } 2026-01-08T15:33:01.674897Z node 3 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:1892: FillEntry for TNavigate: self# [3:7593014153436479114:2131], cacheItem# { Subscriber: { Subscriber: [3:7593014157731447300:2779] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 0 } Filled: 1 Status: StatusPathDoesNotExist Kind: 0 TableKind: 0 Created: 0 CreateStep: 0 PathId: DomainId: IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, entry# { Path: dc-1/.metadata/initialization/migrations TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }, props# { Cookie: 0 IsSync: false Partial: 0 } 2026-01-08T15:33:01.675019Z node 3 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:268: Send result: self# [3:7593014192091187233:4023], recipient# [3:7593014192091187232:2346], result# { ErrorCount: 1 DatabaseName: /dc-1 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/.metadata/initialization/migrations TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: PathErrorUnknown Kind: KindUnknown DomainInfo }] } 2026-01-08T15:33:02.538481Z node 3 :TX_PROXY_SCHEME_CACHE DEBUG: 
cache.cpp:2810: Handle TEvTxProxySchemeCache::TEvNavigateKeySet: self# [3:7593014153436479114:2131], request# { ErrorCount: 0 DatabaseName: /dc-1 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/.metadata/workload_manager/classifiers/resource_pool_classifiers TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }] } 2026-01-08T15:33:02.538649Z node 3 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:1892: FillEntry for TNavigate: self# [3:7593014153436479114:2131], cacheItem# { Subscriber: { Subscriber: [3:7593014170616349690:3178] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 0 } Filled: 1 Status: StatusPathDoesNotExist Kind: 0 TableKind: 0 Created: 0 CreateStep: 0 PathId: DomainId: IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, entry# { Path: dc-1/.metadata/workload_manager/classifiers/resource_pool_classifiers TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }, props# { Cookie: 0 IsSync: false Partial: 0 } 2026-01-08T15:33:02.538755Z node 3 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:268: Send result: self# [3:7593014196386154550:4027], recipient# [3:7593014196386154549:2347], result# { ErrorCount: 1 DatabaseName: /dc-1 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/.metadata/workload_manager/classifiers/resource_pool_classifiers TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: PathErrorUnknown Kind: KindUnknown DomainInfo }] } 2026-01-08T15:33:02.646838Z node 3 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2810: Handle TEvTxProxySchemeCache::TEvNavigateKeySet: self# [3:7593014153436479114:2131], request# { ErrorCount: 0 DatabaseName: /dc-1 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/.metadata/initialization/migrations TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }] } 2026-01-08T15:33:02.647002Z node 3 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:1892: FillEntry for TNavigate: self# [3:7593014153436479114:2131], cacheItem# { Subscriber: { Subscriber: [3:7593014157731447300:2779] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 0 } Filled: 1 Status: StatusPathDoesNotExist Kind: 0 TableKind: 0 Created: 0 CreateStep: 0 PathId: DomainId: IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, entry# { Path: dc-1/.metadata/initialization/migrations TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }, props# { Cookie: 0 IsSync: false Partial: 0 } 2026-01-08T15:33:02.647109Z node 3 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:268: Send result: self# [3:7593014196386154552:4028], recipient# [3:7593014196386154551:2348], result# { ErrorCount: 1 DatabaseName: /dc-1 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/.metadata/initialization/migrations TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: PathErrorUnknown Kind: KindUnknown DomainInfo }] } 
2026-01-08T15:33:02.674619Z node 3 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2810: Handle TEvTxProxySchemeCache::TEvNavigateKeySet: self# [3:7593014153436479114:2131], request# { ErrorCount: 0 DatabaseName: /dc-1 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/.metadata/initialization/migrations TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }] } 2026-01-08T15:33:02.674790Z node 3 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:1892: FillEntry for TNavigate: self# [3:7593014153436479114:2131], cacheItem# { Subscriber: { Subscriber: [3:7593014157731447300:2779] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 0 } Filled: 1 Status: StatusPathDoesNotExist Kind: 0 TableKind: 0 Created: 0 CreateStep: 0 PathId: DomainId: IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, entry# { Path: dc-1/.metadata/initialization/migrations TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }, props# { Cookie: 0 IsSync: false Partial: 0 } 2026-01-08T15:33:02.674903Z node 3 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:268: Send result: self# [3:7593014196386154554:4029], recipient# [3:7593014196386154553:2349], result# { ErrorCount: 1 DatabaseName: /dc-1 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/.metadata/initialization/migrations TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: PathErrorUnknown Kind: KindUnknown DomainInfo }] } |86.9%| [TM] {BAZEL_UPLOAD} ydb/core/tx/tx_proxy/ut_ext_tenant/unittest >> KqpBatchDelete::TableNotExists >> KqpPg::ExplainColumnsReorder [GOOD] >> TGRpcCmsTest::AuthTokenTest [GOOD] >> TGRpcCmsTest::SimpleTenantsTest [GOOD] >> TGRpcCmsTest::DescribeOptionsTest [GOOD] >> KqpPg::AlterColumnSetDefaultFromSequence [GOOD] >> KqpPg::CreateTableIfNotExists_GenericQuery >> DataShardOutOfOrder::TestImmediateQueueThenSplit-UseSink >> DataShardOutOfOrder::TestOutOfOrderNonConflictingWrites+EvWrite >> BasicUsage::AlterTopicWithSharedConsumer_MoveDeadLetterPolicy [GOOD] >> BasicUsage::AlterTopicWithSharedConsumer_DisableDeadLetterPolicy >> KqpPg::InsertFromSelect_Serial+useSink [GOOD] >> KqpPg::InsertFromSelect_Serial-useSink >> TxUsage::Sinks_Oltp_WriteToTopicAndTable_3_Table [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/services/cms/ut/unittest >> TGRpcCmsTest::AuthTokenTest [GOOD] Test command err: 2026-01-08T15:32:59.834365Z node 2 :BS_NODE WARN: {NWDC01@distconf.cpp:423} StateFunc too long Type# 268639238 Duration# 0.019460s 2026-01-08T15:32:59.839861Z node 1 :METADATA_PROVIDER WARN: log.cpp:841: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7593014184591405666:2263];send_to=[0:7307199536658146131:7762515]; 2026-01-08T15:32:59.840531Z node 1 :METADATA_PROVIDER ERROR: log.cpp:841: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2026-01-08T15:32:59.885303Z node 1 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:125: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, got undelivered to scheme cache: ActorUnknown 2026-01-08T15:33:00.398783Z node 1 :HIVE WARN: 
node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2026-01-08T15:33:00.398912Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2026-01-08T15:33:00.408006Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2026-01-08T15:33:00.514740Z node 1 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: LookupError, path: Root/.metadata/script_executions 2026-01-08T15:33:00.644143Z node 1 :IMPORT WARN: schemeshard_import.cpp:440: Table profiles were not loaded 2026-01-08T15:33:00.810775Z node 1 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: PathErrorUnknown, path: Root/.metadata/script_executions 2026-01-08T15:33:00.838678Z node 1 :TX_CONVEYOR ERROR: log.cpp:841: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2026-01-08T15:33:00.873004Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2026-01-08T15:33:00.873032Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2026-01-08T15:33:00.873044Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2026-01-08T15:33:00.873117Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2026-01-08T15:33:01.398957Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2026-01-08T15:33:01.529081Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.h:960: StateWork, received event# 273285120, Sender [1:7593014193181341170:2302], Recipient [1:7593014188886373217:2201]: NKikimr::NConsole::TEvConsole::TEvCreateTenantRequest { Request { path: "/Root/users/user-1" resources { storage_units { unit_kind: "hdd" count: 1 } } } UserToken: "\n\014root@builtin\022\030\022\026\n\024all-users@well-known\032\014root@builtin\"\007Builtin*\017**** (B6C6F477)0\000" PeerName: "ipv6:[::1]:40454" } 2026-01-08T15:33:01.529146Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.h:963: StateWork, processing event TEvConsole::TEvCreateTenantRequest 2026-01-08T15:33:01.529180Z node 1 :CMS_TENANTS TRACE: tx_processor.cpp:51: TTxProcessor(tenants) enqueue tx 2026-01-08T15:33:01.529192Z node 1 :CMS_TENANTS TRACE: tx_processor.cpp:245: TTxProcessor(tenants) starts new tx 2026-01-08T15:33:01.529362Z node 1 :CMS_TENANTS DEBUG: console__create_tenant.cpp:71: TTxCreateTenant: Request { path: "/Root/users/user-1" resources { storage_units { unit_kind: "hdd" count: 1 } } } UserToken: "\n\014root@builtin\022\030\022\026\n\024all-users@well-known\032\014root@builtin\"\007Builtin*\017**** (B6C6F477)0\000" PeerName: "ipv6:[::1]:40454" 2026-01-08T15:33:01.529587Z node 1 :CMS_TENANTS DEBUG: console__create_tenant.cpp:365: Add tenant /Root/users/user-1 (txid = 1767886381528925) 2026-01-08T15:33:01.591995Z node 1 :CMS_TENANTS TRACE: 
console_tenants_manager.cpp:2568: Add tenant /Root/users/user-1 to database state=CREATING_POOLS coordinators=3 mediators=3 planresolution=10 timecastbucketspermediator=2 issue= txid=1767886381528925 subdomainversion=1 confirmedsubdomain=0 attrs= generation=1 errorcode=STATUS_CODE_UNSPECIFIED isExternalSubDomain=1 isExternalHive=1 isExternalSysViewProcessor=1 isExternalStatisticsAggregator=1 areResourcesShared=0 sharedDomainId= 2026-01-08T15:33:01.592329Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.cpp:2628: Add tenant pool /Root/users/user-1:hdd to database kind=hdd config=BoxId: 999 StoragePoolId: 4 Name: "/Root/users/user-1:hdd" ErasureSpecies: "none" VDiskKind: "Default" Kind: "hdd" NumGroups: 1 PDiskFilter { Property { Type: ROT } } allocatednumgroups=0 state=NOT_ALLOCATED 2026-01-08T15:33:01.598606Z node 1 :CMS_TENANTS DEBUG: console__create_tenant.cpp:375: TTxCreateTenant Complete 2026-01-08T15:33:01.600000Z node 1 :CMS_TENANTS TRACE: console__create_tenant.cpp:383: Send: NKikimr::NConsole::TEvConsole::TEvCreateTenantResponse { Response { operation { id: "ydb://cmsrequest/5?tenant=/Root/users/user-1&cmstid=72057594037936131&txid=1767886381528925&action=1" } } } 2026-01-08T15:33:01.600160Z node 1 :CMS_TENANTS TRACE: tx_processor.cpp:60: TTxProcessor(tenants) completed tx 2026-01-08T15:33:01.600256Z node 1 :CMS_TENANTS DEBUG: console_tenants_manager.cpp:158: TPoolManip(/Root/users/user-1:hdd) Bootstrap 2026-01-08T15:33:01.600416Z node 1 :CMS_TENANTS DEBUG: console_tenants_manager.cpp:117: TPoolManip(/Root/users/user-1:hdd) read pool state: Request { Command { ReadStoragePool { BoxId: 999 Name: "/Root/users/user-1:hdd" } } } 2026-01-08T15:33:01.601066Z node 1 :CMS_TENANTS DEBUG: console_tenants_manager.cpp:198: TPoolManip(/Root/users/user-1:hdd) got read response: Status { Success: true } Success: true ConfigTxSeqNo: 5 2026-01-08T15:33:01.601199Z node 1 :CMS_TENANTS DEBUG: console_tenants_manager.cpp:131: TPoolManip(/Root/users/user-1:hdd) send pool request: Request { Command { DefineStoragePool { BoxId: 999 StoragePoolId: 4 Name: "/Root/users/user-1:hdd" ErasureSpecies: "none" VDiskKind: "Default" Kind: "hdd" NumGroups: 1 PDiskFilter { Property { Type: ROT } } } } } 2026-01-08T15:33:01.609697Z node 1 :CMS_TENANTS DEBUG: console_tenants_manager.cpp:244: TPoolManip(/Root/users/user-1:hdd) got config response: Status { Success: true } Success: true ConfigTxSeqNo: 6 2026-01-08T15:33:01.609757Z node 1 :CMS_TENANTS DEBUG: console_tenants_manager.cpp:168: TPoolManip(/Root/users/user-1:hdd) reply with NKikimr::NConsole::TTenantsManager::TEvPrivate::TEvPoolAllocated 2026-01-08T15:33:01.609832Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.h:960: StateWork, received event# 2146435079, Sender [1:7593014193181341176:2201], Recipient [1:7593014188886373217:2201]: NKikimr::NConsole::TTenantsManager::TEvPrivate::TEvPoolAllocated 2026-01-08T15:33:01.609853Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.h:972: StateWork, processing event TEvPrivate::TEvPoolAllocated 2026-01-08T15:33:01.609880Z node 1 :CMS_TENANTS TRACE: tx_processor.cpp:51: TTxProcessor(tenants) enqueue tx 2026-01-08T15:33:01.609891Z node 1 :CMS_TENANTS TRACE: tx_processor.cpp:245: TTxProcessor(tenants) starts new tx 2026-01-08T15:33:01.609959Z node 1 :CMS_TENANTS DEBUG: console__update_pool_state.cpp:28: TTxUpdatePoolState for pool /Root/users/user-1:hdd of /Root/users/user-1 state=ALLOCATED 2026-01-08T15:33:01.609989Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.cpp:3038: Update pool state in database for 
/Root/users/user-1:hdd state=ALLOCATED allocatednumgroups=1 2026-01-08T15:33:01.610059Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.cpp:3197: Update subdomain version in database for /Root/users/user-1 subdomainversion=2 2026-01-08T15:33:01.616473Z node 1 :CMS_TENANTS DEBUG: console__update_pool_state.cpp:73: TTxUpdatePoolState complete for /Root/users/user-1:hdd 2026-01-08T15:33:01.616506Z node 1 :CMS_TENANTS TRACE: tx_processor.cpp:51: TTxProcessor(tenants) enqueue tx 2026-01-08T15:33:01.616514Z node 1 :CMS_TENANTS TRACE: tx_processor.cpp:60: TTxProcessor(tenants) completed tx 2026-01-08T15:33:01.616521Z node 1 :CMS_TENANTS TRACE: tx_processor.cpp:245: TTxProcessor(tenants) starts new tx 2026-01-08T15:33:01.616577Z node 1 :CMS_TENANTS DEBUG: console__update_tenant_state.cpp:23: TTxUpdateTenantState for tenant /Root/users/user-1 to CREATING_SUBDOMAIN 2026-01-08T15:33:01.616601Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.cpp:3137: Update tenant state in database for /Root/users/user-1 state=CREATING_SUBDOMAIN txid=1767886381528925 errorcode=STATUS_CODE_UNSPECIFIED issue= 2026-01-08T15:33:01.617998Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.h:960: StateWork, received event# 273285131, Sender [1:7593014193181341182:2303], Recipient [1:7593014188886373217:2201]: NKikimr::NConsole::TEvConsole::TEvGetOperationRequest { Request { id: "ydb://cmsrequest/5?tenant=/Root/users/user-1&cmstid=72057594037936131&txid=1767886381528925&action=1" } UserToken: "\n\014root@builtin\022\030\022\026\n\024all-users@well-known\032\014root@builtin\"\007Builtin*\017**** (B6C6F477)0\000" } 2026-01-08T15:33:01.618031Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.h:965: StateWork, processing event TEvConsole::TEvGetOperationRequest 2026-01-08T15:33:01.618247Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.cpp:3344: Send TEvConsole::TEvGetOperationResponse: Response { operation { id: "ydb://cmsrequest/5?tenant=/Root/users/user-1&cmstid=72057594037936131&txid=1767886381528925&action=1" } } 2026-01-08T15:33:01.624712Z node 1 :CMS_TENANTS DEBUG: console__update_tenant_state.cpp:45: TTxUpdateTenantState complete for /Root/users/user-1 2026-01-08T15:33:01.639560Z node 1 :CMS_TENANTS TRACE: tx_processor.cpp:60: TTxProcessor(tenants) completed tx 2026-01-08T15:33:01.646213Z node 1 :CMS_TENANTS DEBUG: console_tenants_manager.cpp:775: TSubdomainManip(/Root/users/user-1)::Bootstrap 2026-01-08T15:33:01.646269Z node 1 :CMS_TENANTS DEBUG: console_tenants_manager.cpp:597: TSubDomainManip(/Root/users/user-1) create subdomain 2026-01-08T15:33:01.676512Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.cpp:615: TSubdomainManip(/Root/users/user-1) send subdomain creation cmd: NKikimrTxUserProxy.TEvProposeTransaction Transaction { ModifyScheme { WorkingDir: "/Root" OperationType: ESchemeOpCreateExtSubDomain SubDomain { Name: "users/user-1" ExternalSchemeShard: true ExternalHive: true ExternalSysViewProcessor: true ExternalStatisticsAggregator: true GraphShard: true } } } ExecTimeoutPeriod: 18446744073709551615 UserToken: "\n\014root@builtin\022\030\022\026\n\024all-users@well-known\032\014root@builtin\"\007Builtin*\017**** (B6C6F477)0\000" DatabaseName: "Root" PeerName: "ipv6:[::1]:40454" 2026-01-08T15:33:01.701589Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateExtSubDomain, opId: 281474976710658:1, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at 
ydb/core/tx/schemeshard/schemeshard__operation_create_extsubdomain.cpp:194) 2026-01-08T15:33:01.702505Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.h:960: StateWork, received event# 273285131, Sender [1:7593014193181341243:2306], Recipient [1:7593014188886373217:2201]: NKikimr::NConsole::TEvConsole::TEvGetOperationRequest { Request { id: "ydb://cmsrequest/5?tenant=/Root/users/user-1&cmstid=72057594037936131&txid=1767886381528925&action=1" } UserToken: "\n\014root@builtin\02 ... l-known\032\014root@builtin\"\007Builtin*\017**** (B6C6F477)0\000" PeerName: "ipv6:[::1]:40454" } 2026-01-08T15:33:02.578353Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.h:966: StateWork, processing event TEvConsole::TEvGetTenantStatusRequest 2026-01-08T15:33:02.578396Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.cpp:2121: Send TEvTenantSlotBroker::TEvGetTenantState: TenantName: "/Root/users/user-1" 2026-01-08T15:33:02.578481Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.h:960: StateWork, received event# 273154052, Sender [1:7593014188886373139:2210], Recipient [1:7593014188886373217:2201]: NKikimrTenantSlotBroker.TTenantState TenantName: "/Root/users/user-1" 2026-01-08T15:33:02.578492Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.h:984: StateWork, processing event TEvTenantSlotBroker::TEvTenantState 2026-01-08T15:33:02.579008Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.cpp:3744: Send TEvConsole::TEvGetTenantStatusResponse: Response { operation { ready: true status: SUCCESS result { [type.googleapis.com/Ydb.Cms.GetDatabaseStatusResult] { path: "/Root/users/user-1" state: PENDING_RESOURCES required_resources { storage_units { unit_kind: "hdd" count: 1 } } allocated_resources { storage_units { unit_kind: "hdd" count: 1 } } generation: 1 } } } } 2026-01-08T15:33:02.642510Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.h:960: StateWork, received event# 273285122, Sender [1:7593014197476309048:2366], Recipient [1:7593014188886373217:2201]: NKikimr::NConsole::TEvConsole::TEvGetTenantStatusRequest { Request { path: "/Root/users/user-1" } UserToken: "\n\014root@builtin\022\030\022\026\n\024all-users@well-known\032\014root@builtin\"\007Builtin*\017**** (B6C6F477)0\000" PeerName: "ipv6:[::1]:40454" } 2026-01-08T15:33:02.642572Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.h:966: StateWork, processing event TEvConsole::TEvGetTenantStatusRequest 2026-01-08T15:33:02.642638Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.cpp:2121: Send TEvTenantSlotBroker::TEvGetTenantState: TenantName: "/Root/users/user-1" 2026-01-08T15:33:02.642972Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.h:960: StateWork, received event# 273154052, Sender [1:7593014188886373139:2210], Recipient [1:7593014188886373217:2201]: NKikimrTenantSlotBroker.TTenantState TenantName: "/Root/users/user-1" 2026-01-08T15:33:02.642988Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.h:984: StateWork, processing event TEvTenantSlotBroker::TEvTenantState 2026-01-08T15:33:02.643578Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.cpp:3744: Send TEvConsole::TEvGetTenantStatusResponse: Response { operation { ready: true status: SUCCESS result { [type.googleapis.com/Ydb.Cms.GetDatabaseStatusResult] { path: "/Root/users/user-1" state: PENDING_RESOURCES required_resources { storage_units { unit_kind: "hdd" count: 1 } } allocated_resources { storage_units { unit_kind: "hdd" count: 1 } } generation: 1 } } } } 2026-01-08T15:33:02.667923Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.h:960: StateWork, received 
event# 273285122, Sender [1:7593014197476309062:2367], Recipient [1:7593014188886373217:2201]: NKikimr::NConsole::TEvConsole::TEvGetTenantStatusRequest { Request { path: "/Root/users/user-1" } UserToken: "\n\014root@builtin\022\030\022\026\n\024all-users@well-known\032\014root@builtin\"\007Builtin*\017**** (B6C6F477)0\000" PeerName: "ipv6:[::1]:40454" } 2026-01-08T15:33:02.667952Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.h:966: StateWork, processing event TEvConsole::TEvGetTenantStatusRequest 2026-01-08T15:33:02.667998Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.cpp:2121: Send TEvTenantSlotBroker::TEvGetTenantState: TenantName: "/Root/users/user-1" 2026-01-08T15:33:02.668113Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.h:960: StateWork, received event# 273154052, Sender [1:7593014188886373139:2210], Recipient [1:7593014188886373217:2201]: NKikimrTenantSlotBroker.TTenantState TenantName: "/Root/users/user-1" 2026-01-08T15:33:02.668132Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.h:984: StateWork, processing event TEvTenantSlotBroker::TEvTenantState 2026-01-08T15:33:02.668739Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.cpp:3744: Send TEvConsole::TEvGetTenantStatusResponse: Response { operation { ready: true status: SUCCESS result { [type.googleapis.com/Ydb.Cms.GetDatabaseStatusResult] { path: "/Root/users/user-1" state: PENDING_RESOURCES required_resources { storage_units { unit_kind: "hdd" count: 1 } } allocated_resources { storage_units { unit_kind: "hdd" count: 1 } } generation: 1 } } } } 2026-01-08T15:33:02.674724Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.h:960: StateWork, received event# 273285122, Sender [1:7593014197476309071:2368], Recipient [1:7593014188886373217:2201]: NKikimr::NConsole::TEvConsole::TEvGetTenantStatusRequest { Request { path: "/Root/users/user-1" } UserToken: "\n\014root@builtin\022\030\022\026\n\024all-users@well-known\032\014root@builtin\"\007Builtin*\017**** (B6C6F477)0\000" PeerName: "ipv6:[::1]:40454" } 2026-01-08T15:33:02.674752Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.h:966: StateWork, processing event TEvConsole::TEvGetTenantStatusRequest 2026-01-08T15:33:02.674793Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.cpp:2121: Send TEvTenantSlotBroker::TEvGetTenantState: TenantName: "/Root/users/user-1" 2026-01-08T15:33:02.674883Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.h:960: StateWork, received event# 273154052, Sender [1:7593014188886373139:2210], Recipient [1:7593014188886373217:2201]: NKikimrTenantSlotBroker.TTenantState TenantName: "/Root/users/user-1" 2026-01-08T15:33:02.674896Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.h:984: StateWork, processing event TEvTenantSlotBroker::TEvTenantState 2026-01-08T15:33:02.675420Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.cpp:3744: Send TEvConsole::TEvGetTenantStatusResponse: Response { operation { ready: true status: SUCCESS result { [type.googleapis.com/Ydb.Cms.GetDatabaseStatusResult] { path: "/Root/users/user-1" state: PENDING_RESOURCES required_resources { storage_units { unit_kind: "hdd" count: 1 } } allocated_resources { storage_units { unit_kind: "hdd" count: 1 } } generation: 1 } } } } 2026-01-08T15:33:02.686073Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.h:960: StateWork, received event# 273285122, Sender [1:7593014197476309079:2369], Recipient [1:7593014188886373217:2201]: NKikimr::NConsole::TEvConsole::TEvGetTenantStatusRequest { Request { path: "/Root/users/user-1" } UserToken: 
"\n\014root@builtin\022\030\022\026\n\024all-users@well-known\032\014root@builtin\"\007Builtin*\017**** (B6C6F477)0\000" PeerName: "ipv6:[::1]:40454" } 2026-01-08T15:33:02.686100Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.h:966: StateWork, processing event TEvConsole::TEvGetTenantStatusRequest 2026-01-08T15:33:02.686139Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.cpp:2121: Send TEvTenantSlotBroker::TEvGetTenantState: TenantName: "/Root/users/user-1" 2026-01-08T15:33:02.695828Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.h:960: StateWork, received event# 273154052, Sender [1:7593014188886373139:2210], Recipient [1:7593014188886373217:2201]: NKikimrTenantSlotBroker.TTenantState TenantName: "/Root/users/user-1" 2026-01-08T15:33:02.703834Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.h:984: StateWork, processing event TEvTenantSlotBroker::TEvTenantState 2026-01-08T15:33:02.707180Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.cpp:3744: Send TEvConsole::TEvGetTenantStatusResponse: Response { operation { ready: true status: SUCCESS result { [type.googleapis.com/Ydb.Cms.GetDatabaseStatusResult] { path: "/Root/users/user-1" state: PENDING_RESOURCES required_resources { storage_units { unit_kind: "hdd" count: 1 } } allocated_resources { storage_units { unit_kind: "hdd" count: 1 } } generation: 1 } } } } 2026-01-08T15:33:02.707308Z node 1 :CMS_TENANTS DEBUG: console_tenants_manager.cpp:800: TSubdomainManip(/Root/users/user-1) got TEvNotifyTxCompletionResult: TxId: 281474976710659 2026-01-08T15:33:02.707319Z node 1 :CMS_TENANTS DEBUG: console_tenants_manager.cpp:685: TSubdomainManip(/Root/users/user-1) done 2026-01-08T15:33:02.707366Z node 1 :CMS_TENANTS DEBUG: console_tenants_manager.cpp:701: TSubdomainManip(/Root/users/user-1) reply with NKikimr::NConsole::TTenantsManager::TEvPrivate::TEvSubdomainReady 2026-01-08T15:33:02.707515Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.h:960: StateWork, received event# 2146435076, Sender [1:7593014193181341288:2201], Recipient [1:7593014188886373217:2201]: NKikimr::NConsole::TTenantsManager::TEvPrivate::TEvSubdomainReady 2026-01-08T15:33:02.707541Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.h:979: StateWork, processing event TEvPrivate::TEvSubdomainReady 2026-01-08T15:33:02.707561Z node 1 :CMS_TENANTS TRACE: tx_processor.cpp:51: TTxProcessor(tenants) enqueue tx 2026-01-08T15:33:02.707570Z node 1 :CMS_TENANTS TRACE: tx_processor.cpp:245: TTxProcessor(tenants) starts new tx 2026-01-08T15:33:02.707617Z node 1 :CMS_TENANTS DEBUG: console__update_confirmed_subdomain.cpp:22: TTxUpdateConfirmedSubdomain for tenant /Root/users/user-1 to 2 2026-01-08T15:33:02.707638Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.cpp:3137: Update tenant state in database for /Root/users/user-1 state=RUNNING txid=1767886381528925 errorcode=STATUS_CODE_UNSPECIFIED issue= 2026-01-08T15:33:02.707722Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.cpp:2904: Update database for /Root/users/user-1 confirmedsubdomain=2 2026-01-08T15:33:02.730425Z node 1 :CMS_TENANTS DEBUG: console__update_confirmed_subdomain.cpp:42: TTxUpdateConfirmedSubdomain complete for /Root/users/user-1 2026-01-08T15:33:02.730466Z node 1 :CMS_TENANTS TRACE: tx_processor.cpp:60: TTxProcessor(tenants) completed tx 2026-01-08T15:33:02.730981Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.h:960: StateWork, received event# 273285122, Sender [1:7593014197476309093:2370], Recipient [1:7593014188886373217:2201]: NKikimr::NConsole::TEvConsole::TEvGetTenantStatusRequest 
{ Request { path: "/Root/users/user-1" } UserToken: "\n\014root@builtin\022\030\022\026\n\024all-users@well-known\032\014root@builtin\"\007Builtin*\017**** (B6C6F477)0\000" PeerName: "ipv6:[::1]:40454" } 2026-01-08T15:33:02.731044Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.h:966: StateWork, processing event TEvConsole::TEvGetTenantStatusRequest 2026-01-08T15:33:02.731085Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.cpp:2121: Send TEvTenantSlotBroker::TEvGetTenantState: TenantName: "/Root/users/user-1" 2026-01-08T15:33:02.731173Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.h:960: StateWork, received event# 273154052, Sender [1:7593014188886373139:2210], Recipient [1:7593014188886373217:2201]: NKikimrTenantSlotBroker.TTenantState TenantName: "/Root/users/user-1" 2026-01-08T15:33:02.731187Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.h:984: StateWork, processing event TEvTenantSlotBroker::TEvTenantState 2026-01-08T15:33:02.731758Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.cpp:3744: Send TEvConsole::TEvGetTenantStatusResponse: Response { operation { ready: true status: SUCCESS result { [type.googleapis.com/Ydb.Cms.GetDatabaseStatusResult] { path: "/Root/users/user-1" state: RUNNING required_resources { storage_units { unit_kind: "hdd" count: 1 } } allocated_resources { storage_units { unit_kind: "hdd" count: 1 } } generation: 1 } } } } 2026-01-08T15:33:02.845522Z node 3 :TX_CONVEYOR ERROR: log.cpp:841: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2026-01-08T15:33:03.371062Z node 1 :HIVE WARN: tx__status.cpp:58: HIVE#72057594037968897 THive::TTxStatus(status=2 node=Connected) - killing node 3 2026-01-08T15:33:03.371464Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Connected -> Disconnected |86.9%| [TM] {BAZEL_UPLOAD} ydb/services/cms/ut/unittest >> TestSqsTopicHttpProxy::TestSendMessageBatchLong [GOOD] >> DataShardWrite::ExecSQLUpsertPrepared-EvWrite+Volatile [GOOD] >> DataShardWrite::ExecSQLUpsertPrepared+EvWrite+Volatile ------- [TM] {asan, default-linux-x86_64, release} ydb/services/cms/ut/unittest >> TGRpcCmsTest::DescribeOptionsTest [GOOD] Test command err: 2026-01-08T15:33:01.979893Z node 1 :METADATA_PROVIDER WARN: log.cpp:841: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7593014192581831285:2201];send_to=[0:7307199536658146131:7762515]; 2026-01-08T15:33:01.979931Z node 1 :METADATA_PROVIDER ERROR: log.cpp:841: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2026-01-08T15:33:01.992898Z node 1 :BS_NODE WARN: {NWDC01@distconf.cpp:423} StateFunc too long Type# 268639238 Duration# 0.008265s 2026-01-08T15:33:02.586276Z node 1 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: LookupError, path: Root/.metadata/script_executions 2026-01-08T15:33:02.829501Z node 1 :IMPORT WARN: schemeshard_import.cpp:440: Table profiles were not loaded 2026-01-08T15:33:02.834483Z node 1 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: LookupError, path: Root/.metadata/script_executions 2026-01-08T15:33:02.845455Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) 
VolatileState: Unknown -> Disconnected 2026-01-08T15:33:02.845550Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2026-01-08T15:33:02.891169Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2026-01-08T15:33:03.002466Z node 1 :TX_CONVEYOR ERROR: log.cpp:841: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2026-01-08T15:33:03.242063Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2026-01-08T15:33:03.242092Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2026-01-08T15:33:03.242099Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2026-01-08T15:33:03.242183Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2026-01-08T15:33:03.444593Z node 1 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: PathErrorUnknown, path: Root/.metadata/script_executions 2026-01-08T15:33:03.756674Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2026-01-08T15:33:04.135731Z node 1 :CMS_TENANTS TRACE: tx_processor.cpp:131: TTxProcessor(tenants) is now locking 2026-01-08T15:33:04.135751Z node 1 :CMS_TENANTS TRACE: tx_processor.cpp:143: TTxProcessor(tenants) is now locked by parent 2026-01-08T15:33:04.141133Z node 1 :CMS_TENANTS TRACE: tx_processor.cpp:102: TTxProcessor(tenants) is now active 2026-01-08T15:33:04.234074Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.h:960: StateWork, received event# 273285140, Sender [1:7593014205466734159:2304], Recipient [1:7593014196876798916:2202]: NKikimr::NConsole::TEvConsole::TEvDescribeTenantOptionsRequest { Request { } UserToken: "" PeerName: "ipv6:[::1]:46936" } 2026-01-08T15:33:04.234134Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.h:964: StateWork, processing event TEvConsole::TEvDescribeTenantOptionsRequest 2026-01-08T15:33:04.248650Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.cpp:3326: Send TEvConsole::TEvDescribeTenantOptionsResponse: Response { operation { ready: true status: SUCCESS result { [type.googleapis.com/Ydb.Cms.DescribeDatabaseOptionsResult] { storage_units { kind: "hdd2" labels { key: "disk_type" value: "ROT" } labels { key: "erasure" value: "none" } } storage_units { kind: "hdd" labels { key: "disk_type" value: "ROT" } labels { key: "erasure" value: "none" } } storage_units { kind: "hdd1" labels { key: "disk_type" value: "ROT" } labels { key: "erasure" value: "none" } } storage_units { kind: "ssd" labels { key: "disk_type" value: "ROT" } labels { key: "erasure" value: "none" } } storage_units { kind: "test" labels { key: "disk_type" value: "ROT" } labels { key: "erasure" value: "none" } } availability_zones { name: "dc-1" labels { key: "collocation" value: "disabled" } labels { key: "fixed_data_center" value: "DC-1" } } availability_zones { name: "any" labels { key: "any_data_center" value: "true" } labels { key: 
"collocation" value: "disabled" } } computational_units { kind: "slot" labels { key: "slot_type" value: "default" } labels { key: "type" value: "dynamic_slot" } allowed_availability_zones: "any" allowed_availability_zones: "dc-1" } } } } } |86.9%| [TM] {BAZEL_UPLOAD} ydb/services/cms/ut/unittest ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/pg/unittest >> KqpPg::ExplainColumnsReorder [GOOD] Test command err: Trying to start YDB, gRPC: 19283, MsgBus: 29580 2026-01-08T15:31:28.441867Z node 1 :METADATA_PROVIDER WARN: log.cpp:841: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7593013791341013241:2262];send_to=[0:7307199536658146131:7762515]; 2026-01-08T15:31:28.441926Z node 1 :METADATA_PROVIDER ERROR: log.cpp:841: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2026-01-08T15:31:28.820669Z node 1 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: LookupError, path: Root/.metadata/script_executions 2026-01-08T15:31:28.828238Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2026-01-08T15:31:28.828374Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2026-01-08T15:31:28.970212Z node 1 :IMPORT WARN: schemeshard_import.cpp:440: Table profiles were not loaded 2026-01-08T15:31:28.982450Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1374: Notification cookie mismatch for subscription [1:7593013791341012994:2081] 1767886288391233 != 1767886288391236 2026-01-08T15:31:28.993821Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2026-01-08T15:31:29.096660Z node 1 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: PathErrorUnknown, path: Root/.metadata/script_executions 2026-01-08T15:31:29.335058Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2026-01-08T15:31:29.335084Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2026-01-08T15:31:29.335091Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2026-01-08T15:31:29.335168Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2026-01-08T15:31:29.442857Z node 1 :TX_CONVEYOR ERROR: log.cpp:841: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2026-01-08T15:31:30.183485Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2026-01-08T15:31:30.196361Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:123: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2026-01-08T15:31:33.442769Z node 1 :METADATA_PROVIDER ERROR: log.cpp:841: 
fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7593013791341013241:2262];send_to=[0:7307199536658146131:7762515]; 2026-01-08T15:31:33.442846Z node 1 :METADATA_PROVIDER ERROR: log.cpp:841: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2026-01-08T15:31:33.572645Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7593013812815850369:2326], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:31:33.572822Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:263: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:31:33.573265Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7593013812815850381:2329], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:31:33.573299Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7593013812815850382:2330], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:31:33.573632Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:263: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:31:33.578360Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2026-01-08T15:31:33.597059Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7593013812815850385:2331], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710658 completed, doublechecking } 2026-01-08T15:31:33.715331Z node 1 :TX_PROXY ERROR: schemereq.cpp:624: Actor# [1:7593013812815850436:2541] txid# 281474976710659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 41], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } Trying to start YDB, gRPC: 6566, MsgBus: 19741 2026-01-08T15:31:34.723469Z node 2 :METADATA_PROVIDER WARN: log.cpp:841: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7593013818286190531:2158];send_to=[0:7307199536658146131:7762515]; 2026-01-08T15:31:34.742388Z node 2 :METADATA_PROVIDER ERROR: log.cpp:841: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2026-01-08T15:31:34.764025Z node 2 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: LookupError, path: Root/.metadata/script_executions 2026-01-08T15:31:34.854897Z node 2 :IMPORT WARN: schemeshard_import.cpp:440: Table profiles were not loaded 2026-01-08T15:31:34.856472Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1374: Notification cookie mismatch for subscription [2:7593013818286190399:2081] 1767886294710386 != 1767886294710389 2026-01-08T15:31:34.871613Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2026-01-08T15:31:34.871698Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2026-01-08T15:31:34.907738Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2026-01-08T15:31:34.995659Z node 2 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: PathErrorUnknown, path: Root/.metadata/script_executions 2026-01-08T15:31:35.134932Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2026-01-08T15:31:35.134960Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2026-01-08T15:31:35.134967Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2026-01-08T15:31:35.135045Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2026-01-08T15:31:35.734738Z node 2 :TX_CONVEYOR ERROR: log.cpp:841: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2026-01-08T15:31:35.804029Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2026-01-08T15:31:35.815767Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:123: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2026-01-08T15:31:39.119737Z node 2 :KQP_WORKLOAD_SERVICE WARN: 
scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7593013839761027770:2325], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:31:39.119852Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:263: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:31:39.126866Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7593013839761027783:2329], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:31:39.126949Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7593013839761027782:2328], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:31:39.127001Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:263: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:31:39.132015Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2026-01-08T15:31:39.150275Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7593013839761027786:2330], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715658 completed, doublechecking } 2026-01-08T15:31:39.236103Z node 2 :TX_PROXY ERROR: schemereq.cpp:624: Actor# [2:7593013839761027837:2538] txid# 281474976715659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 41], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } Trying to start YDB, gRPC: 12234, MsgBus: 26389 2026-01-08T15:31:40.253759Z node 3 :METADATA_PROVIDER WARN: log.cpp:841: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[3:7593013845004519545:2068];send_to= ... on_alter_subdomain.cpp:311) 2026-01-08T15:32:52.770610Z node 11 :METADATA_PROVIDER ERROR: log.cpp:841: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[11:7593014129672310751:2070];send_to=[0:7307199536658146131:7762515]; 2026-01-08T15:32:52.770716Z node 11 :METADATA_PROVIDER ERROR: log.cpp:841: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2026-01-08T15:32:55.025705Z node 11 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [11:7593014164032049985:2330], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:32:55.025869Z node 11 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:263: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:32:55.026377Z node 11 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [11:7593014164032049995:2331], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:32:55.026443Z node 11 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:263: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:32:55.078044Z node 11 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:32:55.169454Z node 11 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:32:55.268410Z node 11 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [11:7593014164032050162:2349], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:32:55.268541Z node 11 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:263: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:32:55.269086Z node 11 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [11:7593014164032050168:2352], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:32:55.269144Z node 11 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [11:7593014164032050169:2353], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:32:55.269188Z node 11 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:263: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:32:55.275507Z node 11 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710660:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2026-01-08T15:32:55.296072Z node 11 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [11:7593014164032050172:2354], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710660 completed, doublechecking } 2026-01-08T15:32:55.392266Z node 11 :TX_PROXY ERROR: schemereq.cpp:624: Actor# [11:7593014164032050223:2650] txid# 281474976710661, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 43], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } ... waiting for SysViewsRoster update finished 2026-01-08T15:33:04.855710Z node 12 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:125: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, got undelivered to scheme cache: ActorUnknown 2026-01-08T15:33:04.868669Z node 12 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:125: [WorkloadService] [Service] Failed to discover tenant nodes 2026-01-08T15:33:04.873054Z node 12 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [12:316:2360], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2026-01-08T15:33:04.873637Z node 12 :METADATA_PROVIDER ERROR: log.cpp:841: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2026-01-08T15:33:04.873850Z node 12 :METADATA_PROVIDER ERROR: log.cpp:841: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2026-01-08T15:33:05.317781Z node 12 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(12, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2026-01-08T15:33:05.318013Z node 12 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(12, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2026-01-08T15:33:05.358081Z node 12 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1374: Notification cookie mismatch for subscription [12:34:2081] 1767886378558252 != 1767886378558256 2026-01-08T15:33:05.368564Z node 12 :IMPORT WARN: schemeshard_import.cpp:440: Table profiles were not loaded 2026-01-08T15:33:05.423338Z node 12 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(12, (0,0,0,0)) VolatileState: Connecting -> Connected 2026-01-08T15:33:05.533972Z node 12 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: PathErrorUnknown, path: Root/.metadata/script_executions ... waiting for SysViewsRoster update finished (done) 2026-01-08T15:33:05.960112Z node 12 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2026-01-08T15:33:05.975316Z node 12 :TX_CONVEYOR ERROR: log.cpp:841: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2026-01-08T15:33:06.127033Z node 12 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [12:864:2747], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:33:06.127227Z node 12 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [12:875:2752], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:33:06.127367Z node 12 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:263: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:33:06.128589Z node 12 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [12:879:2756], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:33:06.128687Z node 12 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:263: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:33:06.144290Z node 12 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715657:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2026-01-08T15:33:06.289909Z node 12 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [12:878:2755], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715657 completed, doublechecking } 2026-01-08T15:33:06.326542Z node 12 :TX_PROXY ERROR: schemereq.cpp:624: Actor# [12:935:2793] txid# 281474976715658, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 41], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } PreparedQuery: "c23d10d9-f45c1cfe-3a1edfb7-70055b7d" QueryAst: "(\n(let $1 (PgType \'int4))\n(let $2 \'(\'(\'\"_logical_id\" \'219) \'(\'\"_id\" \'\"a4fe7df-865b8436-1ebcd046-c0a1f37a\") \'(\'\"_partition_mode\" \'\"single\")))\n(let $3 (DqPhyStage \'() (lambda \'() (Iterator (AsList (AsStruct \'(\'\"x\" (PgConst \'1 $1)) \'(\'\"y\" (PgConst \'2 $1)))))) $2))\n(let $4 (DqCnResult (TDqOutput $3 \'\"0\") \'(\'\"y\" \'\"x\")))\n(return (KqpPhysicalQuery \'((KqpPhysicalTx \'($3) \'($4) \'() \'(\'(\'\"type\" \'\"generic\")))) \'((KqpTxResultBinding (ListType (StructType \'(\'\"x\" $1) \'(\'\"y\" $1))) \'\"0\" \'\"0\")) \'(\'(\'\"type\" \'\"query\"))))\n)\n" QueryPlan: "{\"Plan\":{\"Plans\":[{\"PlanNodeId\":2,\"Plans\":[{\"PlanNodeId\":1,\"Operators\":[{\"Inputs\":[],\"Iterator\":\"[{x: \\\"1\\\",y: \\\"2\\\"}]\",\"Name\":\"Iterator\"}],\"Node Type\":\"ConstantExpr\"}],\"Node Type\":\"ResultSet\",\"PlanNodeType\":\"ResultSet\"}],\"Node Type\":\"Query\",\"Stats\":{\"ResourcePoolId\":\"default\"},\"PlanNodeType\":\"Query\"},\"meta\":{\"version\":\"0.2\",\"type\":\"query\"},\"tables\":[],\"SimplifiedPlan\":{\"PlanNodeId\":0,\"Plans\":[{\"PlanNodeId\":1,\"Node Type\":\"ResultSet\",\"PlanNodeType\":\"ResultSet\"}],\"Node Type\":\"Query\",\"OptimizerStats\":{\"EquiJoinsCount\":0,\"JoinsCount\":0},\"PlanNodeType\":\"Query\"}}" YdbResults { columns { name: "y" type { pg_type { oid: 23 } } } columns { name: "x" type { pg_type { oid: 23 } } } } QueryDiagnostics: "" >> DataShardTxOrder::ImmediateBetweenOnline_oo8 |86.9%| [TM] {BAZEL_UPLOAD} ydb/core/kqp/ut/pg/unittest ------- [TM] {asan, default-linux-x86_64, release} ydb/services/cms/ut/unittest >> TGRpcCmsTest::SimpleTenantsTest [GOOD] Test command err: 2026-01-08T15:33:01.261958Z node 1 :METADATA_PROVIDER WARN: log.cpp:841: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7593014189641767425:2204];send_to=[0:7307199536658146131:7762515]; 2026-01-08T15:33:01.262003Z node 1 :METADATA_PROVIDER ERROR: log.cpp:841: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2026-01-08T15:33:01.719432Z node 1 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: LookupError, path: Root/.metadata/script_executions 2026-01-08T15:33:01.758292Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2026-01-08T15:33:01.758556Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2026-01-08T15:33:01.803672Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2026-01-08T15:33:01.856556Z node 1 :IMPORT WARN: schemeshard_import.cpp:440: Table profiles were not loaded 2026-01-08T15:33:01.898405Z node 1 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to 
check script execution tables existence, scheme status: PathErrorUnknown, path: Root/.metadata/script_executions 2026-01-08T15:33:02.230012Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2026-01-08T15:33:02.230032Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2026-01-08T15:33:02.230044Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2026-01-08T15:33:02.230130Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2026-01-08T15:33:02.265189Z node 1 :TX_CONVEYOR ERROR: log.cpp:841: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2026-01-08T15:33:02.609592Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2026-01-08T15:33:02.723597Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.h:960: StateWork, received event# 273285120, Sender [1:7593014193936735700:2302], Recipient [1:7593014189641767783:2228]: NKikimr::NConsole::TEvConsole::TEvCreateTenantRequest { Request { path: "/Root/users/user-1" resources { storage_units { unit_kind: "hdd" count: 1 } } } UserToken: "" PeerName: "ipv6:[::1]:33670" } 2026-01-08T15:33:02.723649Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.h:963: StateWork, processing event TEvConsole::TEvCreateTenantRequest 2026-01-08T15:33:02.723676Z node 1 :CMS_TENANTS TRACE: tx_processor.cpp:51: TTxProcessor(tenants) enqueue tx 2026-01-08T15:33:02.723691Z node 1 :CMS_TENANTS TRACE: tx_processor.cpp:245: TTxProcessor(tenants) starts new tx 2026-01-08T15:33:02.723870Z node 1 :CMS_TENANTS DEBUG: console__create_tenant.cpp:71: TTxCreateTenant: Request { path: "/Root/users/user-1" resources { storage_units { unit_kind: "hdd" count: 1 } } } UserToken: "" PeerName: "ipv6:[::1]:33670" 2026-01-08T15:33:02.724017Z node 1 :CMS_TENANTS DEBUG: console__create_tenant.cpp:365: Add tenant /Root/users/user-1 (txid = 1767886382722234) 2026-01-08T15:33:02.762812Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.cpp:2568: Add tenant /Root/users/user-1 to database state=CREATING_POOLS coordinators=3 mediators=3 planresolution=10 timecastbucketspermediator=2 issue= txid=1767886382722234 subdomainversion=1 confirmedsubdomain=0 attrs= generation=1 errorcode=STATUS_CODE_UNSPECIFIED isExternalSubDomain=1 isExternalHive=1 isExternalSysViewProcessor=1 isExternalStatisticsAggregator=1 areResourcesShared=0 sharedDomainId= 2026-01-08T15:33:02.763094Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.cpp:2628: Add tenant pool /Root/users/user-1:hdd to database kind=hdd config=BoxId: 999 StoragePoolId: 4 Name: "/Root/users/user-1:hdd" ErasureSpecies: "none" VDiskKind: "Default" Kind: "hdd" NumGroups: 1 PDiskFilter { Property { Type: ROT } } allocatednumgroups=0 state=NOT_ALLOCATED 2026-01-08T15:33:02.772473Z node 1 :CMS_TENANTS DEBUG: console__create_tenant.cpp:375: TTxCreateTenant Complete 2026-01-08T15:33:02.773683Z node 1 :CMS_TENANTS TRACE: console__create_tenant.cpp:383: Send: NKikimr::NConsole::TEvConsole::TEvCreateTenantResponse { Response { operation { id: 
"ydb://cmsrequest/5?tenant=/Root/users/user-1&cmstid=72057594037936131&txid=1767886382722234&action=1" } } } 2026-01-08T15:33:02.773858Z node 1 :CMS_TENANTS TRACE: tx_processor.cpp:60: TTxProcessor(tenants) completed tx 2026-01-08T15:33:02.773936Z node 1 :CMS_TENANTS DEBUG: console_tenants_manager.cpp:158: TPoolManip(/Root/users/user-1:hdd) Bootstrap 2026-01-08T15:33:02.774120Z node 1 :CMS_TENANTS DEBUG: console_tenants_manager.cpp:117: TPoolManip(/Root/users/user-1:hdd) read pool state: Request { Command { ReadStoragePool { BoxId: 999 Name: "/Root/users/user-1:hdd" } } } 2026-01-08T15:33:02.774699Z node 1 :CMS_TENANTS DEBUG: console_tenants_manager.cpp:198: TPoolManip(/Root/users/user-1:hdd) got read response: Status { Success: true } Success: true ConfigTxSeqNo: 5 2026-01-08T15:33:02.774848Z node 1 :CMS_TENANTS DEBUG: console_tenants_manager.cpp:131: TPoolManip(/Root/users/user-1:hdd) send pool request: Request { Command { DefineStoragePool { BoxId: 999 StoragePoolId: 4 Name: "/Root/users/user-1:hdd" ErasureSpecies: "none" VDiskKind: "Default" Kind: "hdd" NumGroups: 1 PDiskFilter { Property { Type: ROT } } } } } 2026-01-08T15:33:02.780550Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.h:960: StateWork, received event# 273285131, Sender [1:7593014193936735710:2303], Recipient [1:7593014189641767783:2228]: NKikimr::NConsole::TEvConsole::TEvGetOperationRequest { Request { id: "ydb://cmsrequest/5?tenant=/Root/users/user-1&cmstid=72057594037936131&txid=1767886382722234&action=1" } UserToken: "" } 2026-01-08T15:33:02.780582Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.h:965: StateWork, processing event TEvConsole::TEvGetOperationRequest 2026-01-08T15:33:02.780813Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.cpp:3344: Send TEvConsole::TEvGetOperationResponse: Response { operation { id: "ydb://cmsrequest/5?tenant=/Root/users/user-1&cmstid=72057594037936131&txid=1767886382722234&action=1" } } 2026-01-08T15:33:02.784887Z node 1 :CMS_TENANTS DEBUG: console_tenants_manager.cpp:244: TPoolManip(/Root/users/user-1:hdd) got config response: Status { Success: true } Success: true ConfigTxSeqNo: 6 2026-01-08T15:33:02.784952Z node 1 :CMS_TENANTS DEBUG: console_tenants_manager.cpp:168: TPoolManip(/Root/users/user-1:hdd) reply with NKikimr::NConsole::TTenantsManager::TEvPrivate::TEvPoolAllocated 2026-01-08T15:33:02.785031Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.h:960: StateWork, received event# 2146435079, Sender [1:7593014193936735705:2228], Recipient [1:7593014189641767783:2228]: NKikimr::NConsole::TTenantsManager::TEvPrivate::TEvPoolAllocated 2026-01-08T15:33:02.785058Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.h:972: StateWork, processing event TEvPrivate::TEvPoolAllocated 2026-01-08T15:33:02.785074Z node 1 :CMS_TENANTS TRACE: tx_processor.cpp:51: TTxProcessor(tenants) enqueue tx 2026-01-08T15:33:02.785082Z node 1 :CMS_TENANTS TRACE: tx_processor.cpp:245: TTxProcessor(tenants) starts new tx 2026-01-08T15:33:02.785150Z node 1 :CMS_TENANTS DEBUG: console__update_pool_state.cpp:28: TTxUpdatePoolState for pool /Root/users/user-1:hdd of /Root/users/user-1 state=ALLOCATED 2026-01-08T15:33:02.785178Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.cpp:3038: Update pool state in database for /Root/users/user-1:hdd state=ALLOCATED allocatednumgroups=1 2026-01-08T15:33:02.785248Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.cpp:3197: Update subdomain version in database for /Root/users/user-1 subdomainversion=2 2026-01-08T15:33:02.788294Z node 1 :CMS_TENANTS DEBUG: 
console__update_pool_state.cpp:73: TTxUpdatePoolState complete for /Root/users/user-1:hdd 2026-01-08T15:33:02.788352Z node 1 :CMS_TENANTS TRACE: tx_processor.cpp:51: TTxProcessor(tenants) enqueue tx 2026-01-08T15:33:02.788366Z node 1 :CMS_TENANTS TRACE: tx_processor.cpp:60: TTxProcessor(tenants) completed tx 2026-01-08T15:33:02.788379Z node 1 :CMS_TENANTS TRACE: tx_processor.cpp:245: TTxProcessor(tenants) starts new tx 2026-01-08T15:33:02.788459Z node 1 :CMS_TENANTS DEBUG: console__update_tenant_state.cpp:23: TTxUpdateTenantState for tenant /Root/users/user-1 to CREATING_SUBDOMAIN 2026-01-08T15:33:02.788489Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.cpp:3137: Update tenant state in database for /Root/users/user-1 state=CREATING_SUBDOMAIN txid=1767886382722234 errorcode=STATUS_CODE_UNSPECIFIED issue= 2026-01-08T15:33:02.792377Z node 1 :CMS_TENANTS DEBUG: console__update_tenant_state.cpp:45: TTxUpdateTenantState complete for /Root/users/user-1 2026-01-08T15:33:02.807793Z node 1 :CMS_TENANTS TRACE: tx_processor.cpp:60: TTxProcessor(tenants) completed tx 2026-01-08T15:33:02.807872Z node 1 :CMS_TENANTS DEBUG: console_tenants_manager.cpp:775: TSubdomainManip(/Root/users/user-1)::Bootstrap 2026-01-08T15:33:02.807882Z node 1 :CMS_TENANTS DEBUG: console_tenants_manager.cpp:597: TSubDomainManip(/Root/users/user-1) create subdomain 2026-01-08T15:33:02.817245Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.cpp:615: TSubdomainManip(/Root/users/user-1) send subdomain creation cmd: NKikimrTxUserProxy.TEvProposeTransaction Transaction { ModifyScheme { WorkingDir: "/Root" OperationType: ESchemeOpCreateExtSubDomain SubDomain { Name: "users/user-1" ExternalSchemeShard: true ExternalHive: true ExternalSysViewProcessor: true ExternalStatisticsAggregator: true GraphShard: true } } } ExecTimeoutPeriod: 18446744073709551615 DatabaseName: "Root" PeerName: "ipv6:[::1]:33670" 2026-01-08T15:33:02.819053Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateExtSubDomain, opId: 281474976710658:1, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_extsubdomain.cpp:194) 2026-01-08T15:33:02.828839Z node 1 :CMS_TENANTS DEBUG: console_tenants_manager.cpp:823: TSubdomainManip(/Root/users/user-1) got propose result: Status: 53 TxId: 281474976710658 SchemeShardStatus: 1 SchemeShardTabletId: 72057594046644480 PathId: 39 2026-01-08T15:33:02.828976Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.cpp:759: TSubdomainManip(/Root/users/user-1) send notification request: NKikimrScheme.TEvNotifyTxCompletion TxId: 281474976710658 2026-01-08T15:33:02.832017Z node 1 :CMS_TENANTS DEBUG: console_tenants_manager.cpp:795: TSubdomainManip(/Root/users/user-1) got TEvNotifyTxCompletionRegistered: TxId: 281474976710658 2026-01-08T15:33:02.837493Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.h:960: StateWork, received event# 273285131, Sender [1:7593014193936735787:2306], Recipient [1:7593014189641767783:2228]: NKikimr::NConsole::TEvConsole::TEvGetOperationRequest { Request { id: "ydb://cmsrequest/5?tenant=/Root/users/user-1&cmstid=72057594037936131&txid=1767886382722234&action=1" } UserToken: "" } 2026-01-08T15:33:02.837539Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.h:965: StateWork, processing event TEvConsole::TEvGetOperationRequest 2026-01-08T15:33:02.837714Z node 1 :CMS_TENANTS TRACE: console_tenants_manager ... 
e::TEvPoolAllocated 2026-01-08T15:33:03.820320Z node 1 :CMS_TENANTS TRACE: tx_processor.cpp:51: TTxProcessor(tenants) enqueue tx 2026-01-08T15:33:03.820329Z node 1 :CMS_TENANTS TRACE: tx_processor.cpp:245: TTxProcessor(tenants) starts new tx 2026-01-08T15:33:03.820361Z node 1 :CMS_TENANTS DEBUG: console__update_pool_state.cpp:28: TTxUpdatePoolState for pool /Root/users/user-1:hdd of /Root/users/user-1 state=ALLOCATED 2026-01-08T15:33:03.820377Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.cpp:3038: Update pool state in database for /Root/users/user-1:hdd state=ALLOCATED allocatednumgroups=2 2026-01-08T15:33:03.827087Z node 1 :CMS_TENANTS DEBUG: console__update_pool_state.cpp:73: TTxUpdatePoolState complete for /Root/users/user-1:hdd 2026-01-08T15:33:03.827116Z node 1 :CMS_TENANTS TRACE: tx_processor.cpp:60: TTxProcessor(tenants) completed tx 2026-01-08T15:33:03.830563Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.h:960: StateWork, received event# 273285123, Sender [1:7593014198231703563:2368], Recipient [1:7593014189641767783:2228]: NKikimr::NConsole::TEvConsole::TEvListTenantsRequest { Request { } UserToken: "" PeerName: "ipv6:[::1]:33670" } 2026-01-08T15:33:03.830627Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.h:967: StateWork, processing event TEvConsole::TEvListTenantsRequest 2026-01-08T15:33:03.831424Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.cpp:3412: Send TEvConsole::TEvListTenantsResponse: Response { operation { ready: true status: SUCCESS result { [type.googleapis.com/Ydb.Cms.ListDatabasesResult] { paths: "/Root/users/user-1" } } } } 2026-01-08T15:33:03.838228Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.h:960: StateWork, received event# 273285130, Sender [1:7593014198231703602:2369], Recipient [1:7593014189641767783:2228]: NKikimr::NConsole::TEvConsole::TEvRemoveTenantRequest { Request { path: "/Root/users/user-1" } UserToken: "" PeerName: "ipv6:[::1]:33670" } 2026-01-08T15:33:03.838251Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.h:969: StateWork, processing event TEvConsole::TEvRemoveTenantRequest 2026-01-08T15:33:03.838265Z node 1 :CMS_TENANTS TRACE: tx_processor.cpp:51: TTxProcessor(tenants) enqueue tx 2026-01-08T15:33:03.838282Z node 1 :CMS_TENANTS TRACE: tx_processor.cpp:245: TTxProcessor(tenants) starts new tx 2026-01-08T15:33:03.838392Z node 1 :CMS_TENANTS DEBUG: console__remove_tenant.cpp:53: TTxRemoveTenant: Request { path: "/Root/users/user-1" } UserToken: "" PeerName: "ipv6:[::1]:33670" 2026-01-08T15:33:03.838550Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.cpp:3137: Update tenant state in database for /Root/users/user-1 state=REMOVING_SUBDOMAIN txid=1767886383837017 errorcode=STATUS_CODE_UNSPECIFIED issue= 2026-01-08T15:33:03.838613Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.cpp:3170: Update user token in database for /Root/users/user-1 2026-01-08T15:33:03.838676Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.cpp:3183: Update peerName in database for /Root/users/user-1 2026-01-08T15:33:03.842889Z node 1 :CMS_TENANTS DEBUG: console__remove_tenant.cpp:92: TTxRemoveTenant Complete 2026-01-08T15:33:03.843102Z node 1 :CMS_TENANTS TRACE: console__remove_tenant.cpp:99: Send: NKikimr::NConsole::TEvConsole::TEvRemoveTenantResponse { Response { operation { id: "ydb://cmsrequest/5?tenant=/Root/users/user-1&cmstid=72057594037936131&txid=1767886383837017&action=2" } } } 2026-01-08T15:33:03.843163Z node 1 :CMS_TENANTS TRACE: tx_processor.cpp:60: TTxProcessor(tenants) completed tx 2026-01-08T15:33:03.844249Z node 1 
:CMS_TENANTS DEBUG: console_tenants_manager.cpp:775: TSubdomainManip(/Root/users/user-1)::Bootstrap 2026-01-08T15:33:03.845701Z node 1 :CMS_TENANTS DEBUG: console_tenants_manager.cpp:872: TSubdomainManip(/Root/users/user-1) got describe result: Status: StatusSuccess Path: "/Root/users/user-1" PathDescription { Self { Name: "user-1" PathId: 39 SchemeshardId: 72057594046644480 PathType: EPathTypeExtSubDomain CreateFinished: true CreateTxId: 281474976710658 CreateStep: 1767886382878 ParentPathId: 38 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 5 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 5 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 3 SecurityStateVersion: 0 } } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 3 PlanResolution: 10 Coordinators: 72075186224037895 Coordinators: 72075186224037893 Coordinators: 72075186224037896 TimeCastBucketsPerMediator: 2 Mediators: 72075186224037892 Mediators: 72075186224037890 Mediators: 72075186224037889 SchemeShard: 72075186224037897 Hive: 72075186224037888 SysViewProcessor: 72075186224037891 StatisticsAggregator: 72075186224037894 } DomainKey { SchemeShard: 72057594046644480 PathId: 39 } StoragePools { Name: "/Root/users/user-1:hdd" Kind: "hdd" } PathsInside: 0 PathsLimit: 10000 ShardsInside: 10 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046644480 PathId: 39 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SecurityState { } SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 39 PathOwnerId: 72057594046644480 2026-01-08T15:33:03.845730Z node 1 :CMS_TENANTS DEBUG: console_tenants_manager.cpp:629: TSubDomainManip(/Root/users/user-1) drop subdomain 2026-01-08T15:33:03.845906Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.cpp:642: TSubdomainManip(/Root/users/user-1) send subdomain drop cmd: NKikimrTxUserProxy.TEvProposeTransaction Transaction { ModifyScheme { WorkingDir: "/Root/users" OperationType: ESchemeOpForceDropExtSubDomain Drop { Name: "user-1" } } } ExecTimeoutPeriod: 18446744073709551615 DatabaseName: "Root" PeerName: "ipv6:[::1]:33670" 2026-01-08T15:33:03.849168Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:5826: Mark as Dropping path id [OwnerId: 72057594046644480, LocalPathId: 39] by tx: 281474976710660 2026-01-08T15:33:03.855193Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpForceDropExtSubDomain, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_drop_extsubdomain.cpp:342) 2026-01-08T15:33:03.882885Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.h:960: StateWork, received event# 273285131, Sender [1:7593014198231703628:2370], Recipient [1:7593014189641767783:2228]: 
NKikimr::NConsole::TEvConsole::TEvGetOperationRequest { Request { id: "ydb://cmsrequest/5?tenant=/Root/users/user-1&cmstid=72057594037936131&txid=1767886383837017&action=2" } UserToken: "" } 2026-01-08T15:33:03.882913Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.h:965: StateWork, processing event TEvConsole::TEvGetOperationRequest 2026-01-08T15:33:03.883089Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.cpp:3344: Send TEvConsole::TEvGetOperationResponse: Response { operation { id: "ydb://cmsrequest/5?tenant=/Root/users/user-1&cmstid=72057594037936131&txid=1767886383837017&action=2" } } 2026-01-08T15:33:03.886635Z node 1 :CMS_TENANTS DEBUG: console_tenants_manager.cpp:823: TSubdomainManip(/Root/users/user-1) got propose result: Status: 53 TxId: 281474976710660 SchemeShardStatus: 1 SchemeShardTabletId: 72057594046644480 2026-01-08T15:33:03.886685Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.cpp:759: TSubdomainManip(/Root/users/user-1) send notification request: NKikimrScheme.TEvNotifyTxCompletion TxId: 281474976710660 2026-01-08T15:33:03.892811Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:5826: Mark as Dropping path id [OwnerId: 72057594046644480, LocalPathId: 39] by tx: 281474976710660 2026-01-08T15:33:03.905376Z node 1 :CMS_TENANTS DEBUG: console_tenants_manager.cpp:795: TSubdomainManip(/Root/users/user-1) got TEvNotifyTxCompletionRegistered: TxId: 281474976710660 2026-01-08T15:33:03.942456Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.h:960: StateWork, received event# 273285131, Sender [1:7593014198231703678:2372], Recipient [1:7593014189641767783:2228]: NKikimr::NConsole::TEvConsole::TEvGetOperationRequest { Request { id: "ydb://cmsrequest/5?tenant=/Root/users/user-1&cmstid=72057594037936131&txid=1767886383837017&action=2" } UserToken: "" } 2026-01-08T15:33:03.942484Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.h:965: StateWork, processing event TEvConsole::TEvGetOperationRequest 2026-01-08T15:33:03.942713Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.cpp:3344: Send TEvConsole::TEvGetOperationResponse: Response { operation { id: "ydb://cmsrequest/5?tenant=/Root/users/user-1&cmstid=72057594037936131&txid=1767886383837017&action=2" } } 2026-01-08T15:33:03.953788Z node 1 :HIVE WARN: tx__delete_tablet.cpp:88: HIVE#72057594037968897 THive::TTxDeleteTablet tablet (72057594046644480,2) wasn't found - using supplied 72075186224037897 2026-01-08T15:33:03.953915Z node 1 :HIVE WARN: tx__delete_tablet.cpp:88: HIVE#72057594037968897 THive::TTxDeleteTablet tablet (72057594046644480,7) wasn't found - using supplied 72075186224037890 2026-01-08T15:33:03.953958Z node 1 :HIVE WARN: tx__delete_tablet.cpp:88: HIVE#72057594037968897 THive::TTxDeleteTablet tablet (72057594046644480,4) wasn't found - using supplied 72075186224037893 2026-01-08T15:33:03.953990Z node 1 :HIVE WARN: tx__delete_tablet.cpp:88: HIVE#72057594037968897 THive::TTxDeleteTablet tablet (72057594046644480,10) wasn't found - using supplied 72075186224037894 2026-01-08T15:33:03.954247Z node 1 :HIVE WARN: tx__delete_tablet.cpp:88: HIVE#72057594037968897 THive::TTxDeleteTablet tablet (72057594046644480,9) wasn't found - using supplied 72075186224037891 2026-01-08T15:33:03.954280Z node 1 :HIVE WARN: tx__delete_tablet.cpp:88: HIVE#72057594037968897 THive::TTxDeleteTablet tablet (72057594046644480,6) wasn't found - using supplied 72075186224037892 2026-01-08T15:33:03.954363Z node 1 :HIVE WARN: tx__delete_tablet.cpp:88: HIVE#72057594037968897 THive::TTxDeleteTablet tablet (72057594046644480,3) wasn't 
found - using supplied 72075186224037895 2026-01-08T15:33:03.954391Z node 1 :HIVE WARN: tx__delete_tablet.cpp:88: HIVE#72057594037968897 THive::TTxDeleteTablet tablet (72057594046644480,8) wasn't found - using supplied 72075186224037889 2026-01-08T15:33:03.954421Z node 1 :HIVE WARN: tx__delete_tablet.cpp:88: HIVE#72057594037968897 THive::TTxDeleteTablet tablet (72057594046644480,5) wasn't found - using supplied 72075186224037896 2026-01-08T15:33:03.957822Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__delete_tablet_reply.cpp:41: Got DeleteTabletReply with Forward response from Hive 72057594037968897 to Hive 72075186224037888 shardIdx 72057594046644480:2 2026-01-08T15:33:03.957953Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__delete_tablet_reply.cpp:41: Got DeleteTabletReply with Forward response from Hive 72057594037968897 to Hive 72075186224037888 shardIdx 72057594046644480:7 2026-01-08T15:33:03.958019Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__delete_tablet_reply.cpp:41: Got DeleteTabletReply with Forward response from Hive 72057594037968897 to Hive 72075186224037888 shardIdx 72057594046644480:4 2026-01-08T15:33:03.958054Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__delete_tablet_reply.cpp:41: Got DeleteTabletReply with Forward response from Hive 72057594037968897 to Hive 72075186224037888 shardIdx 72057594046644480:10 |86.9%| [TM] {BAZEL_UPLOAD} ydb/services/cms/ut/unittest >> TestSqsTopicHttpProxy::TestGetQueueUrlOfNotExistingQueue [GOOD] >> DataShardTxOrder::ImmediateBetweenOnline_oo8_dirty >> DataShardWrite::DelayedVolatileTxAndEvWrite [GOOD] >> DataShardWrite::DistributedInsertReadSetWithoutLocks+Volatile >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-BuiltinUser-ModifyUser-48 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-LocalUser-CreateUser-1 >> TestSqsTopicHttpProxy::TestReceiveMessage [GOOD] >> test_s3.py::TestPgYdbS3TTL::test_s3[table_index_0_UNIQUE_SYNC-pk_types0-all_types0-index0--UNIQUE-SYNC] [GOOD] |86.9%| [TA] $(B)/ydb/services/cms/ut/test-results/unittest/{meta.json ... results_accumulator.log} |86.9%| [TA] {BAZEL_UPLOAD} $(B)/ydb/services/cms/ut/test-results/unittest/{meta.json ... 
results_accumulator.log} ------- [TM] {asan, default-linux-x86_64, release} ydb/core/http_proxy/ut/sqs_topic_ut/unittest >> TestSqsTopicHttpProxy::TestSendMessageBatchLong [GOOD] Test command err: 2026-01-08T15:32:51.796302Z node 1 :METADATA_PROVIDER WARN: log.cpp:841: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7593014150668174739:2237];send_to=[0:7307199536658146131:7762515]; 2026-01-08T15:32:51.796961Z node 1 :METADATA_PROVIDER ERROR: log.cpp:841: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2026-01-08T15:32:52.310938Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2026-01-08T15:32:52.311065Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2026-01-08T15:32:52.357929Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2026-01-08T15:32:52.533349Z node 1 :IMPORT WARN: schemeshard_import.cpp:440: Table profiles were not loaded 2026-01-08T15:32:52.533876Z node 1 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: LookupError, path: Root/.metadata/script_executions 2026-01-08T15:32:52.540239Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1374: Notification cookie mismatch for subscription [1:7593014150668174527:2081] 1767886371755740 != 1767886371755743 2026-01-08T15:32:52.805416Z node 1 :TX_CONVEYOR ERROR: log.cpp:841: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2026-01-08T15:32:52.805676Z node 1 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: PathErrorUnknown, path: Root/.metadata/script_executions 2026-01-08T15:32:52.832826Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2026-01-08T15:32:52.832851Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2026-01-08T15:32:52.832858Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2026-01-08T15:32:52.833009Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2026-01-08T15:32:53.120313Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2026-01-08T15:32:53.372480Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterUserAttributes, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_alter_user_attrs.cpp:72) 2026-01-08T15:32:53.390566Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:123: NotifyTxCompletion, unknown transaction, txId: 281474976710658, at schemeshard: 
72057594046644480 2026-01-08T15:32:53.403904Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) 2026-01-08T15:32:53.431011Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:123: NotifyTxCompletion, unknown transaction, txId: 281474976710660, at schemeshard: 72057594046644480 2026-01-08T15:32:53.506563Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:32:53.657683Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:32:53.750766Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:123: NotifyTxCompletion, unknown transaction, txId: 281474976710663, at schemeshard: 72057594046644480 2026-01-08T15:32:53.755888Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:32:53.810506Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:123: NotifyTxCompletion, unknown transaction, txId: 281474976710665, at schemeshard: 72057594046644480 2026-01-08T15:32:53.815434Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:32:53.912923Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:32:53.997070Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:32:54.072129Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710669:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at 
ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:32:54.142052Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710670:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:32:54.221622Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710671:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:32:54.294409Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:32:56.400453Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7593014172143012667:2369], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:32:56.400555Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7593014172143012675:2372], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:32:56.400626Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:263: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:32:56.401423Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7593014172143012682:2374], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:32:56.401492Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:263: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:32:56.405372Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710673:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2026-01-08T15:32:56.424731Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7593014172143012681:2373], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710673 completed, doublechecking } 2026-01-08T15:32:56.481578Z node 1 :TX_PROXY ERROR: schemereq.cpp:624: Actor# [1:7593014172143012734:3106] txid# 281474976710674, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 54], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2026-01-08T15:32:56.785569Z node 1 :METADATA_PROVIDER ERROR: log.cpp:841: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7593014150668174739:2237];send_to=[0:7307199536658146131:7762515]; 2026-01-08T15:32:56.790201Z node 1 :METADATA_PROVIDER ERROR: log.cpp:841: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2026-01-08T15:32:57.020089Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710676:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:32:57.100887Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710677:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:32:57.143016Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but pr ... [0][StateIdle] Process MLP pending events. Count 0 2026-01-08T15:33:09.387505Z node 2 :PERSQUEUE DEBUG: partition_compaction.cpp:183: [72075186224037907][Partition][0][StateIdle] No data for blobs compaction 2026-01-08T15:33:09.387620Z node 2 :PERSQUEUE DEBUG: partition.cpp:1433: [72075186224037907][Partition][0][StateIdle] Handle TEvPQ::TEvTxCommit Step 1767886389430, TxId 281474976710690 2026-01-08T15:33:09.387638Z node 2 :PERSQUEUE DEBUG: partition.cpp:2329: [72075186224037907][Partition][0][StateIdle] Process user action and tx events 2026-01-08T15:33:09.387647Z node 2 :PERSQUEUE DEBUG: partition.cpp:2388: [72075186224037907][Partition][0][StateIdle] Events: 0, PendingCommits: 1, PendingWrites: 0 2026-01-08T15:33:09.387656Z node 2 :PERSQUEUE DEBUG: partition.cpp:2337: [72075186224037907][Partition][0][StateIdle] Process user action and tx pending commits 2026-01-08T15:33:09.387674Z node 2 :PERSQUEUE DEBUG: partition.cpp:2480: [72075186224037907][Partition][0][StateIdle] TPartition::ProcessUserActionAndTxPendingCommit(TTransaction[ProposeConfig]) 2026-01-08T15:33:09.387815Z node 2 :PERSQUEUE DEBUG: partition.cpp:3950: [72075186224037907][Partition][0][StateIdle] Schedule reply tx done 281474976710690 2026-01-08T15:33:09.387828Z node 2 :PERSQUEUE DEBUG: partition.cpp:2388: [72075186224037907][Partition][0][StateIdle] Events: 0, PendingCommits: 0, PendingWrites: 1 2026-01-08T15:33:09.387837Z node 2 :PERSQUEUE DEBUG: partition.cpp:2342: [72075186224037907][Partition][0][StateIdle] Batch completed (1) 2026-01-08T15:33:09.387849Z node 2 :PERSQUEUE DEBUG: partition.cpp:2347: [72075186224037907][Partition][0][StateIdle] Try persist 2026-01-08T15:33:09.388053Z node 2 :PERSQUEUE DEBUG: read.h:275: [72075186224037907][PQCacheProxy]CacheProxy. 
Passthrough write request to KV 2026-01-08T15:33:09.397719Z node 2 :PERSQUEUE DEBUG: partition.cpp:2153: [72075186224037907][Partition][0][StateIdle] Received TEvKeyValue::TEvResponse 2026-01-08T15:33:09.397909Z node 2 :PERSQUEUE DEBUG: partition_mlp.cpp:122: [72075186224037907][Partition][0][StateIdle] Initializing MLP Consumers: 1 2026-01-08T15:33:09.397921Z node 2 :PERSQUEUE INFO: partition_mlp.cpp:127: [72075186224037907][Partition][0][StateIdle] Updateing MLP consumer 'consumer' config 2026-01-08T15:33:09.398022Z node 2 :PERSQUEUE DEBUG: partition_write.cpp:572: [72075186224037907][Partition][0][StateIdle] TPartition::HandleWriteResponse writeNewSize# 0 WriteNewSizeFromSupportivePartitions# 0 2026-01-08T15:33:09.398045Z node 2 :PERSQUEUE DEBUG: partition.cpp:2329: [72075186224037907][Partition][0][StateIdle] Process user action and tx events 2026-01-08T15:33:09.398056Z node 2 :PERSQUEUE DEBUG: partition.cpp:2388: [72075186224037907][Partition][0][StateIdle] Events: 0, PendingCommits: 0, PendingWrites: 0 2026-01-08T15:33:09.398069Z node 2 :PERSQUEUE DEBUG: partition.cpp:2337: [72075186224037907][Partition][0][StateIdle] Process user action and tx pending commits 2026-01-08T15:33:09.398081Z node 2 :PERSQUEUE DEBUG: partition.cpp:2388: [72075186224037907][Partition][0][StateIdle] Events: 0, PendingCommits: 0, PendingWrites: 0 2026-01-08T15:33:09.398090Z node 2 :PERSQUEUE DEBUG: partition.cpp:2347: [72075186224037907][Partition][0][StateIdle] Try persist 2026-01-08T15:33:09.398120Z node 2 :PERSQUEUE DEBUG: partition_compaction.cpp:183: [72075186224037907][Partition][0][StateIdle] No data for blobs compaction 2026-01-08T15:33:09.398511Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:399: [PQ: 72075186224037907] Apply new config PartitionConfig { MaxCountInPartition: 2147483647 LifetimeSeconds: 86400 SourceIdLifetimeSeconds: 1382400 WriteSpeedInBytesPerSecond: 1048576 BurstSize: 1048576 TotalPartitions: 1 SourceIdMaxCounts: 6000000 } PartitionIds: 0 TopicName: "topic1" Version: 0 RequireAuthWrite: true RequireAuthRead: true FormatVersion: 0 Codecs { } TopicPath: "/Root/topic1" YcCloudId: "cloud4" YcFolderId: "folder4" YdbDatabaseId: "database4" YdbDatabasePath: "/Root" Partitions { PartitionId: 0 Status: Active CreateVersion: 1 TabletId: 0 } MeteringMode: METERING_MODE_REQUEST_UNITS AllPartitions { PartitionId: 0 Status: Active CreateVersion: 1 TabletId: 0 } Consumers { Name: "consumer" ReadFromTimestampsMs: 0 FormatVersion: 0 Codec { } ServiceType: "data-streams" Version: 0 Generation: 0 Type: CONSUMER_TYPE_MLP KeepMessageOrder: false DeadLetterPolicyEnabled: false DeadLetterPolicy: DEAD_LETTER_POLICY_UNSPECIFIED MaxProcessingAttempts: 0 DefaultProcessingTimeoutSeconds: 20 } MonitoringProjectId: "" 2026-01-08T15:33:09.398574Z node 2 :PERSQUEUE NOTICE: pq_impl.cpp:947: [PQ: 72075186224037907] metering mode METERING_MODE_REQUEST_UNITS 2026-01-08T15:33:09.398686Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:3626: [PQ: 72075186224037907] Send TEvKeyValue::TEvRequest (WRITE_TX_COOKIE) 2026-01-08T15:33:09.400149Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:1059: [PQ: 72075186224037907] Handle TEvKeyValue::TEvResponse (WRITE_TX_COOKIE) { status: SUCCESS, issues: }ct.IsSuccess() = 1, ct.IsTransportError() = 0, ct.GetEndpoint() = [::]:9508 { status: SUCCESS, issues: }consumer 2026-01-08T15:33:09.416565Z node 2 :HTTP DEBUG: http_proxy_incoming.cpp:97: (#37,[::1]:60638) incoming connection opened 2026-01-08T15:33:09.416635Z node 2 :HTTP DEBUG: http_proxy_incoming.cpp:170: (#37,[::1]:60638) -> (POST /Root, 1406 bytes) 
2026-01-08T15:33:09.416777Z node 2 :HTTP_PROXY INFO: http_service.cpp:102: proxy service: incoming request from [58c6:2ecf:e47b:0:40c6:2ecf:e47b:0] request [SendMessageBatch] url [/Root] database [/Root] requestId: 5c1445e7-5b2fec0d-b91c7a19-11deb771 2026-01-08T15:33:09.417745Z node 2 :HTTP_PROXY INFO: http_req.cpp:1324: http request [SendMessageBatch] requestId [5c1445e7-5b2fec0d-b91c7a19-11deb771] got new request from [58c6:2ecf:e47b:0:40c6:2ecf:e47b:0] database '/Root' stream '' 2026-01-08T15:33:09.418388Z node 2 :TICKET_PARSER TRACE: ticket_parser_impl.h:491: Ticket **** (C9049D91) asking for AccessServiceAuthorization(ydb.schemas.getMetadata) 2026-01-08T15:33:09.418450Z node 2 :TICKET_PARSER TRACE: ticket_parser_impl.h:491: Ticket **** (C9049D91) asking for AccessServiceAuthorization(ydb.databases.connect) 2026-01-08T15:33:09.418476Z node 2 :TICKET_PARSER TRACE: ticket_parser_impl.h:491: Ticket **** (C9049D91) asking for AccessServiceAuthorization(ydb.databases.create) 2026-01-08T15:33:09.418498Z node 2 :TICKET_PARSER TRACE: ticket_parser_impl.h:491: Ticket **** (C9049D91) asking for AccessServiceAuthorization(ydb.tables.select) 2026-01-08T15:33:09.418520Z node 2 :TICKET_PARSER TRACE: ticket_parser_impl.h:491: Ticket **** (C9049D91) asking for AccessServiceAuthorization(ydb.tables.write) 2026-01-08T15:33:09.418545Z node 2 :TICKET_PARSER TRACE: ticket_parser_impl.h:491: Ticket **** (C9049D91) asking for AccessServiceAuthorization(ydb.streams.write) 2026-01-08T15:33:09.418570Z node 2 :TICKET_PARSER TRACE: ticket_parser_impl.h:491: Ticket **** (C9049D91) asking for AccessServiceAuthorization(ydb.databases.list) 2026-01-08T15:33:09.428212Z node 2 :TICKET_PARSER TRACE: ticket_parser_impl.h:1480: Ticket **** (C9049D91) permission ydb.tables.write now has a permanent error "Permission Denied" retryable:0 2026-01-08T15:33:09.429223Z node 2 :TICKET_PARSER TRACE: ticket_parser_impl.h:1480: Ticket **** (C9049D91) permission ydb.tables.select now has a permanent error "Permission Denied" retryable:0 2026-01-08T15:33:09.429269Z node 2 :TICKET_PARSER TRACE: ticket_parser_impl.h:1480: Ticket **** (C9049D91) permission ydb.databases.create now has a permanent error "Permission Denied" retryable:0 2026-01-08T15:33:09.430300Z node 2 :TICKET_PARSER TRACE: ticket_parser_impl.h:1480: Ticket **** (C9049D91) permission ydb.schemas.getMetadata now has a permanent error "Permission Denied" retryable:0 2026-01-08T15:33:09.430357Z node 2 :TICKET_PARSER TRACE: ticket_parser_impl.h:1480: Ticket **** (C9049D91) permission ydb.streams.write now has a permanent error "Permission Denied" retryable:0 2026-01-08T15:33:09.435468Z node 2 :TICKET_PARSER TRACE: ticket_parser_impl.h:1465: Ticket **** (C9049D91) permission ydb.databases.list now has a valid subject "Service1_id@as" 2026-01-08T15:33:09.436800Z node 2 :TICKET_PARSER TRACE: ticket_parser_impl.h:1480: Ticket **** (C9049D91) permission ydb.databases.connect now has a permanent error "Permission Denied" retryable:0 2026-01-08T15:33:09.436890Z node 2 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1875: Ticket **** (C9049D91) () has now valid token of Service1_id@as 2026-01-08T15:33:09.437018Z node 2 :HTTP_PROXY DEBUG: auth_actors.cpp:130: http request [SendMessageBatch] requestId [5c1445e7-5b2fec0d-b91c7a19-11deb771] [auth] Authorized successfully 2026-01-08T15:33:09.437074Z node 2 :HTTP_PROXY INFO: http_req.cpp:1077: http request [SendMessageBatch] requestId [5c1445e7-5b2fec0d-b91c7a19-11deb771] sending grpc request to '' database: '/Root' iam token size: 0 
2026-01-08T15:33:09.437843Z node 2 :HTTP_PROXY DEBUG: http_req.cpp:1254: http request [SendMessageBatch] requestId [5c1445e7-5b2fec0d-b91c7a19-11deb771] Not retrying GRPC response. Code: 400, Error: AWS.SimpleQueueService.TooManyEntriesInBatchRequest 2026-01-08T15:33:09.437939Z node 2 :HTTP_PROXY INFO: http_req.cpp:1602: http request [SendMessageBatch] requestId [5c1445e7-5b2fec0d-b91c7a19-11deb771] reply with status: STATUS_UNDEFINED message: The batch request contains more entries than permissible. Http output full {"__type":"AWS.SimpleQueueService.TooManyEntriesInBatchRequest","message":"The batch request contains more entries than permissible."} 2026-01-08T15:33:09.438140Z node 2 :HTTP DEBUG: http_proxy_incoming.cpp:304: (#37,[::1]:60638) <- (400 AWS.SimpleQueueService.TooManyEntriesInBatchRequest, 134 bytes) 2026-01-08T15:33:09.438241Z node 2 :HTTP DEBUG: http_proxy_incoming.cpp:313: (#37,[::1]:60638) Request: POST /Root HTTP/1.1 Host: example.amazonaws.com X-Amz-Target: AmazonSQS.SendMessageBatch X-Amz-Date: 20150830T123600Z Authorization: Content-Type: application/json Connection: Close Transfer-Encoding: chunked { "QueueUrl":"/v1/5//Root/6/topic1/8/consumer", "Entries": [ { "Id":"Id-1", "MessageGroupId":"MessageGroupId-1", "MessageBody":"MessageBody-1" }, { "Id":"Id-2", "MessageGroupId":"MessageGroupId-2", "MessageBody":"MessageBody-2" }, { "Id":"Id-3", "MessageGroupId":"MessageGroupId-3", "MessageBody":"MessageBody-3" }, { "Id":"Id-4", "MessageGroupId":"MessageGroupId-4", "MessageBody":"MessageBody-4" }, { "Id":"Id-5", "MessageGroupId":"MessageGroupId-5", "MessageBody":"MessageBody-5" }, { "Id":"Id-6", "MessageGroupId":"MessageGroupId-6", "MessageBody":"MessageBody-6" 2026-01-08T15:33:09.438273Z node 2 :HTTP DEBUG: http_proxy_incoming.cpp:320: (#37,[::1]:60638) Response: HTTP/1.1 400 AWS.SimpleQueueService.TooManyEntriesInBatchRequest Connection: close x-amzn-requestid: 5c1445e7-5b2fec0d-b91c7a19-11deb771 Content-Type: application/x-amz-json-1.1 Content-Length: 134 2026-01-08T15:33:09.438387Z node 2 :HTTP DEBUG: http_proxy_incoming.cpp:365: (#37,[::1]:60638) connection closed 2026-01-08T15:33:09.490725Z node 2 :PERSQUEUE DEBUG: partition.cpp:2329: [72075186224037907][Partition][0][StateIdle] Process user action and tx events 2026-01-08T15:33:09.490760Z node 2 :PERSQUEUE DEBUG: partition.cpp:2388: [72075186224037907][Partition][0][StateIdle] Events: 0, PendingCommits: 0, PendingWrites: 0 2026-01-08T15:33:09.490775Z node 2 :PERSQUEUE DEBUG: partition.cpp:2337: [72075186224037907][Partition][0][StateIdle] Process user action and tx pending commits 2026-01-08T15:33:09.490793Z node 2 :PERSQUEUE DEBUG: partition.cpp:2388: [72075186224037907][Partition][0][StateIdle] Events: 0, PendingCommits: 0, PendingWrites: 0 2026-01-08T15:33:09.490804Z node 2 :PERSQUEUE DEBUG: partition.cpp:2347: [72075186224037907][Partition][0][StateIdle] Try persist |86.9%| [TM] {BAZEL_UPLOAD} ydb/core/http_proxy/ut/sqs_topic_ut/unittest >> DataShardStats::BackupTableStatsReportInterval [GOOD] >> DataShardStats::CollectKeySampleLeader >> Yq_1::DeleteConnections >> Yq_1::CreateConnection_With_Existing_Name >> KqpBatchDelete::ManyPartitions_1 >> KqpBatchDelete::TableNotExists [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/http_proxy/ut/sqs_topic_ut/unittest >> TestSqsTopicHttpProxy::TestGetQueueUrlOfNotExistingQueue [GOOD] Test command err: 2026-01-08T15:32:53.312585Z node 1 :METADATA_PROVIDER WARN: log.cpp:841: 
fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7593014158598675479:2196];send_to=[0:7307199536658146131:7762515]; 2026-01-08T15:32:53.312745Z node 1 :METADATA_PROVIDER ERROR: log.cpp:841: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2026-01-08T15:32:53.349395Z node 1 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:125: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, got undelivered to scheme cache: ActorUnknown 2026-01-08T15:32:53.869349Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2026-01-08T15:32:53.870705Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2026-01-08T15:32:53.885027Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2026-01-08T15:32:54.241535Z node 1 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: LookupError, path: Root/.metadata/script_executions 2026-01-08T15:32:54.248183Z node 1 :IMPORT WARN: schemeshard_import.cpp:440: Table profiles were not loaded 2026-01-08T15:32:54.250626Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1374: Notification cookie mismatch for subscription [1:7593014158598675298:2081] 1767886373226574 != 1767886373226577 2026-01-08T15:32:54.372115Z node 1 :TX_CONVEYOR ERROR: log.cpp:841: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2026-01-08T15:32:54.474130Z node 1 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: PathErrorUnknown, path: Root/.metadata/script_executions 2026-01-08T15:32:54.507182Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2026-01-08T15:32:54.507207Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2026-01-08T15:32:54.507214Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2026-01-08T15:32:54.507309Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2026-01-08T15:32:54.807051Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2026-01-08T15:32:54.818492Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:123: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2026-01-08T15:32:55.059731Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterUserAttributes, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_alter_user_attrs.cpp:72) 
2026-01-08T15:32:55.073877Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) 2026-01-08T15:32:55.096832Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:123: NotifyTxCompletion, unknown transaction, txId: 281474976710660, at schemeshard: 72057594046644480 2026-01-08T15:32:55.199855Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:32:55.440493Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:32:55.539912Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:123: NotifyTxCompletion, unknown transaction, txId: 281474976710663, at schemeshard: 72057594046644480 2026-01-08T15:32:55.545167Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:32:55.659176Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:123: NotifyTxCompletion, unknown transaction, txId: 281474976710665, at schemeshard: 72057594046644480 2026-01-08T15:32:55.668592Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:32:55.718264Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:32:55.764608Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:32:55.827741Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710669:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 
2026-01-08T15:32:55.909888Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710670:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:32:55.996518Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710671:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:32:56.063906Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:32:58.266499Z node 1 :METADATA_PROVIDER ERROR: log.cpp:841: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7593014158598675479:2196];send_to=[0:7307199536658146131:7762515]; 2026-01-08T15:32:58.266579Z node 1 :METADATA_PROVIDER ERROR: log.cpp:841: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2026-01-08T15:32:58.547837Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7593014180073513443:2370], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:32:58.547954Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:263: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:32:58.548561Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7593014180073513455:2373], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:32:58.548608Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7593014180073513456:2374], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:32:58.548644Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:263: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:32:58.553370Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710673:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2026-01-08T15:32:58.570010Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:123: NotifyTxCompletion, unknown transaction, txId: 281474976710673, at schemeshard: 72057594046644480 2026-01-08T15:32:58.573420Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7593014180073513459:2375], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710673 completed, doublechecking } 2026-01-08T15:32:58.646822Z node 1 :TX_PROXY ERROR: schemereq.cpp:624: Actor# [1:7593014180073513510:3107] txid# 281474976710674, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 54], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2026-01-08T15:32:59.181798Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710676:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:32:59.260547Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operat ... n duration: 18ms 2026-01-08T15:33:09.596843Z node 2 :SQS DEBUG: executor.cpp:83: Request [] Starting executor actor for query(idx=GET_USER_SETTINGS_ID). Mode: COMPILE_AND_EXEC 2026-01-08T15:33:09.597207Z node 2 :SQS TRACE: executor.cpp:154: Request [] Query(idx=GET_USER_SETTINGS_ID) Queue [] Serializing params: {"FROM_USER": "", "FROM_NAME": "", "BATCH_SIZE": 1000} 2026-01-08T15:33:09.597255Z node 2 :SQS TRACE: executor.cpp:325: Request [] Query(idx=GET_QUEUES_LIST_ID) Queue [] Sending mkql execution result: { Status: 48 TxId: 281474976710685 StatusCode: SUCCESS ExecutionEngineStatus: 1 ExecutionEngineResponseStatus: 2 ExecutionEngineEvaluatedResponse { Type { Kind: Struct Struct { Member { Name: "queues" Type { Kind: Optional Optional { Item { Kind: List List { Item { Kind: Struct Struct { Member { Name: "Account" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4608 } } } } } Member { Name: "CreatedTimestamp" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4 } } } } } Member { Name: "CustomQueueName" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4608 } } } } } Member { Name: "DlqName" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4608 } } } } } Member { Name: "FifoQueue" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 6 } } } } } Member { Name: "FolderId" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4608 } } } } } Member { Name: "MasterTabletId" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4 } } } } } Member { Name: "QueueName" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4608 } } } } } Member { Name: "QueueState" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4 } } } } } Member { Name: "Shards" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4 } } } } } Member { Name: "TablesFormat" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 2 } } } } } Member { Name: "Version" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4 } } } } } } } } } } } } Member { Name: "truncated" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 6 } } } } } } } Value { Struct { Optional { } } Struct { Optional { Bool: false } } } } } 2026-01-08T15:33:09.597298Z node 2 :SQS TRACE: executor.cpp:327: Request [] Query(idx=GET_QUEUES_LIST_ID) Queue [] Minikql data response: {"queues": [], "truncated": false} 2026-01-08T15:33:09.597397Z node 2 :SQS DEBUG: executor.cpp:401: Request [] Query(idx=GET_QUEUES_LIST_ID) Queue [] execution duration: 18ms 2026-01-08T15:33:09.597458Z node 2 :SQS TRACE: 
executor.cpp:203: Request [] Query(idx=GET_USER_SETTINGS_ID) Queue [] Execute program: { Transaction { MiniKQLTransaction { Mode: COMPILE_AND_EXEC Program { Bin: "O\022\nFlags\010Name\010Args\016Payload\022Parameter\016Account\nValue\014Member\022SetResult\t\211\004\206\202?\000\206\202\030Extend\000\006\002?\000\t\211\004\202\203\005@\206\205\006\207\203\001H\207\203\001H\207\203\001H\026\006\032\"\000\003?\006\020settings\t\211\004?\026\205\004?\026\203\014\020List$Truncated\203\004\036\000\t\211\026?\036\203\005\004\200\205\006\203\004\203\004\203\004\026\006\032\213\004\203\001H\203\001H\213\000\203\004\203\010\203\010\203\004\206\203\014\203\014,SelectRange\000\003?$ \000\001\205\000\000\000\000\0011\000\000\000\000\000\000\000?\024\005?,\003?&\002\003?(\004\003?*\006\007\013?2\t\351\000?.\005\205\004\206\205\004\203\010\203\005@\002\006\203\005@\n\016\006\000?V\003?X\022FROM_USER\003\022\000\t\351\000?0\005\205\004\206\205\004\203\010\203\005@\002\006\203\005@\n\016\006\000?l\003?n\022FROM_NAME\003\022\000\000\013?4\003?6\000\t\351\000?8\005\205\004\206\205\004\203\010\203\005@\002\006\203\005@\n\016\006\000?\210\003?\212\024BATCH_SIZE\003\022\000\003?:\000\003?<\000\006\004?@\003\203\014\000\003\203\014\000\003\003?B\000\277\007\003? \000\002\001\000\006\002?\002\t\211\004\202\203\005@?\034\"\000\003?\262\022truncated\t\211\004?\034?\036\203\004\036\000?\250\003?\270\002\002\001\000\003/" } Params { Bin: "\037\000\005\205\006\203\010\203\001H\203\001H(BATCH_SIZE$FROM_NAME$FROM_USER\003?\000\241\017\003?\002\000\003?\004\000\007/" } FlatMKQL: true } } ExecTimeoutPeriod: 60000 }. Params: {"FROM_USER": "", "FROM_NAME": "", "BATCH_SIZE": 1000} 2026-01-08T15:33:09.597964Z node 2 :SQS TRACE: queues_list_reader.cpp:82: Handle queues list: { Status: 48 TxId: 281474976710685 StatusCode: SUCCESS ExecutionEngineStatus: 1 ExecutionEngineResponseStatus: 2 ExecutionEngineEvaluatedResponse { Type { Kind: Struct Struct { Member { Name: "queues" Type { Kind: Optional Optional { Item { Kind: List List { Item { Kind: Struct Struct { Member { Name: "Account" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4608 } } } } } Member { Name: "CreatedTimestamp" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4 } } } } } Member { Name: "CustomQueueName" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4608 } } } } } Member { Name: "DlqName" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4608 } } } } } Member { Name: "FifoQueue" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 6 } } } } } Member { Name: "FolderId" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4608 } } } } } Member { Name: "MasterTabletId" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4 } } } } } Member { Name: "QueueName" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4608 } } } } } Member { Name: "QueueState" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4 } } } } } Member { Name: "Shards" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4 } } } } } Member { Name: "TablesFormat" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 2 } } } } } Member { Name: "Version" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4 } } } } } } } } } } } } Member { Name: "truncated" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 6 } } } } } } } Value { Struct { Optional { } } Struct { Optional { Bool: false } } } } } 
2026-01-08T15:33:09.610658Z node 2 :SQS TRACE: executor.cpp:286: Request [] Query(idx=GET_USER_SETTINGS_ID) Queue [] HandleResponse { Status: 48 TxId: 281474976710686 StatusCode: SUCCESS ExecutionEngineStatus: 1 ExecutionEngineResponseStatus: 2 ExecutionEngineEvaluatedResponse { Type { Kind: Struct Struct { Member { Name: "settings" Type { Kind: Optional Optional { Item { Kind: List List { Item { Kind: Struct Struct { Member { Name: "Account" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4608 } } } } } Member { Name: "Name" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4608 } } } } } Member { Name: "Value" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4608 } } } } } } } } } } } } Member { Name: "truncated" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 6 } } } } } } } Value { Struct { Optional { } } Struct { Optional { Bool: false } } } } } 2026-01-08T15:33:09.610698Z node 2 :SQS DEBUG: executor.cpp:287: Request [] Query(idx=GET_USER_SETTINGS_ID) Queue [] Attempt 1 execution duration: 13ms 2026-01-08T15:33:09.610914Z node 2 :SQS TRACE: executor.cpp:325: Request [] Query(idx=GET_USER_SETTINGS_ID) Queue [] Sending mkql execution result: { Status: 48 TxId: 281474976710686 StatusCode: SUCCESS ExecutionEngineStatus: 1 ExecutionEngineResponseStatus: 2 ExecutionEngineEvaluatedResponse { Type { Kind: Struct Struct { Member { Name: "settings" Type { Kind: Optional Optional { Item { Kind: List List { Item { Kind: Struct Struct { Member { Name: "Account" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4608 } } } } } Member { Name: "Name" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4608 } } } } } Member { Name: "Value" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4608 } } } } } } } } } } } } Member { Name: "truncated" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 6 } } } } } } } Value { Struct { Optional { } } Struct { Optional { Bool: false } } } } } 2026-01-08T15:33:09.610943Z node 2 :SQS TRACE: executor.cpp:327: Request [] Query(idx=GET_USER_SETTINGS_ID) Queue [] Minikql data response: {"settings": [], "truncated": false} 2026-01-08T15:33:09.611005Z node 2 :SQS DEBUG: executor.cpp:401: Request [] Query(idx=GET_USER_SETTINGS_ID) Queue [] execution duration: 20ms 2026-01-08T15:33:09.611344Z node 2 :SQS TRACE: user_settings_reader.cpp:89: Handle user settings: { Status: 48 TxId: 281474976710686 StatusCode: SUCCESS ExecutionEngineStatus: 1 ExecutionEngineResponseStatus: 2 ExecutionEngineEvaluatedResponse { Type { Kind: Struct Struct { Member { Name: "settings" Type { Kind: Optional Optional { Item { Kind: List List { Item { Kind: Struct Struct { Member { Name: "Account" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4608 } } } } } Member { Name: "Name" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4608 } } } } } Member { Name: "Value" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4608 } } } } } } } } } } } } Member { Name: "truncated" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 6 } } } } } } } Value { Struct { Optional { } } Struct { Optional { Bool: false } } } } } 2026-01-08T15:33:09.774763Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:559: [WorkloadService] [Service] Reply cleanup error NOT_FOUND to [2:7593014224932344635:2436]: Pool not found 2026-01-08T15:33:09.776930Z node 2 :SQS DEBUG: monitoring.cpp:60: [monitoring] Report deletion queue data 
lag: 0.000000s, count: 0 2026-01-08T15:33:10.117577Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:559: [WorkloadService] [Service] Reply cleanup error NOT_FOUND to [2:7593014224932344639:2439]: Pool not found 2026-01-08T15:33:10.118544Z node 2 :SQS DEBUG: cleanup_queue_data.cpp:100: [cleanup removed queues] getting queues... 2026-01-08T15:33:10.121501Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7593014229227312049:2457], DatabaseId: /Root/SQS, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:33:10.121570Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:594: [WorkloadService] [TDatabaseFetcherActor] ActorId: [2:7593014229227312050:2458], Database: /Root/SQS, Failed to fetch database info, UNSUPPORTED, issues: {
: Error: Invalid database path /Root/SQS, please check the correctness of the path } 2026-01-08T15:33:10.121612Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:263: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root/SQS, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:33:10.130867Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7593014229227312053:2459], DatabaseId: /Root/SQS, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:33:10.130966Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:263: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root/SQS, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:33:10.446380Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:559: [WorkloadService] [Service] Reply cleanup error NOT_FOUND to [2:7593014229227312047:2456]: Pool not found 2026-01-08T15:33:10.446563Z node 2 :SQS DEBUG: cleanup_queue_data.cpp:138: [cleanup removed queues] there are no queues to delete |86.9%| [TM] {BAZEL_UPLOAD} ydb/core/http_proxy/ut/sqs_topic_ut/unittest |86.9%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/kqp/ut/federated_query/large_results/ydb-core-kqp-ut-federated_query-large_results >> TxUsage::Sinks_Oltp_WriteToTopicAndTable_3_Query |86.9%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/kqp/ut/federated_query/large_results/ydb-core-kqp-ut-federated_query-large_results |86.9%| [TA] {RESULT} $(B)/ydb/services/cms/ut/test-results/unittest/{meta.json ... results_accumulator.log} |86.9%| [LD] {RESULT} $(B)/ydb/core/kqp/ut/federated_query/large_results/ydb-core-kqp-ut-federated_query-large_results |86.9%| [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/batch_operations/unittest |86.9%| [TM] {BAZEL_UPLOAD} ydb/core/kqp/ut/batch_operations/unittest >> DataShardVolatile::DistributedWriteThenBulkUpsert [GOOD] >> DataShardVolatile::DistributedWriteThenBulkUpsertWithCdc >> DataShardWrite::UncommittedUpdateLockNewRowAboveSnapshot [GOOD] >> DataShardWrite::UncommittedUpdateLockDeletedRowAboveSnapshot >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-LocalUser-CreateUser-9 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-LocalUser-DropUser-49 >> KqpPg::PgUpdate-useSink [GOOD] >> KqpPg::JoinWithQueryService-StreamLookup >> KqpPg::DeleteWithQueryService-useSink [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/batch_operations/unittest >> KqpBatchDelete::TableNotExists [GOOD] Test command err: Trying to start YDB, gRPC: 19128, MsgBus: 20102 2026-01-08T15:33:07.532986Z node 1 :METADATA_PROVIDER WARN: log.cpp:841: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7593014215541457238:2068];send_to=[0:7307199536658146131:7762515]; 2026-01-08T15:33:07.533164Z node 1 :METADATA_PROVIDER ERROR: log.cpp:841: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2026-01-08T15:33:07.969262Z node 1 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: LookupError, path: Root/.metadata/script_executions 2026-01-08T15:33:08.059546Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2026-01-08T15:33:08.059667Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2026-01-08T15:33:08.071474Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2026-01-08T15:33:08.146508Z node 1 :IMPORT WARN: schemeshard_import.cpp:440: Table profiles were not loaded 2026-01-08T15:33:08.148520Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1374: Notification cookie mismatch for subscription [1:7593014215541457203:2081] 1767886387528573 != 1767886387528576 2026-01-08T15:33:08.255965Z node 1 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script 
execution tables existence, scheme status: PathErrorUnknown, path: Root/.metadata/script_executions 2026-01-08T15:33:08.286763Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2026-01-08T15:33:08.286800Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2026-01-08T15:33:08.286806Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2026-01-08T15:33:08.286881Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2026-01-08T15:33:08.564419Z node 1 :TX_CONVEYOR ERROR: log.cpp:841: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2026-01-08T15:33:08.972379Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2026-01-08T15:33:11.310010Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7593014232721327279:2325], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:33:11.310154Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:263: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:33:11.310759Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7593014232721327291:2328], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:33:11.310806Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7593014232721327292:2329], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:33:11.310851Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:263: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:33:11.319750Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2026-01-08T15:33:11.345362Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7593014232721327295:2330], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715658 completed, doublechecking } 2026-01-08T15:33:11.460997Z node 1 :TX_PROXY ERROR: schemereq.cpp:624: Actor# [1:7593014232721327346:2537] txid# 281474976715659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 41], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2026-01-08T15:33:11.818707Z node 1 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:625: Compilation failed, self: [1:7593014232721327355:2334], status: SCHEME_ERROR, issues:
: Error: Type annotation, code: 1030
:2:35: Error: At function: KiDeleteTable!
:2:35: Error: Cannot find table 'db.[/Root/TestBatchNotExists]' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions., code: 2003 2026-01-08T15:33:11.827868Z node 1 :KQP_SESSION WARN: {KQPSA@kqp_session_actor.cpp:2679} SessionId: ydb://session/3?node_id=1&id=NGViMmY1NzUtMmRlOGZmNWYtYzdmNzA3MTctOGZkZTAxOGE=, ActorId: [1:7593014232721327253:2322], ActorState: ExecuteState, LegacyTraceId: 01kef3v20a3btnpv6870wqvnrw, ReplyQueryCompileError, remove tx status# SCHEME_ERROR issues# { message: "Type annotation" issue_code: 1030 severity: 1 issues { position { row: 2 column: 35 } message: "At function: KiDeleteTable!" end_position { row: 2 column: 35 } severity: 1 issues { position { row: 2 column: 35 } message: "Cannot find table \'db.[/Root/TestBatchNotExists]\' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions." end_position { row: 2 column: 35 } issue_code: 2003 severity: 1 } } } tx_id# trace_id# 2026-01-08T15:33:11.953455Z node 1 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:625: Compilation failed, self: [1:7593014232721327383:2341], status: SCHEME_ERROR, issues:
: Error: Type annotation, code: 1030
:3:41: Error: At function: KiDeleteTable!
:3:41: Error: Cannot find table 'db.[/Root/TestBatchNotExists]' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions., code: 2003 2026-01-08T15:33:11.955509Z node 1 :KQP_SESSION WARN: {KQPSA@kqp_session_actor.cpp:2679} SessionId: ydb://session/3?node_id=1&id=NGViMmY1NzUtMmRlOGZmNWYtYzdmNzA3MTctOGZkZTAxOGE=, ActorId: [1:7593014232721327253:2322], ActorState: ExecuteState, LegacyTraceId: 01kef3v2h0117nx787m32y6k3w, ReplyQueryCompileError, remove tx status# SCHEME_ERROR issues# { message: "Type annotation" issue_code: 1030 severity: 1 issues { position { row: 3 column: 41 } message: "At function: KiDeleteTable!" end_position { row: 3 column: 41 } severity: 1 issues { position { row: 3 column: 41 } message: "Cannot find table \'db.[/Root/TestBatchNotExists]\' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions." end_position { row: 3 column: 41 } issue_code: 2003 severity: 1 } } } tx_id# trace_id# |86.9%| [TM] {BAZEL_UPLOAD} ydb/core/kqp/ut/batch_operations/unittest ------- [TM] {asan, default-linux-x86_64, release} ydb/core/http_proxy/ut/sqs_topic_ut/unittest >> TestSqsTopicHttpProxy::TestReceiveMessage [GOOD] Test command err: 2026-01-08T15:32:51.730475Z node 1 :METADATA_PROVIDER WARN: log.cpp:841: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7593014150423718735:2156];send_to=[0:7307199536658146131:7762515]; 2026-01-08T15:32:51.730752Z node 1 :METADATA_PROVIDER ERROR: log.cpp:841: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2026-01-08T15:32:52.146382Z node 1 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: LookupError, path: Root/.metadata/script_executions 2026-01-08T15:32:52.179629Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2026-01-08T15:32:52.179765Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2026-01-08T15:32:52.304512Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2026-01-08T15:32:52.373817Z node 1 :IMPORT WARN: schemeshard_import.cpp:440: Table profiles were not loaded 2026-01-08T15:32:52.381406Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1374: Notification cookie mismatch for subscription [1:7593014150423718615:2081] 1767886371632550 != 1767886371632553 2026-01-08T15:32:52.502746Z node 1 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: PathErrorUnknown, path: Root/.metadata/script_executions 2026-01-08T15:32:52.627195Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2026-01-08T15:32:52.627212Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2026-01-08T15:32:52.627243Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2026-01-08T15:32:52.627327Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 
2026-01-08T15:32:52.734542Z node 1 :TX_CONVEYOR ERROR: log.cpp:841: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2026-01-08T15:32:53.157787Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2026-01-08T15:32:53.170683Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:123: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2026-01-08T15:32:53.514244Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterUserAttributes, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_alter_user_attrs.cpp:72) 2026-01-08T15:32:53.538632Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:123: NotifyTxCompletion, unknown transaction, txId: 281474976715658, at schemeshard: 72057594046644480 2026-01-08T15:32:53.548053Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) 2026-01-08T15:32:53.584710Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:123: NotifyTxCompletion, unknown transaction, txId: 281474976715660, at schemeshard: 72057594046644480 2026-01-08T15:32:53.685414Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:32:53.962200Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:32:54.046213Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:32:54.146783Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:123: NotifyTxCompletion, unknown transaction, txId: 281474976715665, at schemeshard: 72057594046644480 2026-01-08T15:32:54.157043Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 
2026-01-08T15:32:54.227352Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:32:54.282572Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:32:54.364083Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715669:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:32:54.429968Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715670:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:32:54.469824Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715671:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:32:54.525574Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:32:56.683036Z node 1 :METADATA_PROVIDER ERROR: log.cpp:841: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7593014150423718735:2156];send_to=[0:7307199536658146131:7762515]; 2026-01-08T15:32:56.683114Z node 1 :METADATA_PROVIDER ERROR: log.cpp:841: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2026-01-08T15:32:57.149195Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7593014176193524055:2370], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:32:57.149325Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:263: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:32:57.149685Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7593014176193524068:2374], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:32:57.149716Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7593014176193524067:2373], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:32:57.149749Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:263: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:32:57.158794Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715673:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2026-01-08T15:32:57.186868Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7593014176193524071:2375], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715673 completed, doublechecking } 2026-01-08T15:32:57.274737Z node 1 :TX_PROXY ERROR: schemereq.cpp:624: Actor# [1:7593014176193524123:3110] txid# 281474976715674, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 54], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2026-01-08T15:32:57.799631Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715676:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:32:57.846864Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715677:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:32:57.943819Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but pr ... UEUE DEBUG: partition.cpp:2347: [72075186224037907][Partition][0][StateIdle] Try persist 2026-01-08T15:33:10.236838Z node 2 :PERSQUEUE DEBUG: partition_mlp.cpp:72: [72075186224037907][Partition][0][StateIdle] Handle TEvPQ::TEvMLPConsumerState Consumer: "consumer" CountersValues: 0 CountersValues: 1 CountersValues: 0 CountersValues: 0 CountersValues: 0 CountersValues: 0 CountersValues: 0 MessageLocksValues: 1 MessageLocksValues: 0 MessageLocksValues: 0 MessageLocksValues: 0 MessageLocksValues: 0 MessageLocksValues: 0 MessageLocksValues: 0 MessageLocksValues: 0 MessageLocksValues: 0 MessageLocksValues: 0 MessageLocksValues: 0 MessageLockingDurationValues: 0 MessageLockingDurationValues: 0 MessageLockingDurationValues: 0 MessageLockingDurationValues: 0 MessageLockingDurationValues: 0 MessageLockingDurationValues: 0 MessageLockingDurationValues: 0 MessageLockingDurationValues: 0 MessageLockingDurationValues: 0 MessageLockingDurationValues: 0 MessageLockingDurationValues: 0 MessageLockingDurationValues: 0 MessageLockingDurationValues: 0 MessageLockingDurationValues: 0 MessageLockingDurationValues: 0 DeletedByRetentionPolicy: 0 DeletedByDeadlinePolicy: 0 DeletedByMovedToDLQ: 0 CPUUsage: 519 2026-01-08T15:33:10.297794Z node 2 :PERSQUEUE DEBUG: partition.cpp:2329: [72075186224037907][Partition][0][StateIdle] Process user action and tx events 2026-01-08T15:33:10.297827Z node 2 :PERSQUEUE DEBUG: partition.cpp:2388: [72075186224037907][Partition][0][StateIdle] Events: 0, PendingCommits: 0, PendingWrites: 0 2026-01-08T15:33:10.297840Z node 2 :PERSQUEUE DEBUG: partition.cpp:2337: [72075186224037907][Partition][0][StateIdle] Process user action and tx pending commits 2026-01-08T15:33:10.297856Z node 2 :PERSQUEUE DEBUG: partition.cpp:2388: [72075186224037907][Partition][0][StateIdle] Events: 0, PendingCommits: 0, PendingWrites: 0 2026-01-08T15:33:10.297867Z node 2 :PERSQUEUE DEBUG: partition.cpp:2347: [72075186224037907][Partition][0][StateIdle] Try persist 2026-01-08T15:33:10.405665Z node 2 :PERSQUEUE DEBUG: partition.cpp:2329: [72075186224037907][Partition][0][StateIdle] Process user action and tx events 2026-01-08T15:33:10.405705Z node 2 :PERSQUEUE DEBUG: partition.cpp:2388: 
[72075186224037907][Partition][0][StateIdle] Events: 0, PendingCommits: 0, PendingWrites: 0 2026-01-08T15:33:10.405718Z node 2 :PERSQUEUE DEBUG: partition.cpp:2337: [72075186224037907][Partition][0][StateIdle] Process user action and tx pending commits 2026-01-08T15:33:10.405735Z node 2 :PERSQUEUE DEBUG: partition.cpp:2388: [72075186224037907][Partition][0][StateIdle] Events: 0, PendingCommits: 0, PendingWrites: 0 2026-01-08T15:33:10.405746Z node 2 :PERSQUEUE DEBUG: partition.cpp:2347: [72075186224037907][Partition][0][StateIdle] Try persist 2026-01-08T15:33:10.514492Z node 2 :PERSQUEUE DEBUG: partition.cpp:2329: [72075186224037907][Partition][0][StateIdle] Process user action and tx events 2026-01-08T15:33:10.514533Z node 2 :PERSQUEUE DEBUG: partition.cpp:2388: [72075186224037907][Partition][0][StateIdle] Events: 0, PendingCommits: 0, PendingWrites: 0 2026-01-08T15:33:10.514543Z node 2 :PERSQUEUE DEBUG: partition.cpp:2337: [72075186224037907][Partition][0][StateIdle] Process user action and tx pending commits 2026-01-08T15:33:10.514561Z node 2 :PERSQUEUE DEBUG: partition.cpp:2388: [72075186224037907][Partition][0][StateIdle] Events: 0, PendingCommits: 0, PendingWrites: 0 2026-01-08T15:33:10.514573Z node 2 :PERSQUEUE DEBUG: partition.cpp:2347: [72075186224037907][Partition][0][StateIdle] Try persist 2026-01-08T15:33:10.614625Z node 2 :PERSQUEUE DEBUG: partition.cpp:2329: [72075186224037907][Partition][0][StateIdle] Process user action and tx events 2026-01-08T15:33:10.614654Z node 2 :PERSQUEUE DEBUG: partition.cpp:2388: [72075186224037907][Partition][0][StateIdle] Events: 0, PendingCommits: 0, PendingWrites: 0 2026-01-08T15:33:10.614666Z node 2 :PERSQUEUE DEBUG: partition.cpp:2337: [72075186224037907][Partition][0][StateIdle] Process user action and tx pending commits 2026-01-08T15:33:10.614691Z node 2 :PERSQUEUE DEBUG: partition.cpp:2388: [72075186224037907][Partition][0][StateIdle] Events: 0, PendingCommits: 0, PendingWrites: 0 2026-01-08T15:33:10.614702Z node 2 :PERSQUEUE DEBUG: partition.cpp:2347: [72075186224037907][Partition][0][StateIdle] Try persist 2026-01-08T15:33:10.718735Z node 2 :PERSQUEUE DEBUG: partition.cpp:2329: [72075186224037907][Partition][0][StateIdle] Process user action and tx events 2026-01-08T15:33:10.718783Z node 2 :PERSQUEUE DEBUG: partition.cpp:2388: [72075186224037907][Partition][0][StateIdle] Events: 0, PendingCommits: 0, PendingWrites: 0 2026-01-08T15:33:10.718803Z node 2 :PERSQUEUE DEBUG: partition.cpp:2337: [72075186224037907][Partition][0][StateIdle] Process user action and tx pending commits 2026-01-08T15:33:10.718831Z node 2 :PERSQUEUE DEBUG: partition.cpp:2388: [72075186224037907][Partition][0][StateIdle] Events: 0, PendingCommits: 0, PendingWrites: 0 2026-01-08T15:33:10.718852Z node 2 :PERSQUEUE DEBUG: partition.cpp:2347: [72075186224037907][Partition][0][StateIdle] Try persist 2026-01-08T15:33:10.818503Z node 2 :PERSQUEUE DEBUG: partition.cpp:2329: [72075186224037907][Partition][0][StateIdle] Process user action and tx events 2026-01-08T15:33:10.818537Z node 2 :PERSQUEUE DEBUG: partition.cpp:2388: [72075186224037907][Partition][0][StateIdle] Events: 0, PendingCommits: 0, PendingWrites: 0 2026-01-08T15:33:10.818548Z node 2 :PERSQUEUE DEBUG: partition.cpp:2337: [72075186224037907][Partition][0][StateIdle] Process user action and tx pending commits 2026-01-08T15:33:10.818564Z node 2 :PERSQUEUE DEBUG: partition.cpp:2388: [72075186224037907][Partition][0][StateIdle] Events: 0, PendingCommits: 0, PendingWrites: 0 2026-01-08T15:33:10.818578Z node 2 
:PERSQUEUE DEBUG: partition.cpp:2347: [72075186224037907][Partition][0][StateIdle] Try persist 2026-01-08T15:33:10.930447Z node 2 :PERSQUEUE DEBUG: partition.cpp:2329: [72075186224037907][Partition][0][StateIdle] Process user action and tx events 2026-01-08T15:33:10.930481Z node 2 :PERSQUEUE DEBUG: partition.cpp:2388: [72075186224037907][Partition][0][StateIdle] Events: 0, PendingCommits: 0, PendingWrites: 0 2026-01-08T15:33:10.930494Z node 2 :PERSQUEUE DEBUG: partition.cpp:2337: [72075186224037907][Partition][0][StateIdle] Process user action and tx pending commits 2026-01-08T15:33:10.930510Z node 2 :PERSQUEUE DEBUG: partition.cpp:2388: [72075186224037907][Partition][0][StateIdle] Events: 0, PendingCommits: 0, PendingWrites: 0 2026-01-08T15:33:10.930523Z node 2 :PERSQUEUE DEBUG: partition.cpp:2347: [72075186224037907][Partition][0][StateIdle] Try persist 2026-01-08T15:33:11.038449Z node 2 :PERSQUEUE DEBUG: partition.cpp:2329: [72075186224037907][Partition][0][StateIdle] Process user action and tx events 2026-01-08T15:33:11.038480Z node 2 :PERSQUEUE DEBUG: partition.cpp:2388: [72075186224037907][Partition][0][StateIdle] Events: 0, PendingCommits: 0, PendingWrites: 0 2026-01-08T15:33:11.038492Z node 2 :PERSQUEUE DEBUG: partition.cpp:2337: [72075186224037907][Partition][0][StateIdle] Process user action and tx pending commits 2026-01-08T15:33:11.038509Z node 2 :PERSQUEUE DEBUG: partition.cpp:2388: [72075186224037907][Partition][0][StateIdle] Events: 0, PendingCommits: 0, PendingWrites: 0 2026-01-08T15:33:11.038519Z node 2 :PERSQUEUE DEBUG: partition.cpp:2347: [72075186224037907][Partition][0][StateIdle] Try persist 2026-01-08T15:33:11.142434Z node 2 :PERSQUEUE DEBUG: partition.cpp:2329: [72075186224037907][Partition][0][StateIdle] Process user action and tx events 2026-01-08T15:33:11.142461Z node 2 :PERSQUEUE DEBUG: partition.cpp:2388: [72075186224037907][Partition][0][StateIdle] Events: 0, PendingCommits: 0, PendingWrites: 0 2026-01-08T15:33:11.142470Z node 2 :PERSQUEUE DEBUG: partition.cpp:2337: [72075186224037907][Partition][0][StateIdle] Process user action and tx pending commits 2026-01-08T15:33:11.142482Z node 2 :PERSQUEUE DEBUG: partition.cpp:2388: [72075186224037907][Partition][0][StateIdle] Events: 0, PendingCommits: 0, PendingWrites: 0 2026-01-08T15:33:11.142490Z node 2 :PERSQUEUE DEBUG: partition.cpp:2347: [72075186224037907][Partition][0][StateIdle] Try persist 2026-01-08T15:33:11.238962Z node 2 :PERSQUEUE DEBUG: partition_mlp.cpp:72: [72075186224037907][Partition][0][StateIdle] Handle TEvPQ::TEvMLPConsumerState Consumer: "consumer" CountersValues: 0 CountersValues: 1 CountersValues: 0 CountersValues: 0 CountersValues: 0 CountersValues: 0 CountersValues: 0 MessageLocksValues: 1 MessageLocksValues: 0 MessageLocksValues: 0 MessageLocksValues: 0 MessageLocksValues: 0 MessageLocksValues: 0 MessageLocksValues: 0 MessageLocksValues: 0 MessageLocksValues: 0 MessageLocksValues: 0 MessageLocksValues: 0 MessageLockingDurationValues: 0 MessageLockingDurationValues: 0 MessageLockingDurationValues: 0 MessageLockingDurationValues: 0 MessageLockingDurationValues: 0 MessageLockingDurationValues: 0 MessageLockingDurationValues: 0 MessageLockingDurationValues: 0 MessageLockingDurationValues: 0 MessageLockingDurationValues: 0 MessageLockingDurationValues: 0 MessageLockingDurationValues: 0 MessageLockingDurationValues: 0 MessageLockingDurationValues: 0 MessageLockingDurationValues: 0 DeletedByRetentionPolicy: 0 DeletedByDeadlinePolicy: 0 DeletedByMovedToDLQ: 0 CPUUsage: 78 
2026-01-08T15:33:11.239540Z node 2 :HTTP_PROXY INFO: http_req.cpp:1598: http request [ReceiveMessage] requestId [cda4550d-f8c6ef70-7d93e467-3781f048] reply ok 2026-01-08T15:33:11.239706Z node 2 :HTTP DEBUG: http_proxy_incoming.cpp:304: (#38,[::1]:49120) <- (200 , 2 bytes) 2026-01-08T15:33:11.239826Z node 2 :HTTP DEBUG: http_proxy_incoming.cpp:365: (#38,[::1]:49120) connection closed Http output full {} 2026-01-08T15:33:11.245402Z node 2 :PERSQUEUE DEBUG: partition.cpp:2329: [72075186224037907][Partition][0][StateIdle] Process user action and tx events 2026-01-08T15:33:11.245430Z node 2 :PERSQUEUE DEBUG: partition.cpp:2388: [72075186224037907][Partition][0][StateIdle] Events: 0, PendingCommits: 0, PendingWrites: 0 2026-01-08T15:33:11.245441Z node 2 :PERSQUEUE DEBUG: partition.cpp:2337: [72075186224037907][Partition][0][StateIdle] Process user action and tx pending commits 2026-01-08T15:33:11.245459Z node 2 :PERSQUEUE DEBUG: partition.cpp:2388: [72075186224037907][Partition][0][StateIdle] Events: 0, PendingCommits: 0, PendingWrites: 0 2026-01-08T15:33:11.245471Z node 2 :PERSQUEUE DEBUG: partition.cpp:2347: [72075186224037907][Partition][0][StateIdle] Try persist 2026-01-08T15:33:11.349793Z node 2 :PERSQUEUE DEBUG: partition.cpp:2329: [72075186224037907][Partition][0][StateIdle] Process user action and tx events 2026-01-08T15:33:11.349837Z node 2 :PERSQUEUE DEBUG: partition.cpp:2388: [72075186224037907][Partition][0][StateIdle] Events: 0, PendingCommits: 0, PendingWrites: 0 2026-01-08T15:33:11.349849Z node 2 :PERSQUEUE DEBUG: partition.cpp:2337: [72075186224037907][Partition][0][StateIdle] Process user action and tx pending commits 2026-01-08T15:33:11.349867Z node 2 :PERSQUEUE DEBUG: partition.cpp:2388: [72075186224037907][Partition][0][StateIdle] Events: 0, PendingCommits: 0, PendingWrites: 0 2026-01-08T15:33:11.349879Z node 2 :PERSQUEUE DEBUG: partition.cpp:2347: [72075186224037907][Partition][0][StateIdle] Try persist 2026-01-08T15:33:11.450486Z node 2 :PERSQUEUE DEBUG: partition.cpp:2329: [72075186224037907][Partition][0][StateIdle] Process user action and tx events 2026-01-08T15:33:11.450520Z node 2 :PERSQUEUE DEBUG: partition.cpp:2388: [72075186224037907][Partition][0][StateIdle] Events: 0, PendingCommits: 0, PendingWrites: 0 2026-01-08T15:33:11.450531Z node 2 :PERSQUEUE DEBUG: partition.cpp:2337: [72075186224037907][Partition][0][StateIdle] Process user action and tx pending commits 2026-01-08T15:33:11.450551Z node 2 :PERSQUEUE DEBUG: partition.cpp:2388: [72075186224037907][Partition][0][StateIdle] Events: 0, PendingCommits: 0, PendingWrites: 0 2026-01-08T15:33:11.450563Z node 2 :PERSQUEUE DEBUG: partition.cpp:2347: [72075186224037907][Partition][0][StateIdle] Try persist |86.9%| [TM] {BAZEL_UPLOAD} ydb/core/http_proxy/ut/sqs_topic_ut/unittest >> TxUsage::WriteToTopic_Demo_19_RestartAfterCommit_Table [GOOD] >> KqpBatchDelete::TableWithIndex |86.9%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/sharding/ut/unittest |86.9%| [TM] {BAZEL_UPLOAD} ydb/core/tx/sharding/ut/unittest >> TxUsage::WriteToTopic_Demo_11_Query [GOOD] >> TxUsage::WriteToTopic_Demo_19_RestartAfterCommit_Query >> DataShardWrite::WriteCommitVersion [GOOD] >> DataShardWrite::WriteUniqueRowsInsertDuplicateBeforeCommit >> DataShardVolatile::VolatileTxAbortedOnSplit [GOOD] >> DataShardVolatile::VolatileTxAbortedOnDrop |86.9%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/public/sdk/cpp/src/client/federated_topic/ut/ydb-public-sdk-cpp-src-client-federated_topic-ut |86.9%| [LD] {BAZEL_UPLOAD, SKIPPED} 
$(B)/ydb/public/sdk/cpp/src/client/federated_topic/ut/ydb-public-sdk-cpp-src-client-federated_topic-ut |86.9%| [LD] {RESULT} $(B)/ydb/public/sdk/cpp/src/client/federated_topic/ut/ydb-public-sdk-cpp-src-client-federated_topic-ut >> KqpPg::CreateTableIfNotExists_GenericQuery [GOOD] >> KqpPg::AlterColumnSetDefaultFromSequenceWithSchemaname >> DataShardOutOfOrder::TestImmediateQueueThenSplit-UseSink [GOOD] >> DataShardOutOfOrder::TestLateKqpScanAfterColumnDrop+UseSink ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/pg/unittest >> KqpPg::DeleteWithQueryService-useSink [GOOD] Test command err: Trying to start YDB, gRPC: 3588, MsgBus: 61334 2026-01-08T15:31:27.756724Z node 1 :METADATA_PROVIDER WARN: log.cpp:841: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7593013786562005695:2076];send_to=[0:7307199536658146131:7762515]; 2026-01-08T15:31:27.757476Z node 1 :METADATA_PROVIDER ERROR: log.cpp:841: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2026-01-08T15:31:28.166508Z node 1 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: LookupError, path: Root/.metadata/script_executions 2026-01-08T15:31:28.346429Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1374: Notification cookie mismatch for subscription [1:7593013786562005649:2081] 1767886287747377 != 1767886287747380 2026-01-08T15:31:28.374952Z node 1 :IMPORT WARN: schemeshard_import.cpp:440: Table profiles were not loaded 2026-01-08T15:31:28.396842Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2026-01-08T15:31:28.396943Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2026-01-08T15:31:28.483619Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2026-01-08T15:31:28.487175Z node 1 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: PathErrorUnknown, path: Root/.metadata/script_executions 2026-01-08T15:31:28.778548Z node 1 :TX_CONVEYOR ERROR: log.cpp:841: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2026-01-08T15:31:28.790915Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2026-01-08T15:31:28.790940Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2026-01-08T15:31:28.790946Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2026-01-08T15:31:28.791035Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2026-01-08T15:31:29.950862Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2026-01-08T15:31:29.962962Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:123: 
NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2026-01-08T15:31:32.755518Z node 1 :METADATA_PROVIDER ERROR: log.cpp:841: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7593013786562005695:2076];send_to=[0:7307199536658146131:7762515]; 2026-01-08T15:31:32.755634Z node 1 :METADATA_PROVIDER ERROR: log.cpp:841: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2026-01-08T15:31:32.927180Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7593013808036843026:2326], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:31:32.927297Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:263: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:31:32.927796Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7593013808036843036:2327], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:31:32.927853Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:263: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:31:33.001791Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:31:33.214820Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7593013812331810459:2337], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:31:33.214907Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:263: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:31:33.215367Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7593013812331810464:2340], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:31:33.215421Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7593013812331810465:2341], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:31:33.215449Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:263: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:31:33.220131Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710659:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2026-01-08T15:31:33.248911Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7593013812331810468:2342], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710659 completed, doublechecking } 2026-01-08T15:31:33.397126Z node 1 :TX_PROXY ERROR: schemereq.cpp:624: Actor# [1:7593013812331810519:2620] txid# 281474976710660, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 43], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 1 1 1 Trying to start YDB, gRPC: 1875, MsgBus: 15593 2026-01-08T15:31:35.446104Z node 2 :METADATA_PROVIDER WARN: log.cpp:841: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7593013824265521969:2194];send_to=[0:7307199536658146131:7762515]; 2026-01-08T15:31:35.446263Z node 2 :METADATA_PROVIDER ERROR: log.cpp:841: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2026-01-08T15:31:35.546497Z node 2 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: LookupError, path: Root/.metadata/script_executions 2026-01-08T15:31:35.714456Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1374: Notification cookie mismatch for subscription [2:7593013824265521802:2081] 1767886295403744 != 1767886295403747 2026-01-08T15:31:35.726665Z node 2 :IMPORT WARN: schemeshard_import.cpp:440: Table profiles were not loaded 2026-01-08T15:31:35.731476Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2026-01-08T15:31:35.731579Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2026-01-08T15:31:35.754863Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2026-01-08T15:31:35.899709Z node 2 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: PathErrorUnknown, path: Root/.metadata/script_executions 2026-01-08T15:31:35.930615Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2026-01-08T15:31:35.930637Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2026-01-08T15:31:35.930644Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2026-01-08T15:31:35.930728Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2026-01-08T15:31:36.360450Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2026-01-08T15:31:36.371621Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:123: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2026-01-08T15:31:36.438584Z node 2 :TX_CONVEYOR ERROR: log.cpp:841: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2026-01-08T15:31:39.403471Z node 2 :KQP_WORKLOAD_SERVICE WARN: 
scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7593013841445391873:2324], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:31:39.403560Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:263: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:31:39.409284Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7593013841445391883:2325], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:31:39.409379Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:263: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:3 ...
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:33:03.048680Z node 11 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:263: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:33:03.049548Z node 11 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [11:7593014198957996725:2331], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:33:03.049652Z node 11 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:263: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:33:03.084500Z node 11 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:33:03.266905Z node 11 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [11:7593014198957996824:2344], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:33:03.267049Z node 11 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:263: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:33:03.267705Z node 11 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [11:7593014198957996829:2347], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:33:03.267786Z node 11 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [11:7593014198957996830:2348], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:33:03.267984Z node 11 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:263: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:33:03.274451Z node 11 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710659:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2026-01-08T15:33:03.301738Z node 11 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [11:7593014198957996833:2349], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710659 completed, doublechecking } 2026-01-08T15:33:03.399521Z node 11 :TX_PROXY ERROR: schemereq.cpp:624: Actor# [11:7593014198957996884:2611] txid# 281474976710660, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 42], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } Trying to start YDB, gRPC: 23949, MsgBus: 1987 2026-01-08T15:33:05.331423Z node 12 :METADATA_PROVIDER WARN: log.cpp:841: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[12:7593014206862556431:2079];send_to=[0:7307199536658146131:7762515]; 2026-01-08T15:33:05.331845Z node 12 :METADATA_PROVIDER ERROR: log.cpp:841: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2026-01-08T15:33:05.371184Z node 12 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:125: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, got undelivered to scheme cache: ActorUnknown 2026-01-08T15:33:05.610595Z node 12 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: LookupError, path: Root/.metadata/script_executions 2026-01-08T15:33:05.642670Z node 12 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1374: Notification cookie mismatch for subscription [12:7593014206862556382:2081] 1767886385328995 != 1767886385328998 2026-01-08T15:33:05.700934Z node 12 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(12, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2026-01-08T15:33:05.701063Z node 12 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(12, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2026-01-08T15:33:05.709981Z node 12 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(12, (0,0,0,0)) VolatileState: Connecting -> Connected 2026-01-08T15:33:05.717284Z node 12 :IMPORT WARN: schemeshard_import.cpp:440: Table profiles were not loaded 2026-01-08T15:33:05.774409Z node 12 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: PathErrorUnknown, path: Root/.metadata/script_executions 2026-01-08T15:33:05.983256Z node 12 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2026-01-08T15:33:05.983283Z node 12 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2026-01-08T15:33:05.983296Z node 12 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2026-01-08T15:33:05.983424Z node 12 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2026-01-08T15:33:06.360297Z node 12 :TX_CONVEYOR ERROR: log.cpp:841: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2026-01-08T15:33:07.044598Z node 12 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 
2026-01-08T15:33:07.054405Z node 12 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:123: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2026-01-08T15:33:10.335704Z node 12 :METADATA_PROVIDER ERROR: log.cpp:841: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[12:7593014206862556431:2079];send_to=[0:7307199536658146131:7762515]; 2026-01-08T15:33:10.335867Z node 12 :METADATA_PROVIDER ERROR: log.cpp:841: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2026-01-08T15:33:11.998514Z node 12 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [12:7593014232632361057:2328], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:33:11.998674Z node 12 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:263: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:33:11.999897Z node 12 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [12:7593014232632361066:2329], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:33:11.999989Z node 12 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:263: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:33:12.073359Z node 12 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:33:12.260200Z node 12 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [12:7593014236927328458:2340], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:33:12.260349Z node 12 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:263: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:33:12.260813Z node 12 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [12:7593014236927328464:2344], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:33:12.260876Z node 12 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:263: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:33:12.260934Z node 12 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [12:7593014236927328463:2343], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:33:12.268028Z node 12 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710659:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2026-01-08T15:33:12.286151Z node 12 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [12:7593014236927328467:2345], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710659 completed, doublechecking } 2026-01-08T15:33:12.381398Z node 12 :TX_PROXY ERROR: schemereq.cpp:624: Actor# [12:7593014236927328522:2606] txid# 281474976710660, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 42], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } >> TxUsage::WriteToTopic_Demo_16_Table [GOOD] |86.9%| [TM] {BAZEL_UPLOAD} ydb/core/kqp/ut/pg/unittest >> TxUsage::The_TxWriteInfo_Is_Deleted_After_The_Immediate_Transaction >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-LocalUser-CreateUser-1 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-LocalUser-CreateUser-2 >> DataShardOutOfOrder::TestOutOfOrderNonConflictingWrites+EvWrite [GOOD] >> DataShardOutOfOrder::TestOutOfOrderNoBarrierRestartImmediateLongTail >> TxUsage::WriteToTopic_Demo_16_Query >> TSchemeshardStatsBatchingTest::ShouldNotBatchWhenDisabled |86.9%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/schemeshard/ut_vector_index_build_reboots/tx-schemeshard-ut_vector_index_build_reboots |86.9%| [LD] {RESULT} $(B)/ydb/core/tx/schemeshard/ut_vector_index_build_reboots/tx-schemeshard-ut_vector_index_build_reboots >> TSchemeshardStatsBatchingTest::ShouldPersistByBatchSize |86.9%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/schemeshard/ut_vector_index_build_reboots/tx-schemeshard-ut_vector_index_build_reboots >> TTxDataShardRecomputeKMeansScan::BuildTable-WithForeign [GOOD] >> TTxDataShardRecomputeKMeansScan::EmptyCluster |87.0%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_stats/unittest |87.0%| [TM] {BAZEL_UPLOAD} ydb/core/tx/schemeshard/ut_stats/unittest >> DataShardTxOrder::ImmediateBetweenOnline_oo8 [GOOD] |87.0%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/kqp/ut/scheme/ydb-core-kqp-ut-scheme |87.0%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/kqp/ut/scheme/ydb-core-kqp-ut-scheme |87.0%| [LD] {RESULT} $(B)/ydb/core/kqp/ut/scheme/ydb-core-kqp-ut-scheme >> DataShardWrite::DistributedInsertReadSetWithoutLocks+Volatile [GOOD] >> DataShardWrite::DistributedInsertReadSetWithoutLocks-Volatile >> DataShardTxOrder::ImmediateBetweenOnline_oo8_dirty [GOOD] >> DataShardWrite::ExecSQLUpsertPrepared+EvWrite+Volatile [GOOD] >> DataShardWrite::InsertImmediate >> KqpPg::InsertFromSelect_Serial-useSink [GOOD] >> KqpPg::InsertNoTargetColumns_ColumnOrder+useSink >> TxUsage::Sinks_Oltp_WriteToTopic_3_Table [GOOD] >> TSchemeshardStatsBatchingTest::TopicAccountSizeAndUsedReserveSize >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-LocalUser-DropUser-49 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-LocalUser-DropUser-50 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/datashard/ut_order/unittest >> DataShardTxOrder::ImmediateBetweenOnline_oo8 [GOOD] Test command err: 2026-01-08T15:33:10.656455Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7755: Cannot subscribe to console configs 2026-01-08T15:33:10.656529Z node 1 :IMPORT WARN: schemeshard_import.cpp:440: Table profiles were not loaded 2026-01-08T15:33:10.663963Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3119: StateInit, received event# 268828672, Sender [1:113:2143], Recipient [1:136:2157]: NKikimr::TEvTablet::TEvBoot 2026-01-08T15:33:10.692028Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3119: StateInit, received event# 268828673, Sender [1:113:2143], Recipient [1:136:2157]: NKikimr::TEvTablet::TEvRestored 
2026-01-08T15:33:10.692372Z node 1 :TX_DATASHARD INFO: datashard.cpp:374: TDataShard::OnActivateExecutor: tablet 9437184 actor [1:136:2157] 2026-01-08T15:33:10.692658Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:661: TxInitSchema.Execute 2026-01-08T15:33:10.804520Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateInactive, received event# 268828684, Sender [1:113:2143], Recipient [1:136:2157]: NKikimr::TEvTablet::TEvFollowerSyncComplete 2026-01-08T15:33:10.821009Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:727: TxInitSchema.Complete 2026-01-08T15:33:10.821116Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:50: TDataShard::TTxInit::Execute 2026-01-08T15:33:10.823209Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1324: LoadChangeRecords: QueueSize: 0, at tablet: 9437184 2026-01-08T15:33:10.823297Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1380: LoadLockChangeRecords at tablet: 9437184 2026-01-08T15:33:10.823374Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1429: LoadChangeRecordCommits at tablet: 9437184 2026-01-08T15:33:10.823758Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:94: TDataShard::TTxInit::Complete 2026-01-08T15:33:10.824086Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:102: TDataShard::TTxInitRestored::Execute 2026-01-08T15:33:10.824159Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:142: DataShard 9437184 persisting started state actor id [1:209:2157] in generation 2 2026-01-08T15:33:10.935109Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:151: TDataShard::TTxInitRestored::Complete 2026-01-08T15:33:11.021938Z node 1 :TX_DATASHARD INFO: datashard.cpp:418: Switched to work state WaitScheme tabletId 9437184 2026-01-08T15:33:11.022158Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:458: 9437184 not sending time cast registration request in state WaitScheme: missing processing params 2026-01-08T15:33:11.022269Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1259: Change sender created: at tablet: 9437184, actorId: [1:223:2219] 2026-01-08T15:33:11.022334Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1264: Trying to activate change sender: at tablet: 9437184 2026-01-08T15:33:11.022381Z node 1 :TX_DATASHARD INFO: datashard.cpp:1281: Cannot activate change sender: at tablet: 9437184, state: WaitScheme 2026-01-08T15:33:11.022442Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 9437184 2026-01-08T15:33:11.022624Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3153: StateWork, received event# 2146435072, Sender [1:136:2157], Recipient [1:136:2157]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvProgressTransaction 2026-01-08T15:33:11.022687Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3178: StateWork, processing event TEvPrivate::TEvProgressTransaction 2026-01-08T15:33:11.023029Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 9437184 2026-01-08T15:33:11.023143Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 9437184 2026-01-08T15:33:11.023279Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 9437184 2026-01-08T15:33:11.023347Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 9437184 active 0 active planned 0 immediate 0 planned 0 2026-01-08T15:33:11.023469Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:385: Check unit PlanQueue at 9437184 2026-01-08T15:33:11.023516Z node 1 :TX_DATASHARD TRACE: plan_queue_unit.cpp:59: TPlanQueueUnit at 
9437184 has no attached operations 2026-01-08T15:33:11.023557Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:403: Unit PlanQueue has no ready operations at 9437184 2026-01-08T15:33:11.023596Z node 1 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 9437184 TxInFly 0 2026-01-08T15:33:11.023648Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 9437184 2026-01-08T15:33:11.023780Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3153: StateWork, received event# 269877761, Sender [1:219:2216], Recipient [1:136:2157]: NKikimr::TEvTabletPipe::TEvServerConnected 2026-01-08T15:33:11.023818Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3189: StateWork, processing event TEvTabletPipe::TEvServerConnected 2026-01-08T15:33:11.023878Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3740: Server connected at leader tablet# 9437184, clientId# [1:217:2215], serverId# [1:219:2216], sessionId# [0:0:0] 2026-01-08T15:33:11.033237Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3153: StateWork, received event# 269549568, Sender [1:104:2137], Recipient [1:136:2157]: NKikimrTxDataShard.TEvProposeTransaction TxKind: TX_KIND_SCHEME SourceDeprecated { RawX1: 104 RawX2: 4294969433 } TxBody: "\nI\n\006table1\020\r\032\t\n\003key\030\002 \"\032\014\n\005value\030\200$ 8\032\n\n\004uint\030\002 9(\":\010Z\006\010\010\030\001(\000J\014/Root/table1" TxId: 1 ExecLevel: 0 Flags: 0 SchemeShardId: 4200 ProcessingParams { } 2026-01-08T15:33:11.033331Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3159: StateWork, processing event TEvDataShard::TEvProposeTransaction 2026-01-08T15:33:11.033461Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 9437184 2026-01-08T15:33:11.033633Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1848: Trying to execute [0:1] at 9437184 on unit CheckSchemeTx 2026-01-08T15:33:11.033690Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:133: Propose scheme transaction at tablet 9437184 txId 1 ssId 4200 seqNo 0:0 2026-01-08T15:33:11.033746Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:221: Prepared scheme transaction txId 1 at tablet 9437184 2026-01-08T15:33:11.033810Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1883: Execution status for [0:1] at 9437184 is ExecutedNoMoreRestarts 2026-01-08T15:33:11.033847Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1931: Advance execution plan for [0:1] at 9437184 executing on unit CheckSchemeTx 2026-01-08T15:33:11.033888Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1937: Add [0:1] at 9437184 to execution unit StoreSchemeTx 2026-01-08T15:33:11.033948Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1848: Trying to execute [0:1] at 9437184 on unit StoreSchemeTx 2026-01-08T15:33:11.034289Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1883: Execution status for [0:1] at 9437184 is DelayCompleteNoMoreRestarts 2026-01-08T15:33:11.034372Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1931: Advance execution plan for [0:1] at 9437184 executing on unit StoreSchemeTx 2026-01-08T15:33:11.034430Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1937: Add [0:1] at 9437184 to execution unit FinishPropose 2026-01-08T15:33:11.034467Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1848: Trying to execute [0:1] at 9437184 on unit FinishPropose 2026-01-08T15:33:11.034540Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1883: Execution status for [0:1] at 9437184 is DelayComplete 2026-01-08T15:33:11.034575Z node 1 
:TX_DATASHARD TRACE: datashard_pipeline.cpp:1931: Advance execution plan for [0:1] at 9437184 executing on unit FinishPropose 2026-01-08T15:33:11.034607Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1937: Add [0:1] at 9437184 to execution unit WaitForPlan 2026-01-08T15:33:11.034638Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1848: Trying to execute [0:1] at 9437184 on unit WaitForPlan 2026-01-08T15:33:11.034672Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1853: Operation [0:1] at 9437184 is not ready to execute on unit WaitForPlan 2026-01-08T15:33:11.053105Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 9437184 2026-01-08T15:33:11.053203Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1954: Complete execution for [0:1] at 9437184 on unit StoreSchemeTx 2026-01-08T15:33:11.053242Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1954: Complete execution for [0:1] at 9437184 on unit FinishPropose 2026-01-08T15:33:11.053299Z node 1 :TX_DATASHARD TRACE: finish_propose_unit.cpp:167: Propose transaction complete txid 1 at tablet 9437184 send to client, exec latency: 0 ms, propose latency: 1 ms, status: PREPARED 2026-01-08T15:33:11.053377Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:468: 9437184 not sending time cast registration request in state WaitScheme 2026-01-08T15:33:11.053975Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3153: StateWork, received event# 269877761, Sender [1:229:2225], Recipient [1:136:2157]: NKikimr::TEvTabletPipe::TEvServerConnected 2026-01-08T15:33:11.054034Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3189: StateWork, processing event TEvTabletPipe::TEvServerConnected 2026-01-08T15:33:11.054080Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3740: Server connected at leader tablet# 9437184, clientId# [1:228:2224], serverId# [1:229:2225], sessionId# [0:0:0] 2026-01-08T15:33:11.054225Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3153: StateWork, received event# 269287424, Sender [1:104:2137], Recipient [1:136:2157]: {TEvPlanStep step# 1000001 MediatorId# 0 TabletID 9437184} 2026-01-08T15:33:11.054268Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3171: StateWork, processing event TEvTxProcessing::TEvPlanStep 2026-01-08T15:33:11.054445Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1811: Trying to execute [1000001:1] at 9437184 on unit WaitForPlan 2026-01-08T15:33:11.054492Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1826: Execution status for [1000001:1] at 9437184 is Executed 2026-01-08T15:33:11.054554Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1931: Advance execution plan for [1000001:1] at 9437184 executing on unit WaitForPlan 2026-01-08T15:33:11.054604Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1937: Add [1000001:1] at 9437184 to execution unit PlanQueue 2026-01-08T15:33:11.063151Z node 1 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:69: Planned transaction txId 1 at step 1000001 at tablet 9437184 { Transactions { TxId: 1 AckTo { RawX1: 104 RawX2: 4294969433 } } Step: 1000001 MediatorID: 0 TabletID: 9437184 } 2026-01-08T15:33:11.063237Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 9437184 2026-01-08T15:33:11.063525Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3153: StateWork, received event# 2146435072, Sender [1:136:2157], Recipient [1:136:2157]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvProgressTransaction 2026-01-08T15:33:11.063593Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3178: 
StateWork, processing event TEvPrivate::TEvProgressTransaction 2026-01-08T15:33:11.063657Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 9437184 2026-01-08T15:33:11.063697Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 9437184 active 0 active planned 0 immediate 0 planned 1 2026-01-08T15:33:11.063732Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:327: Check candidate unit PlanQueue at 9437184 2026-01-08T15:33:11.063783Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:333: Found ready operation [1000001:1] in PlanQueue unit at 9437184 2026-01-08T15:33:11.063831Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1848: Trying to execute [100000 ... :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 9437186 2026-01-08T15:33:18.610010Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1954: Complete execution for [1000005:134] at 9437186 on unit CompleteOperation 2026-01-08T15:33:18.610042Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:810: Complete [1000005 : 134] from 9437186 at tablet 9437186 send result to client [1:104:2137], exec latency: 2 ms, propose latency: 4 ms 2026-01-08T15:33:18.610077Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:564: Send delayed Ack RS Ack at 9437186 {TEvReadSet step# 1000005 txid# 134 TabletSource# 9437184 TabletDest# 9437186 SetTabletConsumer# 9437186 Flags# 0 Seqno# 44} 2026-01-08T15:33:18.610111Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 9437186 2026-01-08T15:33:18.610213Z node 1 :TX_DATASHARD DEBUG: datashard__readset.cpp:91: TTxReadSet::Complete at 9437186 2026-01-08T15:33:18.610237Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 9437186 2026-01-08T15:33:18.610254Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1954: Complete execution for [1000005:137] at 9437186 on unit CompleteOperation 2026-01-08T15:33:18.610276Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:810: Complete [1000005 : 137] from 9437186 at tablet 9437186 send result to client [1:104:2137], exec latency: 2 ms, propose latency: 4 ms 2026-01-08T15:33:18.610332Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:564: Send delayed Ack RS Ack at 9437186 {TEvReadSet step# 1000005 txid# 137 TabletSource# 9437184 TabletDest# 9437186 SetTabletConsumer# 9437186 Flags# 0 Seqno# 45} 2026-01-08T15:33:18.610369Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 9437186 2026-01-08T15:33:18.610459Z node 1 :TX_DATASHARD DEBUG: datashard__readset.cpp:91: TTxReadSet::Complete at 9437186 2026-01-08T15:33:18.610485Z node 1 :TX_DATASHARD DEBUG: datashard__readset.cpp:91: TTxReadSet::Complete at 9437186 2026-01-08T15:33:18.610513Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 9437186 2026-01-08T15:33:18.610557Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1954: Complete execution for [1000005:140] at 9437186 on unit CompleteOperation 2026-01-08T15:33:18.610598Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:810: Complete [1000005 : 140] from 9437186 at tablet 9437186 send result to client [1:104:2137], exec latency: 2 ms, propose latency: 4 ms 2026-01-08T15:33:18.610644Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:564: Send delayed Ack RS Ack at 9437186 {TEvReadSet step# 1000005 txid# 140 TabletSource# 9437184 TabletDest# 9437186 SetTabletConsumer# 9437186 Flags# 0 Seqno# 46} 
2026-01-08T15:33:18.610671Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 9437186 2026-01-08T15:33:18.610775Z node 1 :TX_DATASHARD DEBUG: datashard__readset.cpp:91: TTxReadSet::Complete at 9437186 2026-01-08T15:33:18.610802Z node 1 :TX_DATASHARD DEBUG: datashard__readset.cpp:91: TTxReadSet::Complete at 9437186 2026-01-08T15:33:18.610830Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 9437186 2026-01-08T15:33:18.610854Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1954: Complete execution for [1000005:143] at 9437186 on unit CompleteOperation 2026-01-08T15:33:18.610917Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:810: Complete [1000005 : 143] from 9437186 at tablet 9437186 send result to client [1:104:2137], exec latency: 2 ms, propose latency: 4 ms 2026-01-08T15:33:18.610962Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:564: Send delayed Ack RS Ack at 9437186 {TEvReadSet step# 1000005 txid# 143 TabletSource# 9437184 TabletDest# 9437186 SetTabletConsumer# 9437186 Flags# 0 Seqno# 47} 2026-01-08T15:33:18.610993Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 9437186 2026-01-08T15:33:18.611093Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 9437186 2026-01-08T15:33:18.611137Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1954: Complete execution for [1000005:146] at 9437186 on unit CompleteOperation 2026-01-08T15:33:18.611171Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:810: Complete [1000005 : 146] from 9437186 at tablet 9437186 send result to client [1:104:2137], exec latency: 2 ms, propose latency: 4 ms 2026-01-08T15:33:18.611215Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:564: Send delayed Ack RS Ack at 9437186 {TEvReadSet step# 1000005 txid# 146 TabletSource# 9437184 TabletDest# 9437186 SetTabletConsumer# 9437186 Flags# 0 Seqno# 48} 2026-01-08T15:33:18.611241Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 9437186 2026-01-08T15:33:18.611339Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 9437186 2026-01-08T15:33:18.611362Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1954: Complete execution for [1000005:149] at 9437186 on unit CompleteOperation 2026-01-08T15:33:18.611391Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:810: Complete [1000005 : 149] from 9437186 at tablet 9437186 send result to client [1:104:2137], exec latency: 2 ms, propose latency: 4 ms 2026-01-08T15:33:18.611451Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:564: Send delayed Ack RS Ack at 9437186 {TEvReadSet step# 1000005 txid# 149 TabletSource# 9437184 TabletDest# 9437186 SetTabletConsumer# 9437186 Flags# 0 Seqno# 49} 2026-01-08T15:33:18.611499Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 9437186 2026-01-08T15:33:18.611612Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 9437186 2026-01-08T15:33:18.611638Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1954: Complete execution for [1000005:152] at 9437186 on unit CompleteOperation 2026-01-08T15:33:18.611692Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:810: Complete [1000005 : 152] from 9437186 at tablet 9437186 send result to client [1:104:2137], exec latency: 2 ms, propose latency: 4 ms 2026-01-08T15:33:18.611771Z node 1 :TX_DATASHARD 
DEBUG: datashard.cpp:564: Send delayed Ack RS Ack at 9437186 {TEvReadSet step# 1000005 txid# 152 TabletSource# 9437184 TabletDest# 9437186 SetTabletConsumer# 9437186 Flags# 0 Seqno# 50} 2026-01-08T15:33:18.611814Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 9437186 2026-01-08T15:33:18.612067Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3153: StateWork, received event# 269287938, Sender [1:463:2405], Recipient [1:242:2234]: {TEvReadSet step# 1000005 txid# 134 TabletSource# 9437184 TabletDest# 9437186 SetTabletConsumer# 9437186 Flags# 0 Seqno# 44} 2026-01-08T15:33:18.612108Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3173: StateWork, processing event TEvTxProcessing::TEvReadSetAck 2026-01-08T15:33:18.612145Z node 1 :TX_DATASHARD DEBUG: datashard_outreadset.cpp:150: Receive RS Ack at 9437184 source 9437184 dest 9437186 consumer 9437186 txId 134 2026-01-08T15:33:18.612221Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3153: StateWork, received event# 269287938, Sender [1:242:2234], Recipient [1:352:2320]: {TEvReadSet step# 1000005 txid# 152 TabletSource# 9437185 TabletDest# 9437184 SetTabletConsumer# 9437184 Flags# 0 Seqno# 99} 2026-01-08T15:33:18.612251Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3173: StateWork, processing event TEvTxProcessing::TEvReadSetAck 2026-01-08T15:33:18.612284Z node 1 :TX_DATASHARD DEBUG: datashard_outreadset.cpp:150: Receive RS Ack at 9437185 source 9437185 dest 9437184 consumer 9437184 txId 152 2026-01-08T15:33:18.612391Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3153: StateWork, received event# 269287938, Sender [1:463:2405], Recipient [1:242:2234]: {TEvReadSet step# 1000005 txid# 137 TabletSource# 9437184 TabletDest# 9437186 SetTabletConsumer# 9437186 Flags# 0 Seqno# 45} 2026-01-08T15:33:18.612420Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3173: StateWork, processing event TEvTxProcessing::TEvReadSetAck 2026-01-08T15:33:18.612446Z node 1 :TX_DATASHARD DEBUG: datashard_outreadset.cpp:150: Receive RS Ack at 9437184 source 9437184 dest 9437186 consumer 9437186 txId 137 2026-01-08T15:33:18.612525Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3153: StateWork, received event# 269287938, Sender [1:242:2234], Recipient [1:352:2320]: {TEvReadSet step# 1000005 txid# 154 TabletSource# 9437185 TabletDest# 9437184 SetTabletConsumer# 9437184 Flags# 0 Seqno# 100} 2026-01-08T15:33:18.612556Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3173: StateWork, processing event TEvTxProcessing::TEvReadSetAck 2026-01-08T15:33:18.612579Z node 1 :TX_DATASHARD DEBUG: datashard_outreadset.cpp:150: Receive RS Ack at 9437185 source 9437185 dest 9437184 consumer 9437184 txId 154 2026-01-08T15:33:18.612670Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3153: StateWork, received event# 269287938, Sender [1:463:2405], Recipient [1:242:2234]: {TEvReadSet step# 1000005 txid# 140 TabletSource# 9437184 TabletDest# 9437186 SetTabletConsumer# 9437186 Flags# 0 Seqno# 46} 2026-01-08T15:33:18.612702Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3173: StateWork, processing event TEvTxProcessing::TEvReadSetAck 2026-01-08T15:33:18.612728Z node 1 :TX_DATASHARD DEBUG: datashard_outreadset.cpp:150: Receive RS Ack at 9437184 source 9437184 dest 9437186 consumer 9437186 txId 140 2026-01-08T15:33:18.612803Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3153: StateWork, received event# 269287938, Sender [1:463:2405], Recipient [1:242:2234]: {TEvReadSet step# 1000005 txid# 143 TabletSource# 9437184 TabletDest# 9437186 SetTabletConsumer# 9437186 
Flags# 0 Seqno# 47} 2026-01-08T15:33:18.612840Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3173: StateWork, processing event TEvTxProcessing::TEvReadSetAck 2026-01-08T15:33:18.612867Z node 1 :TX_DATASHARD DEBUG: datashard_outreadset.cpp:150: Receive RS Ack at 9437184 source 9437184 dest 9437186 consumer 9437186 txId 143 2026-01-08T15:33:18.612930Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3153: StateWork, received event# 269287938, Sender [1:463:2405], Recipient [1:242:2234]: {TEvReadSet step# 1000005 txid# 146 TabletSource# 9437184 TabletDest# 9437186 SetTabletConsumer# 9437186 Flags# 0 Seqno# 48} 2026-01-08T15:33:18.612980Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3173: StateWork, processing event TEvTxProcessing::TEvReadSetAck 2026-01-08T15:33:18.613023Z node 1 :TX_DATASHARD DEBUG: datashard_outreadset.cpp:150: Receive RS Ack at 9437184 source 9437184 dest 9437186 consumer 9437186 txId 146 2026-01-08T15:33:18.613144Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3153: StateWork, received event# 269287938, Sender [1:463:2405], Recipient [1:242:2234]: {TEvReadSet step# 1000005 txid# 149 TabletSource# 9437184 TabletDest# 9437186 SetTabletConsumer# 9437186 Flags# 0 Seqno# 49} 2026-01-08T15:33:18.613175Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3173: StateWork, processing event TEvTxProcessing::TEvReadSetAck 2026-01-08T15:33:18.613199Z node 1 :TX_DATASHARD DEBUG: datashard_outreadset.cpp:150: Receive RS Ack at 9437184 source 9437184 dest 9437186 consumer 9437186 txId 149 2026-01-08T15:33:18.613271Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3153: StateWork, received event# 269287938, Sender [1:463:2405], Recipient [1:242:2234]: {TEvReadSet step# 1000005 txid# 152 TabletSource# 9437184 TabletDest# 9437186 SetTabletConsumer# 9437186 Flags# 0 Seqno# 50} 2026-01-08T15:33:18.613301Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3173: StateWork, processing event TEvTxProcessing::TEvReadSetAck 2026-01-08T15:33:18.613341Z node 1 :TX_DATASHARD DEBUG: datashard_outreadset.cpp:150: Receive RS Ack at 9437184 source 9437184 dest 9437186 consumer 9437186 txId 152 >> TSchemeshardStatsBatchingTest::PeriodicTopicStatsReload >> TxUsage::Sinks_Oltp_WriteToTopic_3_Query |87.0%| [TM] {BAZEL_UPLOAD} ydb/core/tx/datashard/ut_order/unittest ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/datashard/ut_order/unittest >> DataShardTxOrder::ImmediateBetweenOnline_oo8_dirty [GOOD] Test command err: 2026-01-08T15:33:11.477140Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7755: Cannot subscribe to console configs 2026-01-08T15:33:11.477208Z node 1 :IMPORT WARN: schemeshard_import.cpp:440: Table profiles were not loaded 2026-01-08T15:33:11.488367Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3119: StateInit, received event# 268828672, Sender [1:113:2143], Recipient [1:136:2157]: NKikimr::TEvTablet::TEvBoot 2026-01-08T15:33:11.527909Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3119: StateInit, received event# 268828673, Sender [1:113:2143], Recipient [1:136:2157]: NKikimr::TEvTablet::TEvRestored 2026-01-08T15:33:11.528306Z node 1 :TX_DATASHARD INFO: datashard.cpp:374: TDataShard::OnActivateExecutor: tablet 9437184 actor [1:136:2157] 2026-01-08T15:33:11.528601Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:661: TxInitSchema.Execute 2026-01-08T15:33:11.623725Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateInactive, received event# 268828684, Sender [1:113:2143], Recipient [1:136:2157]: NKikimr::TEvTablet::TEvFollowerSyncComplete 2026-01-08T15:33:11.638951Z node 1 
:TX_DATASHARD DEBUG: datashard__init.cpp:727: TxInitSchema.Complete 2026-01-08T15:33:11.639060Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:50: TDataShard::TTxInit::Execute 2026-01-08T15:33:11.640855Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1324: LoadChangeRecords: QueueSize: 0, at tablet: 9437184 2026-01-08T15:33:11.640935Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1380: LoadLockChangeRecords at tablet: 9437184 2026-01-08T15:33:11.641059Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1429: LoadChangeRecordCommits at tablet: 9437184 2026-01-08T15:33:11.641434Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:94: TDataShard::TTxInit::Complete 2026-01-08T15:33:11.641722Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:102: TDataShard::TTxInitRestored::Execute 2026-01-08T15:33:11.641800Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:142: DataShard 9437184 persisting started state actor id [1:209:2157] in generation 2 2026-01-08T15:33:11.795576Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:151: TDataShard::TTxInitRestored::Complete 2026-01-08T15:33:11.849630Z node 1 :TX_DATASHARD INFO: datashard.cpp:418: Switched to work state WaitScheme tabletId 9437184 2026-01-08T15:33:11.849868Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:458: 9437184 not sending time cast registration request in state WaitScheme: missing processing params 2026-01-08T15:33:11.849993Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1259: Change sender created: at tablet: 9437184, actorId: [1:223:2219] 2026-01-08T15:33:11.850039Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1264: Trying to activate change sender: at tablet: 9437184 2026-01-08T15:33:11.850088Z node 1 :TX_DATASHARD INFO: datashard.cpp:1281: Cannot activate change sender: at tablet: 9437184, state: WaitScheme 2026-01-08T15:33:11.850170Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 9437184 2026-01-08T15:33:11.850537Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3153: StateWork, received event# 2146435072, Sender [1:136:2157], Recipient [1:136:2157]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvProgressTransaction 2026-01-08T15:33:11.850612Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3178: StateWork, processing event TEvPrivate::TEvProgressTransaction 2026-01-08T15:33:11.850966Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 9437184 2026-01-08T15:33:11.851086Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 9437184 2026-01-08T15:33:11.851243Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 9437184 2026-01-08T15:33:11.851323Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 9437184 active 0 active planned 0 immediate 0 planned 0 2026-01-08T15:33:11.851379Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:385: Check unit PlanQueue at 9437184 2026-01-08T15:33:11.851426Z node 1 :TX_DATASHARD TRACE: plan_queue_unit.cpp:59: TPlanQueueUnit at 9437184 has no attached operations 2026-01-08T15:33:11.851465Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:403: Unit PlanQueue has no ready operations at 9437184 2026-01-08T15:33:11.851504Z node 1 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 9437184 TxInFly 0 2026-01-08T15:33:11.851593Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 9437184 2026-01-08T15:33:11.851833Z node 1 :TX_DATASHARD TRACE: 
datashard_impl.h:3153: StateWork, received event# 269877761, Sender [1:219:2216], Recipient [1:136:2157]: NKikimr::TEvTabletPipe::TEvServerConnected 2026-01-08T15:33:11.851875Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3189: StateWork, processing event TEvTabletPipe::TEvServerConnected 2026-01-08T15:33:11.851937Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3740: Server connected at leader tablet# 9437184, clientId# [1:217:2215], serverId# [1:219:2216], sessionId# [0:0:0] 2026-01-08T15:33:11.864589Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3153: StateWork, received event# 269549568, Sender [1:104:2137], Recipient [1:136:2157]: NKikimrTxDataShard.TEvProposeTransaction TxKind: TX_KIND_SCHEME SourceDeprecated { RawX1: 104 RawX2: 4294969433 } TxBody: "\nI\n\006table1\020\r\032\t\n\003key\030\002 \"\032\014\n\005value\030\200$ 8\032\n\n\004uint\030\002 9(\":\010Z\006\010\010\030\001(\001J\014/Root/table1" TxId: 1 ExecLevel: 0 Flags: 0 SchemeShardId: 4200 ProcessingParams { } 2026-01-08T15:33:11.864706Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3159: StateWork, processing event TEvDataShard::TEvProposeTransaction 2026-01-08T15:33:11.864845Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 9437184 2026-01-08T15:33:11.865030Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1848: Trying to execute [0:1] at 9437184 on unit CheckSchemeTx 2026-01-08T15:33:11.865089Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:133: Propose scheme transaction at tablet 9437184 txId 1 ssId 4200 seqNo 0:0 2026-01-08T15:33:11.865152Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:221: Prepared scheme transaction txId 1 at tablet 9437184 2026-01-08T15:33:11.865219Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1883: Execution status for [0:1] at 9437184 is ExecutedNoMoreRestarts 2026-01-08T15:33:11.865268Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1931: Advance execution plan for [0:1] at 9437184 executing on unit CheckSchemeTx 2026-01-08T15:33:11.865307Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1937: Add [0:1] at 9437184 to execution unit StoreSchemeTx 2026-01-08T15:33:11.865359Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1848: Trying to execute [0:1] at 9437184 on unit StoreSchemeTx 2026-01-08T15:33:11.865698Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1883: Execution status for [0:1] at 9437184 is DelayCompleteNoMoreRestarts 2026-01-08T15:33:11.865742Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1931: Advance execution plan for [0:1] at 9437184 executing on unit StoreSchemeTx 2026-01-08T15:33:11.865803Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1937: Add [0:1] at 9437184 to execution unit FinishPropose 2026-01-08T15:33:11.865848Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1848: Trying to execute [0:1] at 9437184 on unit FinishPropose 2026-01-08T15:33:11.865911Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1883: Execution status for [0:1] at 9437184 is DelayComplete 2026-01-08T15:33:11.865942Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1931: Advance execution plan for [0:1] at 9437184 executing on unit FinishPropose 2026-01-08T15:33:11.865978Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1937: Add [0:1] at 9437184 to execution unit WaitForPlan 2026-01-08T15:33:11.866012Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1848: Trying to execute [0:1] at 9437184 on unit WaitForPlan 2026-01-08T15:33:11.866053Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1853: 
Operation [0:1] at 9437184 is not ready to execute on unit WaitForPlan 2026-01-08T15:33:11.880506Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 9437184 2026-01-08T15:33:11.880585Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1954: Complete execution for [0:1] at 9437184 on unit StoreSchemeTx 2026-01-08T15:33:11.880626Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1954: Complete execution for [0:1] at 9437184 on unit FinishPropose 2026-01-08T15:33:11.880684Z node 1 :TX_DATASHARD TRACE: finish_propose_unit.cpp:167: Propose transaction complete txid 1 at tablet 9437184 send to client, exec latency: 0 ms, propose latency: 1 ms, status: PREPARED 2026-01-08T15:33:11.880748Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:468: 9437184 not sending time cast registration request in state WaitScheme 2026-01-08T15:33:11.881322Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3153: StateWork, received event# 269877761, Sender [1:229:2225], Recipient [1:136:2157]: NKikimr::TEvTabletPipe::TEvServerConnected 2026-01-08T15:33:11.881386Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3189: StateWork, processing event TEvTabletPipe::TEvServerConnected 2026-01-08T15:33:11.881429Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3740: Server connected at leader tablet# 9437184, clientId# [1:228:2224], serverId# [1:229:2225], sessionId# [0:0:0] 2026-01-08T15:33:11.881560Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3153: StateWork, received event# 269287424, Sender [1:104:2137], Recipient [1:136:2157]: {TEvPlanStep step# 1000001 MediatorId# 0 TabletID 9437184} 2026-01-08T15:33:11.881591Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3171: StateWork, processing event TEvTxProcessing::TEvPlanStep 2026-01-08T15:33:11.881759Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1811: Trying to execute [1000001:1] at 9437184 on unit WaitForPlan 2026-01-08T15:33:11.881818Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1826: Execution status for [1000001:1] at 9437184 is Executed 2026-01-08T15:33:11.881879Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1931: Advance execution plan for [1000001:1] at 9437184 executing on unit WaitForPlan 2026-01-08T15:33:11.881919Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1937: Add [1000001:1] at 9437184 to execution unit PlanQueue 2026-01-08T15:33:11.896845Z node 1 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:69: Planned transaction txId 1 at step 1000001 at tablet 9437184 { Transactions { TxId: 1 AckTo { RawX1: 104 RawX2: 4294969433 } } Step: 1000001 MediatorID: 0 TabletID: 9437184 } 2026-01-08T15:33:11.896937Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 9437184 2026-01-08T15:33:11.897202Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3153: StateWork, received event# 2146435072, Sender [1:136:2157], Recipient [1:136:2157]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvProgressTransaction 2026-01-08T15:33:11.897244Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3178: StateWork, processing event TEvPrivate::TEvProgressTransaction 2026-01-08T15:33:11.897304Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 9437184 2026-01-08T15:33:11.897407Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 9437184 active 0 active planned 0 immediate 0 planned 1 2026-01-08T15:33:11.897456Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:327: Check candidate unit PlanQueue at 9437184 
2026-01-08T15:33:11.897497Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:333: Found ready operation [1000001:1] in PlanQueue unit at 9437184 2026-01-08T15:33:11.897545Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1848: Trying to execute [100000 ... 19.249400Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1954: Complete execution for [1000005:134] at 9437186 on unit CompleteOperation 2026-01-08T15:33:19.249443Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:810: Complete [1000005 : 134] from 9437186 at tablet 9437186 send result to client [1:104:2137], exec latency: 2 ms, propose latency: 4 ms 2026-01-08T15:33:19.249488Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:564: Send delayed Ack RS Ack at 9437186 {TEvReadSet step# 1000005 txid# 134 TabletSource# 9437184 TabletDest# 9437186 SetTabletConsumer# 9437186 Flags# 0 Seqno# 44} 2026-01-08T15:33:19.249516Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 9437186 2026-01-08T15:33:19.249638Z node 1 :TX_DATASHARD DEBUG: datashard__readset.cpp:91: TTxReadSet::Complete at 9437186 2026-01-08T15:33:19.249672Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 9437186 2026-01-08T15:33:19.249718Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1954: Complete execution for [1000005:137] at 9437186 on unit CompleteOperation 2026-01-08T15:33:19.249783Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:810: Complete [1000005 : 137] from 9437186 at tablet 9437186 send result to client [1:104:2137], exec latency: 2 ms, propose latency: 4 ms 2026-01-08T15:33:19.249836Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:564: Send delayed Ack RS Ack at 9437186 {TEvReadSet step# 1000005 txid# 137 TabletSource# 9437184 TabletDest# 9437186 SetTabletConsumer# 9437186 Flags# 0 Seqno# 45} 2026-01-08T15:33:19.249877Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 9437186 2026-01-08T15:33:19.249991Z node 1 :TX_DATASHARD DEBUG: datashard__readset.cpp:91: TTxReadSet::Complete at 9437186 2026-01-08T15:33:19.250019Z node 1 :TX_DATASHARD DEBUG: datashard__readset.cpp:91: TTxReadSet::Complete at 9437186 2026-01-08T15:33:19.250039Z node 1 :TX_DATASHARD DEBUG: datashard__readset.cpp:91: TTxReadSet::Complete at 9437186 2026-01-08T15:33:19.250064Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 9437186 2026-01-08T15:33:19.250090Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1954: Complete execution for [1000005:140] at 9437186 on unit CompleteOperation 2026-01-08T15:33:19.250126Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:810: Complete [1000005 : 140] from 9437186 at tablet 9437186 send result to client [1:104:2137], exec latency: 2 ms, propose latency: 4 ms 2026-01-08T15:33:19.250164Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:564: Send delayed Ack RS Ack at 9437186 {TEvReadSet step# 1000005 txid# 140 TabletSource# 9437184 TabletDest# 9437186 SetTabletConsumer# 9437186 Flags# 0 Seqno# 46} 2026-01-08T15:33:19.250190Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 9437186 2026-01-08T15:33:19.250291Z node 1 :TX_DATASHARD DEBUG: datashard__readset.cpp:91: TTxReadSet::Complete at 9437186 2026-01-08T15:33:19.250312Z node 1 :TX_DATASHARD DEBUG: datashard__readset.cpp:91: TTxReadSet::Complete at 9437186 2026-01-08T15:33:19.256822Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete 
at 9437186 2026-01-08T15:33:19.256890Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1954: Complete execution for [1000005:143] at 9437186 on unit CompleteOperation 2026-01-08T15:33:19.256957Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:810: Complete [1000005 : 143] from 9437186 at tablet 9437186 send result to client [1:104:2137], exec latency: 2 ms, propose latency: 4 ms 2026-01-08T15:33:19.257030Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:564: Send delayed Ack RS Ack at 9437186 {TEvReadSet step# 1000005 txid# 143 TabletSource# 9437184 TabletDest# 9437186 SetTabletConsumer# 9437186 Flags# 0 Seqno# 47} 2026-01-08T15:33:19.257081Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 9437186 2026-01-08T15:33:19.257253Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 9437186 2026-01-08T15:33:19.257282Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1954: Complete execution for [1000005:146] at 9437186 on unit CompleteOperation 2026-01-08T15:33:19.257319Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:810: Complete [1000005 : 146] from 9437186 at tablet 9437186 send result to client [1:104:2137], exec latency: 2 ms, propose latency: 4 ms 2026-01-08T15:33:19.257362Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:564: Send delayed Ack RS Ack at 9437186 {TEvReadSet step# 1000005 txid# 146 TabletSource# 9437184 TabletDest# 9437186 SetTabletConsumer# 9437186 Flags# 0 Seqno# 48} 2026-01-08T15:33:19.257393Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 9437186 2026-01-08T15:33:19.257495Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 9437186 2026-01-08T15:33:19.257530Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1954: Complete execution for [1000005:149] at 9437186 on unit CompleteOperation 2026-01-08T15:33:19.257585Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:810: Complete [1000005 : 149] from 9437186 at tablet 9437186 send result to client [1:104:2137], exec latency: 2 ms, propose latency: 4 ms 2026-01-08T15:33:19.257628Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:564: Send delayed Ack RS Ack at 9437186 {TEvReadSet step# 1000005 txid# 149 TabletSource# 9437184 TabletDest# 9437186 SetTabletConsumer# 9437186 Flags# 0 Seqno# 49} 2026-01-08T15:33:19.257652Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 9437186 2026-01-08T15:33:19.257778Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 9437186 2026-01-08T15:33:19.257805Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1954: Complete execution for [1000005:152] at 9437186 on unit CompleteOperation 2026-01-08T15:33:19.257838Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:810: Complete [1000005 : 152] from 9437186 at tablet 9437186 send result to client [1:104:2137], exec latency: 2 ms, propose latency: 4 ms 2026-01-08T15:33:19.257888Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:564: Send delayed Ack RS Ack at 9437186 {TEvReadSet step# 1000005 txid# 152 TabletSource# 9437184 TabletDest# 9437186 SetTabletConsumer# 9437186 Flags# 0 Seqno# 50} 2026-01-08T15:33:19.257919Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 9437186 2026-01-08T15:33:19.258247Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3153: StateWork, received event# 269287938, Sender [1:463:2405], Recipient [1:242:2234]: 
{TEvReadSet step# 1000005 txid# 134 TabletSource# 9437184 TabletDest# 9437186 SetTabletConsumer# 9437186 Flags# 0 Seqno# 44} 2026-01-08T15:33:19.258289Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3173: StateWork, processing event TEvTxProcessing::TEvReadSetAck 2026-01-08T15:33:19.258339Z node 1 :TX_DATASHARD DEBUG: datashard_outreadset.cpp:150: Receive RS Ack at 9437184 source 9437184 dest 9437186 consumer 9437186 txId 134 2026-01-08T15:33:19.258498Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3153: StateWork, received event# 269287938, Sender [1:242:2234], Recipient [1:352:2320]: {TEvReadSet step# 1000005 txid# 152 TabletSource# 9437185 TabletDest# 9437184 SetTabletConsumer# 9437184 Flags# 0 Seqno# 99} 2026-01-08T15:33:19.258531Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3173: StateWork, processing event TEvTxProcessing::TEvReadSetAck 2026-01-08T15:33:19.258554Z node 1 :TX_DATASHARD DEBUG: datashard_outreadset.cpp:150: Receive RS Ack at 9437185 source 9437185 dest 9437184 consumer 9437184 txId 152 2026-01-08T15:33:19.258688Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3153: StateWork, received event# 269287938, Sender [1:463:2405], Recipient [1:242:2234]: {TEvReadSet step# 1000005 txid# 137 TabletSource# 9437184 TabletDest# 9437186 SetTabletConsumer# 9437186 Flags# 0 Seqno# 45} 2026-01-08T15:33:19.258717Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3173: StateWork, processing event TEvTxProcessing::TEvReadSetAck 2026-01-08T15:33:19.258741Z node 1 :TX_DATASHARD DEBUG: datashard_outreadset.cpp:150: Receive RS Ack at 9437184 source 9437184 dest 9437186 consumer 9437186 txId 137 2026-01-08T15:33:19.258799Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3153: StateWork, received event# 269287938, Sender [1:242:2234], Recipient [1:352:2320]: {TEvReadSet step# 1000005 txid# 154 TabletSource# 9437185 TabletDest# 9437184 SetTabletConsumer# 9437184 Flags# 0 Seqno# 100} 2026-01-08T15:33:19.258830Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3173: StateWork, processing event TEvTxProcessing::TEvReadSetAck 2026-01-08T15:33:19.258863Z node 1 :TX_DATASHARD DEBUG: datashard_outreadset.cpp:150: Receive RS Ack at 9437185 source 9437185 dest 9437184 consumer 9437184 txId 154 2026-01-08T15:33:19.258939Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3153: StateWork, received event# 269287938, Sender [1:463:2405], Recipient [1:242:2234]: {TEvReadSet step# 1000005 txid# 140 TabletSource# 9437184 TabletDest# 9437186 SetTabletConsumer# 9437186 Flags# 0 Seqno# 46} 2026-01-08T15:33:19.258965Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3173: StateWork, processing event TEvTxProcessing::TEvReadSetAck 2026-01-08T15:33:19.258990Z node 1 :TX_DATASHARD DEBUG: datashard_outreadset.cpp:150: Receive RS Ack at 9437184 source 9437184 dest 9437186 consumer 9437186 txId 140 2026-01-08T15:33:19.259106Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3153: StateWork, received event# 269287938, Sender [1:463:2405], Recipient [1:242:2234]: {TEvReadSet step# 1000005 txid# 143 TabletSource# 9437184 TabletDest# 9437186 SetTabletConsumer# 9437186 Flags# 0 Seqno# 47} 2026-01-08T15:33:19.259145Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3173: StateWork, processing event TEvTxProcessing::TEvReadSetAck 2026-01-08T15:33:19.259187Z node 1 :TX_DATASHARD DEBUG: datashard_outreadset.cpp:150: Receive RS Ack at 9437184 source 9437184 dest 9437186 consumer 9437186 txId 143 2026-01-08T15:33:19.259294Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3153: StateWork, received event# 269287938, Sender [1:463:2405], Recipient [1:242:2234]: 
{TEvReadSet step# 1000005 txid# 146 TabletSource# 9437184 TabletDest# 9437186 SetTabletConsumer# 9437186 Flags# 0 Seqno# 48} 2026-01-08T15:33:19.259322Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3173: StateWork, processing event TEvTxProcessing::TEvReadSetAck 2026-01-08T15:33:19.259348Z node 1 :TX_DATASHARD DEBUG: datashard_outreadset.cpp:150: Receive RS Ack at 9437184 source 9437184 dest 9437186 consumer 9437186 txId 146 2026-01-08T15:33:19.259418Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3153: StateWork, received event# 269287938, Sender [1:463:2405], Recipient [1:242:2234]: {TEvReadSet step# 1000005 txid# 149 TabletSource# 9437184 TabletDest# 9437186 SetTabletConsumer# 9437186 Flags# 0 Seqno# 49} 2026-01-08T15:33:19.259452Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3173: StateWork, processing event TEvTxProcessing::TEvReadSetAck 2026-01-08T15:33:19.259477Z node 1 :TX_DATASHARD DEBUG: datashard_outreadset.cpp:150: Receive RS Ack at 9437184 source 9437184 dest 9437186 consumer 9437186 txId 149 2026-01-08T15:33:19.259545Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3153: StateWork, received event# 269287938, Sender [1:463:2405], Recipient [1:242:2234]: {TEvReadSet step# 1000005 txid# 152 TabletSource# 9437184 TabletDest# 9437186 SetTabletConsumer# 9437186 Flags# 0 Seqno# 50} 2026-01-08T15:33:19.259571Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3173: StateWork, processing event TEvTxProcessing::TEvReadSetAck 2026-01-08T15:33:19.259621Z node 1 :TX_DATASHARD DEBUG: datashard_outreadset.cpp:150: Receive RS Ack at 9437184 source 9437184 dest 9437186 consumer 9437186 txId 152
|87.0%| [TM] {BAZEL_UPLOAD} ydb/core/tx/datashard/ut_order/unittest
>> TSchemeshardStatsBatchingTest::ShouldPersistByBatchTimeout
|87.0%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/schemeshard/ut_shred_reboots/ydb-core-tx-schemeshard-ut_shred_reboots
|87.0%| [LD] {RESULT} $(B)/ydb/core/tx/schemeshard/ut_shred_reboots/ydb-core-tx-schemeshard-ut_shred_reboots
|87.0%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/schemeshard/ut_shred_reboots/ydb-core-tx-schemeshard-ut_shred_reboots
>> BasicUsage::AlterTopicWithSharedConsumer_DisableDeadLetterPolicy [GOOD]
>> BasicUsage::AlterTopicWithSharedConsumer_SetDeleteDeadLetterPolicy
>> TSchemeshardStatsBatchingTest::TopicAccountSizeAndUsedReserveSize [GOOD]
>> TSchemeshardStatsBatchingTest::TopicPeriodicStatMeteringModeReserved
>> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-LocalUser-CreateUser-2 [GOOD]
>> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-LocalUser-CreateUser-3
>> KqpPg::JoinWithQueryService-StreamLookup [GOOD]
>> KqpPg::PgAggregate+useSink
>> DataShardWrite::UncommittedUpdateLockDeletedRowAboveSnapshot [GOOD]
>> DataShardWrite::UncommittedUpdateLockUncommittedNewRow
>> TStoragePoolsStatsPersistence::SameAggregatedStatsAfterRestart
>> KqpPg::TableArrayInsert-useSink [GOOD]
>> TSchemeshardStatsBatchingTest::PeriodicTopicStatsReload [GOOD]
>> KqpPg::Returning+useSink
|87.0%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/datashard/ut_kqp_errors/ydb-core-tx-datashard-ut_kqp_errors
>> DataShardOutOfOrder::TestLateKqpScanAfterColumnDrop+UseSink [GOOD]
|87.0%| [LD] {RESULT} $(B)/ydb/core/tx/datashard/ut_kqp_errors/ydb-core-tx-datashard-ut_kqp_errors
|87.0%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/datashard/ut_kqp_errors/ydb-core-tx-datashard-ut_kqp_errors
------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_stats/unittest >> TSchemeshardStatsBatchingTest::TopicAccountSizeAndUsedReserveSize [GOOD]
Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:119:2058] recipient: [1:113:2143] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:119:2058] recipient: [1:113:2143] Leader for TabletID 72057594046678944 is [1:130:2154] sender: [1:131:2058] recipient: [1:113:2143] 2026-01-08T15:33:20.895387Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7938: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2026-01-08T15:33:20.895527Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7966: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2026-01-08T15:33:20.895574Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7852: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2026-01-08T15:33:20.895618Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7868: OperationsProcessing config: using default configuration 2026-01-08T15:33:20.895663Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7874: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2026-01-08T15:33:20.895693Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7874: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2026-01-08T15:33:20.895747Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7998: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2026-01-08T15:33:20.895874Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_shred_manager.cpp:39: [RootShredManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, ShredInterval# 604800.000000s, ShredBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2026-01-08T15:33:20.896819Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:8069: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2026-01-08T15:33:20.897099Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2026-01-08T15:33:21.096087Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7755: Cannot subscribe to console configs 2026-01-08T15:33:21.096149Z node 1 :IMPORT WARN: schemeshard_import.cpp:440: Table profiles were not loaded 2026-01-08T15:33:21.140994Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2026-01-08T15:33:21.141254Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2026-01-08T15:33:21.141437Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2026-01-08T15:33:21.183316Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2026-01-08T15:33:21.183599Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:464: Clear TempDirsState with owners number: 0 2026-01-08T15:33:21.184342Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1383: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2026-01-08T15:33:21.184615Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, 
LocalPathId: 1], at schemeshard: 72057594046678944 2026-01-08T15:33:21.191055Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:160: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2026-01-08T15:33:21.191295Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_shred_manager.cpp:84: [RootShredManager] Stop 2026-01-08T15:33:21.192557Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2026-01-08T15:33:21.192619Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2026-01-08T15:33:21.192730Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2026-01-08T15:33:21.192791Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2026-01-08T15:33:21.192834Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:213: TTxServerlessStorageBilling.Complete 2026-01-08T15:33:21.193038Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:7111: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2026-01-08T15:33:21.479615Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2214] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot" OperationType: ESchemeOpMkDir MkDir { Name: ".sys" } Internal: true FailOnExist: false } TxId: 281474976710657 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:33:21.480665Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2214] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "auth_permissions" Type: EAuthPermissions } } TxId: 281474976710658 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:33:21.480798Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2214] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "auth_effective_permissions" Type: EAuthEffectivePermissions } } TxId: 281474976710659 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:33:21.480909Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2214] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "auth_users" Type: EAuthUsers } } TxId: 281474976710660 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:33:21.480994Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2214] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView 
Internal: true FailOnExist: false CreateSysView { Name: "pg_tables" Type: EPgTables } } TxId: 281474976710661 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:33:21.481106Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2214] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "ds_pdisks" Type: EPDisks } } TxId: 281474976710662 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:33:21.481175Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2214] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "auth_group_members" Type: EAuthGroupMembers } } TxId: 281474976710663 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:33:21.481239Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2214] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "top_partitions_one_minute" Type: ETopPartitionsByCpuOneMinute } } TxId: 281474976710664 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:33:21.481316Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2214] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "top_queries_by_request_units_one_hour" Type: ETopQueriesByRequestUnitsOneHour } } TxId: 281474976710665 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:33:21.481403Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2214] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "ds_vslots" Type: EVSlots } } TxId: 281474976710666 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:33:21.481473Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2214] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "top_partitions_by_tli_one_hour" Type: ETopPartitionsByTliOneHour } } TxId: 281474976710667 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:33:21.481561Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2214] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "nodes" Type: ENodes } } TxId: 281474976710668 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:33:21.481632Z node 
1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2214] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "top_queries_by_cpu_time_one_hour" Type: ETopQueriesByCpuTimeOneHour } } TxId: 281474976710669 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:33:21.481707Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2214] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "top_queries_by_read_bytes_one_hour" Type: ETopQueriesByReadBytesOneHour } } TxId: 281474976710670 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:33:21.481812Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2214] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "top_queries_by_read_bytes_one_minute" Type: ETopQueriesByR ... 4037968897 TxId_Deprecated: 0 ShardOwnerId: 72057594046678944 ShardLocalIdx: 4 2026-01-08T15:33:23.523336Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5274: StateWork, processing event TEvHive::TEvDeleteTabletReply 2026-01-08T15:33:23.523417Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6378: Free tablet reply, message: Status: OK Origin: 72057594037968897 TxId_Deprecated: 0 ShardOwnerId: 72057594046678944 ShardLocalIdx: 4, at schemeshard: 72057594046678944 2026-01-08T15:33:23.526790Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:216: TSideEffects ApplyOnComplete at tablet# 72057594046678944 2026-01-08T15:33:23.528486Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 104 2026-01-08T15:33:23.528615Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:216: TSideEffects ApplyOnComplete at tablet# 72057594046678944 2026-01-08T15:33:23.528751Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:160: TTxCleanDroppedPaths Complete, done PersistRemovePath for 1 paths, skipped 0, left 0 candidates, at schemeshard: 72057594046678944 2026-01-08T15:33:23.528851Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 104 2026-01-08T15:33:23.528881Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:216: TSideEffects ApplyOnComplete at tablet# 72057594046678944 2026-01-08T15:33:23.531898Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:176: Deleted shardIdx 72057594046678944:3 2026-01-08T15:33:23.532042Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:176: Deleted shardIdx 72057594046678944:4 2026-01-08T15:33:23.532245Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5248: StateWork, received event# 269877763, Sender [1:1308:3156], Recipient [1:686:2672]: NKikimr::TEvTabletPipe::TEvClientDestroyed { TabletId: 72057594037968897 ClientId: [1:1308:3156] ServerId: [1:1311:3159] } 
2026-01-08T15:33:23.532290Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5344: StateWork, processing event TEvTabletPipe::TEvClientDestroyed 2026-01-08T15:33:23.532322Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:6177: Client pipe, to tablet: 72057594037968897, from:72057594046678944 is reset TestModificationResult got TxId: 104, wait until txId: 104 TestWaitNotification wait txId: 104 2026-01-08T15:33:23.532756Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:213: tests -- TTxNotificationSubscriber for txId 104: send EvNotifyTxCompletion 2026-01-08T15:33:23.532805Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:259: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 104 2026-01-08T15:33:23.533478Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5248: StateWork, received event# 269877761, Sender [1:1324:3172], Recipient [1:686:2672]: NKikimr::TEvTabletPipe::TEvServerConnected 2026-01-08T15:33:23.533539Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5345: StateWork, processing event TEvTabletPipe::TEvServerConnected 2026-01-08T15:33:23.533578Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:6167: Pipe server connected, at tablet: 72057594046678944 2026-01-08T15:33:23.533706Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5248: StateWork, received event# 271124996, Sender [1:911:2844], Recipient [1:686:2672]: NKikimrScheme.TEvNotifyTxCompletion TxId: 104 2026-01-08T15:33:23.533741Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5261: StateWork, processing event TEvSchemeShard::TEvNotifyTxCompletion 2026-01-08T15:33:23.533811Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:123: NotifyTxCompletion, unknown transaction, txId: 104, at schemeshard: 72057594046678944 2026-01-08T15:33:23.533907Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:230: tests -- TTxNotificationSubscriber for txId 104: got EvNotifyTxCompletionResult 2026-01-08T15:33:23.533960Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:239: tests -- TTxNotificationSubscriber for txId 104: satisfy waiter [1:1322:3170] 2026-01-08T15:33:23.534164Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5248: StateWork, received event# 269877764, Sender [1:1324:3172], Recipient [1:686:2672]: NKikimr::TEvTabletPipe::TEvServerDisconnected 2026-01-08T15:33:23.534209Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5346: StateWork, processing event TEvTabletPipe::TEvServerDisconnected 2026-01-08T15:33:23.534242Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:6215: Server pipe is reset, at schemeshard: 72057594046678944 TestWaitNotification: OK eventTxId 104 2026-01-08T15:33:23.534944Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5248: StateWork, received event# 271122945, Sender [1:1325:3173], Recipient [1:686:2672]: NKikimrSchemeOp.TDescribePath Path: "/MyRoot/Topic1" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false } 2026-01-08T15:33:23.535003Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5260: StateWork, processing event TEvSchemeShard::TEvDescribeScheme 2026-01-08T15:33:23.535115Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/Topic1" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2026-01-08T15:33:23.535360Z node 1 
:SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/Topic1" took 225us result status StatusSuccess 2026-01-08T15:33:23.535934Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/Topic1" PathDescription { Self { Name: "Topic1" PathId: 38 SchemeshardId: 72057594046678944 PathType: EPathTypePersQueueGroup CreateFinished: true CreateTxId: 101 CreateStep: 5000038 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 PQVersion: 1 } ChildrenExist: false } PersQueueGroup { Name: "Topic1" PathId: 38 TotalGroupCount: 1 PartitionPerTablet: 1 PQTabletConfig { PartitionConfig { LifetimeSeconds: 13 WriteSpeedInBytesPerSecond: 19 } YdbDatabasePath: "/MyRoot" MeteringMode: METERING_MODE_RESERVED_CAPACITY } Partitions { PartitionId: 0 TabletId: 72075186233409546 Status: Active } AlterVersion: 1 BalancerTabletID: 72075186233409547 NextPartitionId: 1 } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 38 PathsLimit: 10000 ShardsInside: 4 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 808 AccountSize: 808 DataSize: 31 UsedReserveSize: 31 } } PQPartitionsInside: 4 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 38 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 2026-01-08T15:33:23.536845Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5248: StateWork, received event# 271188001, Sender [1:1326:3174], Recipient [1:686:2672]: NKikimrPQ.TEvPeriodicTopicStats PathId: 40 Generation: 1 Round: 6 DataSize: 151 UsedReserveSize: 151 2026-01-08T15:33:23.536904Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5286: StateWork, processing event TEvPersQueue::TEvPeriodicTopicStats 2026-01-08T15:33:23.536995Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__pq_stats.cpp:100: Got periodic topic stats at partition [OwnerId: 72057594046678944, LocalPathId: 40] DataSize 151 UsedReserveSize 151 2026-01-08T15:33:23.537036Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__pq_stats.cpp:128: Will execute TTxStoreStats, queue# 1 2026-01-08T15:33:23.537496Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5248: StateWork, received event# 271122945, Sender [1:1327:3175], Recipient [1:686:2672]: NKikimrSchemeOp.TDescribePath Path: "/MyRoot/Topic1" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false } 2026-01-08T15:33:23.537540Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5260: StateWork, 
processing event TEvSchemeShard::TEvDescribeScheme 2026-01-08T15:33:23.537652Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/Topic1" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2026-01-08T15:33:23.540324Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/Topic1" took 202us result status StatusSuccess 2026-01-08T15:33:23.540901Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/Topic1" PathDescription { Self { Name: "Topic1" PathId: 38 SchemeshardId: 72057594046678944 PathType: EPathTypePersQueueGroup CreateFinished: true CreateTxId: 101 CreateStep: 5000038 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 PQVersion: 1 } ChildrenExist: false } PersQueueGroup { Name: "Topic1" PathId: 38 TotalGroupCount: 1 PartitionPerTablet: 1 PQTabletConfig { PartitionConfig { LifetimeSeconds: 13 WriteSpeedInBytesPerSecond: 19 } YdbDatabasePath: "/MyRoot" MeteringMode: METERING_MODE_RESERVED_CAPACITY } Partitions { PartitionId: 0 TabletId: 72075186233409546 Status: Active } AlterVersion: 1 BalancerTabletID: 72075186233409547 NextPartitionId: 1 } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 38 PathsLimit: 10000 ShardsInside: 4 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 808 AccountSize: 808 DataSize: 182 UsedReserveSize: 182 } } PQPartitionsInside: 4 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 38 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944
|87.0%| [TM] {BAZEL_UPLOAD} ydb/core/tx/schemeshard/ut_stats/unittest
>> TAsyncComputeActorTest::InputTransformMultichannel [GOOD]
------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_stats/unittest >> TSchemeshardStatsBatchingTest::PeriodicTopicStatsReload [GOOD]
Test command err:
Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2143] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2143] Leader for TabletID 72057594046678944 is [1:129:2154] sender: [1:130:2058] recipient: [1:112:2143] 2026-01-08T15:33:21.850546Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7938: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s,
RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2026-01-08T15:33:21.850730Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7966: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2026-01-08T15:33:21.850804Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7852: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2026-01-08T15:33:21.850856Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7868: OperationsProcessing config: using default configuration 2026-01-08T15:33:21.850954Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7874: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2026-01-08T15:33:21.850998Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7874: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2026-01-08T15:33:21.851088Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7998: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2026-01-08T15:33:21.851227Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_shred_manager.cpp:39: [RootShredManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, ShredInterval# 604800.000000s, ShredBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2026-01-08T15:33:21.852413Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:8069: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2026-01-08T15:33:21.852765Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2026-01-08T15:33:22.107289Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7755: Cannot subscribe to console configs 2026-01-08T15:33:22.107353Z node 1 :IMPORT WARN: schemeshard_import.cpp:440: Table profiles were not loaded 2026-01-08T15:33:22.156341Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2026-01-08T15:33:22.156743Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2026-01-08T15:33:22.156941Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2026-01-08T15:33:22.195830Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2026-01-08T15:33:22.196311Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:464: Clear TempDirsState with owners number: 0 2026-01-08T15:33:22.197097Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1383: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2026-01-08T15:33:22.197978Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2026-01-08T15:33:22.208603Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:160: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2026-01-08T15:33:22.208840Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_shred_manager.cpp:84: [RootShredManager] Stop 2026-01-08T15:33:22.210094Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 
2026-01-08T15:33:22.210163Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2026-01-08T15:33:22.210282Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2026-01-08T15:33:22.210348Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2026-01-08T15:33:22.210413Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:213: TTxServerlessStorageBilling.Complete 2026-01-08T15:33:22.210573Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:7111: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2026-01-08T15:33:22.428171Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot" OperationType: ESchemeOpMkDir MkDir { Name: ".sys" } Internal: true FailOnExist: false } TxId: 281474976710657 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:33:22.429282Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "auth_permissions" Type: EAuthPermissions } } TxId: 281474976710658 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:33:22.429425Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "auth_effective_permissions" Type: EAuthEffectivePermissions } } TxId: 281474976710659 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:33:22.429529Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "auth_users" Type: EAuthUsers } } TxId: 281474976710660 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:33:22.429640Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "pg_tables" Type: EPgTables } } TxId: 281474976710661 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:33:22.429717Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: 
"ds_pdisks" Type: EPDisks } } TxId: 281474976710662 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:33:22.429788Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "auth_group_members" Type: EAuthGroupMembers } } TxId: 281474976710663 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:33:22.429870Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "top_partitions_one_minute" Type: ETopPartitionsByCpuOneMinute } } TxId: 281474976710664 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:33:22.429941Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "top_queries_by_request_units_one_hour" Type: ETopQueriesByRequestUnitsOneHour } } TxId: 281474976710665 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:33:22.430006Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "ds_vslots" Type: EVSlots } } TxId: 281474976710666 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:33:22.430073Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "top_partitions_by_tli_one_hour" Type: ETopPartitionsByTliOneHour } } TxId: 281474976710667 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:33:22.430142Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "nodes" Type: ENodes } } TxId: 281474976710668 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:33:22.430253Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "top_queries_by_cpu_time_one_hour" Type: ETopQueriesByCpuTimeOneHour } } TxId: 281474976710669 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:33:22.430376Z node 1 
:FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "top_queries_by_read_bytes_one_hour" Type: ETopQueriesByReadBytesOneHour } } TxId: 281474976710670 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:33:22.430496Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "top_queries_by_read_bytes_one_minute" Type: ETopQueriesByR ... 5:33:24.362130Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:3754: TTxInit for TxShards, read records: 0, at schemeshard: 72057594046678944 2026-01-08T15:33:24.362310Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:3899: TTxInit for ShardToDelete, read records: 0, at schemeshard: 72057594046678944 2026-01-08T15:33:24.362394Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:3916: TTxInit for SystemShardToDelete, read records: 0, at schemeshard: 72057594046678944 2026-01-08T15:33:24.362509Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:3933: TTxInit for BackupSettings, read records: 0, at schemeshard: 72057594046678944 2026-01-08T15:33:24.362712Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4094: TTxInit for ShardBackupStatus, read records: 0, at schemeshard: 72057594046678944 2026-01-08T15:33:24.362809Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4110: TTxInit for CompletedBackup, read records: 0, at schemeshard: 72057594046678944 2026-01-08T15:33:24.363321Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4395: TTxInit for Publications, read records: 0, at schemeshard: 72057594046678944 2026-01-08T15:33:24.363694Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4762: IndexBuild , records: 0, at schemeshard: 72057594046678944 2026-01-08T15:33:24.363820Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4828: KMeansTreeSample records: 0, at schemeshard: 72057594046678944 2026-01-08T15:33:24.363893Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4887: KMeansTreeCluster records: 0, at schemeshard: 72057594046678944 2026-01-08T15:33:24.364098Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4976: SnapshotTables: snapshots: 0 tables: 0, at schemeshard: 72057594046678944 2026-01-08T15:33:24.364179Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:5003: SnapshotSteps: snapshots: 0, at schemeshard: 72057594046678944 2026-01-08T15:33:24.364257Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:5030: LongLocks: records: 0, at schemeshard: 72057594046678944 2026-01-08T15:33:24.364672Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:173: TSideEffects ApplyOnExecute at tablet# 72057594046678944 2026-01-08T15:33:24.377193Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:216: TSideEffects ApplyOnComplete at tablet# 72057594046678944 2026-01-08T15:33:24.377337Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_shred_manager.cpp:84: [RootShredManager] Stop 2026-01-08T15:33:24.384276Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5248: StateWork, 
received event# 2146435083, Sender [1:922:2853], Recipient [1:922:2853]: NKikimr::NSchemeShard::TEvPrivate::TEvServerlessStorageBilling 2026-01-08T15:33:24.384360Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5311: StateWork, processing event TEvPrivate::TEvServerlessStorageBilling 2026-01-08T15:33:24.385598Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2026-01-08T15:33:24.385706Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2026-01-08T15:33:24.386285Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2026-01-08T15:33:24.386373Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2026-01-08T15:33:24.386427Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:213: TTxServerlessStorageBilling.Complete 2026-01-08T15:33:24.386470Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:216: TSideEffects ApplyOnComplete at tablet# 72057594046678944 2026-01-08T15:33:24.389415Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5248: StateWork, received event# 274399233, Sender [1:958:2853], Recipient [1:922:2853]: NKikimr::TEvTxAllocatorClient::TEvAllocateResult 2026-01-08T15:33:24.389485Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5421: StateWork, processing event TEvTxAllocatorClient::TEvAllocateResult 2026-01-08T15:33:24.389524Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:7111: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 Leader for TabletID 72057594046678944 is [1:922:2853] sender: [1:980:2058] recipient: [1:15:2062] 2026-01-08T15:33:24.443624Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5248: StateWork, received event# 271122945, Sender [1:979:2898], Recipient [1:922:2853]: NKikimrSchemeOp.TDescribePath Path: "/MyRoot/Topic1" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false } 2026-01-08T15:33:24.443692Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5260: StateWork, processing event TEvSchemeShard::TEvDescribeScheme 2026-01-08T15:33:24.443829Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/Topic1" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2026-01-08T15:33:24.444076Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/Topic1" took 222us result status StatusSuccess 2026-01-08T15:33:24.444570Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/Topic1" PathDescription { Self { Name: "Topic1" PathId: 38 SchemeshardId: 72057594046678944 PathType: EPathTypePersQueueGroup CreateFinished: true CreateTxId: 101 CreateStep: 5000038 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: 
EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 PQVersion: 1 } ChildrenExist: false } PersQueueGroup { Name: "Topic1" PathId: 38 TotalGroupCount: 1 PartitionPerTablet: 1 PQTabletConfig { PartitionConfig { LifetimeSeconds: 1 WriteSpeedInBytesPerSecond: 7 } YdbDatabasePath: "/MyRoot" MeteringMode: METERING_MODE_RESERVED_CAPACITY } Partitions { PartitionId: 0 TabletId: 72075186233409546 Status: Active } AlterVersion: 1 BalancerTabletID: 72075186233409547 NextPartitionId: 1 } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 37 PathsLimit: 10000 ShardsInside: 2 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 7 AccountSize: 17 DataSize: 17 UsedReserveSize: 7 } } PQPartitionsInside: 1 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 38 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 2026-01-08T15:33:24.445461Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5248: StateWork, received event# 271188001, Sender [1:981:2899], Recipient [1:922:2853]: NKikimrPQ.TEvPeriodicTopicStats PathId: 38 Generation: 1 Round: 96 DataSize: 19 UsedReserveSize: 7 2026-01-08T15:33:24.445535Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5286: StateWork, processing event TEvPersQueue::TEvPeriodicTopicStats 2026-01-08T15:33:24.445588Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__pq_stats.cpp:100: Got periodic topic stats at partition [OwnerId: 72057594046678944, LocalPathId: 38] DataSize 19 UsedReserveSize 7 2026-01-08T15:33:24.445657Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__pq_stats.cpp:128: Will execute TTxStoreStats, queue# 1 2026-01-08T15:33:24.445741Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__pq_stats.cpp:141: Will delay TTxStoreTopicStats on# 0.000000s, queue# 1 2026-01-08T15:33:24.446000Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5248: StateWork, received event# 271122945, Sender [1:982:2900], Recipient [1:922:2853]: NKikimrSchemeOp.TDescribePath Path: "/MyRoot/Topic1" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false } 2026-01-08T15:33:24.446049Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5260: StateWork, processing event TEvSchemeShard::TEvDescribeScheme 2026-01-08T15:33:24.446137Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/Topic1" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2026-01-08T15:33:24.446471Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 
describe path "/MyRoot/Topic1" took 177us result status StatusSuccess 2026-01-08T15:33:24.447002Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/Topic1" PathDescription { Self { Name: "Topic1" PathId: 38 SchemeshardId: 72057594046678944 PathType: EPathTypePersQueueGroup CreateFinished: true CreateTxId: 101 CreateStep: 5000038 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 PQVersion: 1 } ChildrenExist: false } PersQueueGroup { Name: "Topic1" PathId: 38 TotalGroupCount: 1 PartitionPerTablet: 1 PQTabletConfig { PartitionConfig { LifetimeSeconds: 1 WriteSpeedInBytesPerSecond: 7 } YdbDatabasePath: "/MyRoot" MeteringMode: METERING_MODE_RESERVED_CAPACITY } Partitions { PartitionId: 0 TabletId: 72075186233409546 Status: Active } AlterVersion: 1 BalancerTabletID: 72075186233409547 NextPartitionId: 1 } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 37 PathsLimit: 10000 ShardsInside: 2 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 7 AccountSize: 17 DataSize: 17 UsedReserveSize: 7 } } PQPartitionsInside: 1 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 38 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 |87.0%| [TM] {BAZEL_UPLOAD} ydb/core/tx/schemeshard/ut_stats/unittest |87.0%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/s3/py3test >> test_s3.py::TestPgYdbS3TTL::test_s3[table_index_0_UNIQUE_SYNC-pk_types0-all_types0-index0--UNIQUE-SYNC] [GOOD] |87.0%| [TM] {BAZEL_UPLOAD} ydb/tests/datashard/s3/py3test >> TComputeActorAsyncInputHelperTest::PollAsyncInput [GOOD] >> TComputeActorTest::Empty ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/datashard/ut_order/unittest >> DataShardOutOfOrder::TestLateKqpScanAfterColumnDrop+UseSink [GOOD] Test command err: ... waiting for SysViewsRoster update finished 2026-01-08T15:33:11.770664Z node 1 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:125: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, got undelivered to scheme cache: ActorUnknown 2026-01-08T15:33:11.918974Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:114:2161], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2026-01-08T15:33:11.919371Z node 1 :METADATA_PROVIDER ERROR: log.cpp:841: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2026-01-08T15:33:11.919531Z node 1 :METADATA_PROVIDER ERROR: log.cpp:841: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2026-01-08T15:33:12.312604Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2026-01-08T15:33:12.312782Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2026-01-08T15:33:12.401564Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1374: Notification cookie mismatch for subscription [1:34:2081] 1767886388806079 != 1767886388806083 2026-01-08T15:33:12.413768Z node 1 :IMPORT WARN: schemeshard_import.cpp:440: Table profiles were not loaded 2026-01-08T15:33:12.457488Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2026-01-08T15:33:12.545285Z node 1 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: PathErrorUnknown, path: Root/.metadata/script_executions ... waiting for SysViewsRoster update finished (done) 2026-01-08T15:33:12.920678Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2026-01-08T15:33:12.934518Z node 1 :TX_CONVEYOR ERROR: log.cpp:841: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2026-01-08T15:33:13.047857Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:33:13.101318Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3119: StateInit, received event# 268828672, Sender [1:877:2759], Recipient [1:886:2765]: NKikimr::TEvTablet::TEvBoot 2026-01-08T15:33:13.102410Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3119: StateInit, received event# 268828673, Sender [1:877:2759], Recipient [1:886:2765]: NKikimr::TEvTablet::TEvRestored 2026-01-08T15:33:13.102730Z node 1 :TX_DATASHARD INFO: datashard.cpp:374: TDataShard::OnActivateExecutor: tablet 72075186224037888 actor [1:886:2765] 2026-01-08T15:33:13.102968Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:661: TxInitSchema.Execute 2026-01-08T15:33:13.166708Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateInactive, received event# 268828684, Sender [1:877:2759], Recipient [1:886:2765]: NKikimr::TEvTablet::TEvFollowerSyncComplete 2026-01-08T15:33:13.167530Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:727: TxInitSchema.Complete 2026-01-08T15:33:13.167629Z node 1 :TX_DATASHARD DEBUG: 
datashard__init.cpp:50: TDataShard::TTxInit::Execute 2026-01-08T15:33:13.172114Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1324: LoadChangeRecords: QueueSize: 0, at tablet: 72075186224037888 2026-01-08T15:33:13.172212Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1380: LoadLockChangeRecords at tablet: 72075186224037888 2026-01-08T15:33:13.172276Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1429: LoadChangeRecordCommits at tablet: 72075186224037888 2026-01-08T15:33:13.172691Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:94: TDataShard::TTxInit::Complete 2026-01-08T15:33:13.172872Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:102: TDataShard::TTxInitRestored::Execute 2026-01-08T15:33:13.172986Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:142: DataShard 72075186224037888 persisting started state actor id [1:901:2765] in generation 1 2026-01-08T15:33:13.186908Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:151: TDataShard::TTxInitRestored::Complete 2026-01-08T15:33:13.255315Z node 1 :TX_DATASHARD INFO: datashard.cpp:418: Switched to work state WaitScheme tabletId 72075186224037888 2026-01-08T15:33:13.255528Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:458: 72075186224037888 not sending time cast registration request in state WaitScheme: missing processing params 2026-01-08T15:33:13.255635Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1259: Change sender created: at tablet: 72075186224037888, actorId: [1:903:2775] 2026-01-08T15:33:13.255671Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1264: Trying to activate change sender: at tablet: 72075186224037888 2026-01-08T15:33:13.255704Z node 1 :TX_DATASHARD INFO: datashard.cpp:1281: Cannot activate change sender: at tablet: 72075186224037888, state: WaitScheme 2026-01-08T15:33:13.255736Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2026-01-08T15:33:13.256001Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3153: StateWork, received event# 2146435072, Sender [1:886:2765], Recipient [1:886:2765]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvProgressTransaction 2026-01-08T15:33:13.256070Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3178: StateWork, processing event TEvPrivate::TEvProgressTransaction 2026-01-08T15:33:13.256450Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 72075186224037888 2026-01-08T15:33:13.256554Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 72075186224037888 2026-01-08T15:33:13.256697Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2026-01-08T15:33:13.256745Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 0 2026-01-08T15:33:13.256802Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:385: Check unit PlanQueue at 72075186224037888 2026-01-08T15:33:13.256839Z node 1 :TX_DATASHARD TRACE: plan_queue_unit.cpp:59: TPlanQueueUnit at 72075186224037888 has no attached operations 2026-01-08T15:33:13.256874Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:403: Unit PlanQueue has no ready operations at 72075186224037888 2026-01-08T15:33:13.256903Z node 1 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186224037888 TxInFly 0 2026-01-08T15:33:13.256941Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 
72075186224037888 2026-01-08T15:33:13.257361Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3153: StateWork, received event# 269877761, Sender [1:887:2766], Recipient [1:886:2765]: NKikimr::TEvTabletPipe::TEvServerConnected 2026-01-08T15:33:13.257417Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3189: StateWork, processing event TEvTabletPipe::TEvServerConnected 2026-01-08T15:33:13.257472Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3740: Server connected at leader tablet# 72075186224037888, clientId# [1:882:2762], serverId# [1:887:2766], sessionId# [0:0:0] 2026-01-08T15:33:13.257545Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3153: StateWork, received event# 269549568, Sender [1:400:2399], Recipient [1:887:2766] 2026-01-08T15:33:13.257611Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3159: StateWork, processing event TEvDataShard::TEvProposeTransaction 2026-01-08T15:33:13.257713Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037888 2026-01-08T15:33:13.257951Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1848: Trying to execute [0:281474976715657] at 72075186224037888 on unit CheckSchemeTx 2026-01-08T15:33:13.258019Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:133: Propose scheme transaction at tablet 72075186224037888 txId 281474976715657 ssId 72057594046644480 seqNo 2:1 2026-01-08T15:33:13.258107Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:221: Prepared scheme transaction txId 281474976715657 at tablet 72075186224037888 2026-01-08T15:33:13.258166Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1883: Execution status for [0:281474976715657] at 72075186224037888 is ExecutedNoMoreRestarts 2026-01-08T15:33:13.258205Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1931: Advance execution plan for [0:281474976715657] at 72075186224037888 executing on unit CheckSchemeTx 2026-01-08T15:33:13.258243Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1937: Add [0:281474976715657] at 72075186224037888 to execution unit StoreSchemeTx 2026-01-08T15:33:13.258285Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1848: Trying to execute [0:281474976715657] at 72075186224037888 on unit StoreSchemeTx 2026-01-08T15:33:13.258615Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1883: Execution status for [0:281474976715657] at 72075186224037888 is DelayCompleteNoMoreRestarts 2026-01-08T15:33:13.258666Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1931: Advance execution plan for [0:281474976715657] at 72075186224037888 executing on unit StoreSchemeTx 2026-01-08T15:33:13.258715Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1937: Add [0:281474976715657] at 72075186224037888 to execution unit FinishPropose 2026-01-08T15:33:13.258753Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1848: Trying to execute [0:281474976715657] at 72075186224037888 on unit FinishPropose 2026-01-08T15:33:13.258815Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1883: Execution status for [0:281474976715657] at 72075186224037888 is DelayComplete 2026-01-08T15:33:13.258848Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1931: Advance execution plan for [0:281474976715657] at 72075186224037888 executing on unit FinishPropose 2026-01-08T15:33:13.258884Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1937: Add [0:281474976715657] at 72075186224037888 to execution unit WaitForPlan 2026-01-08T15:33:13.258935Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1848: Trying to execute [0:281474976715657] at 
72075186224037888 on unit WaitForPlan 2026-01-08T15:33:13.258966Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1853: Operation [0:281474976715657] at 72075186224037888 is not ready to execute on unit WaitForPlan 2026-01-08T15:33:13.260448Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3153: StateWork, received event# 269746185, Sender [1:904:2776], Recipient [1:886:2765]: NKikimr::TEvTxProxySchemeCache::TEvWatchNotifyUpdated 2026-01-08T15:33:13.260496Z node 1 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:180: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2026-01-08T15:33:13.274126Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 72075186224037888 2026-01-08T15:33:13.274205Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1954: Complete execution for [0:281474 ... 19fe12y41461qvb5. RunScriptActorId : [0:0:0]. CustomerSuppliedId : . PoolId : default. SessionId : ydb://session/3?node_id=2&id=OTJmMjNlZGMtZWMwMmVkZmMtNzY5MDhjYWEtZmUxZjc0Nzk=. CurrentExecutionId : . Database : . DatabaseId : /Root. }. InternalError: SCHEME_ERROR KIKIMR_SCHEME_MISMATCH: {
: Error: Table '/Root/table-1' scheme changed., code: 2028 }. 2026-01-08T15:33:24.355662Z node 2 :KQP_COMPUTE DEBUG: log.cpp:841: fline=kqp_compute_actor_factory.cpp:60;problem=finish_compute_actor;tx_id=281474976715662;task_id=1;success=0;message={
: Error: COMPUTE_STATE_FAILURE }; 2026-01-08T15:33:24.358253Z node 2 :KQP_RESOURCE_MANAGER DEBUG: kqp_rm_service.cpp:404: TxId: 281474976715662, taskId: 1. Released resources, Memory: 0, Free Tier: 1048576, ExecutionUnits: 1. 2026-01-08T15:33:24.358559Z node 2 :KQP_COMPUTE DEBUG: log.cpp:841: self_id=[2:1092:2906];scan_id=1;tx_id=281474976715662;fline=kqp_scan_fetcher_actor.cpp:106;event=TEvTerminateFromCompute;sender=[2:1089:2903];info={
: Error: COMPUTE_STATE_FAILURE }; 2026-01-08T15:33:24.358660Z node 2 :KQP_COMPUTE DEBUG: log.cpp:841: self_id=[2:1092:2906];scan_id=1;tx_id=281474976715662;fline=kqp_scan_compute_manager.h:321;event=abort_all_scanners;error_message=Send abort execution from compute actor, message: {
: Error: COMPUTE_STATE_FAILURE }; 2026-01-08T15:33:24.359098Z node 2 :KQP_EXECUTER DEBUG: {KQPEX@kqp_executer_impl.h:570} ActorId: [2:1085:2877] TxId: 281474976715662. Ctx: { TraceId: 01kef3vcwj19fe12y41461qvb5, Database: , SessionId: ydb://session/3?node_id=2&id=OTJmMjNlZGMtZWMwMmVkZmMtNzY5MDhjYWEtZmUxZjc0Nzk=, PoolId: default, DatabaseId: /Root, IsStreamingQuery: 0}. Got execution state from compute actor ActorState# ExecuteState ComputeActor# [2:1089:2903] TaskId# 1 State# COMPUTE_STATE_FAILURE Stats# {CpuTimeUs: 304905 Tasks { TaskId: 1 CpuTimeUs: 301719 Tables { TablePath: "/Root/table-1" } ComputeCpuTimeUs: 16 BuildCpuTimeUs: 301703 Sources { IngressName: "CS" Ingress { } } HostName: "ghrun-wgi6cnz7jm" NodeId: 2 CreateTimeMs: 1767886403283 CurrentWaitInputTimeUs: 163846 UpdateTimeMs: 1767886404355 } MaxMemoryUsage: 1048576 } trace_id# 2026-01-08T15:33:24.359213Z node 2 :KQP_EXECUTER INFO: kqp_planner.cpp:756: TxId: 281474976715662. Ctx: { TraceId: 01kef3vcwj19fe12y41461qvb5, Database: , SessionId: ydb://session/3?node_id=2&id=OTJmMjNlZGMtZWMwMmVkZmMtNzY5MDhjYWEtZmUxZjc0Nzk=, PoolId: default, DatabaseId: /Root, IsStreamingQuery: 0}. Compute actor has finished execution: [2:1089:2903] 2026-01-08T15:33:24.359350Z node 2 :KQP_EXECUTER INFO: {KQPEX@kqp_executer_impl.h:1210} ActorId: [2:1085:2877] TxId: 281474976715662. Ctx: { TraceId: 01kef3vcwj19fe12y41461qvb5, Database: , SessionId: ydb://session/3?node_id=2&id=OTJmMjNlZGMtZWMwMmVkZmMtNzY5MDhjYWEtZmUxZjc0Nzk=, PoolId: default, DatabaseId: /Root, IsStreamingQuery: 0}. Task does not have the CA id yet or is already complete TaskId# 1 trace_id# 2026-01-08T15:33:24.359449Z node 2 :KQP_EXECUTER INFO: {KQPEX@kqp_executer_impl.h:1203} ActorId: [2:1085:2877] TxId: 281474976715662. Ctx: { TraceId: 01kef3vcwj19fe12y41461qvb5, Database: , SessionId: ydb://session/3?node_id=2&id=OTJmMjNlZGMtZWMwMmVkZmMtNzY5MDhjYWEtZmUxZjc0Nzk=, PoolId: default, DatabaseId: /Root, IsStreamingQuery: 0}. Aborting compute actor execution Issues# {
: Error: Terminate execution } ComputeActor# [2:1090:2904] TaskId# 2 trace_id# 2026-01-08T15:33:24.359539Z node 2 :KQP_EXECUTER INFO: {KQPEX@kqp_executer_impl.h:1203} ActorId: [2:1085:2877] TxId: 281474976715662. Ctx: { TraceId: 01kef3vcwj19fe12y41461qvb5, Database: , SessionId: ydb://session/3?node_id=2&id=OTJmMjNlZGMtZWMwMmVkZmMtNzY5MDhjYWEtZmUxZjc0Nzk=, PoolId: default, DatabaseId: /Root, IsStreamingQuery: 0}. Aborting compute actor execution Issues# {
: Error: Terminate execution } ComputeActor# [2:1091:2905] TaskId# 3 trace_id# 2026-01-08T15:33:24.359725Z node 2 :KQP_EXECUTER DEBUG: {KQPEX@kqp_executer_impl.h:1490} ActorId: [2:1085:2877] TxId: 281474976715662. Ctx: { TraceId: 01kef3vcwj19fe12y41461qvb5, Database: , SessionId: ydb://session/3?node_id=2&id=OTJmMjNlZGMtZWMwMmVkZmMtNzY5MDhjYWEtZmUxZjc0Nzk=, PoolId: default, DatabaseId: /Root, IsStreamingQuery: 0}. Terminate execution trace_id# 2026-01-08T15:33:24.359876Z node 2 :KQP_COMPUTE DEBUG: kqp_pure_compute_actor.cpp:160: SelfId: [2:1090:2904], TxId: 281474976715662, task: 2. Ctx: { CheckpointId : . TraceId : 01kef3vcwj19fe12y41461qvb5. CustomerSuppliedId : . RunScriptActorId : [0:0:0]. PoolId : default. SessionId : ydb://session/3?node_id=2&id=OTJmMjNlZGMtZWMwMmVkZmMtNzY5MDhjYWEtZmUxZjc0Nzk=. CurrentExecutionId : . Database : . DatabaseId : /Root. }. CA StateFunc 271646735 2026-01-08T15:33:24.359974Z node 2 :KQP_COMPUTE ERROR: dq_compute_actor_impl.h:1264: SelfId: [2:1090:2904], TxId: 281474976715662, task: 2. Ctx: { CheckpointId : . TraceId : 01kef3vcwj19fe12y41461qvb5. CustomerSuppliedId : . RunScriptActorId : [0:0:0]. PoolId : default. SessionId : ydb://session/3?node_id=2&id=OTJmMjNlZGMtZWMwMmVkZmMtNzY5MDhjYWEtZmUxZjc0Nzk=. CurrentExecutionId : . Database : . DatabaseId : /Root. }. Handle abort execution event from: [2:1085:2877], status: SCHEME_ERROR, reason: {
: Error: Terminate execution } 2026-01-08T15:33:24.360149Z node 2 :KQP_COMPUTE DEBUG: log.cpp:841: fline=kqp_compute_actor_factory.cpp:60;problem=finish_compute_actor;tx_id=281474976715662;task_id=2;success=0;message={
: Error: COMPUTE_STATE_FAILURE }; 2026-01-08T15:33:24.361757Z node 2 :KQP_RESOURCE_MANAGER DEBUG: kqp_rm_service.cpp:404: TxId: 281474976715662, taskId: 2. Released resources, Memory: 0, Free Tier: 1048576, ExecutionUnits: 1. 2026-01-08T15:33:24.362097Z node 2 :KQP_COMPUTE DEBUG: kqp_pure_compute_actor.cpp:160: SelfId: [2:1091:2905], TxId: 281474976715662, task: 3. Ctx: { CheckpointId : . TraceId : 01kef3vcwj19fe12y41461qvb5. RunScriptActorId : [0:0:0]. CustomerSuppliedId : . PoolId : default. SessionId : ydb://session/3?node_id=2&id=OTJmMjNlZGMtZWMwMmVkZmMtNzY5MDhjYWEtZmUxZjc0Nzk=. CurrentExecutionId : . Database : . DatabaseId : /Root. }. CA StateFunc 271646735 2026-01-08T15:33:24.362175Z node 2 :KQP_COMPUTE ERROR: dq_compute_actor_impl.h:1264: SelfId: [2:1091:2905], TxId: 281474976715662, task: 3. Ctx: { CheckpointId : . TraceId : 01kef3vcwj19fe12y41461qvb5. RunScriptActorId : [0:0:0]. CustomerSuppliedId : . PoolId : default. SessionId : ydb://session/3?node_id=2&id=OTJmMjNlZGMtZWMwMmVkZmMtNzY5MDhjYWEtZmUxZjc0Nzk=. CurrentExecutionId : . Database : . DatabaseId : /Root. }. Handle abort execution event from: [2:1085:2877], status: SCHEME_ERROR, reason: {
: Error: Terminate execution } 2026-01-08T15:33:24.362299Z node 2 :KQP_COMPUTE DEBUG: log.cpp:841: fline=kqp_compute_actor_factory.cpp:60;problem=finish_compute_actor;tx_id=281474976715662;task_id=3;success=0;message={
: Error: COMPUTE_STATE_FAILURE }; 2026-01-08T15:33:24.365545Z node 2 :KQP_RESOURCE_MANAGER DEBUG: kqp_rm_service.cpp:404: TxId: 281474976715662, taskId: 3. Released resources, Memory: 0, Free Tier: 1048576, ExecutionUnits: 1. 2026-01-08T15:33:24.366089Z node 2 :KQP_SESSION WARN: {KQPSA@kqp_session_actor.cpp:3171} SessionId: ydb://session/3?node_id=2&id=OTJmMjNlZGMtZWMwMmVkZmMtNzY5MDhjYWEtZmUxZjc0Nzk=, ActorId: [2:1058:2877], ActorState: ExecuteState, LegacyTraceId: 01kef3vcwj19fe12y41461qvb5, Create QueryResponse for error on request, msg: status# ABORTED issues# { message: "Table \'/Root/table-1\' scheme changed." issue_code: 2028 severity: 1 }{ message: "Query invalidated on scheme/internal error during Scan execution" issue_code: 2019 severity: 1 } trace_id# 2026-01-08T15:33:24.366730Z node 2 :TX_PROXY DEBUG: proxy_impl.cpp:352: actor# [2:68:2115] Handle TEvExecuteKqpTransaction 2026-01-08T15:33:24.366803Z node 2 :TX_PROXY DEBUG: proxy_impl.cpp:341: actor# [2:68:2115] TxId# 281474976715664 ProcessProposeKqpTransaction 2026-01-08T15:33:24.367536Z node 2 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:239: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 3000, txId: 281474976715661] shutting down 2026-01-08T15:33:24.367649Z node 2 :TX_PROXY DEBUG: proxy_impl.cpp:314: actor# [2:68:2115] Handle TEvProposeTransaction 2026-01-08T15:33:24.367717Z node 2 :TX_PROXY DEBUG: proxy_impl.cpp:237: actor# [2:68:2115] TxId# 0 ProcessProposeTransaction 2026-01-08T15:33:24.367854Z node 2 :TX_PROXY DEBUG: proxy_impl.cpp:288: actor# [2:68:2115] Cookie# 0 userReqId# "" txid# 0 reqId# [2:1125:2937] SnapshotReq marker# P0 2026-01-08T15:33:24.368315Z node 2 :TX_PROXY DEBUG: resolvereq.cpp:152: Actor# [2:1127:2937] txid# 0 HANDLE EvNavigateKeySetResult TResolveTablesActor marker# P1 ErrorCount# 0 2026-01-08T15:33:24.368519Z node 2 :KQP_EXECUTER DEBUG: kqp_table_resolver.cpp:270: TxId: 281474976715664. Resolved key sets: 0 2026-01-08T15:33:24.368648Z node 2 :KQP_EXECUTER DEBUG: kqp_planner.cpp:585: TxId: 281474976715664. Ctx: { TraceId: 01kef3vcwj19fe12y41461qvb5, Database: , SessionId: ydb://session/3?node_id=2&id=OTJmMjNlZGMtZWMwMmVkZmMtNzY5MDhjYWEtZmUxZjc0Nzk=, PoolId: default, DatabaseId: /Root, IsStreamingQuery: 0}. Total tasks: 0, readonly: true, 0 scan tasks on 0 nodes, localComputeTasks: 0, MayRunTasksLocally 1, snapshot: {0, 0} 2026-01-08T15:33:24.368729Z node 2 :KQP_EXECUTER INFO: {KQPDATA@kqp_data_executer.cpp:2746} ActorId: [2:1124:2877] TxId: 281474976715664. Ctx: { TraceId: 01kef3vcwj19fe12y41461qvb5, Database: , SessionId: ydb://session/3?node_id=2&id=OTJmMjNlZGMtZWMwMmVkZmMtNzY5MDhjYWEtZmUxZjc0Nzk=, PoolId: default, DatabaseId: /Root, IsStreamingQuery: 0}. Total tasks total_tasks# 0 read_only# true datashard_txs# 0 ev_write_txs# 0 topic_txs# 0 volatile# false immediate# true pending_compute_tasks# 0 use_followers# false trace_id# 2026-01-08T15:33:24.368859Z node 2 :KQP_EXECUTER DEBUG: {KQPEX@kqp_executer_impl.h:1490} ActorId: [2:1124:2877] TxId: 281474976715664. Ctx: { TraceId: 01kef3vcwj19fe12y41461qvb5, Database: , SessionId: ydb://session/3?node_id=2&id=OTJmMjNlZGMtZWMwMmVkZmMtNzY5MDhjYWEtZmUxZjc0Nzk=, PoolId: default, DatabaseId: /Root, IsStreamingQuery: 0}. Terminate execution trace_id# 2026-01-08T15:33:24.368938Z node 2 :KQP_EXECUTER DEBUG: {KQPEX@kqp_executer_impl.h:1124} ActorId: [2:1124:2877] TxId: 281474976715664. 
Ctx: { TraceId: 01kef3vcwj19fe12y41461qvb5, Database: , SessionId: ydb://session/3?node_id=2&id=OTJmMjNlZGMtZWMwMmVkZmMtNzY5MDhjYWEtZmUxZjc0Nzk=, PoolId: default, DatabaseId: /Root, IsStreamingQuery: 0}. Resource usage for last stat interval, rate limiter was not found Consumption# ComputeTime: 0.000000s ReadRows: 0 ReadBytes: 0 RequestUnits# 1 ForceFlag# true trace_id# 2026-01-08T15:33:24.369251Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:926: Forwarded response to sender actor, requestId: 3, sender: [2:830:2725], selfId: [2:66:2113], source: [2:1058:2877] 2026-01-08T15:33:24.369317Z node 2 :TX_PROXY DEBUG: resolvereq.cpp:272: Actor# [2:1127:2937] txid# 0 HANDLE EvResolveKeySetResult TResolveTablesActor marker# P2 ErrorCount# 0 2026-01-08T15:33:24.369748Z node 2 :TX_PROXY DEBUG: snapshotreq.cpp:1451: Actor# [2:1125:2937] SEND TEvDiscardVolatileSnapshotRequest to datashard 72075186224037888 marker# P3 2026-01-08T15:33:24.370744Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1252: Session closed, sessionId: ydb://session/3?node_id=2&id=OTJmMjNlZGMtZWMwMmVkZmMtNzY5MDhjYWEtZmUxZjc0Nzk=, workerId: [2:1058:2877], local sessions count: 0 2026-01-08T15:33:24.370931Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3153: StateWork, received event# 269553190, Sender [2:1125:2937], Recipient [2:886:2766]: NKikimrTxDataShard.TEvDiscardVolatileSnapshotRequest OwnerId: 72057594046644480 PathId: 38 Step: 3000 TxId: 281474976715661 Response { QueryIssues { message: "Table \'/Root/table-1\' scheme changed." issue_code: 2028 severity: 1 } QueryIssues { message: "Query invalidated on scheme/internal error during Scan execution" issue_code: 2019 severity: 1 } TxMeta { } } YdbStatus: ABORTED ConsumedRu: 660 |87.0%| [TM] {BAZEL_UPLOAD} ydb/core/tx/datashard/ut_order/unittest >> DataShardVolatile::VolatileTxAbortedOnDrop [GOOD] >> DataShardVolatile::UpsertNoLocksArbiter+UseSink >> TComputeActorTest::Empty [GOOD] >> TComputeActorTest::ReceiveData [GOOD] >> TComputeActorTest::PollAckPoll [GOOD] >> TDqSourceWatermarkTrackerTest::WatermarkMovement [GOOD] >> TDqSourceWatermarkTrackerTest::WatermarkMovementWithIdleness [GOOD] >> TDqSourceWatermarkTrackerTest::IdleFirstShouldReturnNothing [GOOD] >> TDqSourceWatermarkTrackerTest::Idle1 [GOOD] >> TDqSourceWatermarkTrackerTest::IdleNextCheckAt [GOOD] >> TIssuesBufferTest::TestEmpty [GOOD] >> TIssuesBufferTest::TestSimplePush [GOOD] >> TIssuesBufferTest::TestPushWithOverflow [GOOD] >> TIssuesBufferTest::TestSmallBuffer [GOOD] >> TIssuesBufferTest::TestUseAfterDump [GOOD] >> DataShardOutOfOrder::TestOutOfOrderNoBarrierRestartImmediateLongTail [GOOD] >> KqpPg::AlterColumnSetDefaultFromSequenceWithSchemaname [GOOD] >> KqpPg::CheckPgAutoParams+useSink >> DataShardVolatile::DistributedWriteThenBulkUpsertWithCdc [GOOD] >> DataShardVolatile::DistributedWriteLostPlanThenDrop >> DataShardWrite::InsertImmediate [GOOD] >> DataShardWrite::ImmediateAndPlannedCommittedOpsRace >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-LocalUser-DropUser-50 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-LocalUser-DropUser-51 >> TSyncComputeActorTest::Empty [GOOD] >> DataShardWrite::DistributedInsertReadSetWithoutLocks-Volatile [GOOD] >> DataShardWrite::DistributedInsertWithoutLocks+Volatile >> DataShardWrite::WriteUniqueRowsInsertDuplicateBeforeCommit [GOOD] >> DataShardWrite::WriteUniqueRowsInsertDuplicateAtCommit >> TSyncComputeActorTest::Basic >> KqpPg::InsertNoTargetColumns_ColumnOrder+useSink [GOOD] >> KqpPg::InsertNoTargetColumns_ColumnOrder-useSink |87.0%| [TM] {asan, 
default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_stats/unittest |87.0%| [TM] {BAZEL_UPLOAD} ydb/core/tx/schemeshard/ut_stats/unittest >> Sharding::XXUsage >> TSchemeshardStatsBatchingTest::TopicPeriodicStatMeteringModeReserved [GOOD] >> Sharding::XXUsage [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/datashard/ut_order/unittest >> DataShardOutOfOrder::TestOutOfOrderNoBarrierRestartImmediateLongTail [GOOD] Test command err: ... waiting for SysViewsRoster update finished 2026-01-08T15:33:12.358003Z node 1 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:125: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, got undelivered to scheme cache: ActorUnknown 2026-01-08T15:33:12.491741Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:114:2161], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2026-01-08T15:33:12.492168Z node 1 :METADATA_PROVIDER ERROR: log.cpp:841: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2026-01-08T15:33:12.492344Z node 1 :METADATA_PROVIDER ERROR: log.cpp:841: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2026-01-08T15:33:12.832218Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2026-01-08T15:33:12.832405Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2026-01-08T15:33:12.939988Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1374: Notification cookie mismatch for subscription [1:34:2081] 1767886388816023 != 1767886388816027 2026-01-08T15:33:12.951695Z node 1 :IMPORT WARN: schemeshard_import.cpp:440: Table profiles were not loaded 2026-01-08T15:33:13.004226Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2026-01-08T15:33:13.145314Z node 1 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: PathErrorUnknown, path: Root/.metadata/script_executions ... waiting for SysViewsRoster update finished (done) 2026-01-08T15:33:13.535203Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2026-01-08T15:33:13.552409Z node 1 :TX_CONVEYOR ERROR: log.cpp:841: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2026-01-08T15:33:13.676834Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:33:13.713626Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3119: StateInit, received event# 268828672, Sender [1:877:2759], Recipient [1:886:2765]: NKikimr::TEvTablet::TEvBoot 2026-01-08T15:33:13.715135Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3119: StateInit, received event# 268828673, Sender [1:877:2759], Recipient [1:886:2765]: NKikimr::TEvTablet::TEvRestored 2026-01-08T15:33:13.715468Z node 1 :TX_DATASHARD INFO: datashard.cpp:374: TDataShard::OnActivateExecutor: tablet 72075186224037888 actor [1:886:2765] 2026-01-08T15:33:13.715717Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:661: TxInitSchema.Execute 2026-01-08T15:33:13.761794Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateInactive, received event# 268828684, Sender [1:877:2759], Recipient [1:886:2765]: NKikimr::TEvTablet::TEvFollowerSyncComplete 2026-01-08T15:33:13.762605Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:727: TxInitSchema.Complete 2026-01-08T15:33:13.762716Z node 1 :TX_DATASHARD DEBUG: 
datashard__init.cpp:50: TDataShard::TTxInit::Execute 2026-01-08T15:33:13.764513Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1324: LoadChangeRecords: QueueSize: 0, at tablet: 72075186224037888 2026-01-08T15:33:13.764593Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1380: LoadLockChangeRecords at tablet: 72075186224037888 2026-01-08T15:33:13.764659Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1429: LoadChangeRecordCommits at tablet: 72075186224037888 2026-01-08T15:33:13.765053Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:94: TDataShard::TTxInit::Complete 2026-01-08T15:33:13.765221Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:102: TDataShard::TTxInitRestored::Execute 2026-01-08T15:33:13.765310Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:142: DataShard 72075186224037888 persisting started state actor id [1:901:2765] in generation 1 2026-01-08T15:33:13.776571Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:151: TDataShard::TTxInitRestored::Complete 2026-01-08T15:33:13.805106Z node 1 :TX_DATASHARD INFO: datashard.cpp:418: Switched to work state WaitScheme tabletId 72075186224037888 2026-01-08T15:33:13.805364Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:458: 72075186224037888 not sending time cast registration request in state WaitScheme: missing processing params 2026-01-08T15:33:13.805512Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1259: Change sender created: at tablet: 72075186224037888, actorId: [1:903:2775] 2026-01-08T15:33:13.805555Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1264: Trying to activate change sender: at tablet: 72075186224037888 2026-01-08T15:33:13.805619Z node 1 :TX_DATASHARD INFO: datashard.cpp:1281: Cannot activate change sender: at tablet: 72075186224037888, state: WaitScheme 2026-01-08T15:33:13.805661Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2026-01-08T15:33:13.805947Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3153: StateWork, received event# 2146435072, Sender [1:886:2765], Recipient [1:886:2765]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvProgressTransaction 2026-01-08T15:33:13.806085Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3178: StateWork, processing event TEvPrivate::TEvProgressTransaction 2026-01-08T15:33:13.806492Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 72075186224037888 2026-01-08T15:33:13.806619Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 72075186224037888 2026-01-08T15:33:13.806783Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2026-01-08T15:33:13.806830Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 0 2026-01-08T15:33:13.806911Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:385: Check unit PlanQueue at 72075186224037888 2026-01-08T15:33:13.806961Z node 1 :TX_DATASHARD TRACE: plan_queue_unit.cpp:59: TPlanQueueUnit at 72075186224037888 has no attached operations 2026-01-08T15:33:13.807000Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:403: Unit PlanQueue has no ready operations at 72075186224037888 2026-01-08T15:33:13.807037Z node 1 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186224037888 TxInFly 0 2026-01-08T15:33:13.807084Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 
72075186224037888 2026-01-08T15:33:13.807559Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3153: StateWork, received event# 269877761, Sender [1:887:2766], Recipient [1:886:2765]: NKikimr::TEvTabletPipe::TEvServerConnected 2026-01-08T15:33:13.807604Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3189: StateWork, processing event TEvTabletPipe::TEvServerConnected 2026-01-08T15:33:13.807655Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3740: Server connected at leader tablet# 72075186224037888, clientId# [1:882:2762], serverId# [1:887:2766], sessionId# [0:0:0] 2026-01-08T15:33:13.807800Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3153: StateWork, received event# 269549568, Sender [1:400:2399], Recipient [1:887:2766] 2026-01-08T15:33:13.807869Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3159: StateWork, processing event TEvDataShard::TEvProposeTransaction 2026-01-08T15:33:13.807990Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037888 2026-01-08T15:33:13.808223Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1848: Trying to execute [0:281474976715657] at 72075186224037888 on unit CheckSchemeTx 2026-01-08T15:33:13.808290Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:133: Propose scheme transaction at tablet 72075186224037888 txId 281474976715657 ssId 72057594046644480 seqNo 2:1 2026-01-08T15:33:13.808397Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:221: Prepared scheme transaction txId 281474976715657 at tablet 72075186224037888 2026-01-08T15:33:13.808458Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1883: Execution status for [0:281474976715657] at 72075186224037888 is ExecutedNoMoreRestarts 2026-01-08T15:33:13.808512Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1931: Advance execution plan for [0:281474976715657] at 72075186224037888 executing on unit CheckSchemeTx 2026-01-08T15:33:13.808547Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1937: Add [0:281474976715657] at 72075186224037888 to execution unit StoreSchemeTx 2026-01-08T15:33:13.808582Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1848: Trying to execute [0:281474976715657] at 72075186224037888 on unit StoreSchemeTx 2026-01-08T15:33:13.808935Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1883: Execution status for [0:281474976715657] at 72075186224037888 is DelayCompleteNoMoreRestarts 2026-01-08T15:33:13.808982Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1931: Advance execution plan for [0:281474976715657] at 72075186224037888 executing on unit StoreSchemeTx 2026-01-08T15:33:13.809039Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1937: Add [0:281474976715657] at 72075186224037888 to execution unit FinishPropose 2026-01-08T15:33:13.809076Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1848: Trying to execute [0:281474976715657] at 72075186224037888 on unit FinishPropose 2026-01-08T15:33:13.809139Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1883: Execution status for [0:281474976715657] at 72075186224037888 is DelayComplete 2026-01-08T15:33:13.809172Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1931: Advance execution plan for [0:281474976715657] at 72075186224037888 executing on unit FinishPropose 2026-01-08T15:33:13.809210Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1937: Add [0:281474976715657] at 72075186224037888 to execution unit WaitForPlan 2026-01-08T15:33:13.809244Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1848: Trying to execute [0:281474976715657] at 
72075186224037888 on unit WaitForPlan 2026-01-08T15:33:13.809288Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1853: Operation [0:281474976715657] at 72075186224037888 is not ready to execute on unit WaitForPlan 2026-01-08T15:33:13.810911Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3153: StateWork, received event# 269746185, Sender [1:904:2776], Recipient [1:886:2765]: NKikimr::TEvTxProxySchemeCache::TEvWatchNotifyUpdated 2026-01-08T15:33:13.810965Z node 1 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:180: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2026-01-08T15:33:13.822106Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 72075186224037888 2026-01-08T15:33:13.822202Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1954: Complete execution for [0:281474 ... eamingQuery: 0}. Shards nodes resolved SuccessNodes# 1 FailedNodes# 0 trace_id# 2026-01-08T15:33:26.221585Z node 2 :KQP_EXECUTER DEBUG: {KQPEX@kqp_executer_impl.h:283} ActorId: [2:1407:3134] TxId: 281474976715671. Ctx: { TraceId: 01kef3vgf1146tybskfhmdhm4p, Database: , SessionId: ydb://session/3?node_id=2&id=OTQ1YmIyNGUtMzVhNmVhNmUtNjEyYzY3OTEtNDMwZjdjYWM=, PoolId: default, DatabaseId: /Root, IsStreamingQuery: 0}. Shards on nodes: node 2: [72075186224037888] trace_id# 2026-01-08T15:33:26.221664Z node 2 :KQP_EXECUTER DEBUG: kqp_planner.cpp:585: TxId: 281474976715671. Ctx: { TraceId: 01kef3vgf1146tybskfhmdhm4p, Database: , SessionId: ydb://session/3?node_id=2&id=OTQ1YmIyNGUtMzVhNmVhNmUtNjEyYzY3OTEtNDMwZjdjYWM=, PoolId: default, DatabaseId: /Root, IsStreamingQuery: 0}. Total tasks: 1, readonly: true, 1 scan tasks on 1 nodes, localComputeTasks: 0, MayRunTasksLocally 1, snapshot: {0, 0} 2026-01-08T15:33:26.221987Z node 2 :KQP_EXECUTER DEBUG: kqp_planner.cpp:872: TxId: 281474976715671. Ctx: { TraceId: 01kef3vgf1146tybskfhmdhm4p, Database: , SessionId: ydb://session/3?node_id=2&id=OTQ1YmIyNGUtMzVhNmVhNmUtNjEyYzY3OTEtNDMwZjdjYWM=, PoolId: default, DatabaseId: /Root, IsStreamingQuery: 0}. Collect channels updates for task: 1 at actor [2:1411:3134] 2026-01-08T15:33:26.222070Z node 2 :KQP_EXECUTER DEBUG: kqp_planner.cpp:864: TxId: 281474976715671. Ctx: { TraceId: 01kef3vgf1146tybskfhmdhm4p, Database: , SessionId: ydb://session/3?node_id=2&id=OTQ1YmIyNGUtMzVhNmVhNmUtNjEyYzY3OTEtNDMwZjdjYWM=, PoolId: default, DatabaseId: /Root, IsStreamingQuery: 0}. Sending channels info to compute actor: [2:1411:3134], channels: 1 2026-01-08T15:33:26.222194Z node 2 :KQP_EXECUTER INFO: {KQPDATA@kqp_data_executer.cpp:2746} ActorId: [2:1407:3134] TxId: 281474976715671. Ctx: { TraceId: 01kef3vgf1146tybskfhmdhm4p, Database: , SessionId: ydb://session/3?node_id=2&id=OTQ1YmIyNGUtMzVhNmVhNmUtNjEyYzY3OTEtNDMwZjdjYWM=, PoolId: default, DatabaseId: /Root, IsStreamingQuery: 0}. Total tasks total_tasks# 1 read_only# true datashard_txs# 0 ev_write_txs# 0 topic_txs# 0 volatile# false immediate# true pending_compute_tasks# 0 use_followers# false trace_id# 2026-01-08T15:33:26.222264Z node 2 :KQP_EXECUTER DEBUG: {KQPEX@kqp_executer_impl.h:884} ActorId: [2:1407:3134] TxId: 281474976715671. Ctx: { TraceId: 01kef3vgf1146tybskfhmdhm4p, Database: , SessionId: ydb://session/3?node_id=2&id=OTQ1YmIyNGUtMzVhNmVhNmUtNjEyYzY3OTEtNDMwZjdjYWM=, PoolId: default, DatabaseId: /Root, IsStreamingQuery: 0}. 
Waiting for: CA [2:1411:3134], trace_id# 2026-01-08T15:33:26.222361Z node 2 :KQP_EXECUTER DEBUG: {KQPDATA@kqp_data_executer.cpp:168} ActorId: [2:1407:3134] TxId: 281474976715671. Ctx: { TraceId: 01kef3vgf1146tybskfhmdhm4p, Database: , SessionId: ydb://session/3?node_id=2&id=OTQ1YmIyNGUtMzVhNmVhNmUtNjEyYzY3OTEtNDMwZjdjYWM=, PoolId: default, DatabaseId: /Root, IsStreamingQuery: 0}. ActorState: WaitResolveState, waiting for 1 compute actor(s) and 0 datashard(s): CA [2:1411:3134], trace_id# 2026-01-08T15:33:26.222422Z node 2 :KQP_EXECUTER DEBUG: {KQPDATA@kqp_data_executer.cpp:2350} ActorId: [2:1407:3134] TxId: 281474976715671. Ctx: { TraceId: 01kef3vgf1146tybskfhmdhm4p, Database: , SessionId: ydb://session/3?node_id=2&id=OTQ1YmIyNGUtMzVhNmVhNmUtNjEyYzY3OTEtNDMwZjdjYWM=, PoolId: default, DatabaseId: /Root, IsStreamingQuery: 0}. Immediate tx, become ExecuteState current_state# WaitResolveState immediate# true trace_id# 2026-01-08T15:33:26.223230Z node 2 :KQP_EXECUTER DEBUG: {KQPEX@kqp_executer_impl.h:570} ActorId: [2:1407:3134] TxId: 281474976715671. Ctx: { TraceId: 01kef3vgf1146tybskfhmdhm4p, Database: , SessionId: ydb://session/3?node_id=2&id=OTQ1YmIyNGUtMzVhNmVhNmUtNjEyYzY3OTEtNDMwZjdjYWM=, PoolId: default, DatabaseId: /Root, IsStreamingQuery: 0}. Got execution state from compute actor ActorState# ExecuteState ComputeActor# [2:1411:3134] TaskId# 1 State# COMPUTE_STATE_EXECUTING Stats# {} trace_id# 2026-01-08T15:33:26.223306Z node 2 :KQP_EXECUTER DEBUG: {KQPEX@kqp_executer_impl.h:884} ActorId: [2:1407:3134] TxId: 281474976715671. Ctx: { TraceId: 01kef3vgf1146tybskfhmdhm4p, Database: , SessionId: ydb://session/3?node_id=2&id=OTQ1YmIyNGUtMzVhNmVhNmUtNjEyYzY3OTEtNDMwZjdjYWM=, PoolId: default, DatabaseId: /Root, IsStreamingQuery: 0}. Waiting for: CA [2:1411:3134], trace_id# 2026-01-08T15:33:26.223373Z node 2 :KQP_EXECUTER DEBUG: {KQPDATA@kqp_data_executer.cpp:168} ActorId: [2:1407:3134] TxId: 281474976715671. Ctx: { TraceId: 01kef3vgf1146tybskfhmdhm4p, Database: , SessionId: ydb://session/3?node_id=2&id=OTQ1YmIyNGUtMzVhNmVhNmUtNjEyYzY3OTEtNDMwZjdjYWM=, PoolId: default, DatabaseId: /Root, IsStreamingQuery: 0}. 
ActorState: ExecuteState, waiting for 1 compute actor(s) and 0 datashard(s): CA [2:1411:3134], trace_id# 2026-01-08T15:33:26.223851Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3153: StateWork, received event# 269553215, Sender [2:1412:3134], Recipient [2:1349:3101]: NKikimrTxDataShard.TEvRead ReadId: 0 TableId { OwnerId: 72057594046644480 TableId: 38 SchemaVersion: 1 } Columns: 1 Columns: 2 ResultFormat: FORMAT_CELLVEC MaxRows: 32767 MaxBytes: 5242880 Reverse: false KeysSize: 1 2026-01-08T15:33:26.223991Z node 2 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2697: TTxReadViaPipeline execute: at tablet# 72075186224037888, FollowerId 0 2026-01-08T15:33:26.224054Z node 2 :TX_DATASHARD TRACE: datashard.cpp:2377: GetMvccTxVersion at 72075186224037888 CompleteEdge# v5001/281474976715667 IncompleteEdge# v{min} UnprotectedReadEdge# v3000/18446744073709551615 ImmediateWriteEdge# v3000/18446744073709551615 ImmediateWriteEdgeReplied# v5001/18446744073709551615 2026-01-08T15:33:26.224109Z node 2 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2800: 72075186224037888 changed HEAD read to non-repeatable v6000/18446744073709551615 2026-01-08T15:33:26.224205Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1848: Trying to execute [0:3] at 72075186224037888 on unit CheckRead 2026-01-08T15:33:26.224299Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1883: Execution status for [0:3] at 72075186224037888 is Executed 2026-01-08T15:33:26.224350Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1931: Advance execution plan for [0:3] at 72075186224037888 executing on unit CheckRead 2026-01-08T15:33:26.224393Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1937: Add [0:3] at 72075186224037888 to execution unit BuildAndWaitDependencies 2026-01-08T15:33:26.224431Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1848: Trying to execute [0:3] at 72075186224037888 on unit BuildAndWaitDependencies 2026-01-08T15:33:26.224484Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:483: Activated operation [0:3] at 72075186224037888 2026-01-08T15:33:26.224530Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1883: Execution status for [0:3] at 72075186224037888 is Executed 2026-01-08T15:33:26.224555Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1931: Advance execution plan for [0:3] at 72075186224037888 executing on unit BuildAndWaitDependencies 2026-01-08T15:33:26.224578Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1937: Add [0:3] at 72075186224037888 to execution unit ExecuteRead 2026-01-08T15:33:26.224601Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1848: Trying to execute [0:3] at 72075186224037888 on unit ExecuteRead 2026-01-08T15:33:26.224703Z node 2 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:1748: 72075186224037888 Execute read# 1, request: { ReadId: 0 TableId { OwnerId: 72057594046644480 TableId: 38 SchemaVersion: 1 } Columns: 1 Columns: 2 ResultFormat: FORMAT_CELLVEC MaxRows: 32767 MaxBytes: 5242880 Reverse: false } 2026-01-08T15:33:26.224908Z node 2 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2396: 72075186224037888 Complete read# {[2:1412:3134], 0} after executionsCount# 1 2026-01-08T15:33:26.224973Z node 2 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2370: 72075186224037888 read iterator# {[2:1412:3134], 0} sends rowCount# 1, bytes# 32, quota rows left# 32766, quota bytes left# 5242848, hasUnreadQueries# 0, total queries# 1, firstUnprocessed# 0 2026-01-08T15:33:26.225057Z node 2 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2421: 72075186224037888 
read iterator# {[2:1412:3134], 0} finished in read 2026-01-08T15:33:26.225115Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1883: Execution status for [0:3] at 72075186224037888 is Executed 2026-01-08T15:33:26.225141Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1931: Advance execution plan for [0:3] at 72075186224037888 executing on unit ExecuteRead 2026-01-08T15:33:26.225165Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1937: Add [0:3] at 72075186224037888 to execution unit CompletedOperations 2026-01-08T15:33:26.225189Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1848: Trying to execute [0:3] at 72075186224037888 on unit CompletedOperations 2026-01-08T15:33:26.225233Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1883: Execution status for [0:3] at 72075186224037888 is Executed 2026-01-08T15:33:26.225254Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1931: Advance execution plan for [0:3] at 72075186224037888 executing on unit CompletedOperations 2026-01-08T15:33:26.225278Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1943: Execution plan for [0:3] at 72075186224037888 has finished 2026-01-08T15:33:26.225320Z node 2 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2932: TTxReadViaPipeline(69) Execute with status# Executed at tablet# 72075186224037888 2026-01-08T15:33:26.226132Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3153: StateWork, received event# 269553219, Sender [2:1412:3134], Recipient [2:1349:3101]: NKikimrTxDataShard.TEvReadCancel ReadId: 0 2026-01-08T15:33:26.226196Z node 2 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:3668: 72075186224037888 ReadCancel: { ReadId: 0 } 2026-01-08T15:33:26.226476Z node 2 :KQP_EXECUTER DEBUG: {KQPEX@kqp_executer_impl.h:570} ActorId: [2:1407:3134] TxId: 281474976715671. Ctx: { TraceId: 01kef3vgf1146tybskfhmdhm4p, Database: , SessionId: ydb://session/3?node_id=2&id=OTQ1YmIyNGUtMzVhNmVhNmUtNjEyYzY3OTEtNDMwZjdjYWM=, PoolId: default, DatabaseId: /Root, IsStreamingQuery: 0}. Got execution state from compute actor ActorState# ExecuteState ComputeActor# [2:1411:3134] TaskId# 1 State# COMPUTE_STATE_FINISHED Stats# {CpuTimeUs: 749 Tasks { TaskId: 1 CpuTimeUs: 206 FinishTimeMs: 1767886406225 Tables { TablePath: "/Root/table-1" ReadRows: 1 ReadBytes: 8 AffectedPartitions: 1 } IngressRows: 1 ComputeCpuTimeUs: 77 BuildCpuTimeUs: 129 HostName: "ghrun-wgi6cnz7jm" NodeId: 2 StartTimeMs: 1767886406225 CreateTimeMs: 1767886406222 UpdateTimeMs: 1767886406225 } MaxMemoryUsage: 1048576 } trace_id# 2026-01-08T15:33:26.226615Z node 2 :KQP_EXECUTER INFO: kqp_planner.cpp:756: TxId: 281474976715671. Ctx: { TraceId: 01kef3vgf1146tybskfhmdhm4p, Database: , SessionId: ydb://session/3?node_id=2&id=OTQ1YmIyNGUtMzVhNmVhNmUtNjEyYzY3OTEtNDMwZjdjYWM=, PoolId: default, DatabaseId: /Root, IsStreamingQuery: 0}. Compute actor has finished execution: [2:1411:3134] 2026-01-08T15:33:26.226816Z node 2 :KQP_EXECUTER DEBUG: {KQPEX@kqp_executer_impl.h:1490} ActorId: [2:1407:3134] TxId: 281474976715671. Ctx: { TraceId: 01kef3vgf1146tybskfhmdhm4p, Database: , SessionId: ydb://session/3?node_id=2&id=OTQ1YmIyNGUtMzVhNmVhNmUtNjEyYzY3OTEtNDMwZjdjYWM=, PoolId: default, DatabaseId: /Root, IsStreamingQuery: 0}. Terminate execution trace_id# 2026-01-08T15:33:26.226889Z node 2 :KQP_EXECUTER DEBUG: {KQPEX@kqp_executer_impl.h:1124} ActorId: [2:1407:3134] TxId: 281474976715671. 
Ctx: { TraceId: 01kef3vgf1146tybskfhmdhm4p, Database: , SessionId: ydb://session/3?node_id=2&id=OTQ1YmIyNGUtMzVhNmVhNmUtNjEyYzY3OTEtNDMwZjdjYWM=, PoolId: default, DatabaseId: /Root, IsStreamingQuery: 0}. Resource usage for last stat interval, rate limiter was not found Consumption# ComputeTime: 0.000749s ReadRows: 1 ReadBytes: 8 RequestUnits# 1 ForceFlag# true trace_id# { items { uint32_value: 7 } items { uint32_value: 4 } } |87.0%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/sharding/ut/unittest |87.0%| [TM] {BAZEL_UPLOAD} ydb/core/tx/datashard/ut_order/unittest |87.0%| [TM] {BAZEL_UPLOAD} ydb/core/tx/sharding/ut/unittest ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/sharding/ut/unittest >> Sharding::XXUsage [GOOD] Test command err: 10206759138305689309 8734783009871766524 15128506013544661336 13247298401309786832 18041460058792647032 15689779675575164249 11862501275010346362 16867293676189166814 8334500272125050197 5817404474985820818 1615669927485480438 12570810219846238851 17156578685047213271 8295941136044689045 12094297740681271124 12327652591974051104 220675167140617348 2453699739257318127 2357031177781166564 5259528623020535486 1371800463213966980 15652944276165486460 7647206709406516935 14366829490287981297 14718491032946144008 5164961287678308972 7701896481397266031 5162039970718531548 15920183485285154850 4770602333211235018 7814970665086387503 15345211878841922204 14469915219881116970 14111274282934632491 8585997154098842366 15837941100787936291 14664857540642638168 6735434708737117135 2639863887500532875 1349845972553251647 2865397997825499194 15849803260105989422 3522207430634286676 3905581669502610448 746721013554556137 3427403843736559138 12846427259019592687 1834610332202640364 8072295592675559220 12238720360718219174 17441904902140181336 14273150841385183163 2561056653152746032 3113350602319331053 8209205274332064398 10297208348173733287 13810118256607558153 15432382899549317773 5912248664332135430 7935159562353875144 2865397997825499194 14813530108318261450 4050850448940887034 16674041437146482073 7351956153298753079 11976918182234151706 18131102441436377445 2396381005419731864 7392185175100928345 15540221802114841143 185819218382225139 890861307934132708 15379845357848669690 17616050843696208374 5944866763574352606 4309653994297861922 7648792211533329865 2432224981742364269 1271040598745726688 13259931223028844465 16991577290226695422 10462081049492684368 9129381048523251743 14929477740008674722 10078149344953786550 9161335104907385051 14788228854093740540 4731824712582224492 4159941638546679139 8542928825518385431 |87.0%| [TM] {BAZEL_UPLOAD} ydb/core/tx/sharding/ut/unittest |87.0%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/sharding/ut/unittest |87.0%| [TM] {BAZEL_UPLOAD} ydb/core/tx/sharding/ut/unittest ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_stats/unittest >> TSchemeshardStatsBatchingTest::TopicPeriodicStatMeteringModeReserved [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2143] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2143] Leader for TabletID 72057594046678944 is [1:129:2154] sender: [1:130:2058] recipient: [1:112:2143] 2026-01-08T15:33:24.319337Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7938: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, 
InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2026-01-08T15:33:24.319439Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7966: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2026-01-08T15:33:24.319478Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7852: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2026-01-08T15:33:24.319511Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7868: OperationsProcessing config: using default configuration 2026-01-08T15:33:24.319566Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7874: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2026-01-08T15:33:24.319596Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7874: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2026-01-08T15:33:24.319649Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7998: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2026-01-08T15:33:24.319734Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_shred_manager.cpp:39: [RootShredManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, ShredInterval# 604800.000000s, ShredBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2026-01-08T15:33:24.320599Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:8069: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2026-01-08T15:33:24.320876Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2026-01-08T15:33:24.411236Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7755: Cannot subscribe to console configs 2026-01-08T15:33:24.411295Z node 1 :IMPORT WARN: schemeshard_import.cpp:440: Table profiles were not loaded 2026-01-08T15:33:24.428190Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2026-01-08T15:33:24.428615Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2026-01-08T15:33:24.428828Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2026-01-08T15:33:24.454638Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2026-01-08T15:33:24.455124Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:464: Clear TempDirsState with owners number: 0 2026-01-08T15:33:24.456052Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1383: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2026-01-08T15:33:24.456895Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2026-01-08T15:33:24.460960Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:160: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2026-01-08T15:33:24.461186Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_shred_manager.cpp:84: [RootShredManager] Stop 2026-01-08T15:33:24.462487Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2026-01-08T15:33:24.462559Z node 1 :FLAT_TX_SCHEMESHARD 
INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2026-01-08T15:33:24.462706Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2026-01-08T15:33:24.462762Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2026-01-08T15:33:24.462828Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:213: TTxServerlessStorageBilling.Complete 2026-01-08T15:33:24.463009Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:7111: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2026-01-08T15:33:24.676855Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot" OperationType: ESchemeOpMkDir MkDir { Name: ".sys" } Internal: true FailOnExist: false } TxId: 281474976710657 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:33:24.678100Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "auth_permissions" Type: EAuthPermissions } } TxId: 281474976710658 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:33:24.678254Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "auth_effective_permissions" Type: EAuthEffectivePermissions } } TxId: 281474976710659 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:33:24.678403Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "auth_users" Type: EAuthUsers } } TxId: 281474976710660 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:33:24.678541Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "pg_tables" Type: EPgTables } } TxId: 281474976710661 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:33:24.678642Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "ds_pdisks" Type: EPDisks } } TxId: 281474976710662 
Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:33:24.678730Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "auth_group_members" Type: EAuthGroupMembers } } TxId: 281474976710663 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:33:24.678819Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "top_partitions_one_minute" Type: ETopPartitionsByCpuOneMinute } } TxId: 281474976710664 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:33:24.678920Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "top_queries_by_request_units_one_hour" Type: ETopQueriesByRequestUnitsOneHour } } TxId: 281474976710665 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:33:24.679025Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "ds_vslots" Type: EVSlots } } TxId: 281474976710666 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:33:24.679097Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "top_partitions_by_tli_one_hour" Type: ETopPartitionsByTliOneHour } } TxId: 281474976710667 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:33:24.679169Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "nodes" Type: ENodes } } TxId: 281474976710668 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:33:24.679290Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "top_queries_by_cpu_time_one_hour" Type: ETopQueriesByCpuTimeOneHour } } TxId: 281474976710669 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:33:24.679398Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: 
SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "top_queries_by_read_bytes_one_hour" Type: ETopQueriesByReadBytesOneHour } } TxId: 281474976710670 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:33:24.679521Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "top_queries_by_read_bytes_one_minute" Type: ETopQueriesByR ... 0, PendingCommits: 0, PendingWrites: 0 2026-01-08T15:33:27.670138Z node 1 :PERSQUEUE DEBUG: partition.cpp:2347: [72075186233409546][Partition][0][StateIdle] Try persist 2026-01-08T15:33:27.712027Z node 1 :PERSQUEUE DEBUG: partition.cpp:2329: [72075186233409546][Partition][1][StateIdle] Process user action and tx events 2026-01-08T15:33:27.712106Z node 1 :PERSQUEUE DEBUG: partition.cpp:2388: [72075186233409546][Partition][1][StateIdle] Events: 0, PendingCommits: 0, PendingWrites: 0 2026-01-08T15:33:27.712140Z node 1 :PERSQUEUE DEBUG: partition.cpp:2337: [72075186233409546][Partition][1][StateIdle] Process user action and tx pending commits 2026-01-08T15:33:27.712177Z node 1 :PERSQUEUE DEBUG: partition.cpp:2388: [72075186233409546][Partition][1][StateIdle] Events: 0, PendingCommits: 0, PendingWrites: 0 2026-01-08T15:33:27.712207Z node 1 :PERSQUEUE DEBUG: partition.cpp:2347: [72075186233409546][Partition][1][StateIdle] Try persist 2026-01-08T15:33:27.712287Z node 1 :PERSQUEUE DEBUG: partition.cpp:2329: [72075186233409546][Partition][2][StateIdle] Process user action and tx events 2026-01-08T15:33:27.712337Z node 1 :PERSQUEUE DEBUG: partition.cpp:2388: [72075186233409546][Partition][2][StateIdle] Events: 0, PendingCommits: 0, PendingWrites: 0 2026-01-08T15:33:27.712367Z node 1 :PERSQUEUE DEBUG: partition.cpp:2337: [72075186233409546][Partition][2][StateIdle] Process user action and tx pending commits 2026-01-08T15:33:27.712402Z node 1 :PERSQUEUE DEBUG: partition.cpp:2388: [72075186233409546][Partition][2][StateIdle] Events: 0, PendingCommits: 0, PendingWrites: 0 2026-01-08T15:33:27.712427Z node 1 :PERSQUEUE DEBUG: partition.cpp:2347: [72075186233409546][Partition][2][StateIdle] Try persist 2026-01-08T15:33:27.712493Z node 1 :PERSQUEUE DEBUG: partition.cpp:2329: [72075186233409546][Partition][0][StateIdle] Process user action and tx events 2026-01-08T15:33:27.712519Z node 1 :PERSQUEUE DEBUG: partition.cpp:2388: [72075186233409546][Partition][0][StateIdle] Events: 0, PendingCommits: 0, PendingWrites: 0 2026-01-08T15:33:27.712676Z node 1 :PERSQUEUE DEBUG: partition.cpp:2337: [72075186233409546][Partition][0][StateIdle] Process user action and tx pending commits 2026-01-08T15:33:27.712705Z node 1 :PERSQUEUE DEBUG: partition.cpp:2388: [72075186233409546][Partition][0][StateIdle] Events: 0, PendingCommits: 0, PendingWrites: 0 2026-01-08T15:33:27.712730Z node 1 :PERSQUEUE DEBUG: partition.cpp:2347: [72075186233409546][Partition][0][StateIdle] Try persist 2026-01-08T15:33:27.754642Z node 1 :PERSQUEUE DEBUG: partition.cpp:2329: [72075186233409546][Partition][1][StateIdle] Process user action and tx events 2026-01-08T15:33:27.754732Z node 1 :PERSQUEUE DEBUG: partition.cpp:2388: 
[72075186233409546][Partition][1][StateIdle] Events: 0, PendingCommits: 0, PendingWrites: 0 2026-01-08T15:33:27.754773Z node 1 :PERSQUEUE DEBUG: partition.cpp:2337: [72075186233409546][Partition][1][StateIdle] Process user action and tx pending commits 2026-01-08T15:33:27.754815Z node 1 :PERSQUEUE DEBUG: partition.cpp:2388: [72075186233409546][Partition][1][StateIdle] Events: 0, PendingCommits: 0, PendingWrites: 0 2026-01-08T15:33:27.754850Z node 1 :PERSQUEUE DEBUG: partition.cpp:2347: [72075186233409546][Partition][1][StateIdle] Try persist 2026-01-08T15:33:27.755036Z node 1 :PERSQUEUE DEBUG: partition.cpp:2329: [72075186233409546][Partition][2][StateIdle] Process user action and tx events 2026-01-08T15:33:27.755072Z node 1 :PERSQUEUE DEBUG: partition.cpp:2388: [72075186233409546][Partition][2][StateIdle] Events: 0, PendingCommits: 0, PendingWrites: 0 2026-01-08T15:33:27.755100Z node 1 :PERSQUEUE DEBUG: partition.cpp:2337: [72075186233409546][Partition][2][StateIdle] Process user action and tx pending commits 2026-01-08T15:33:27.755129Z node 1 :PERSQUEUE DEBUG: partition.cpp:2388: [72075186233409546][Partition][2][StateIdle] Events: 0, PendingCommits: 0, PendingWrites: 0 2026-01-08T15:33:27.755158Z node 1 :PERSQUEUE DEBUG: partition.cpp:2347: [72075186233409546][Partition][2][StateIdle] Try persist 2026-01-08T15:33:27.755218Z node 1 :PERSQUEUE DEBUG: partition.cpp:2329: [72075186233409546][Partition][0][StateIdle] Process user action and tx events 2026-01-08T15:33:27.755245Z node 1 :PERSQUEUE DEBUG: partition.cpp:2388: [72075186233409546][Partition][0][StateIdle] Events: 0, PendingCommits: 0, PendingWrites: 0 2026-01-08T15:33:27.755269Z node 1 :PERSQUEUE DEBUG: partition.cpp:2337: [72075186233409546][Partition][0][StateIdle] Process user action and tx pending commits 2026-01-08T15:33:27.755299Z node 1 :PERSQUEUE DEBUG: partition.cpp:2388: [72075186233409546][Partition][0][StateIdle] Events: 0, PendingCommits: 0, PendingWrites: 0 2026-01-08T15:33:27.755322Z node 1 :PERSQUEUE DEBUG: partition.cpp:2347: [72075186233409546][Partition][0][StateIdle] Try persist 2026-01-08T15:33:27.790537Z node 1 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer.cpp:159: [72075186233409547][Topic1] TPersQueueReadBalancer::HandleWakeup 2026-01-08T15:33:27.790622Z node 1 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer.cpp:445: [72075186233409547][Topic1] Send TEvPersQueue::TEvStatus TabletId: 72075186233409546 Cookie: 3 2026-01-08T15:33:27.791010Z node 1 :PERSQUEUE DEBUG: partition.cpp:1025: [72075186233409546][Partition][0][StateIdle] Topic PartitionStatus PartitionSize: 16975298 UsedReserveSize: 16975298 ReserveSize: 45532800 PartitionConfig{ LifetimeSeconds: 2678400 WriteSpeedInBytesPerSecond: 17 TotalPartitions: 3 } 2026-01-08T15:33:27.791136Z node 1 :PERSQUEUE DEBUG: partition.cpp:1025: [72075186233409546][Partition][2][StateIdle] Topic PartitionStatus PartitionSize: 0 UsedReserveSize: 0 ReserveSize: 45532800 PartitionConfig{ LifetimeSeconds: 2678400 WriteSpeedInBytesPerSecond: 17 TotalPartitions: 3 } 2026-01-08T15:33:27.791221Z node 1 :PERSQUEUE DEBUG: partition.cpp:1025: [72075186233409546][Partition][1][StateIdle] Topic PartitionStatus PartitionSize: 0 UsedReserveSize: 0 ReserveSize: 45532800 PartitionConfig{ LifetimeSeconds: 2678400 WriteSpeedInBytesPerSecond: 17 TotalPartitions: 3 } 2026-01-08T15:33:27.791625Z node 1 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer.cpp:523: [72075186233409547][Topic1] Send TEvPeriodicTopicStats PathId: 38 Generation: 2 StatsReportRound: 3 DataSize: 16975298 UsedReserveSize: 
16975298 2026-01-08T15:33:27.791667Z node 1 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer__balancing.cpp:1843: [72075186233409547][Topic1] ProcessPendingStats. PendingUpdates size 0 2026-01-08T15:33:27.791900Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__pq_stats.cpp:100: Got periodic topic stats at partition [OwnerId: 72057594046678944, LocalPathId: 38] DataSize 16975298 UsedReserveSize 16975298 2026-01-08T15:33:27.813797Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__pq_stats.cpp:119: Started TEvPersistStats at tablet 72057594046678944, queue size# 0 2026-01-08T15:33:27.834753Z node 1 :PERSQUEUE DEBUG: partition.cpp:2329: [72075186233409546][Partition][1][StateIdle] Process user action and tx events 2026-01-08T15:33:27.834836Z node 1 :PERSQUEUE DEBUG: partition.cpp:2388: [72075186233409546][Partition][1][StateIdle] Events: 0, PendingCommits: 0, PendingWrites: 0 2026-01-08T15:33:27.834873Z node 1 :PERSQUEUE DEBUG: partition.cpp:2337: [72075186233409546][Partition][1][StateIdle] Process user action and tx pending commits 2026-01-08T15:33:27.834913Z node 1 :PERSQUEUE DEBUG: partition.cpp:2388: [72075186233409546][Partition][1][StateIdle] Events: 0, PendingCommits: 0, PendingWrites: 0 2026-01-08T15:33:27.834947Z node 1 :PERSQUEUE DEBUG: partition.cpp:2347: [72075186233409546][Partition][1][StateIdle] Try persist 2026-01-08T15:33:27.835045Z node 1 :PERSQUEUE DEBUG: partition.cpp:2329: [72075186233409546][Partition][2][StateIdle] Process user action and tx events 2026-01-08T15:33:27.835078Z node 1 :PERSQUEUE DEBUG: partition.cpp:2388: [72075186233409546][Partition][2][StateIdle] Events: 0, PendingCommits: 0, PendingWrites: 0 2026-01-08T15:33:27.835101Z node 1 :PERSQUEUE DEBUG: partition.cpp:2337: [72075186233409546][Partition][2][StateIdle] Process user action and tx pending commits 2026-01-08T15:33:27.835144Z node 1 :PERSQUEUE DEBUG: partition.cpp:2388: [72075186233409546][Partition][2][StateIdle] Events: 0, PendingCommits: 0, PendingWrites: 0 2026-01-08T15:33:27.835169Z node 1 :PERSQUEUE DEBUG: partition.cpp:2347: [72075186233409546][Partition][2][StateIdle] Try persist 2026-01-08T15:33:27.835226Z node 1 :PERSQUEUE DEBUG: partition.cpp:2329: [72075186233409546][Partition][0][StateIdle] Process user action and tx events 2026-01-08T15:33:27.835253Z node 1 :PERSQUEUE DEBUG: partition.cpp:2388: [72075186233409546][Partition][0][StateIdle] Events: 0, PendingCommits: 0, PendingWrites: 0 2026-01-08T15:33:27.835293Z node 1 :PERSQUEUE DEBUG: partition.cpp:2337: [72075186233409546][Partition][0][StateIdle] Process user action and tx pending commits 2026-01-08T15:33:27.835323Z node 1 :PERSQUEUE DEBUG: partition.cpp:2388: [72075186233409546][Partition][0][StateIdle] Events: 0, PendingCommits: 0, PendingWrites: 0 2026-01-08T15:33:27.835348Z node 1 :PERSQUEUE DEBUG: partition.cpp:2347: [72075186233409546][Partition][0][StateIdle] Try persist 2026-01-08T15:33:27.856710Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/Topic1" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2026-01-08T15:33:27.856987Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/Topic1" took 325us result status StatusSuccess 2026-01-08T15:33:27.857536Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: 
Status: StatusSuccess Path: "/MyRoot/Topic1" PathDescription { Self { Name: "Topic1" PathId: 38 SchemeshardId: 72057594046678944 PathType: EPathTypePersQueueGroup CreateFinished: true CreateTxId: 101 CreateStep: 5000038 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 PQVersion: 1 } ChildrenExist: false } PersQueueGroup { Name: "Topic1" PathId: 38 TotalGroupCount: 3 PartitionPerTablet: 3 PQTabletConfig { PartitionConfig { LifetimeSeconds: 2678400 WriteSpeedInBytesPerSecond: 17 } YdbDatabasePath: "/MyRoot" MeteringMode: METERING_MODE_RESERVED_CAPACITY } Partitions { PartitionId: 0 TabletId: 72075186233409546 Status: Active } Partitions { PartitionId: 1 TabletId: 72075186233409546 Status: Active } Partitions { PartitionId: 2 TabletId: 72075186233409546 Status: Active } AlterVersion: 1 BalancerTabletID: 72075186233409547 NextPartitionId: 3 } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 37 PathsLimit: 10000 ShardsInside: 2 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 136598400 AccountSize: 136598400 DataSize: 16975298 UsedReserveSize: 16975298 } } PQPartitionsInside: 3 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 38 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 |87.0%| [TM] {BAZEL_UPLOAD} ydb/core/tx/schemeshard/ut_stats/unittest >> KqpPg::V1CreateTable [GOOD] >> KqpPg::ValuesInsert+useSink >> TTxDataShardRecomputeKMeansScan::EmptyCluster [GOOD] >> TTxDataShardReshuffleKMeansScan::BadRequest |87.0%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/schemeshard/ut_restore/ydb-core-tx-schemeshard-ut_restore |87.0%| [LD] {RESULT} $(B)/ydb/core/tx/schemeshard/ut_restore/ydb-core-tx-schemeshard-ut_restore |87.0%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/schemeshard/ut_restore/ydb-core-tx-schemeshard-ut_restore |87.0%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/sharding/ut/unittest |87.0%| [TM] {BAZEL_UPLOAD} ydb/core/tx/sharding/ut/unittest >> TSchemeshardStatsBatchingTest::ShouldPersistByBatchSize [GOOD] >> TSchemeshardStatsBatchingTest::ShouldNotBatchWhenDisabled [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-LocalUser-CreateUser-3 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-LocalUser-CreateUser-4 >> TxUsage::WriteToTopic_Demo_43_Query [GOOD] >> BasicUsage::PropagateSessionClosed >> KqpBatchDelete::TableWithIndex [GOOD] |87.0%| [TA] $(B)/ydb/core/tx/sharding/ut/test-results/unittest/{meta.json ... results_accumulator.log} |87.0%| [TA] {BAZEL_UPLOAD} $(B)/ydb/core/tx/sharding/ut/test-results/unittest/{meta.json ... 
results_accumulator.log} ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_stats/unittest >> TSchemeshardStatsBatchingTest::ShouldPersistByBatchSize [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2143] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2143] Leader for TabletID 72057594046678944 is [1:129:2154] sender: [1:130:2058] recipient: [1:112:2143] 2026-01-08T15:33:18.545235Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7938: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2026-01-08T15:33:18.545337Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7966: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2026-01-08T15:33:18.545376Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7852: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2026-01-08T15:33:18.545430Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7868: OperationsProcessing config: using default configuration 2026-01-08T15:33:18.545483Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7874: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2026-01-08T15:33:18.545523Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7874: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2026-01-08T15:33:18.545574Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7998: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2026-01-08T15:33:18.545671Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_shred_manager.cpp:39: [RootShredManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, ShredInterval# 604800.000000s, ShredBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2026-01-08T15:33:18.546899Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:8069: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2026-01-08T15:33:18.547234Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2026-01-08T15:33:18.637769Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7755: Cannot subscribe to console configs 2026-01-08T15:33:18.637840Z node 1 :IMPORT WARN: schemeshard_import.cpp:440: Table profiles were not loaded 2026-01-08T15:33:18.649382Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2026-01-08T15:33:18.649755Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2026-01-08T15:33:18.649964Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2026-01-08T15:33:18.695790Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2026-01-08T15:33:18.696312Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:464: Clear TempDirsState with owners number: 0 2026-01-08T15:33:18.697044Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1383: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 
72057594046678944 2026-01-08T15:33:18.703310Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2026-01-08T15:33:18.707927Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:160: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2026-01-08T15:33:18.708156Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_shred_manager.cpp:84: [RootShredManager] Stop 2026-01-08T15:33:18.709442Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2026-01-08T15:33:18.709504Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2026-01-08T15:33:18.709614Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2026-01-08T15:33:18.709658Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2026-01-08T15:33:18.709696Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:213: TTxServerlessStorageBilling.Complete 2026-01-08T15:33:18.709855Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:7111: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2026-01-08T15:33:18.901648Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot" OperationType: ESchemeOpMkDir MkDir { Name: ".sys" } Internal: true FailOnExist: false } TxId: 281474976710657 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:33:18.902760Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "auth_permissions" Type: EAuthPermissions } } TxId: 281474976710658 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:33:18.902883Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "auth_effective_permissions" Type: EAuthEffectivePermissions } } TxId: 281474976710659 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:33:18.902979Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "auth_users" Type: EAuthUsers } } TxId: 281474976710660 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:33:18.903073Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: 
SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "pg_tables" Type: EPgTables } } TxId: 281474976710661 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:33:18.903160Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "ds_pdisks" Type: EPDisks } } TxId: 281474976710662 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:33:18.903247Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "auth_group_members" Type: EAuthGroupMembers } } TxId: 281474976710663 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:33:18.903320Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "top_partitions_one_minute" Type: ETopPartitionsByCpuOneMinute } } TxId: 281474976710664 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:33:18.903408Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "top_queries_by_request_units_one_hour" Type: ETopQueriesByRequestUnitsOneHour } } TxId: 281474976710665 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:33:18.903481Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "ds_vslots" Type: EVSlots } } TxId: 281474976710666 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:33:18.903545Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "top_partitions_by_tli_one_hour" Type: ETopPartitionsByTliOneHour } } TxId: 281474976710667 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:33:18.903636Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true 
FailOnExist: false CreateSysView { Name: "nodes" Type: ENodes } } TxId: 281474976710668 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:33:18.903715Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "top_queries_by_cpu_time_one_hour" Type: ETopQueriesByCpuTimeOneHour } } TxId: 281474976710669 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:33:18.903828Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "top_queries_by_read_bytes_one_hour" Type: ETopQueriesByReadBytesOneHour } } TxId: 281474976710670 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:33:18.903919Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "top_queries_by_read_bytes_one_minute" Type: ETopQueriesByR ... 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:2514: TTxInit for ChannelsBinding, read records: 6, at schemeshard: 72057594046678944 2026-01-08T15:33:30.001481Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:2893: TTxInit for TableIndexes, read records: 0, at schemeshard: 72057594046678944 2026-01-08T15:33:30.001590Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:2972: TTxInit for TableIndexKeys, read records: 0, at schemeshard: 72057594046678944 2026-01-08T15:33:30.001920Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:3473: TTxInit for KesusInfos, read records: 0, at schemeshard: 72057594046678944 2026-01-08T15:33:30.001988Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:3509: TTxInit for KesusAlters, read records: 0, at schemeshard: 72057594046678944 2026-01-08T15:33:30.002203Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:3754: TTxInit for TxShards, read records: 0, at schemeshard: 72057594046678944 2026-01-08T15:33:30.002361Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:3899: TTxInit for ShardToDelete, read records: 0, at schemeshard: 72057594046678944 2026-01-08T15:33:30.002415Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:3916: TTxInit for SystemShardToDelete, read records: 0, at schemeshard: 72057594046678944 2026-01-08T15:33:30.002500Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:3933: TTxInit for BackupSettings, read records: 0, at schemeshard: 72057594046678944 2026-01-08T15:33:30.002709Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4094: TTxInit for ShardBackupStatus, read records: 0, at schemeshard: 72057594046678944 2026-01-08T15:33:30.002780Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4110: TTxInit for CompletedBackup, read records: 0, at schemeshard: 72057594046678944 2026-01-08T15:33:30.003167Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4395: TTxInit for Publications, read records: 0, at schemeshard: 
72057594046678944 2026-01-08T15:33:30.003368Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4762: IndexBuild , records: 0, at schemeshard: 72057594046678944 2026-01-08T15:33:30.003439Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4828: KMeansTreeSample records: 0, at schemeshard: 72057594046678944 2026-01-08T15:33:30.003487Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4887: KMeansTreeCluster records: 0, at schemeshard: 72057594046678944 2026-01-08T15:33:30.003641Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4976: SnapshotTables: snapshots: 0 tables: 0, at schemeshard: 72057594046678944 2026-01-08T15:33:30.003696Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:5003: SnapshotSteps: snapshots: 0, at schemeshard: 72057594046678944 2026-01-08T15:33:30.003779Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:5030: LongLocks: records: 0, at schemeshard: 72057594046678944 2026-01-08T15:33:30.004075Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:173: TSideEffects ApplyOnExecute at tablet# 72057594046678944 2026-01-08T15:33:30.011887Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:216: TSideEffects ApplyOnComplete at tablet# 72057594046678944 2026-01-08T15:33:30.012139Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_shred_manager.cpp:84: [RootShredManager] Stop 2026-01-08T15:33:30.016505Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5248: StateWork, received event# 2146435083, Sender [1:1532:3464], Recipient [1:1532:3464]: NKikimr::NSchemeShard::TEvPrivate::TEvServerlessStorageBilling 2026-01-08T15:33:30.016591Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5311: StateWork, processing event TEvPrivate::TEvServerlessStorageBilling 2026-01-08T15:33:30.018012Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2026-01-08T15:33:30.018106Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2026-01-08T15:33:30.024454Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5248: StateWork, received event# 271124999, Sender [1:1532:3464], Recipient [1:1532:3464]: NKikimr::NSchemeShard::TEvSchemeShard::TEvMeasureSelfResponseTime 2026-01-08T15:33:30.024529Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5255: StateWork, processing event TEvSchemeShard::TEvMeasureSelfResponseTime 2026-01-08T15:33:30.024864Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2026-01-08T15:33:30.024944Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2026-01-08T15:33:30.025042Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:213: TTxServerlessStorageBilling.Complete 2026-01-08T15:33:30.025089Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:216: TSideEffects ApplyOnComplete at tablet# 72057594046678944 2026-01-08T15:33:30.027030Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5248: StateWork, received event# 274399233, Sender [1:1568:3464], Recipient [1:1532:3464]: NKikimr::TEvTxAllocatorClient::TEvAllocateResult 
2026-01-08T15:33:30.027096Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5421: StateWork, processing event TEvTxAllocatorClient::TEvAllocateResult 2026-01-08T15:33:30.027161Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:7111: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 Leader for TabletID 72057594046678944 is [1:1532:3464] sender: [1:1589:2058] recipient: [1:15:2062] 2026-01-08T15:33:30.077332Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5248: StateWork, received event# 271122945, Sender [1:1588:3509], Recipient [1:1532:3464]: NKikimrSchemeOp.TDescribePath Path: "/MyRoot/Simple" Options { ReturnPartitioningInfo: true ReturnPartitionConfig: true BackupInfo: false ReturnBoundaries: true ShowPrivateTable: true } 2026-01-08T15:33:30.077396Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5260: StateWork, processing event TEvSchemeShard::TEvDescribeScheme 2026-01-08T15:33:30.077505Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/Simple" Options { ReturnPartitioningInfo: true ReturnPartitionConfig: true BackupInfo: false ReturnBoundaries: true ShowPrivateTable: true }, at schemeshard: 72057594046678944 2026-01-08T15:33:30.077805Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/Simple" took 293us result status StatusSuccess 2026-01-08T15:33:30.079399Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/Simple" PathDescription { Self { Name: "Simple" PathId: 38 SchemeshardId: 72057594046678944 PathType: EPathTypeTable CreateFinished: true CreateTxId: 1001 CreateStep: 5000038 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 4 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 4 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 2 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "Simple" Columns { Name: "key" Type: "Uint64" TypeId: 4 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "value" Type: "Utf8" TypeId: 4608 Id: 2 NotNull: false IsBuildInProgress: false } KeyColumnNames: "key" KeyColumnIds: 1 PartitionConfig { CompactionPolicy { InMemSizeToSnapshot: 4194304 InMemStepsToSnapshot: 300 InMemForceStepsToSnapshot: 500 InMemForceSizeToSnapshot: 16777216 InMemCompactionBrokerQueue: 0 ReadAheadHiThreshold: 67108864 ReadAheadLoThreshold: 16777216 MinDataPageSize: 7168 SnapBrokerQueue: 0 Generation { GenerationId: 0 SizeToCompact: 0 CountToCompact: 8 ForceCountToCompact: 8 ForceSizeToCompact: 134217728 CompactionBrokerQueue: 4294967295 KeepInCache: true BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen1" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 0 UpliftPartSize: 0 } Generation { GenerationId: 1 SizeToCompact: 41943040 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 536870912 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen2" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 
ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 8388608 UpliftPartSize: 8388608 } Generation { GenerationId: 2 SizeToCompact: 419430400 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 17179869184 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen3" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 83886080 UpliftPartSize: 83886080 } BackupBrokerQueue: 1 DefaultTaskPriority: 5 BackgroundSnapshotPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } InMemResourceBrokerTask: "compaction_gen0" SnapshotResourceBrokerTask: "compaction_gen0" BackupResourceBrokerTask: "scan" LogOverheadSizeToSnapshot: 16777216 LogOverheadCountToSnapshot: 500 DroppedRowsPercentToCompact: 50 MinBTreeIndexNodeSize: 7168 MinBTreeIndexNodeKeys: 6 } PartitioningPolicy { SizeToSplit: 1 MinPartitionsCount: 20 MaxPartitionsCount: 20 } } TableSchemaVersion: 2 IsBackup: false IsRestore: false } TablePartitions { EndOfRangeKeyPrefix: "" IsPoint: false IsInclusive: false DatashardId: 72075186233409546 } TableStats { DataSize: 13984 RowCount: 100 IndexSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 2 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 1 RangeReadRows: 0 StoragePools { } ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 23235 Memory: 141504 Network: 0 Storage: 0 ReadThroughput: 0 WriteThroughput: 0 ReadIops: 0 WriteIops: 0 } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 38 PathsLimit: 10000 ShardsInside: 2 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 13984 DataSize: 13984 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 38 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 |87.0%| [TM] {BAZEL_UPLOAD} ydb/core/tx/schemeshard/ut_stats/unittest >> BasicUsage::GetAllStartPartitionSessions >> TStoragePoolsStatsPersistence::SameAggregatedStatsAfterRestart [GOOD] >> TxUsage::WriteToTopic_Demo_45_Table >> BasicUsage::WaitEventBlocksBeforeDiscovery >> BasicUsage::WriteSessionNoAvailableDatabase ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_stats/unittest >> TSchemeshardStatsBatchingTest::ShouldNotBatchWhenDisabled [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2143] IGNORE Leader for 
TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2143] Leader for TabletID 72057594046678944 is [1:129:2154] sender: [1:130:2058] recipient: [1:112:2143] 2026-01-08T15:33:18.626042Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7938: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2026-01-08T15:33:18.626155Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7966: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2026-01-08T15:33:18.626203Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7852: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2026-01-08T15:33:18.626245Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7868: OperationsProcessing config: using default configuration 2026-01-08T15:33:18.626353Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7874: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2026-01-08T15:33:18.626398Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7874: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2026-01-08T15:33:18.626453Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7998: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2026-01-08T15:33:18.626539Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_shred_manager.cpp:39: [RootShredManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, ShredInterval# 604800.000000s, ShredBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2026-01-08T15:33:18.627545Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:8069: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2026-01-08T15:33:18.627904Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2026-01-08T15:33:18.814975Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7755: Cannot subscribe to console configs 2026-01-08T15:33:18.815050Z node 1 :IMPORT WARN: schemeshard_import.cpp:440: Table profiles were not loaded 2026-01-08T15:33:18.836841Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2026-01-08T15:33:18.838452Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2026-01-08T15:33:18.838666Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2026-01-08T15:33:18.889017Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2026-01-08T15:33:18.889492Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:464: Clear TempDirsState with owners number: 0 2026-01-08T15:33:18.890340Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1383: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2026-01-08T15:33:18.894611Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2026-01-08T15:33:18.908413Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: 
schemeshard__init_root.cpp:160: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2026-01-08T15:33:18.908650Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_shred_manager.cpp:84: [RootShredManager] Stop 2026-01-08T15:33:18.909863Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2026-01-08T15:33:18.909934Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2026-01-08T15:33:18.910085Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2026-01-08T15:33:18.910140Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2026-01-08T15:33:18.910189Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:213: TTxServerlessStorageBilling.Complete 2026-01-08T15:33:18.910380Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:7111: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2026-01-08T15:33:19.160627Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot" OperationType: ESchemeOpMkDir MkDir { Name: ".sys" } Internal: true FailOnExist: false } TxId: 281474976710657 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:33:19.161773Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "auth_permissions" Type: EAuthPermissions } } TxId: 281474976710658 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:33:19.161953Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "auth_effective_permissions" Type: EAuthEffectivePermissions } } TxId: 281474976710659 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:33:19.162060Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "auth_users" Type: EAuthUsers } } TxId: 281474976710660 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:33:19.162145Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "pg_tables" Type: EPgTables } } TxId: 281474976710661 Owner: 
"metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:33:19.162244Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "ds_pdisks" Type: EPDisks } } TxId: 281474976710662 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:33:19.162365Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "auth_group_members" Type: EAuthGroupMembers } } TxId: 281474976710663 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:33:19.162449Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "top_partitions_one_minute" Type: ETopPartitionsByCpuOneMinute } } TxId: 281474976710664 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:33:19.162536Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "top_queries_by_request_units_one_hour" Type: ETopQueriesByRequestUnitsOneHour } } TxId: 281474976710665 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:33:19.162612Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "ds_vslots" Type: EVSlots } } TxId: 281474976710666 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:33:19.162683Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "top_partitions_by_tli_one_hour" Type: ETopPartitionsByTliOneHour } } TxId: 281474976710667 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:33:19.162774Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "nodes" Type: ENodes } } TxId: 281474976710668 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:33:19.162849Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 
72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "top_queries_by_cpu_time_one_hour" Type: ETopQueriesByCpuTimeOneHour } } TxId: 281474976710669 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:33:19.162949Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "top_queries_by_read_bytes_one_hour" Type: ETopQueriesByReadBytesOneHour } } TxId: 281474976710670 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:33:19.163041Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "top_queries_by_read_bytes_one_minute" Type: ETopQueriesByR ... T15:33:30.366443Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:2514: TTxInit for ChannelsBinding, read records: 3, at schemeshard: 72057594046678944 2026-01-08T15:33:30.366784Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:2893: TTxInit for TableIndexes, read records: 0, at schemeshard: 72057594046678944 2026-01-08T15:33:30.366919Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:2972: TTxInit for TableIndexKeys, read records: 0, at schemeshard: 72057594046678944 2026-01-08T15:33:30.367295Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:3473: TTxInit for KesusInfos, read records: 0, at schemeshard: 72057594046678944 2026-01-08T15:33:30.367371Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:3509: TTxInit for KesusAlters, read records: 0, at schemeshard: 72057594046678944 2026-01-08T15:33:30.367700Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:3754: TTxInit for TxShards, read records: 0, at schemeshard: 72057594046678944 2026-01-08T15:33:30.367891Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:3899: TTxInit for ShardToDelete, read records: 0, at schemeshard: 72057594046678944 2026-01-08T15:33:30.367946Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:3916: TTxInit for SystemShardToDelete, read records: 0, at schemeshard: 72057594046678944 2026-01-08T15:33:30.368094Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:3933: TTxInit for BackupSettings, read records: 0, at schemeshard: 72057594046678944 2026-01-08T15:33:30.368290Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4094: TTxInit for ShardBackupStatus, read records: 0, at schemeshard: 72057594046678944 2026-01-08T15:33:30.368374Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4110: TTxInit for CompletedBackup, read records: 0, at schemeshard: 72057594046678944 2026-01-08T15:33:30.368799Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4395: TTxInit for Publications, read records: 0, at schemeshard: 72057594046678944 2026-01-08T15:33:30.369071Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4762: IndexBuild , records: 0, at schemeshard: 72057594046678944 2026-01-08T15:33:30.369166Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4828: KMeansTreeSample 
records: 0, at schemeshard: 72057594046678944 2026-01-08T15:33:30.369218Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4887: KMeansTreeCluster records: 0, at schemeshard: 72057594046678944 2026-01-08T15:33:30.369364Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4976: SnapshotTables: snapshots: 0 tables: 0, at schemeshard: 72057594046678944 2026-01-08T15:33:30.369447Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:5003: SnapshotSteps: snapshots: 0, at schemeshard: 72057594046678944 2026-01-08T15:33:30.369511Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:5030: LongLocks: records: 0, at schemeshard: 72057594046678944 2026-01-08T15:33:30.369857Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:173: TSideEffects ApplyOnExecute at tablet# 72057594046678944 2026-01-08T15:33:30.376267Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:216: TSideEffects ApplyOnComplete at tablet# 72057594046678944 2026-01-08T15:33:30.376468Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_shred_manager.cpp:84: [RootShredManager] Stop 2026-01-08T15:33:30.380932Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5248: StateWork, received event# 2146435083, Sender [1:1412:3356], Recipient [1:1412:3356]: NKikimr::NSchemeShard::TEvPrivate::TEvServerlessStorageBilling 2026-01-08T15:33:30.381004Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5311: StateWork, processing event TEvPrivate::TEvServerlessStorageBilling 2026-01-08T15:33:30.382510Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2026-01-08T15:33:30.382617Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2026-01-08T15:33:30.383582Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5248: StateWork, received event# 271124999, Sender [1:1412:3356], Recipient [1:1412:3356]: NKikimr::NSchemeShard::TEvSchemeShard::TEvMeasureSelfResponseTime 2026-01-08T15:33:30.383644Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5255: StateWork, processing event TEvSchemeShard::TEvMeasureSelfResponseTime 2026-01-08T15:33:30.383929Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2026-01-08T15:33:30.383995Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2026-01-08T15:33:30.384053Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:213: TTxServerlessStorageBilling.Complete 2026-01-08T15:33:30.384107Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:216: TSideEffects ApplyOnComplete at tablet# 72057594046678944 2026-01-08T15:33:30.386979Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5248: StateWork, received event# 274399233, Sender [1:1448:3356], Recipient [1:1412:3356]: NKikimr::TEvTxAllocatorClient::TEvAllocateResult 2026-01-08T15:33:30.387033Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5421: StateWork, processing event TEvTxAllocatorClient::TEvAllocateResult 2026-01-08T15:33:30.387076Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:7111: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 
72057594046678944 Leader for TabletID 72057594046678944 is [1:1412:3356] sender: [1:1469:2058] recipient: [1:15:2062] 2026-01-08T15:33:30.428061Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5248: StateWork, received event# 271122945, Sender [1:1468:3401], Recipient [1:1412:3356]: NKikimrSchemeOp.TDescribePath Path: "/MyRoot/Simple" Options { ReturnPartitioningInfo: true ReturnPartitionConfig: true BackupInfo: false ReturnBoundaries: true ShowPrivateTable: true } 2026-01-08T15:33:30.428151Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5260: StateWork, processing event TEvSchemeShard::TEvDescribeScheme 2026-01-08T15:33:30.428282Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/Simple" Options { ReturnPartitioningInfo: true ReturnPartitionConfig: true BackupInfo: false ReturnBoundaries: true ShowPrivateTable: true }, at schemeshard: 72057594046678944 2026-01-08T15:33:30.428685Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/Simple" took 344us result status StatusSuccess 2026-01-08T15:33:30.429645Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/Simple" PathDescription { Self { Name: "Simple" PathId: 38 SchemeshardId: 72057594046678944 PathType: EPathTypeTable CreateFinished: true CreateTxId: 1001 CreateStep: 5000038 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "Simple" Columns { Name: "key" Type: "Uint64" TypeId: 4 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "value" Type: "Utf8" TypeId: 4608 Id: 2 NotNull: false IsBuildInProgress: false } KeyColumnNames: "key" KeyColumnIds: 1 PartitionConfig { CompactionPolicy { InMemSizeToSnapshot: 4194304 InMemStepsToSnapshot: 300 InMemForceStepsToSnapshot: 500 InMemForceSizeToSnapshot: 16777216 InMemCompactionBrokerQueue: 0 ReadAheadHiThreshold: 67108864 ReadAheadLoThreshold: 16777216 MinDataPageSize: 7168 SnapBrokerQueue: 0 Generation { GenerationId: 0 SizeToCompact: 0 CountToCompact: 8 ForceCountToCompact: 8 ForceSizeToCompact: 134217728 CompactionBrokerQueue: 4294967295 KeepInCache: true BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen1" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 0 UpliftPartSize: 0 } Generation { GenerationId: 1 SizeToCompact: 41943040 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 536870912 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen2" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 8388608 UpliftPartSize: 8388608 } Generation { GenerationId: 2 SizeToCompact: 419430400 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 17179869184 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 
PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen3" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 83886080 UpliftPartSize: 83886080 } BackupBrokerQueue: 1 DefaultTaskPriority: 5 BackgroundSnapshotPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } InMemResourceBrokerTask: "compaction_gen0" SnapshotResourceBrokerTask: "compaction_gen0" BackupResourceBrokerTask: "scan" LogOverheadSizeToSnapshot: 16777216 LogOverheadCountToSnapshot: 500 DroppedRowsPercentToCompact: 50 MinBTreeIndexNodeSize: 7168 MinBTreeIndexNodeKeys: 6 } PartitioningPolicy { MinPartitionsCount: 1 MaxPartitionsCount: 1 } } TableSchemaVersion: 1 IsBackup: false IsRestore: false } TablePartitions { EndOfRangeKeyPrefix: "" IsPoint: false IsInclusive: false DatashardId: 72075186233409546 } TableStats { DataSize: 13984 RowCount: 100 IndexSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 1 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 1 RangeReadRows: 0 StoragePools { } ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 0 Memory: 82624 Network: 0 Storage: 0 ReadThroughput: 0 WriteThroughput: 0 ReadIops: 0 WriteIops: 0 } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 37 PathsLimit: 10000 ShardsInside: 1 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 13984 DataSize: 13984 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 38 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 |87.0%| [TM] {BAZEL_UPLOAD} ydb/core/tx/schemeshard/ut_stats/unittest >> KqpPg::PgAggregate+useSink [GOOD] >> KqpPg::PgAggregate-useSink >> KqpErrors::ResolveTableError ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/batch_operations/unittest >> KqpBatchDelete::TableWithIndex [GOOD] Test command err: Trying to start YDB, gRPC: 63695, MsgBus: 24832 2026-01-08T15:33:14.861076Z node 1 :METADATA_PROVIDER WARN: log.cpp:841: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7593014245277739265:2073];send_to=[0:7307199536658146131:7762515]; 2026-01-08T15:33:14.873996Z node 1 :METADATA_PROVIDER ERROR: log.cpp:841: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2026-01-08T15:33:14.945577Z node 1 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:125: [ScriptExecutions] 
[TKqpFinalizeScriptService] Failed to check script execution tables existence, got undelivered to scheme cache: ActorUnknown 2026-01-08T15:33:15.406459Z node 1 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: LookupError, path: Root/.metadata/script_executions 2026-01-08T15:33:15.670774Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1374: Notification cookie mismatch for subscription [1:7593014245277739230:2081] 1767886394826875 != 1767886394826878 2026-01-08T15:33:15.672098Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2026-01-08T15:33:15.672186Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2026-01-08T15:33:15.677065Z node 1 :IMPORT WARN: schemeshard_import.cpp:440: Table profiles were not loaded 2026-01-08T15:33:15.690296Z node 1 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: PathErrorUnknown, path: Root/.metadata/script_executions 2026-01-08T15:33:15.692888Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2026-01-08T15:33:15.875364Z node 1 :TX_CONVEYOR ERROR: log.cpp:841: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2026-01-08T15:33:15.895322Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2026-01-08T15:33:15.895343Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2026-01-08T15:33:15.895348Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2026-01-08T15:33:15.895418Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2026-01-08T15:33:16.456373Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2026-01-08T15:33:16.533460Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:33:16.769546Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:33:17.019595Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at 
ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:33:17.113999Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:33:19.264343Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7593014266752577585:2400], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:33:19.264445Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:263: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:33:19.264888Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7593014266752577595:2401], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:33:19.264946Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:263: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:33:19.663524Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:33:19.737042Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:33:19.837842Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:33:19.838617Z node 1 :METADATA_PROVIDER ERROR: log.cpp:841: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7593014245277739265:2073];send_to=[0:7307199536658146131:7762515]; 2026-01-08T15:33:19.838690Z node 1 :METADATA_PROVIDER ERROR: log.cpp:841: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2026-01-08T15:33:19.912696Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:33:19.952111Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:33:20.015672Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:33:20.085518Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:33:20.152268Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710669:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:33:20.306829Z node 1 :KQP_WORKLOAD_SERVICE WARN: 
scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7593014271047545767:2485], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:33:20.306912Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:263: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:33:20.308301Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7593014271047545772:2488], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:33:20.308357Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7593014271047545773:2489], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:33:20.308544Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:263: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:33:20.318740Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710670:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2026-01-08T15:33:20.372501Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7593014271047545776:2490], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710670 completed, doublechecking } 2026-01-08T15:33:20.446856Z node 1 :TX_PROXY ERROR: schemereq.cpp:624: Actor# [1:7593014271047545827:3774] txid# 281474976710671, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 53], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2026-01-08T15:33:23.431686Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710673:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:33:23.602587Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710674:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:33:23.691669Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710675:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:33:26.634010Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710678:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:33:30.228176Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7747: Cannot get console configs 2026-01-08T15:33:30.228222Z node 1 :IMPORT WARN: schemeshard_import.cpp:440: Table profiles were not loaded |87.0%| [TM] {BAZEL_UPLOAD} ydb/core/kqp/ut/batch_operations/unittest >> DataShardWrite::UncommittedUpdateLockUncommittedNewRow [GOOD] >> DataShardWrite::UncommittedUpdateLockUncommittedDeleteRow ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_stats/unittest >> TStoragePoolsStatsPersistence::SameAggregatedStatsAfterRestart [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2143] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2143] Leader for TabletID 72057594046678944 is [1:129:2154] sender: [1:130:2058] recipient: [1:112:2143] 2026-01-08T15:33:25.380618Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7938: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2026-01-08T15:33:25.380714Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7966: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2026-01-08T15:33:25.380760Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7852: StatsBatching config: StatsBatchTimeout# 0.000000s, StatsMaxBatchSize# 0, StatsMaxExecuteTime# 
0.010000s 2026-01-08T15:33:25.380824Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7868: OperationsProcessing config: using default configuration 2026-01-08T15:33:25.380887Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7874: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2026-01-08T15:33:25.380923Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7874: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2026-01-08T15:33:25.380978Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7998: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2026-01-08T15:33:25.381082Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_shred_manager.cpp:39: [RootShredManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, ShredInterval# 604800.000000s, ShredBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2026-01-08T15:33:25.382014Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:8069: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2026-01-08T15:33:25.386492Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2026-01-08T15:33:25.547226Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7755: Cannot subscribe to console configs 2026-01-08T15:33:25.547311Z node 1 :IMPORT WARN: schemeshard_import.cpp:440: Table profiles were not loaded 2026-01-08T15:33:25.570877Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2026-01-08T15:33:25.571273Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2026-01-08T15:33:25.571496Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2026-01-08T15:33:25.588000Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2026-01-08T15:33:25.588439Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:464: Clear TempDirsState with owners number: 0 2026-01-08T15:33:25.589244Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1383: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2026-01-08T15:33:25.592751Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2026-01-08T15:33:25.597230Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:160: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2026-01-08T15:33:25.597465Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_shred_manager.cpp:84: [RootShredManager] Stop 2026-01-08T15:33:25.598741Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2026-01-08T15:33:25.598809Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2026-01-08T15:33:25.598923Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2026-01-08T15:33:25.598972Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, 
schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2026-01-08T15:33:25.599007Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:213: TTxServerlessStorageBilling.Complete 2026-01-08T15:33:25.599181Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:7111: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2026-01-08T15:33:25.752400Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot" OperationType: ESchemeOpMkDir MkDir { Name: ".sys" } Internal: true FailOnExist: false } TxId: 281474976710657 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:33:25.753444Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "auth_permissions" Type: EAuthPermissions } } TxId: 281474976710658 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:33:25.753565Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "auth_effective_permissions" Type: EAuthEffectivePermissions } } TxId: 281474976710659 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:33:25.753652Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "auth_users" Type: EAuthUsers } } TxId: 281474976710660 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:33:25.753748Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "pg_tables" Type: EPgTables } } TxId: 281474976710661 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:33:25.753853Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "ds_pdisks" Type: EPDisks } } TxId: 281474976710662 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:33:25.753935Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "auth_group_members" Type: EAuthGroupMembers } } TxId: 
281474976710663 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:33:25.754004Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "top_partitions_one_minute" Type: ETopPartitionsByCpuOneMinute } } TxId: 281474976710664 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:33:25.754091Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "top_queries_by_request_units_one_hour" Type: ETopQueriesByRequestUnitsOneHour } } TxId: 281474976710665 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:33:25.754162Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "ds_vslots" Type: EVSlots } } TxId: 281474976710666 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:33:25.754222Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "top_partitions_by_tli_one_hour" Type: ETopPartitionsByTliOneHour } } TxId: 281474976710667 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:33:25.754286Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "nodes" Type: ENodes } } TxId: 281474976710668 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:33:25.754389Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "top_queries_by_cpu_time_one_hour" Type: ETopQueriesByCpuTimeOneHour } } TxId: 281474976710669 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:33:25.754486Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "top_queries_by_read_bytes_one_hour" Type: ETopQueriesByReadBytesOneHour } } TxId: 281474976710670 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:33:25.754579Z node 1 :FLAT_TX_SCHEMESHARD 
DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "top_queries_by_read_bytes_one_minute" Type: ETopQueriesByRea ... thId: 37] was 0 2026-01-08T15:33:31.436673Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:2093: TTxInit for Columns, read records: 2, at schemeshard: 72057594046678944 2026-01-08T15:33:31.436878Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:2153: TTxInit for ColumnsAlters, read records: 0, at schemeshard: 72057594046678944 2026-01-08T15:33:31.437039Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:2211: TTxInit for Shards, read records: 1, at schemeshard: 72057594046678944 2026-01-08T15:33:31.437095Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:600: IncrementPathDbRefCount reason for pathId [OwnerId: 72057594046678944, LocalPathId: 38] was 1 2026-01-08T15:33:31.437258Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:2297: TTxInit for TablePartitions, read records: 1, at schemeshard: 72057594046678944 2026-01-08T15:33:31.437502Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:2363: TTxInit for TableShardPartitionConfigs, read records: 1, at schemeshard: 72057594046678944 2026-01-08T15:33:31.437887Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:2514: TTxInit for ChannelsBinding, read records: 3, at schemeshard: 72057594046678944 2026-01-08T15:33:31.438260Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:2893: TTxInit for TableIndexes, read records: 0, at schemeshard: 72057594046678944 2026-01-08T15:33:31.438422Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:2972: TTxInit for TableIndexKeys, read records: 0, at schemeshard: 72057594046678944 2026-01-08T15:33:31.438848Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:3473: TTxInit for KesusInfos, read records: 0, at schemeshard: 72057594046678944 2026-01-08T15:33:31.438962Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:3509: TTxInit for KesusAlters, read records: 0, at schemeshard: 72057594046678944 2026-01-08T15:33:31.439279Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:3754: TTxInit for TxShards, read records: 0, at schemeshard: 72057594046678944 2026-01-08T15:33:31.439478Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:3899: TTxInit for ShardToDelete, read records: 0, at schemeshard: 72057594046678944 2026-01-08T15:33:31.443413Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:3916: TTxInit for SystemShardToDelete, read records: 0, at schemeshard: 72057594046678944 2026-01-08T15:33:31.443611Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:3933: TTxInit for BackupSettings, read records: 0, at schemeshard: 72057594046678944 2026-01-08T15:33:31.443892Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4094: TTxInit for ShardBackupStatus, read records: 0, at schemeshard: 72057594046678944 2026-01-08T15:33:31.444016Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4110: TTxInit for CompletedBackup, read records: 0, at schemeshard: 72057594046678944 2026-01-08T15:33:31.444675Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4395: TTxInit for Publications, read records: 0, at schemeshard: 72057594046678944 2026-01-08T15:33:31.450500Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4762: IndexBuild , 
records: 0, at schemeshard: 72057594046678944 2026-01-08T15:33:31.450719Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4828: KMeansTreeSample records: 0, at schemeshard: 72057594046678944 2026-01-08T15:33:31.450788Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4887: KMeansTreeCluster records: 0, at schemeshard: 72057594046678944 2026-01-08T15:33:31.450981Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4976: SnapshotTables: snapshots: 0 tables: 0, at schemeshard: 72057594046678944 2026-01-08T15:33:31.451046Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:5003: SnapshotSteps: snapshots: 0, at schemeshard: 72057594046678944 2026-01-08T15:33:31.451106Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:5030: LongLocks: records: 0, at schemeshard: 72057594046678944 2026-01-08T15:33:31.464780Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_shred_manager.cpp:84: [RootShredManager] Stop 2026-01-08T15:33:31.480932Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2026-01-08T15:33:31.481040Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2026-01-08T15:33:31.482239Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2026-01-08T15:33:31.482346Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2026-01-08T15:33:31.482413Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:213: TTxServerlessStorageBilling.Complete 2026-01-08T15:33:31.500951Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:7111: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 Leader for TabletID 72057594046678944 is [1:1156:3109] sender: [1:1211:2058] recipient: [1:15:2062] 2026-01-08T15:33:31.568567Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/SomeTable" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2026-01-08T15:33:31.568857Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/SomeTable" took 345us result status StatusSuccess 2026-01-08T15:33:31.569389Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/SomeTable" PathDescription { Self { Name: "SomeTable" PathId: 38 SchemeshardId: 72057594046678944 PathType: EPathTypeTable CreateFinished: true CreateTxId: 101 CreateStep: 5000038 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "SomeTable" Columns { Name: "key" Type: "Uint32" TypeId: 2 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "value" Type: "Utf8" TypeId: 4608 Id: 2 Family: 1 FamilyName: 
"alternative" NotNull: false IsBuildInProgress: false } KeyColumnNames: "key" KeyColumnIds: 1 TableSchemaVersion: 1 IsBackup: false IsRestore: false } TableStats { DataSize: 4140 RowCount: 100 IndexSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 1 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 1 RangeReadRows: 0 StoragePools { PoolsUsage { PoolKind: "pool-kind-1" DataSize: 1020 IndexSize: 0 } PoolsUsage { PoolKind: "pool-kind-2" DataSize: 3120 IndexSize: 0 } } ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 0 Memory: 82624 Network: 0 Storage: 0 ReadThroughput: 0 WriteThroughput: 0 ReadIops: 0 WriteIops: 0 } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 37 PathsLimit: 10000 ShardsInside: 1 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 4140 DataSize: 4140 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } StoragePoolsUsage { PoolKind: "pool-kind-1" TotalSize: 1020 DataSize: 1020 IndexSize: 0 } StoragePoolsUsage { PoolKind: "pool-kind-2" TotalSize: 3120 DataSize: 3120 IndexSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 38 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 2026-01-08T15:33:31.583693Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2026-01-08T15:33:31.583994Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot" took 362us result status StatusSuccess 2026-01-08T15:33:31.584688Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot" PathDescription { Self { Name: "MyRoot" PathId: 1 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 5000037 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 8 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 8 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 6 SubDomainVersion: 1 SecurityStateVersion: 0 } ChildrenExist: true } Children { Name: ".sys" PathId: 2 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 281474976710657 CreateStep: 5000001 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "metadata@system" ACL: "" ChildrenExist: true } 
Children { Name: "SomeTable" PathId: 38 SchemeshardId: 72057594046678944 PathType: EPathTypeTable CreateFinished: true CreateTxId: 101 CreateStep: 5000038 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } PathsInside: 37 PathsLimit: 10000 ShardsInside: 1 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 4140 DataSize: 4140 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } StoragePoolsUsage { PoolKind: "pool-kind-1" TotalSize: 1020 DataSize: 1020 IndexSize: 0 } StoragePoolsUsage { PoolKind: "pool-kind-2" TotalSize: 3120 DataSize: 3120 IndexSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SecurityState { Audience: "/MyRoot" } SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 1 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 |87.0%| [TM] {BAZEL_UPLOAD} ydb/core/tx/schemeshard/ut_stats/unittest >> test_s3.py::TestYdbS3TTL::test_s3[table_index_0__SYNC-pk_types9-all_types9-index9---SYNC] [GOOD] >> TxUsage::Sinks_Oltp_WriteToTopicAndTable_3_Query [GOOD] >> TxUsage::WriteToTopic_Demo_19_RestartAfterCommit_Query [GOOD] >> KqpErrors::ProposeError >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-LocalUser-DropUser-51 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-LocalUser-DropUser-52 >> TxUsage::Sinks_Olap_WriteToTopicAndTable_1_Table >> DataShardWrite::DistributedInsertWithoutLocks+Volatile [GOOD] >> DataShardWrite::DistributedInsertDuplicateWithLocks+Volatile >> KqpErrors::ProposeResultLost_RwTx+UseSink >> DataShardWrite::ImmediateAndPlannedCommittedOpsRace [GOOD] >> DataShardWrite::DoubleWriteUncommittedThenDoubleReadWithCommit >> BasicUsage::FallbackToSingleDb >> TxUsage::WriteToTopic_Demo_16_Query [GOOD] >> test_s3.py::TestYdbS3TTL::test_s3[table_index_0_UNIQUE_SYNC-pk_types4-all_types4-index4--UNIQUE-SYNC] [GOOD] >> DataShardWrite::WriteUniqueRowsInsertDuplicateAtCommit [GOOD] >> DataShardWrite::VolatileAndNonVolatileWritePlanStepCommitFailure >> TxUsage::Sinks_Oltp_WriteToTopic_3_Query [GOOD] |87.0%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/schemeshard/ut_index_build/ydb-core-tx-schemeshard-ut_index_build |87.0%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/schemeshard/ut_index_build/ydb-core-tx-schemeshard-ut_index_build |87.0%| [TA] {RESULT} $(B)/ydb/core/tx/sharding/ut/test-results/unittest/{meta.json ... 
results_accumulator.log} |87.0%| [LD] {RESULT} $(B)/ydb/core/tx/schemeshard/ut_index_build/ydb-core-tx-schemeshard-ut_index_build >> TxUsage::WriteToTopic_Demo_20_RestartNo_Table >> BasicUsage::RetryDiscoveryWithCancel >> TxUsage::WriteToTopic_Demo_18_RestartNo_Table >> BasicUsage::WriteSessionCloseWaitsForWrites >> KqpPg::Returning+useSink [GOOD] >> KqpPg::Returning-useSink >> DataShardVolatile::UpsertNoLocksArbiter+UseSink [GOOD] >> DataShardVolatile::UpsertNoLocksArbiter-UseSink >> KqpPg::InsertNoTargetColumns_ColumnOrder-useSink [GOOD] >> KqpPg::InsertNoTargetColumns_NotOneSize+useSink >> BasicUsage::AlterTopicWithSharedConsumer_SetDeleteDeadLetterPolicy [GOOD] >> BasicUsage::AlterTopicWithSharedConsumer_SetMoveDeadLetterPolicy >> DataShardVolatile::DistributedWriteLostPlanThenDrop [GOOD] >> DataShardVolatile::DistributedWriteLostPlanThenSplit >> Yq_1::CreateConnection_With_Existing_Name [GOOD] >> Yq_1::CreateConnections_With_Idempotency >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-LocalUser-CreateUser-4 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-LocalUser-CreateUser-5 >> test_s3.py::TestYdbS3TTL::test_s3[table_index_0__ASYNC-pk_types11-all_types11-index11---ASYNC] [GOOD] >> TTxDataShardReshuffleKMeansScan::BadRequest [GOOD] >> TTxDataShardReshuffleKMeansScan::MainToPosting >> test_s3.py::TestYdbS3TTL::test_s3[table_index_1__ASYNC-pk_types10-all_types10-index10---ASYNC] [GOOD] >> TxUsage::Sinks_Oltp_WriteToTopic_4_Table >> TxUsage::The_TxWriteInfo_Is_Deleted_After_The_Immediate_Transaction [GOOD] >> DataShardWrite::UncommittedUpdateLockUncommittedDeleteRow [GOOD] >> DataShardWrite::LocksBrokenStats >> TxUsage::Sinks_Oltp_WriteToTopic_5_Table >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-LocalUser-DropUser-52 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-LocalUser-DropUser-53 >> KqpPg::PgAggregate-useSink [GOOD] >> KqpPg::MkqlTerminate >> RetryPolicy::RetryWithBatching [GOOD] |87.0%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/schemeshard/ut_serverless/ydb-core-tx-schemeshard-ut_serverless |87.0%| [LD] {RESULT} $(B)/ydb/core/tx/schemeshard/ut_serverless/ydb-core-tx-schemeshard-ut_serverless |87.0%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/schemeshard/ut_serverless/ydb-core-tx-schemeshard-ut_serverless |87.0%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/tx_allocator/ut/ydb-core-tx-tx_allocator-ut |87.0%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/tx_allocator/ut/ydb-core-tx-tx_allocator-ut |87.0%| [LD] {RESULT} $(B)/ydb/core/tx/tx_allocator/ut/ydb-core-tx-tx_allocator-ut >> DataShardWrite::DistributedInsertDuplicateWithLocks+Volatile [GOOD] >> DataShardWrite::DistributedInsertDuplicateWithLocks-Volatile >> Yq_1::DeleteConnections [GOOD] >> Yq_1::Create_And_Modify_The_Same_Connection |87.0%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/blobstorage/storagepoolmon/ut/ydb-core-blobstorage-storagepoolmon-ut |87.1%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/blobstorage/storagepoolmon/ut/ydb-core-blobstorage-storagepoolmon-ut |87.1%| [LD] {RESULT} $(B)/ydb/core/blobstorage/storagepoolmon/ut/ydb-core-blobstorage-storagepoolmon-ut >> DataShardWrite::VolatileAndNonVolatileWritePlanStepCommitFailure [GOOD] >> TxUsage::WriteToTopic_Demo_45_Table [GOOD] |87.1%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/kqp/ut/rbo/ydb-core-kqp-ut-rbo |87.1%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/kqp/ut/rbo/ydb-core-kqp-ut-rbo |87.1%| [LD] {RESULT} $(B)/ydb/core/kqp/ut/rbo/ydb-core-kqp-ut-rbo >> 
TxUsage::WriteToTopic_Demo_45_Query >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-LocalUser-CreateUser-5 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-LocalUser-CreateUser-6 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/datashard/ut_write/unittest >> DataShardWrite::VolatileAndNonVolatileWritePlanStepCommitFailure [GOOD] Test command err: ... waiting for SysViewsRoster update finished 2026-01-08T15:32:31.750568Z node 1 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:125: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, got undelivered to scheme cache: ActorUnknown 2026-01-08T15:32:31.906418Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:114:2161], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2026-01-08T15:32:31.906873Z node 1 :METADATA_PROVIDER ERROR: log.cpp:841: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2026-01-08T15:32:31.907088Z node 1 :METADATA_PROVIDER ERROR: log.cpp:841: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2026-01-08T15:32:32.219467Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2026-01-08T15:32:32.219639Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2026-01-08T15:32:32.320589Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1374: Notification cookie mismatch for subscription [1:34:2081] 1767886347559688 != 1767886347559692 2026-01-08T15:32:32.332530Z node 1 :IMPORT WARN: schemeshard_import.cpp:440: Table profiles were not loaded 2026-01-08T15:32:32.387836Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2026-01-08T15:32:32.517554Z node 1 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: PathErrorUnknown, path: Root/.metadata/script_executions ... waiting for SysViewsRoster update finished (done) 2026-01-08T15:32:32.967951Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2026-01-08T15:32:32.986102Z node 1 :TX_CONVEYOR ERROR: log.cpp:841: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2026-01-08T15:32:33.112242Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:32:33.160743Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3119: StateInit, received event# 268828672, Sender [1:877:2759], Recipient [1:886:2765]: NKikimr::TEvTablet::TEvBoot 2026-01-08T15:32:33.161816Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3119: StateInit, received event# 268828673, Sender [1:877:2759], Recipient [1:886:2765]: NKikimr::TEvTablet::TEvRestored 2026-01-08T15:32:33.162107Z node 1 :TX_DATASHARD INFO: datashard.cpp:374: TDataShard::OnActivateExecutor: tablet 72075186224037888 actor [1:886:2765] 2026-01-08T15:32:33.162397Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:661: TxInitSchema.Execute 2026-01-08T15:32:33.225941Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateInactive, received event# 268828684, Sender [1:877:2759], Recipient [1:886:2765]: NKikimr::TEvTablet::TEvFollowerSyncComplete 2026-01-08T15:32:33.228464Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:727: TxInitSchema.Complete 2026-01-08T15:32:33.228600Z node 1 :TX_DATASHARD DEBUG: 
datashard__init.cpp:50: TDataShard::TTxInit::Execute 2026-01-08T15:32:33.230437Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1324: LoadChangeRecords: QueueSize: 0, at tablet: 72075186224037888 2026-01-08T15:32:33.230545Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1380: LoadLockChangeRecords at tablet: 72075186224037888 2026-01-08T15:32:33.230606Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1429: LoadChangeRecordCommits at tablet: 72075186224037888 2026-01-08T15:32:33.231051Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:94: TDataShard::TTxInit::Complete 2026-01-08T15:32:33.231236Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:102: TDataShard::TTxInitRestored::Execute 2026-01-08T15:32:33.231373Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:142: DataShard 72075186224037888 persisting started state actor id [1:901:2765] in generation 1 2026-01-08T15:32:33.247014Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:151: TDataShard::TTxInitRestored::Complete 2026-01-08T15:32:33.305466Z node 1 :TX_DATASHARD INFO: datashard.cpp:418: Switched to work state WaitScheme tabletId 72075186224037888 2026-01-08T15:32:33.305689Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:458: 72075186224037888 not sending time cast registration request in state WaitScheme: missing processing params 2026-01-08T15:32:33.305817Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1259: Change sender created: at tablet: 72075186224037888, actorId: [1:903:2775] 2026-01-08T15:32:33.305858Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1264: Trying to activate change sender: at tablet: 72075186224037888 2026-01-08T15:32:33.305891Z node 1 :TX_DATASHARD INFO: datashard.cpp:1281: Cannot activate change sender: at tablet: 72075186224037888, state: WaitScheme 2026-01-08T15:32:33.305924Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2026-01-08T15:32:33.306175Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3153: StateWork, received event# 2146435072, Sender [1:886:2765], Recipient [1:886:2765]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvProgressTransaction 2026-01-08T15:32:33.306230Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3178: StateWork, processing event TEvPrivate::TEvProgressTransaction 2026-01-08T15:32:33.306589Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 72075186224037888 2026-01-08T15:32:33.306674Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 72075186224037888 2026-01-08T15:32:33.306748Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2026-01-08T15:32:33.306785Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 0 2026-01-08T15:32:33.306821Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:385: Check unit PlanQueue at 72075186224037888 2026-01-08T15:32:33.306854Z node 1 :TX_DATASHARD TRACE: plan_queue_unit.cpp:59: TPlanQueueUnit at 72075186224037888 has no attached operations 2026-01-08T15:32:33.306885Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:403: Unit PlanQueue has no ready operations at 72075186224037888 2026-01-08T15:32:33.306931Z node 1 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186224037888 TxInFly 0 2026-01-08T15:32:33.306973Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 
72075186224037888 2026-01-08T15:32:33.307405Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3153: StateWork, received event# 269877761, Sender [1:887:2766], Recipient [1:886:2765]: NKikimr::TEvTabletPipe::TEvServerConnected 2026-01-08T15:32:33.307455Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3189: StateWork, processing event TEvTabletPipe::TEvServerConnected 2026-01-08T15:32:33.307497Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3740: Server connected at leader tablet# 72075186224037888, clientId# [1:882:2762], serverId# [1:887:2766], sessionId# [0:0:0] 2026-01-08T15:32:33.307571Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3153: StateWork, received event# 269549568, Sender [1:400:2399], Recipient [1:887:2766] 2026-01-08T15:32:33.307604Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3159: StateWork, processing event TEvDataShard::TEvProposeTransaction 2026-01-08T15:32:33.307706Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037888 2026-01-08T15:32:33.307978Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1848: Trying to execute [0:281474976715657] at 72075186224037888 on unit CheckSchemeTx 2026-01-08T15:32:33.308044Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:133: Propose scheme transaction at tablet 72075186224037888 txId 281474976715657 ssId 72057594046644480 seqNo 2:1 2026-01-08T15:32:33.308150Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:221: Prepared scheme transaction txId 281474976715657 at tablet 72075186224037888 2026-01-08T15:32:33.308235Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1883: Execution status for [0:281474976715657] at 72075186224037888 is ExecutedNoMoreRestarts 2026-01-08T15:32:33.308288Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1931: Advance execution plan for [0:281474976715657] at 72075186224037888 executing on unit CheckSchemeTx 2026-01-08T15:32:33.308324Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1937: Add [0:281474976715657] at 72075186224037888 to execution unit StoreSchemeTx 2026-01-08T15:32:33.308356Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1848: Trying to execute [0:281474976715657] at 72075186224037888 on unit StoreSchemeTx 2026-01-08T15:32:33.308650Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1883: Execution status for [0:281474976715657] at 72075186224037888 is DelayCompleteNoMoreRestarts 2026-01-08T15:32:33.308687Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1931: Advance execution plan for [0:281474976715657] at 72075186224037888 executing on unit StoreSchemeTx 2026-01-08T15:32:33.308721Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1937: Add [0:281474976715657] at 72075186224037888 to execution unit FinishPropose 2026-01-08T15:32:33.308754Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1848: Trying to execute [0:281474976715657] at 72075186224037888 on unit FinishPropose 2026-01-08T15:32:33.308798Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1883: Execution status for [0:281474976715657] at 72075186224037888 is DelayComplete 2026-01-08T15:32:33.308832Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1931: Advance execution plan for [0:281474976715657] at 72075186224037888 executing on unit FinishPropose 2026-01-08T15:32:33.308866Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1937: Add [0:281474976715657] at 72075186224037888 to execution unit WaitForPlan 2026-01-08T15:32:33.308912Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1848: Trying to execute [0:281474976715657] at 
72075186224037888 on unit WaitForPlan 2026-01-08T15:32:33.308949Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1853: Operation [0:281474976715657] at 72075186224037888 is not ready to execute on unit WaitForPlan 2026-01-08T15:32:33.310443Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3153: StateWork, received event# 269746185, Sender [1:904:2776], Recipient [1:886:2765]: NKikimr::TEvTxProxySchemeCache::TEvWatchNotifyUpdated 2026-01-08T15:32:33.310494Z node 1 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:180: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2026-01-08T15:32:33.321441Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 72075186224037888 2026-01-08T15:32:33.321518Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1954: Complete execution for [0:281474 ... CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037889 2026-01-08T15:33:42.611868Z node 10 :TX_DATASHARD DEBUG: datashard__readset.cpp:91: TTxReadSet::Complete at 72075186224037889 2026-01-08T15:33:42.612018Z node 10 :TX_DATASHARD TRACE: datashard_impl.h:3153: StateWork, received event# 269287938, Sender [10:921:2784], Recipient [10:1151:2957]: {TEvReadSet step# 3001 txid# 1234567890012 TabletSource# 72075186224037888 TabletDest# 72075186224037889 SetTabletConsumer# 72075186224037889 Flags# 0 Seqno# 2} 2026-01-08T15:33:42.612059Z node 10 :TX_DATASHARD TRACE: datashard_impl.h:3173: StateWork, processing event TEvTxProcessing::TEvReadSetAck 2026-01-08T15:33:42.612102Z node 10 :TX_DATASHARD DEBUG: datashard_outreadset.cpp:150: Receive RS Ack at 72075186224037888 source 72075186224037888 dest 72075186224037889 consumer 72075186224037889 txId 1234567890012 ... 
validating table 2026-01-08T15:33:42.884756Z node 10 :TX_DATASHARD TRACE: datashard_impl.h:3153: StateWork, received event# 269877761, Sender [10:1203:2996], Recipient [10:1151:2957]: NKikimr::TEvTabletPipe::TEvServerConnected 2026-01-08T15:33:42.884863Z node 10 :TX_DATASHARD TRACE: datashard_impl.h:3189: StateWork, processing event TEvTabletPipe::TEvServerConnected 2026-01-08T15:33:42.884946Z node 10 :TX_DATASHARD DEBUG: datashard.cpp:3740: Server connected at leader tablet# 72075186224037888, clientId# [10:1202:2995], serverId# [10:1203:2996], sessionId# [0:0:0] 2026-01-08T15:33:42.901446Z node 10 :TX_DATASHARD TRACE: datashard_impl.h:3153: StateWork, received event# 269553215, Sender [10:1208:2999], Recipient [10:1151:2957]: NKikimrTxDataShard.TEvRead ReadId: 0 TableId { OwnerId: 72057594046644480 TableId: 38 SchemaVersion: 1 } Columns: 1 Columns: 2 Snapshot { Step: 3001 TxId: 18446744073709551615 } ResultFormat: FORMAT_CELLVEC MaxRows: 1001 MaxBytes: 5242880 Reverse: false TotalRowsLimit: 1001 RangesSize: 1 2026-01-08T15:33:42.901655Z node 10 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2697: TTxReadViaPipeline execute: at tablet# 72075186224037888, FollowerId 0 2026-01-08T15:33:42.901774Z node 10 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1848: Trying to execute [0:3] at 72075186224037888 on unit CheckRead 2026-01-08T15:33:42.901910Z node 10 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1883: Execution status for [0:3] at 72075186224037888 is Executed 2026-01-08T15:33:42.901977Z node 10 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1931: Advance execution plan for [0:3] at 72075186224037888 executing on unit CheckRead 2026-01-08T15:33:42.902031Z node 10 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1937: Add [0:3] at 72075186224037888 to execution unit BuildAndWaitDependencies 2026-01-08T15:33:42.902086Z node 10 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1848: Trying to execute [0:3] at 72075186224037888 on unit BuildAndWaitDependencies 2026-01-08T15:33:42.902149Z node 10 :TX_DATASHARD TRACE: datashard_pipeline.cpp:483: Activated operation [0:3] at 72075186224037888 2026-01-08T15:33:42.902214Z node 10 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1883: Execution status for [0:3] at 72075186224037888 is Executed 2026-01-08T15:33:42.902245Z node 10 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1931: Advance execution plan for [0:3] at 72075186224037888 executing on unit BuildAndWaitDependencies 2026-01-08T15:33:42.902272Z node 10 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1937: Add [0:3] at 72075186224037888 to execution unit ExecuteRead 2026-01-08T15:33:42.902300Z node 10 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1848: Trying to execute [0:3] at 72075186224037888 on unit ExecuteRead 2026-01-08T15:33:42.902898Z node 10 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:1748: 72075186224037888 Execute read# 1, request: { ReadId: 0 TableId { OwnerId: 72057594046644480 TableId: 38 SchemaVersion: 1 } Columns: 1 Columns: 2 Snapshot { Step: 3001 TxId: 18446744073709551615 } ResultFormat: FORMAT_CELLVEC MaxRows: 1001 MaxBytes: 5242880 Reverse: false TotalRowsLimit: 1001 } 2026-01-08T15:33:42.903256Z node 10 :TX_DATASHARD TRACE: datashard.cpp:2488: PromoteImmediatePostExecuteEdges at 72075186224037888 promoting UnprotectedReadEdge to v3001/18446744073709551615 2026-01-08T15:33:42.903335Z node 10 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2396: 72075186224037888 Complete read# {[10:1208:2999], 0} after executionsCount# 1 2026-01-08T15:33:42.903423Z node 10 :TX_DATASHARD TRACE: 
datashard__read_iterator.cpp:2370: 72075186224037888 read iterator# {[10:1208:2999], 0} sends rowCount# 2, bytes# 64, quota rows left# 999, quota bytes left# 5242816, hasUnreadQueries# 0, total queries# 1, firstUnprocessed# 0 2026-01-08T15:33:42.903551Z node 10 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2421: 72075186224037888 read iterator# {[10:1208:2999], 0} finished in read 2026-01-08T15:33:42.903679Z node 10 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1883: Execution status for [0:3] at 72075186224037888 is Executed 2026-01-08T15:33:42.903715Z node 10 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1931: Advance execution plan for [0:3] at 72075186224037888 executing on unit ExecuteRead 2026-01-08T15:33:42.903759Z node 10 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1937: Add [0:3] at 72075186224037888 to execution unit CompletedOperations 2026-01-08T15:33:42.903793Z node 10 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1848: Trying to execute [0:3] at 72075186224037888 on unit CompletedOperations 2026-01-08T15:33:42.903850Z node 10 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1883: Execution status for [0:3] at 72075186224037888 is Executed 2026-01-08T15:33:42.903882Z node 10 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1931: Advance execution plan for [0:3] at 72075186224037888 executing on unit CompletedOperations 2026-01-08T15:33:42.903917Z node 10 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1943: Execution plan for [0:3] at 72075186224037888 has finished 2026-01-08T15:33:42.903981Z node 10 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2932: TTxReadViaPipeline(69) Execute with status# Executed at tablet# 72075186224037888 2026-01-08T15:33:42.904138Z node 10 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2981: TTxReadViaPipeline(69) Complete: at tablet# 72075186224037888 2026-01-08T15:33:42.904876Z node 10 :TX_DATASHARD TRACE: datashard_impl.h:3153: StateWork, received event# 269553219, Sender [10:1208:2999], Recipient [10:1151:2957]: NKikimrTxDataShard.TEvReadCancel ReadId: 0 2026-01-08T15:33:42.904953Z node 10 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:3668: 72075186224037888 ReadCancel: { ReadId: 0 } 2026-01-08T15:33:42.905129Z node 10 :TX_DATASHARD TRACE: datashard_impl.h:3153: StateWork, received event# 269553215, Sender [10:1208:2999], Recipient [10:921:2784]: NKikimrTxDataShard.TEvRead ReadId: 1 TableId { OwnerId: 72057594046644480 TableId: 38 SchemaVersion: 1 } Columns: 1 Columns: 2 Snapshot { Step: 3001 TxId: 18446744073709551615 } ResultFormat: FORMAT_CELLVEC MaxRows: 999 MaxBytes: 5242880 Reverse: false TotalRowsLimit: 999 RangesSize: 1 2026-01-08T15:33:42.905239Z node 10 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2697: TTxReadViaPipeline execute: at tablet# 72075186224037889, FollowerId 0 2026-01-08T15:33:42.905302Z node 10 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1848: Trying to execute [0:5] at 72075186224037889 on unit CheckRead 2026-01-08T15:33:42.905368Z node 10 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1883: Execution status for [0:5] at 72075186224037889 is Executed 2026-01-08T15:33:42.905400Z node 10 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1931: Advance execution plan for [0:5] at 72075186224037889 executing on unit CheckRead 2026-01-08T15:33:42.905429Z node 10 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1937: Add [0:5] at 72075186224037889 to execution unit BuildAndWaitDependencies 2026-01-08T15:33:42.905460Z node 10 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1848: Trying to execute [0:5] at 72075186224037889 on unit 
BuildAndWaitDependencies 2026-01-08T15:33:42.905512Z node 10 :TX_DATASHARD TRACE: datashard_pipeline.cpp:483: Activated operation [0:5] at 72075186224037889 2026-01-08T15:33:42.905549Z node 10 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1883: Execution status for [0:5] at 72075186224037889 is Executed 2026-01-08T15:33:42.905579Z node 10 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1931: Advance execution plan for [0:5] at 72075186224037889 executing on unit BuildAndWaitDependencies 2026-01-08T15:33:42.905603Z node 10 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1937: Add [0:5] at 72075186224037889 to execution unit ExecuteRead 2026-01-08T15:33:42.905630Z node 10 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1848: Trying to execute [0:5] at 72075186224037889 on unit ExecuteRead 2026-01-08T15:33:42.905731Z node 10 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:1748: 72075186224037889 Execute read# 1, request: { ReadId: 1 TableId { OwnerId: 72057594046644480 TableId: 38 SchemaVersion: 1 } Columns: 1 Columns: 2 Snapshot { Step: 3001 TxId: 18446744073709551615 } ResultFormat: FORMAT_CELLVEC MaxRows: 999 MaxBytes: 5242880 Reverse: false TotalRowsLimit: 999 } 2026-01-08T15:33:42.905966Z node 10 :TX_DATASHARD TRACE: datashard.cpp:2488: PromoteImmediatePostExecuteEdges at 72075186224037889 promoting UnprotectedReadEdge to v3001/18446744073709551615 2026-01-08T15:33:42.906014Z node 10 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2396: 72075186224037889 Complete read# {[10:1208:2999], 1} after executionsCount# 1 2026-01-08T15:33:42.906055Z node 10 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2370: 72075186224037889 read iterator# {[10:1208:2999], 1} sends rowCount# 3, bytes# 96, quota rows left# 996, quota bytes left# 5242784, hasUnreadQueries# 0, total queries# 1, firstUnprocessed# 0 2026-01-08T15:33:42.906123Z node 10 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2421: 72075186224037889 read iterator# {[10:1208:2999], 1} finished in read 2026-01-08T15:33:42.906174Z node 10 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1883: Execution status for [0:5] at 72075186224037889 is Executed 2026-01-08T15:33:42.906204Z node 10 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1931: Advance execution plan for [0:5] at 72075186224037889 executing on unit ExecuteRead 2026-01-08T15:33:42.906233Z node 10 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1937: Add [0:5] at 72075186224037889 to execution unit CompletedOperations 2026-01-08T15:33:42.906263Z node 10 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1848: Trying to execute [0:5] at 72075186224037889 on unit CompletedOperations 2026-01-08T15:33:42.906306Z node 10 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1883: Execution status for [0:5] at 72075186224037889 is Executed 2026-01-08T15:33:42.906555Z node 10 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1931: Advance execution plan for [0:5] at 72075186224037889 executing on unit CompletedOperations 2026-01-08T15:33:42.906591Z node 10 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1943: Execution plan for [0:5] at 72075186224037889 has finished 2026-01-08T15:33:42.906624Z node 10 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2932: TTxReadViaPipeline(69) Execute with status# Executed at tablet# 72075186224037889 2026-01-08T15:33:42.906723Z node 10 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2981: TTxReadViaPipeline(69) Complete: at tablet# 72075186224037889 2026-01-08T15:33:42.908413Z node 10 :TX_DATASHARD TRACE: datashard_impl.h:3153: StateWork, received event# 269553219, Sender [10:1208:2999], Recipient 
[10:921:2784]: NKikimrTxDataShard.TEvReadCancel ReadId: 1 2026-01-08T15:33:42.908475Z node 10 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:3668: 72075186224037889 ReadCancel: { ReadId: 1 } { items { int32_value: 1 } items { int32_value: 1001 } }, { items { int32_value: 2 } items { int32_value: 1004 } }, { items { int32_value: 11 } items { int32_value: 1002 } }, { items { int32_value: 12 } items { int32_value: 1003 } }, { items { int32_value: 13 } items { int32_value: 1004 } } |87.1%| [TM] {BAZEL_UPLOAD} ydb/core/tx/datashard/ut_write/unittest >> KqpErrors::ResolveTableError [GOOD] >> DataShardVolatile::UpsertNoLocksArbiter-UseSink [GOOD] >> DataShardVolatile::UpsertBrokenLockArbiter+UseSink >> KqpPg::InsertNoTargetColumns_NotOneSize+useSink [GOOD] >> KqpPg::InsertNoTargetColumns_NotOneSize-useSink >> DataShardStats::CollectKeySampleLeader [GOOD] >> DataShardStats::CollectKeySampleFollower >> KqpPg::CheckPgAutoParams+useSink [GOOD] >> KqpPg::CheckPgAutoParams-useSink >> DataShardVolatile::DistributedWriteLostPlanThenSplit [GOOD] >> DataShardVolatile::DistributedOutOfOrderFollowerConsistency ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/datashard/ut_kqp_errors/unittest >> KqpErrors::ResolveTableError [GOOD] Test command err: ... waiting for SysViewsRoster update finished 2026-01-08T15:33:40.218005Z node 2 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:125: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, got undelivered to scheme cache: ActorUnknown 2026-01-08T15:33:40.218800Z node 1 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:125: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, got undelivered to scheme cache: ActorUnknown 2026-01-08T15:33:40.371661Z node 1 :METADATA_PROVIDER ERROR: log.cpp:841: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2026-01-08T15:33:40.372785Z node 2 :METADATA_PROVIDER ERROR: log.cpp:841: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2026-01-08T15:33:40.945444Z node 1 :IMPORT WARN: schemeshard_import.cpp:440: Table profiles were not loaded 2026-01-08T15:33:41.098595Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2026-01-08T15:33:41.098765Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2026-01-08T15:33:41.099321Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2026-01-08T15:33:41.099427Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2026-01-08T15:33:41.169457Z node 1 :HIVE WARN: hive_impl.cpp:814: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 2 Cookie 2 2026-01-08T15:33:41.170409Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2026-01-08T15:33:41.170828Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2026-01-08T15:33:41.273492Z node 1 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script 
execution tables existence, scheme status: PathErrorUnknown, path: Root/.metadata/script_executions 2026-01-08T15:33:41.303154Z node 2 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: PathErrorUnknown, path: Root/.metadata/script_executions ... waiting for SysViewsRoster update finished (done) 2026-01-08T15:33:42.074525Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2026-01-08T15:33:42.145653Z node 2 :TX_CONVEYOR ERROR: log.cpp:841: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2026-01-08T15:33:42.145803Z node 1 :TX_CONVEYOR ERROR: log.cpp:841: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2026-01-08T15:33:42.514180Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:33:43.910141Z node 1 :KQP_EXECUTER DEBUG: kqp_tasks_graph.cpp:92: StageInfo: StageId #[0,0], InputsCount: 0, OutputsCount: 1 2026-01-08T15:33:43.910306Z node 1 :KQP_EXECUTER TRACE: {KQPEX@kqp_executer_impl.h:187} ActorId: [0:0:0] TxId: 0. Ctx: { TraceId: 01kef3w1e95r5x4fjq5376h79z, Database: , SessionId: ydb://session/3?node_id=1&id=ZDk2OWYzMTUtM2QxYjY2N2UtY2VhMjEyZmQtYmExZGNjNzA=, PoolId: , DatabaseId: /Root, IsStreamingQuery: 0}. Bootstrap done, become ReadyState trace_id# 2026-01-08T15:33:43.910658Z node 1 :KQP_EXECUTER DEBUG: {KQPEX@kqp_executer_impl.h:836} ActorId: [1:1856:3216] TxId: 281474976710658. Ctx: { TraceId: 01kef3w1e95r5x4fjq5376h79z, Database: , SessionId: ydb://session/3?node_id=1&id=ZDk2OWYzMTUtM2QxYjY2N2UtY2VhMjEyZmQtYmExZGNjNzA=, PoolId: , DatabaseId: /Root, IsStreamingQuery: 0}. Executing physical tx TxType# 2 Stages# 1 trace_id# 2026-01-08T15:33:43.910769Z node 1 :KQP_EXECUTER TRACE: {KQPEX@kqp_executer_impl.h:851} ActorId: [1:1856:3216] TxId: 281474976710658. Ctx: { TraceId: 01kef3w1e95r5x4fjq5376h79z, Database: , SessionId: ydb://session/3?node_id=1&id=ZDk2OWYzMTUtM2QxYjY2N2UtY2VhMjEyZmQtYmExZGNjNzA=, PoolId: , DatabaseId: /Root, IsStreamingQuery: 0}. Got request, become WaitResolveState trace_id# 2026-01-08T15:33:43.911034Z node 1 :KQP_EXECUTER DEBUG: kqp_table_resolver.cpp:270: TxId: 281474976710658. Resolved key sets: 1 2026-01-08T15:33:43.911240Z node 1 :KQP_EXECUTER DEBUG: kqp_table_resolver.cpp:294: TxId: 281474976710658. 
Resolved key: { TableId: [OwnerId: 72057594046644480, LocalPathId: 38] Access: 2 SyncVersion: false Status: OkData Kind: KindRegularTable PartitionsCount: 4 DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] Params { Version: 1 PlanResolution: 500 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } From: (Uint32 : NULL) IncFrom: 1 To: () IncTo: 0 } 2026-01-08T15:33:43.911716Z node 1 :KQP_EXECUTER DEBUG: kqp_planner.cpp:585: TxId: 281474976710658. Ctx: { TraceId: 01kef3w1e95r5x4fjq5376h79z, Database: , SessionId: ydb://session/3?node_id=1&id=ZDk2OWYzMTUtM2QxYjY2N2UtY2VhMjEyZmQtYmExZGNjNzA=, PoolId: , DatabaseId: /Root, IsStreamingQuery: 0}. Total tasks: 1, readonly: true, 0 scan tasks on 0 nodes, localComputeTasks: 0, MayRunTasksLocally 1, snapshot: {0, 0} 2026-01-08T15:33:43.912240Z node 1 :KQP_EXECUTER DEBUG: kqp_planner.cpp:872: TxId: 281474976710658. Ctx: { TraceId: 01kef3w1e95r5x4fjq5376h79z, Database: , SessionId: ydb://session/3?node_id=1&id=ZDk2OWYzMTUtM2QxYjY2N2UtY2VhMjEyZmQtYmExZGNjNzA=, PoolId: , DatabaseId: /Root, IsStreamingQuery: 0}. Collect channels updates for task: 1 at actor [1:1859:3216] 2026-01-08T15:33:43.912318Z node 1 :KQP_EXECUTER DEBUG: kqp_planner.cpp:864: TxId: 281474976710658. Ctx: { TraceId: 01kef3w1e95r5x4fjq5376h79z, Database: , SessionId: ydb://session/3?node_id=1&id=ZDk2OWYzMTUtM2QxYjY2N2UtY2VhMjEyZmQtYmExZGNjNzA=, PoolId: , DatabaseId: /Root, IsStreamingQuery: 0}. Sending channels info to compute actor: [1:1859:3216], channels: 0 2026-01-08T15:33:43.912421Z node 1 :KQP_EXECUTER INFO: {KQPDATA@kqp_data_executer.cpp:2746} ActorId: [1:1856:3216] TxId: 281474976710658. Ctx: { TraceId: 01kef3w1e95r5x4fjq5376h79z, Database: , SessionId: ydb://session/3?node_id=1&id=ZDk2OWYzMTUtM2QxYjY2N2UtY2VhMjEyZmQtYmExZGNjNzA=, PoolId: , DatabaseId: /Root, IsStreamingQuery: 0}. Total tasks total_tasks# 1 read_only# false datashard_txs# 0 ev_write_txs# 0 topic_txs# 0 volatile# false immediate# true pending_compute_tasks# 0 use_followers# false trace_id# 2026-01-08T15:33:43.912475Z node 1 :KQP_EXECUTER TRACE: {KQPDATA@kqp_data_executer.cpp:2750} ActorId: [1:1856:3216] TxId: 281474976710658. Ctx: { TraceId: 01kef3w1e95r5x4fjq5376h79z, Database: , SessionId: ydb://session/3?node_id=1&id=ZDk2OWYzMTUtM2QxYjY2N2UtY2VhMjEyZmQtYmExZGNjNzA=, PoolId: , DatabaseId: /Root, IsStreamingQuery: 0}. Updating channels after the creation of compute actors trace_id# 2026-01-08T15:33:43.912523Z node 1 :KQP_EXECUTER DEBUG: kqp_planner.cpp:872: TxId: 281474976710658. Ctx: { TraceId: 01kef3w1e95r5x4fjq5376h79z, Database: , SessionId: ydb://session/3?node_id=1&id=ZDk2OWYzMTUtM2QxYjY2N2UtY2VhMjEyZmQtYmExZGNjNzA=, PoolId: , DatabaseId: /Root, IsStreamingQuery: 0}. Collect channels updates for task: 1 at actor [1:1859:3216] 2026-01-08T15:33:43.912580Z node 1 :KQP_EXECUTER DEBUG: kqp_planner.cpp:864: TxId: 281474976710658. Ctx: { TraceId: 01kef3w1e95r5x4fjq5376h79z, Database: , SessionId: ydb://session/3?node_id=1&id=ZDk2OWYzMTUtM2QxYjY2N2UtY2VhMjEyZmQtYmExZGNjNzA=, PoolId: , DatabaseId: /Root, IsStreamingQuery: 0}. Sending channels info to compute actor: [1:1859:3216], channels: 0 2026-01-08T15:33:43.912656Z node 1 :KQP_EXECUTER DEBUG: {KQPEX@kqp_executer_impl.h:884} ActorId: [1:1856:3216] TxId: 281474976710658. 
Ctx: { TraceId: 01kef3w1e95r5x4fjq5376h79z, Database: , SessionId: ydb://session/3?node_id=1&id=ZDk2OWYzMTUtM2QxYjY2N2UtY2VhMjEyZmQtYmExZGNjNzA=, PoolId: , DatabaseId: /Root, IsStreamingQuery: 0}. Waiting for: CA [1:1859:3216], trace_id# 2026-01-08T15:33:43.912729Z node 1 :KQP_EXECUTER DEBUG: {KQPDATA@kqp_data_executer.cpp:168} ActorId: [1:1856:3216] TxId: 281474976710658. Ctx: { TraceId: 01kef3w1e95r5x4fjq5376h79z, Database: , SessionId: ydb://session/3?node_id=1&id=ZDk2OWYzMTUtM2QxYjY2N2UtY2VhMjEyZmQtYmExZGNjNzA=, PoolId: , DatabaseId: /Root, IsStreamingQuery: 0}. ActorState: WaitResolveState, waiting for 1 compute actor(s) and 0 datashard(s): CA [1:1859:3216], trace_id# 2026-01-08T15:33:43.912790Z node 1 :KQP_EXECUTER DEBUG: {KQPDATA@kqp_data_executer.cpp:2350} ActorId: [1:1856:3216] TxId: 281474976710658. Ctx: { TraceId: 01kef3w1e95r5x4fjq5376h79z, Database: , SessionId: ydb://session/3?node_id=1&id=ZDk2OWYzMTUtM2QxYjY2N2UtY2VhMjEyZmQtYmExZGNjNzA=, PoolId: , DatabaseId: /Root, IsStreamingQuery: 0}. Immediate tx, become ExecuteState current_state# WaitResolveState immediate# true trace_id# 2026-01-08T15:33:43.927148Z node 1 :KQP_EXECUTER DEBUG: {KQPEX@kqp_executer_impl.h:570} ActorId: [1:1856:3216] TxId: 281474976710658. Ctx: { TraceId: 01kef3w1e95r5x4fjq5376h79z, Database: , SessionId: ydb://session/3?node_id=1&id=ZDk2OWYzMTUtM2QxYjY2N2UtY2VhMjEyZmQtYmExZGNjNzA=, PoolId: , DatabaseId: /Root, IsStreamingQuery: 0}. Got execution state from compute actor ActorState# ExecuteState ComputeActor# [1:1859:3216] TaskId# 1 State# COMPUTE_STATE_EXECUTING Stats# {} trace_id# 2026-01-08T15:33:43.927300Z node 1 :KQP_EXECUTER DEBUG: {KQPEX@kqp_executer_impl.h:884} ActorId: [1:1856:3216] TxId: 281474976710658. Ctx: { TraceId: 01kef3w1e95r5x4fjq5376h79z, Database: , SessionId: ydb://session/3?node_id=1&id=ZDk2OWYzMTUtM2QxYjY2N2UtY2VhMjEyZmQtYmExZGNjNzA=, PoolId: , DatabaseId: /Root, IsStreamingQuery: 0}. Waiting for: CA [1:1859:3216], trace_id# 2026-01-08T15:33:43.927417Z node 1 :KQP_EXECUTER DEBUG: {KQPDATA@kqp_data_executer.cpp:168} ActorId: [1:1856:3216] TxId: 281474976710658. Ctx: { TraceId: 01kef3w1e95r5x4fjq5376h79z, Database: , SessionId: ydb://session/3?node_id=1&id=ZDk2OWYzMTUtM2QxYjY2N2UtY2VhMjEyZmQtYmExZGNjNzA=, PoolId: , DatabaseId: /Root, IsStreamingQuery: 0}. ActorState: ExecuteState, waiting for 1 compute actor(s) and 0 datashard(s): CA [1:1859:3216], trace_id# 2026-01-08T15:33:43.929089Z node 1 :KQP_EXECUTER DEBUG: {KQPEX@kqp_executer_impl.h:570} ActorId: [1:1856:3216] TxId: 281474976710658. Ctx: { TraceId: 01kef3w1e95r5x4fjq5376h79z, Database: , SessionId: ydb://session/3?node_id=1&id=ZDk2OWYzMTUtM2QxYjY2N2UtY2VhMjEyZmQtYmExZGNjNzA=, PoolId: , DatabaseId: /Root, IsStreamingQuery: 0}. Got execution state from compute actor ActorState# ExecuteState ComputeActor# [1:1859:3216] TaskId# 1 State# COMPUTE_STATE_FINISHED Stats# {CpuTimeUs: 1986 Tasks { TaskId: 1 CpuTimeUs: 1080 FinishTimeMs: 1767886423928 EgressBytes: 30 EgressRows: 3 ComputeCpuTimeUs: 55 BuildCpuTimeUs: 1025 HostName: "ghrun-wgi6cnz7jm" NodeId: 1 CreateTimeMs: 1767886423913 UpdateTimeMs: 1767886423928 } MaxMemoryUsage: 1048576 } trace_id# 2026-01-08T15:33:43.929261Z node 1 :KQP_EXECUTER INFO: kqp_planner.cpp:756: TxId: 281474976710658. Ctx: { TraceId: 01kef3w1e95r5x4fjq5376h79z, Database: , SessionId: ydb://session/3?node_id=1&id=ZDk2OWYzMTUtM2QxYjY2N2UtY2VhMjEyZmQtYmExZGNjNzA=, PoolId: , DatabaseId: /Root, IsStreamingQuery: 0}. 
Compute actor has finished execution: [1:1859:3216] 2026-01-08T15:33:43.929372Z node 1 :KQP_EXECUTER DEBUG: {KQPDATA@kqp_data_executer.cpp:220} ActorId: [1:1856:3216] TxId: 281474976710658. Ctx: { TraceId: 01kef3w1e95r5x4fjq5376h79z, Database: , SessionId: ydb://session/3?node_id=1&id=ZDk2OWYzMTUtM2QxYjY2N2UtY2VhMjEyZmQtYmExZGNjNzA=, PoolId: , DatabaseId: /Root, IsStreamingQuery: 0}. Send Commit to BufferActor buffer_actor_id# [1:1855:3216] trace_id# 2026-01-08T15:33:43.929451Z node 1 :KQP_EXECUTER DEBUG: {KQPEX@kqp_executer_impl.h:1124} ActorId: [1:1856:3216] TxId: 281474976710658. Ctx: { TraceId: 01kef3w1e95r5x4fjq5376h79z, Database: , SessionId: ydb://session/3?node_id=1&id=ZDk2OWYzMTUtM2QxYjY2N2UtY2VhMjEyZmQtYmExZGNjNzA=, PoolId: , DatabaseId: /Root, IsStreamingQuery: 0}. Resource usage for last stat interval, rate limiter was not found Consumption# ComputeTime: 0.001986s ReadRows: 0 ReadBytes: 0 RequestUnits# 1 ForceFlag# true trace_id# 2026-01-08T15:33:43.986225Z node 1 :KQP_EXECUTER DEBUG: {KQPEX@kqp_executer_impl.h:1490} ActorId: [1:1856:3216] TxId: 281474976710658. Ctx: { TraceId: 01kef3w1e95r5x4fjq5376h79z, Database: , SessionId: ydb://session/3?node_id=1&id=ZDk2OWYzMTUtM2QxYjY2N2UtY2VhMjEyZmQtYmExZGNjNzA=, PoolId: , DatabaseId: /Root, IsStreamingQuery: 0}. Terminate execution trace_id# 2026-01-08T15:33:43.986358Z node 1 :KQP_EXECUTER TRACE: {KQPEX@kqp_executer_impl.h:1505} ActorId: [1:1856:3216] TxId: 281474976710658. Ctx: { TraceId: 01kef3w1e95r5x4fjq5376h79z, Database: , SessionId: ydb://session/3?node_id=1&id=ZDk2OWYzMTUtM2QxYjY2N2UtY2VhMjEyZmQtYmExZGNjNzA=, PoolId: , DatabaseId: /Root, IsStreamingQuery: 0}. Terminate, become ZombieState trace_id# 2026-01-08T15:33:44.051477Z node 1 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:625: Compilation failed, self: [1:1874:3234], status: UNAVAILABLE, issues:
: Error: Table metadata loading, code: 1050
:1:1: Error: Failed to load metadata for table: db.[/Root/table-1]
: Error: LookupError, code: 2005 2026-01-08T15:33:44.054531Z node 1 :KQP_SESSION WARN: {KQPSA@kqp_session_actor.cpp:2679} SessionId: ydb://session/3?node_id=1&id=ZWQwN2ZiYjUtZmZhODdiZTEtYzdkOThkZjQtOThiZDQ5YTc=, ActorId: [1:1872:3232], ActorState: ExecuteState, LegacyTraceId: 01kef3w1xq10sxktf9f1rza9ar, ReplyQueryCompileError, remove tx status# UNAVAILABLE issues# { message: "Table metadata loading" issue_code: 1050 severity: 1 issues { position { row: 1 column: 1 } message: "Failed to load metadata for table: db.[/Root/table-1]" end_position { row: 1 column: 1 } severity: 1 issues { message: "LookupError" issue_code: 2005 severity: 1 } } } tx_id# trace_id# |87.1%| [TM] {BAZEL_UPLOAD} ydb/core/tx/datashard/ut_kqp_errors/unittest >> KqpErrors::ProposeError [GOOD] >> KqpErrors::ProposeErrorEvWrite >> BasicUsage::WriteSessionWriteInHandlers >> DataShardWrite::LocksBrokenStats [GOOD] |87.1%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/s3/py3test >> test_s3.py::TestYdbS3TTL::test_s3[table_index_0__SYNC-pk_types9-all_types9-index9---SYNC] [GOOD] |87.1%| [TM] {BAZEL_UPLOAD} ydb/tests/datashard/s3/py3test >> BasicUsage::WriteSessionNoAvailableDatabase [GOOD] >> BasicUsage::WriteSessionSwitchDatabases ------- [TM] {asan, default-linux-x86_64, release} ydb/public/sdk/cpp/src/client/persqueue_public/ut/with_offset_ranges_mode_ut/unittest >> RetryPolicy::RetryWithBatching [GOOD] Test command err: 2026-01-08T15:26:06.878632Z :DEBUG: [db] [sessionid] [cluster] In Reconnect, ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2026-01-08T15:26:06.878664Z :DEBUG: [db] [sessionid] [cluster] New values: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2026-01-08T15:26:06.878681Z :DEBUG: [db] [sessionid] [cluster] Reconnecting session to cluster cluster in 0.000000s 2026-01-08T15:26:06.883204Z :ERROR: [db] [sessionid] [cluster] Got error. Status: INTERNAL_ERROR. Description: 2026-01-08T15:26:06.883286Z :DEBUG: [db] [sessionid] [cluster] In Reconnect, ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2026-01-08T15:26:06.883320Z :DEBUG: [db] [sessionid] [cluster] New values: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2026-01-08T15:26:06.884425Z :DEBUG: [db] [sessionid] [cluster] Reconnecting session to cluster cluster in 0.008959s 2026-01-08T15:26:06.884964Z :ERROR: [db] [sessionid] [cluster] Got error. Status: INTERNAL_ERROR. Description: 2026-01-08T15:26:06.884998Z :DEBUG: [db] [sessionid] [cluster] In Reconnect, ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2026-01-08T15:26:06.885017Z :DEBUG: [db] [sessionid] [cluster] New values: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2026-01-08T15:26:06.885054Z :DEBUG: [db] [sessionid] [cluster] Reconnecting session to cluster cluster in 0.008232s 2026-01-08T15:26:06.885427Z :ERROR: [db] [sessionid] [cluster] Got error. Status: INTERNAL_ERROR. 
Description: 2026-01-08T15:26:06.885456Z :DEBUG: [db] [sessionid] [cluster] In Reconnect, ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2026-01-08T15:26:06.885470Z :DEBUG: [db] [sessionid] [cluster] New values: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2026-01-08T15:26:06.885510Z :DEBUG: [db] [sessionid] [cluster] Reconnecting session to cluster cluster in 0.008344s 2026-01-08T15:26:07.017476Z :TWriteSession_TestPolicy INFO: Random seed for debugging is 1767885967017449 2026-01-08T15:26:07.397175Z node 1 :METADATA_PROVIDER WARN: log.cpp:841: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7593012415526778831:2152];send_to=[0:7307199536658146131:7762515]; 2026-01-08T15:26:07.399520Z node 1 :METADATA_PROVIDER ERROR: log.cpp:841: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2026-01-08T15:26:07.502465Z node 1 :PQ_READ_PROXY DEBUG: caching_service.cpp:44: Direct read cache: : Created 2026-01-08T15:26:07.683004Z node 2 :PQ_READ_PROXY DEBUG: caching_service.cpp:44: Direct read cache: : Created 2026-01-08T15:26:08.205812Z node 2 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: LookupError, path: Root/.metadata/script_executions 2026-01-08T15:26:08.206650Z node 2 :METADATA_PROVIDER ERROR: log.cpp:841: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2026-01-08T15:26:08.341777Z node 1 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: LookupError, path: Root/.metadata/script_executions 2026-01-08T15:26:08.431865Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2026-01-08T15:26:08.432387Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2026-01-08T15:26:08.439694Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2026-01-08T15:26:08.439767Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2026-01-08T15:26:08.492890Z node 1 :HIVE WARN: hive_impl.cpp:814: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 2 Cookie 2 2026-01-08T15:26:08.493135Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2026-01-08T15:26:08.554395Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2026-01-08T15:26:08.654334Z node 1 :TX_CONVEYOR ERROR: log.cpp:841: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2026-01-08T15:26:08.657319Z node 2 :TX_CONVEYOR ERROR: log.cpp:841: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2026-01-08T15:26:08.798811Z node 1 :IMPORT WARN: schemeshard_import.cpp:440: Table profiles were not loaded 2026-01-08T15:26:08.813940Z node 2 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables 
existence, scheme status: PathErrorUnknown, path: Root/.metadata/script_executions 2026-01-08T15:26:08.830955Z node 1 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: PathErrorUnknown, path: Root/.metadata/script_executions 2026-01-08T15:26:10.199812Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: /home/runner/.ya/build/build_root/663n/003ea2/r3tmp/yandexzxlTRV.tmp 2026-01-08T15:26:10.199839Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: /home/runner/.ya/build/build_root/663n/003ea2/r3tmp/yandexzxlTRV.tmp 2026-01-08T15:26:10.200093Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:202: successfully initialized from file: /home/runner/.ya/build/build_root/663n/003ea2/r3tmp/yandexzxlTRV.tmp 2026-01-08T15:26:10.200261Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2026-01-08T15:26:10.883482Z INFO: TTestServer started on Port 7200 GrpcPort 23346 PQClient connected to localhost:23346 2026-01-08T15:26:12.127865Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2026-01-08T15:26:12.396445Z node 1 :METADATA_PROVIDER ERROR: log.cpp:841: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7593012415526778831:2152];send_to=[0:7307199536658146131:7762515]; 2026-01-08T15:26:12.396503Z node 1 :METADATA_PROVIDER ERROR: log.cpp:841: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2026-01-08T15:26:12.637615Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:123: NotifyTxCompletion, unknown transaction, txId: 281474976715660, at schemeshard: 72057594046644480 2026-01-08T15:26:23.230776Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7747: Cannot get console configs 2026-01-08T15:26:23.230801Z node 1 :IMPORT WARN: schemeshard_import.cpp:440: Table profiles were not loaded 2026-01-08T15:26:25.319244Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7593012491684539533:2315], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:26:25.322380Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7593012491684539545:2318], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:26:25.339505Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:263: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:26:25.343178Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7593012491684539557:2320], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:26:25.343233Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:263: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:26:25.401104Z node 1 :KQP_PROXY ERROR: kqp_proxy_service.cpp:1359: TraceId: "01kef3e9bsb4x9yqs5zn46t7a4", Request deadline has expired for 7.614757s seconds 2026-01-08T15:26:25.422500Z node 1 :KQP_PROXY ERROR: kqp_proxy_service.cpp:1359: TraceId: "01kef3efx32z3vd6gvda638eqc", Request deadline has expired for 0.975159s seconds 2026-01-08T15:26:25.422599Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7593012492836191613:2360], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:26:25.435787Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:263: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:26:25.487718Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7593012492836191641:2364], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:26:25.514727Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7593012492836191670:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:26:25.515528Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:263: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:26:25.531092Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7593012492836191672:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:26:25.531975Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:263: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:26:25.539728Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715661:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2026-01-08T15:26:25.833592Z node 2 :TX_PROXY ERROR: schemereq.cpp:624: Actor# [2:7593012491684539558:2147] txid# 281474976720657, issues: { ... 57-efac5b21-85dfc0e8_0 grpc read done: success: 0 data: 2026-01-08T15:33:38.467717Z node 17 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:256: session v1 cookie: 7 sessionId: test-message-group-id|4ea73c27-b6a55c57-efac5b21-85dfc0e8_0 grpc read failed 2026-01-08T15:33:38.467778Z node 17 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:232: session v1 cookie: 7 sessionId: test-message-group-id|4ea73c27-b6a55c57-efac5b21-85dfc0e8_0 grpc closed 2026-01-08T15:33:38.467811Z node 17 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:304: session v1 cookie: 7 sessionId: test-message-group-id|4ea73c27-b6a55c57-efac5b21-85dfc0e8_0 is DEAD 2026-01-08T15:33:38.468779Z node 17 :PQ_WRITE_PROXY DEBUG: writer.cpp:562: TPartitionWriter 72075186224037892 (partition=0) Received event: NActors::TEvents::TEvPoison 2026-01-08T15:33:38.470570Z node 17 :PERSQUEUE DEBUG: pq_impl.cpp:2809: [PQ: 72075186224037892] server disconnected, pipe [17:7593014350888028669:2609] destroyed 2026-01-08T15:33:38.470662Z node 17 :PERSQUEUE DEBUG: partition_write.cpp:140: [72075186224037892][Partition][0][StateIdle] TPartition::DropOwner. 2026-01-08T15:33:38.470714Z node 17 :PERSQUEUE DEBUG: partition.cpp:2329: [72075186224037892][Partition][0][StateIdle] Process user action and tx events 2026-01-08T15:33:38.470740Z node 17 :PERSQUEUE DEBUG: partition.cpp:2388: [72075186224037892][Partition][0][StateIdle] Events: 0, PendingCommits: 0, PendingWrites: 0 2026-01-08T15:33:38.470774Z node 17 :PERSQUEUE DEBUG: partition.cpp:2337: [72075186224037892][Partition][0][StateIdle] Process user action and tx pending commits 2026-01-08T15:33:38.470803Z node 17 :PERSQUEUE DEBUG: partition.cpp:2388: [72075186224037892][Partition][0][StateIdle] Events: 0, PendingCommits: 0, PendingWrites: 0 2026-01-08T15:33:38.470841Z node 17 :PERSQUEUE DEBUG: partition.cpp:2347: [72075186224037892][Partition][0][StateIdle] Try persist 2026-01-08T15:33:38.476401Z node 17 :PERSQUEUE DEBUG: partition.cpp:2329: [72075186224037892][Partition][0][StateIdle] Process user action and tx events 2026-01-08T15:33:38.476454Z node 17 :PERSQUEUE DEBUG: partition.cpp:2388: [72075186224037892][Partition][0][StateIdle] Events: 0, PendingCommits: 0, PendingWrites: 0 2026-01-08T15:33:38.476478Z node 17 :PERSQUEUE DEBUG: partition.cpp:2337: [72075186224037892][Partition][0][StateIdle] Process user action and tx pending commits 2026-01-08T15:33:38.476513Z node 17 :PERSQUEUE DEBUG: partition.cpp:2388: [72075186224037892][Partition][0][StateIdle] Events: 0, PendingCommits: 0, PendingWrites: 0 2026-01-08T15:33:38.476534Z node 17 :PERSQUEUE DEBUG: partition.cpp:2347: [72075186224037892][Partition][0][StateIdle] Try persist 2026-01-08T15:33:38.576840Z node 17 :PERSQUEUE DEBUG: partition.cpp:2329: [72075186224037892][Partition][0][StateIdle] Process user action and tx events 2026-01-08T15:33:38.576905Z node 17 :PERSQUEUE DEBUG: partition.cpp:2388: [72075186224037892][Partition][0][StateIdle] Events: 
...
2026-01-08T15:33:39.702527Z node 17 :PERSQUEUE DEBUG: partition.cpp:2329: [72075186224037892][Partition][0][StateIdle] Process user action and tx events
2026-01-08T15:33:39.702577Z node 17 :PERSQUEUE DEBUG: partition.cpp:2388: [72075186224037892][Partition][0][StateIdle] Events: 0, PendingCommits: 0, PendingWrites: 0
2026-01-08T15:33:39.702599Z node 17 :PERSQUEUE DEBUG: partition.cpp:2337: [72075186224037892][Partition][0][StateIdle] Process user action and tx pending commits
2026-01-08T15:33:39.702638Z node 17 :PERSQUEUE DEBUG: partition.cpp:2388: [72075186224037892][Partition][0][StateIdle] Events: 0, PendingCommits: 0, PendingWrites: 0
2026-01-08T15:33:39.702654Z node 17 :PERSQUEUE DEBUG: partition.cpp:2347: [72075186224037892][Partition][0][StateIdle] Try persist
|87.1%| [TM] {BAZEL_UPLOAD} ydb/public/sdk/cpp/src/client/persqueue_public/ut/with_offset_ranges_mode_ut/unittest
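The PERSQUEUE trace above repeats the partition actor's StateIdle wakeup cycle: drain queued user actions and tx events, report the pending-commit/pending-write counters, then attempt persistence ("Try persist"). A minimal sketch of that loop shape follows; it is illustrative only, not YDB's TPartition implementation, and all names and counters in it are hypothetical.

```cpp
// Illustrative sketch only -- not YDB's TPartition code. Mirrors the
// StateIdle cycle visible in the trace: drain events, check pending
// commits/writes, then attempt persistence.
#include <cstdio>
#include <queue>
#include <string>

struct Event { std::string payload; };

class IdlePartition {
    std::queue<Event> events_;   // user actions and tx events
    int pendingCommits_ = 0;     // hypothetical counters, as in the log
    int pendingWrites_ = 0;

public:
    void Enqueue(Event e) { events_.push(std::move(e)); }

    // One wakeup of the idle loop, matching the log's entry order.
    void RunIdleCycle() {
        std::printf("Process user action and tx events\n");
        std::printf("Events: %zu, PendingCommits: %d, PendingWrites: %d\n",
                    events_.size(), pendingCommits_, pendingWrites_);
        while (!events_.empty()) events_.pop();  // handle each event

        std::printf("Process user action and tx pending commits\n");
        std::printf("Events: %zu, PendingCommits: %d, PendingWrites: %d\n",
                    events_.size(), pendingCommits_, pendingWrites_);

        // "Try persist": only write to storage when there is dirty state.
        if (pendingCommits_ > 0 || pendingWrites_ > 0) {
            std::printf("Persisting batch\n");
            pendingCommits_ = pendingWrites_ = 0;
        } else {
            std::printf("Try persist\n");  // nothing to do, as in the trace
        }
    }
};

int main() {
    IdlePartition p;
    p.RunIdleCycle();  // with empty queues this prints the idle pattern above
}
```

With nothing queued, every cycle is a no-op except the final "Try persist" probe, which is why the trace repeats the same five lines on each wakeup.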
>> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-LocalUser-DropUser-53 [GOOD]
>> BasicUsage::GetAllStartPartitionSessions [GOOD]
>> BasicUsage::PreferredDatabaseNoFallback
>> TTxDataShardReshuffleKMeansScan::MainToPosting [GOOD]
>> TTxDataShardReshuffleKMeansScan::MainToPostingWithOverlap
>> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-LocalUser-DropUser-54
>> KqpErrors::ProposeResultLost_RwTx+UseSink [GOOD]
>> KqpErrors::ProposeResultLost_RwTx-UseSink
>> KqpPg::MkqlTerminate [GOOD]
>> KqpPg::NoSelectFullScan
>> KqpPg::TableSelect-useSink [GOOD]
>> KqpPg::TableInsert+useSink
>> DataShardWrite::DistributedInsertDuplicateWithLocks-Volatile [GOOD]
------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/datashard/ut_write/unittest >> DataShardWrite::LocksBrokenStats [GOOD]
Test command err:
... waiting for SysViewsRoster update finished
2026-01-08T15:32:29.660588Z node 1 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:125: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, got undelivered to scheme cache: ActorUnknown
2026-01-08T15:32:29.788433Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:114:2161], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests }
2026-01-08T15:32:29.788821Z node 1 :METADATA_PROVIDER ERROR: log.cpp:841: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError;
2026-01-08T15:32:29.788981Z node 1 :METADATA_PROVIDER ERROR: log.cpp:841: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError;
2026-01-08T15:32:30.073565Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected
2026-01-08T15:32:30.073740Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting
2026-01-08T15:32:30.178077Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1374: Notification cookie mismatch for subscription [1:34:2081] 1767886346304301 != 1767886346304305
2026-01-08T15:32:30.200140Z node 1 :IMPORT WARN: schemeshard_import.cpp:440: Table profiles were not loaded
2026-01-08T15:32:30.254035Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected
2026-01-08T15:32:30.400847Z node 1 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: PathErrorUnknown, path: Root/.metadata/script_executions
... waiting for SysViewsRoster update finished (done)
2026-01-08T15:32:30.759613Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311)
2026-01-08T15:32:30.777360Z node 1 :TX_CONVEYOR ERROR: log.cpp:841: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup;
2026-01-08T15:32:30.888489Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689)
2026-01-08T15:32:30.940435Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3119: StateInit, received event# 268828672, Sender [1:877:2759], Recipient [1:886:2765]: NKikimr::TEvTablet::TEvBoot
2026-01-08T15:32:30.941723Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3119: StateInit, received event# 268828673, Sender [1:877:2759], Recipient [1:886:2765]: NKikimr::TEvTablet::TEvRestored
2026-01-08T15:32:30.942114Z node 1 :TX_DATASHARD INFO: datashard.cpp:374: TDataShard::OnActivateExecutor: tablet 72075186224037888 actor [1:886:2765]
2026-01-08T15:32:30.942541Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:661: TxInitSchema.Execute
2026-01-08T15:32:30.992822Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateInactive, received event# 268828684, Sender [1:877:2759], Recipient [1:886:2765]: NKikimr::TEvTablet::TEvFollowerSyncComplete
2026-01-08T15:32:30.993638Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:727: TxInitSchema.Complete
2026-01-08T15:32:30.993751Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:50: TDataShard::TTxInit::Execute
2026-01-08T15:32:30.995759Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1324: LoadChangeRecords: QueueSize: 0, at tablet: 72075186224037888
2026-01-08T15:32:30.995885Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1380: LoadLockChangeRecords at tablet: 72075186224037888
2026-01-08T15:32:30.995960Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1429: LoadChangeRecordCommits at tablet: 72075186224037888
2026-01-08T15:32:30.996384Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:94: TDataShard::TTxInit::Complete
2026-01-08T15:32:30.996595Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:102: TDataShard::TTxInitRestored::Execute
2026-01-08T15:32:30.996714Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:142: DataShard 72075186224037888 persisting started state actor id [1:901:2765] in generation 1
2026-01-08T15:32:31.011046Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:151: TDataShard::TTxInitRestored::Complete
2026-01-08T15:32:31.057006Z node 1 :TX_DATASHARD INFO: datashard.cpp:418: Switched to work state WaitScheme tabletId 72075186224037888
2026-01-08T15:32:31.057282Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:458: 72075186224037888 not sending time cast registration request in state WaitScheme: missing processing params
2026-01-08T15:32:31.057412Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1259: Change sender created: at tablet: 72075186224037888, actorId: [1:903:2775]
2026-01-08T15:32:31.057475Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1264: Trying to activate change sender: at tablet: 72075186224037888
2026-01-08T15:32:31.057518Z node 1 :TX_DATASHARD INFO: datashard.cpp:1281: Cannot activate change sender: at tablet: 72075186224037888, state: WaitScheme
2026-01-08T15:32:31.057564Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888
2026-01-08T15:32:31.057863Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3153: StateWork, received event# 2146435072, Sender [1:886:2765], Recipient [1:886:2765]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvProgressTransaction
2026-01-08T15:32:31.057933Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3178: StateWork, processing event TEvPrivate::TEvProgressTransaction
2026-01-08T15:32:31.058545Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 72075186224037888
2026-01-08T15:32:31.058707Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 72075186224037888
2026-01-08T15:32:31.058837Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888
2026-01-08T15:32:31.058884Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 0
2026-01-08T15:32:31.058942Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:385: Check unit PlanQueue at 72075186224037888
2026-01-08T15:32:31.058988Z node 1 :TX_DATASHARD TRACE: plan_queue_unit.cpp:59: TPlanQueueUnit at 72075186224037888 has no attached operations
2026-01-08T15:32:31.059032Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:403: Unit PlanQueue has no ready operations at 72075186224037888
2026-01-08T15:32:31.059073Z node 1 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186224037888 TxInFly 0
2026-01-08T15:32:31.059142Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888
2026-01-08T15:32:31.059745Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3153: StateWork, received event# 269877761, Sender [1:887:2766], Recipient [1:886:2765]: NKikimr::TEvTabletPipe::TEvServerConnected
2026-01-08T15:32:31.059839Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3189: StateWork, processing event TEvTabletPipe::TEvServerConnected
2026-01-08T15:32:31.059895Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3740: Server connected at leader tablet# 72075186224037888, clientId# [1:882:2762], serverId# [1:887:2766], sessionId# [0:0:0]
2026-01-08T15:32:31.060053Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3153: StateWork, received event# 269549568, Sender [1:400:2399], Recipient [1:887:2766]
2026-01-08T15:32:31.060099Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3159: StateWork, processing event TEvDataShard::TEvProposeTransaction
2026-01-08T15:32:31.060273Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037888
2026-01-08T15:32:31.060562Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1848: Trying to execute [0:281474976715657] at 72075186224037888 on unit CheckSchemeTx
2026-01-08T15:32:31.060637Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:133: Propose scheme transaction at tablet 72075186224037888 txId 281474976715657 ssId 72057594046644480 seqNo 2:1
2026-01-08T15:32:31.060827Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:221: Prepared scheme transaction txId 281474976715657 at tablet 72075186224037888
2026-01-08T15:32:31.060915Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1883: Execution status for [0:281474976715657] at 72075186224037888 is ExecutedNoMoreRestarts
2026-01-08T15:32:31.060986Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1931: Advance execution plan for [0:281474976715657] at 72075186224037888 executing on unit CheckSchemeTx
2026-01-08T15:32:31.061043Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1937: Add [0:281474976715657] at 72075186224037888 to execution unit StoreSchemeTx
2026-01-08T15:32:31.061091Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1848: Trying to execute [0:281474976715657] at 72075186224037888 on unit StoreSchemeTx
2026-01-08T15:32:31.061517Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1883: Execution status for [0:281474976715657] at 72075186224037888 is DelayCompleteNoMoreRestarts
2026-01-08T15:32:31.061576Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1931: Advance execution plan for [0:281474976715657] at 72075186224037888 executing on unit StoreSchemeTx
2026-01-08T15:32:31.061621Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1937: Add [0:281474976715657] at 72075186224037888 to execution unit FinishPropose
2026-01-08T15:32:31.061666Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1848: Trying to execute [0:281474976715657] at 72075186224037888 on unit FinishPropose
2026-01-08T15:32:31.061727Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1883: Execution status for [0:281474976715657] at 72075186224037888 is DelayComplete
2026-01-08T15:32:31.061763Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1931: Advance execution plan for [0:281474976715657] at 72075186224037888 executing on unit FinishPropose
2026-01-08T15:32:31.061801Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1937: Add [0:281474976715657] at 72075186224037888 to execution unit WaitForPlan
2026-01-08T15:32:31.061848Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1848: Trying to execute [0:281474976715657] at 72075186224037888 on unit WaitForPlan
2026-01-08T15:32:31.061902Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1853: Operation [0:281474976715657] at 72075186224037888 is not ready to execute on unit WaitForPlan
2026-01-08T15:32:31.063926Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3153: StateWork, received event# 269746185, Sender [1:904:2776], Recipient [1:886:2765]: NKikimr::TEvTxProxySchemeCache::TEvWatchNotifyUpdated
2026-01-08T15:32:31.063991Z node 1 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:180: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888
2026-01-08T15:32:31.074952Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 72075186224037888
2026-01-08T15:32:31.075074Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1954: Complete execution for [0:281474 ... 6710661, Uint64 : 72075186224037888, Uint64 : 72057594046644480, Uint64 : 38) table: [1:997:0]
2026-01-08T15:33:47.297658Z node 11 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1848: Trying to execute [0:6] at 72075186224037888 on unit CheckWrite
2026-01-08T15:33:47.297689Z node 11 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1883: Execution status for [0:6] at 72075186224037888 is Executed
2026-01-08T15:33:47.297715Z node 11 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1931: Advance execution plan for [0:6] at 72075186224037888 executing on unit CheckWrite
2026-01-08T15:33:47.297745Z node 11 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1937: Add [0:6] at 72075186224037888 to execution unit BuildAndWaitDependencies
2026-01-08T15:33:47.297774Z node 11 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1848: Trying to execute [0:6] at 72075186224037888 on unit BuildAndWaitDependencies
2026-01-08T15:33:47.297807Z node 11 :TX_DATASHARD TRACE: datashard.cpp:2377: GetMvccTxVersion at 72075186224037888 CompleteEdge# v2500/0 IncompleteEdge# v{min} UnprotectedReadEdge# v2500/18446744073709551615 ImmediateWriteEdge# v2501/0 ImmediateWriteEdgeReplied# v2501/0
2026-01-08T15:33:47.297852Z node 11 :TX_DATASHARD TRACE: datashard_pipeline.cpp:483: Activated operation [0:6] at 72075186224037888
2026-01-08T15:33:47.297886Z node 11 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1883: Execution status for [0:6] at 72075186224037888 is Executed
2026-01-08T15:33:47.297910Z node 11 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1931: Advance execution plan for [0:6] at 72075186224037888 executing on unit BuildAndWaitDependencies
2026-01-08T15:33:47.297935Z node 11 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1937: Add [0:6] at 72075186224037888 to execution unit BlockFailPoint
2026-01-08T15:33:47.297958Z node 11 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1848: Trying to execute [0:6] at 72075186224037888 on unit BlockFailPoint
2026-01-08T15:33:47.297980Z node 11 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1883: Execution status for [0:6] at 72075186224037888 is Executed
2026-01-08T15:33:47.298003Z node 11 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1931: Advance execution plan for [0:6] at 72075186224037888 executing on unit BlockFailPoint
2026-01-08T15:33:47.298024Z node 11 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1937: Add [0:6] at 72075186224037888 to execution unit ExecuteWrite
2026-01-08T15:33:47.298048Z node 11 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1848: Trying to execute [0:6] at 72075186224037888 on unit ExecuteWrite
2026-01-08T15:33:47.298076Z node 11 :TX_DATASHARD DEBUG: execute_write_unit.cpp:262: Executing write operation for [0:6] at 72075186224037888
2026-01-08T15:33:47.298174Z node 11 :TX_DATASHARD TRACE: datashard_kqp.cpp:815: KqpEraseLock LockId: 281474976710661 DataShard: 72075186224037888 Generation: 1 Counter: 0 SchemeShard: 72057594046644480 PathId: 38
2026-01-08T15:33:47.298227Z node 11 :TX_DATASHARD DEBUG: execute_write_unit.cpp:469: Skip empty write operation for [0:6] at 72075186224037888
2026-01-08T15:33:47.298289Z node 11 :TX_DATASHARD TRACE: execute_write_unit.cpp:48: add locks to result: 0
2026-01-08T15:33:47.298407Z node 11 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1883: Execution status for [0:6] at 72075186224037888 is ExecutedNoMoreRestarts
2026-01-08T15:33:47.298453Z node 11 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1931: Advance execution plan for [0:6] at 72075186224037888 executing on unit ExecuteWrite
2026-01-08T15:33:47.298500Z node 11 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1937: Add [0:6] at 72075186224037888 to execution unit FinishProposeWrite
2026-01-08T15:33:47.298546Z node 11 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1848: Trying to execute [0:6] at 72075186224037888 on unit FinishProposeWrite
2026-01-08T15:33:47.298579Z node 11 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1883: Execution status for [0:6] at 72075186224037888 is DelayComplete
2026-01-08T15:33:47.298605Z node 11 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1931: Advance execution plan for [0:6] at 72075186224037888 executing on unit FinishProposeWrite
2026-01-08T15:33:47.298633Z node 11 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1937: Add [0:6] at 72075186224037888 to execution unit CompletedOperations
2026-01-08T15:33:47.298659Z node 11 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1848: Trying to execute [0:6] at 72075186224037888 on unit CompletedOperations
2026-01-08T15:33:47.298699Z node 11 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1883: Execution status for [0:6] at 72075186224037888 is Executed
2026-01-08T15:33:47.298721Z node 11 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1931: Advance execution plan for [0:6] at 72075186224037888 executing on unit CompletedOperations
2026-01-08T15:33:47.298747Z node 11 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1943: Execution plan for [0:6] at 72075186224037888 has finished
2026-01-08T15:33:47.298800Z node 11 :TX_DATASHARD TRACE: datashard__write.cpp:150: TTxWrite complete: at tablet# 72075186224037888
2026-01-08T15:33:47.298823Z node 11 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1954: Complete execution for [0:6] at 72075186224037888 on unit FinishProposeWrite
2026-01-08T15:33:47.298859Z node 11 :TX_DATASHARD TRACE: finish_propose_write_unit.cpp:163: Propose transaction complete txid 6 at tablet 72075186224037888 send to client, propose latency: 0 ms, status: STATUS_COMPLETED
2026-01-08T15:33:47.298918Z node 11 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888
2026-01-08T15:33:47.300412Z node 11 :TX_DATASHARD TRACE: datashard_impl.h:3153: StateWork, received event# 275709965, Sender [11:70:2117], Recipient [11:886:2765]: NKikimrLongTxService.TEvLockStatus LockId: 281474976710661 LockNode: 11 Status: STATUS_NOT_FOUND
2026-01-08T15:33:47.300980Z node 11 :TX_DATASHARD TRACE: datashard_impl.h:3153: StateWork, received event# 269877761, Sender [11:1108:2914], Recipient [11:886:2765]: NKikimr::TEvTabletPipe::TEvServerConnected
2026-01-08T15:33:47.301025Z node 11 :TX_DATASHARD TRACE: datashard_impl.h:3189: StateWork, processing event TEvTabletPipe::TEvServerConnected
2026-01-08T15:33:47.301094Z node 11 :TX_DATASHARD DEBUG: datashard.cpp:3740: Server connected at leader tablet# 72075186224037888, clientId# [11:1107:2913], serverId# [11:1108:2914], sessionId# [0:0:0]
2026-01-08T15:33:47.301233Z node 11 :TX_DATASHARD TRACE: datashard_impl.h:3153: StateWork, received event# 269553169, Sender [11:1106:2912], Recipient [11:886:2765]: NKikimrTxDataShard.TEvGetInfoRequest
2026-01-08T15:33:47.303816Z node 11 :TX_DATASHARD TRACE: datashard_impl.h:3153: StateWork, received event# 269877761, Sender [11:1111:2917], Recipient [11:886:2765]: NKikimr::TEvTabletPipe::TEvServerConnected
2026-01-08T15:33:47.303906Z node 11 :TX_DATASHARD TRACE: datashard_impl.h:3189: StateWork, processing event TEvTabletPipe::TEvServerConnected
2026-01-08T15:33:47.303968Z node 11 :TX_DATASHARD DEBUG: datashard.cpp:3740: Server connected at leader tablet# 72075186224037888, clientId# [11:1110:2916], serverId# [11:1111:2917], sessionId# [0:0:0]
2026-01-08T15:33:47.304251Z node 11 :TX_DATASHARD TRACE: datashard_impl.h:3153: StateWork, received event# 269553215, Sender [11:1109:2915], Recipient [11:886:2765]: NKikimrTxDataShard.TEvRead ReadId: 1000 TableId { OwnerId: 72057594046644480 TableId: 38 SchemaVersion: 1 } Columns: 1 Columns: 2 ResultFormat: FORMAT_CELLVEC RangesSize: 1
2026-01-08T15:33:47.304386Z node 11 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2697: TTxReadViaPipeline execute: at tablet# 72075186224037888, FollowerId 0
2026-01-08T15:33:47.304453Z node 11 :TX_DATASHARD TRACE: datashard.cpp:2377: GetMvccTxVersion at 72075186224037888 CompleteEdge# v2500/0 IncompleteEdge# v{min} UnprotectedReadEdge# v2500/18446744073709551615 ImmediateWriteEdge# v2501/0 ImmediateWriteEdgeReplied# v2501/18446744073709551615
2026-01-08T15:33:47.304540Z node 11 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2800: 72075186224037888 changed HEAD read to non-repeatable v2501/18446744073709551615
2026-01-08T15:33:47.304625Z node 11 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1848: Trying to execute [0:7] at 72075186224037888 on unit CheckRead
2026-01-08T15:33:47.304747Z node 11 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1883: Execution status for [0:7] at 72075186224037888 is Executed
2026-01-08T15:33:47.304807Z node 11 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1931: Advance execution plan for [0:7] at 72075186224037888 executing on unit CheckRead
2026-01-08T15:33:47.304854Z node 11 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1937: Add [0:7] at 72075186224037888 to execution unit BuildAndWaitDependencies
2026-01-08T15:33:47.304902Z node 11 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1848: Trying to execute [0:7] at 72075186224037888 on unit BuildAndWaitDependencies
2026-01-08T15:33:47.304948Z node 11 :TX_DATASHARD TRACE: datashard_pipeline.cpp:483: Activated operation [0:7] at 72075186224037888
2026-01-08T15:33:47.304992Z node 11 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1883: Execution status for [0:7] at 72075186224037888 is Executed
2026-01-08T15:33:47.305021Z node 11 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1931: Advance execution plan for [0:7] at 72075186224037888 executing on unit BuildAndWaitDependencies
2026-01-08T15:33:47.305038Z node 11 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1937: Add [0:7] at 72075186224037888 to execution unit ExecuteRead
2026-01-08T15:33:47.305055Z node 11 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1848: Trying to execute [0:7] at 72075186224037888 on unit ExecuteRead
2026-01-08T15:33:47.305150Z node 11 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:1748: 72075186224037888 Execute read# 1, request: { ReadId: 1000 TableId { OwnerId: 72057594046644480 TableId: 38 SchemaVersion: 1 } Columns: 1 Columns: 2 ResultFormat: FORMAT_CELLVEC }
2026-01-08T15:33:47.305399Z node 11 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2396: 72075186224037888 Complete read# {[11:1109:2915], 1000} after executionsCount# 1
2026-01-08T15:33:47.305479Z node 11 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2370: 72075186224037888 read iterator# {[11:1109:2915], 1000} sends rowCount# 1, bytes# 32, quota rows left# 18446744073709551614, quota bytes left# 18446744073709551583, hasUnreadQueries# 0, total queries# 1, firstUnprocessed# 0
2026-01-08T15:33:47.305577Z node 11 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2421: 72075186224037888 read iterator# {[11:1109:2915], 1000} finished in read
2026-01-08T15:33:47.305639Z node 11 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1883: Execution status for [0:7] at 72075186224037888 is Executed
2026-01-08T15:33:47.305660Z node 11 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1931: Advance execution plan for [0:7] at 72075186224037888 executing on unit ExecuteRead
2026-01-08T15:33:47.305684Z node 11 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1937: Add [0:7] at 72075186224037888 to execution unit CompletedOperations
2026-01-08T15:33:47.305711Z node 11 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1848: Trying to execute [0:7] at 72075186224037888 on unit CompletedOperations
2026-01-08T15:33:47.305759Z node 11 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1883: Execution status for [0:7] at 72075186224037888 is Executed
2026-01-08T15:33:47.305781Z node 11 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1931: Advance execution plan for [0:7] at 72075186224037888 executing on unit CompletedOperations
2026-01-08T15:33:47.305809Z node 11 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1943: Execution plan for [0:7] at 72075186224037888 has finished
2026-01-08T15:33:47.305857Z node 11 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2932: TTxReadViaPipeline(69) Execute with status# Executed at tablet# 72075186224037888
2026-01-08T15:33:47.305981Z node 11 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2981: TTxReadViaPipeline(69) Complete: at tablet# 72075186224037888
|87.1%| [TM] {BAZEL_UPLOAD} ydb/core/tx/datashard/ut_write/unittest
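The trace above walks each operation through an ordered list of execution units (CheckWrite -> BuildAndWaitDependencies -> BlockFailPoint -> ExecuteWrite -> FinishProposeWrite -> CompletedOperations), where every unit returns a status such as Executed, DelayComplete, or "not ready" (as with WaitForPlan earlier). Below is a minimal sketch of that pipeline pattern; it is not YDB's TPipeline API, and the unit names and statuses are simplified assumptions taken from the log.

```cpp
// Illustrative sketch of a unit-based execution pipeline like the one the
// TX_DATASHARD trace walks through. Not YDB's actual implementation.
#include <cstdio>
#include <functional>
#include <string>
#include <vector>

enum class EStatus { Executed, DelayComplete, NotReady };

struct Unit {
    std::string name;
    std::function<EStatus()> run;
};

int main() {
    std::vector<Unit> plan = {
        {"CheckWrite",               [] { return EStatus::Executed; }},
        {"BuildAndWaitDependencies", [] { return EStatus::Executed; }},
        {"ExecuteWrite",             [] { return EStatus::Executed; }},
        {"FinishProposeWrite",       [] { return EStatus::DelayComplete; }},
        {"CompletedOperations",      [] { return EStatus::Executed; }},
    };

    std::vector<std::string> delayed;  // units whose completion is deferred
    for (const auto& unit : plan) {
        std::printf("Trying to execute [0:6] on unit %s\n", unit.name.c_str());
        EStatus st = unit.run();
        if (st == EStatus::NotReady) break;  // e.g. WaitForPlan in the trace
        if (st == EStatus::DelayComplete) delayed.push_back(unit.name);
        std::printf("Advance execution plan executing on unit %s\n",
                    unit.name.c_str());
    }
    // The "Complete execution ... on unit FinishProposeWrite" entry in the
    // trace corresponds to this deferred phase, run after the tx commits.
    for (const auto& name : delayed)
        std::printf("Complete execution on unit %s\n", name.c_str());
}
```

This mirrors why the log shows FinishProposeWrite twice: once when the unit reports DelayComplete during execution, and again when the deferred completion runs after TTxWrite commits.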
>> BasicUsage::BasicWriteSession
>> BasicUsage::FallbackToSingleDb [GOOD]
>> BasicUsage::FallbackToSingleDbAfterBadRequest
>> TxUsage::WriteToTopic_Demo_18_RestartNo_Table [GOOD]
>> KqpScripting::StreamExecuteYqlScriptScanWriteCancelAfterBruteForced [GOOD]
>> KqpScripting::StreamExecuteYqlScriptScanScalar
>> TxUsage::WriteToTopic_Demo_20_RestartNo_Table [GOOD]
>> BasicUsage::SelectDatabaseByHash [GOOD]
>> BasicUsage::SelectDatabase [GOOD]
>> TKeyValueTest::TestBasicWriteRead
>> TxUsage::Sinks_Oltp_WriteToTopic_5_Table [GOOD]
>> TxUsage::WriteToTopic_Demo_18_RestartNo_Query
------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/datashard/ut_write/unittest >> DataShardWrite::DistributedInsertDuplicateWithLocks-Volatile [GOOD]
Test command err:
... waiting for SysViewsRoster update finished
2026-01-08T15:32:34.748719Z node 1 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:125: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, got undelivered to scheme cache: ActorUnknown
2026-01-08T15:32:34.883968Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:114:2161], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests }
2026-01-08T15:32:34.884373Z node 1 :METADATA_PROVIDER ERROR: log.cpp:841: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError;
2026-01-08T15:32:34.884544Z node 1 :METADATA_PROVIDER ERROR: log.cpp:841: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError;
2026-01-08T15:32:35.265732Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected
2026-01-08T15:32:35.265866Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting
2026-01-08T15:32:35.470593Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1374: Notification cookie mismatch for subscription [1:34:2081] 1767886350667056 != 1767886350667060
2026-01-08T15:32:35.507784Z node 1 :IMPORT WARN: schemeshard_import.cpp:440: Table profiles were not loaded
2026-01-08T15:32:35.589977Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected
2026-01-08T15:32:35.727185Z node 1 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: PathErrorUnknown, path: Root/.metadata/script_executions
... waiting for SysViewsRoster update finished (done)
2026-01-08T15:32:36.146759Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311)
2026-01-08T15:32:36.163048Z node 1 :TX_CONVEYOR ERROR: log.cpp:841: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup;
2026-01-08T15:32:36.279218Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689)
2026-01-08T15:32:36.324405Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3119: StateInit, received event# 268828672, Sender [1:881:2763], Recipient [1:897:2773]: NKikimr::TEvTablet::TEvBoot
2026-01-08T15:32:36.325431Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3119: StateInit, received event# 268828673, Sender [1:881:2763], Recipient [1:897:2773]: NKikimr::TEvTablet::TEvRestored
2026-01-08T15:32:36.325657Z node 1 :TX_DATASHARD INFO: datashard.cpp:374: TDataShard::OnActivateExecutor: tablet 72075186224037888 actor [1:897:2773]
2026-01-08T15:32:36.325888Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:661: TxInitSchema.Execute
2026-01-08T15:32:36.336866Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3119: StateInit, received event# 268828672, Sender [1:883:2764], Recipient [1:899:2775]: NKikimr::TEvTablet::TEvBoot
2026-01-08T15:32:36.383180Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateInactive, received event# 268828684, Sender [1:881:2763], Recipient [1:897:2773]: NKikimr::TEvTablet::TEvFollowerSyncComplete
2026-01-08T15:32:36.383650Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3119: StateInit, received event# 268828673, Sender [1:883:2764], Recipient [1:899:2775]: NKikimr::TEvTablet::TEvRestored
2026-01-08T15:32:36.383980Z node 1 :TX_DATASHARD INFO: datashard.cpp:374: TDataShard::OnActivateExecutor: tablet 72075186224037889 actor [1:899:2775]
2026-01-08T15:32:36.384234Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:661: TxInitSchema.Execute
2026-01-08T15:32:36.394231Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateInactive, received event# 268828684, Sender [1:883:2764], Recipient [1:899:2775]: NKikimr::TEvTablet::TEvFollowerSyncComplete
2026-01-08T15:32:36.398052Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:727: TxInitSchema.Complete
2026-01-08T15:32:36.398236Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:50: TDataShard::TTxInit::Execute
2026-01-08T15:32:36.400169Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1324: LoadChangeRecords: QueueSize: 0, at tablet: 72075186224037888
2026-01-08T15:32:36.400289Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1380: LoadLockChangeRecords at tablet: 72075186224037888
2026-01-08T15:32:36.400375Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1429: LoadChangeRecordCommits at tablet: 72075186224037888
2026-01-08T15:32:36.400834Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:94: TDataShard::TTxInit::Complete
2026-01-08T15:32:36.401143Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:102: TDataShard::TTxInitRestored::Execute
2026-01-08T15:32:36.401274Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:142: DataShard 72075186224037888 persisting started state actor id [1:928:2773] in generation 1
2026-01-08T15:32:36.401763Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:727: TxInitSchema.Complete
2026-01-08T15:32:36.401881Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:50: TDataShard::TTxInit::Execute
2026-01-08T15:32:36.403695Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1324: LoadChangeRecords: QueueSize: 0, at tablet: 72075186224037889
2026-01-08T15:32:36.403771Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1380: LoadLockChangeRecords at tablet: 72075186224037889
2026-01-08T15:32:36.403846Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1429: LoadChangeRecordCommits at tablet: 72075186224037889
2026-01-08T15:32:36.404174Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:94: TDataShard::TTxInit::Complete
2026-01-08T15:32:36.404305Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:102: TDataShard::TTxInitRestored::Execute
2026-01-08T15:32:36.404378Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:142: DataShard 72075186224037889 persisting started state actor id [1:929:2775] in generation 1
2026-01-08T15:32:36.417939Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:151: TDataShard::TTxInitRestored::Complete
2026-01-08T15:32:36.463704Z node 1 :TX_DATASHARD INFO: datashard.cpp:418: Switched to work state WaitScheme tabletId 72075186224037888
2026-01-08T15:32:36.463980Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:458: 72075186224037888 not sending time cast registration request in state WaitScheme: missing processing params
2026-01-08T15:32:36.464156Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1259: Change sender created: at tablet: 72075186224037888, actorId: [1:932:2794]
2026-01-08T15:32:36.464210Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1264: Trying to activate change sender: at tablet: 72075186224037888
2026-01-08T15:32:36.464259Z node 1 :TX_DATASHARD INFO: datashard.cpp:1281: Cannot activate change sender: at tablet: 72075186224037888, state: WaitScheme
2026-01-08T15:32:36.464298Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888
2026-01-08T15:32:36.464547Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3153: StateWork, received event# 2146435072, Sender [1:897:2773], Recipient [1:897:2773]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvProgressTransaction
2026-01-08T15:32:36.464611Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3178: StateWork, processing event TEvPrivate::TEvProgressTransaction
2026-01-08T15:32:36.465056Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:151: TDataShard::TTxInitRestored::Complete
2026-01-08T15:32:36.465103Z node 1 :TX_DATASHARD INFO: datashard.cpp:418: Switched to work state WaitScheme tabletId 72075186224037889
2026-01-08T15:32:36.465201Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:458: 72075186224037889 not sending time cast registration request in state WaitScheme: missing processing params
2026-01-08T15:32:36.465266Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1259: Change sender created: at tablet: 72075186224037889, actorId: [1:933:2795]
2026-01-08T15:32:36.465293Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1264: Trying to activate change sender: at tablet: 72075186224037889
2026-01-08T15:32:36.465316Z node 1 :TX_DATASHARD INFO: datashard.cpp:1281: Cannot activate change sender: at tablet: 72075186224037889, state: WaitScheme
2026-01-08T15:32:36.465354Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037889
2026-01-08T15:32:36.465527Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 72075186224037888
2026-01-08T15:32:36.465647Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 72075186224037888
2026-01-08T15:32:36.465740Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3153: StateWork, received event# 2146435072, Sender [1:899:2775], Recipient [1:899:2775]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvProgressTransaction
2026-01-08T15:32:36.465795Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3178: StateWork, processing event TEvPrivate::TEvProgressTransaction
2026-01-08T15:32:36.465982Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888
2026-01-08T15:32:36.466037Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 0
2026-01-08T15:32:36.466103Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:385: Check unit PlanQueue at 72075186224037888
2026-01-08T15:32:36.466156Z node 1 :TX_DATASHARD TRACE: plan_queue_unit.cpp:59: TPlanQueueUnit at 72075186224037888 has no attached operations
2026-01-08T15:32:36.466198Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:403: Unit PlanQueue has no ready operations at 72075186224037888
2026-01-08T15:32:36.466235Z node 1 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186224037888 TxInFly 0
2026-01-08T15:32:36.466285Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888
2026-01-08T15:32:36.466493Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3153: StateWork, received event# 269877761, Sender [1:901:2776], Recipient [1:897:2773]: NKikimr::TEvTabletPipe::TEvServerConnected
2026-01-08T15:32:36.466537Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3189: StateWork, processing event TEvTabletPipe::TEvServerConnected
2026-01-08T15:32:36.466598Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3740: Server connected at leader tablet# 72075186224037888, clientId# [1:890:2769], serverId# [1:901:2776], sessionId# [0:0:0]
2026-01-08T15:32:36.466652Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 72075186224037889
2026-01-08T15:32:36.466746Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 72075186224037889
2026-01-08T15:32:36.467151Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3153: StateWork, received event# 269549568, Sender [1:400:2399], Recipient [1:901:2776]
2026-01-08T15:32:36.467215Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3159: StateWork, processing event TEvDataShard::TEvProposeTransaction
2026-01-08T15:32:36.467342Z nod ... k, processing event TEvTxProcessing::TEvReadSetAck
2026-01-08T15:33:48.692534Z node 11 :TX_DATASHARD DEBUG: datashard_outreadset.cpp:150: Receive RS Ack at 72075186224037889 source 72075186224037889 dest 72075186224037888 consumer 72075186224037888 txId 1234567890011
2026-01-08T15:33:48.692872Z node 11 :GLOBAL WARN: log.cpp:841: fline=events.h:105;event=ev_write_error;status=STATUS_ABORTED;details=Distributed transaction aborted due to commit failure;tx_id=1234567890011;
2026-01-08T15:33:48.693138Z node 11 :TX_DATASHARD ERROR: datashard.cpp:760: Complete volatile write [3000 : 1234567890011] from 72075186224037889 at tablet 72075186224037889, error: Status: STATUS_ABORTED Issues: { message: "Distributed transaction aborted due to commit failure" issue_code: 2011 severity: 1 }
2026-01-08T15:33:48.693228Z node 11 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037889
2026-01-08T15:33:48.693348Z node 11 :TX_DATASHARD TRACE: datashard_impl.h:3153: StateWork, received event# 269287938, Sender [11:921:2784], Recipient [11:918:2782]: {TEvReadSet step# 3000 txid# 1234567890011 TabletSource# 72075186224037888 TabletDest# 72075186224037889 SetTabletConsumer# 72075186224037889 Flags# 0 Seqno# 2}
2026-01-08T15:33:48.693382Z node 11 :TX_DATASHARD TRACE: datashard_impl.h:3173: StateWork, processing event TEvTxProcessing::TEvReadSetAck
2026-01-08T15:33:48.693416Z node 11 :TX_DATASHARD DEBUG: datashard_outreadset.cpp:150: Receive RS Ack at 72075186224037888 source 72075186224037888 dest 72075186224037889 consumer 72075186224037889 txId 1234567890011
2026-01-08T15:33:48.964269Z node 11 :TX_DATASHARD TRACE: datashard_impl.h:3153: StateWork, received event# 269553215, Sender [11:1164:2967], Recipient [11:918:2782]: NKikimrTxDataShard.TEvRead ReadId: 0 TableId { OwnerId: 72057594046644480 TableId: 38 SchemaVersion: 1 } Columns: 1 Columns: 2 Snapshot { Step: 3000 TxId: 18446744073709551615 } ResultFormat: FORMAT_CELLVEC MaxRows: 1001 MaxBytes: 5242880 Reverse: false TotalRowsLimit: 1001 RangesSize: 1
2026-01-08T15:33:48.964511Z node 11 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2697: TTxReadViaPipeline execute: at tablet# 72075186224037888, FollowerId 0
2026-01-08T15:33:48.964648Z node 11 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1848: Trying to execute [0:5] at 72075186224037888 on unit CheckRead
2026-01-08T15:33:48.964830Z node 11 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1883: Execution status for [0:5] at 72075186224037888 is Executed
2026-01-08T15:33:48.964899Z node 11 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1931: Advance execution plan for [0:5] at 72075186224037888 executing on unit CheckRead
2026-01-08T15:33:48.964959Z node 11 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1937: Add [0:5] at 72075186224037888 to execution unit BuildAndWaitDependencies
2026-01-08T15:33:48.965012Z node 11 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1848: Trying to execute [0:5] at 72075186224037888 on unit BuildAndWaitDependencies
2026-01-08T15:33:48.965070Z node 11 :TX_DATASHARD TRACE: datashard_pipeline.cpp:483: Activated operation [0:5] at 72075186224037888
2026-01-08T15:33:48.965124Z node 11 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1883: Execution status for [0:5] at 72075186224037888 is Executed
2026-01-08T15:33:48.965152Z node 11 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1931: Advance execution plan for [0:5] at 72075186224037888 executing on unit BuildAndWaitDependencies
2026-01-08T15:33:48.965179Z node 11 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1937: Add [0:5] at 72075186224037888 to execution unit ExecuteRead
2026-01-08T15:33:48.965204Z node 11 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1848: Trying to execute [0:5] at 72075186224037888 on unit ExecuteRead
2026-01-08T15:33:48.965365Z node 11 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:1748: 72075186224037888 Execute read# 1, request: { ReadId: 0 TableId { OwnerId: 72057594046644480 TableId: 38 SchemaVersion: 1 } Columns: 1 Columns: 2 Snapshot { Step: 3000 TxId: 18446744073709551615 } ResultFormat: FORMAT_CELLVEC MaxRows: 1001 MaxBytes: 5242880 Reverse: false TotalRowsLimit: 1001 }
2026-01-08T15:33:48.965689Z node 11 :TX_DATASHARD TRACE: datashard.cpp:2488: PromoteImmediatePostExecuteEdges at 72075186224037888 promoting UnprotectedReadEdge to v3000/18446744073709551615
2026-01-08T15:33:48.965765Z node 11 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2396: 72075186224037888 Complete read# {[11:1164:2967], 0} after executionsCount# 1
2026-01-08T15:33:48.965834Z node 11 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2370: 72075186224037888 read iterator# {[11:1164:2967], 0} sends rowCount# 1, bytes# 32, quota rows left# 1000, quota bytes left# 5242848, hasUnreadQueries# 0, total queries# 1, firstUnprocessed# 0
2026-01-08T15:33:48.965946Z node 11 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2421: 72075186224037888 read iterator# {[11:1164:2967], 0} finished in read
2026-01-08T15:33:48.966048Z node 11 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1883: Execution status for [0:5] at 72075186224037888 is Executed
2026-01-08T15:33:48.966079Z node 11 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1931: Advance execution plan for [0:5] at 72075186224037888 executing on unit ExecuteRead
2026-01-08T15:33:48.966107Z node 11 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1937: Add [0:5] at 72075186224037888 to execution unit CompletedOperations
2026-01-08T15:33:48.966137Z node 11 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1848: Trying to execute [0:5] at 72075186224037888 on unit CompletedOperations
2026-01-08T15:33:48.966190Z node 11 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1883: Execution status for [0:5] at 72075186224037888 is Executed
2026-01-08T15:33:48.966218Z node 11 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1931: Advance execution plan for [0:5] at 72075186224037888 executing on unit CompletedOperations
2026-01-08T15:33:48.966249Z node 11 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1943: Execution plan for [0:5] at 72075186224037888 has finished
2026-01-08T15:33:48.966304Z node 11 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2932: TTxReadViaPipeline(69) Execute with status# Executed at tablet# 72075186224037888
2026-01-08T15:33:48.966466Z node 11 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2981: TTxReadViaPipeline(69) Complete: at tablet# 72075186224037888
2026-01-08T15:33:48.967190Z node 11 :TX_DATASHARD TRACE: datashard_impl.h:3153: StateWork, received event# 269553219, Sender [11:1164:2967], Recipient [11:918:2782]: NKikimrTxDataShard.TEvReadCancel ReadId: 0
2026-01-08T15:33:48.967266Z node 11 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:3668: 72075186224037888 ReadCancel: { ReadId: 0 }
2026-01-08T15:33:48.967439Z node 11 :TX_DATASHARD TRACE: datashard_impl.h:3153: StateWork, received event# 269553215, Sender [11:1164:2967], Recipient [11:921:2784]: NKikimrTxDataShard.TEvRead ReadId: 1 TableId { OwnerId: 72057594046644480 TableId: 38 SchemaVersion: 1 } Columns: 1 Columns: 2 Snapshot { Step: 3000 TxId: 18446744073709551615 } ResultFormat: FORMAT_CELLVEC MaxRows: 1000 MaxBytes: 5242880 Reverse: false TotalRowsLimit: 1000 RangesSize: 1
2026-01-08T15:33:48.967543Z node 11 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2697: TTxReadViaPipeline execute: at tablet# 72075186224037889, FollowerId 0
2026-01-08T15:33:48.967602Z node 11 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1848: Trying to execute [0:5] at 72075186224037889 on unit CheckRead
2026-01-08T15:33:48.967666Z node 11 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1883: Execution status for [0:5] at 72075186224037889 is Executed
2026-01-08T15:33:48.967695Z node 11 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1931: Advance execution plan for [0:5] at 72075186224037889 executing on unit CheckRead
2026-01-08T15:33:48.967723Z node 11 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1937: Add [0:5] at 72075186224037889 to execution unit BuildAndWaitDependencies
2026-01-08T15:33:48.967771Z node 11 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1848: Trying to execute [0:5] at 72075186224037889 on unit BuildAndWaitDependencies
2026-01-08T15:33:48.967816Z node 11 :TX_DATASHARD TRACE: datashard_pipeline.cpp:483: Activated operation [0:5] at 72075186224037889
2026-01-08T15:33:48.967851Z node 11 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1883: Execution status for [0:5] at 72075186224037889 is Executed
2026-01-08T15:33:48.967879Z node 11 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1931: Advance execution plan for [0:5] at 72075186224037889 executing on unit BuildAndWaitDependencies
2026-01-08T15:33:48.967902Z node 11 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1937: Add [0:5] at 72075186224037889 to execution unit ExecuteRead
2026-01-08T15:33:48.967932Z node 11 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1848: Trying to execute [0:5] at 72075186224037889 on unit ExecuteRead
2026-01-08T15:33:48.968027Z node 11 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:1748: 72075186224037889 Execute read# 1, request: { ReadId: 1 TableId { OwnerId: 72057594046644480 TableId: 38 SchemaVersion: 1 } Columns: 1 Columns: 2 Snapshot { Step: 3000 TxId: 18446744073709551615 } ResultFormat: FORMAT_CELLVEC MaxRows: 1000 MaxBytes: 5242880 Reverse: false TotalRowsLimit: 1000 }
2026-01-08T15:33:48.968239Z node 11 :TX_DATASHARD TRACE: datashard.cpp:2488: PromoteImmediatePostExecuteEdges at 72075186224037889 promoting UnprotectedReadEdge to v3000/18446744073709551615
2026-01-08T15:33:48.968284Z node 11 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2396: 72075186224037889 Complete read# {[11:1164:2967], 1} after executionsCount# 1
2026-01-08T15:33:48.968324Z node 11 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2370: 72075186224037889 read iterator# {[11:1164:2967], 1} sends rowCount# 1, bytes# 32, quota rows left# 999, quota bytes left# 5242848, hasUnreadQueries# 0, total queries# 1, firstUnprocessed# 0
2026-01-08T15:33:48.968390Z node 11 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2421: 72075186224037889 read iterator# {[11:1164:2967], 1} finished in read
2026-01-08T15:33:48.968438Z node 11 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1883: Execution status for [0:5] at 72075186224037889 is Executed
2026-01-08T15:33:48.968468Z node 11 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1931: Advance execution plan for [0:5] at 72075186224037889 executing on unit ExecuteRead
2026-01-08T15:33:48.968495Z node 11 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1937: Add [0:5] at 72075186224037889 to execution unit CompletedOperations
2026-01-08T15:33:48.968521Z node 11 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1848: Trying to execute [0:5] at 72075186224037889 on unit CompletedOperations
2026-01-08T15:33:48.968563Z node 11 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1883: Execution status for [0:5] at 72075186224037889 is Executed
2026-01-08T15:33:48.968585Z node 11 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1931: Advance execution plan for [0:5] at 72075186224037889 executing on unit CompletedOperations
2026-01-08T15:33:48.968616Z node 11 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1943: Execution plan for [0:5] at 72075186224037889 has finished
2026-01-08T15:33:48.968645Z node 11 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2932: TTxReadViaPipeline(69) Execute with status# Executed at tablet# 72075186224037889
2026-01-08T15:33:48.968727Z node 11 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2981: TTxReadViaPipeline(69) Complete: at tablet# 72075186224037889
2026-01-08T15:33:48.970155Z node 11 :TX_DATASHARD TRACE: datashard_impl.h:3153: StateWork, received event# 269553219, Sender [11:1164:2967], Recipient [11:921:2784]: NKikimrTxDataShard.TEvReadCancel ReadId: 1
2026-01-08T15:33:48.970227Z node 11 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:3668: 72075186224037889 ReadCancel: { ReadId: 1 }
{ items { int32_value: 1 } items { int32_value: 1001 } }, { items { int32_value: 11 } items { int32_value: 1002 } }
|87.1%| [TM] {BAZEL_UPLOAD} ydb/core/tx/datashard/ut_write/unittest
|87.1%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/grpc_services/ut/ydb-core-grpc_services-ut
|87.1%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/grpc_services/ut/ydb-core-grpc_services-ut
|87.1%| [LD] {RESULT} $(B)/ydb/core/grpc_services/ut/ydb-core-grpc_services-ut
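The read-iterator traces above show per-batch quota bookkeeping: the request carries MaxRows: 1000 and MaxBytes: 5242880, and after one 32-byte row the iterator reports "quota rows left# 999, quota bytes left# 5242848". The sketch below models that accounting only; it is not YDB's TReadIteratorState, and the type and method names are hypothetical.

```cpp
// Illustrative sketch of read-quota accounting, matching the numbers in the
// trace above. Not YDB's actual implementation.
#include <cstdint>
#include <cstdio>

struct ReadQuota {
    uint64_t rowsLeft;
    uint64_t bytesLeft;

    // Charge one result batch against the quota; returns false when the
    // iterator must pause until the client grants more quota.
    bool Charge(uint64_t rows, uint64_t bytes) {
        if (rows > rowsLeft || bytes > bytesLeft) return false;
        rowsLeft -= rows;
        bytesLeft -= bytes;
        return true;
    }
};

int main() {
    ReadQuota quota{1000, 5242880};  // MaxRows / MaxBytes from the request
    if (quota.Charge(/*rows=*/1, /*bytes=*/32)) {
        // Prints 999 and 5242848, the values reported in the log.
        std::printf("sends rowCount# 1, bytes# 32, quota rows left# %llu, "
                    "quota bytes left# %llu\n",
                    (unsigned long long)quota.rowsLeft,
                    (unsigned long long)quota.bytesLeft);
    }
}
```

Note the first read in the trace used near-unbounded quota (rows left# 18446744073709551614, i.e. 2^64 - 2 after one row), while the second used the explicit 1000-row limit; the same decrement logic covers both cases.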
>> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-LocalUser-CreateUser-6 [GOOD]
>> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-LocalUser-CreateUser-7
|87.1%| [TM] {asan, default-linux-x86_64, release} ydb/public/sdk/cpp/src/client/federated_topic/ut/unittest >> BasicUsage::SelectDatabase [GOOD]
|87.1%| [TM] {BAZEL_UPLOAD} ydb/public/sdk/cpp/src/client/federated_topic/ut/unittest
>> BasicUsage::AlterTopicWithSharedConsumer_SetMoveDeadLetterPolicy [GOOD]
>> BasicUsage::AlterTopicWithSharedConsumer_AlterMoveDeadLetterPolicy
>> KqpPg::Returning-useSink [GOOD]
>> KqpPg::SelectIndex+useSink
>> TxUsage::WriteToTopic_Demo_20_RestartNo_Query
>> TKeyValueTest::TestWriteDeleteThenReadRemaining
>> TxUsage::Sinks_Oltp_WriteToTopic_5_Query
>> TSchemeshardStatsBatchingTest::ShouldPersistByBatchTimeout [GOOD]
|87.1%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/s3/py3test >> test_s3.py::TestYdbS3TTL::test_s3[table_index_1__ASYNC-pk_types10-all_types10-index10---ASYNC] [GOOD]
|87.1%| [TM] {BAZEL_UPLOAD} ydb/tests/datashard/s3/py3test
|87.1%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/s3/py3test >> test_s3.py::TestYdbS3TTL::test_s3[table_index_0__ASYNC-pk_types11-all_types11-index11---ASYNC] [GOOD]
|87.1%| [TM] {BAZEL_UPLOAD} ydb/tests/datashard/s3/py3test
>> TKeyValueCollectorTest::TestKeyValueCollectorEmpty
>> TKeyValueCollectorTest::TestKeyValueCollectorEmpty [GOOD]
>> TKeyValueCollectorTest::TestKeyValueCollectorMany [GOOD]
>> KeyValueReadStorage::ReadWithTwoPartsOk
>> TxUsage::Sinks_Oltp_WriteToTopic_4_Table [GOOD]
------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_stats/unittest >> TSchemeshardStatsBatchingTest::ShouldPersistByBatchTimeout [GOOD]
Test command err:
Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2143]
IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2143]
Leader for TabletID 72057594046678944 is [1:129:2154] sender: [1:130:2058] recipient: [1:112:2143]
2026-01-08T15:33:22.219333Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7938: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1
2026-01-08T15:33:22.219421Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7966: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10
2026-01-08T15:33:22.219480Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7852: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s
2026-01-08T15:33:22.219523Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7868: OperationsProcessing config: using default configuration
2026-01-08T15:33:22.219583Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7874: OperationsProcessing config: type TxSplitTablePartition, limit 10000
2026-01-08T15:33:22.219614Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7874: OperationsProcessing config: type TxMergeTablePartition, limit 10000
2026-01-08T15:33:22.219674Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7998: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10
2026-01-08T15:33:22.219781Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_shred_manager.cpp:39: [RootShredManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, ShredInterval# 604800.000000s, ShredBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false
2026-01-08T15:33:22.220687Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:8069: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources#
2026-01-08T15:33:22.221038Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute
2026-01-08T15:33:22.338072Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7755: Cannot subscribe to console configs
2026-01-08T15:33:22.338143Z node 1 :IMPORT WARN: schemeshard_import.cpp:440: Table profiles were not loaded
2026-01-08T15:33:22.361294Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete
2026-01-08T15:33:22.362660Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute
2026-01-08T15:33:22.362932Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944
2026-01-08T15:33:22.391958Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete
2026-01-08T15:33:22.392436Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:464: Clear TempDirsState with owners number: 0
2026-01-08T15:33:22.393259Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1383: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944
2026-01-08T15:33:22.394332Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944
2026-01-08T15:33:22.402402Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:160: TTxInitRoot DoComplete, at schemeshard: 72057594046678944
2026-01-08T15:33:22.402659Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_shred_manager.cpp:84: [RootShredManager] Stop
2026-01-08T15:33:22.403926Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944
2026-01-08T15:33:22.403996Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944
2026-01-08T15:33:22.404117Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute
2026-01-08T15:33:22.404186Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1]
2026-01-08T15:33:22.404232Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:213: TTxServerlessStorageBilling.Complete
2026-01-08T15:33:22.404403Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:7111: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944
2026-01-08T15:33:22.715406Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot" OperationType: ESchemeOpMkDir MkDir { Name: ".sys" } Internal: true FailOnExist: false } TxId: 281474976710657 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000"
2026-01-08T15:33:22.716568Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "auth_permissions" Type: EAuthPermissions } } TxId: 281474976710658 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000"
2026-01-08T15:33:22.716710Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "auth_effective_permissions" Type: EAuthEffectivePermissions } } TxId: 281474976710659 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000"
2026-01-08T15:33:22.716839Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "auth_users" Type: EAuthUsers } } TxId: 281474976710660 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000"
2026-01-08T15:33:22.716912Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "pg_tables" Type: EPgTables } } TxId: 281474976710661 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000"
2026-01-08T15:33:22.717023Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "ds_pdisks" Type: EPDisks } } TxId: 281474976710662 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000"
2026-01-08T15:33:22.717127Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "auth_group_members" Type: EAuthGroupMembers } } TxId: 281474976710663 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000"
2026-01-08T15:33:22.717202Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "top_partitions_one_minute" Type: ETopPartitionsByCpuOneMinute } } TxId: 281474976710664 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000"
2026-01-08T15:33:22.717289Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "top_queries_by_request_units_one_hour" Type: ETopQueriesByRequestUnitsOneHour } } TxId: 281474976710665 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000"
2026-01-08T15:33:22.717366Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "ds_vslots" Type: EVSlots } } TxId: 281474976710666 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000"
2026-01-08T15:33:22.717434Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127:
SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "top_partitions_by_tli_one_hour" Type: ETopPartitionsByTliOneHour } } TxId: 281474976710667 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:33:22.717516Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "nodes" Type: ENodes } } TxId: 281474976710668 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:33:22.717589Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "top_queries_by_cpu_time_one_hour" Type: ETopQueriesByCpuTimeOneHour } } TxId: 281474976710669 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:33:22.717730Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "top_queries_by_read_bytes_one_hour" Type: ETopQueriesByReadBytesOneHour } } TxId: 281474976710670 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:33:22.717846Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "top_queries_by_read_bytes_one_minute" Type: ETopQueriesByR ... 
3:52.702582Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:2514: TTxInit for ChannelsBinding, read records: 3, at schemeshard: 72057594046678944 2026-01-08T15:33:52.702878Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:2893: TTxInit for TableIndexes, read records: 0, at schemeshard: 72057594046678944 2026-01-08T15:33:52.702997Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:2972: TTxInit for TableIndexKeys, read records: 0, at schemeshard: 72057594046678944 2026-01-08T15:33:52.703360Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:3473: TTxInit for KesusInfos, read records: 0, at schemeshard: 72057594046678944 2026-01-08T15:33:52.703444Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:3509: TTxInit for KesusAlters, read records: 0, at schemeshard: 72057594046678944 2026-01-08T15:33:52.703690Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:3754: TTxInit for TxShards, read records: 0, at schemeshard: 72057594046678944 2026-01-08T15:33:52.703849Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:3899: TTxInit for ShardToDelete, read records: 0, at schemeshard: 72057594046678944 2026-01-08T15:33:52.703925Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:3916: TTxInit for SystemShardToDelete, read records: 0, at schemeshard: 72057594046678944 2026-01-08T15:33:52.704006Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:3933: TTxInit for BackupSettings, read records: 0, at schemeshard: 72057594046678944 2026-01-08T15:33:52.704169Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4094: TTxInit for ShardBackupStatus, read records: 0, at schemeshard: 72057594046678944 2026-01-08T15:33:52.704245Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4110: TTxInit for CompletedBackup, read records: 0, at schemeshard: 72057594046678944 2026-01-08T15:33:52.704668Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4395: TTxInit for Publications, read records: 0, at schemeshard: 72057594046678944 2026-01-08T15:33:52.704904Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4762: IndexBuild , records: 0, at schemeshard: 72057594046678944 2026-01-08T15:33:52.704983Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4828: KMeansTreeSample records: 0, at schemeshard: 72057594046678944 2026-01-08T15:33:52.705037Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4887: KMeansTreeCluster records: 0, at schemeshard: 72057594046678944 2026-01-08T15:33:52.705171Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4976: SnapshotTables: snapshots: 0 tables: 0, at schemeshard: 72057594046678944 2026-01-08T15:33:52.705219Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:5003: SnapshotSteps: snapshots: 0, at schemeshard: 72057594046678944 2026-01-08T15:33:52.705272Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:5030: LongLocks: records: 0, at schemeshard: 72057594046678944 2026-01-08T15:33:52.705582Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:173: TSideEffects ApplyOnExecute at tablet# 72057594046678944 2026-01-08T15:33:52.714289Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:216: TSideEffects ApplyOnComplete at tablet# 72057594046678944 2026-01-08T15:33:52.714529Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_shred_manager.cpp:84: [RootShredManager] Stop 2026-01-08T15:33:52.719463Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5248: StateWork, received event# 2146435083, 
Sender [1:2148:4072], Recipient [1:2148:4072]: NKikimr::NSchemeShard::TEvPrivate::TEvServerlessStorageBilling 2026-01-08T15:33:52.719525Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5311: StateWork, processing event TEvPrivate::TEvServerlessStorageBilling 2026-01-08T15:33:52.721760Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2026-01-08T15:33:52.721857Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2026-01-08T15:33:52.722301Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5248: StateWork, received event# 271124999, Sender [1:2148:4072], Recipient [1:2148:4072]: NKikimr::NSchemeShard::TEvSchemeShard::TEvMeasureSelfResponseTime 2026-01-08T15:33:52.722372Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5255: StateWork, processing event TEvSchemeShard::TEvMeasureSelfResponseTime 2026-01-08T15:33:52.722537Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2026-01-08T15:33:52.722599Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2026-01-08T15:33:52.722668Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:213: TTxServerlessStorageBilling.Complete 2026-01-08T15:33:52.722708Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:216: TSideEffects ApplyOnComplete at tablet# 72057594046678944 2026-01-08T15:33:52.723080Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5248: StateWork, received event# 274399233, Sender [1:2186:4072], Recipient [1:2148:4072]: NKikimr::TEvTxAllocatorClient::TEvAllocateResult 2026-01-08T15:33:52.723122Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5421: StateWork, processing event TEvTxAllocatorClient::TEvAllocateResult 2026-01-08T15:33:52.723158Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:7111: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 Leader for TabletID 72057594046678944 is [1:2148:4072] sender: [1:2205:2058] recipient: [1:15:2062] 2026-01-08T15:33:52.790487Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5248: StateWork, received event# 271122945, Sender [1:2204:4117], Recipient [1:2148:4072]: NKikimrSchemeOp.TDescribePath Path: "/MyRoot/Simple" Options { ReturnPartitioningInfo: true ReturnPartitionConfig: true BackupInfo: false ReturnBoundaries: true ShowPrivateTable: true } 2026-01-08T15:33:52.790570Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5260: StateWork, processing event TEvSchemeShard::TEvDescribeScheme 2026-01-08T15:33:52.790699Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/Simple" Options { ReturnPartitioningInfo: true ReturnPartitionConfig: true BackupInfo: false ReturnBoundaries: true ShowPrivateTable: true }, at schemeshard: 72057594046678944 2026-01-08T15:33:52.791043Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/Simple" took 307us result status StatusSuccess 2026-01-08T15:33:52.791922Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: 
TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/Simple" PathDescription { Self { Name: "Simple" PathId: 38 SchemeshardId: 72057594046678944 PathType: EPathTypeTable CreateFinished: true CreateTxId: 1001 CreateStep: 5000038 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "Simple" Columns { Name: "key" Type: "Uint64" TypeId: 4 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "value" Type: "Utf8" TypeId: 4608 Id: 2 NotNull: false IsBuildInProgress: false } KeyColumnNames: "key" KeyColumnIds: 1 PartitionConfig { CompactionPolicy { InMemSizeToSnapshot: 4194304 InMemStepsToSnapshot: 300 InMemForceStepsToSnapshot: 500 InMemForceSizeToSnapshot: 16777216 InMemCompactionBrokerQueue: 0 ReadAheadHiThreshold: 67108864 ReadAheadLoThreshold: 16777216 MinDataPageSize: 7168 SnapBrokerQueue: 0 Generation { GenerationId: 0 SizeToCompact: 0 CountToCompact: 8 ForceCountToCompact: 8 ForceSizeToCompact: 134217728 CompactionBrokerQueue: 4294967295 KeepInCache: true BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen1" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 0 UpliftPartSize: 0 } Generation { GenerationId: 1 SizeToCompact: 41943040 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 536870912 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen2" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 8388608 UpliftPartSize: 8388608 } Generation { GenerationId: 2 SizeToCompact: 419430400 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 17179869184 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen3" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 83886080 UpliftPartSize: 83886080 } BackupBrokerQueue: 1 DefaultTaskPriority: 5 BackgroundSnapshotPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } InMemResourceBrokerTask: "compaction_gen0" SnapshotResourceBrokerTask: "compaction_gen0" BackupResourceBrokerTask: "scan" LogOverheadSizeToSnapshot: 16777216 LogOverheadCountToSnapshot: 500 DroppedRowsPercentToCompact: 50 MinBTreeIndexNodeSize: 7168 MinBTreeIndexNodeKeys: 6 } PartitioningPolicy { MinPartitionsCount: 1 MaxPartitionsCount: 1 } } TableSchemaVersion: 1 IsBackup: false IsRestore: false } TablePartitions { EndOfRangeKeyPrefix: "" IsPoint: false IsInclusive: false DatashardId: 72075186233409546 } TableStats { DataSize: 27456 RowCount: 200 IndexSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 1 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 1 RangeReadRows: 0 StoragePools { 
} ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 23702 Memory: 156864 Network: 0 Storage: 0 ReadThroughput: 0 WriteThroughput: 0 ReadIops: 0 WriteIops: 0 } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 37 PathsLimit: 10000 ShardsInside: 1 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 27456 DataSize: 27456 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 38 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 |87.1%| [TM] {BAZEL_UPLOAD} ydb/core/tx/schemeshard/ut_stats/unittest >> TKeyValueTest::TestVacuumOnEmptyTablet >> DataShardWrite::DoubleWriteUncommittedThenDoubleReadWithCommit [GOOD] >> DataShardWrite::DistributedInsertWithoutLocks-Volatile >> KeyValueReadStorage::ReadWithTwoPartsOk [GOOD] >> BasicUsage::WriteSessionCloseWaitsForWrites [GOOD] >> BasicUsage::WriteSessionCloseIgnoresWrites ------- [TM] {asan, default-linux-x86_64, release} ydb/core/keyvalue/ut/unittest >> KeyValueReadStorage::ReadWithTwoPartsOk [GOOD] Test command err: 2026-01-08T15:33:55.145224Z 1 00h00m00.000000s :KEYVALUE INFO: {KV20@keyvalue_storage_read_request.cpp:211} Received GetResult KeyValue# 1 GroupId# 3 Status# OK ResponseSz# 2 ErrorReason# ReadRequestCookie# 0 2026-01-08T15:33:55.147456Z 1 00h00m00.000000s :KEYVALUE INFO: {KV34@keyvalue_storage_read_request.cpp:494} Send respose KeyValue# 1 Status# RSTATUS_OK ReadRequestCookie# 0 |87.1%| [TM] {BAZEL_UPLOAD} ydb/core/keyvalue/ut/unittest >> TKeyValueTest::TestEmptyWriteReadDeleteWithRestartsThenResponseOk >> TxUsage::Sinks_Oltp_WriteToTopic_4_Query |87.1%| [TA] $(B)/ydb/public/sdk/cpp/src/client/persqueue_public/ut/with_offset_ranges_mode_ut/test-results/unittest/{meta.json ... results_accumulator.log} |87.1%| [TA] {BAZEL_UPLOAD} $(B)/ydb/public/sdk/cpp/src/client/persqueue_public/ut/with_offset_ranges_mode_ut/test-results/unittest/{meta.json ... 
results_accumulator.log} >> TKeyValueCollectorTest::TestKeyValueCollectorSingle >> KqpPg::InsertNoTargetColumns_NotOneSize-useSink [GOOD] >> KqpPg::InsertNoTargetColumns_Alter+useSink |87.1%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/s3/py3test >> test_s3.py::TestYdbS3TTL::test_s3[table_index_0_UNIQUE_SYNC-pk_types4-all_types4-index4--UNIQUE-SYNC] [GOOD] |87.1%| [TM] {BAZEL_UPLOAD} ydb/tests/datashard/s3/py3test >> TxUsage::WriteToTopic_Demo_45_Query [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-LocalUser-DropUser-54 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-LocalUser-DropUser-55 >> TKeyValueCollectorTest::TestKeyValueCollectorSingle [GOOD] >> TKeyValueCollectorTest::TestKeyValueCollectorSingleWithOneError >> TKeyValueCollectorTest::TestKeyValueCollectorSingleWithOneError [GOOD] >> TKeyValueCollectorTest::TestKeyValueCollectorMultiple >> TKeyValueCollectorTest::TestKeyValueCollectorMultiple [GOOD] >> TxUsage::WriteToTopic_Demo_46_Table >> KqpRboYql::OlapPredicatePushdown |87.1%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/datashard/ut_read_table/ydb-core-tx-datashard-ut_read_table >> TxUsage::Sinks_Olap_WriteToTopicAndTable_1_Table [GOOD] >> DataShardVolatile::UpsertBrokenLockArbiter+UseSink [GOOD] >> KqpRboPg::TestCrossInnerJoin-ColumnStore |87.1%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/datashard/ut_read_table/ydb-core-tx-datashard-ut_read_table |87.1%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/scheme_board/ut_populator/ydb-core-tx-scheme_board-ut_populator >> TTxDataShardReshuffleKMeansScan::MainToPostingWithOverlap [GOOD] >> DataShardVolatile::UpsertBrokenLockArbiter-UseSink >> TTxDataShardReshuffleKMeansScan::MainToBuild |87.1%| [TM] {asan, default-linux-x86_64, release} ydb/core/keyvalue/ut/unittest >> TKeyValueCollectorTest::TestKeyValueCollectorMultiple [GOOD] |87.1%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/scheme_board/ut_populator/ydb-core-tx-scheme_board-ut_populator |87.1%| [LD] {RESULT} $(B)/ydb/core/tx/datashard/ut_read_table/ydb-core-tx-datashard-ut_read_table |87.1%| [TM] {BAZEL_UPLOAD} ydb/core/keyvalue/ut/unittest |87.1%| [LD] {RESULT} $(B)/ydb/core/tx/scheme_board/ut_populator/ydb-core-tx-scheme_board-ut_populator >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-LocalUser-CreateUser-7 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-LocalUser-CreateUser-8 >> TxUsage::Sinks_Olap_WriteToTopicAndTable_1_Query >> KqpRboPg::ConstantFolding >> KqpPg::NoSelectFullScan [GOOD] >> DataShardVolatile::DistributedOutOfOrderFollowerConsistency [GOOD] >> DataShardVolatile::DistributedWriteRSNotAckedBeforeCommit >> KqpErrors::ProposeErrorEvWrite [GOOD] >> TKeyValueTest::TestWriteDeleteThenReadRemaining [GOOD] >> KqpPg::LongDomainName >> TKeyValueTest::TestWriteAndRenameWithoutCreationUnixTimeNewApi >> TKeyValueTest::TestWriteAndRenameWithoutCreationUnixTimeNewApi [GOOD] |87.1%| [TA] {RESULT} $(B)/ydb/public/sdk/cpp/src/client/persqueue_public/ut/with_offset_ranges_mode_ut/test-results/unittest/{meta.json ... 
results_accumulator.log} >> KqpPg::SelectIndex+useSink [GOOD] >> KqpPg::SelectIndex-useSink ------- [TM] {asan, default-linux-x86_64, release} ydb/core/keyvalue/ut/unittest >> TKeyValueTest::TestWriteAndRenameWithoutCreationUnixTimeNewApi [GOOD] Test command err: Leader for TabletID 72057594037927937 is [0:0:0] sender: [1:56:2057] recipient: [1:52:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [1:56:2057] recipient: [1:52:2097] Leader for TabletID 72057594037927937 is [1:58:2099] sender: [1:59:2057] recipient: [1:52:2097] Leader for TabletID 72057594037927937 is [1:58:2099] sender: [1:76:2057] recipient: [1:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [2:56:2057] recipient: [2:54:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [2:56:2057] recipient: [2:54:2097] Leader for TabletID 72057594037927937 is [2:58:2099] sender: [2:59:2057] recipient: [2:54:2097] Leader for TabletID 72057594037927937 is [2:58:2099] sender: [2:76:2057] recipient: [2:14:2061] !Reboot 72057594037927937 (actor [2:58:2099]) on event NKikimr::TEvTabletPipe::TEvServerConnected ! Leader for TabletID 72057594037927937 is [2:58:2099] sender: [2:451:2057] recipient: [2:39:2086] Leader for TabletID 72057594037927937 is [2:58:2099] sender: [2:454:2057] recipient: [2:453:2379] Leader for TabletID 72057594037927937 is [2:455:2380] sender: [2:456:2057] recipient: [2:453:2379] !Reboot 72057594037927937 (actor [2:58:2099]) rebooted! !Reboot 72057594037927937 (actor [2:58:2099]) tablet resolver refreshed! new actor is[2:455:2380] Leader for TabletID 72057594037927937 is [2:455:2380] sender: [2:571:2057] recipient: [2:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [3:56:2057] recipient: [3:52:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [3:56:2057] recipient: [3:52:2097] Leader for TabletID 72057594037927937 is [3:58:2099] sender: [3:59:2057] recipient: [3:52:2097] Leader for TabletID 72057594037927937 is [3:58:2099] sender: [3:76:2057] recipient: [3:14:2061] !Reboot 72057594037927937 (actor [3:58:2099]) on event NKikimr::TEvKeyValue::TEvRequest ! Leader for TabletID 72057594037927937 is [3:58:2099] sender: [3:451:2057] recipient: [3:39:2086] Leader for TabletID 72057594037927937 is [3:58:2099] sender: [3:454:2057] recipient: [3:453:2379] Leader for TabletID 72057594037927937 is [3:455:2380] sender: [3:456:2057] recipient: [3:453:2379] !Reboot 72057594037927937 (actor [3:58:2099]) rebooted! !Reboot 72057594037927937 (actor [3:58:2099]) tablet resolver refreshed! new actor is[3:455:2380] Leader for TabletID 72057594037927937 is [3:455:2380] sender: [3:571:2057] recipient: [3:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [4:56:2057] recipient: [4:53:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [4:56:2057] recipient: [4:53:2097] Leader for TabletID 72057594037927937 is [4:58:2099] sender: [4:59:2057] recipient: [4:53:2097] Leader for TabletID 72057594037927937 is [4:58:2099] sender: [4:76:2057] recipient: [4:14:2061] !Reboot 72057594037927937 (actor [4:58:2099]) on event NKikimr::TEvKeyValue::TEvIntermediate ! Leader for TabletID 72057594037927937 is [4:58:2099] sender: [4:452:2057] recipient: [4:39:2086] Leader for TabletID 72057594037927937 is [4:58:2099] sender: [4:455:2057] recipient: [4:454:2379] Leader for TabletID 72057594037927937 is [4:456:2380] sender: [4:457:2057] recipient: [4:454:2379] !Reboot 72057594037927937 (actor [4:58:2099]) rebooted! 
!Reboot 72057594037927937 (actor [4:58:2099]) tablet resolver refreshed! new actor is[4:456:2380] Leader for TabletID 72057594037927937 is [4:456:2380] sender: [4:572:2057] recipient: [4:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [5:56:2057] recipient: [5:53:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [5:56:2057] recipient: [5:53:2097] Leader for TabletID 72057594037927937 is [5:58:2099] sender: [5:59:2057] recipient: [5:53:2097] Leader for TabletID 72057594037927937 is [5:58:2099] sender: [5:76:2057] recipient: [5:14:2061] |87.1%| [TM] {BAZEL_UPLOAD} ydb/core/keyvalue/ut/unittest >> KqpRboYql::Filter+ColumnStore >> DataShardWrite::DistributedInsertWithoutLocks-Volatile [GOOD] >> KqpScripting::StreamExecuteYqlScriptScanScalar [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/datashard/ut_kqp_errors/unittest >> KqpErrors::ProposeErrorEvWrite [GOOD] Test command err: ... waiting for SysViewsRoster update finished 2026-01-08T15:33:40.132183Z node 2 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:125: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, got undelivered to scheme cache: ActorUnknown 2026-01-08T15:33:40.133170Z node 1 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:125: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, got undelivered to scheme cache: ActorUnknown 2026-01-08T15:33:40.137768Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [2:685:2347], Scheduled retry for error: {
: Error: Scheme service not found } 2026-01-08T15:33:40.257339Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:125: [WorkloadService] [Service] Failed to discover tenant nodes 2026-01-08T15:33:40.258104Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:299:2225], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2026-01-08T15:33:40.258886Z node 1 :METADATA_PROVIDER ERROR: log.cpp:841: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2026-01-08T15:33:40.259200Z node 1 :METADATA_PROVIDER ERROR: log.cpp:841: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2026-01-08T15:33:40.261116Z node 2 :METADATA_PROVIDER ERROR: log.cpp:841: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2026-01-08T15:33:40.261223Z node 2 :METADATA_PROVIDER ERROR: log.cpp:841: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2026-01-08T15:33:40.716707Z node 1 :IMPORT WARN: schemeshard_import.cpp:440: Table profiles were not loaded 2026-01-08T15:33:40.847721Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2026-01-08T15:33:40.847870Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2026-01-08T15:33:40.848354Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2026-01-08T15:33:40.848424Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2026-01-08T15:33:40.922591Z node 1 :HIVE WARN: hive_impl.cpp:814: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 2 Cookie 2 2026-01-08T15:33:40.923477Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2026-01-08T15:33:40.923869Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2026-01-08T15:33:41.029328Z node 2 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: PathErrorUnknown, path: Root/.metadata/script_executions 2026-01-08T15:33:41.044834Z node 1 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: PathErrorUnknown, path: Root/.metadata/script_executions ... 
waiting for SysViewsRoster update finished (done) 2026-01-08T15:33:41.874676Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2026-01-08T15:33:41.941781Z node 2 :TX_CONVEYOR ERROR: log.cpp:841: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2026-01-08T15:33:41.941946Z node 1 :TX_CONVEYOR ERROR: log.cpp:841: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2026-01-08T15:33:42.310649Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:33:43.410608Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:1909:3226], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:33:43.410755Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:1920:3231], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:33:43.410886Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:263: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:33:43.412165Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:1925:3236], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:33:43.412411Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:263: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:33:43.418864Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2026-01-08T15:33:44.058303Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:1923:3234], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710658 completed, doublechecking } 2026-01-08T15:33:44.333329Z node 1 :TX_PROXY ERROR: schemereq.cpp:624: Actor# [1:2034:3312] txid# 281474976710659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 42], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2026-01-08T15:33:44.803443Z node 1 :KQP_EXECUTER DEBUG: kqp_tasks_graph.cpp:92: StageInfo: StageId #[0,0], InputsCount: 0, OutputsCount: 1 2026-01-08T15:33:44.803606Z node 1 :KQP_EXECUTER DEBUG: {KQPLIT@kqp_literal_executer.cpp:99} ActorId: [0:0:0] TxId: 0. Ctx: { TraceId: , Database: , SessionId: , PoolId: , IsStreamingQuery: 0}. Begin literal execution operation_timeout# 0.000000s cancel_after# trace_id# 2026-01-08T15:33:44.803667Z node 1 :KQP_EXECUTER DEBUG: {KQPLIT@kqp_literal_executer.cpp:133} ActorId: [0:0:0] TxId: 0. Ctx: { TraceId: , Database: , SessionId: , PoolId: , IsStreamingQuery: 0}. Begin literal execution transactions_count# 1 trace_id# 2026-01-08T15:33:44.803774Z node 1 :KQP_EXECUTER DEBUG: {KQPLIT@kqp_literal_executer.cpp:144} ActorId: [0:0:0] TxId: 0. Ctx: { TraceId: , Database: , SessionId: , PoolId: , IsStreamingQuery: 0}. Stage AST stage_id# [0,0] ast# ( (return (lambda '() (block '( (let $1 (Just (Uint32 '1))) (let $2 (Just (Uint32 '2))) (let $3 (Just (Uint32 '3))) (return (ToStream (Just (AsList (AsStruct '('"key" $1) '('"value" $1)) (AsStruct '('"key" $2) '('"value" $2)) (AsStruct '('"key" $3) '('"value" $3)))))) )))) ) trace_id# 2026-01-08T15:33:44.803830Z node 1 :KQP_EXECUTER DEBUG: kqp_tasks_graph.cpp:340: Create result channelId: 1 from task: 1 with index: 0 2026-01-08T15:33:44.807437Z node 1 :KQP_EXECUTER DEBUG: {KQPLIT@kqp_literal_executer.cpp:293} ActorId: [0:0:0] TxId: 0. Ctx: { TraceId: , Database: , SessionId: , PoolId: , IsStreamingQuery: 0}. Execution is complete results_size# 1 trace_id# 2026-01-08T15:33:44.819427Z node 1 :KQP_EXECUTER DEBUG: kqp_tasks_graph.cpp:92: StageInfo: StageId #[0,0], InputsCount: 0, OutputsCount: 1 2026-01-08T15:33:44.819551Z node 1 :KQP_EXECUTER DEBUG: {KQPLIT@kqp_literal_executer.cpp:99} ActorId: [0:0:0] TxId: 0. Ctx: { TraceId: 01kef3w1bfe98a8xknjkvh797s, Database: , SessionId: ydb://session/3?node_id=1&id=NzUwMjM4MmMtODM2NjY0YmEtOGUxZTE0ZjEtZTlkNDA5MWY=, PoolId: default, DatabaseId: /Root, IsStreamingQuery: 0}. Begin literal execution operation_timeout# 299.417649s cancel_after# trace_id# 2026-01-08T15:33:44.819606Z node 1 :KQP_EXECUTER DEBUG: {KQPLIT@kqp_literal_executer.cpp:133} ActorId: [0:0:0] TxId: 0. Ctx: { TraceId: 01kef3w1bfe98a8xknjkvh797s, Database: , SessionId: ydb://session/3?node_id=1&id=NzUwMjM4MmMtODM2NjY0YmEtOGUxZTE0ZjEtZTlkNDA5MWY=, PoolId: default, DatabaseId: /Root, IsStreamingQuery: 0}. Begin literal execution transactions_count# 1 trace_id# 2026-01-08T15:33:44.819673Z node 1 :KQP_EXECUTER DEBUG: {KQPLIT@kqp_literal_executer.cpp:144} ActorId: [0:0:0] TxId: 0. Ctx: { TraceId: 01kef3w1bfe98a8xknjkvh797s, Database: , SessionId: ydb://session/3?node_id=1&id=NzUwMjM4MmMtODM2NjY0YmEtOGUxZTE0ZjEtZTlkNDA5MWY=, PoolId: default, DatabaseId: /Root, IsStreamingQuery: 0}. 
Stage AST stage_id# [0,0] ast# ( (return (lambda '() (block '( (let $1 (Just (Uint32 '1))) (let $2 (Just (Uint32 '2))) (let $3 (Just (Uint32 '3))) (return (ToStream (Just (AsList (AsStruct '('"key" $1) '('"value" $1)) (AsStruct '('"key" $2) '('"value" $2)) (AsStruct '('"key" $3) '('"value" $3)))))) )))) ) trace_id# 2026-01-08T15:33:44.819814Z node 1 :KQP_EXECUTER DEBUG: kqp_tasks_graph.cpp:340: Create result channelId: 1 from task: 1 with index: 0 2026-01-08T15:33:44.820569Z node 1 :KQP_EXECUTER DEBUG: {KQPLIT@kqp_literal_executer.cpp:293} ActorId: [0:0:0] TxId: 0. Ctx: { TraceId: 01kef3w1bfe98a8xknjkvh797s, Database: , SessionId: ydb://session/3?node_id=1&id=NzUwMjM4MmMtODM2NjY0YmEtOGUxZTE0ZjEtZTlkNDA5MWY=, PoolId: default, DatabaseId: /Root, IsStreamingQuery: 0}. Execution is complete results_size# 1 trace_id# 2026-01-08T15:33:44.820855Z node 1 :KQP_EXECUTER DEBUG: kqp_tasks_graph.cpp:92: StageInfo: StageId #[0,0], InputsCount: 0, OutputsCount: 1 2026-01-08T15:33:44.820940Z node 1 :KQP_EXECUTER TRACE: {KQPEX@kqp_executer_impl.h:187} ActorId: [0:0:0] TxId: 0. Ctx: { TraceId: 01kef3w1bfe98a8xknjkvh797s, Database: , SessionId: ydb://session/3?node_id=1&id=NzUwMjM4MmMtODM2NjY0YmEtOGUxZTE0ZjEtZTlkNDA5MWY=, PoolId: default, DatabaseId: /Root, IsStreamingQuery: 0}. Bootstrap done, become ReadyState trace_id# 2026-01-08T15:33:44.821279Z node 1 :KQP_EXECUTER DEBUG: {KQPEX@kqp_executer_impl.h:836} ActorId: [1:2060:3224] TxId: 281474976710660. Ctx: { TraceId: 01kef3w1bfe98a8xknjkvh797s, Database: , SessionId: ydb://session/3?node_id=1&id=NzUwMjM4MmMtODM2NjY0YmEtOGUxZTE0ZjEtZTlkNDA5MWY=, PoolId: default, DatabaseId: /Root, IsStreamingQuery: 0}. Executing physical ... # 2026-01-08T15:33:59.890073Z node 3 :KQP_EXECUTER TRACE: {KQPDATA@kqp_data_executer.cpp:2750} ActorId: [3:2316:3461] TxId: 281474976710683. Ctx: { TraceId: 01kef3whar23bmmvcw8gh4bz8k, Database: , SessionId: ydb://session/3?node_id=3&id=ZThkZmM5MzktZTI2ZTI4ZTgtNzQ4YzY2MmYtNGM3MDYzNWM=, PoolId: default, DatabaseId: /Root, IsStreamingQuery: 0}. Updating channels after the creation of compute actors trace_id# 2026-01-08T15:33:59.890125Z node 3 :KQP_EXECUTER DEBUG: kqp_planner.cpp:872: TxId: 281474976710683. Ctx: { TraceId: 01kef3whar23bmmvcw8gh4bz8k, Database: , SessionId: ydb://session/3?node_id=3&id=ZThkZmM5MzktZTI2ZTI4ZTgtNzQ4YzY2MmYtNGM3MDYzNWM=, PoolId: default, DatabaseId: /Root, IsStreamingQuery: 0}. Collect channels updates for task: 1 at actor [3:2319:3461] 2026-01-08T15:33:59.890179Z node 3 :KQP_EXECUTER DEBUG: kqp_planner.cpp:864: TxId: 281474976710683. Ctx: { TraceId: 01kef3whar23bmmvcw8gh4bz8k, Database: , SessionId: ydb://session/3?node_id=3&id=ZThkZmM5MzktZTI2ZTI4ZTgtNzQ4YzY2MmYtNGM3MDYzNWM=, PoolId: default, DatabaseId: /Root, IsStreamingQuery: 0}. Sending channels info to compute actor: [3:2319:3461], channels: 0 2026-01-08T15:33:59.890247Z node 3 :KQP_EXECUTER DEBUG: {KQPEX@kqp_executer_impl.h:884} ActorId: [3:2316:3461] TxId: 281474976710683. Ctx: { TraceId: 01kef3whar23bmmvcw8gh4bz8k, Database: , SessionId: ydb://session/3?node_id=3&id=ZThkZmM5MzktZTI2ZTI4ZTgtNzQ4YzY2MmYtNGM3MDYzNWM=, PoolId: default, DatabaseId: /Root, IsStreamingQuery: 0}. Waiting for: CA [3:2319:3461], trace_id# 2026-01-08T15:33:59.890427Z node 3 :KQP_EXECUTER DEBUG: {KQPDATA@kqp_data_executer.cpp:168} ActorId: [3:2316:3461] TxId: 281474976710683. Ctx: { TraceId: 01kef3whar23bmmvcw8gh4bz8k, Database: , SessionId: ydb://session/3?node_id=3&id=ZThkZmM5MzktZTI2ZTI4ZTgtNzQ4YzY2MmYtNGM3MDYzNWM=, PoolId: default, DatabaseId: /Root, IsStreamingQuery: 0}. 
ActorState: WaitResolveState, waiting for 1 compute actor(s) and 0 datashard(s): CA [3:2319:3461], trace_id# 2026-01-08T15:33:59.890500Z node 3 :KQP_EXECUTER DEBUG: {KQPDATA@kqp_data_executer.cpp:2350} ActorId: [3:2316:3461] TxId: 281474976710683. Ctx: { TraceId: 01kef3whar23bmmvcw8gh4bz8k, Database: , SessionId: ydb://session/3?node_id=3&id=ZThkZmM5MzktZTI2ZTI4ZTgtNzQ4YzY2MmYtNGM3MDYzNWM=, PoolId: default, DatabaseId: /Root, IsStreamingQuery: 0}. Immediate tx, become ExecuteState current_state# WaitResolveState immediate# true trace_id# 2026-01-08T15:33:59.891493Z node 3 :KQP_EXECUTER DEBUG: {KQPEX@kqp_executer_impl.h:570} ActorId: [3:2316:3461] TxId: 281474976710683. Ctx: { TraceId: 01kef3whar23bmmvcw8gh4bz8k, Database: , SessionId: ydb://session/3?node_id=3&id=ZThkZmM5MzktZTI2ZTI4ZTgtNzQ4YzY2MmYtNGM3MDYzNWM=, PoolId: default, DatabaseId: /Root, IsStreamingQuery: 0}. Got execution state from compute actor ActorState# ExecuteState ComputeActor# [3:2319:3461] TaskId# 1 State# COMPUTE_STATE_EXECUTING Stats# {} trace_id# 2026-01-08T15:33:59.891580Z node 3 :KQP_EXECUTER DEBUG: {KQPEX@kqp_executer_impl.h:884} ActorId: [3:2316:3461] TxId: 281474976710683. Ctx: { TraceId: 01kef3whar23bmmvcw8gh4bz8k, Database: , SessionId: ydb://session/3?node_id=3&id=ZThkZmM5MzktZTI2ZTI4ZTgtNzQ4YzY2MmYtNGM3MDYzNWM=, PoolId: default, DatabaseId: /Root, IsStreamingQuery: 0}. Waiting for: CA [3:2319:3461], trace_id# 2026-01-08T15:33:59.891653Z node 3 :KQP_EXECUTER DEBUG: {KQPDATA@kqp_data_executer.cpp:168} ActorId: [3:2316:3461] TxId: 281474976710683. Ctx: { TraceId: 01kef3whar23bmmvcw8gh4bz8k, Database: , SessionId: ydb://session/3?node_id=3&id=ZThkZmM5MzktZTI2ZTI4ZTgtNzQ4YzY2MmYtNGM3MDYzNWM=, PoolId: default, DatabaseId: /Root, IsStreamingQuery: 0}. ActorState: ExecuteState, waiting for 1 compute actor(s) and 0 datashard(s): CA [3:2319:3461], trace_id# 2026-01-08T15:33:59.892800Z node 3 :KQP_EXECUTER DEBUG: {KQPEX@kqp_executer_impl.h:570} ActorId: [3:2316:3461] TxId: 281474976710683. Ctx: { TraceId: 01kef3whar23bmmvcw8gh4bz8k, Database: , SessionId: ydb://session/3?node_id=3&id=ZThkZmM5MzktZTI2ZTI4ZTgtNzQ4YzY2MmYtNGM3MDYzNWM=, PoolId: default, DatabaseId: /Root, IsStreamingQuery: 0}. Got execution state from compute actor ActorState# ExecuteState ComputeActor# [3:2319:3461] TaskId# 1 State# COMPUTE_STATE_FINISHED Stats# {CpuTimeUs: 660 Tasks { TaskId: 1 CpuTimeUs: 112 FinishTimeMs: 1767886439892 EgressBytes: 10 EgressRows: 1 ComputeCpuTimeUs: 23 BuildCpuTimeUs: 89 HostName: "ghrun-wgi6cnz7jm" NodeId: 3 CreateTimeMs: 1767886439890 UpdateTimeMs: 1767886439892 } MaxMemoryUsage: 1048576 } trace_id# 2026-01-08T15:33:59.892933Z node 3 :KQP_EXECUTER INFO: kqp_planner.cpp:756: TxId: 281474976710683. Ctx: { TraceId: 01kef3whar23bmmvcw8gh4bz8k, Database: , SessionId: ydb://session/3?node_id=3&id=ZThkZmM5MzktZTI2ZTI4ZTgtNzQ4YzY2MmYtNGM3MDYzNWM=, PoolId: default, DatabaseId: /Root, IsStreamingQuery: 0}. Compute actor has finished execution: [3:2319:3461] 2026-01-08T15:33:59.893023Z node 3 :KQP_EXECUTER DEBUG: {KQPDATA@kqp_data_executer.cpp:220} ActorId: [3:2316:3461] TxId: 281474976710683. Ctx: { TraceId: 01kef3whar23bmmvcw8gh4bz8k, Database: , SessionId: ydb://session/3?node_id=3&id=ZThkZmM5MzktZTI2ZTI4ZTgtNzQ4YzY2MmYtNGM3MDYzNWM=, PoolId: default, DatabaseId: /Root, IsStreamingQuery: 0}. Send Commit to BufferActor buffer_actor_id# [3:2315:3461] trace_id# 2026-01-08T15:33:59.893105Z node 3 :KQP_EXECUTER DEBUG: {KQPEX@kqp_executer_impl.h:1124} ActorId: [3:2316:3461] TxId: 281474976710683. 
Ctx: { TraceId: 01kef3whar23bmmvcw8gh4bz8k, Database: , SessionId: ydb://session/3?node_id=3&id=ZThkZmM5MzktZTI2ZTI4ZTgtNzQ4YzY2MmYtNGM3MDYzNWM=, PoolId: default, DatabaseId: /Root, IsStreamingQuery: 0}. Resource usage for last stat interval, rate limiter was not found Consumption# ComputeTime: 0.000660s ReadRows: 0 ReadBytes: 0 RequestUnits# 1 ForceFlag# true trace_id# 2026-01-08T15:33:59.915710Z node 3 :KQP_COMPUTE WARN: kqp_write_actor.cpp:860: SelfId: [3:2321:3461], Table: `/Root/table-1` ([72057594046644480:38:1]), SessionActorId: [3:2306:3461]Got DISK_GROUP_OUT_OF_SPACE for table `/Root/table-1`. ShardID=72075186224037888, Sink=[3:2321:3461]. Ignored this error. 2026-01-08T15:33:59.915883Z node 3 :KQP_COMPUTE ERROR: kqp_write_actor.cpp:4656: SelfId: [3:2315:3461], SessionActorId: [3:2306:3461], statusCode=UNAVAILABLE. Issue=
: Error: Tablet 72075186224037888 is out of space. Table `/Root/table-1`., code: 2036 . sessionActorId=[3:2306:3461]. 2026-01-08T15:33:59.916335Z node 3 :KQP_SESSION WARN: {KQPSA@kqp_session_actor.cpp:2328} SessionId: ydb://session/3?node_id=3&id=ZThkZmM5MzktZTI2ZTI4ZTgtNzQ4YzY2MmYtNGM3MDYzNWM=, ActorId: [3:2306:3461], ActorState: ExecuteState, LegacyTraceId: 01kef3whar23bmmvcw8gh4bz8k, got TEvKqpBuffer::TEvError in ExecuteState, status: UNAVAILABLE send to: [3:2316:3461] from: [3:2315:3461] trace_id# 2026-01-08T15:33:59.916538Z node 3 :KQP_EXECUTER DEBUG: {KQPEX@kqp_executer_impl.h:1071} ActorId: [3:2316:3461] TxId: 281474976710683. Ctx: { TraceId: 01kef3whar23bmmvcw8gh4bz8k, Database: , SessionId: ydb://session/3?node_id=3&id=ZThkZmM5MzktZTI2ZTI4ZTgtNzQ4YzY2MmYtNGM3MDYzNWM=, PoolId: default, DatabaseId: /Root, IsStreamingQuery: 0}. Got EvAbortExecution Status# UNAVAILABLE Issues# {
: Error: Tablet 72075186224037888 is out of space. Table `/Root/table-1`., code: 2036 } trace_id# 2026-01-08T15:33:59.916655Z node 3 :KQP_EXECUTER ERROR: {KQPEX@kqp_executer_impl.h:1269} ActorId: [3:2316:3461] TxId: 281474976710683. Ctx: { TraceId: 01kef3whar23bmmvcw8gh4bz8k, Database: , SessionId: ydb://session/3?node_id=3&id=ZThkZmM5MzktZTI2ZTI4ZTgtNzQ4YzY2MmYtNGM3MDYzNWM=, PoolId: default, DatabaseId: /Root, IsStreamingQuery: 0}. Runtime error Status# UNAVAILABLE Issues# {
: Error: Tablet 72075186224037888 is out of space. Table `/Root/table-1`., code: 2036 } trace_id# 2026-01-08T15:33:59.916765Z node 3 :KQP_EXECUTER INFO: {KQPEX@kqp_executer_impl.h:1210} ActorId: [3:2316:3461] TxId: 281474976710683. Ctx: { TraceId: 01kef3whar23bmmvcw8gh4bz8k, Database: , SessionId: ydb://session/3?node_id=3&id=ZThkZmM5MzktZTI2ZTI4ZTgtNzQ4YzY2MmYtNGM3MDYzNWM=, PoolId: default, DatabaseId: /Root, IsStreamingQuery: 0}. Task does not have the CA id yet or is already complete TaskId# 1 trace_id# 2026-01-08T15:33:59.916996Z node 3 :KQP_EXECUTER TRACE: {KQPEX@kqp_executer_impl.h:1349} ActorId: [3:2316:3461] TxId: 281474976710683. Ctx: { TraceId: 01kef3whar23bmmvcw8gh4bz8k, Database: , SessionId: ydb://session/3?node_id=3&id=ZThkZmM5MzktZTI2ZTI4ZTgtNzQ4YzY2MmYtNGM3MDYzNWM=, PoolId: default, DatabaseId: /Root, IsStreamingQuery: 0}. ReplyErrorAndDie Response# Status: UNAVAILABLE Issues { message: "Tablet 72075186224037888 is out of space. Table `/Root/table-1`." issue_code: 2036 severity: 1 } Result { Stats { CpuTimeUs: 660 Tables { TablePath: "/Root/table-1" WriteRows: 1 WriteBytes: 8 AffectedPartitions: 1 } } } TargetActor# [3:2306:3461] trace_id# 2026-01-08T15:33:59.917071Z node 3 :KQP_EXECUTER INFO: {KQPDATA@kqp_data_executer.cpp:2823} ActorId: [3:2316:3461] TxId: 281474976710683. Ctx: { TraceId: 01kef3whar23bmmvcw8gh4bz8k, Database: , SessionId: ydb://session/3?node_id=3&id=ZThkZmM5MzktZTI2ZTI4ZTgtNzQ4YzY2MmYtNGM3MDYzNWM=, PoolId: default, DatabaseId: /Root, IsStreamingQuery: 0}. Shutdown immediately - nothing to wait trace_id# 2026-01-08T15:33:59.917252Z node 3 :KQP_EXECUTER DEBUG: {KQPEX@kqp_executer_impl.h:1490} ActorId: [3:2316:3461] TxId: 281474976710683. Ctx: { TraceId: 01kef3whar23bmmvcw8gh4bz8k, Database: , SessionId: ydb://session/3?node_id=3&id=ZThkZmM5MzktZTI2ZTI4ZTgtNzQ4YzY2MmYtNGM3MDYzNWM=, PoolId: default, DatabaseId: /Root, IsStreamingQuery: 0}. Terminate execution trace_id# 2026-01-08T15:33:59.917343Z node 3 :KQP_EXECUTER TRACE: {KQPEX@kqp_executer_impl.h:1505} ActorId: [3:2316:3461] TxId: 281474976710683. Ctx: { TraceId: 01kef3whar23bmmvcw8gh4bz8k, Database: , SessionId: ydb://session/3?node_id=3&id=ZThkZmM5MzktZTI2ZTI4ZTgtNzQ4YzY2MmYtNGM3MDYzNWM=, PoolId: default, DatabaseId: /Root, IsStreamingQuery: 0}. Terminate, become ZombieState trace_id# 2026-01-08T15:33:59.917688Z node 3 :KQP_SESSION WARN: {KQPSA@kqp_session_actor.cpp:3171} SessionId: ydb://session/3?node_id=3&id=ZThkZmM5MzktZTI2ZTI4ZTgtNzQ4YzY2MmYtNGM3MDYzNWM=, ActorId: [3:2306:3461], ActorState: ExecuteState, LegacyTraceId: 01kef3whar23bmmvcw8gh4bz8k, Create QueryResponse for error on request, msg: status# UNAVAILABLE issues# { message: "Tablet 72075186224037888 is out of space. Table `/Root/table-1`." issue_code: 2036 severity: 1 } trace_id# 2026-01-08T15:33:59.917973Z node 3 :KQP_EXECUTER TRACE: {KQPEX@kqp_executer_impl.h:187} ActorId: [0:0:0] TxId: 0. Ctx: { TraceId: 01kef3whar23bmmvcw8gh4bz8k, Database: , SessionId: ydb://session/3?node_id=3&id=ZThkZmM5MzktZTI2ZTI4ZTgtNzQ4YzY2MmYtNGM3MDYzNWM=, PoolId: default, DatabaseId: /Root, IsStreamingQuery: 0}. Bootstrap done, become ReadyState trace_id# 2026-01-08T15:33:59.918746Z node 3 :KQP_EXECUTER DEBUG: {KQPDATA@kqp_data_executer.cpp:236} ActorId: [3:2324:3461] TxId: 281474976710684. Ctx: { TraceId: 01kef3whar23bmmvcw8gh4bz8k, Database: , SessionId: ydb://session/3?node_id=3&id=ZThkZmM5MzktZTI2ZTI4ZTgtNzQ4YzY2MmYtNGM3MDYzNWM=, PoolId: default, DatabaseId: /Root, IsStreamingQuery: 0}. 
Send Rollback to BufferActor buffer_actor_id# [3:2315:3461] trace_id# 2026-01-08T15:33:59.919113Z node 3 :KQP_EXECUTER DEBUG: {KQPEX@kqp_executer_impl.h:1490} ActorId: [3:2324:3461] TxId: 281474976710684. Ctx: { TraceId: 01kef3whar23bmmvcw8gh4bz8k, Database: , SessionId: ydb://session/3?node_id=3&id=ZThkZmM5MzktZTI2ZTI4ZTgtNzQ4YzY2MmYtNGM3MDYzNWM=, PoolId: default, DatabaseId: /Root, IsStreamingQuery: 0}. Terminate execution trace_id# 2026-01-08T15:33:59.919181Z node 3 :KQP_EXECUTER TRACE: {KQPEX@kqp_executer_impl.h:1505} ActorId: [3:2324:3461] TxId: 281474976710684. Ctx: { TraceId: 01kef3whar23bmmvcw8gh4bz8k, Database: , SessionId: ydb://session/3?node_id=3&id=ZThkZmM5MzktZTI2ZTI4ZTgtNzQ4YzY2MmYtNGM3MDYzNWM=, PoolId: default, DatabaseId: /Root, IsStreamingQuery: 0}. Terminate, become ZombieState trace_id# |87.1%| [TM] {BAZEL_UPLOAD} ydb/core/tx/datashard/ut_kqp_errors/unittest |87.1%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/schemeshard/ut_continuous_backup/ydb-core-tx-schemeshard-ut_continuous_backup |87.1%| [LD] {RESULT} $(B)/ydb/core/tx/schemeshard/ut_continuous_backup/ydb-core-tx-schemeshard-ut_continuous_backup |87.1%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/schemeshard/ut_continuous_backup/ydb-core-tx-schemeshard-ut_continuous_backup >> TKeyValueTest::TestBasicWriteRead [GOOD] >> TKeyValueTest::TestBasicWriteReadOverrun >> KqpErrors::ProposeResultLost_RwTx-UseSink [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/datashard/ut_write/unittest >> DataShardWrite::DistributedInsertWithoutLocks-Volatile [GOOD] Test command err: ... waiting for SysViewsRoster update finished 2026-01-08T15:32:33.651472Z node 1 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:125: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, got undelivered to scheme cache: ActorUnknown 2026-01-08T15:32:33.800263Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:114:2161], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2026-01-08T15:32:33.800701Z node 1 :METADATA_PROVIDER ERROR: log.cpp:841: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2026-01-08T15:32:33.800873Z node 1 :METADATA_PROVIDER ERROR: log.cpp:841: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2026-01-08T15:32:34.119587Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2026-01-08T15:32:34.119756Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2026-01-08T15:32:34.217239Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1374: Notification cookie mismatch for subscription [1:34:2081] 1767886350353161 != 1767886350353165 2026-01-08T15:32:34.229151Z node 1 :IMPORT WARN: schemeshard_import.cpp:440: Table profiles were not loaded 2026-01-08T15:32:34.280547Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2026-01-08T15:32:34.387647Z node 1 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: PathErrorUnknown, path: Root/.metadata/script_executions ... waiting for SysViewsRoster update finished (done) 2026-01-08T15:32:34.718165Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2026-01-08T15:32:34.750945Z node 1 :TX_CONVEYOR ERROR: log.cpp:841: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2026-01-08T15:32:34.881191Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:32:34.970539Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3119: StateInit, received event# 268828672, Sender [1:877:2759], Recipient [1:886:2765]: NKikimr::TEvTablet::TEvBoot 2026-01-08T15:32:34.971655Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3119: StateInit, received event# 268828673, Sender [1:877:2759], Recipient [1:886:2765]: NKikimr::TEvTablet::TEvRestored 2026-01-08T15:32:34.972010Z node 1 :TX_DATASHARD INFO: datashard.cpp:374: TDataShard::OnActivateExecutor: tablet 72075186224037888 actor [1:886:2765] 2026-01-08T15:32:34.972295Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:661: TxInitSchema.Execute 2026-01-08T15:32:35.022757Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateInactive, received event# 268828684, Sender [1:877:2759], Recipient [1:886:2765]: NKikimr::TEvTablet::TEvFollowerSyncComplete 2026-01-08T15:32:35.023690Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:727: TxInitSchema.Complete 2026-01-08T15:32:35.023830Z node 1 :TX_DATASHARD DEBUG: 
datashard__init.cpp:50: TDataShard::TTxInit::Execute 2026-01-08T15:32:35.025654Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1324: LoadChangeRecords: QueueSize: 0, at tablet: 72075186224037888 2026-01-08T15:32:35.025759Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1380: LoadLockChangeRecords at tablet: 72075186224037888 2026-01-08T15:32:35.025819Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1429: LoadChangeRecordCommits at tablet: 72075186224037888 2026-01-08T15:32:35.026259Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:94: TDataShard::TTxInit::Complete 2026-01-08T15:32:35.026451Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:102: TDataShard::TTxInitRestored::Execute 2026-01-08T15:32:35.026578Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:142: DataShard 72075186224037888 persisting started state actor id [1:901:2765] in generation 1 2026-01-08T15:32:35.037562Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:151: TDataShard::TTxInitRestored::Complete 2026-01-08T15:32:35.071038Z node 1 :TX_DATASHARD INFO: datashard.cpp:418: Switched to work state WaitScheme tabletId 72075186224037888 2026-01-08T15:32:35.071296Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:458: 72075186224037888 not sending time cast registration request in state WaitScheme: missing processing params 2026-01-08T15:32:35.071465Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1259: Change sender created: at tablet: 72075186224037888, actorId: [1:903:2775] 2026-01-08T15:32:35.071505Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1264: Trying to activate change sender: at tablet: 72075186224037888 2026-01-08T15:32:35.071541Z node 1 :TX_DATASHARD INFO: datashard.cpp:1281: Cannot activate change sender: at tablet: 72075186224037888, state: WaitScheme 2026-01-08T15:32:35.071579Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2026-01-08T15:32:35.071852Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3153: StateWork, received event# 2146435072, Sender [1:886:2765], Recipient [1:886:2765]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvProgressTransaction 2026-01-08T15:32:35.071906Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3178: StateWork, processing event TEvPrivate::TEvProgressTransaction 2026-01-08T15:32:35.072226Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 72075186224037888 2026-01-08T15:32:35.072322Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 72075186224037888 2026-01-08T15:32:35.072398Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2026-01-08T15:32:35.072501Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 0 2026-01-08T15:32:35.072564Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:385: Check unit PlanQueue at 72075186224037888 2026-01-08T15:32:35.072604Z node 1 :TX_DATASHARD TRACE: plan_queue_unit.cpp:59: TPlanQueueUnit at 72075186224037888 has no attached operations 2026-01-08T15:32:35.072637Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:403: Unit PlanQueue has no ready operations at 72075186224037888 2026-01-08T15:32:35.072688Z node 1 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186224037888 TxInFly 0 2026-01-08T15:32:35.072733Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 
72075186224037888 2026-01-08T15:32:35.073175Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3153: StateWork, received event# 269877761, Sender [1:887:2766], Recipient [1:886:2765]: NKikimr::TEvTabletPipe::TEvServerConnected 2026-01-08T15:32:35.073215Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3189: StateWork, processing event TEvTabletPipe::TEvServerConnected 2026-01-08T15:32:35.073258Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3740: Server connected at leader tablet# 72075186224037888, clientId# [1:882:2762], serverId# [1:887:2766], sessionId# [0:0:0] 2026-01-08T15:32:35.073394Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3153: StateWork, received event# 269549568, Sender [1:400:2399], Recipient [1:887:2766] 2026-01-08T15:32:35.073440Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3159: StateWork, processing event TEvDataShard::TEvProposeTransaction 2026-01-08T15:32:35.073585Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037888 2026-01-08T15:32:35.073832Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1848: Trying to execute [0:281474976715657] at 72075186224037888 on unit CheckSchemeTx 2026-01-08T15:32:35.073898Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:133: Propose scheme transaction at tablet 72075186224037888 txId 281474976715657 ssId 72057594046644480 seqNo 2:1 2026-01-08T15:32:35.074001Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:221: Prepared scheme transaction txId 281474976715657 at tablet 72075186224037888 2026-01-08T15:32:35.074054Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1883: Execution status for [0:281474976715657] at 72075186224037888 is ExecutedNoMoreRestarts 2026-01-08T15:32:35.074097Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1931: Advance execution plan for [0:281474976715657] at 72075186224037888 executing on unit CheckSchemeTx 2026-01-08T15:32:35.074133Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1937: Add [0:281474976715657] at 72075186224037888 to execution unit StoreSchemeTx 2026-01-08T15:32:35.074170Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1848: Trying to execute [0:281474976715657] at 72075186224037888 on unit StoreSchemeTx 2026-01-08T15:32:35.074484Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1883: Execution status for [0:281474976715657] at 72075186224037888 is DelayCompleteNoMoreRestarts 2026-01-08T15:32:35.074527Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1931: Advance execution plan for [0:281474976715657] at 72075186224037888 executing on unit StoreSchemeTx 2026-01-08T15:32:35.074567Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1937: Add [0:281474976715657] at 72075186224037888 to execution unit FinishPropose 2026-01-08T15:32:35.074619Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1848: Trying to execute [0:281474976715657] at 72075186224037888 on unit FinishPropose 2026-01-08T15:32:35.074669Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1883: Execution status for [0:281474976715657] at 72075186224037888 is DelayComplete 2026-01-08T15:32:35.074700Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1931: Advance execution plan for [0:281474976715657] at 72075186224037888 executing on unit FinishPropose 2026-01-08T15:32:35.074735Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1937: Add [0:281474976715657] at 72075186224037888 to execution unit WaitForPlan 2026-01-08T15:32:35.074768Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1848: Trying to execute [0:281474976715657] at 
72075186224037888 on unit WaitForPlan 2026-01-08T15:32:35.074800Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1853: Operation [0:281474976715657] at 72075186224037888 is not ready to execute on unit WaitForPlan 2026-01-08T15:32:35.076320Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3153: StateWork, received event# 269746185, Sender [1:904:2776], Recipient [1:886:2765]: NKikimr::TEvTxProxySchemeCache::TEvWatchNotifyUpdated 2026-01-08T15:32:35.076371Z node 1 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:180: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2026-01-08T15:32:35.093263Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 72075186224037888 2026-01-08T15:32:35.093356Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1954: Complete execution for [0:281474 ... at 72075186224037889 2026-01-08T15:34:01.743758Z node 12 :TX_DATASHARD TRACE: datashard_impl.h:3153: StateWork, received event# 269287938, Sender [12:921:2784], Recipient [12:919:2782]: {TEvReadSet step# 3000 txid# 1234567890011 TabletSource# 72075186224037888 TabletDest# 72075186224037889 SetTabletConsumer# 72075186224037889 Flags# 0 Seqno# 2} 2026-01-08T15:34:01.743802Z node 12 :TX_DATASHARD TRACE: datashard_impl.h:3173: StateWork, processing event TEvTxProcessing::TEvReadSetAck 2026-01-08T15:34:01.743882Z node 12 :TX_DATASHARD DEBUG: datashard_outreadset.cpp:150: Receive RS Ack at 72075186224037888 source 72075186224037888 dest 72075186224037889 consumer 72075186224037889 txId 1234567890011 2026-01-08T15:34:01.744066Z node 12 :TX_DATASHARD DEBUG: datashard__readset.cpp:91: TTxReadSet::Complete at 72075186224037888 2026-01-08T15:34:01.744144Z node 12 :TX_DATASHARD TRACE: datashard_impl.h:3153: StateWork, received event# 269287938, Sender [12:919:2782], Recipient [12:921:2784]: {TEvReadSet step# 3000 txid# 1234567890011 TabletSource# 72075186224037889 TabletDest# 72075186224037888 SetTabletConsumer# 72075186224037888 Flags# 0 Seqno# 2} 2026-01-08T15:34:01.744180Z node 12 :TX_DATASHARD TRACE: datashard_impl.h:3173: StateWork, processing event TEvTxProcessing::TEvReadSetAck 2026-01-08T15:34:01.744210Z node 12 :TX_DATASHARD DEBUG: datashard_outreadset.cpp:150: Receive RS Ack at 72075186224037889 source 72075186224037889 dest 72075186224037888 consumer 72075186224037888 txId 1234567890011 2026-01-08T15:34:02.013521Z node 12 :TX_DATASHARD TRACE: datashard_impl.h:3153: StateWork, received event# 269553215, Sender [12:1165:2968], Recipient [12:919:2782]: NKikimrTxDataShard.TEvRead ReadId: 0 TableId { OwnerId: 72057594046644480 TableId: 38 SchemaVersion: 1 } Columns: 1 Columns: 2 Snapshot { Step: 3000 TxId: 18446744073709551615 } ResultFormat: FORMAT_CELLVEC MaxRows: 1001 MaxBytes: 5242880 Reverse: false TotalRowsLimit: 1001 RangesSize: 1 2026-01-08T15:34:02.013717Z node 12 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2697: TTxReadViaPipeline execute: at tablet# 72075186224037888, FollowerId 0 2026-01-08T15:34:02.013828Z node 12 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1848: Trying to execute [0:4] at 72075186224037888 on unit CheckRead 2026-01-08T15:34:02.013981Z node 12 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1883: Execution status for [0:4] at 72075186224037888 is Executed 2026-01-08T15:34:02.014059Z node 12 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1931: Advance execution plan for [0:4] at 72075186224037888 executing on unit CheckRead 2026-01-08T15:34:02.014127Z node 12 
:TX_DATASHARD TRACE: datashard_pipeline.cpp:1937: Add [0:4] at 72075186224037888 to execution unit BuildAndWaitDependencies 2026-01-08T15:34:02.014187Z node 12 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1848: Trying to execute [0:4] at 72075186224037888 on unit BuildAndWaitDependencies 2026-01-08T15:34:02.014252Z node 12 :TX_DATASHARD TRACE: datashard_pipeline.cpp:483: Activated operation [0:4] at 72075186224037888 2026-01-08T15:34:02.014308Z node 12 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1883: Execution status for [0:4] at 72075186224037888 is Executed 2026-01-08T15:34:02.014366Z node 12 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1931: Advance execution plan for [0:4] at 72075186224037888 executing on unit BuildAndWaitDependencies 2026-01-08T15:34:02.014395Z node 12 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1937: Add [0:4] at 72075186224037888 to execution unit ExecuteRead 2026-01-08T15:34:02.014440Z node 12 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1848: Trying to execute [0:4] at 72075186224037888 on unit ExecuteRead 2026-01-08T15:34:02.014611Z node 12 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:1748: 72075186224037888 Execute read# 1, request: { ReadId: 0 TableId { OwnerId: 72057594046644480 TableId: 38 SchemaVersion: 1 } Columns: 1 Columns: 2 Snapshot { Step: 3000 TxId: 18446744073709551615 } ResultFormat: FORMAT_CELLVEC MaxRows: 1001 MaxBytes: 5242880 Reverse: false TotalRowsLimit: 1001 } 2026-01-08T15:34:02.014983Z node 12 :TX_DATASHARD TRACE: datashard.cpp:2488: PromoteImmediatePostExecuteEdges at 72075186224037888 promoting UnprotectedReadEdge to v3000/18446744073709551615 2026-01-08T15:34:02.015077Z node 12 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2396: 72075186224037888 Complete read# {[12:1165:2968], 0} after executionsCount# 1 2026-01-08T15:34:02.015168Z node 12 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2370: 72075186224037888 read iterator# {[12:1165:2968], 0} sends rowCount# 2, bytes# 64, quota rows left# 999, quota bytes left# 5242816, hasUnreadQueries# 0, total queries# 1, firstUnprocessed# 0 2026-01-08T15:34:02.015294Z node 12 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2421: 72075186224037888 read iterator# {[12:1165:2968], 0} finished in read 2026-01-08T15:34:02.015402Z node 12 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1883: Execution status for [0:4] at 72075186224037888 is Executed 2026-01-08T15:34:02.015437Z node 12 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1931: Advance execution plan for [0:4] at 72075186224037888 executing on unit ExecuteRead 2026-01-08T15:34:02.015469Z node 12 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1937: Add [0:4] at 72075186224037888 to execution unit CompletedOperations 2026-01-08T15:34:02.015505Z node 12 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1848: Trying to execute [0:4] at 72075186224037888 on unit CompletedOperations 2026-01-08T15:34:02.015563Z node 12 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1883: Execution status for [0:4] at 72075186224037888 is Executed 2026-01-08T15:34:02.015596Z node 12 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1931: Advance execution plan for [0:4] at 72075186224037888 executing on unit CompletedOperations 2026-01-08T15:34:02.015638Z node 12 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1943: Execution plan for [0:4] at 72075186224037888 has finished 2026-01-08T15:34:02.015702Z node 12 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2932: TTxReadViaPipeline(69) Execute with status# Executed at tablet# 72075186224037888 2026-01-08T15:34:02.015882Z node 12 
:TX_DATASHARD TRACE: datashard__read_iterator.cpp:2981: TTxReadViaPipeline(69) Complete: at tablet# 72075186224037888 2026-01-08T15:34:02.016761Z node 12 :TX_DATASHARD TRACE: datashard_impl.h:3153: StateWork, received event# 269553219, Sender [12:1165:2968], Recipient [12:919:2782]: NKikimrTxDataShard.TEvReadCancel ReadId: 0 2026-01-08T15:34:02.016849Z node 12 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:3668: 72075186224037888 ReadCancel: { ReadId: 0 } 2026-01-08T15:34:02.017016Z node 12 :TX_DATASHARD TRACE: datashard_impl.h:3153: StateWork, received event# 269553215, Sender [12:1165:2968], Recipient [12:921:2784]: NKikimrTxDataShard.TEvRead ReadId: 1 TableId { OwnerId: 72057594046644480 TableId: 38 SchemaVersion: 1 } Columns: 1 Columns: 2 Snapshot { Step: 3000 TxId: 18446744073709551615 } ResultFormat: FORMAT_CELLVEC MaxRows: 999 MaxBytes: 5242880 Reverse: false TotalRowsLimit: 999 RangesSize: 1 2026-01-08T15:34:02.017137Z node 12 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2697: TTxReadViaPipeline execute: at tablet# 72075186224037889, FollowerId 0 2026-01-08T15:34:02.017198Z node 12 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1848: Trying to execute [0:4] at 72075186224037889 on unit CheckRead 2026-01-08T15:34:02.017270Z node 12 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1883: Execution status for [0:4] at 72075186224037889 is Executed 2026-01-08T15:34:02.017301Z node 12 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1931: Advance execution plan for [0:4] at 72075186224037889 executing on unit CheckRead 2026-01-08T15:34:02.017329Z node 12 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1937: Add [0:4] at 72075186224037889 to execution unit BuildAndWaitDependencies 2026-01-08T15:34:02.017358Z node 12 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1848: Trying to execute [0:4] at 72075186224037889 on unit BuildAndWaitDependencies 2026-01-08T15:34:02.017431Z node 12 :TX_DATASHARD TRACE: datashard_pipeline.cpp:483: Activated operation [0:4] at 72075186224037889 2026-01-08T15:34:02.017472Z node 12 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1883: Execution status for [0:4] at 72075186224037889 is Executed 2026-01-08T15:34:02.017502Z node 12 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1931: Advance execution plan for [0:4] at 72075186224037889 executing on unit BuildAndWaitDependencies 2026-01-08T15:34:02.017527Z node 12 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1937: Add [0:4] at 72075186224037889 to execution unit ExecuteRead 2026-01-08T15:34:02.017556Z node 12 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1848: Trying to execute [0:4] at 72075186224037889 on unit ExecuteRead 2026-01-08T15:34:02.017674Z node 12 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:1748: 72075186224037889 Execute read# 1, request: { ReadId: 1 TableId { OwnerId: 72057594046644480 TableId: 38 SchemaVersion: 1 } Columns: 1 Columns: 2 Snapshot { Step: 3000 TxId: 18446744073709551615 } ResultFormat: FORMAT_CELLVEC MaxRows: 999 MaxBytes: 5242880 Reverse: false TotalRowsLimit: 999 } 2026-01-08T15:34:02.017902Z node 12 :TX_DATASHARD TRACE: datashard.cpp:2488: PromoteImmediatePostExecuteEdges at 72075186224037889 promoting UnprotectedReadEdge to v3000/18446744073709551615 2026-01-08T15:34:02.017944Z node 12 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2396: 72075186224037889 Complete read# {[12:1165:2968], 1} after executionsCount# 1 2026-01-08T15:34:02.017987Z node 12 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2370: 72075186224037889 read iterator# {[12:1165:2968], 1} sends rowCount# 2, bytes# 64, quota 
rows left# 997, quota bytes left# 5242816, hasUnreadQueries# 0, total queries# 1, firstUnprocessed# 0 2026-01-08T15:34:02.018055Z node 12 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2421: 72075186224037889 read iterator# {[12:1165:2968], 1} finished in read 2026-01-08T15:34:02.018111Z node 12 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1883: Execution status for [0:4] at 72075186224037889 is Executed 2026-01-08T15:34:02.018138Z node 12 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1931: Advance execution plan for [0:4] at 72075186224037889 executing on unit ExecuteRead 2026-01-08T15:34:02.018165Z node 12 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1937: Add [0:4] at 72075186224037889 to execution unit CompletedOperations 2026-01-08T15:34:02.018196Z node 12 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1848: Trying to execute [0:4] at 72075186224037889 on unit CompletedOperations 2026-01-08T15:34:02.018244Z node 12 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1883: Execution status for [0:4] at 72075186224037889 is Executed 2026-01-08T15:34:02.018273Z node 12 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1931: Advance execution plan for [0:4] at 72075186224037889 executing on unit CompletedOperations 2026-01-08T15:34:02.018301Z node 12 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1943: Execution plan for [0:4] at 72075186224037889 has finished 2026-01-08T15:34:02.018438Z node 12 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2932: TTxReadViaPipeline(69) Execute with status# Executed at tablet# 72075186224037889 2026-01-08T15:34:02.018535Z node 12 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2981: TTxReadViaPipeline(69) Complete: at tablet# 72075186224037889 2026-01-08T15:34:02.020248Z node 12 :TX_DATASHARD TRACE: datashard_impl.h:3153: StateWork, received event# 269553219, Sender [12:1165:2968], Recipient [12:921:2784]: NKikimrTxDataShard.TEvReadCancel ReadId: 1 2026-01-08T15:34:02.020312Z node 12 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:3668: 72075186224037889 ReadCancel: { ReadId: 1 } { items { int32_value: 1 } items { int32_value: 1001 } }, { items { int32_value: 2 } items { int32_value: 1003 } }, { items { int32_value: 11 } items { int32_value: 1002 } }, { items { int32_value: 12 } items { int32_value: 1004 } } |87.2%| [TM] {BAZEL_UPLOAD} ydb/core/tx/datashard/ut_write/unittest >> TxUsage::Sinks_Oltp_WriteToTopic_5_Query [GOOD] >> KqpRboPg::TestCrossInnerJoin-ColumnStore [GOOD] >> KqpRboPg::UnionAll ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/yql/unittest >> KqpScripting::StreamExecuteYqlScriptScanScalar [GOOD] Test command err: Trying to start YDB, gRPC: 4917, MsgBus: 8676 2026-01-08T15:32:01.653249Z node 1 :METADATA_PROVIDER WARN: log.cpp:841: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7593013935536942690:2085];send_to=[0:7307199536658146131:7762515]; 2026-01-08T15:32:01.668354Z node 1 :METADATA_PROVIDER ERROR: log.cpp:841: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2026-01-08T15:32:01.705489Z node 1 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:125: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, got undelivered to scheme cache: ActorUnknown 2026-01-08T15:32:02.144047Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2026-01-08T15:32:02.144178Z node 1 :HIVE WARN: 
node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2026-01-08T15:32:02.308552Z node 1 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: LookupError, path: Root/.metadata/script_executions 2026-01-08T15:32:02.336745Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2026-01-08T15:32:02.371495Z node 1 :IMPORT WARN: schemeshard_import.cpp:440: Table profiles were not loaded 2026-01-08T15:32:02.711798Z node 1 :TX_CONVEYOR ERROR: log.cpp:841: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2026-01-08T15:32:02.738703Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2026-01-08T15:32:02.738727Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2026-01-08T15:32:02.738734Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2026-01-08T15:32:02.738830Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2026-01-08T15:32:02.745440Z node 1 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: PathErrorUnknown, path: Root/.metadata/script_executions 2026-01-08T15:32:03.830903Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2026-01-08T15:32:03.918052Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:32:04.205059Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:32:04.623096Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:32:04.745877Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:32:06.654703Z node 1 :METADATA_PROVIDER ERROR: log.cpp:841: 
fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7593013935536942690:2085];send_to=[0:7307199536658146131:7762515]; 2026-01-08T15:32:06.654806Z node 1 :METADATA_PROVIDER ERROR: log.cpp:841: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2026-01-08T15:32:07.076507Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7593013961306748291:2401], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:32:07.076637Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:263: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:32:07.077513Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7593013961306748301:2402], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:32:07.077597Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:263: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:32:07.591572Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:32:07.638407Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:32:07.680752Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:32:07.731358Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:32:07.775315Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:32:07.829795Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:32:07.888923Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:32:07.991204Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710669:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:32:08.119653Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7593013965601716477:2487], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:32:08.119746Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:263: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:32:08.121993Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7593013965601716482:2490], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:32:08.122049Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7593013965601716483:2491], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:32:08.122380Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:263: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:32:08.128187Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710670:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2026-01-08T15:32:08.147763Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7593013965601716486:2492], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710670 completed, doublechecking } 2026-01-08T15:32:08.226662Z node 1 :TX_PROXY ERROR: schemereq.cpp:624: Actor# [1:7593013965601716537:3782] txid# 281474976710671, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 53], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2026-01-08T15:32:11.494274Z node 1 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:239: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1767886331512, txId: 281474976710674] shutting down 2026-01-08T15:32:12.024210Z node 1 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:239: KqpSnapshotManager: discardin ... 6-01-08T15:33:51.044228Z node 2 :METADATA_PROVIDER WARN: log.cpp:841: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7593014407390244102:2087];send_to=[0:7307199536658146131:7762515]; 2026-01-08T15:33:51.045140Z node 2 :METADATA_PROVIDER ERROR: log.cpp:841: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2026-01-08T15:33:51.158416Z node 2 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: LookupError, path: Root/.metadata/script_executions 2026-01-08T15:33:51.162937Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2026-01-08T15:33:51.163041Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2026-01-08T15:33:51.202860Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2026-01-08T15:33:51.215761Z node 2 :IMPORT WARN: schemeshard_import.cpp:440: Table profiles were not loaded 2026-01-08T15:33:51.322533Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2026-01-08T15:33:51.322559Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2026-01-08T15:33:51.322571Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2026-01-08T15:33:51.322660Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2026-01-08T15:33:51.338043Z node 2 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: PathErrorUnknown, path: Root/.metadata/script_executions 2026-01-08T15:33:52.074595Z node 2 :TX_CONVEYOR ERROR: log.cpp:841: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2026-01-08T15:33:52.100535Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2026-01-08T15:33:52.111347Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:123: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 
72057594046644480 2026-01-08T15:33:52.129233Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:33:52.283910Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:33:52.534000Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:33:52.637174Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:33:56.046901Z node 2 :METADATA_PROVIDER ERROR: log.cpp:841: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[2:7593014407390244102:2087];send_to=[0:7307199536658146131:7762515]; 2026-01-08T15:33:56.046993Z node 2 :METADATA_PROVIDER ERROR: log.cpp:841: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2026-01-08T15:33:56.682644Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7593014428865082393:2401], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:33:56.682742Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:263: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:33:56.683069Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7593014428865082402:2402], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:33:56.683116Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:263: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:33:56.758035Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:33:56.821354Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:33:56.875803Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:33:56.920951Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:33:56.969439Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:33:57.012476Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:33:57.062621Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:33:57.131046Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715669:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:33:57.253198Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7593014433160050570:2486], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:33:57.253288Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:263: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:33:57.253861Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7593014433160050574:2488], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:33:57.253922Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7593014433160050576:2490], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:33:57.253939Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:263: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:33:57.257980Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715670:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2026-01-08T15:33:57.272846Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7593014433160050579:2491], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715670 completed, doublechecking } 2026-01-08T15:33:57.344718Z node 2 :TX_PROXY ERROR: schemereq.cpp:624: Actor# [2:7593014433160050630:3769] txid# 281474976715671, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 53], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2026-01-08T15:34:01.387795Z node 2 :TX_DATASHARD ERROR: datashard__kqp_scan.cpp:163: Undelivered event: 65542, at: [2:7593014450339920236:2074], tablet: [2:7593014428865082746:2438], scanId: 801, table: /Root/Join1 2026-01-08T15:34:01.454744Z node 2 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:239: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1767886441412, txId: 281474976715673] shutting down |87.2%| [TM] {BAZEL_UPLOAD} ydb/core/kqp/ut/yql/unittest >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-LocalUser-DropUser-55 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-LocalUser-DropUser-56 >> BasicUsage::WriteSessionWriteInHandlers [GOOD] >> KqpRboYql::ConstantFolding+ColumnStore >> BasicUsage::PropagateSessionClosed [GOOD] >> BasicUsage::ReadMirrored >> TxUsage::Sinks_Oltp_WriteToTopics_1_Table >> KqpRboPg::ConstantFolding [GOOD] >> KqpRboPg::ExpressionSubquery >> KqpRboPg::Bench_Select ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/datashard/ut_kqp_errors/unittest >> KqpErrors::ProposeResultLost_RwTx-UseSink [GOOD] Test command err: ... waiting for SysViewsRoster update finished 2026-01-08T15:33:42.249344Z node 2 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:125: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, got undelivered to scheme cache: ActorUnknown 2026-01-08T15:33:42.250374Z node 1 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:125: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, got undelivered to scheme cache: ActorUnknown 2026-01-08T15:33:42.255072Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [2:685:2347], Scheduled retry for error: {
: Error: Scheme service not found } 2026-01-08T15:33:42.371963Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:125: [WorkloadService] [Service] Failed to discover tenant nodes 2026-01-08T15:33:42.372734Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:299:2225], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2026-01-08T15:33:42.373460Z node 1 :METADATA_PROVIDER ERROR: log.cpp:841: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2026-01-08T15:33:42.373807Z node 1 :METADATA_PROVIDER ERROR: log.cpp:841: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2026-01-08T15:33:42.375943Z node 2 :METADATA_PROVIDER ERROR: log.cpp:841: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2026-01-08T15:33:42.376170Z node 2 :METADATA_PROVIDER ERROR: log.cpp:841: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2026-01-08T15:33:42.915658Z node 1 :IMPORT WARN: schemeshard_import.cpp:440: Table profiles were not loaded 2026-01-08T15:33:43.026054Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2026-01-08T15:33:43.026194Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2026-01-08T15:33:43.027752Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2026-01-08T15:33:43.027892Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2026-01-08T15:33:43.092258Z node 1 :HIVE WARN: hive_impl.cpp:814: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 2 Cookie 2 2026-01-08T15:33:43.093463Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2026-01-08T15:33:43.093983Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2026-01-08T15:33:43.202502Z node 2 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: PathErrorUnknown, path: Root/.metadata/script_executions 2026-01-08T15:33:43.234398Z node 1 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: PathErrorUnknown, path: Root/.metadata/script_executions ... 
waiting for SysViewsRoster update finished (done) 2026-01-08T15:33:44.032389Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2026-01-08T15:33:44.095283Z node 2 :TX_CONVEYOR ERROR: log.cpp:841: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2026-01-08T15:33:44.095430Z node 1 :TX_CONVEYOR ERROR: log.cpp:841: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2026-01-08T15:33:44.448338Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:33:45.429500Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:1908:3228], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:33:45.429591Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:1919:3233], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:33:45.429671Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:263: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:33:45.430476Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:1923:3237], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:33:45.430577Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:263: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:33:45.436619Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2026-01-08T15:33:46.067513Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:1922:3236], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715658 completed, doublechecking } 2026-01-08T15:33:46.277083Z node 1 :TX_PROXY ERROR: schemereq.cpp:624: Actor# [1:2031:3313] txid# 281474976715659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 42], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2026-01-08T15:33:46.875890Z node 1 :KQP_EXECUTER DEBUG: kqp_tasks_graph.cpp:92: StageInfo: StageId #[0,0], InputsCount: 0, OutputsCount: 1 2026-01-08T15:33:46.876040Z node 1 :KQP_EXECUTER TRACE: {KQPEX@kqp_executer_impl.h:187} ActorId: [0:0:0] TxId: 0. Ctx: { TraceId: 01kef3w3ak130en0bsg52d0ra8, Database: , SessionId: ydb://session/3?node_id=1&id=NTBlM2ZjNDEtZjRlMzU4OTktZDYxZDAxMzUtNDk3M2FiNDk=, PoolId: default, DatabaseId: /Root, IsStreamingQuery: 0}. Bootstrap done, become ReadyState trace_id# 2026-01-08T15:33:46.876367Z node 1 :KQP_EXECUTER DEBUG: {KQPEX@kqp_executer_impl.h:836} ActorId: [1:2057:3226] TxId: 281474976715660. Ctx: { TraceId: 01kef3w3ak130en0bsg52d0ra8, Database: , SessionId: ydb://session/3?node_id=1&id=NTBlM2ZjNDEtZjRlMzU4OTktZDYxZDAxMzUtNDk3M2FiNDk=, PoolId: default, DatabaseId: /Root, IsStreamingQuery: 0}. Executing physical tx TxType# 2 Stages# 1 trace_id# 2026-01-08T15:33:46.876483Z node 1 :KQP_EXECUTER TRACE: {KQPEX@kqp_executer_impl.h:851} ActorId: [1:2057:3226] TxId: 281474976715660. Ctx: { TraceId: 01kef3w3ak130en0bsg52d0ra8, Database: , SessionId: ydb://session/3?node_id=1&id=NTBlM2ZjNDEtZjRlMzU4OTktZDYxZDAxMzUtNDk3M2FiNDk=, PoolId: default, DatabaseId: /Root, IsStreamingQuery: 0}. Got request, become WaitResolveState trace_id# 2026-01-08T15:33:46.876799Z node 1 :KQP_EXECUTER DEBUG: kqp_table_resolver.cpp:270: TxId: 281474976715660. Resolved key sets: 1 2026-01-08T15:33:46.877013Z node 1 :KQP_EXECUTER DEBUG: kqp_table_resolver.cpp:294: TxId: 281474976715660. Resolved key: { TableId: [OwnerId: 72057594046644480, LocalPathId: 38] Access: 2 SyncVersion: false Status: OkData Kind: KindRegularTable PartitionsCount: 4 DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] Params { Version: 1 PlanResolution: 500 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } From: (Uint32 : NULL) IncFrom: 1 To: () IncTo: 0 } 2026-01-08T15:33:46.877439Z node 1 :KQP_EXECUTER DEBUG: kqp_planner.cpp:585: TxId: 281474976715660. Ctx: { TraceId: 01kef3w3ak130en0bsg52d0ra8, Database: , SessionId: ydb://session/3?node_id=1&id=NTBlM2ZjNDEtZjRlMzU4OTktZDYxZDAxMzUtNDk3M2FiNDk=, PoolId: default, DatabaseId: /Root, IsStreamingQuery: 0}. Total tasks: 1, readonly: true, 0 scan tasks on 0 nodes, localComputeTasks: 0, MayRunTasksLocally 1, snapshot: {0, 0} 2026-01-08T15:33:46.877869Z node 1 :KQP_EXECUTER DEBUG: kqp_planner.cpp:872: TxId: 281474976715660. Ctx: { TraceId: 01kef3w3ak130en0bsg52d0ra8, Database: , SessionId: ydb://session/3?node_id=1&id=NTBlM2ZjNDEtZjRlMzU4OTktZDYxZDAxMzUtNDk3M2FiNDk=, PoolId: default, DatabaseId: /Root, IsStreamingQuery: 0}. Collect channels updates for task: 1 at actor [1:2060:3226] 2026-01-08T15:33:46.877948Z node 1 :KQP_EXECUTER DEBUG: kqp_planner.cpp:864: TxId: 281474976715660. 
Ctx: { TraceId: 01kef3w3ak130en0bsg52d0ra8, Database: , SessionId: ydb://session/3?node_id=1&id=NTBlM2ZjNDEtZjRlMzU4OTktZDYxZDAxMzUtNDk3M2FiNDk=, PoolId: default, DatabaseId: /Root, IsStreamingQuery: 0}. Sending channels info to compute actor: [1:2060:3226], channels: 0 2026-01-08T15:33:46.878071Z node 1 :KQP_EXECUTER INFO: {KQPDATA@kqp_data_executer.cpp:2746} ActorId: [1:2057:3226] TxId: 281474976715660. Ctx: { TraceId: 01kef3w3ak130en0bsg52d0ra8, Database: , SessionId: ydb://session/3?node_id=1&id=NTBlM2ZjNDEtZjRlMzU4OTktZDYxZDAxMzUtNDk3M2FiNDk=, PoolId: default, DatabaseId: /Root, IsStreamingQuery: 0}. Total tasks total_tasks# 1 read_only# false datashard_txs# 0 ev_write_txs# 0 topic_txs# 0 volatile# false immediate# true pending_compute_tasks# 0 use_followers# false trace_id# 2026-01-08T15:33:46.878117Z node 1 :KQP_EXECUTER TRACE: {KQPDATA@kqp_data_executer.cpp:2750} ActorId: [1:2057:3226] TxId: 281474976715660. Ctx: { TraceId: 01kef3w3ak130en0bsg52d0ra8, Database: , SessionId: ydb://session/3?node_id=1&id=NTBlM2ZjNDEtZjRlMzU4OTktZDYxZDAxMzUtNDk3M2FiNDk=, PoolId: default, DatabaseId: /Root, IsStreamingQuery: 0}. Updating channels after the creation of compute actors trace_id# 2026-01-08T15:33:46.878169Z node 1 :KQP_EXECUTER DEBUG: kqp_planner.cpp:872: TxId: 281474976715660. Ctx: { TraceId: 01kef3w3ak130en0bsg52d0ra8, Database: , SessionId: ydb://session/3?node_id=1&id=NTBlM2ZjNDEtZjRlMzU4OTktZDYxZDAxMzUtNDk3M2FiNDk=, PoolId: default, DatabaseId: /Root, IsStreamingQuery: 0}. Collect channels updates for task: 1 at actor [1:2060:3226] 2026-01-0 ... 0] TxId: 281474976710663. Ctx: { TraceId: 01kef3wm5b3thq4t0kt058fgp9, Database: , SessionId: ydb://session/3?node_id=3&id=OTI3YjhlNDMtOTgyMjBkOGItNzBkNjI3ZjgtNTQ1YWRkYTI=, PoolId: default, DatabaseId: /Root, IsStreamingQuery: 0}. Waiting for: CT 1, CA [3:2119:3360], trace_id# 2026-01-08T15:34:02.877441Z node 3 :KQP_EXECUTER DEBUG: {KQPDATA@kqp_data_executer.cpp:168} ActorId: [3:2111:3360] TxId: 281474976710663. Ctx: { TraceId: 01kef3wm5b3thq4t0kt058fgp9, Database: , SessionId: ydb://session/3?node_id=3&id=OTI3YjhlNDMtOTgyMjBkOGItNzBkNjI3ZjgtNTQ1YWRkYTI=, PoolId: default, DatabaseId: /Root, IsStreamingQuery: 0}. ActorState: ExecuteState, waiting for 1 compute actor(s) and 0 datashard(s): CA [3:2119:3360], trace_id# 2026-01-08T15:34:02.878462Z node 3 :KQP_EXECUTER DEBUG: {KQPEX@kqp_executer_impl.h:1045} ActorId: [3:2111:3360] TxId: 281474976710663. Ctx: { TraceId: 01kef3wm5b3thq4t0kt058fgp9, Database: , SessionId: ydb://session/3?node_id=3&id=OTI3YjhlNDMtOTgyMjBkOGItNzBkNjI3ZjgtNTQ1YWRkYTI=, PoolId: default, DatabaseId: /Root, IsStreamingQuery: 0}. Executing task TaskId# 1 ComputeActor# [4:2120:2473] trace_id# 2026-01-08T15:34:02.878537Z node 3 :KQP_EXECUTER DEBUG: kqp_planner.cpp:872: TxId: 281474976710663. Ctx: { TraceId: 01kef3wm5b3thq4t0kt058fgp9, Database: , SessionId: ydb://session/3?node_id=3&id=OTI3YjhlNDMtOTgyMjBkOGItNzBkNjI3ZjgtNTQ1YWRkYTI=, PoolId: default, DatabaseId: /Root, IsStreamingQuery: 0}. Collect channels updates for task: 1 at actor [4:2120:2473] 2026-01-08T15:34:02.878613Z node 3 :KQP_EXECUTER DEBUG: kqp_planner.cpp:908: TxId: 281474976710663. Ctx: { TraceId: 01kef3wm5b3thq4t0kt058fgp9, Database: , SessionId: ydb://session/3?node_id=3&id=OTI3YjhlNDMtOTgyMjBkOGItNzBkNjI3ZjgtNTQ1YWRkYTI=, PoolId: default, DatabaseId: /Root, IsStreamingQuery: 0}. Task: 1, output channelId: 1, dst task: 2, at actor [3:2119:3360] 2026-01-08T15:34:02.878689Z node 3 :KQP_EXECUTER DEBUG: kqp_planner.cpp:864: TxId: 281474976710663. 
Ctx: { TraceId: 01kef3wm5b3thq4t0kt058fgp9, Database: , SessionId: ydb://session/3?node_id=3&id=OTI3YjhlNDMtOTgyMjBkOGItNzBkNjI3ZjgtNTQ1YWRkYTI=, PoolId: default, DatabaseId: /Root, IsStreamingQuery: 0}. Sending channels info to compute actor: [4:2120:2473], channels: 1 2026-01-08T15:34:02.878734Z node 3 :KQP_EXECUTER DEBUG: kqp_planner.cpp:864: TxId: 281474976710663. Ctx: { TraceId: 01kef3wm5b3thq4t0kt058fgp9, Database: , SessionId: ydb://session/3?node_id=3&id=OTI3YjhlNDMtOTgyMjBkOGItNzBkNjI3ZjgtNTQ1YWRkYTI=, PoolId: default, DatabaseId: /Root, IsStreamingQuery: 0}. Sending channels info to compute actor: [3:2119:3360], channels: 1 2026-01-08T15:34:02.879077Z node 3 :KQP_EXECUTER DEBUG: {KQPEX@kqp_executer_impl.h:570} ActorId: [3:2111:3360] TxId: 281474976710663. Ctx: { TraceId: 01kef3wm5b3thq4t0kt058fgp9, Database: , SessionId: ydb://session/3?node_id=3&id=OTI3YjhlNDMtOTgyMjBkOGItNzBkNjI3ZjgtNTQ1YWRkYTI=, PoolId: default, DatabaseId: /Root, IsStreamingQuery: 0}. Got execution state from compute actor ActorState# ExecuteState ComputeActor# [4:2120:2473] TaskId# 1 State# COMPUTE_STATE_EXECUTING Stats# {} trace_id# 2026-01-08T15:34:02.879163Z node 3 :KQP_EXECUTER DEBUG: {KQPEX@kqp_executer_impl.h:884} ActorId: [3:2111:3360] TxId: 281474976710663. Ctx: { TraceId: 01kef3wm5b3thq4t0kt058fgp9, Database: , SessionId: ydb://session/3?node_id=3&id=OTI3YjhlNDMtOTgyMjBkOGItNzBkNjI3ZjgtNTQ1YWRkYTI=, PoolId: default, DatabaseId: /Root, IsStreamingQuery: 0}. Waiting for: CA [4:2120:2473], CA [3:2119:3360], trace_id# 2026-01-08T15:34:02.879231Z node 3 :KQP_EXECUTER DEBUG: {KQPDATA@kqp_data_executer.cpp:168} ActorId: [3:2111:3360] TxId: 281474976710663. Ctx: { TraceId: 01kef3wm5b3thq4t0kt058fgp9, Database: , SessionId: ydb://session/3?node_id=3&id=OTI3YjhlNDMtOTgyMjBkOGItNzBkNjI3ZjgtNTQ1YWRkYTI=, PoolId: default, DatabaseId: /Root, IsStreamingQuery: 0}. ActorState: ExecuteState, waiting for 2 compute actor(s) and 0 datashard(s): CA [4:2120:2473], CA [3:2119:3360], trace_id# 2026-01-08T15:34:02.880445Z node 3 :KQP_EXECUTER DEBUG: {KQPEX@kqp_executer_impl.h:570} ActorId: [3:2111:3360] TxId: 281474976710663. Ctx: { TraceId: 01kef3wm5b3thq4t0kt058fgp9, Database: , SessionId: ydb://session/3?node_id=3&id=OTI3YjhlNDMtOTgyMjBkOGItNzBkNjI3ZjgtNTQ1YWRkYTI=, PoolId: default, DatabaseId: /Root, IsStreamingQuery: 0}. Got execution state from compute actor ActorState# ExecuteState ComputeActor# [4:2120:2473] TaskId# 1 State# COMPUTE_STATE_EXECUTING Stats# {CpuTimeUs: 994 Tasks { TaskId: 1 CpuTimeUs: 575 ComputeCpuTimeUs: 14 BuildCpuTimeUs: 561 HostName: "ghrun-wgi6cnz7jm" NodeId: 4 CreateTimeMs: 1767886442875 CurrentWaitInputTimeUs: 19 UpdateTimeMs: 1767886442876 } MaxMemoryUsage: 1048576 } trace_id# 2026-01-08T15:34:02.880583Z node 3 :KQP_EXECUTER DEBUG: {KQPEX@kqp_executer_impl.h:884} ActorId: [3:2111:3360] TxId: 281474976710663. Ctx: { TraceId: 01kef3wm5b3thq4t0kt058fgp9, Database: , SessionId: ydb://session/3?node_id=3&id=OTI3YjhlNDMtOTgyMjBkOGItNzBkNjI3ZjgtNTQ1YWRkYTI=, PoolId: default, DatabaseId: /Root, IsStreamingQuery: 0}. Waiting for: CA [4:2120:2473], CA [3:2119:3360], trace_id# 2026-01-08T15:34:02.880630Z node 3 :KQP_EXECUTER DEBUG: {KQPDATA@kqp_data_executer.cpp:168} ActorId: [3:2111:3360] TxId: 281474976710663. Ctx: { TraceId: 01kef3wm5b3thq4t0kt058fgp9, Database: , SessionId: ydb://session/3?node_id=3&id=OTI3YjhlNDMtOTgyMjBkOGItNzBkNjI3ZjgtNTQ1YWRkYTI=, PoolId: default, DatabaseId: /Root, IsStreamingQuery: 0}. 
ActorState: ExecuteState, waiting for 2 compute actor(s) and 0 datashard(s): CA [4:2120:2473], CA [3:2119:3360], trace_id# 2026-01-08T15:34:02.898153Z node 3 :KQP_EXECUTER DEBUG: {KQPEX@kqp_executer_impl.h:570} ActorId: [3:2111:3360] TxId: 281474976710663. Ctx: { TraceId: 01kef3wm5b3thq4t0kt058fgp9, Database: , SessionId: ydb://session/3?node_id=3&id=OTI3YjhlNDMtOTgyMjBkOGItNzBkNjI3ZjgtNTQ1YWRkYTI=, PoolId: default, DatabaseId: /Root, IsStreamingQuery: 0}. Got execution state from compute actor ActorState# ExecuteState ComputeActor# [4:2120:2473] TaskId# 1 State# COMPUTE_STATE_EXECUTING Stats# {CpuTimeUs: 1683 DurationUs: 9000 Tasks { TaskId: 1 CpuTimeUs: 822 FinishTimeMs: 1767886442897 OutputRows: 3 OutputBytes: 13 IngressRows: 3 ResultRows: 3 ResultBytes: 13 ComputeCpuTimeUs: 261 BuildCpuTimeUs: 561 WaitInputTimeUs: 8975 HostName: "ghrun-wgi6cnz7jm" NodeId: 4 StartTimeMs: 1767886442888 CreateTimeMs: 1767886442875 UpdateTimeMs: 1767886442897 } MaxMemoryUsage: 1048576 } trace_id# 2026-01-08T15:34:02.898272Z node 3 :KQP_EXECUTER DEBUG: {KQPEX@kqp_executer_impl.h:884} ActorId: [3:2111:3360] TxId: 281474976710663. Ctx: { TraceId: 01kef3wm5b3thq4t0kt058fgp9, Database: , SessionId: ydb://session/3?node_id=3&id=OTI3YjhlNDMtOTgyMjBkOGItNzBkNjI3ZjgtNTQ1YWRkYTI=, PoolId: default, DatabaseId: /Root, IsStreamingQuery: 0}. Waiting for: CA [4:2120:2473], CA [3:2119:3360], trace_id# 2026-01-08T15:34:02.898641Z node 3 :KQP_EXECUTER DEBUG: {KQPDATA@kqp_data_executer.cpp:168} ActorId: [3:2111:3360] TxId: 281474976710663. Ctx: { TraceId: 01kef3wm5b3thq4t0kt058fgp9, Database: , SessionId: ydb://session/3?node_id=3&id=OTI3YjhlNDMtOTgyMjBkOGItNzBkNjI3ZjgtNTQ1YWRkYTI=, PoolId: default, DatabaseId: /Root, IsStreamingQuery: 0}. ActorState: ExecuteState, waiting for 2 compute actor(s) and 0 datashard(s): CA [4:2120:2473], CA [3:2119:3360], trace_id# 2026-01-08T15:34:02.900680Z node 3 :KQP_EXECUTER DEBUG: {KQPEX@kqp_executer_impl.h:570} ActorId: [3:2111:3360] TxId: 281474976710663. Ctx: { TraceId: 01kef3wm5b3thq4t0kt058fgp9, Database: , SessionId: ydb://session/3?node_id=3&id=OTI3YjhlNDMtOTgyMjBkOGItNzBkNjI3ZjgtNTQ1YWRkYTI=, PoolId: default, DatabaseId: /Root, IsStreamingQuery: 0}. Got execution state from compute actor ActorState# ExecuteState ComputeActor# [3:2119:3360] TaskId# 2 State# COMPUTE_STATE_FINISHED Stats# {CpuTimeUs: 1624 Tasks { TaskId: 2 StageId: 1 CpuTimeUs: 1234 FinishTimeMs: 1767886442899 InputRows: 3 InputBytes: 13 ComputeCpuTimeUs: 213 BuildCpuTimeUs: 1021 HostName: "ghrun-wgi6cnz7jm" NodeId: 3 CreateTimeMs: 1767886442873 UpdateTimeMs: 1767886442899 } MaxMemoryUsage: 1048576 } trace_id# 2026-01-08T15:34:02.900787Z node 3 :KQP_EXECUTER INFO: kqp_planner.cpp:756: TxId: 281474976710663. Ctx: { TraceId: 01kef3wm5b3thq4t0kt058fgp9, Database: , SessionId: ydb://session/3?node_id=3&id=OTI3YjhlNDMtOTgyMjBkOGItNzBkNjI3ZjgtNTQ1YWRkYTI=, PoolId: default, DatabaseId: /Root, IsStreamingQuery: 0}. Compute actor has finished execution: [3:2119:3360] 2026-01-08T15:34:02.900886Z node 3 :KQP_EXECUTER DEBUG: {KQPEX@kqp_executer_impl.h:884} ActorId: [3:2111:3360] TxId: 281474976710663. Ctx: { TraceId: 01kef3wm5b3thq4t0kt058fgp9, Database: , SessionId: ydb://session/3?node_id=3&id=OTI3YjhlNDMtOTgyMjBkOGItNzBkNjI3ZjgtNTQ1YWRkYTI=, PoolId: default, DatabaseId: /Root, IsStreamingQuery: 0}. Waiting for: CA [4:2120:2473], trace_id# 2026-01-08T15:34:02.900956Z node 3 :KQP_EXECUTER DEBUG: {KQPDATA@kqp_data_executer.cpp:168} ActorId: [3:2111:3360] TxId: 281474976710663. 
Ctx: { TraceId: 01kef3wm5b3thq4t0kt058fgp9, Database: , SessionId: ydb://session/3?node_id=3&id=OTI3YjhlNDMtOTgyMjBkOGItNzBkNjI3ZjgtNTQ1YWRkYTI=, PoolId: default, DatabaseId: /Root, IsStreamingQuery: 0}. ActorState: ExecuteState, waiting for 1 compute actor(s) and 0 datashard(s): CA [4:2120:2473], trace_id# 2026-01-08T15:34:02.901471Z node 3 :KQP_EXECUTER DEBUG: {KQPEX@kqp_executer_impl.h:570} ActorId: [3:2111:3360] TxId: 281474976710663. Ctx: { TraceId: 01kef3wm5b3thq4t0kt058fgp9, Database: , SessionId: ydb://session/3?node_id=3&id=OTI3YjhlNDMtOTgyMjBkOGItNzBkNjI3ZjgtNTQ1YWRkYTI=, PoolId: default, DatabaseId: /Root, IsStreamingQuery: 0}. Got execution state from compute actor ActorState# ExecuteState ComputeActor# [4:2120:2473] TaskId# 1 State# COMPUTE_STATE_FINISHED Stats# {CpuTimeUs: 1938 DurationUs: 12000 Tasks { TaskId: 1 CpuTimeUs: 834 FinishTimeMs: 1767886442900 OutputRows: 3 OutputBytes: 13 Tables { TablePath: "/Root/table-1" ReadRows: 3 ReadBytes: 24 AffectedPartitions: 4 } IngressRows: 3 ResultRows: 3 ResultBytes: 13 ComputeCpuTimeUs: 273 BuildCpuTimeUs: 561 WaitInputTimeUs: 8975 HostName: "ghrun-wgi6cnz7jm" NodeId: 4 StartTimeMs: 1767886442888 CreateTimeMs: 1767886442875 UpdateTimeMs: 1767886442900 } MaxMemoryUsage: 1048576 } trace_id# 2026-01-08T15:34:02.901567Z node 3 :KQP_EXECUTER INFO: kqp_planner.cpp:756: TxId: 281474976710663. Ctx: { TraceId: 01kef3wm5b3thq4t0kt058fgp9, Database: , SessionId: ydb://session/3?node_id=3&id=OTI3YjhlNDMtOTgyMjBkOGItNzBkNjI3ZjgtNTQ1YWRkYTI=, PoolId: default, DatabaseId: /Root, IsStreamingQuery: 0}. Compute actor has finished execution: [4:2120:2473] 2026-01-08T15:34:02.901809Z node 3 :KQP_EXECUTER DEBUG: {KQPEX@kqp_executer_impl.h:1490} ActorId: [3:2111:3360] TxId: 281474976710663. Ctx: { TraceId: 01kef3wm5b3thq4t0kt058fgp9, Database: , SessionId: ydb://session/3?node_id=3&id=OTI3YjhlNDMtOTgyMjBkOGItNzBkNjI3ZjgtNTQ1YWRkYTI=, PoolId: default, DatabaseId: /Root, IsStreamingQuery: 0}. Terminate execution trace_id# 2026-01-08T15:34:02.901889Z node 3 :KQP_EXECUTER TRACE: {KQPEX@kqp_executer_impl.h:1505} ActorId: [3:2111:3360] TxId: 281474976710663. Ctx: { TraceId: 01kef3wm5b3thq4t0kt058fgp9, Database: , SessionId: ydb://session/3?node_id=3&id=OTI3YjhlNDMtOTgyMjBkOGItNzBkNjI3ZjgtNTQ1YWRkYTI=, PoolId: default, DatabaseId: /Root, IsStreamingQuery: 0}. Terminate, become ZombieState trace_id# 2026-01-08T15:34:02.901950Z node 3 :KQP_EXECUTER DEBUG: {KQPEX@kqp_executer_impl.h:1124} ActorId: [3:2111:3360] TxId: 281474976710663. Ctx: { TraceId: 01kef3wm5b3thq4t0kt058fgp9, Database: , SessionId: ydb://session/3?node_id=3&id=OTI3YjhlNDMtOTgyMjBkOGItNzBkNjI3ZjgtNTQ1YWRkYTI=, PoolId: default, DatabaseId: /Root, IsStreamingQuery: 0}. 
Resource usage for last stat interval, rate limiter was not found Consumption# ComputeTime: 0.003562s ReadRows: 3 ReadBytes: 24 RequestUnits# 3 ForceFlag# true trace_id# { items { uint32_value: 1 } items { uint32_value: 1 } }, { items { uint32_value: 2 } items { uint32_value: 2 } }, { items { uint32_value: 3 } items { uint32_value: 3 } } |87.2%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/kqp/ut/idx_test/ydb-core-kqp-ut-idx_test |87.2%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/kqp/ut/idx_test/ydb-core-kqp-ut-idx_test |87.2%| [TM] {BAZEL_UPLOAD} ydb/core/tx/datashard/ut_kqp_errors/unittest |87.2%| [LD] {RESULT} $(B)/ydb/core/kqp/ut/idx_test/ydb-core-kqp-ut-idx_test >> TxUsage::WriteToTopic_Demo_18_RestartNo_Query [GOOD] >> KqpPg::InsertNoTargetColumns_Alter+useSink [GOOD] >> KqpPg::InsertNoTargetColumns_Alter-useSink >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-LocalUser-CreateUser-8 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-LocalUser-CreateUser-9 |87.2%| [TA] $(B)/ydb/core/tx/datashard/ut_kqp_errors/test-results/unittest/{meta.json ... results_accumulator.log} |87.2%| [TA] {BAZEL_UPLOAD} $(B)/ydb/core/tx/datashard/ut_kqp_errors/test-results/unittest/{meta.json ... results_accumulator.log} >> SplitPathTests::WithoutDatabaseShouldSuccess [GOOD] >> TxUsage::WriteToTopic_Demo_20_RestartNo_Query [GOOD] >> BasicUsage::AlterTopicWithSharedConsumer_AlterMoveDeadLetterPolicy [GOOD] >> BasicUsage::AlterTopicWithSharedConsumer_DeleteDeadLetterPolicy_AlterMoveDeadLetterPolicy >> BasicUsage::BasicWriteSession [GOOD] >> BasicUsage::CloseWriteSessionImmediately ------- [TM] {asan, default-linux-x86_64, release} ydb/public/sdk/cpp/src/client/federated_topic/ut/unittest >> BasicUsage::WriteSessionWriteInHandlers [GOOD] Test command err: 2026-01-08T15:33:47.972948Z :WriteSessionWriteInHandlers INFO: Random seed for debugging is 1767886427972914 2026-01-08T15:33:48.521638Z node 1 :METADATA_PROVIDER WARN: log.cpp:841: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7593014393801348957:2256];send_to=[0:7307199536658146131:7762515]; 2026-01-08T15:33:48.521949Z node 1 :METADATA_PROVIDER ERROR: log.cpp:841: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2026-01-08T15:33:48.624023Z node 1 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:125: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, got undelivered to scheme cache: ActorUnknown 2026-01-08T15:33:48.674641Z node 1 :PQ_READ_PROXY DEBUG: caching_service.cpp:44: Direct read cache: : Created 2026-01-08T15:33:48.739898Z node 2 :METADATA_PROVIDER WARN: log.cpp:841: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7593014393146374943:2099];send_to=[0:7307199536658146131:7762515]; 2026-01-08T15:33:48.739970Z node 2 :METADATA_PROVIDER ERROR: log.cpp:841: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2026-01-08T15:33:48.781020Z node 2 :PQ_READ_PROXY DEBUG: caching_service.cpp:44: Direct read cache: : Created 2026-01-08T15:33:49.185952Z node 2 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: LookupError, path: Root/.metadata/script_executions 2026-01-08T15:33:49.201988Z node 1 
:KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: LookupError, path: Root/.metadata/script_executions 2026-01-08T15:33:49.254551Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2026-01-08T15:33:49.254663Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2026-01-08T15:33:49.256374Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2026-01-08T15:33:49.256426Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2026-01-08T15:33:49.326744Z node 1 :HIVE WARN: hive_impl.cpp:814: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 2 Cookie 2 2026-01-08T15:33:49.326906Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2026-01-08T15:33:49.343194Z node 1 :IMPORT WARN: schemeshard_import.cpp:440: Table profiles were not loaded 2026-01-08T15:33:49.347362Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2026-01-08T15:33:49.348684Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1374: Notification cookie mismatch for subscription [1:7593014393801348728:2082] 1767886428468101 != 1767886428468104 2026-01-08T15:33:49.443545Z node 2 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: PathErrorUnknown, path: Root/.metadata/script_executions 2026-01-08T15:33:49.454084Z node 1 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: PathErrorUnknown, path: Root/.metadata/script_executions 2026-01-08T15:33:49.466494Z node 2 :BS_NODE WARN: {NWDC01@distconf.cpp:423} StateFunc too long Type# 268639238 Duration# 0.012239s 2026-01-08T15:33:49.525399Z node 1 :TX_CONVEYOR ERROR: log.cpp:841: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2026-01-08T15:33:49.773981Z node 2 :TX_CONVEYOR ERROR: log.cpp:841: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2026-01-08T15:33:49.944998Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: /home/runner/.ya/build/build_root/663n/000219/r3tmp/yandexadtRp1.tmp 2026-01-08T15:33:49.945033Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: /home/runner/.ya/build/build_root/663n/000219/r3tmp/yandexadtRp1.tmp 2026-01-08T15:33:49.945205Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:202: successfully initialized from file: /home/runner/.ya/build/build_root/663n/000219/r3tmp/yandexadtRp1.tmp 2026-01-08T15:33:49.945289Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2026-01-08T15:33:50.131037Z INFO: TTestServer started on Port 63806 GrpcPort 28160 PQClient connected to localhost:28160 2026-01-08T15:33:50.481472Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: 
ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2026-01-08T15:33:53.524857Z node 1 :METADATA_PROVIDER ERROR: log.cpp:841: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7593014393801348957:2256];send_to=[0:7307199536658146131:7762515]; 2026-01-08T15:33:53.524923Z node 1 :METADATA_PROVIDER ERROR: log.cpp:841: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2026-01-08T15:33:53.736838Z node 2 :METADATA_PROVIDER ERROR: log.cpp:841: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[2:7593014393146374943:2099];send_to=[0:7307199536658146131:7762515]; 2026-01-08T15:33:53.736904Z node 2 :METADATA_PROVIDER ERROR: log.cpp:841: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2026-01-08T15:33:54.605903Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7593014418916179024:2304], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:33:54.605903Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7593014418916179032:2307], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:33:54.606036Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:263: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:33:54.606367Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7593014418916179039:2309], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:33:54.606434Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:263: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:33:54.614288Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976720657:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2026-01-08T15:33:54.637380Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7593014418916179038:2308], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976720657 completed, doublechecking } 2026-01-08T15:33:55.036429Z node 2 :TX_PROXY ERROR: schemereq.cpp:624: Actor# [2:7593014418916179070:2141] txid# 281474976720658, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 44], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2026-01-08T15:33:55.069063Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:33:55.086103Z node 1 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:625: Compilation failed, self: [1:7593014419571153960:2340], status: SCHEME_ERROR, issues:
: Error: Type annotation, code: 1030
:3:16: Error: At function: KiReadTable!
:3:16: Error: Cannot find table 'db.[/Root/PQ/Config/V2/Cluster]' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions., code: 2003 2026-01-08T15:33:55.087627Z node 1 :KQP_SESSION WARN: {KQPSA@kqp_session_actor.cpp:2679} SessionId: ydb://session/3?node_id=1&id=YTA4Y2E3ODUtZjkwMDcwNGYtNTY2NGIyZWQtYzVkNDI2YTc=, ActorId: [1:7593014419571153918:2332], ActorState: ExecuteState, LegacyTraceId: 01kef3wcbvan97hp2x7w041m8t, ReplyQueryCompileError, remove tx status# SCHEME_ERROR issues# { message: "Type annotation" issue_code: 1030 severity: 1 issues { position { row: 3 column: 16 } message: "At function: KiReadTable!" end_position { row: 3 column: 16 } severity: 1 issues { position { row: 3 column: 16 } message: "Cannot find table \'db.[/Root/PQ/Config/V2/Cluster]\' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions." end_position { row: 3 column: 16 } issue_code: 2003 severity: 1 } } } tx_id# trace_id# 2026-01-08T15:33:55.090059Z node 1 :PERSQUEUE_CLUSTER_TRACKER ERROR: cluster_tracker.cpp:167: failed to list clusters: { Response { QueryIssues { message: "Type annotation" issue_code: 1030 severity: 1 issues { position { row: 3 column: 16 } message: "At function: KiReadTable!" end_position { row: 3 column: 16 } severity: 1 issues { position { row: 3 column: 16 } message: "Cannot find table \'db.[/Root/PQ/Config/V2/Cluster]\' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions." end_position { row: 3 column: 16 } issue_code: 2003 severity: 1 } } } TxMeta { } } YdbStatus: SCHEME_ERROR ConsumedRu: 1 } 2026-01-08T15:33:55.102998Z node 2 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:625: Compilation failed, self: [2:7593014423211146380:2315], status: SCHEME_ERROR, issues:
: Error: Type annotation, code: 1030
:3:16: Error: At function: KiReadTable!
:3:16: Error: Cannot find table 'db.[/Root/PQ/Config/V2/Cluster]' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions., code: 2003 2026-01-08T15:33:55.104985Z node 2 :KQP_SESSION WARN: {KQPSA@kqp_session_actor.cpp:2679} SessionId: ydb://session/3?node_id=2&id=ZjQ5 ... .121605Z node 2 :PERSQUEUE DEBUG: partition_write.cpp:636: [72075186224037892][Partition][0][StateIdle] Received TPartition::TEvWrite 2026-01-08T15:34:03.121640Z node 2 :PERSQUEUE DEBUG: partition.cpp:2329: [72075186224037892][Partition][0][StateIdle] Process user action and tx events 2026-01-08T15:34:03.121651Z node 2 :PERSQUEUE DEBUG: partition.cpp:2423: [72075186224037892][Partition][0][StateIdle] TPartition::ProcessUserActionAndTxEvent(TMessage) 2026-01-08T15:34:03.121674Z node 2 :PERSQUEUE DEBUG: partition.cpp:2388: [72075186224037892][Partition][0][StateIdle] Events: 0, PendingCommits: 1, PendingWrites: 0 2026-01-08T15:34:03.121682Z node 2 :PERSQUEUE DEBUG: partition.cpp:2337: [72075186224037892][Partition][0][StateIdle] Process user action and tx pending commits 2026-01-08T15:34:03.121696Z node 2 :PERSQUEUE DEBUG: partition.cpp:2487: [72075186224037892][Partition][0][StateIdle] TPartition::ProcessUserActionAndTxPendingCommit(TMessage) 2026-01-08T15:34:03.121777Z node 2 :PERSQUEUE DEBUG: partition_write.cpp:1348: [72075186224037892][Partition][0][StateIdle] Topic 'rt3.dc1--test-topic' partition 0 part blob processing sourceId '\0src_id' seqNo 2 partNo 0 2026-01-08T15:34:03.122646Z node 2 :PERSQUEUE DEBUG: partition_write.cpp:1452: [72075186224037892][Partition][0][StateIdle] Topic 'rt3.dc1--test-topic' partition 0 part blob complete sourceId '\0src_id' seqNo 2 partNo 0 FormedBlobsCount 0 NewHead: Offset 1 PartNo 0 PackedSize 181 count 1 nextOffset 2 batches 1 2026-01-08T15:34:03.122693Z node 2 :PERSQUEUE DEBUG: partition.cpp:2388: [72075186224037892][Partition][0][StateIdle] Events: 0, PendingCommits: 0, PendingWrites: 1 2026-01-08T15:34:03.122703Z node 2 :PERSQUEUE DEBUG: partition.cpp:2342: [72075186224037892][Partition][0][StateIdle] Batch completed (1) 2026-01-08T15:34:03.122715Z node 2 :PERSQUEUE DEBUG: partition.cpp:2347: [72075186224037892][Partition][0][StateIdle] Try persist 2026-01-08T15:34:03.123030Z node 2 :PERSQUEUE DEBUG: partition_write.cpp:1711: [72075186224037892][Partition][0][StateIdle] Add new write blob: topic 'rt3.dc1--test-topic' partition 0 compactOffset 1,1 HeadOffset 1 endOffset 1 curOffset 2 d0000000000_00000000000000000001_00000_0000000001_00000? size 169 WTime 1767886443122 2026-01-08T15:34:03.123183Z node 2 :PERSQUEUE DEBUG: read.h:275: [72075186224037892][PQCacheProxy]CacheProxy. Passthrough write request to KV 2026-01-08T15:34:03.123256Z node 2 :PERSQUEUE DEBUG: read.h:313: [72075186224037892][PQCacheProxy]CacheProxy. Passthrough blob. Partition 0 offset 1 partNo 0 count 1 size 169 2026-01-08T15:34:03.126007Z node 2 :PERSQUEUE DEBUG: cache_eviction.h:311: Caching head blob in L1. 
Partition 0 offset 1 count 1 size 169 actorID [2:7593014453275917874:2394] 2026-01-08T15:34:03.126041Z node 2 :PERSQUEUE DEBUG: partition.cpp:2153: [72075186224037892][Partition][0][StateIdle] Received TEvKeyValue::TEvResponse 2026-01-08T15:34:03.126079Z node 2 :PERSQUEUE DEBUG: partition_write.cpp:572: [72075186224037892][Partition][0][StateIdle] TPartition::HandleWriteResponse writeNewSize# 114 WriteNewSizeFromSupportivePartitions# 0 2026-01-08T15:34:03.126102Z node 2 :PERSQUEUE DEBUG: partition_write.cpp:60: [72075186224037892][Partition][0][StateIdle] TPartition::ReplyWrite. Partition: 0 2026-01-08T15:34:03.126122Z node 2 :PERSQUEUE DEBUG: partition_write.cpp:365: [72075186224037892][Partition][0][StateIdle] Answering for message sourceid: '\0src_id', Topic: 'rt3.dc1--test-topic', Partition: 0, SeqNo: 2, partNo: 0, Offset: 1 is stored on disk 2026-01-08T15:34:03.126227Z node 2 :PERSQUEUE DEBUG: partition.cpp:2329: [72075186224037892][Partition][0][StateIdle] Process user action and tx events 2026-01-08T15:34:03.126239Z node 2 :PERSQUEUE DEBUG: partition.cpp:2388: [72075186224037892][Partition][0][StateIdle] Events: 0, PendingCommits: 0, PendingWrites: 0 2026-01-08T15:34:03.126246Z node 2 :PERSQUEUE DEBUG: partition.cpp:2337: [72075186224037892][Partition][0][StateIdle] Process user action and tx pending commits 2026-01-08T15:34:03.126254Z node 2 :PERSQUEUE DEBUG: partition.cpp:2388: [72075186224037892][Partition][0][StateIdle] Events: 0, PendingCommits: 0, PendingWrites: 0 2026-01-08T15:34:03.126261Z node 2 :PERSQUEUE DEBUG: partition.cpp:2347: [72075186224037892][Partition][0][StateIdle] Try persist 2026-01-08T15:34:03.126279Z node 2 :PERSQUEUE DEBUG: partition_compaction.cpp:193: [72075186224037892][Partition][0][StateIdle] No data for blobs compaction 2026-01-08T15:34:03.126299Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:182: Answer ok topic: 'rt3.dc1--test-topic' partition: 0 messageNo: 3 requestId: cookie: 2 2026-01-08T15:34:03.126391Z node 2 :PERSQUEUE DEBUG: pq_l2_cache.cpp:129: PQ Cache (L2). Adding blob. Tablet '72075186224037892' partition 0 offset 1 partno 0 count 1 parts 0 suffix '63' size 169 2026-01-08T15:34:03.126546Z node 1 :PQ_WRITE_PROXY DEBUG: writer.cpp:562: TPartitionWriter 72075186224037892 (partition=0) Received event: NActors::IEventHandle 2026-01-08T15:34:03.130500Z :DEBUG: [/Root] TraceId [] SessionId [src_id|8bf41d31-4295c278-344113b8-d61748fb_0] MessageGroupId [src_id] Write session: OnReadDone gRpcStatusCode: 0 2026-01-08T15:34:03.130660Z :DEBUG: [/Root] TraceId [] SessionId [src_id|8bf41d31-4295c278-344113b8-d61748fb_0] MessageGroupId [src_id] Write session got write response: acks { seq_no: 2 written { offset: 1 } } write_statistics { persisting_time { nanos: 3000000 } min_queue_wait_time { } max_queue_wait_time { } partition_quota_wait_time { } topic_quota_wait_time { } } 2026-01-08T15:34:03.130689Z :DEBUG: [/Root] TraceId [] SessionId [src_id|8bf41d31-4295c278-344113b8-d61748fb_0] MessageGroupId [src_id] OnAck: seqNo=2, txId=? 
2026-01-08T15:34:03.130719Z :DEBUG: [/Root] TraceId [] SessionId [src_id|8bf41d31-4295c278-344113b8-d61748fb_0] MessageGroupId [src_id] Write session: acknoledged message 2 === Inside AcksHandler === Inside SessionClosedHandler 2026-01-08T15:34:03.131761Z :DEBUG: [/Root] TraceId [] SessionId [src_id|8bf41d31-4295c278-344113b8-d61748fb_0] MessageGroupId [src_id] Write 1 messages with Id from 3 to 3 === SessionClosedHandler has 'written' a message 2026-01-08T15:34:03.131895Z :INFO: [/Root] TraceId [] SessionId [src_id|8bf41d31-4295c278-344113b8-d61748fb_0] MessageGroupId [src_id] Write session: close. Timeout 0.000000s 2026-01-08T15:34:03.131931Z :INFO: [/Root] TraceId [] SessionId [src_id|8bf41d31-4295c278-344113b8-d61748fb_0] MessageGroupId [src_id] Write session will now close 2026-01-08T15:34:03.131973Z :DEBUG: [/Root] TraceId [] SessionId [src_id|8bf41d31-4295c278-344113b8-d61748fb_0] MessageGroupId [src_id] Write session: aborting 2026-01-08T15:34:03.132729Z :WARNING: [/Root] TraceId [] SessionId [src_id|8bf41d31-4295c278-344113b8-d61748fb_0] MessageGroupId [src_id] Write session: could not confirm all writes in time or session aborted, perform hard shutdown 2026-01-08T15:34:03.132783Z :DEBUG: [/Root] TraceId [] SessionId [src_id|8bf41d31-4295c278-344113b8-d61748fb_0] MessageGroupId [src_id] Write session: OnReadDone gRpcStatusCode: 1, Msg: CANCELLED, Details: , InternalError: 0 2026-01-08T15:34:03.132822Z :DEBUG: [/Root] TraceId [] SessionId [src_id|8bf41d31-4295c278-344113b8-d61748fb_0] MessageGroupId [src_id] Write session is aborting and will not restart 2026-01-08T15:34:03.132924Z :DEBUG: [/Root] TraceId [] SessionId [src_id|8bf41d31-4295c278-344113b8-d61748fb_0] MessageGroupId [src_id] Write session: destroy 2026-01-08T15:34:03.133547Z node 1 :PQ_WRITE_PROXY DEBUG: write_session_actor.cpp:254: session v1 cookie: 2 sessionId: src_id|8bf41d31-4295c278-344113b8-d61748fb_0 grpc read done: success: 0 data: 2026-01-08T15:34:03.133573Z node 1 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:256: session v1 cookie: 2 sessionId: src_id|8bf41d31-4295c278-344113b8-d61748fb_0 grpc read failed 2026-01-08T15:34:03.133605Z node 1 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:232: session v1 cookie: 2 sessionId: src_id|8bf41d31-4295c278-344113b8-d61748fb_0 grpc closed 2026-01-08T15:34:03.133622Z node 1 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:304: session v1 cookie: 2 sessionId: src_id|8bf41d31-4295c278-344113b8-d61748fb_0 is DEAD 2026-01-08T15:34:03.134956Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:2809: [PQ: 72075186224037892] server disconnected, pipe [1:7593014458225860694:2474] destroyed 2026-01-08T15:34:03.134520Z node 1 :PQ_WRITE_PROXY DEBUG: writer.cpp:562: TPartitionWriter 72075186224037892 (partition=0) Received event: NActors::TEvents::TEvPoison 2026-01-08T15:34:03.134998Z node 2 :PERSQUEUE DEBUG: partition_write.cpp:140: [72075186224037892][Partition][0][StateIdle] TPartition::DropOwner. 
2026-01-08T15:34:03.135026Z node 2 :PERSQUEUE DEBUG: partition.cpp:2329: [72075186224037892][Partition][0][StateIdle] Process user action and tx events 2026-01-08T15:34:03.135041Z node 2 :PERSQUEUE DEBUG: partition.cpp:2388: [72075186224037892][Partition][0][StateIdle] Events: 0, PendingCommits: 0, PendingWrites: 0 2026-01-08T15:34:03.135050Z node 2 :PERSQUEUE DEBUG: partition.cpp:2337: [72075186224037892][Partition][0][StateIdle] Process user action and tx pending commits 2026-01-08T15:34:03.135063Z node 2 :PERSQUEUE DEBUG: partition.cpp:2388: [72075186224037892][Partition][0][StateIdle] Events: 0, PendingCommits: 0, PendingWrites: 0 2026-01-08T15:34:03.135072Z node 2 :PERSQUEUE DEBUG: partition.cpp:2347: [72075186224037892][Partition][0][StateIdle] Try persist 2026-01-08T15:34:03.198955Z node 2 :PERSQUEUE DEBUG: partition.cpp:2329: [72075186224037892][Partition][0][StateIdle] Process user action and tx events 2026-01-08T15:34:03.198986Z node 2 :PERSQUEUE DEBUG: partition.cpp:2388: [72075186224037892][Partition][0][StateIdle] Events: 0, PendingCommits: 0, PendingWrites: 0 2026-01-08T15:34:03.198998Z node 2 :PERSQUEUE DEBUG: partition.cpp:2337: [72075186224037892][Partition][0][StateIdle] Process user action and tx pending commits 2026-01-08T15:34:03.199016Z node 2 :PERSQUEUE DEBUG: partition.cpp:2388: [72075186224037892][Partition][0][StateIdle] Events: 0, PendingCommits: 0, PendingWrites: 0 2026-01-08T15:34:03.199026Z node 2 :PERSQUEUE DEBUG: partition.cpp:2347: [72075186224037892][Partition][0][StateIdle] Try persist 2026-01-08T15:34:03.299440Z node 2 :PERSQUEUE DEBUG: partition.cpp:2329: [72075186224037892][Partition][0][StateIdle] Process user action and tx events 2026-01-08T15:34:03.299470Z node 2 :PERSQUEUE DEBUG: partition.cpp:2388: [72075186224037892][Partition][0][StateIdle] Events: 0, PendingCommits: 0, PendingWrites: 0 2026-01-08T15:34:03.299480Z node 2 :PERSQUEUE DEBUG: partition.cpp:2337: [72075186224037892][Partition][0][StateIdle] Process user action and tx pending commits 2026-01-08T15:34:03.299495Z node 2 :PERSQUEUE DEBUG: partition.cpp:2388: [72075186224037892][Partition][0][StateIdle] Events: 0, PendingCommits: 0, PendingWrites: 0 2026-01-08T15:34:03.299505Z node 2 :PERSQUEUE DEBUG: partition.cpp:2347: [72075186224037892][Partition][0][StateIdle] Try persist 2026-01-08T15:34:03.601855Z node 1 :KQP_EXECUTER ERROR: {KQPEX@kqp_executer_impl.h:1259} ActorId: [1:7593014458225860729:2478] TxId: 281474976715676. Ctx: { TraceId: 01kef3wmqgb5j999cb6kb8kaqg, Database: /Root, SessionId: ydb://session/3?node_id=1&id=M2Y3YWQ3NGQtZGQ5YThhZDAtYTA1YWM3YjItM2QyZTg1MDM=, PoolId: default, IsStreamingQuery: 0}. UNAVAILABLE: Failed to send EvStartKqpTasksRequest because node is unavailable: 2 trace_id# 2026-01-08T15:34:03.602558Z node 1 :KQP_COMPUTE ERROR: dq_compute_actor_impl.h:1264: SelfId: [1:7593014458225860733:2478], TxId: 281474976715676, task: 3. Ctx: { CheckpointId : . TraceId : 01kef3wmqgb5j999cb6kb8kaqg. CustomerSuppliedId : . RunScriptActorId : [0:0:0]. PoolId : default. SessionId : ydb://session/3?node_id=1&id=M2Y3YWQ3NGQtZGQ5YThhZDAtYTA1YWM3YjItM2QyZTg1MDM=. CurrentExecutionId : . Database : /Root. DatabaseId : /Root. }. Handle abort execution event from: [1:7593014458225860729:2478], status: UNAVAILABLE, reason: {
: Error: Terminate execution } |87.2%| [TM] {BAZEL_UPLOAD} ydb/public/sdk/cpp/src/client/federated_topic/ut/unittest |87.2%| [TM] {asan, default-linux-x86_64, release} ydb/core/grpc_services/ut/unittest >> TxUsage::WriteToTopic_Demo_18_RestartBeforeCommit_Table |87.2%| [TM] {BAZEL_UPLOAD} ydb/core/grpc_services/ut/unittest |87.2%| [TM] {asan, default-linux-x86_64, release} ydb/core/grpc_services/ut/unittest >> SplitPathTests::WithoutDatabaseShouldSuccess [GOOD] |87.2%| [TM] {BAZEL_UPLOAD} ydb/core/grpc_services/ut/unittest |87.2%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/services/ext_index/ut/ydb-services-ext_index-ut |87.2%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/services/ext_index/ut/ydb-services-ext_index-ut |87.2%| [TA] {RESULT} $(B)/ydb/core/tx/datashard/ut_kqp_errors/test-results/unittest/{meta.json ... results_accumulator.log} >> TxUsage::WriteToTopic_Demo_20_RestartBeforeCommit_Table |87.2%| [LD] {RESULT} $(B)/ydb/services/ext_index/ut/ydb-services-ext_index-ut >> KqpPg::LongDomainName [GOOD] >> BasicUsage::FallbackToSingleDbAfterBadRequest [GOOD] >> OperationMapping::IndexBuildCanceled [GOOD] >> Yq_1::CreateConnections_With_Idempotency [GOOD] >> TTxDataShardReshuffleKMeansScan::MainToBuild [GOOD] >> TTxDataShardReshuffleKMeansScan::MainToBuildWithOverlap |87.2%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/schemeshard/ut_filestore_reboots/ydb-core-tx-schemeshard-ut_filestore_reboots |87.2%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/schemeshard/ut_filestore_reboots/ydb-core-tx-schemeshard-ut_filestore_reboots |87.2%| [LD] {RESULT} $(B)/ydb/core/tx/schemeshard/ut_filestore_reboots/ydb-core-tx-schemeshard-ut_filestore_reboots |87.2%| [TM] {asan, default-linux-x86_64, release} ydb/core/grpc_services/ut/unittest |87.2%| [TM] {BAZEL_UPLOAD} ydb/core/grpc_services/ut/unittest |87.2%| [TM] {asan, default-linux-x86_64, release} ydb/core/grpc_services/ut/unittest >> OperationMapping::IndexBuildCanceled [GOOD] |87.2%| [TM] {BAZEL_UPLOAD} ydb/core/grpc_services/ut/unittest >> KqpRboYql::OlapPredicatePushdown [GOOD] >> KqpRboYql::OrderBy >> test_s3.py::TestYdbS3TTL::test_s3[table_index_4__SYNC-pk_types5-all_types5-index5---SYNC] [GOOD] >> KqpRboPg::UnionAll [GOOD] >> KqpPg::CheckPgAutoParams-useSink [GOOD] >> KqpRboYql::Aggregation+ColumnStore >> SplitPathTests::WithDatabaseShouldFail [GOOD] >> SplitPathTests::WithDatabaseShouldSuccess [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/public/sdk/cpp/src/client/federated_topic/ut/unittest >> BasicUsage::FallbackToSingleDbAfterBadRequest [GOOD] Test command err: 2026-01-08T15:33:35.121737Z :FallbackToSingleDb INFO: Random seed for debugging is 1767886415121697 2026-01-08T15:33:35.696063Z node 1 :METADATA_PROVIDER WARN: log.cpp:841: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7593014337461170232:2263];send_to=[0:7307199536658146131:7762515]; 2026-01-08T15:33:35.696129Z node 1 :METADATA_PROVIDER ERROR: log.cpp:841: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2026-01-08T15:33:35.794850Z node 1 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:125: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, got undelivered to scheme cache: ActorUnknown 2026-01-08T15:33:35.795386Z node 1 :PQ_READ_PROXY DEBUG: caching_service.cpp:44: Direct read cache: : Created 2026-01-08T15:33:35.848887Z node 2 :METADATA_PROVIDER WARN: 
log.cpp:841: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7593014338988465431:2153];send_to=[0:7307199536658146131:7762515]; 2026-01-08T15:33:35.848942Z node 2 :METADATA_PROVIDER ERROR: log.cpp:841: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2026-01-08T15:33:35.895078Z node 2 :PQ_READ_PROXY DEBUG: caching_service.cpp:44: Direct read cache: : Created 2026-01-08T15:33:36.192030Z node 1 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: LookupError, path: Root/.metadata/script_executions 2026-01-08T15:33:36.193603Z node 2 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: LookupError, path: Root/.metadata/script_executions 2026-01-08T15:33:36.384628Z node 1 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: LookupError, path: Root/.metadata/script_executions 2026-01-08T15:33:36.407273Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2026-01-08T15:33:36.407371Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2026-01-08T15:33:36.420940Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2026-01-08T15:33:36.421011Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2026-01-08T15:33:36.500772Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2026-01-08T15:33:36.544959Z node 1 :HIVE WARN: hive_impl.cpp:814: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 2 Cookie 2 2026-01-08T15:33:36.568591Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2026-01-08T15:33:36.591507Z node 2 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: PathErrorUnknown, path: Root/.metadata/script_executions 2026-01-08T15:33:36.685531Z node 1 :IMPORT WARN: schemeshard_import.cpp:440: Table profiles were not loaded 2026-01-08T15:33:36.701863Z node 1 :TX_CONVEYOR ERROR: log.cpp:841: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2026-01-08T15:33:36.795267Z node 1 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: PathErrorUnknown, path: Root/.metadata/script_executions 2026-01-08T15:33:36.883242Z node 2 :TX_CONVEYOR ERROR: log.cpp:841: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2026-01-08T15:33:37.010979Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: /home/runner/.ya/build/build_root/663n/00127d/r3tmp/yandexvOxrdr.tmp 2026-01-08T15:33:37.011001Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: 
will try to initialize from file: /home/runner/.ya/build/build_root/663n/00127d/r3tmp/yandexvOxrdr.tmp 2026-01-08T15:33:37.011130Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:202: successfully initialized from file: /home/runner/.ya/build/build_root/663n/00127d/r3tmp/yandexvOxrdr.tmp 2026-01-08T15:33:37.011223Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2026-01-08T15:33:37.154912Z INFO: TTestServer started on Port 22303 GrpcPort 14997 PQClient connected to localhost:14997 2026-01-08T15:33:37.543825Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976720657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2026-01-08T15:33:40.651139Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7593014360463302159:2306], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:33:40.651248Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7593014360463302133:2302], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:33:40.651454Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:263: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:33:40.662134Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7593014360463302186:2308], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:33:40.662227Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:263: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:33:40.662550Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7593014360463302189:2309], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:33:40.662615Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:263: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:33:40.665347Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715657:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2026-01-08T15:33:40.696122Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7593014360463302163:2307], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715657 completed, doublechecking } 2026-01-08T15:33:40.696261Z node 1 :METADATA_PROVIDER ERROR: log.cpp:841: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7593014337461170232:2263];send_to=[0:7307199536658146131:7762515]; 2026-01-08T15:33:40.696325Z node 1 :METADATA_PROVIDER ERROR: log.cpp:841: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2026-01-08T15:33:40.805656Z node 2 :TX_PROXY ERROR: schemereq.cpp:624: Actor# [2:7593014360463302195:2140] txid# 281474976715658, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 44], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2026-01-08T15:33:40.848727Z node 2 :METADATA_PROVIDER ERROR: log.cpp:841: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[2:7593014338988465431:2153];send_to=[0:7307199536658146131:7762515]; 2026-01-08T15:33:40.848814Z node 2 :METADATA_PROVIDER ERROR: log.cpp:841: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2026-01-08T15:33:41.202219Z node 1 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:625: Compilation failed, self: [1:7593014358936007944:2338], status: SCHEME_ERROR, issues:
: Error: Type annotation, code: 1030
:3:16: Error: At function: KiReadTable!
:3:16: Error: Cannot find table 'db.[/Root/PQ/Config/V2/Cluster]' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions., code: 2003 2026-01-08T15:33:41.202150Z node 2 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:625: Compilation failed, self: [2:7593014360463302209:2313], status: SCHEME_ERROR, issues:
: Error: Type annotation, code: 1030
:3:16: Error: At function: KiReadTable!
:3:16: Error: Cannot find table 'db.[/Root/PQ/Config/V2/Cluster]' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions., code: 2003 2026-01-08T15:33:41.204689Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976720661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:33:41.205121Z node 1 :KQP_SESSION WARN: {KQPSA@kqp_session_actor.cpp:2679} SessionId: ydb://session/3?node_id=1&id=M2M2OGRiZjgtMWU4NTdlYmQtYWEzMjBmM2EtMWQ5OWJlNzQ=, ActorId: [1:7593014358936007894:2330], ActorState: ExecuteState, LegacyTraceId: 01kef3vysqa0c2qd9f9ejvkxrp, ReplyQueryCompileError, remove tx status# SCHEME_ERROR issues# { message: "Type annotation" issue_code: 1030 severity: 1 issues { position { row: 3 column: 16 } message: "At function: KiReadTable!" end_position { row: 3 column: 16 } severity: 1 issues { position { row: 3 column: 16 } message: "Cannot find table \'db.[/Root/PQ/Config/V2/Cluster]\' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions." end_position { row: 3 column: 16 } issue_code: 2003 severity: 1 } } } tx_id# trace_id# 2026-01-08T15:33:41.203038Z node 2 :KQP_SESSION WARN: {KQPSA@kqp_session_actor.cpp:2679} SessionId: ydb://session/3?node_id=2&id=NTA1MzMwLTI2MWNiOWExLTc1MzExM2Y1LThhZjI4ZTZi, ActorId: [2:7593014360463302121:2299], ActorState: ExecuteState, LegacyTraceId: 01kef3vymrdtzss3sfxxtqczhr, R ... ERSQUEUE DEBUG: partition.cpp:2347: [72075186224037892][Partition][0][StateIdle] Try persist 2026-01-08T15:34:06.034435Z node 4 :PERSQUEUE DEBUG: partition.cpp:2329: [72075186224037892][Partition][0][StateIdle] Process user action and tx events 2026-01-08T15:34:06.034471Z node 4 :PERSQUEUE DEBUG: partition.cpp:2388: [72075186224037892][Partition][0][StateIdle] Events: 0, PendingCommits: 0, PendingWrites: 0 2026-01-08T15:34:06.034483Z node 4 :PERSQUEUE DEBUG: partition.cpp:2337: [72075186224037892][Partition][0][StateIdle] Process user action and tx pending commits 2026-01-08T15:34:06.034503Z node 4 :PERSQUEUE DEBUG: partition.cpp:2388: [72075186224037892][Partition][0][StateIdle] Events: 0, PendingCommits: 0, PendingWrites: 0 2026-01-08T15:34:06.034514Z node 4 :PERSQUEUE DEBUG: partition.cpp:2347: [72075186224037892][Partition][0][StateIdle] Try persist 2026-01-08T15:34:06.138301Z node 4 :PERSQUEUE DEBUG: partition.cpp:2329: [72075186224037892][Partition][0][StateIdle] Process user action and tx events 2026-01-08T15:34:06.138348Z node 4 :PERSQUEUE DEBUG: partition.cpp:2388: [72075186224037892][Partition][0][StateIdle] Events: 0, PendingCommits: 0, PendingWrites: 0 2026-01-08T15:34:06.138359Z node 4 :PERSQUEUE DEBUG: partition.cpp:2337: [72075186224037892][Partition][0][StateIdle] Process user action and tx pending commits 2026-01-08T15:34:06.138376Z node 4 :PERSQUEUE DEBUG: partition.cpp:2388: [72075186224037892][Partition][0][StateIdle] Events: 0, PendingCommits: 0, PendingWrites: 0 2026-01-08T15:34:06.138388Z node 4 :PERSQUEUE DEBUG: partition.cpp:2347: [72075186224037892][Partition][0][StateIdle] Try persist 2026-01-08T15:34:06.238268Z node 4 :PERSQUEUE DEBUG: partition.cpp:2329: [72075186224037892][Partition][0][StateIdle] Process user action and tx events 2026-01-08T15:34:06.238298Z node 4 :PERSQUEUE 
DEBUG: partition.cpp:2388: [72075186224037892][Partition][0][StateIdle] Events: 0, PendingCommits: 0, PendingWrites: 0 2026-01-08T15:34:06.238310Z node 4 :PERSQUEUE DEBUG: partition.cpp:2337: [72075186224037892][Partition][0][StateIdle] Process user action and tx pending commits 2026-01-08T15:34:06.238346Z node 4 :PERSQUEUE DEBUG: partition.cpp:2388: [72075186224037892][Partition][0][StateIdle] Events: 0, PendingCommits: 0, PendingWrites: 0 2026-01-08T15:34:06.238359Z node 4 :PERSQUEUE DEBUG: partition.cpp:2347: [72075186224037892][Partition][0][StateIdle] Try persist 2026-01-08T15:34:06.338714Z node 4 :PERSQUEUE DEBUG: partition.cpp:2329: [72075186224037892][Partition][0][StateIdle] Process user action and tx events 2026-01-08T15:34:06.338751Z node 4 :PERSQUEUE DEBUG: partition.cpp:2388: [72075186224037892][Partition][0][StateIdle] Events: 0, PendingCommits: 0, PendingWrites: 0 2026-01-08T15:34:06.338767Z node 4 :PERSQUEUE DEBUG: partition.cpp:2337: [72075186224037892][Partition][0][StateIdle] Process user action and tx pending commits 2026-01-08T15:34:06.338790Z node 4 :PERSQUEUE DEBUG: partition.cpp:2388: [72075186224037892][Partition][0][StateIdle] Events: 0, PendingCommits: 0, PendingWrites: 0 2026-01-08T15:34:06.338805Z node 4 :PERSQUEUE DEBUG: partition.cpp:2347: [72075186224037892][Partition][0][StateIdle] Try persist 2026-01-08T15:34:06.442426Z node 4 :PERSQUEUE DEBUG: partition.cpp:2329: [72075186224037892][Partition][0][StateIdle] Process user action and tx events 2026-01-08T15:34:06.442460Z node 4 :PERSQUEUE DEBUG: partition.cpp:2388: [72075186224037892][Partition][0][StateIdle] Events: 0, PendingCommits: 0, PendingWrites: 0 2026-01-08T15:34:06.442469Z node 4 :PERSQUEUE DEBUG: partition.cpp:2337: [72075186224037892][Partition][0][StateIdle] Process user action and tx pending commits 2026-01-08T15:34:06.442483Z node 4 :PERSQUEUE DEBUG: partition.cpp:2388: [72075186224037892][Partition][0][StateIdle] Events: 0, PendingCommits: 0, PendingWrites: 0 2026-01-08T15:34:06.442496Z node 4 :PERSQUEUE DEBUG: partition.cpp:2347: [72075186224037892][Partition][0][StateIdle] Try persist 2026-01-08T15:34:06.542552Z node 4 :PERSQUEUE DEBUG: partition.cpp:2329: [72075186224037892][Partition][0][StateIdle] Process user action and tx events 2026-01-08T15:34:06.542579Z node 4 :PERSQUEUE DEBUG: partition.cpp:2388: [72075186224037892][Partition][0][StateIdle] Events: 0, PendingCommits: 0, PendingWrites: 0 2026-01-08T15:34:06.542588Z node 4 :PERSQUEUE DEBUG: partition.cpp:2337: [72075186224037892][Partition][0][StateIdle] Process user action and tx pending commits 2026-01-08T15:34:06.542601Z node 4 :PERSQUEUE DEBUG: partition.cpp:2388: [72075186224037892][Partition][0][StateIdle] Events: 0, PendingCommits: 0, PendingWrites: 0 2026-01-08T15:34:06.542610Z node 4 :PERSQUEUE DEBUG: partition.cpp:2347: [72075186224037892][Partition][0][StateIdle] Try persist >>> Ready to answer: ok 2026-01-08T15:34:06.615237Z :INFO: [/Root] OnFederationDiscovery fall back to single mode, database=/Root 2026-01-08T15:34:06.615343Z :INFO: [/Root] [] [9fc94741-39fd1999-50792591-c6f8b73e] Open read subsessions to databases: { name: , endpoint: localhost:11229, path: /Root } 2026-01-08T15:34:06.615590Z :INFO: [/Root] [/Root] [14060720-966b95ab-e65bd4a3-94b6c7dc] Starting read session 2026-01-08T15:34:06.615643Z :DEBUG: [/Root] [/Root] [14060720-966b95ab-e65bd4a3-94b6c7dc] Starting single session 2026-01-08T15:34:06.616207Z :DEBUG: [/Root] [/Root] [14060720-966b95ab-e65bd4a3-94b6c7dc] [] In Reconnect, ReadSizeBudget = 524288, 
ReadSizeServerDelta = 0 2026-01-08T15:34:06.616256Z :DEBUG: [/Root] [/Root] [14060720-966b95ab-e65bd4a3-94b6c7dc] [] New values: ReadSizeBudget = 524288, ReadSizeServerDelta = 0 2026-01-08T15:34:06.616300Z :DEBUG: [/Root] [/Root] [14060720-966b95ab-e65bd4a3-94b6c7dc] [] Reconnecting session to cluster in 0.000000s 2026-01-08T15:34:06.616527Z :ERROR: [/Root] [/Root] [14060720-966b95ab-e65bd4a3-94b6c7dc] [] Got error. Status: CLIENT_CALL_UNIMPLEMENTED. Description:
: Error: GRpc error: (12):
: Error: Grpc error response on endpoint localhost:11229
: Error: Endpoint list is empty for database /Root, cluster endpoint localhost:11229. 2026-01-08T15:34:06.616603Z :DEBUG: [/Root] [/Root] [14060720-966b95ab-e65bd4a3-94b6c7dc] [] In Reconnect, ReadSizeBudget = 524288, ReadSizeServerDelta = 0 2026-01-08T15:34:06.616647Z :DEBUG: [/Root] [/Root] [14060720-966b95ab-e65bd4a3-94b6c7dc] [] New values: ReadSizeBudget = 524288, ReadSizeServerDelta = 0 2026-01-08T15:34:06.616795Z :INFO: [/Root] [/Root] [14060720-966b95ab-e65bd4a3-94b6c7dc] [] Closing session to cluster: SessionClosed { Status: CLIENT_CALL_UNIMPLEMENTED Issues: "
: Error: Failed to establish connection to server "localhost:11229" ( cluster ). Attempts done: 1
: Error: GRpc error: (12):
: Error: Grpc error response on endpoint localhost:11229
: Error: Endpoint list is empty for database /Root, cluster endpoint localhost:11229. " } 2026-01-08T15:34:06.620456Z :NOTICE: [/Root] [/Root] [14060720-966b95ab-e65bd4a3-94b6c7dc] Aborting read session. Description: SessionClosed { Status: ABORTED Issues: "
: Error: Aborted " } 2026-01-08T15:34:06.620514Z :DEBUG: [/Root] [/Root] [14060720-966b95ab-e65bd4a3-94b6c7dc] [] Abort session to cluster Got new read session event: SessionClosed { Status: CLIENT_CALL_UNIMPLEMENTED Issues: "
: Error: Failed to establish connection to server "localhost:11229" ( cluster ). Attempts done: 1
: Error: GRpc error: (12):
: Error: Grpc error response on endpoint localhost:11229
: Error: Endpoint list is empty for database /Root, cluster endpoint localhost:11229. " } 2026-01-08T15:34:06.620638Z :INFO: [/Root] [/Root] [14060720-966b95ab-e65bd4a3-94b6c7dc] Closing read session. Close timeout: 0.010000s 2026-01-08T15:34:06.620680Z :INFO: [/Root] Read/commit by partition streams (cluster:topic:partition:stream-id:read-offset:committed-offset): 2026-01-08T15:34:06.620723Z :INFO: [/Root] [/Root] [14060720-966b95ab-e65bd4a3-94b6c7dc] Counters: { Errors: 1 CurrentSessionLifetimeMs: 5 BytesRead: 0 MessagesRead: 0 BytesReadCompressed: 0 BytesInflightUncompressed: 0 BytesInflightCompressed: 0 BytesInflightTotal: 0 MessagesInflight: 0 } 2026-01-08T15:34:06.620760Z :INFO: [/Root] [/Root] [14060720-966b95ab-e65bd4a3-94b6c7dc] Closing read session. Close timeout: 0.000000s 2026-01-08T15:34:06.620795Z :INFO: [/Root] Read/commit by partition streams (cluster:topic:partition:stream-id:read-offset:committed-offset): 2026-01-08T15:34:06.620827Z :INFO: [/Root] [/Root] [14060720-966b95ab-e65bd4a3-94b6c7dc] Counters: { Errors: 1 CurrentSessionLifetimeMs: 5 BytesRead: 0 MessagesRead: 0 BytesReadCompressed: 0 BytesInflightUncompressed: 0 BytesInflightCompressed: 0 BytesInflightTotal: 0 MessagesInflight: 0 } 2026-01-08T15:34:06.620880Z :INFO: [/Root] [/Root] [14060720-966b95ab-e65bd4a3-94b6c7dc] Closing read session. Close timeout: 0.000000s 2026-01-08T15:34:06.620911Z :INFO: [/Root] Read/commit by partition streams (cluster:topic:partition:stream-id:read-offset:committed-offset): 2026-01-08T15:34:06.620946Z :INFO: [/Root] [/Root] [14060720-966b95ab-e65bd4a3-94b6c7dc] Counters: { Errors: 1 CurrentSessionLifetimeMs: 5 BytesRead: 0 MessagesRead: 0 BytesReadCompressed: 0 BytesInflightUncompressed: 0 BytesInflightCompressed: 0 BytesInflightTotal: 0 MessagesInflight: 0 } 2026-01-08T15:34:06.621015Z :NOTICE: [/Root] [/Root] [14060720-966b95ab-e65bd4a3-94b6c7dc] Aborting read session. Description: SessionClosed { Status: ABORTED Issues: "
: Error: Aborted " } 2026-01-08T15:34:06.646467Z node 4 :PERSQUEUE DEBUG: partition.cpp:2329: [72075186224037892][Partition][0][StateIdle] Process user action and tx events 2026-01-08T15:34:06.646506Z node 4 :PERSQUEUE DEBUG: partition.cpp:2388: [72075186224037892][Partition][0][StateIdle] Events: 0, PendingCommits: 0, PendingWrites: 0 2026-01-08T15:34:06.646518Z node 4 :PERSQUEUE DEBUG: partition.cpp:2337: [72075186224037892][Partition][0][StateIdle] Process user action and tx pending commits 2026-01-08T15:34:06.646537Z node 4 :PERSQUEUE DEBUG: partition.cpp:2388: [72075186224037892][Partition][0][StateIdle] Events: 0, PendingCommits: 0, PendingWrites: 0 2026-01-08T15:34:06.646549Z node 4 :PERSQUEUE DEBUG: partition.cpp:2347: [72075186224037892][Partition][0][StateIdle] Try persist 2026-01-08T15:34:06.747166Z node 4 :PERSQUEUE DEBUG: partition.cpp:2329: [72075186224037892][Partition][0][StateIdle] Process user action and tx events 2026-01-08T15:34:06.747198Z node 4 :PERSQUEUE DEBUG: partition.cpp:2388: [72075186224037892][Partition][0][StateIdle] Events: 0, PendingCommits: 0, PendingWrites: 0 2026-01-08T15:34:06.747209Z node 4 :PERSQUEUE DEBUG: partition.cpp:2337: [72075186224037892][Partition][0][StateIdle] Process user action and tx pending commits 2026-01-08T15:34:06.747230Z node 4 :PERSQUEUE DEBUG: partition.cpp:2388: [72075186224037892][Partition][0][StateIdle] Events: 0, PendingCommits: 0, PendingWrites: 0 2026-01-08T15:34:06.747242Z node 4 :PERSQUEUE DEBUG: partition.cpp:2347: [72075186224037892][Partition][0][StateIdle] Try persist 2026-01-08T15:34:06.847628Z node 4 :PERSQUEUE DEBUG: partition.cpp:2329: [72075186224037892][Partition][0][StateIdle] Process user action and tx events 2026-01-08T15:34:06.847664Z node 4 :PERSQUEUE DEBUG: partition.cpp:2388: [72075186224037892][Partition][0][StateIdle] Events: 0, PendingCommits: 0, PendingWrites: 0 2026-01-08T15:34:06.847676Z node 4 :PERSQUEUE DEBUG: partition.cpp:2337: [72075186224037892][Partition][0][StateIdle] Process user action and tx pending commits 2026-01-08T15:34:06.847696Z node 4 :PERSQUEUE DEBUG: partition.cpp:2388: [72075186224037892][Partition][0][StateIdle] Events: 0, PendingCommits: 0, PendingWrites: 0 2026-01-08T15:34:06.847708Z node 4 :PERSQUEUE DEBUG: partition.cpp:2347: [72075186224037892][Partition][0][StateIdle] Try persist 2026-01-08T15:34:06.937649Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7747: Cannot get console configs 2026-01-08T15:34:06.937687Z node 3 :IMPORT WARN: schemeshard_import.cpp:440: Table profiles were not loaded |87.2%| [TM] {BAZEL_UPLOAD} ydb/public/sdk/cpp/src/client/federated_topic/ut/unittest ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/pg/unittest >> KqpPg::LongDomainName [GOOD] Test command err: Trying to start YDB, gRPC: 9128, MsgBus: 13937 2026-01-08T15:31:27.888058Z node 1 :METADATA_PROVIDER WARN: log.cpp:841: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7593013786133372476:2067];send_to=[0:7307199536658146131:7762515]; 2026-01-08T15:31:27.888095Z node 1 :METADATA_PROVIDER ERROR: log.cpp:841: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2026-01-08T15:31:28.537289Z node 1 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: LookupError, path: Root/.metadata/script_executions 
2026-01-08T15:31:28.629846Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2026-01-08T15:31:28.630030Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2026-01-08T15:31:28.703589Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2026-01-08T15:31:28.860976Z node 1 :IMPORT WARN: schemeshard_import.cpp:440: Table profiles were not loaded 2026-01-08T15:31:28.866467Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1374: Notification cookie mismatch for subscription [1:7593013786133372449:2081] 1767886287869735 != 1767886287869738 2026-01-08T15:31:29.114561Z node 1 :TX_CONVEYOR ERROR: log.cpp:841: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2026-01-08T15:31:29.127095Z node 1 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: PathErrorUnknown, path: Root/.metadata/script_executions 2026-01-08T15:31:29.128445Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2026-01-08T15:31:29.128471Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2026-01-08T15:31:29.128479Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2026-01-08T15:31:29.128552Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2026-01-08T15:31:29.909779Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2026-01-08T15:31:29.928755Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:123: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2026-01-08T15:31:32.889486Z node 1 :METADATA_PROVIDER ERROR: log.cpp:841: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7593013786133372476:2067];send_to=[0:7307199536658146131:7762515]; 2026-01-08T15:31:32.889558Z node 1 :METADATA_PROVIDER ERROR: log.cpp:841: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2026-01-08T15:31:33.066144Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7593013811903177125:2325], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:31:33.066258Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:263: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:31:33.066669Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7593013811903177137:2328], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:31:33.066715Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7593013811903177138:2329], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:31:33.066816Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:263: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:31:33.071045Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2026-01-08T15:31:33.084159Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7593013811903177141:2330], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710658 completed, doublechecking } 2026-01-08T15:31:33.172884Z node 1 :TX_PROXY ERROR: schemereq.cpp:624: Actor# [1:7593013811903177195:2541] txid# 281474976710659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 41], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } Trying to start YDB, gRPC: 19534, MsgBus: 14667 2026-01-08T15:31:34.427234Z node 2 :METADATA_PROVIDER WARN: log.cpp:841: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7593013819824535244:2070];send_to=[0:7307199536658146131:7762515]; 2026-01-08T15:31:34.427283Z node 2 :METADATA_PROVIDER ERROR: log.cpp:841: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2026-01-08T15:31:34.465368Z node 2 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:125: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, got undelivered to scheme cache: ActorUnknown 2026-01-08T15:31:34.613400Z node 2 :IMPORT WARN: schemeshard_import.cpp:440: Table profiles were not loaded 2026-01-08T15:31:34.621628Z node 2 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: LookupError, path: Root/.metadata/script_executions 2026-01-08T15:31:34.621734Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1374: Notification cookie mismatch for subscription [2:7593013819824535212:2081] 1767886294419277 != 1767886294419280 2026-01-08T15:31:34.704413Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2026-01-08T15:31:34.704486Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2026-01-08T15:31:34.710495Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2026-01-08T15:31:34.863728Z node 2 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: PathErrorUnknown, path: Root/.metadata/script_executions 2026-01-08T15:31:34.942867Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2026-01-08T15:31:34.942890Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2026-01-08T15:31:34.942896Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2026-01-08T15:31:34.943006Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2026-01-08T15:31:35.453133Z node 2 :TX_CONVEYOR ERROR: log.cpp:841: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2026-01-08T15:31:35.595934Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 
2026-01-08T15:31:35.604813Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:123: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2026-01-08T15:31:38.910964Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7593013837004405280:2325], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:31:38.911065Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:263: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:31:38.912877Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7593013837004405292:2328], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:31:38.912877Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7593013837004405293:2329], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:31:38.912947Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:263: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:31:38.916578Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2026-01-08T15:31:38.929448Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7593013837004405296:2330], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710658 completed, doublechecking } 2026-01-08T15:31:39.013542Z node 2 :TX_PROXY ERROR: schemereq.cpp:624: Actor# [2:7593013841299372643:2536] txid# 281474976710659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 41], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2 ... :225: [WorkloadService] [TPoolFetcherActor] ActorId: [10:7593014422278216859:2331], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:33:55.843824Z node 10 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [10:7593014422278216858:2330], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:33:55.844026Z node 10 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:263: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:33:55.850520Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2026-01-08T15:33:55.874104Z node 10 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [10:7593014422278216862:2332], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710658 completed, doublechecking } 2026-01-08T15:33:55.960604Z node 10 :TX_PROXY ERROR: schemereq.cpp:624: Actor# [10:7593014422278216913:2546] txid# 281474976710659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 41], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2026-01-08T15:33:56.042030Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) {"Plan":{"Plans":[{"PlanNodeId":4,"Plans":[{"PlanNodeId":3,"Plans":[{"PlanNodeId":2,"Plans":[{"Tables":["pgbench_accounts"],"PlanNodeId":1,"Operators":[{"Scan":"Parallel","E-Size":"0","Name":"TableRangeScan","Inputs":[],"Path":"\/Root\/pgbench_accounts","E-Rows":"0","Table":"pgbench_accounts","ReadRangesKeys":["aid"],"ReadColumns":["aid (null, 3)","aid [7, 7]","abalance"],"E-Cost":"0","ReadRangesExpectedSize":"2"}],"Node Type":"TableRangeScan"}],"Node Type":"UnionAll","PlanNodeType":"Connection"}],"Operators":[{"Inputs":[{"ExternalPlanNodeId":2}],"SortBy":"input.abalance","Name":"Sort"}],"Node Type":"Sort"}],"Node Type":"ResultSet_1","PlanNodeType":"ResultSet"}],"Node Type":"Query","Stats":{"ResourcePoolId":"default"},"PlanNodeType":"Query"},"meta":{"version":"0.2","type":"query"},"tables":[{"name":"\/Root\/pgbench_accounts","reads":[{"columns":["abalance"],"scan_by":["aid (null, 3)","aid [7, 7]"],"type":"Scan"}]}],"SimplifiedPlan":{"PlanNodeId":0,"Plans":[{"PlanNodeId":1,"Plans":[{"PlanNodeId":2,"Plans":[{"PlanNodeId":4,"Operators":[{"E-Size":"0","Name":"TableRangeScan","E-Rows":"0","Table":"pgbench_accounts","ReadRangesKeys":["aid"],"ReadColumns":["aid (null, 3)","aid [7, 7]","abalance"],"E-Cost":"0","ReadRangesExpectedSize":"2"}],"Node Type":"TableRangeScan"}],"Operators":[{"SortBy":"input.abalance","Name":"Sort"}],"Node Type":"Sort"}],"Node Type":"ResultSet_1","PlanNodeType":"ResultSet"}],"Node Type":"Query","OptimizerStats":{"EquiJoinsCount":0,"JoinsCount":0},"PlanNodeType":"Query"}} {"Plan":{"Plans":[{"PlanNodeId":4,"Plans":[{"PlanNodeId":3,"Plans":[{"PlanNodeId":2,"Plans":[{"Tables":["pgbench_accounts"],"PlanNodeId":1,"Operators":[{"Scan":"Parallel","ReadRange":["aid (4, 3)"],"E-Size":"0","Name":"TableRangeScan","Inputs":[],"Path":"\/Root\/pgbench_accounts","E-Rows":"1","Table":"pgbench_accounts","ReadColumns":["abalance"],"E-Cost":"0"}],"Node Type":"TableRangeScan"}],"Node Type":"UnionAll","PlanNodeType":"Connection"}],"Node Type":"Collect"}],"Node Type":"ResultSet","PlanNodeType":"ResultSet"}],"Node Type":"Query","Stats":{"ResourcePoolId":"default"},"PlanNodeType":"Query"},"meta":{"version":"0.2","type":"query"},"tables":[{"name":"\/Root\/pgbench_accounts","reads":[{"columns":["abalance"],"scan_by":["aid (4, 3)"],"type":"Scan"}]}],"SimplifiedPlan":{"PlanNodeId":0,"Plans":[{"PlanNodeId":1,"Plans":[{"PlanNodeId":4,"Operators":[{"ReadRange":["aid (4, 3)"],"E-Size":"0","Name":"TableRangeScan","E-Rows":"1","Table":"pgbench_accounts","ReadColumns":["abalance"],"E-Cost":"0"}],"Node Type":"TableRangeScan"}],"Node Type":"ResultSet","PlanNodeType":"ResultSet"}],"Node Type":"Query","OptimizerStats":{"EquiJoinsCount":0,"JoinsCount":0},"PlanNodeType":"Query"}} Trying to start YDB, 
gRPC: 16739, MsgBus: 21867 2026-01-08T15:34:00.637465Z node 11 :METADATA_PROVIDER WARN: log.cpp:841: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[11:7593014444195987861:2068];send_to=[0:7307199536658146131:7762515]; 2026-01-08T15:34:00.637621Z node 11 :METADATA_PROVIDER ERROR: log.cpp:841: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2026-01-08T15:34:00.662940Z node 11 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: LookupError, path: aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/.metadata/script_executions 2026-01-08T15:34:00.797289Z node 11 :IMPORT WARN: schemeshard_import.cpp:440: Table profiles were not loaded 2026-01-08T15:34:00.819917Z node 11 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(11, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2026-01-08T15:34:00.820047Z node 11 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(11, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2026-01-08T15:34:00.868717Z node 11 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(11, (0,0,0,0)) VolatileState: Connecting -> Connected 2026-01-08T15:34:00.882426Z node 11 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: PathErrorUnknown, path: aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/.metadata/script_executions 2026-01-08T15:34:01.155344Z node 11 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2026-01-08T15:34:01.155378Z node 11 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2026-01-08T15:34:01.155393Z node 11 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2026-01-08T15:34:01.155514Z node 11 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2026-01-08T15:34:01.654578Z node 11 :TX_CONVEYOR ERROR: log.cpp:841: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2026-01-08T15:34:02.160814Z node 11 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2026-01-08T15:34:02.171563Z node 11 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:123: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2026-01-08T15:34:05.642146Z node 11 :METADATA_PROVIDER ERROR: log.cpp:841: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[11:7593014444195987861:2068];send_to=[0:7307199536658146131:7762515]; 2026-01-08T15:34:05.642251Z node 11 :METADATA_PROVIDER ERROR: log.cpp:841: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/.metadata/initialization/migrations;error=timeout; 2026-01-08T15:34:06.929418Z node 11 
:KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [11:7593014469965792481:2327], DatabaseId: /aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:34:06.929620Z node 11 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:263: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:34:06.930754Z node 11 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [11:7593014469965792516:2330], DatabaseId: /aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:34:06.930820Z node 11 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [11:7593014469965792517:2331], DatabaseId: /aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:34:06.930887Z node 11 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:263: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:34:06.938426Z node 11 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2026-01-08T15:34:06.958571Z node 11 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [11:7593014469965792520:2332], DatabaseId: /aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715658 completed, doublechecking } 2026-01-08T15:34:07.062988Z node 11 :TX_PROXY ERROR: schemereq.cpp:624: Actor# [11:7593014474260759867:2542] txid# 281474976715659, issues: { message: "Check failed: path: \'/aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 41], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2026-01-08T15:34:07.110720Z node 11 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) |87.2%| [TM] {BAZEL_UPLOAD} ydb/core/kqp/ut/pg/unittest >> DataShardVolatile::UpsertBrokenLockArbiter-UseSink [GOOD] >> DataShardVolatile::UpsertNoLocksArbiterRestart+UseSink >> KqpPg::SelectIndex-useSink [GOOD] >> KqpPg::TableDeleteAllData+useSink |87.2%| [TM] {asan, default-linux-x86_64, release} ydb/core/grpc_services/ut/unittest >> SplitPathTests::WithDatabaseShouldFail [GOOD] |87.2%| [TM] {BAZEL_UPLOAD} ydb/core/grpc_services/ut/unittest ------- [TM] {asan, default-linux-x86_64, release} ydb/services/fq/ut_integration/unittest >> Yq_1::CreateConnections_With_Idempotency [GOOD] Test command err: 2026-01-08T15:33:13.003397Z node 1 :BS_NODE WARN: {NWDC01@distconf.cpp:423} StateFunc too long Type# 131077 Duration# 0.008862s 2026-01-08T15:33:13.042935Z node 1 :BS_NODE WARN: {NWDC01@distconf.cpp:423} StateFunc too long Type# 268639238 Duration# 0.006299s 2026-01-08T15:33:13.153324Z node 1 :METADATA_PROVIDER WARN: log.cpp:841: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7593014241915325224:2264];send_to=[0:7307199536658146131:7762515]; 2026-01-08T15:33:13.153383Z node 1 :METADATA_PROVIDER ERROR: log.cpp:841: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2026-01-08T15:33:14.154985Z node 1 :TX_CONVEYOR ERROR: log.cpp:841: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2026-01-08T15:33:14.186956Z node 1 :YQ_CONTROL_PLANE_STORAGE WARN: schema.cpp:297: Create table "Root/yq/tenants". Create session error: [ {
: Error: GRpc error: (14): connections to all backends failing; last error: UNKNOWN: ipv4:127.0.0.1:27771: Failed to connect to remote host: Connection refused } {
: Error: Grpc error response on endpoint localhost:27771 } ] 2026-01-08T15:33:14.187429Z node 1 :YQ_CONTROL_PLANE_STORAGE WARN: schema.cpp:297: Create table "Root/yq/quotas". Create session error: [ {
: Error: GRpc error: (14): connections to all backends failing; last error: UNKNOWN: ipv4:127.0.0.1:27771: Failed to connect to remote host: Connection refused } {
: Error: Grpc error response on endpoint localhost:27771 } ] E0108 15:33:14.192664592 184818 dns_resolver_ares.cc:452] no server name supplied in dns URI E0108 15:33:14.195099978 184818 channel.cc:120] channel stack builder failed: UNKNOWN: the target uri is not valid: dns:/// 2026-01-08T15:33:14.190613Z node 1 :YQ_CONTROL_PLANE_STORAGE WARN: schema.cpp:297: Create table "Root/yq/tenant_acks". Create session error: [ {
: Error: GRpc error: (14): connections to all backends failing; last error: UNKNOWN: ipv4:127.0.0.1:27771: Failed to connect to remote host: Connection refused } {
: Error: Grpc error response on endpoint localhost:27771 } ] 2026-01-08T15:33:14.196327Z node 1 :YQ_CONTROL_PLANE_STORAGE WARN: schema.cpp:297: Create table "Root/yq/result_sets". Create session error: [ {
: Error: GRpc error: (14): connections to all backends failing; last error: UNKNOWN: ipv4:127.0.0.1:27771: Failed to connect to remote host: Connection refused } {
: Error: Grpc error response on endpoint localhost:27771 } ] 2026-01-08T15:33:14.308324Z node 1 :YQ_CONTROL_PLANE_STORAGE WARN: schema.cpp:297: Create table "Root/yq/mappings". Create session error: [ {
: Error: GRpc error: (14): connections to all backends failing; last error: UNKNOWN: ipv4:127.0.0.1:27771: Failed to connect to remote host: Connection refused } {
: Error: Grpc error response on endpoint localhost:27771 } ] 2026-01-08T15:33:14.308397Z node 1 :YQ_CONTROL_PLANE_STORAGE WARN: schema.cpp:297: Create table "Root/yq/jobs". Create session error: [ {
: Error: GRpc error: (14): connections to all backends failing; last error: UNKNOWN: ipv4:127.0.0.1:27771: Failed to connect to remote host: Connection refused } {
: Error: Grpc error response on endpoint localhost:27771 } ] 2026-01-08T15:33:14.357905Z node 1 :YQ_CONTROL_PLANE_STORAGE WARN: schema.cpp:297: Create table "Root/yq/queries". Create session error: [ {
: Error: GRpc error: (14): connections to all backends failing; last error: UNKNOWN: ipv4:127.0.0.1:27771: Failed to connect to remote host: Connection refused } {
: Error: Grpc error response on endpoint localhost:27771 } ] 2026-01-08T15:33:14.392742Z node 1 :YQ_CONTROL_PLANE_STORAGE WARN: schema.cpp:297: Create table "Root/yq/pending_small". Create session error: [ {
: Error: GRpc error: (14): connections to all backends failing; last error: UNKNOWN: ipv4:127.0.0.1:27771: Failed to connect to remote host: Connection refused } {
: Error: Grpc error response on endpoint localhost:27771 } ] 2026-01-08T15:33:14.395332Z node 1 :YQ_CONTROL_PLANE_STORAGE WARN: schema.cpp:297: Create table "Root/yq/nodes". Create session error: [ {
: Error: GRpc error: (14): connections to all backends failing; last error: UNKNOWN: ipv4:127.0.0.1:27771: Failed to connect to remote host: Connection refused } {
: Error: Grpc error response on endpoint localhost:27771 } ] 2026-01-08T15:33:14.421520Z node 1 :YQ_CONTROL_PLANE_STORAGE WARN: schema.cpp:297: Create table "Root/yq/connections". Create session error: [ {
: Error: GRpc error: (14): connections to all backends failing; last error: UNKNOWN: ipv4:127.0.0.1:27771: Failed to connect to remote host: Connection refused } {
: Error: Grpc error response on endpoint localhost:27771 } ] 2026-01-08T15:33:14.450582Z node 1 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: LookupError, path: Root/.metadata/script_executions 2026-01-08T15:33:14.450697Z node 1 :METADATA_PROVIDER ERROR: log.cpp:841: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2026-01-08T15:33:14.463068Z node 1 :YQ_CONTROL_PLANE_STORAGE ERROR: schema.cpp:160: Create directory "Root/yq" error: TRANSPORT_UNAVAILABLE [ {
: Error: GRpc error: (14): connections to all backends failing; last error: UNKNOWN: ipv4:127.0.0.1:27771: Failed to connect to remote host: Connection refused } {
: Error: Grpc error response on endpoint localhost:27771 } ] 2026-01-08T15:33:14.515187Z node 1 :YQ_CONTROL_PLANE_STORAGE WARN: schema.cpp:297: Create table "Root/yq/bindings". Create session error: [ {
: Error: GRpc error: (14): connections to all backends failing; last error: UNKNOWN: ipv4:127.0.0.1:27771: Failed to connect to remote host: Connection refused } {
: Error: Grpc error response on endpoint localhost:27771 } ] 2026-01-08T15:33:14.525284Z node 1 :YQ_CONTROL_PLANE_STORAGE WARN: schema.cpp:297: Create table "Root/yq/compute_databases". Create session error: [ {
: Error: GRpc error: (14): connections to all backends failing; last error: UNKNOWN: ipv4:127.0.0.1:27771: Failed to connect to remote host: Connection refused } {
: Error: Grpc error response on endpoint localhost:27771 } ] 2026-01-08T15:33:14.527525Z node 1 :YQ_CONTROL_PLANE_STORAGE ERROR: schema.cpp:160: Create directory "Root/yq" error: TRANSPORT_UNAVAILABLE [ {
: Error: GRpc error: (14): connections to all backends failing; last error: UNKNOWN: ipv4:127.0.0.1:27771: Failed to connect to remote host: Connection refused } {
: Error: Grpc error response on endpoint localhost:27771 } ] 2026-01-08T15:33:14.530620Z node 1 :YQL_NODES_MANAGER ERROR: nodes_manager.cpp:395: ydb/core/fq/libs/actors/nodes_manager.cpp:353: TRANSPORT_UNAVAILABLE
: Error: GRpc error: (14): connections to all backends failing; last error: UNKNOWN: ipv4:127.0.0.1:27771: Failed to connect to remote host: Connection refused
: Error: Grpc error response on endpoint localhost:27771 2026-01-08T15:33:14.595754Z node 1 :YQ_CONTROL_PLANE_STORAGE ERROR: schema.cpp:160: Create directory "Root/yq" error: TRANSPORT_UNAVAILABLE [ {
: Error: GRpc error: (14): connections to all backends failing; last error: UNKNOWN: ipv4:127.0.0.1:27771: Failed to connect to remote host: Connection refused } {
: Error: Grpc error response on endpoint localhost:27771 } ] 2026-01-08T15:33:14.647936Z node 1 :YQ_CONTROL_PLANE_STORAGE WARN: schema.cpp:297: Create table "Root/yq/quotas". Create session error: [ {
: Error: GRpc error: (14): connections to all backends failing; last error: UNKNOWN: ipv4:127.0.0.1:27771: Failed to connect to remote host: Connection refused } {
: Error: Grpc error response on endpoint localhost:27771 } ] 2026-01-08T15:33:14.648049Z node 1 :YQ_CONTROL_PLANE_STORAGE WARN: schema.cpp:297: Create table "Root/yq/idempotency_keys". Create session error: [ {
: Error: GRpc error: (14): connections to all backends failing; last error: UNKNOWN: ipv4:127.0.0.1:27771: Failed to connect to remote host: Connection refused } {
: Error: Grpc error response on endpoint localhost:27771 } ] 2026-01-08T15:33:14.650070Z node 1 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: LookupError, path: Root/.metadata/script_executions 2026-01-08T15:33:14.680773Z node 1 :YQ_CONTROL_PLANE_STORAGE ERROR: schema.cpp:160: Create directory "Root/yq" error: TRANSPORT_UNAVAILABLE [ {
: Error: GRpc error: (14): connections to all backends failing; last error: UNKNOWN: ipv4:127.0.0.1:27771: Failed to connect to remote host: Connection refused } {
: Error: Grpc error response on endpoint localhost:27771 } ] 2026-01-08T15:33:14.691494Z node 1 :YQ_CONTROL_PLANE_STORAGE WARN: schema.cpp:297: Create table "Root/yq/tenants". Create session error: [ {
: Error: GRpc error: (14): connections to all backends failing; last error: UNKNOWN: ipv4:127.0.0.1:27771: Failed to connect to remote host: Connection refused } {
: Error: Grpc error response on endpoint localhost:27771 } ] 2026-01-08T15:33:14.695832Z node 1 :YQ_CONTROL_PLANE_STORAGE WARN: schema.cpp:297: Create table "Root/yq/result_sets". Create session error: [ {
: Error: GRpc error: (14): connections to all backends failing; last error: UNKNOWN: ipv4:127.0.0.1:27771: Failed to connect to remote host: Connection refused } {
: Error: Grpc error response on endpoint localhost:27771 } ] 2026-01-08T15:33:14.695878Z node 1 :YQ_CONTROL_PLANE_STORAGE WARN: schema.cpp:297: Create table "Root/yq/mappings". Create session error: [ {
: Error: GRpc error: (14): connections to all backends failing; last error: UNKNOWN: ipv4:127.0.0.1:27771: Failed to connect to remote host: Connection refused } {
: Error: Grpc error response on endpoint localhost:27771 } ] 2026-01-08T15:33:14.699698Z node 1 :YQ_CONTROL_PLANE_STORAGE WARN: schema.cpp:297: Create table "Root/yq/jobs". Create session error: [ {
: Error: GRpc error: (14): connections to all backends failing; last error: UNKNOWN: ipv4:127.0.0.1:27771: Failed to connect to remote host: Connection refused } {
: Error: Grpc error response on endpoint localhost:27771 } ] 2026-01-08T15:33:14.704506Z node 1 :YQ_CONTROL_PLANE_STORAGE WARN: schema.cpp:297: Create table "Root/yq/queries". Create session error: [ {
: Error: GRpc error: (14): connections to all backends failing; last error: UNKNOWN: ipv4:127.0.0.1:27771: Failed to connect to remote host: Connection refused } {
: Error: Grpc error response on endpoint localhost:27771 } ] 2026-01-08T15:33:14.704513Z node 1 :YQ_CONTROL_PLANE_STORAGE WARN: schema.cpp:297: Create table "Root/yq/pending_small". Create session error: [ {
: Error: GRpc error: (14): connections to all backends failing; last error: UNKNOWN: ipv4:127.0.0.1:27771: Failed to connect to remote host: Connection refused } {
: Error: Grpc error response on endpoint localhost:27771 } ] 2026-01-08T15:33:14.770593Z node 1 :YQ_CONTROL_PLANE_STORAGE WARN: schema.cpp:297: Create table "Root/yq/connections". Create session error: [ {
: Error: GRpc error: (14): connections to all backends failing; last error: UNKNOWN: ipv4:127.0.0.1:27771: Failed to connect to remote host: Connection refused } {
: Error: Grpc error response on endpoint localhost:27771 } ] 2026-01-08T15:33:14.770660Z node 1 :YQ_CONTROL_PLANE_STORAGE WARN: schema.cpp:297: Create table "Root/yq/bindings". Create session error: [ {
: Error: GRpc error: (14): connections to all backends failing; last error: UNKNOWN: ipv ... } WatermarksMode: WATERMARKS_MODE_DISABLED } } 2026-01-08T15:34:06.430529Z node 4 :KQP_COMPUTE DEBUG: kqp_pure_compute_actor.cpp:160: SelfId: [4:7593014471592407792:2392], TxId: 281474976715755, task: 1. Ctx: { CheckpointId : . TraceId : 01kef3wqthed5n3vnv1kw7z6jv. CustomerSuppliedId : . RunScriptActorId : [0:0:0]. PoolId : default. SessionId : ydb://session/3?node_id=4&id=ZThiODk3YzctYzFhYjc4MS0zMTU0ZDI3Zi01Zjc1ZDEyYg==. CurrentExecutionId : . Database : . DatabaseId : /Root. }. CA StateFunc 271646926 2026-01-08T15:34:06.430612Z node 4 :KQP_COMPUTE DEBUG: dq_compute_actor_impl.h:1110: SelfId: [4:7593014471592407792:2392], TxId: 281474976715755, task: 1. Ctx: { CheckpointId : . TraceId : 01kef3wqthed5n3vnv1kw7z6jv. CustomerSuppliedId : . RunScriptActorId : [0:0:0]. PoolId : default. SessionId : ydb://session/3?node_id=4&id=ZThiODk3YzctYzFhYjc4MS0zMTU0ZDI3Zi01Zjc1ZDEyYg==. CurrentExecutionId : . Database : . DatabaseId : /Root. }. Received channels info: Update { Id: 1 TransportVersion: DATA_TRANSPORT_OOB_PICKLE_1_0 SrcTaskId: 1 SrcEndpoint { ActorId { RawX1: 7593014471592407792 RawX2: 4503616807242072 } } DstEndpoint { ActorId { RawX1: 7593014471592407788 RawX2: 4503616807242072 } } InMemory: true WatermarksMode: WATERMARKS_MODE_DISABLED } 2026-01-08T15:34:06.430682Z node 4 :KQP_COMPUTE DEBUG: kqp_read_actor.cpp:454: TxId: 281474976715755, task: 1, CA Id [4:7593014471592407792:2392]. Shards State: TShardState{ TabletId: 0, Last Key , Ranges: [], Points: [# 0: (String : yandexcloud://Execute_folder_id, String : idempotency_key)], RetryAttempt: 0, ResolveAttempt: 0 } 2026-01-08T15:34:06.430751Z node 4 :KQP_COMPUTE DEBUG: kqp_read_actor.cpp:521: TxId: 281474976715755, task: 1, CA Id [4:7593014471592407792:2392]. Sending TEvResolveKeySet update for table 'Root/yq/idempotency_keys', range: [(String : yandexcloud://Execute_folder_id, String : idempotency_key) ; (String : yandexcloud://Execute_folder_id, String : idempotency_key)], attempt #1 2026-01-08T15:34:06.430803Z node 4 :KQP_COMPUTE DEBUG: dq_compute_actor_impl.h:469: SelfId: [4:7593014471592407792:2392], TxId: 281474976715755, task: 1. Ctx: { CheckpointId : . TraceId : 01kef3wqthed5n3vnv1kw7z6jv. CustomerSuppliedId : . RunScriptActorId : [0:0:0]. PoolId : default. SessionId : ydb://session/3?node_id=4&id=ZThiODk3YzctYzFhYjc4MS0zMTU0ZDI3Zi01Zjc1ZDEyYg==. CurrentExecutionId : . Database : . DatabaseId : /Root. }. All outputs have been finished. Consider finished 2026-01-08T15:34:06.430813Z node 4 :KQP_COMPUTE DEBUG: dq_compute_actor_impl.h:526: SelfId: [4:7593014471592407792:2392], TxId: 281474976715755, task: 1. Ctx: { CheckpointId : . TraceId : 01kef3wqthed5n3vnv1kw7z6jv. CustomerSuppliedId : . RunScriptActorId : [0:0:0]. PoolId : default. SessionId : ydb://session/3?node_id=4&id=ZThiODk3YzctYzFhYjc4MS0zMTU0ZDI3Zi01Zjc1ZDEyYg==. CurrentExecutionId : . Database : . DatabaseId : /Root. }. Continue execution, either output buffers are not empty or not all channels are ready, hasDataToSend: 1, channelsReady: 1 2026-01-08T15:34:06.430847Z node 4 :KQP_COMPUTE DEBUG: kqp_pure_compute_actor.cpp:160: SelfId: [4:7593014471592407792:2392], TxId: 281474976715755, task: 1. Ctx: { CheckpointId : . TraceId : 01kef3wqthed5n3vnv1kw7z6jv. CustomerSuppliedId : . RunScriptActorId : [0:0:0]. PoolId : default. SessionId : ydb://session/3?node_id=4&id=ZThiODk3YzctYzFhYjc4MS0zMTU0ZDI3Zi01Zjc1ZDEyYg==. CurrentExecutionId : . Database : . DatabaseId : /Root. }. 
CA StateFunc 271646922 2026-01-08T15:34:06.430859Z node 4 :KQP_COMPUTE DEBUG: kqp_read_actor.cpp:1362: TxId: 281474976715755, task: 1, CA Id [4:7593014471592407792:2392]. enter getasyncinputdata results size 0, freeSpace 8388608 2026-01-08T15:34:06.430871Z node 4 :KQP_COMPUTE DEBUG: kqp_read_actor.cpp:1466: TxId: 281474976715755, task: 1, CA Id [4:7593014471592407792:2392]. returned async data processed rows 0 left freeSpace 8388608 received rows 0 running reads 0 pending shards 0 finished = 0 has limit 0 limit reached 0 2026-01-08T15:34:06.430881Z node 4 :KQP_COMPUTE DEBUG: dq_compute_actor_impl.h:469: SelfId: [4:7593014471592407792:2392], TxId: 281474976715755, task: 1. Ctx: { CheckpointId : . TraceId : 01kef3wqthed5n3vnv1kw7z6jv. CustomerSuppliedId : . RunScriptActorId : [0:0:0]. PoolId : default. SessionId : ydb://session/3?node_id=4&id=ZThiODk3YzctYzFhYjc4MS0zMTU0ZDI3Zi01Zjc1ZDEyYg==. CurrentExecutionId : . Database : . DatabaseId : /Root. }. All outputs have been finished. Consider finished 2026-01-08T15:34:06.430889Z node 4 :KQP_COMPUTE DEBUG: dq_compute_actor_impl.h:526: SelfId: [4:7593014471592407792:2392], TxId: 281474976715755, task: 1. Ctx: { CheckpointId : . TraceId : 01kef3wqthed5n3vnv1kw7z6jv. CustomerSuppliedId : . RunScriptActorId : [0:0:0]. PoolId : default. SessionId : ydb://session/3?node_id=4&id=ZThiODk3YzctYzFhYjc4MS0zMTU0ZDI3Zi01Zjc1ZDEyYg==. CurrentExecutionId : . Database : . DatabaseId : /Root. }. Continue execution, either output buffers are not empty or not all channels are ready, hasDataToSend: 1, channelsReady: 1 2026-01-08T15:34:06.430997Z node 4 :KQP_COMPUTE DEBUG: kqp_read_actor.cpp:538: TxId: 281474976715755, task: 1, CA Id [4:7593014471592407792:2392]. Received TEvResolveKeySetResult update for table 'Root/yq/idempotency_keys' 2026-01-08T15:34:06.431041Z node 4 :KQP_COMPUTE DEBUG: kqp_read_actor.cpp:646: TxId: 281474976715755, task: 1, CA Id [4:7593014471592407792:2392]. Processing resolved ShardId# 72075186224037890, partition range: [(String : yandexcloud://Execute_folder_id, String : idempotency_key) ; ()), i: 0, state ranges: 0, points: 1 2026-01-08T15:34:06.431057Z node 4 :KQP_COMPUTE DEBUG: kqp_read_actor.cpp:684: TxId: 281474976715755, task: 1, CA Id [4:7593014471592407792:2392]. Add point to new shardId: 72075186224037890 2026-01-08T15:34:06.431136Z node 4 :KQP_COMPUTE DEBUG: kqp_read_actor.cpp:733: TxId: 281474976715755, task: 1, CA Id [4:7593014471592407792:2392]. Pending shards States: TShardState{ TabletId: 72075186224037890, Last Key , Ranges: [], Points: [# 0: (String : yandexcloud://Execute_folder_id, String : idempotency_key)], RetryAttempt: 0, ResolveAttempt: 0 }; In Flight shards States: TShardState{ TabletId: 0, Last Key , Ranges: [], Points: [# 0: (String : yandexcloud://Execute_folder_id, String : idempotency_key)], RetryAttempt: 0, ResolveAttempt: 1 }; 2026-01-08T15:34:06.431148Z node 4 :KQP_COMPUTE DEBUG: kqp_read_actor.cpp:468: TxId: 281474976715755, task: 1, CA Id [4:7593014471592407792:2392]. effective maxinflight 1024 sorted 0 2026-01-08T15:34:06.431157Z node 4 :KQP_COMPUTE DEBUG: kqp_read_actor.cpp:472: TxId: 281474976715755, task: 1, CA Id [4:7593014471592407792:2392]. BEFORE: 1.0 2026-01-08T15:34:06.431203Z node 4 :KQP_COMPUTE DEBUG: kqp_read_actor.cpp:908: TxId: 281474976715755, task: 1, CA Id [4:7593014471592407792:2392]. 
Send EvRead to shardId: 72075186224037890, tablePath: Root/yq/idempotency_keys, ranges: , limit: (empty maybe), readId = 0, reverse = 0, snapshot = (txid=18446744073709551615,step=1767886446424), lockTxId = 281474976715755, lockNodeId = 4 2026-01-08T15:34:06.431242Z node 4 :KQP_COMPUTE DEBUG: kqp_read_actor.cpp:486: TxId: 281474976715755, task: 1, CA Id [4:7593014471592407792:2392]. AFTER: 0.1 2026-01-08T15:34:06.431253Z node 4 :KQP_COMPUTE DEBUG: kqp_read_actor.cpp:490: TxId: 281474976715755, task: 1, CA Id [4:7593014471592407792:2392]. Scheduled table scans, in flight: 1 shards. pending shards to read: 0, 2026-01-08T15:34:06.431999Z node 4 :KQP_COMPUTE DEBUG: kqp_read_actor.cpp:987: TxId: 281474976715755, task: 1, CA Id [4:7593014471592407792:2392]. Recv TEvReadResult from ShardID=72075186224037890, ReadId=0, Status=SUCCESS, Finished=1, RowCount=1, TxLocks= LockId: 281474976715755 DataShard: 72075186224037890 Generation: 1 Counter: 1 SchemeShard: 72057594046644480 PathId: 54, BrokenTxLocks= 2026-01-08T15:34:06.432012Z node 4 :KQP_COMPUTE DEBUG: kqp_read_actor.cpp:1079: TxId: 281474976715755, task: 1, CA Id [4:7593014471592407792:2392]. Taken 1 locks 2026-01-08T15:34:06.432022Z node 4 :KQP_COMPUTE DEBUG: kqp_read_actor.cpp:1094: TxId: 281474976715755, task: 1, CA Id [4:7593014471592407792:2392]. new data for read #0 seqno = 1 finished = 1 2026-01-08T15:34:06.432039Z node 4 :KQP_COMPUTE DEBUG: kqp_pure_compute_actor.cpp:160: SelfId: [4:7593014471592407792:2392], TxId: 281474976715755, task: 1. Ctx: { CheckpointId : . TraceId : 01kef3wqthed5n3vnv1kw7z6jv. CustomerSuppliedId : . RunScriptActorId : [0:0:0]. PoolId : default. SessionId : ydb://session/3?node_id=4&id=ZThiODk3YzctYzFhYjc4MS0zMTU0ZDI3Zi01Zjc1ZDEyYg==. CurrentExecutionId : . Database : . DatabaseId : /Root. }. CA StateFunc 276037645 2026-01-08T15:34:06.432053Z node 4 :KQP_COMPUTE DEBUG: kqp_pure_compute_actor.cpp:160: SelfId: [4:7593014471592407792:2392], TxId: 281474976715755, task: 1. Ctx: { CheckpointId : . TraceId : 01kef3wqthed5n3vnv1kw7z6jv. CustomerSuppliedId : . RunScriptActorId : [0:0:0]. PoolId : default. SessionId : ydb://session/3?node_id=4&id=ZThiODk3YzctYzFhYjc4MS0zMTU0ZDI3Zi01Zjc1ZDEyYg==. CurrentExecutionId : . Database : . DatabaseId : /Root. }. CA StateFunc 271646922 2026-01-08T15:34:06.432065Z node 4 :KQP_COMPUTE DEBUG: kqp_read_actor.cpp:1362: TxId: 281474976715755, task: 1, CA Id [4:7593014471592407792:2392]. enter getasyncinputdata results size 1, freeSpace 8388608 2026-01-08T15:34:06.432076Z node 4 :KQP_COMPUTE DEBUG: kqp_read_actor.cpp:1258: TxId: 281474976715755, task: 1, CA Id [4:7593014471592407792:2392]. enter pack cells method shardId: 72075186224037890 processedRows: 0 packed rows: 0 freeSpace: 8388608 2026-01-08T15:34:06.432093Z node 4 :KQP_COMPUTE DEBUG: kqp_read_actor.cpp:1339: TxId: 281474976715755, task: 1, CA Id [4:7593014471592407792:2392]. exit pack cells method shardId: 72075186224037890 processedRows: 0 packed rows: 1 freeSpace: 8388586 2026-01-08T15:34:06.432107Z node 4 :KQP_COMPUTE DEBUG: kqp_read_actor.cpp:1401: TxId: 281474976715755, task: 1, CA Id [4:7593014471592407792:2392]. returned 1 rows; processed 1 rows 2026-01-08T15:34:06.432146Z node 4 :KQP_COMPUTE DEBUG: kqp_read_actor.cpp:1441: TxId: 281474976715755, task: 1, CA Id [4:7593014471592407792:2392]. dropping batch for read #0 2026-01-08T15:34:06.432157Z node 4 :KQP_COMPUTE DEBUG: kqp_read_actor.cpp:468: TxId: 281474976715755, task: 1, CA Id [4:7593014471592407792:2392]. 
effective maxinflight 1024 sorted 0 2026-01-08T15:34:06.432169Z node 4 :KQP_COMPUTE DEBUG: kqp_read_actor.cpp:490: TxId: 281474976715755, task: 1, CA Id [4:7593014471592407792:2392]. Scheduled table scans, in flight: 0 shards. pending shards to read: 0, 2026-01-08T15:34:06.432183Z node 4 :KQP_COMPUTE DEBUG: kqp_read_actor.cpp:1466: TxId: 281474976715755, task: 1, CA Id [4:7593014471592407792:2392]. returned async data processed rows 1 left freeSpace 8388586 received rows 1 running reads 0 pending shards 0 finished = 1 has limit 0 limit reached 0 2026-01-08T15:34:06.432291Z node 4 :KQP_COMPUTE DEBUG: dq_compute_actor_impl.h:546: SelfId: [4:7593014471592407792:2392], TxId: 281474976715755, task: 1. Ctx: { CheckpointId : . TraceId : 01kef3wqthed5n3vnv1kw7z6jv. CustomerSuppliedId : . RunScriptActorId : [0:0:0]. PoolId : default. SessionId : ydb://session/3?node_id=4&id=ZThiODk3YzctYzFhYjc4MS0zMTU0ZDI3Zi01Zjc1ZDEyYg==. CurrentExecutionId : . Database : . DatabaseId : /Root. }. Compute state finished. All channels and sinks finished 2026-01-08T15:34:06.432458Z node 4 :KQP_COMPUTE DEBUG: log.cpp:841: fline=kqp_compute_actor_factory.cpp:60;problem=finish_compute_actor;tx_id=281474976715755;task_id=1;success=1;message={
: Error: COMPUTE_STATE_FINISHED }; 2026-01-08T15:34:06.539477Z node 4 :FQ_PENDING_FETCHER ERROR: pending_fetcher.cpp:259: Error with GetTask:
: Error: GRpc error: (14): connections to all backends failing; last error: UNKNOWN: ipv6:%5B::%5D:14116: Failed to connect to remote host: Connection refused
: Error: Grpc error response on endpoint [::]:14116 |87.2%| [TM] {BAZEL_UPLOAD} ydb/services/fq/ut_integration/unittest |87.2%| [TM] {asan, default-linux-x86_64, release} ydb/core/grpc_services/ut/unittest >> SplitPathTests::WithDatabaseShouldSuccess [GOOD] >> KqpRboPg::ExpressionSubquery [GOOD] >> KqpRboPg::FallbackToYqlEnabled |87.2%| [TM] {BAZEL_UPLOAD} ydb/core/grpc_services/ut/unittest >> TxUsage::WriteToTopic_Demo_46_Table [GOOD] >> test_s3.py::TestYdbS3TTL::test_s3[table_index_3_UNIQUE_SYNC-pk_types1-all_types1-index1--UNIQUE-SYNC] [GOOD] >> BasicUsage::WriteSessionCloseIgnoresWrites [GOOD] >> TxUsage::Sinks_Oltp_WriteToTopic_4_Query [GOOD] >> KqpRboPg::Bench_Select [GOOD] >> KqpRboPg::Bench_Filter >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-LocalUser-DropUser-56 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-LocalUser-DropUser-57 >> TPopulatorTest::RemoveDir >> TPopulatorQuorumTest::OneDisconnectedRingGroup >> TxUsage::WriteToTopic_Demo_46_Query >> DataShardVolatile::DistributedWriteRSNotAckedBeforeCommit [GOOD] >> DataShardVolatile::DistributedUpsertRestartBeforePrepare+UseSink >> TPopulatorQuorumTest::OneDisconnectedRingGroup [GOOD] >> TxUsage::Sinks_Oltp_WriteToTopicAndTable_4_Table >> TPopulatorTest::RemoveDir [GOOD] >> TPopulatorTest::MakeDir |87.2%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/scheme_board/ut_populator/unittest >> TContinuousBackupTests::Basic >> DataShardStats::CollectKeySampleFollower [GOOD] |87.2%| [TM] {BAZEL_UPLOAD} ydb/core/tx/scheme_board/ut_populator/unittest >> TContinuousBackupTests::TakeIncrementalBackup ------- [TM] {asan, default-linux-x86_64, release} ydb/public/sdk/cpp/src/client/federated_topic/ut/unittest >> BasicUsage::WriteSessionCloseIgnoresWrites [GOOD] Test command err: 2026-01-08T15:33:36.615001Z :WriteSessionCloseWaitsForWrites INFO: Random seed for debugging is 1767886416614963 2026-01-08T15:33:37.175670Z node 1 :METADATA_PROVIDER WARN: log.cpp:841: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7593014346067130363:2192];send_to=[0:7307199536658146131:7762515]; 2026-01-08T15:33:37.176219Z node 1 :METADATA_PROVIDER ERROR: log.cpp:841: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2026-01-08T15:33:37.242678Z node 2 :METADATA_PROVIDER WARN: log.cpp:841: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7593014347194055616:2273];send_to=[0:7307199536658146131:7762515]; 2026-01-08T15:33:37.242776Z node 2 :METADATA_PROVIDER ERROR: log.cpp:841: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2026-01-08T15:33:37.285880Z node 1 :PQ_READ_PROXY DEBUG: caching_service.cpp:44: Direct read cache: : Created 2026-01-08T15:33:37.337849Z node 2 :PQ_READ_PROXY DEBUG: caching_service.cpp:44: Direct read cache: : Created 2026-01-08T15:33:37.939184Z node 2 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: LookupError, path: Root/.metadata/script_executions 2026-01-08T15:33:38.006678Z node 1 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: LookupError, path: Root/.metadata/script_executions 2026-01-08T15:33:38.169351Z 
node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2026-01-08T15:33:38.169484Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2026-01-08T15:33:38.184666Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2026-01-08T15:33:38.184745Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2026-01-08T15:33:38.248496Z node 2 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: LookupError, path: Root/.metadata/script_executions 2026-01-08T15:33:38.248600Z node 2 :TX_CONVEYOR ERROR: log.cpp:841: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2026-01-08T15:33:38.279140Z node 1 :TX_CONVEYOR ERROR: log.cpp:841: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2026-01-08T15:33:38.288071Z node 2 :METADATA_PROVIDER ERROR: log.cpp:841: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2026-01-08T15:33:38.289971Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2026-01-08T15:33:38.292065Z node 1 :IMPORT WARN: schemeshard_import.cpp:440: Table profiles were not loaded 2026-01-08T15:33:38.330205Z node 1 :METADATA_PROVIDER ERROR: log.cpp:841: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2026-01-08T15:33:38.340565Z node 1 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: PathErrorUnknown, path: Root/.metadata/script_executions 2026-01-08T15:33:38.373746Z node 1 :HIVE WARN: hive_impl.cpp:814: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 2 Cookie 2 2026-01-08T15:33:38.385483Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2026-01-08T15:33:38.683820Z node 2 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: PathErrorUnknown, path: Root/.metadata/script_executions 2026-01-08T15:33:38.787484Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: /home/runner/.ya/build/build_root/663n/001258/r3tmp/yandexd90x0V.tmp 2026-01-08T15:33:38.787506Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: /home/runner/.ya/build/build_root/663n/001258/r3tmp/yandexd90x0V.tmp 2026-01-08T15:33:38.795145Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:202: successfully initialized from file: /home/runner/.ya/build/build_root/663n/001258/r3tmp/yandexd90x0V.tmp 2026-01-08T15:33:38.798439Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2026-01-08T15:33:38.863223Z INFO: TTestServer started on Port 13832 GrpcPort 30345 PQClient connected to localhost:30345 2026-01-08T15:33:39.253548Z node 1 
:FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2026-01-08T15:33:42.178644Z node 1 :METADATA_PROVIDER ERROR: log.cpp:841: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7593014346067130363:2192];send_to=[0:7307199536658146131:7762515]; 2026-01-08T15:33:42.178747Z node 1 :METADATA_PROVIDER ERROR: log.cpp:841: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2026-01-08T15:33:42.238188Z node 2 :METADATA_PROVIDER ERROR: log.cpp:841: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[2:7593014347194055616:2273];send_to=[0:7307199536658146131:7762515]; 2026-01-08T15:33:42.238254Z node 2 :METADATA_PROVIDER ERROR: log.cpp:841: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2026-01-08T15:33:42.633124Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7593014368668892213:2304], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:33:42.633236Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7593014368668892225:2307], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:33:42.633323Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:263: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:33:42.636063Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7593014368668892229:2309], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:33:42.636146Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:263: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:33:42.641488Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976720657:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2026-01-08T15:33:42.688844Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7593014368668892228:2308], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976720657 completed, doublechecking } 2026-01-08T15:33:43.032381Z node 2 :TX_PROXY ERROR: schemereq.cpp:624: Actor# [2:7593014368668892258:2141] txid# 281474976720658, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 44], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2026-01-08T15:33:43.059580Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:33:43.064289Z node 1 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:625: Compilation failed, self: [1:7593014367541968091:2338], status: SCHEME_ERROR, issues:
: Error: Type annotation, code: 1030
:3:16: Error: At function: KiReadTable!
:3:16: Error: Cannot find table 'db.[/Root/PQ/Config/V2/Cluster]' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions., code: 2003 2026-01-08T15:33:43.066240Z node 2 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:625: Compilation failed, self: [2:7593014372963859569:2313], status: SCHEME_ERROR, issues:
: Error: Type annotation, code: 1030
:3:16: Error: At function: KiReadTable!
:3:16: Error: Cannot find table 'db.[/Root/PQ/Config/V2/Cluster]' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions., code: 2003 2026-01-08T15:33:43.066732Z node 1 :KQP_SESSION WARN: {KQPSA@kqp_session_actor.cpp:2679} SessionId: ydb://session/3?node_id=1&id=ZjU3Y2UxMmItZDJmNjlmYjQtNWVkNmNmYTMtYWIxOTMwNTQ=, ActorId: [1:7593014367541968073:2331], ActorState: ExecuteState, LegacyTraceId: 01kef3w0nha551jymwpy55jj0y, ReplyQueryCompileError, remove tx status# SCHEME_ERROR issues# { message: "Type annotation" issue_code: 1030 severity: 1 issues { position { row: 3 column: 16 } message: "At function: KiReadTable!" end_position { row: 3 column: 16 } severity: 1 issues { position { row: 3 column: 16 } message: "Cannot find table \'db.[/Root/PQ/Config/V2/Cluster]\' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions." end_position { row: 3 column: 16 } issue_code: 2003 severity: 1 } } } tx_id# trace_id# 2026-01-08T15:33:43.066849Z node 2 :KQP_SESSION WARN: {KQPSA@kqp_session_actor.cpp:2679} SessionId: ydb://session/3?node_id=2&id=YzNiM2NkNDEtMTkzMGRiNWQtMzcyYzg2YTUtNGRjNjZmYTg=, ActorId: [2:7593014368668892211:2303], ActorState: ExecuteState, LegacyTraceId: 01kef3w0k34f8qqggc6qje4e3v, ReplyQueryCompileError, remove tx status# SCHEME_ERROR issues# { message: "Type annotation" issue_code: 1030 severity: 1 issues { position { row: 3 column: 16 } message: "At function: KiReadTable!" end_position { row: 3 column: 16 } severity: 1 issues { position { row: 3 column: 16 } message: "Cannot find table \'db.[/Root/PQ/Config/V2/Cluster]\' because it doe ... ession v1 cookie: 1 sessionId: src|804f3d4e-17fb9652-b68814c1-3cfcb936_0 is DEAD 2026-01-08T15:34:06.919349Z node 3 :PQ_WRITE_PROXY DEBUG: writer.cpp:562: TPartitionWriter 72075186224037892 (partition=0) Received event: NActors::TEvents::TEvPoison Session was created >>> Ready to answer: ok 2026-01-08T15:34:07.026875Z node 4 :PERSQUEUE DEBUG: partition.cpp:2329: [72075186224037892][Partition][0][StateIdle] Process user action and tx events 2026-01-08T15:34:07.026905Z node 4 :PERSQUEUE DEBUG: partition.cpp:2388: [72075186224037892][Partition][0][StateIdle] Events: 0, PendingCommits: 0, PendingWrites: 0 2026-01-08T15:34:07.026918Z node 4 :PERSQUEUE DEBUG: partition.cpp:2337: [72075186224037892][Partition][0][StateIdle] Process user action and tx pending commits 2026-01-08T15:34:07.026934Z node 4 :PERSQUEUE DEBUG: partition.cpp:2388: [72075186224037892][Partition][0][StateIdle] Events: 0, PendingCommits: 0, PendingWrites: 0 2026-01-08T15:34:07.026945Z node 4 :PERSQUEUE DEBUG: partition.cpp:2347: [72075186224037892][Partition][0][StateIdle] Try persist 2026-01-08T15:34:07.029104Z :ERROR: [/Root] OnFederationDiscovery: Got error. Status: UNAVAILABLE. 
Description: 2026-01-08T15:34:07.126930Z node 4 :PERSQUEUE DEBUG: partition.cpp:2329: [72075186224037892][Partition][0][StateIdle] Process user action and tx events 2026-01-08T15:34:07.126994Z node 4 :PERSQUEUE DEBUG: partition.cpp:2388: [72075186224037892][Partition][0][StateIdle] Events: 0, PendingCommits: 0, PendingWrites: 0 2026-01-08T15:34:07.127008Z node 4 :PERSQUEUE DEBUG: partition.cpp:2337: [72075186224037892][Partition][0][StateIdle] Process user action and tx pending commits 2026-01-08T15:34:07.127025Z node 4 :PERSQUEUE DEBUG: partition.cpp:2388: [72075186224037892][Partition][0][StateIdle] Events: 0, PendingCommits: 0, PendingWrites: 0 2026-01-08T15:34:07.127038Z node 4 :PERSQUEUE DEBUG: partition.cpp:2347: [72075186224037892][Partition][0][StateIdle] Try persist 2026-01-08T15:34:07.230449Z node 4 :PERSQUEUE DEBUG: partition.cpp:2329: [72075186224037892][Partition][0][StateIdle] Process user action and tx events 2026-01-08T15:34:07.230491Z node 4 :PERSQUEUE DEBUG: partition.cpp:2388: [72075186224037892][Partition][0][StateIdle] Events: 0, PendingCommits: 0, PendingWrites: 0 2026-01-08T15:34:07.230505Z node 4 :PERSQUEUE DEBUG: partition.cpp:2337: [72075186224037892][Partition][0][StateIdle] Process user action and tx pending commits 2026-01-08T15:34:07.230521Z node 4 :PERSQUEUE DEBUG: partition.cpp:2388: [72075186224037892][Partition][0][StateIdle] Events: 0, PendingCommits: 0, PendingWrites: 0 2026-01-08T15:34:07.230533Z node 4 :PERSQUEUE DEBUG: partition.cpp:2347: [72075186224037892][Partition][0][StateIdle] Try persist 2026-01-08T15:34:07.334439Z node 4 :PERSQUEUE DEBUG: partition.cpp:2329: [72075186224037892][Partition][0][StateIdle] Process user action and tx events 2026-01-08T15:34:07.334478Z node 4 :PERSQUEUE DEBUG: partition.cpp:2388: [72075186224037892][Partition][0][StateIdle] Events: 0, PendingCommits: 0, PendingWrites: 0 2026-01-08T15:34:07.334491Z node 4 :PERSQUEUE DEBUG: partition.cpp:2337: [72075186224037892][Partition][0][StateIdle] Process user action and tx pending commits 2026-01-08T15:34:07.334508Z node 4 :PERSQUEUE DEBUG: partition.cpp:2388: [72075186224037892][Partition][0][StateIdle] Events: 0, PendingCommits: 0, PendingWrites: 0 2026-01-08T15:34:07.334520Z node 4 :PERSQUEUE DEBUG: partition.cpp:2347: [72075186224037892][Partition][0][StateIdle] Try persist 2026-01-08T15:34:07.434435Z node 4 :PERSQUEUE DEBUG: partition.cpp:2329: [72075186224037892][Partition][0][StateIdle] Process user action and tx events 2026-01-08T15:34:07.434472Z node 4 :PERSQUEUE DEBUG: partition.cpp:2388: [72075186224037892][Partition][0][StateIdle] Events: 0, PendingCommits: 0, PendingWrites: 0 2026-01-08T15:34:07.434487Z node 4 :PERSQUEUE DEBUG: partition.cpp:2337: [72075186224037892][Partition][0][StateIdle] Process user action and tx pending commits 2026-01-08T15:34:07.434507Z node 4 :PERSQUEUE DEBUG: partition.cpp:2388: [72075186224037892][Partition][0][StateIdle] Events: 0, PendingCommits: 0, PendingWrites: 0 2026-01-08T15:34:07.434519Z node 4 :PERSQUEUE DEBUG: partition.cpp:2347: [72075186224037892][Partition][0][StateIdle] Try persist 2026-01-08T15:34:07.534592Z node 4 :PERSQUEUE DEBUG: partition.cpp:2329: [72075186224037892][Partition][0][StateIdle] Process user action and tx events 2026-01-08T15:34:07.534626Z node 4 :PERSQUEUE DEBUG: partition.cpp:2388: [72075186224037892][Partition][0][StateIdle] Events: 0, PendingCommits: 0, PendingWrites: 0 2026-01-08T15:34:07.534639Z node 4 :PERSQUEUE DEBUG: partition.cpp:2337: [72075186224037892][Partition][0][StateIdle] Process user 
action and tx pending commits 2026-01-08T15:34:07.534655Z node 4 :PERSQUEUE DEBUG: partition.cpp:2388: [72075186224037892][Partition][0][StateIdle] Events: 0, PendingCommits: 0, PendingWrites: 0 2026-01-08T15:34:07.534667Z node 4 :PERSQUEUE DEBUG: partition.cpp:2347: [72075186224037892][Partition][0][StateIdle] Try persist 2026-01-08T15:34:07.642425Z node 4 :PERSQUEUE DEBUG: partition.cpp:2329: [72075186224037892][Partition][0][StateIdle] Process user action and tx events 2026-01-08T15:34:07.642463Z node 4 :PERSQUEUE DEBUG: partition.cpp:2388: [72075186224037892][Partition][0][StateIdle] Events: 0, PendingCommits: 0, PendingWrites: 0 2026-01-08T15:34:07.642478Z node 4 :PERSQUEUE DEBUG: partition.cpp:2337: [72075186224037892][Partition][0][StateIdle] Process user action and tx pending commits 2026-01-08T15:34:07.642496Z node 4 :PERSQUEUE DEBUG: partition.cpp:2388: [72075186224037892][Partition][0][StateIdle] Events: 0, PendingCommits: 0, PendingWrites: 0 2026-01-08T15:34:07.642509Z node 4 :PERSQUEUE DEBUG: partition.cpp:2347: [72075186224037892][Partition][0][StateIdle] Try persist 2026-01-08T15:34:07.781916Z node 4 :PERSQUEUE DEBUG: partition.cpp:2329: [72075186224037892][Partition][0][StateIdle] Process user action and tx events 2026-01-08T15:34:07.781941Z node 4 :PERSQUEUE DEBUG: partition.cpp:2388: [72075186224037892][Partition][0][StateIdle] Events: 0, PendingCommits: 0, PendingWrites: 0 2026-01-08T15:34:07.781950Z node 4 :PERSQUEUE DEBUG: partition.cpp:2337: [72075186224037892][Partition][0][StateIdle] Process user action and tx pending commits 2026-01-08T15:34:07.781962Z node 4 :PERSQUEUE DEBUG: partition.cpp:2388: [72075186224037892][Partition][0][StateIdle] Events: 0, PendingCommits: 0, PendingWrites: 0 2026-01-08T15:34:07.781971Z node 4 :PERSQUEUE DEBUG: partition.cpp:2347: [72075186224037892][Partition][0][StateIdle] Try persist 2026-01-08T15:34:09.684531Z node 4 :PERSQUEUE DEBUG: partition.cpp:2329: [72075186224037892][Partition][0][StateIdle] Process user action and tx events 2026-01-08T15:34:09.684556Z node 4 :PERSQUEUE DEBUG: partition.cpp:2388: [72075186224037892][Partition][0][StateIdle] Events: 0, PendingCommits: 0, PendingWrites: 0 2026-01-08T15:34:09.684566Z node 4 :PERSQUEUE DEBUG: partition.cpp:2337: [72075186224037892][Partition][0][StateIdle] Process user action and tx pending commits 2026-01-08T15:34:09.684578Z node 4 :PERSQUEUE DEBUG: partition.cpp:2388: [72075186224037892][Partition][0][StateIdle] Events: 0, PendingCommits: 0, PendingWrites: 0 2026-01-08T15:34:09.684587Z node 4 :PERSQUEUE DEBUG: partition.cpp:2347: [72075186224037892][Partition][0][StateIdle] Try persist 2026-01-08T15:34:09.782431Z node 4 :PERSQUEUE DEBUG: partition.cpp:2329: [72075186224037892][Partition][0][StateIdle] Process user action and tx events 2026-01-08T15:34:09.782468Z node 4 :PERSQUEUE DEBUG: partition.cpp:2388: [72075186224037892][Partition][0][StateIdle] Events: 0, PendingCommits: 0, PendingWrites: 0 2026-01-08T15:34:09.782486Z node 4 :PERSQUEUE DEBUG: partition.cpp:2337: [72075186224037892][Partition][0][StateIdle] Process user action and tx pending commits 2026-01-08T15:34:09.782504Z node 4 :PERSQUEUE DEBUG: partition.cpp:2388: [72075186224037892][Partition][0][StateIdle] Events: 0, PendingCommits: 0, PendingWrites: 0 2026-01-08T15:34:09.782517Z node 4 :PERSQUEUE DEBUG: partition.cpp:2347: [72075186224037892][Partition][0][StateIdle] Try persist 2026-01-08T15:34:09.882479Z node 4 :PERSQUEUE DEBUG: partition.cpp:2329: [72075186224037892][Partition][0][StateIdle] Process user action 
and tx events 2026-01-08T15:34:09.882515Z node 4 :PERSQUEUE DEBUG: partition.cpp:2388: [72075186224037892][Partition][0][StateIdle] Events: 0, PendingCommits: 0, PendingWrites: 0 2026-01-08T15:34:09.882543Z node 4 :PERSQUEUE DEBUG: partition.cpp:2337: [72075186224037892][Partition][0][StateIdle] Process user action and tx pending commits 2026-01-08T15:34:09.882563Z node 4 :PERSQUEUE DEBUG: partition.cpp:2388: [72075186224037892][Partition][0][StateIdle] Events: 0, PendingCommits: 0, PendingWrites: 0 2026-01-08T15:34:09.882578Z node 4 :PERSQUEUE DEBUG: partition.cpp:2347: [72075186224037892][Partition][0][StateIdle] Try persist 2026-01-08T15:34:09.983133Z node 4 :PERSQUEUE DEBUG: partition.cpp:2329: [72075186224037892][Partition][0][StateIdle] Process user action and tx events 2026-01-08T15:34:09.983163Z node 4 :PERSQUEUE DEBUG: partition.cpp:2388: [72075186224037892][Partition][0][StateIdle] Events: 0, PendingCommits: 0, PendingWrites: 0 2026-01-08T15:34:09.983178Z node 4 :PERSQUEUE DEBUG: partition.cpp:2337: [72075186224037892][Partition][0][StateIdle] Process user action and tx pending commits 2026-01-08T15:34:09.983196Z node 4 :PERSQUEUE DEBUG: partition.cpp:2388: [72075186224037892][Partition][0][StateIdle] Events: 0, PendingCommits: 0, PendingWrites: 0 2026-01-08T15:34:09.983208Z node 4 :PERSQUEUE DEBUG: partition.cpp:2347: [72075186224037892][Partition][0][StateIdle] Try persist 2026-01-08T15:34:10.086427Z node 4 :PERSQUEUE DEBUG: partition.cpp:2329: [72075186224037892][Partition][0][StateIdle] Process user action and tx events 2026-01-08T15:34:10.086466Z node 4 :PERSQUEUE DEBUG: partition.cpp:2388: [72075186224037892][Partition][0][StateIdle] Events: 0, PendingCommits: 0, PendingWrites: 0 2026-01-08T15:34:10.086481Z node 4 :PERSQUEUE DEBUG: partition.cpp:2337: [72075186224037892][Partition][0][StateIdle] Process user action and tx pending commits 2026-01-08T15:34:10.086500Z node 4 :PERSQUEUE DEBUG: partition.cpp:2388: [72075186224037892][Partition][0][StateIdle] Events: 0, PendingCommits: 0, PendingWrites: 0 2026-01-08T15:34:10.086512Z node 4 :PERSQUEUE DEBUG: partition.cpp:2347: [72075186224037892][Partition][0][StateIdle] Try persist 2026-01-08T15:34:10.190523Z node 4 :PERSQUEUE DEBUG: partition.cpp:2329: [72075186224037892][Partition][0][StateIdle] Process user action and tx events 2026-01-08T15:34:10.190555Z node 4 :PERSQUEUE DEBUG: partition.cpp:2388: [72075186224037892][Partition][0][StateIdle] Events: 0, PendingCommits: 0, PendingWrites: 0 2026-01-08T15:34:10.190569Z node 4 :PERSQUEUE DEBUG: partition.cpp:2337: [72075186224037892][Partition][0][StateIdle] Process user action and tx pending commits 2026-01-08T15:34:10.190585Z node 4 :PERSQUEUE DEBUG: partition.cpp:2388: [72075186224037892][Partition][0][StateIdle] Events: 0, PendingCommits: 0, PendingWrites: 0 2026-01-08T15:34:10.190598Z node 4 :PERSQUEUE DEBUG: partition.cpp:2347: [72075186224037892][Partition][0][StateIdle] Try persist 2026-01-08T15:34:10.294526Z node 4 :PERSQUEUE DEBUG: partition.cpp:2329: [72075186224037892][Partition][0][StateIdle] Process user action and tx events 2026-01-08T15:34:10.294563Z node 4 :PERSQUEUE DEBUG: partition.cpp:2388: [72075186224037892][Partition][0][StateIdle] Events: 0, PendingCommits: 0, PendingWrites: 0 2026-01-08T15:34:10.294579Z node 4 :PERSQUEUE DEBUG: partition.cpp:2337: [72075186224037892][Partition][0][StateIdle] Process user action and tx pending commits 2026-01-08T15:34:10.294601Z node 4 :PERSQUEUE DEBUG: partition.cpp:2388: [72075186224037892][Partition][0][StateIdle] 
Events: 0, PendingCommits: 0, PendingWrites: 0 2026-01-08T15:34:10.294615Z node 4 :PERSQUEUE DEBUG: partition.cpp:2347: [72075186224037892][Partition][0][StateIdle] Try persist |87.2%| [TM] {BAZEL_UPLOAD} ydb/public/sdk/cpp/src/client/federated_topic/ut/unittest ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/scheme_board/ut_populator/unittest >> TPopulatorQuorumTest::OneDisconnectedRingGroup [GOOD] Test command err: replicas: [1:24339059:0], [1:1099535966835:0], [1:2199047594611:0] replicaActorToServiceMap: actor: [1:6:2053], service: [1:2199047594611:0] actor: [1:4:2051], service: [1:24339059:0] actor: [1:5:2052], service: [1:1099535966835:0] ... waiting for NKikimr::TEvStateStorage::TEvListSchemeBoardResult 2026-01-08T15:34:12.891556Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:807: [1:28:2075] Handle NKikimr::TEvStateStorage::TEvListSchemeBoardResult: sender# [1:19:2066] ... waiting for NKikimr::TEvStateStorage::TEvListSchemeBoardResult (done) 2026-01-08T15:34:12.899024Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:676: [1:28:2075] Handle TEvSchemeShard::TEvDescribeSchemeResult { Status: StatusSuccess Path: "/Root/TestPath" PathId: 100 PathOwnerId: 72057594046678944 }: sender# [1:26:2073], cookie# 12345, event size# 36, preserialized size# 0 2026-01-08T15:34:12.899145Z node 1 :SCHEME_BOARD_POPULATOR NOTICE: populator.cpp:693: [1:28:2075] Update description: owner# 72057594046678944, pathId# [OwnerId: 72057594046678944, LocalPathId: 100], cookie# 12345, is deletion# false, version: 0 ... waiting for updates from replica populators 2026-01-08T15:34:12.901842Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:243: [1:29:2076] Handle NKikimrSchemeBoard.TEvHandshake { Owner: 72057594046678944 Generation: 0 }: sender# [1:4:2051] 2026-01-08T15:34:12.901918Z node 1 :SCHEME_BOARD_POPULATOR NOTICE: populator.cpp:255: [1:29:2076] Successful handshake: replica# [1:4:2051] 2026-01-08T15:34:12.901979Z node 1 :SCHEME_BOARD_POPULATOR NOTICE: populator.cpp:259: [1:29:2076] Start full sync: replica# [1:4:2051] 2026-01-08T15:34:12.902057Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:243: [1:30:2077] Handle NKikimrSchemeBoard.TEvHandshake { Owner: 72057594046678944 Generation: 0 }: sender# [1:5:2052] 2026-01-08T15:34:12.902081Z node 1 :SCHEME_BOARD_POPULATOR NOTICE: populator.cpp:255: [1:30:2077] Successful handshake: replica# [1:5:2052] 2026-01-08T15:34:12.902100Z node 1 :SCHEME_BOARD_POPULATOR NOTICE: populator.cpp:259: [1:30:2077] Start full sync: replica# [1:5:2052] 2026-01-08T15:34:12.902133Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:243: [1:31:2078] Handle NKikimrSchemeBoard.TEvHandshake { Owner: 72057594046678944 Generation: 0 }: sender# [1:6:2053] 2026-01-08T15:34:12.902157Z node 1 :SCHEME_BOARD_POPULATOR NOTICE: populator.cpp:255: [1:31:2078] Successful handshake: replica# [1:6:2053] 2026-01-08T15:34:12.902178Z node 1 :SCHEME_BOARD_POPULATOR NOTICE: populator.cpp:259: [1:31:2078] Start full sync: replica# [1:6:2053] 2026-01-08T15:34:12.902255Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:543: [1:28:2075] Handle NKikimr::NSchemeBoard::NInternalEvents::TEvRequestDescribe { PathId: Replica: [1:24339059:0] }: sender# [1:29:2076] 2026-01-08T15:34:12.902360Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:275: [1:29:2076] Handle NKikimr::NSchemeBoard::NInternalEvents::TEvDescribeResult { Commit: false DeletedPathBegin: 0 DeletedPathEnd: 0 { Path: /Root/TestPath PathId: [OwnerId: 72057594046678944, LocalPathId: 100] PathVersion: 0 } }: sender# 
[1:28:2075] 2026-01-08T15:34:12.902578Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:638: [1:28:2075] Handle NKikimr::NSchemeBoard::NInternalEvents::TEvRequestUpdate { PathId: [OwnerId: 72057594046678944, LocalPathId: 100] }: sender# [1:29:2076] 2026-01-08T15:34:12.902643Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:283: [1:29:2076] Handle NKikimrSchemeBoard.TEvUpdate { Owner: 72057594046678944 Generation: 1 }: sender# [1:28:2075], cookie# 0 2026-01-08T15:34:12.902733Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:543: [1:28:2075] Handle NKikimr::NSchemeBoard::NInternalEvents::TEvRequestDescribe { PathId: Replica: [1:1099535966835:0] }: sender# [1:30:2077] 2026-01-08T15:34:12.902793Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:291: [1:29:2076] Handle NKikimrSchemeBoard.TEvUpdateAck { Owner: 72057594046678944 Generation: 1 PathId: [OwnerId: 72057594046678944, LocalPathId: 100] Version: 0 }: sender# [1:4:2051], cookie# 0 2026-01-08T15:34:12.902871Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:275: [1:30:2077] Handle NKikimr::NSchemeBoard::NInternalEvents::TEvDescribeResult { Commit: false DeletedPathBegin: 0 DeletedPathEnd: 0 { Path: /Root/TestPath PathId: [OwnerId: 72057594046678944, LocalPathId: 100] PathVersion: 0 } }: sender# [1:28:2075] 2026-01-08T15:34:12.902989Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:638: [1:28:2075] Handle NKikimr::NSchemeBoard::NInternalEvents::TEvRequestUpdate { PathId: [OwnerId: 72057594046678944, LocalPathId: 100] }: sender# [1:30:2077] 2026-01-08T15:34:12.903039Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:283: [1:30:2077] Handle NKikimrSchemeBoard.TEvUpdate { Owner: 72057594046678944 Generation: 1 }: sender# [1:28:2075], cookie# 0 2026-01-08T15:34:12.903130Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:291: [1:30:2077] Handle NKikimrSchemeBoard.TEvUpdateAck { Owner: 72057594046678944 Generation: 1 PathId: [OwnerId: 72057594046678944, LocalPathId: 100] Version: 0 }: sender# [1:5:2052], cookie# 0 2026-01-08T15:34:12.903197Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:543: [1:28:2075] Handle NKikimr::NSchemeBoard::NInternalEvents::TEvRequestDescribe { PathId: Replica: [1:2199047594611:0] }: sender# [1:31:2078] 2026-01-08T15:34:12.903273Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:275: [1:31:2078] Handle NKikimr::NSchemeBoard::NInternalEvents::TEvDescribeResult { Commit: false DeletedPathBegin: 0 DeletedPathEnd: 0 { Path: /Root/TestPath PathId: [OwnerId: 72057594046678944, LocalPathId: 100] PathVersion: 0 } }: sender# [1:28:2075] 2026-01-08T15:34:12.903339Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:638: [1:28:2075] Handle NKikimr::NSchemeBoard::NInternalEvents::TEvRequestUpdate { PathId: [OwnerId: 72057594046678944, LocalPathId: 100] }: sender# [1:31:2078] 2026-01-08T15:34:12.903400Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:543: [1:28:2075] Handle NKikimr::NSchemeBoard::NInternalEvents::TEvRequestDescribe { PathId: [OwnerId: 72057594046678944, LocalPathId: 101] Replica: [1:24339059:0] }: sender# [1:29:2076] 2026-01-08T15:34:12.903450Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:275: [1:29:2076] Handle NKikimr::NSchemeBoard::NInternalEvents::TEvDescribeResult { Commit: true DeletedPathBegin: 0 DeletedPathEnd: 0 }: sender# [1:28:2075] 2026-01-08T15:34:12.903503Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:283: [1:31:2078] Handle NKikimrSchemeBoard.TEvUpdate { Owner: 72057594046678944 Generation: 1 }: sender# [1:28:2075], cookie# 0 
2026-01-08T15:34:12.903579Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:758: [1:28:2075] Handle NKikimrSchemeBoard.TEvUpdateAck { Owner: 72057594046678944 Generation: 1 PathId: [OwnerId: 72057594046678944, LocalPathId: 100] Version: 0 }: sender# [1:29:2076], cookie# 0 2026-01-08T15:34:12.903614Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:764: [1:28:2075] Ack for unknown update (already acked?): sender# [1:29:2076], cookie# 0 2026-01-08T15:34:12.903676Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:309: [1:29:2076] Handle NKikimrSchemeBoard.TEvCommitGeneration { Owner: 72057594046678944 Generation: 1 }: sender# [1:4:2051] 2026-01-08T15:34:12.903740Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:291: [1:31:2078] Handle NKikimrSchemeBoard.TEvUpdateAck { Owner: 72057594046678944 Generation: 1 PathId: [OwnerId: 72057594046678944, LocalPathId: 100] Version: 0 }: sender# [1:6:2053], cookie# 0 ... blocking NKikimr::NSchemeBoard::NSchemeshardEvents::TEvUpdateAck from SCHEME_BOARD_REPLICA_POPULATOR_ACTOR to SCHEME_BOARD_POPULATOR_ACTOR cookie 12345 2026-01-08T15:34:12.903833Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:543: [1:28:2075] Handle NKikimr::NSchemeBoard::NInternalEvents::TEvRequestDescribe { PathId: [OwnerId: 72057594046678944, LocalPathId: 101] Replica: [1:1099535966835:0] }: sender# [1:30:2077] 2026-01-08T15:34:12.903904Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:275: [1:30:2077] Handle NKikimr::NSchemeBoard::NInternalEvents::TEvDescribeResult { Commit: true DeletedPathBegin: 0 DeletedPathEnd: 0 }: sender# [1:28:2075] 2026-01-08T15:34:12.903951Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:309: [1:30:2077] Handle NKikimrSchemeBoard.TEvCommitGeneration { Owner: 72057594046678944 Generation: 1 }: sender# [1:5:2052] 2026-01-08T15:34:12.903997Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:758: [1:28:2075] Handle NKikimrSchemeBoard.TEvUpdateAck { Owner: 72057594046678944 Generation: 1 PathId: [OwnerId: 72057594046678944, LocalPathId: 100] Version: 0 }: sender# [1:30:2077], cookie# 0 2026-01-08T15:34:12.904027Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:764: [1:28:2075] Ack for unknown update (already acked?): sender# [1:30:2077], cookie# 0 ... 
blocking NKikimr::NSchemeBoard::NSchemeshardEvents::TEvUpdateAck from SCHEME_BOARD_REPLICA_POPULATOR_ACTOR to SCHEME_BOARD_POPULATOR_ACTOR cookie 12345 2026-01-08T15:34:12.904088Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:543: [1:28:2075] Handle NKikimr::NSchemeBoard::NInternalEvents::TEvRequestDescribe { PathId: [OwnerId: 72057594046678944, LocalPathId: 101] Replica: [1:2199047594611:0] }: sender# [1:31:2078] 2026-01-08T15:34:12.904130Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:275: [1:31:2078] Handle NKikimr::NSchemeBoard::NInternalEvents::TEvDescribeResult { Commit: true DeletedPathBegin: 0 DeletedPathEnd: 0 }: sender# [1:28:2075] 2026-01-08T15:34:12.904181Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:758: [1:28:2075] Handle NKikimrSchemeBoard.TEvUpdateAck { Owner: 72057594046678944 Generation: 1 PathId: [OwnerId: 72057594046678944, LocalPathId: 100] Version: 0 }: sender# [1:31:2078], cookie# 0 2026-01-08T15:34:12.904242Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:764: [1:28:2075] Ack for unknown update (already acked?): sender# [1:31:2078], cookie# 0 2026-01-08T15:34:12.904269Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:309: [1:31:2078] Handle NKikimrSchemeBoard.TEvCommitGeneration { Owner: 72057594046678944 Generation: 1 }: sender# [1:6:2053] ... blocking NKikimr::NSchemeBoard::NSchemeshardEvents::TEvUpdateAck from SCHEME_BOARD_REPLICA_POPULATOR_ACTOR to SCHEME_BOARD_POPULATOR_ACTOR cookie 12345 ... waiting for updates from replica populators (done) populatorToReplicaMap: populator: [1:29:2076], replica: [1:24339059:0] populator: [1:30:2077], replica: [1:1099535966835:0] populator: [1:31:2078], replica: [1:2199047594611:0] 2026-01-08T15:34:12.904463Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:758: [1:28:2075] Handle NKikimrSchemeBoard.TEvUpdateAck { Owner: 72057594046678944 Generation: 1 PathId: [OwnerId: 72057594046678944, LocalPathId: 100] Version: 0 }: sender# [1:29:2076], cookie# 12345 2026-01-08T15:34:12.914889Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:758: [1:28:2075] Handle NKikimrSchemeBoard.TEvUpdateAck { Owner: 72057594046678944 Generation: 1 PathId: [OwnerId: 72057594046678944, LocalPathId: 100] Version: 0 }: sender# [1:30:2077], cookie# 12345 2026-01-08T15:34:12.914982Z node 1 :SCHEME_BOARD_POPULATOR NOTICE: populator.cpp:786: [1:28:2075] Ack update: ack to# [1:26:2073], cookie# 12345, pathId# [OwnerId: 72057594046678944, LocalPathId: 100], version# 0 |87.2%| [TM] {BAZEL_UPLOAD} ydb/core/tx/scheme_board/ut_populator/unittest >> YdbIndexTable::MultiShardTableUniqAndNonUniqIndex ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/scheme_board/ut_populator/unittest >> TPopulatorTest::RemoveDir [GOOD] Test command err: 2026-01-08T15:34:12.945210Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7755: Cannot subscribe to console configs 2026-01-08T15:34:12.945267Z node 1 :IMPORT WARN: schemeshard_import.cpp:440: Table profiles were not loaded ... 
waiting for SysViewsRoster update finished FAKE_COORDINATOR: Add transaction: 281474976710657 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 281474976710657 at step: 5000001 FAKE_COORDINATOR: Erasing txId 281474976710657 FAKE_COORDINATOR: Add transaction: 281474976710671 at step: 5000002 FAKE_COORDINATOR: advance: minStep5000002 State->FrontStep: 5000001 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 281474976710671 at step: 5000002 FAKE_COORDINATOR: Add transaction: 281474976710666 at step: 5000003 FAKE_COORDINATOR: advance: minStep5000002 State->FrontStep: 5000002 FAKE_COORDINATOR: Add transaction: 281474976710670 at step: 5000004 FAKE_COORDINATOR: advance: minStep5000002 State->FrontStep: 5000002 FAKE_COORDINATOR: Add transaction: 281474976710665 at step: 5000005 FAKE_COORDINATOR: advance: minStep5000002 State->FrontStep: 5000002 FAKE_COORDINATOR: Add transaction: 281474976710669 at step: 5000006 FAKE_COORDINATOR: advance: minStep5000002 State->FrontStep: 5000002 FAKE_COORDINATOR: Add transaction: 281474976710664 at step: 5000007 FAKE_COORDINATOR: advance: minStep5000002 State->FrontStep: 5000002 FAKE_COORDINATOR: Add transaction: 281474976710668 at step: 5000008 FAKE_COORDINATOR: advance: minStep5000002 State->FrontStep: 5000002 FAKE_COORDINATOR: Add transaction: 281474976710663 at step: 5000009 FAKE_COORDINATOR: advance: minStep5000002 State->FrontStep: 5000002 FAKE_COORDINATOR: Add transaction: 281474976710667 at step: 5000010 FAKE_COORDINATOR: advance: minStep5000002 State->FrontStep: 5000002 FAKE_COORDINATOR: Add transaction: 281474976710662 at step: 5000011 FAKE_COORDINATOR: advance: minStep5000002 State->FrontStep: 5000002 FAKE_COORDINATOR: Add transaction: 281474976710661 at step: 5000012 FAKE_COORDINATOR: advance: minStep5000002 State->FrontStep: 5000002 FAKE_COORDINATOR: Add transaction: 281474976710660 at step: 5000013 FAKE_COORDINATOR: advance: minStep5000002 State->FrontStep: 5000002 FAKE_COORDINATOR: Add transaction: 281474976710659 at step: 5000014 FAKE_COORDINATOR: advance: minStep5000002 State->FrontStep: 5000002 FAKE_COORDINATOR: Add transaction: 281474976710658 at step: 5000015 FAKE_COORDINATOR: advance: minStep5000002 State->FrontStep: 5000002 FAKE_COORDINATOR: Add transaction: 281474976710692 at step: 5000016 FAKE_COORDINATOR: advance: minStep5000002 State->FrontStep: 5000002 FAKE_COORDINATOR: Add transaction: 281474976710687 at step: 5000017 FAKE_COORDINATOR: advance: minStep5000002 State->FrontStep: 5000002 FAKE_COORDINATOR: Add transaction: 281474976710691 at step: 5000018 FAKE_COORDINATOR: advance: minStep5000002 State->FrontStep: 5000002 FAKE_COORDINATOR: Add transaction: 281474976710686 at step: 5000019 FAKE_COORDINATOR: advance: minStep5000002 State->FrontStep: 5000002 FAKE_COORDINATOR: Add transaction: 281474976710690 at step: 5000020 FAKE_COORDINATOR: advance: minStep5000002 State->FrontStep: 5000002 FAKE_COORDINATOR: Add transaction: 281474976710685 at step: 5000021 FAKE_COORDINATOR: advance: minStep5000002 State->FrontStep: 5000002 FAKE_COORDINATOR: Add transaction: 281474976710689 at step: 5000022 FAKE_COORDINATOR: advance: minStep5000002 State->FrontStep: 5000002 FAKE_COORDINATOR: Add transaction: 281474976710684 at step: 5000023 FAKE_COORDINATOR: advance: minStep5000002 State->FrontStep: 5000002 FAKE_COORDINATOR: Add transaction: 281474976710688 at step: 5000024 FAKE_COORDINATOR: advance: minStep5000002 State->FrontStep: 5000002 
FAKE_COORDINATOR: Add transaction: 281474976710683 at step: 5000025 FAKE_COORDINATOR: advance: minStep5000002 State->FrontStep: 5000002 FAKE_COORDINATOR: Add transaction: 281474976710682 at step: 5000026 FAKE_COORDINATOR: advance: minStep5000002 State->FrontStep: 5000002 FAKE_COORDINATOR: Add transaction: 281474976710677 at step: 5000027 FAKE_COORDINATOR: advance: minStep5000002 State->FrontStep: 5000002 FAKE_COORDINATOR: Add transaction: 281474976710681 at step: 5000028 FAKE_COORDINATOR: advance: minStep5000002 State->FrontStep: 5000002 FAKE_COORDINATOR: Add transaction: 281474976710676 at step: 5000029 FAKE_COORDINATOR: advance: minStep5000002 State->FrontStep: 5000002 FAKE_COORDINATOR: Add transaction: 281474976710680 at step: 5000030 FAKE_COORDINATOR: advance: minStep5000002 State->FrontStep: 5000002 FAKE_COORDINATOR: Add transaction: 281474976710675 at step: 5000031 FAKE_COORDINATOR: advance: minStep5000002 State->FrontStep: 5000002 FAKE_COORDINATOR: Add transaction: 281474976710679 at step: 5000032 FAKE_COORDINATOR: advance: minStep5000002 State->FrontStep: 5000002 FAKE_COORDINATOR: Add transaction: 281474976710674 at step: 5000033 FAKE_COORDINATOR: advance: minStep5000002 State->FrontStep: 5000002 FAKE_COORDINATOR: Add transaction: 281474976710678 at step: 5000034 FAKE_COORDINATOR: advance: minStep5000002 State->FrontStep: 5000002 FAKE_COORDINATOR: Add transaction: 281474976710673 at step: 5000035 FAKE_COORDINATOR: advance: minStep5000002 State->FrontStep: 5000002 FAKE_COORDINATOR: Add transaction: 281474976710672 at step: 5000036 FAKE_COORDINATOR: advance: minStep5000002 State->FrontStep: 5000002 FAKE_COORDINATOR: Erasing txId 281474976710671 FAKE_COORDINATOR: advance: minStep5000003 State->FrontStep: 5000002 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 281474976710666 at step: 5000003 FAKE_COORDINATOR: Erasing txId 281474976710666 FAKE_COORDINATOR: advance: minStep5000004 State->FrontStep: 5000003 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 281474976710670 at step: 5000004 FAKE_COORDINATOR: Erasing txId 281474976710670 FAKE_COORDINATOR: advance: minStep5000005 State->FrontStep: 5000004 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 281474976710665 at step: 5000005 FAKE_COORDINATOR: Erasing txId 281474976710665 FAKE_COORDINATOR: advance: minStep5000006 State->FrontStep: 5000005 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 281474976710669 at step: 5000006 FAKE_COORDINATOR: Erasing txId 281474976710669 FAKE_COORDINATOR: advance: minStep5000007 State->FrontStep: 5000006 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 281474976710664 at step: 5000007 FAKE_COORDINATOR: Erasing txId 281474976710664 FAKE_COORDINATOR: advance: minStep5000008 State->FrontStep: 5000007 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 281474976710668 at step: 5000008 FAKE_COORDINATOR: Erasing txId 281474976710668 FAKE_COORDINATOR: advance: minStep5000009 State->FrontStep: 5000008 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 281474976710663 at step: 5000009 FAKE_COORDINATOR: Erasing txId 281474976710663 FAKE_COORDINATOR: advance: minStep5000010 State->FrontStep: 5000009 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 281474976710667 at step: 5000010 FAKE_COORDINATOR: Erasing txId 281474976710667 FAKE_COORDINATOR: advance: minStep5000011 State->FrontStep: 5000010 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 281474976710662 at step: 
5000011 FAKE_COORDINATOR: Erasing txId 281474976710662 FAKE_COORDINATOR: advance: minStep5000012 State->FrontStep: 5000011 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 281474976710661 at step: 5000012 FAKE_COORDINATOR: Erasing txId 281474976710661 FAKE_COORDINATOR: advance: minStep5000013 State->FrontStep: 5000012 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 281474976710660 at step: 5000013 FAKE_COORDINATOR: Erasing txId 281474976710660 FAKE_COORDINATOR: advance: minStep5000014 State->FrontStep: 5000013 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 281474976710659 at step: 5000014 FAKE_COORDINATOR: Erasing txId 281474976710659 FAKE_COORDINATOR: advance: minStep5000015 State->FrontStep: 5000014 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 281474976710658 at step: 5000015 FAKE_COORDINATOR: Erasing txId 281474976710658 FAKE_COORDINATOR: advance: minStep5000016 State->FrontStep: 5000015 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 281474976710692 at step: 5000016 FAKE_COORDINATOR: Erasing txId 281474976710692 FAKE_COORDINATOR: advance: minStep5000017 State->FrontStep: 5000016 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 281474976710687 at step: 5000017 FAKE_COORDINATOR: Erasing txId 281474976710687 FAKE_COORDINATOR: advance: minStep5000018 State->FrontStep: 5000017 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 281474976710691 at step: 5000018 FAKE_COORDINATOR: Erasing txId 281474976710691 FAKE_COORDINATOR: advance: minStep5000019 State->FrontStep: 5000018 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 281474976710686 at step: 5000019 FAKE_COORDINATOR: Erasing txId 281474976710686 FAKE_COORDINATOR: advance: minStep5000020 State->FrontStep: 5000019 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 281474976710690 at step: 5000020 FAKE_COORDINATOR: Erasing txId 281474976710690 FAKE_COORDINATOR: advance: minStep5000021 State->FrontStep: 5000020 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 281474976710685 at step: 5000021 FAKE_COORDINATOR: Erasing txId 281474976710685 FAKE_COORDINATOR: advance: minStep5000022 State->FrontStep: 5000021 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 281474976710689 at step: 5000022 FAKE_COORDINATOR: Erasing txId 281474976710689 FAKE_COORDINATOR: advance: minStep5000023 State->FrontStep: 5000022 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 281474976710684 at step: 5000023 FAKE_COORDINATOR: Erasing txId 281474976710684 FAKE_COORDINATOR: advance: minStep5000024 State->FrontStep: 5000023 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 281474976710688 at step: 5000024 FAKE_COORDINATOR: Erasing txId 281474976710688 FAKE_COORDINATOR: advance: minStep5000025 State->FrontStep: 5000024 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 281474976710683 at step: 5000025 FAKE_COORDINATOR: Erasing txId 281474976710683 FAKE_COORDINATOR: advance: minStep5000026 State->FrontStep: 5000025 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 281474976710682 at step: 5000026 FAKE_COORDINATOR: Erasing txId 281474976710682 FAKE_COORDINATOR: advance: minStep5000027 State->FrontStep: 5000026 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 281474976710677 at step: 5000027 FAKE_COORDINATOR: Erasing txId 281474976710677 FAKE_COORDINATOR: advance: minStep5000028 State->FrontStep: 5000027 
FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 281474976710681 at step: 5000028 FAKE_COORDINATOR: Erasing txId 281474976710681 FAKE_COORDINATOR: advance: minStep5000029 State->FrontStep: 5000028 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 281474976710676 at step: 5000029 FAKE_COORDINATOR: Erasing txId 281474976710676 FAKE_COORDINATOR: advance: minStep5000030 State->FrontStep: 5000029 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 281474976710680 at step: 5000030 FAKE_COORDINATOR: Erasing txId 281474976710680 FAKE_COORDINATOR: advance: minStep5000031 State->FrontStep: 5000030 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 281474976710675 at step: 5000031 FAKE_COORDINATOR: Erasing txId 281474976710675 FAKE_COORDINATOR: advance: minStep5000032 State->FrontStep: 5000031 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 281474976710679 at step: 5000032 FAKE_COORDINATOR: Erasing txId 281474976710679 FAKE_COORDINATOR: advance: minStep5000033 State->FrontStep: 5000032 FAKE_COORDINATOR: ... ner: 72057594046678944 Generation: 2 }: sender# [1:96:2124], cookie# 101 2026-01-08T15:34:13.398762Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:758: [1:96:2124] Handle NKikimrSchemeBoard.TEvUpdateAck { Owner: 72057594046678944 Generation: 2 PathId: [OwnerId: 72057594046678944, LocalPathId: 1] Version: 8 }: sender# [1:97:2125], cookie# 101 2026-01-08T15:34:13.398854Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:758: [1:96:2124] Handle NKikimrSchemeBoard.TEvUpdateAck { Owner: 72057594046678944 Generation: 2 PathId: [OwnerId: 72057594046678944, LocalPathId: 1] Version: 8 }: sender# [1:98:2126], cookie# 101 2026-01-08T15:34:13.398885Z node 1 :SCHEME_BOARD_POPULATOR NOTICE: populator.cpp:786: [1:96:2124] Ack update: ack to# [1:71:2111], cookie# 101, pathId# [OwnerId: 72057594046678944, LocalPathId: 1], version# 8 2026-01-08T15:34:13.398981Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:291: [1:97:2125] Handle NKikimrSchemeBoard.TEvUpdateAck { Owner: 72057594046678944 Generation: 2 PathId: [OwnerId: 72057594046678944, LocalPathId: 38] Version: 3 }: sender# [1:12:2059], cookie# 101 2026-01-08T15:34:13.399047Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:291: [1:98:2126] Handle NKikimrSchemeBoard.TEvUpdateAck { Owner: 72057594046678944 Generation: 2 PathId: [OwnerId: 72057594046678944, LocalPathId: 38] Version: 3 }: sender# [1:15:2062], cookie# 101 2026-01-08T15:34:13.399099Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:291: [1:99:2127] Handle NKikimrSchemeBoard.TEvUpdateAck { Owner: 72057594046678944 Generation: 2 PathId: [OwnerId: 72057594046678944, LocalPathId: 38] Version: 3 }: sender# [1:18:2065], cookie# 101 2026-01-08T15:34:13.399456Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:758: [1:96:2124] Handle NKikimrSchemeBoard.TEvUpdateAck { Owner: 72057594046678944 Generation: 2 PathId: [OwnerId: 72057594046678944, LocalPathId: 1] Version: 8 }: sender# [1:99:2127], cookie# 101 2026-01-08T15:34:13.399579Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:758: [1:96:2124] Handle NKikimrSchemeBoard.TEvUpdateAck { Owner: 72057594046678944 Generation: 2 PathId: [OwnerId: 72057594046678944, LocalPathId: 38] Version: 3 }: sender# [1:97:2125], cookie# 101 2026-01-08T15:34:13.399730Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:758: [1:96:2124] Handle NKikimrSchemeBoard.TEvUpdateAck { Owner: 72057594046678944 Generation: 2 PathId: [OwnerId: 72057594046678944, LocalPathId: 38] 
Version: 3 }: sender# [1:98:2126], cookie# 101 2026-01-08T15:34:13.399775Z node 1 :SCHEME_BOARD_POPULATOR NOTICE: populator.cpp:786: [1:96:2124] Ack update: ack to# [1:71:2111], cookie# 101, pathId# [OwnerId: 72057594046678944, LocalPathId: 38], version# 3 2026-01-08T15:34:13.400253Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:758: [1:96:2124] Handle NKikimrSchemeBoard.TEvUpdateAck { Owner: 72057594046678944 Generation: 2 PathId: [OwnerId: 72057594046678944, LocalPathId: 38] Version: 3 }: sender# [1:99:2127], cookie# 101 2026-01-08T15:34:13.400291Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:764: [1:96:2124] Ack for unknown update (already acked?): sender# [1:99:2127], cookie# 101 FAKE_COORDINATOR: Add transaction: 101 at step: 5000038 FAKE_COORDINATOR: advance: minStep5000038 State->FrontStep: 5000037 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 101 at step: 5000038 FAKE_COORDINATOR: Erasing txId 101 2026-01-08T15:34:13.402142Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:676: [1:96:2124] Handle TEvSchemeShard::TEvDescribeSchemeResult { Status: StatusSuccess Path: "/Root" PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 9 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 9 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 8 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: true } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 0 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 36 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SecurityState { Audience: "/Root" } SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 1 PathOwnerId: 72057594046678944 }: sender# [1:71:2111], cookie# 101, event size# 447, preserialized size# 65 2026-01-08T15:34:13.402203Z node 1 :SCHEME_BOARD_POPULATOR NOTICE: populator.cpp:693: [1:96:2124] Update description: owner# 72057594046678944, pathId# [OwnerId: 72057594046678944, LocalPathId: 1], cookie# 101, is deletion# false, version: 9 2026-01-08T15:34:13.402577Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:283: [1:97:2125] Handle NKikimrSchemeBoard.TEvUpdate { Owner: 72057594046678944 Generation: 2 }: sender# [1:96:2124], cookie# 101 2026-01-08T15:34:13.402637Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:283: [1:98:2126] Handle NKikimrSchemeBoard.TEvUpdate { Owner: 72057594046678944 Generation: 2 }: sender# [1:96:2124], cookie# 101 2026-01-08T15:34:13.402671Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:283: [1:99:2127] Handle 
NKikimrSchemeBoard.TEvUpdate { Owner: 72057594046678944 Generation: 2 }: sender# [1:96:2124], cookie# 101 2026-01-08T15:34:13.403016Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:676: [1:96:2124] Handle TEvSchemeShard::TEvDescribeSchemeResult { Status: StatusPathDoesNotExist Reason: "Check failed: path: \'/Root/DirB\', error: path has been deleted (id: [OwnerId: 72057594046678944, LocalPathId: 38], type: EPathTypeDir, state: EPathStateNotExist), drop stepId: 5000038, drop txId: 101" Path: "/Root/DirB" PathId: 38 LastExistedPrefixPath: "/Root" LastExistedPrefixPathId: 1 LastExistedPrefixDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathSubType: EPathSubTypeEmpty ChildrenExist: true } } PathOwnerId: 72057594046678944 }: sender# [1:71:2111], cookie# 101, event size# 307, preserialized size# 0 2026-01-08T15:34:13.403062Z node 1 :SCHEME_BOARD_POPULATOR NOTICE: populator.cpp:693: [1:96:2124] Update description: owner# 72057594046678944, pathId# [OwnerId: 72057594046678944, LocalPathId: 38], cookie# 101, is deletion# true, version: 0 2026-01-08T15:34:13.403145Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:291: [1:97:2125] Handle NKikimrSchemeBoard.TEvUpdateAck { Owner: 72057594046678944 Generation: 2 PathId: [OwnerId: 72057594046678944, LocalPathId: 1] Version: 9 }: sender# [1:12:2059], cookie# 101 2026-01-08T15:34:13.403195Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:291: [1:98:2126] Handle NKikimrSchemeBoard.TEvUpdateAck { Owner: 72057594046678944 Generation: 2 PathId: [OwnerId: 72057594046678944, LocalPathId: 1] Version: 9 }: sender# [1:15:2062], cookie# 101 2026-01-08T15:34:13.403264Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:291: [1:99:2127] Handle NKikimrSchemeBoard.TEvUpdateAck { Owner: 72057594046678944 Generation: 2 PathId: [OwnerId: 72057594046678944, LocalPathId: 1] Version: 9 }: sender# [1:18:2065], cookie# 101 2026-01-08T15:34:13.403323Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:758: [1:96:2124] Handle NKikimrSchemeBoard.TEvUpdateAck { Owner: 72057594046678944 Generation: 2 PathId: [OwnerId: 72057594046678944, LocalPathId: 1] Version: 9 }: sender# [1:97:2125], cookie# 101 2026-01-08T15:34:13.403358Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:283: [1:97:2125] Handle NKikimrSchemeBoard.TEvUpdate { Owner: 72057594046678944 Generation: 2 }: sender# [1:96:2124], cookie# 101 2026-01-08T15:34:13.403396Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:283: [1:98:2126] Handle NKikimrSchemeBoard.TEvUpdate { Owner: 72057594046678944 Generation: 2 }: sender# [1:96:2124], cookie# 101 2026-01-08T15:34:13.403431Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:283: [1:99:2127] Handle NKikimrSchemeBoard.TEvUpdate { Owner: 72057594046678944 Generation: 2 }: sender# [1:96:2124], cookie# 101 2026-01-08T15:34:13.403665Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:758: [1:96:2124] Handle NKikimrSchemeBoard.TEvUpdateAck { Owner: 72057594046678944 Generation: 2 PathId: [OwnerId: 72057594046678944, LocalPathId: 1] Version: 9 }: sender# [1:98:2126], cookie# 101 2026-01-08T15:34:13.403702Z node 1 :SCHEME_BOARD_POPULATOR NOTICE: populator.cpp:786: [1:96:2124] Ack update: ack to# [1:71:2111], cookie# 101, pathId# [OwnerId: 72057594046678944, LocalPathId: 1], version# 9 2026-01-08T15:34:13.403768Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: 
populator.cpp:291: [1:97:2125] Handle NKikimrSchemeBoard.TEvUpdateAck { Owner: 72057594046678944 Generation: 2 PathId: [OwnerId: 72057594046678944, LocalPathId: 38] Version: 18446744073709551615 }: sender# [1:12:2059], cookie# 101 2026-01-08T15:34:13.403808Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:291: [1:98:2126] Handle NKikimrSchemeBoard.TEvUpdateAck { Owner: 72057594046678944 Generation: 2 PathId: [OwnerId: 72057594046678944, LocalPathId: 38] Version: 18446744073709551615 }: sender# [1:15:2062], cookie# 101 2026-01-08T15:34:13.403840Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:291: [1:99:2127] Handle NKikimrSchemeBoard.TEvUpdateAck { Owner: 72057594046678944 Generation: 2 PathId: [OwnerId: 72057594046678944, LocalPathId: 38] Version: 18446744073709551615 }: sender# [1:18:2065], cookie# 101 2026-01-08T15:34:13.404134Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:758: [1:96:2124] Handle NKikimrSchemeBoard.TEvUpdateAck { Owner: 72057594046678944 Generation: 2 PathId: [OwnerId: 72057594046678944, LocalPathId: 1] Version: 9 }: sender# [1:99:2127], cookie# 101 2026-01-08T15:34:13.404211Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:758: [1:96:2124] Handle NKikimrSchemeBoard.TEvUpdateAck { Owner: 72057594046678944 Generation: 2 PathId: [OwnerId: 72057594046678944, LocalPathId: 38] Version: 18446744073709551615 }: sender# [1:97:2125], cookie# 101 2026-01-08T15:34:13.404434Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:758: [1:96:2124] Handle NKikimrSchemeBoard.TEvUpdateAck { Owner: 72057594046678944 Generation: 2 PathId: [OwnerId: 72057594046678944, LocalPathId: 38] Version: 18446744073709551615 }: sender# [1:98:2126], cookie# 101 2026-01-08T15:34:13.404479Z node 1 :SCHEME_BOARD_POPULATOR NOTICE: populator.cpp:786: [1:96:2124] Ack update: ack to# [1:71:2111], cookie# 101, pathId# [OwnerId: 72057594046678944, LocalPathId: 38], version# 18446744073709551615 2026-01-08T15:34:13.404539Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:758: [1:96:2124] Handle NKikimrSchemeBoard.TEvUpdateAck { Owner: 72057594046678944 Generation: 2 PathId: [OwnerId: 72057594046678944, LocalPathId: 38] Version: 18446744073709551615 }: sender# [1:99:2127], cookie# 101 2026-01-08T15:34:13.404585Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:764: [1:96:2124] Ack for unknown update (already acked?): sender# [1:99:2127], cookie# 101 TestModificationResult got TxId: 101, wait until txId: 101 |87.2%| [TM] {BAZEL_UPLOAD} ydb/core/tx/scheme_board/ut_populator/unittest >> TPopulatorTest::MakeDir [GOOD] |87.2%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/schemeshard/ut_base_reboots/ydb-core-tx-schemeshard-ut_base_reboots |87.2%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/schemeshard/ut_base_reboots/ydb-core-tx-schemeshard-ut_base_reboots |87.2%| [LD] {RESULT} $(B)/ydb/core/tx/schemeshard/ut_base_reboots/ydb-core-tx-schemeshard-ut_base_reboots >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-LocalUser-CreateUser-9 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-LocalUser-CreateUser-10 >> KqpRboYql::ConstantFolding+ColumnStore [GOOD] >> KqpRboYql::ConstantFolding-ColumnStore ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/scheme_board/ut_populator/unittest >> TPopulatorTest::MakeDir [GOOD] Test command err: 2026-01-08T15:34:13.871000Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7755: Cannot subscribe to console configs 2026-01-08T15:34:13.871074Z node 1 :IMPORT WARN: schemeshard_import.cpp:440: Table profiles were not 
loaded ... waiting for SysViewsRoster update finished FAKE_COORDINATOR: Add transaction: 281474976710657 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 281474976710657 at step: 5000001 FAKE_COORDINATOR: Erasing txId 281474976710657 FAKE_COORDINATOR: Add transaction: 281474976710671 at step: 5000002 FAKE_COORDINATOR: advance: minStep5000002 State->FrontStep: 5000001 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 281474976710671 at step: 5000002 FAKE_COORDINATOR: Add transaction: 281474976710666 at step: 5000003 FAKE_COORDINATOR: advance: minStep5000002 State->FrontStep: 5000002 FAKE_COORDINATOR: Add transaction: 281474976710670 at step: 5000004 FAKE_COORDINATOR: advance: minStep5000002 State->FrontStep: 5000002 FAKE_COORDINATOR: Add transaction: 281474976710665 at step: 5000005 FAKE_COORDINATOR: advance: minStep5000002 State->FrontStep: 5000002 FAKE_COORDINATOR: Add transaction: 281474976710669 at step: 5000006 FAKE_COORDINATOR: advance: minStep5000002 State->FrontStep: 5000002 FAKE_COORDINATOR: Add transaction: 281474976710664 at step: 5000007 FAKE_COORDINATOR: advance: minStep5000002 State->FrontStep: 5000002 FAKE_COORDINATOR: Add transaction: 281474976710668 at step: 5000008 FAKE_COORDINATOR: advance: minStep5000002 State->FrontStep: 5000002 FAKE_COORDINATOR: Add transaction: 281474976710663 at step: 5000009 FAKE_COORDINATOR: advance: minStep5000002 State->FrontStep: 5000002 FAKE_COORDINATOR: Add transaction: 281474976710667 at step: 5000010 FAKE_COORDINATOR: advance: minStep5000002 State->FrontStep: 5000002 FAKE_COORDINATOR: Add transaction: 281474976710662 at step: 5000011 FAKE_COORDINATOR: advance: minStep5000002 State->FrontStep: 5000002 FAKE_COORDINATOR: Add transaction: 281474976710661 at step: 5000012 FAKE_COORDINATOR: advance: minStep5000002 State->FrontStep: 5000002 FAKE_COORDINATOR: Add transaction: 281474976710660 at step: 5000013 FAKE_COORDINATOR: advance: minStep5000002 State->FrontStep: 5000002 FAKE_COORDINATOR: Add transaction: 281474976710659 at step: 5000014 FAKE_COORDINATOR: advance: minStep5000002 State->FrontStep: 5000002 FAKE_COORDINATOR: Add transaction: 281474976710658 at step: 5000015 FAKE_COORDINATOR: advance: minStep5000002 State->FrontStep: 5000002 FAKE_COORDINATOR: Add transaction: 281474976710692 at step: 5000016 FAKE_COORDINATOR: advance: minStep5000002 State->FrontStep: 5000002 FAKE_COORDINATOR: Add transaction: 281474976710687 at step: 5000017 FAKE_COORDINATOR: advance: minStep5000002 State->FrontStep: 5000002 FAKE_COORDINATOR: Add transaction: 281474976710691 at step: 5000018 FAKE_COORDINATOR: advance: minStep5000002 State->FrontStep: 5000002 FAKE_COORDINATOR: Add transaction: 281474976710686 at step: 5000019 FAKE_COORDINATOR: advance: minStep5000002 State->FrontStep: 5000002 FAKE_COORDINATOR: Add transaction: 281474976710690 at step: 5000020 FAKE_COORDINATOR: advance: minStep5000002 State->FrontStep: 5000002 FAKE_COORDINATOR: Add transaction: 281474976710685 at step: 5000021 FAKE_COORDINATOR: advance: minStep5000002 State->FrontStep: 5000002 FAKE_COORDINATOR: Add transaction: 281474976710689 at step: 5000022 FAKE_COORDINATOR: advance: minStep5000002 State->FrontStep: 5000002 FAKE_COORDINATOR: Add transaction: 281474976710684 at step: 5000023 FAKE_COORDINATOR: advance: minStep5000002 State->FrontStep: 5000002 FAKE_COORDINATOR: Add transaction: 281474976710688 at step: 5000024 FAKE_COORDINATOR: advance: minStep5000002 State->FrontStep: 
5000002 FAKE_COORDINATOR: Add transaction: 281474976710683 at step: 5000025 FAKE_COORDINATOR: advance: minStep5000002 State->FrontStep: 5000002 FAKE_COORDINATOR: Add transaction: 281474976710682 at step: 5000026 FAKE_COORDINATOR: advance: minStep5000002 State->FrontStep: 5000002 FAKE_COORDINATOR: Add transaction: 281474976710677 at step: 5000027 FAKE_COORDINATOR: advance: minStep5000002 State->FrontStep: 5000002 FAKE_COORDINATOR: Add transaction: 281474976710681 at step: 5000028 FAKE_COORDINATOR: advance: minStep5000002 State->FrontStep: 5000002 FAKE_COORDINATOR: Add transaction: 281474976710676 at step: 5000029 FAKE_COORDINATOR: advance: minStep5000002 State->FrontStep: 5000002 FAKE_COORDINATOR: Add transaction: 281474976710680 at step: 5000030 FAKE_COORDINATOR: advance: minStep5000002 State->FrontStep: 5000002 FAKE_COORDINATOR: Add transaction: 281474976710675 at step: 5000031 FAKE_COORDINATOR: advance: minStep5000002 State->FrontStep: 5000002 FAKE_COORDINATOR: Add transaction: 281474976710679 at step: 5000032 FAKE_COORDINATOR: advance: minStep5000002 State->FrontStep: 5000002 FAKE_COORDINATOR: Add transaction: 281474976710674 at step: 5000033 FAKE_COORDINATOR: advance: minStep5000002 State->FrontStep: 5000002 FAKE_COORDINATOR: Add transaction: 281474976710678 at step: 5000034 FAKE_COORDINATOR: advance: minStep5000002 State->FrontStep: 5000002 FAKE_COORDINATOR: Add transaction: 281474976710673 at step: 5000035 FAKE_COORDINATOR: advance: minStep5000002 State->FrontStep: 5000002 FAKE_COORDINATOR: Add transaction: 281474976710672 at step: 5000036 FAKE_COORDINATOR: advance: minStep5000002 State->FrontStep: 5000002 FAKE_COORDINATOR: Erasing txId 281474976710671 FAKE_COORDINATOR: advance: minStep5000003 State->FrontStep: 5000002 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 281474976710666 at step: 5000003 FAKE_COORDINATOR: Erasing txId 281474976710666 FAKE_COORDINATOR: advance: minStep5000004 State->FrontStep: 5000003 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 281474976710670 at step: 5000004 FAKE_COORDINATOR: Erasing txId 281474976710670 FAKE_COORDINATOR: advance: minStep5000005 State->FrontStep: 5000004 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 281474976710665 at step: 5000005 FAKE_COORDINATOR: Erasing txId 281474976710665 FAKE_COORDINATOR: advance: minStep5000006 State->FrontStep: 5000005 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 281474976710669 at step: 5000006 FAKE_COORDINATOR: Erasing txId 281474976710669 FAKE_COORDINATOR: advance: minStep5000007 State->FrontStep: 5000006 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 281474976710664 at step: 5000007 FAKE_COORDINATOR: Erasing txId 281474976710664 FAKE_COORDINATOR: advance: minStep5000008 State->FrontStep: 5000007 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 281474976710668 at step: 5000008 FAKE_COORDINATOR: Erasing txId 281474976710668 FAKE_COORDINATOR: advance: minStep5000009 State->FrontStep: 5000008 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 281474976710663 at step: 5000009 FAKE_COORDINATOR: Erasing txId 281474976710663 FAKE_COORDINATOR: advance: minStep5000010 State->FrontStep: 5000009 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 281474976710667 at step: 5000010 FAKE_COORDINATOR: Erasing txId 281474976710667 FAKE_COORDINATOR: advance: minStep5000011 State->FrontStep: 5000010 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 281474976710662 at 
step: 5000011 FAKE_COORDINATOR: Erasing txId 281474976710662 FAKE_COORDINATOR: advance: minStep5000012 State->FrontStep: 5000011 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 281474976710661 at step: 5000012 FAKE_COORDINATOR: Erasing txId 281474976710661 FAKE_COORDINATOR: advance: minStep5000013 State->FrontStep: 5000012 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 281474976710660 at step: 5000013 FAKE_COORDINATOR: Erasing txId 281474976710660 FAKE_COORDINATOR: advance: minStep5000014 State->FrontStep: 5000013 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 281474976710659 at step: 5000014 FAKE_COORDINATOR: Erasing txId 281474976710659 FAKE_COORDINATOR: advance: minStep5000015 State->FrontStep: 5000014 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 281474976710658 at step: 5000015 FAKE_COORDINATOR: Erasing txId 281474976710658 FAKE_COORDINATOR: advance: minStep5000016 State->FrontStep: 5000015 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 281474976710692 at step: 5000016 FAKE_COORDINATOR: Erasing txId 281474976710692 FAKE_COORDINATOR: advance: minStep5000017 State->FrontStep: 5000016 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 281474976710687 at step: 5000017 FAKE_COORDINATOR: Erasing txId 281474976710687 FAKE_COORDINATOR: advance: minStep5000018 State->FrontStep: 5000017 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 281474976710691 at step: 5000018 FAKE_COORDINATOR: Erasing txId 281474976710691 FAKE_COORDINATOR: advance: minStep5000019 State->FrontStep: 5000018 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 281474976710686 at step: 5000019 FAKE_COORDINATOR: Erasing txId 281474976710686 FAKE_COORDINATOR: advance: minStep5000020 State->FrontStep: 5000019 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 281474976710690 at step: 5000020 FAKE_COORDINATOR: Erasing txId 281474976710690 FAKE_COORDINATOR: advance: minStep5000021 State->FrontStep: 5000020 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 281474976710685 at step: 5000021 FAKE_COORDINATOR: Erasing txId 281474976710685 FAKE_COORDINATOR: advance: minStep5000022 State->FrontStep: 5000021 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 281474976710689 at step: 5000022 FAKE_COORDINATOR: Erasing txId 281474976710689 FAKE_COORDINATOR: advance: minStep5000023 State->FrontStep: 5000022 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 281474976710684 at step: 5000023 FAKE_COORDINATOR: Erasing txId 281474976710684 FAKE_COORDINATOR: advance: minStep5000024 State->FrontStep: 5000023 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 281474976710688 at step: 5000024 FAKE_COORDINATOR: Erasing txId 281474976710688 FAKE_COORDINATOR: advance: minStep5000025 State->FrontStep: 5000024 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 281474976710683 at step: 5000025 FAKE_COORDINATOR: Erasing txId 281474976710683 FAKE_COORDINATOR: advance: minStep5000026 State->FrontStep: 5000025 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 281474976710682 at step: 5000026 FAKE_COORDINATOR: Erasing txId 281474976710682 FAKE_COORDINATOR: advance: minStep5000027 State->FrontStep: 5000026 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 281474976710677 at step: 5000027 FAKE_COORDINATOR: Erasing txId 281474976710677 FAKE_COORDINATOR: advance: minStep5000028 State->FrontStep: 5000027 
FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 281474976710681 at step: 5000028 FAKE_COORDINATOR: Erasing txId 281474976710681 FAKE_COORDINATOR: advance: minStep5000029 State->FrontStep: 5000028 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 281474976710676 at step: 5000029 FAKE_COORDINATOR: Erasing txId 281474976710676 FAKE_COORDINATOR: advance: minStep5000030 State->FrontStep: 5000029 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 281474976710680 at step: 5000030 FAKE_COORDINATOR: Erasing txId 281474976710680 FAKE_COORDINATOR: advance: minStep5000031 State->FrontStep: 5000030 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 281474976710675 at step: 5000031 FAKE_COORDINATOR: Erasing txId 281474976710675 FAKE_COORDINATOR: advance: minStep5000032 State->FrontStep: 5000031 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 281474976710679 at step: 5000032 FAKE_COORDINATOR: Erasing txId 281474976710679 FAKE_COORDINATOR: advance: minStep5000033 State->FrontStep: 5000032 FAKE_COORDINATOR: ... ck to# [1:71:2111], cookie# 100, pathId# [OwnerId: 72057594046678944, LocalPathId: 1], version# 6 2026-01-08T15:34:14.286087Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:291: [1:97:2125] Handle NKikimrSchemeBoard.TEvUpdateAck { Owner: 72057594046678944 Generation: 2 PathId: [OwnerId: 72057594046678944, LocalPathId: 38] Version: 2 }: sender# [1:12:2059], cookie# 100 2026-01-08T15:34:14.286151Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:291: [1:98:2126] Handle NKikimrSchemeBoard.TEvUpdateAck { Owner: 72057594046678944 Generation: 2 PathId: [OwnerId: 72057594046678944, LocalPathId: 38] Version: 2 }: sender# [1:15:2062], cookie# 100 2026-01-08T15:34:14.286207Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:291: [1:99:2127] Handle NKikimrSchemeBoard.TEvUpdateAck { Owner: 72057594046678944 Generation: 2 PathId: [OwnerId: 72057594046678944, LocalPathId: 38] Version: 2 }: sender# [1:18:2065], cookie# 100 2026-01-08T15:34:14.286589Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:758: [1:96:2124] Handle NKikimrSchemeBoard.TEvUpdateAck { Owner: 72057594046678944 Generation: 2 PathId: [OwnerId: 72057594046678944, LocalPathId: 1] Version: 6 }: sender# [1:97:2125], cookie# 100 2026-01-08T15:34:14.286916Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:758: [1:96:2124] Handle NKikimrSchemeBoard.TEvUpdateAck { Owner: 72057594046678944 Generation: 2 PathId: [OwnerId: 72057594046678944, LocalPathId: 38] Version: 2 }: sender# [1:97:2125], cookie# 100 2026-01-08T15:34:14.287061Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:758: [1:96:2124] Handle NKikimrSchemeBoard.TEvUpdateAck { Owner: 72057594046678944 Generation: 2 PathId: [OwnerId: 72057594046678944, LocalPathId: 38] Version: 2 }: sender# [1:98:2126], cookie# 100 2026-01-08T15:34:14.287147Z node 1 :SCHEME_BOARD_POPULATOR NOTICE: populator.cpp:786: [1:96:2124] Ack update: ack to# [1:71:2111], cookie# 100, pathId# [OwnerId: 72057594046678944, LocalPathId: 38], version# 2 FAKE_COORDINATOR: Add transaction: 100 at step: 5000037 FAKE_COORDINATOR: advance: minStep5000037 State->FrontStep: 5000036 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 100 at step: 5000037 2026-01-08T15:34:14.287384Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:758: [1:96:2124] Handle NKikimrSchemeBoard.TEvUpdateAck { Owner: 72057594046678944 Generation: 2 PathId: [OwnerId: 72057594046678944, LocalPathId: 38] Version: 2 }: sender# [1:99:2127], cookie# 
100 2026-01-08T15:34:14.287419Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:764: [1:96:2124] Ack for unknown update (already acked?): sender# [1:99:2127], cookie# 100 2026-01-08T15:34:14.289178Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:676: [1:96:2124] Handle TEvSchemeShard::TEvDescribeSchemeResult { Status: StatusSuccess Path: "/Root" PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 7 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 7 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 6 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: true } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 0 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 37 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SecurityState { Audience: "/Root" } SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 1 PathOwnerId: 72057594046678944 }: sender# [1:71:2111], cookie# 100, event size# 555, preserialized size# 119 2026-01-08T15:34:14.289239Z node 1 :SCHEME_BOARD_POPULATOR NOTICE: populator.cpp:693: [1:96:2124] Update description: owner# 72057594046678944, pathId# [OwnerId: 72057594046678944, LocalPathId: 1], cookie# 100, is deletion# false, version: 7 2026-01-08T15:34:14.289413Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:283: [1:97:2125] Handle NKikimrSchemeBoard.TEvUpdate { Owner: 72057594046678944 Generation: 2 }: sender# [1:96:2124], cookie# 100 2026-01-08T15:34:14.289460Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:283: [1:98:2126] Handle NKikimrSchemeBoard.TEvUpdate { Owner: 72057594046678944 Generation: 2 }: sender# [1:96:2124], cookie# 100 2026-01-08T15:34:14.289493Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:283: [1:99:2127] Handle NKikimrSchemeBoard.TEvUpdate { Owner: 72057594046678944 Generation: 2 }: sender# [1:96:2124], cookie# 100 FAKE_COORDINATOR: Erasing txId 100 2026-01-08T15:34:14.290166Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:676: [1:96:2124] Handle TEvSchemeShard::TEvDescribeSchemeResult { Status: StatusSuccess Path: "/Root/DirA" PathDescription { Self { Name: "DirA" PathId: 38 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 100 CreateStep: 5000037 ParentPathId: 1 PathState: EPathStateCreate Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 2 } ChildrenExist: false } DomainDescription { 
SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 0 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 37 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 38 PathOwnerId: 72057594046678944 }: sender# [1:71:2111], cookie# 100, event size# 314, preserialized size# 2 2026-01-08T15:34:14.290233Z node 1 :SCHEME_BOARD_POPULATOR NOTICE: populator.cpp:693: [1:96:2124] Update description: owner# 72057594046678944, pathId# [OwnerId: 72057594046678944, LocalPathId: 38], cookie# 100, is deletion# false, version: 3 2026-01-08T15:34:14.290360Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:291: [1:97:2125] Handle NKikimrSchemeBoard.TEvUpdateAck { Owner: 72057594046678944 Generation: 2 PathId: [OwnerId: 72057594046678944, LocalPathId: 1] Version: 7 }: sender# [1:12:2059], cookie# 100 2026-01-08T15:34:14.290412Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:291: [1:98:2126] Handle NKikimrSchemeBoard.TEvUpdateAck { Owner: 72057594046678944 Generation: 2 PathId: [OwnerId: 72057594046678944, LocalPathId: 1] Version: 7 }: sender# [1:15:2062], cookie# 100 2026-01-08T15:34:14.290473Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:291: [1:99:2127] Handle NKikimrSchemeBoard.TEvUpdateAck { Owner: 72057594046678944 Generation: 2 PathId: [OwnerId: 72057594046678944, LocalPathId: 1] Version: 7 }: sender# [1:18:2065], cookie# 100 2026-01-08T15:34:14.290572Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:758: [1:96:2124] Handle NKikimrSchemeBoard.TEvUpdateAck { Owner: 72057594046678944 Generation: 2 PathId: [OwnerId: 72057594046678944, LocalPathId: 1] Version: 7 }: sender# [1:97:2125], cookie# 100 2026-01-08T15:34:14.290610Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:283: [1:97:2125] Handle NKikimrSchemeBoard.TEvUpdate { Owner: 72057594046678944 Generation: 2 }: sender# [1:96:2124], cookie# 100 2026-01-08T15:34:14.290645Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:283: [1:98:2126] Handle NKikimrSchemeBoard.TEvUpdate { Owner: 72057594046678944 Generation: 2 }: sender# [1:96:2124], cookie# 100 2026-01-08T15:34:14.290681Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:283: [1:99:2127] Handle NKikimrSchemeBoard.TEvUpdate { Owner: 72057594046678944 Generation: 2 }: sender# [1:96:2124], cookie# 100 2026-01-08T15:34:14.290821Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:758: [1:96:2124] Handle NKikimrSchemeBoard.TEvUpdateAck { Owner: 72057594046678944 Generation: 2 PathId: [OwnerId: 72057594046678944, LocalPathId: 1] Version: 7 }: sender# [1:98:2126], cookie# 100 2026-01-08T15:34:14.290861Z node 1 :SCHEME_BOARD_POPULATOR NOTICE: populator.cpp:786: [1:96:2124] Ack update: ack to# [1:71:2111], cookie# 100, pathId# [OwnerId: 
72057594046678944, LocalPathId: 1], version# 7 2026-01-08T15:34:14.290924Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:758: [1:96:2124] Handle NKikimrSchemeBoard.TEvUpdateAck { Owner: 72057594046678944 Generation: 2 PathId: [OwnerId: 72057594046678944, LocalPathId: 1] Version: 7 }: sender# [1:99:2127], cookie# 100 2026-01-08T15:34:14.290976Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:291: [1:97:2125] Handle NKikimrSchemeBoard.TEvUpdateAck { Owner: 72057594046678944 Generation: 2 PathId: [OwnerId: 72057594046678944, LocalPathId: 38] Version: 3 }: sender# [1:12:2059], cookie# 100 2026-01-08T15:34:14.291020Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:291: [1:98:2126] Handle NKikimrSchemeBoard.TEvUpdateAck { Owner: 72057594046678944 Generation: 2 PathId: [OwnerId: 72057594046678944, LocalPathId: 38] Version: 3 }: sender# [1:15:2062], cookie# 100 2026-01-08T15:34:14.291052Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:291: [1:99:2127] Handle NKikimrSchemeBoard.TEvUpdateAck { Owner: 72057594046678944 Generation: 2 PathId: [OwnerId: 72057594046678944, LocalPathId: 38] Version: 3 }: sender# [1:18:2065], cookie# 100 2026-01-08T15:34:14.291330Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:758: [1:96:2124] Handle NKikimrSchemeBoard.TEvUpdateAck { Owner: 72057594046678944 Generation: 2 PathId: [OwnerId: 72057594046678944, LocalPathId: 38] Version: 3 }: sender# [1:97:2125], cookie# 100 2026-01-08T15:34:14.291431Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:758: [1:96:2124] Handle NKikimrSchemeBoard.TEvUpdateAck { Owner: 72057594046678944 Generation: 2 PathId: [OwnerId: 72057594046678944, LocalPathId: 38] Version: 3 }: sender# [1:98:2126], cookie# 100 2026-01-08T15:34:14.291457Z node 1 :SCHEME_BOARD_POPULATOR NOTICE: populator.cpp:786: [1:96:2124] Ack update: ack to# [1:71:2111], cookie# 100, pathId# [OwnerId: 72057594046678944, LocalPathId: 38], version# 3 2026-01-08T15:34:14.291816Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:758: [1:96:2124] Handle NKikimrSchemeBoard.TEvUpdateAck { Owner: 72057594046678944 Generation: 2 PathId: [OwnerId: 72057594046678944, LocalPathId: 38] Version: 3 }: sender# [1:99:2127], cookie# 100 2026-01-08T15:34:14.291851Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:764: [1:96:2124] Ack for unknown update (already acked?): sender# [1:99:2127], cookie# 100 TestModificationResult got TxId: 100, wait until txId: 100 |87.2%| [TM] {BAZEL_UPLOAD} ydb/core/tx/scheme_board/ut_populator/unittest >> KqpRboYql::Filter+ColumnStore [GOOD] >> KqpRboYql::Filter-ColumnStore >> TKeyValueTest::TestBasicWriteReadOverrun [GOOD] >> TKeyValueTest::TestBlockedEvGetRequest >> TContinuousBackupTests::Basic [GOOD] >> TKeyValueTest::TestBlockedEvGetRequest [GOOD] >> YdbIndexTable::MultiShardTableOneIndexIndexOverlapDataColumn >> YdbIndexTable::MultiShardTableOneIndex |87.2%| [TM] {asan, default-linux-x86_64, release} ydb/services/ext_index/ut/unittest |87.2%| [TM] {BAZEL_UPLOAD} ydb/services/ext_index/ut/unittest ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_continuous_backup/unittest >> TContinuousBackupTests::Basic [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2143] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2143] Leader for TabletID 72057594046678944 is [1:129:2154] sender: [1:130:2058] recipient: [1:112:2143] 2026-01-08T15:34:13.990471Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: 
schemeshard_impl.cpp:7938: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2026-01-08T15:34:13.990558Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7966: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2026-01-08T15:34:13.990600Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7852: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2026-01-08T15:34:13.990636Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7868: OperationsProcessing config: using default configuration 2026-01-08T15:34:13.990673Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7874: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2026-01-08T15:34:13.990737Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7874: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2026-01-08T15:34:13.990816Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7998: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2026-01-08T15:34:13.990899Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_shred_manager.cpp:39: [RootShredManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, ShredInterval# 604800.000000s, ShredBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2026-01-08T15:34:13.991798Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:8069: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2026-01-08T15:34:13.992119Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2026-01-08T15:34:14.086297Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7755: Cannot subscribe to console configs 2026-01-08T15:34:14.086377Z node 1 :IMPORT WARN: schemeshard_import.cpp:440: Table profiles were not loaded 2026-01-08T15:34:14.102054Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2026-01-08T15:34:14.102411Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2026-01-08T15:34:14.102598Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2026-01-08T15:34:14.114403Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2026-01-08T15:34:14.114820Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:464: Clear TempDirsState with owners number: 0 2026-01-08T15:34:14.115496Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1383: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2026-01-08T15:34:14.116216Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2026-01-08T15:34:14.119212Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:160: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2026-01-08T15:34:14.119378Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_shred_manager.cpp:84: [RootShredManager] Stop 2026-01-08T15:34:14.120483Z node 1 
:FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2026-01-08T15:34:14.120546Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2026-01-08T15:34:14.120666Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2026-01-08T15:34:14.120725Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2026-01-08T15:34:14.120766Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:213: TTxServerlessStorageBilling.Complete 2026-01-08T15:34:14.120895Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:7111: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2026-01-08T15:34:14.280951Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot" OperationType: ESchemeOpMkDir MkDir { Name: ".sys" } Internal: true FailOnExist: false } TxId: 281474976710657 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:34:14.281925Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "auth_permissions" Type: EAuthPermissions } } TxId: 281474976710658 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:34:14.282068Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "auth_effective_permissions" Type: EAuthEffectivePermissions } } TxId: 281474976710659 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:34:14.282166Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "auth_users" Type: EAuthUsers } } TxId: 281474976710660 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:34:14.282238Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "pg_tables" Type: EPgTables } } TxId: 281474976710661 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:34:14.282301Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send 
TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "ds_pdisks" Type: EPDisks } } TxId: 281474976710662 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:34:14.282604Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "auth_group_members" Type: EAuthGroupMembers } } TxId: 281474976710663 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:34:14.282680Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "top_partitions_one_minute" Type: ETopPartitionsByCpuOneMinute } } TxId: 281474976710664 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:34:14.282762Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "top_queries_by_request_units_one_hour" Type: ETopQueriesByRequestUnitsOneHour } } TxId: 281474976710665 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:34:14.282879Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "ds_vslots" Type: EVSlots } } TxId: 281474976710666 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:34:14.282950Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "top_partitions_by_tli_one_hour" Type: ETopPartitionsByTliOneHour } } TxId: 281474976710667 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:34:14.283012Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "nodes" Type: ENodes } } TxId: 281474976710668 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:34:14.283117Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "top_queries_by_cpu_time_one_hour" Type: 
ETopQueriesByCpuTimeOneHour } } TxId: 281474976710669 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:34:14.283195Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "top_queries_by_read_bytes_one_hour" Type: ETopQueriesByReadBytesOneHour } } TxId: 281474976710670 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:34:14.283294Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "top_queries_by_read_bytes_one_minute" Type: ETopQueriesByR ... RD DEBUG: schemeshard_impl.cpp:6725: Handle TEvProposeTransactionResult, at schemeshard: 72057594046678944, message: TxKind: TX_KIND_SCHEME Origin: 72075186233409546 Status: COMPLETE TxId: 104 Step: 5000041 OrderId: 104 ExecLatency: 0 ProposeLatency: 7 DomainCoordinators: 72057594046316545 TxStats { PerShardStats { ShardId: 72075186233409546 CpuTimeUsec: 803 } } CommitVersion { Step: 5000041 TxId: 104 } 2026-01-08T15:34:15.875035Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1837: TOperation FindRelatedPartByTabletId, TxId: 104, tablet: 72075186233409546, partId: 0 2026-01-08T15:34:15.875433Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:628: TTxOperationReply execute, operationId: 104:0, at schemeshard: 72057594046678944, message: TxKind: TX_KIND_SCHEME Origin: 72075186233409546 Status: COMPLETE TxId: 104 Step: 5000041 OrderId: 104 ExecLatency: 0 ProposeLatency: 7 DomainCoordinators: 72057594046316545 TxStats { PerShardStats { ShardId: 72075186233409546 CpuTimeUsec: 803 } } CommitVersion { Step: 5000041 TxId: 104 } 2026-01-08T15:34:15.875596Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_part.cpp:108: HandleReply TEvDataShard::TEvProposeTransactionResult Ignore message: tablet# 72057594046678944, ev# TxKind: TX_KIND_SCHEME Origin: 72075186233409546 Status: COMPLETE TxId: 104 Step: 5000041 OrderId: 104 ExecLatency: 0 ProposeLatency: 7 DomainCoordinators: 72057594046316545 TxStats { PerShardStats { ShardId: 72075186233409546 CpuTimeUsec: 803 } } CommitVersion { Step: 5000041 TxId: 104 } debug: NTableState::TProposedWaitParts operationId# 104:0 FAKE_COORDINATOR: Erasing txId 104 2026-01-08T15:34:15.876580Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5926: Handle TEvSchemaChanged, tabletId: 72057594046678944, at schemeshard: 72057594046678944, message: Source { RawX1: 706 RawX2: 4294969988 } Origin: 72075186233409546 State: 2 TxId: 104 Step: 0 Generation: 2 2026-01-08T15:34:15.876638Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1837: TOperation FindRelatedPartByTabletId, TxId: 104, tablet: 72075186233409546, partId: 0 2026-01-08T15:34:15.876801Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:628: TTxOperationReply execute, operationId: 104:0, at schemeshard: 72057594046678944, message: Source { RawX1: 706 RawX2: 4294969988 } Origin: 72075186233409546 State: 2 TxId: 104 Step: 0 Generation: 2 2026-01-08T15:34:15.876877Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:1081: 
NTableState::TProposedWaitParts operationId# 104:0 HandleReply TEvDataShard::TEvSchemaChanged at tablet: 72057594046678944 2026-01-08T15:34:15.876993Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:1085: NTableState::TProposedWaitParts operationId# 104:0 HandleReply TEvDataShard::TEvSchemaChanged at tablet: 72057594046678944 message: Source { RawX1: 706 RawX2: 4294969988 } Origin: 72075186233409546 State: 2 TxId: 104 Step: 0 Generation: 2 2026-01-08T15:34:15.877071Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:673: CollectSchemaChanged accept TEvDataShard::TEvSchemaChanged, operationId: 104:0, shardIdx: 72057594046678944:1, shard: 72075186233409546, left await: 0, txState.State: ProposedWaitParts, txState.ReadyForNotifications: 1, at schemeshard: 72057594046678944 2026-01-08T15:34:15.877112Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:710: all shard schema changes has been received, operationId: 104:0, at schemeshard: 72057594046678944 2026-01-08T15:34:15.877164Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:722: send schema changes ack message, operation: 104:0, datashard: 72075186233409546, at schemeshard: 72057594046678944 2026-01-08T15:34:15.877212Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2700: Change state for txid 104:0 129 -> 240 2026-01-08T15:34:15.887606Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:656: TTxOperationReply complete, operationId: 104:0, at schemeshard: 72057594046678944 2026-01-08T15:34:15.888012Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:656: TTxOperationReply complete, operationId: 104:0, at schemeshard: 72057594046678944 2026-01-08T15:34:15.888194Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:494: TTxOperationProgress Execute, operationId: 104:0, at schemeshard: 72057594046678944 2026-01-08T15:34:15.888243Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:497: [72057594046678944] TDone opId# 104:0 ProgressState 2026-01-08T15:34:15.888362Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:928: Part operation is done id#104:0 progress is 3/3 2026-01-08T15:34:15.888400Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1714: TOperation IsReadyToDone TxId: 104 ready parts: 3/3 2026-01-08T15:34:15.888464Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:928: Part operation is done id#104:0 progress is 3/3 2026-01-08T15:34:15.888500Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1714: TOperation IsReadyToDone TxId: 104 ready parts: 3/3 2026-01-08T15:34:15.888535Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1679: TOperation IsReadyToNotify, TxId: 104, ready parts: 3/3, is published: true 2026-01-08T15:34:15.888632Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1702: TOperation DoNotify send TEvNotifyTxCompletionResult to actorId: [1:735:2713] message: TxId: 104 2026-01-08T15:34:15.888691Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1714: TOperation IsReadyToDone TxId: 104 ready parts: 3/3 2026-01-08T15:34:15.888727Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:995: Operation and all the parts is done, operation id: 104:0 2026-01-08T15:34:15.888761Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5577: RemoveTx for txid 104:0 2026-01-08T15:34:15.888914Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:611: 
DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 38] was 4 2026-01-08T15:34:15.888968Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:995: Operation and all the parts is done, operation id: 104:1 2026-01-08T15:34:15.888988Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5577: RemoveTx for txid 104:1 2026-01-08T15:34:15.889020Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:611: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 39] was 2 2026-01-08T15:34:15.889041Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:995: Operation and all the parts is done, operation id: 104:2 2026-01-08T15:34:15.889059Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5577: RemoveTx for txid 104:2 2026-01-08T15:34:15.889109Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:611: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 40] was 1 2026-01-08T15:34:15.889710Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:123: TTxCleanDroppedPaths Execute, 1 paths in candidate queue, at schemeshard: 72057594046678944 2026-01-08T15:34:15.889775Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:137: TTxCleanDroppedPaths: PersistRemovePath for PathId# [OwnerId: 72057594046678944, LocalPathId: 40], at schemeshard: 72057594046678944 2026-01-08T15:34:15.889865Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:611: DecrementPathDbRefCount reason remove path for pathId [OwnerId: 72057594046678944, LocalPathId: 39] was 1 2026-01-08T15:34:15.889918Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:137: TTxCleanDroppedPaths: PersistRemovePath for PathId# [OwnerId: 72057594046678944, LocalPathId: 39], at schemeshard: 72057594046678944 2026-01-08T15:34:15.889952Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:611: DecrementPathDbRefCount reason remove path for pathId [OwnerId: 72057594046678944, LocalPathId: 38] was 3 2026-01-08T15:34:15.900103Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:230: tests -- TTxNotificationSubscriber for txId 104: got EvNotifyTxCompletionResult 2026-01-08T15:34:15.900191Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:239: tests -- TTxNotificationSubscriber for txId 104: satisfy waiter [1:1090:3003] 2026-01-08T15:34:15.900852Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:160: TTxCleanDroppedPaths Complete, done PersistRemovePath for 2 paths, skipped 0, left 0 candidates, at schemeshard: 72057594046678944 TestWaitNotification: OK eventTxId 104 2026-01-08T15:34:15.901371Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/Table/0_continuousBackupImpl" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: true }, at schemeshard: 72057594046678944 2026-01-08T15:34:15.901653Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/Table/0_continuousBackupImpl" took 258us result status StatusPathDoesNotExist 2026-01-08T15:34:15.901842Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusPathDoesNotExist Reason: "Check failed: path: \'/MyRoot/Table/0_continuousBackupImpl\', 
error: path hasn\'t been resolved, nearest resolved path: \'/MyRoot/Table\' (id: [OwnerId: 72057594046678944, LocalPathId: 38])" Path: "/MyRoot/Table/0_continuousBackupImpl" PathId: 18446744073709551615 LastExistedPrefixPath: "/MyRoot/Table" LastExistedPrefixPathId: 38 LastExistedPrefixDescription { Self { Name: "Table" PathId: 38 SchemeshardId: 72057594046678944 PathType: EPathTypeTable CreateFinished: true CreateTxId: 101 CreateStep: 5000038 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathSubType: EPathSubTypeEmpty ChildrenExist: false } } PathOwnerId: 18446744073709551615, at schemeshard: 72057594046678944 2026-01-08T15:34:15.902447Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/Table/0_continuousBackupImpl/streamImpl" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: true }, at schemeshard: 72057594046678944 2026-01-08T15:34:15.902647Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/Table/0_continuousBackupImpl/streamImpl" took 210us result status StatusPathDoesNotExist 2026-01-08T15:34:15.902797Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusPathDoesNotExist Reason: "Check failed: path: \'/MyRoot/Table/0_continuousBackupImpl/streamImpl\', error: path hasn\'t been resolved, nearest resolved path: \'/MyRoot/Table\' (id: [OwnerId: 72057594046678944, LocalPathId: 38])" Path: "/MyRoot/Table/0_continuousBackupImpl/streamImpl" PathId: 18446744073709551615 LastExistedPrefixPath: "/MyRoot/Table" LastExistedPrefixPathId: 38 LastExistedPrefixDescription { Self { Name: "Table" PathId: 38 SchemeshardId: 72057594046678944 PathType: EPathTypeTable CreateFinished: true CreateTxId: 101 CreateStep: 5000038 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathSubType: EPathSubTypeEmpty ChildrenExist: false } } PathOwnerId: 18446744073709551615, at schemeshard: 72057594046678944 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/keyvalue/ut/unittest >> TKeyValueTest::TestBlockedEvGetRequest [GOOD] Test command err: Leader for TabletID 72057594037927937 is [0:0:0] sender: [1:56:2057] recipient: [1:52:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [1:56:2057] recipient: [1:52:2097] Leader for TabletID 72057594037927937 is [1:58:2099] sender: [1:59:2057] recipient: [1:52:2097] Leader for TabletID 72057594037927937 is [1:58:2099] sender: [1:76:2057] recipient: [1:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [2:56:2057] recipient: [2:54:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [2:56:2057] recipient: [2:54:2097] Leader for TabletID 72057594037927937 is [2:58:2099] sender: [2:59:2057] recipient: [2:54:2097] Leader for TabletID 72057594037927937 is [2:58:2099] sender: [2:76:2057] recipient: [2:14:2061] !Reboot 72057594037927937 (actor [2:58:2099]) on event NKikimr::TEvTabletPipe::TEvServerConnected ! 
Leader for TabletID 72057594037927937 is [2:58:2099] sender: [2:78:2057] recipient: [2:39:2086] Leader for TabletID 72057594037927937 is [2:58:2099] sender: [2:81:2057] recipient: [2:80:2112] Leader for TabletID 72057594037927937 is [2:82:2113] sender: [2:83:2057] recipient: [2:80:2112] !Reboot 72057594037927937 (actor [2:58:2099]) rebooted! !Reboot 72057594037927937 (actor [2:58:2099]) tablet resolver refreshed! new actor is[2:82:2113] Leader for TabletID 72057594037927937 is [2:82:2113] sender: [2:198:2057] recipient: [2:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [3:56:2057] recipient: [3:52:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [3:56:2057] recipient: [3:52:2097] Leader for TabletID 72057594037927937 is [3:58:2099] sender: [3:59:2057] recipient: [3:52:2097] Leader for TabletID 72057594037927937 is [3:58:2099] sender: [3:76:2057] recipient: [3:14:2061] !Reboot 72057594037927937 (actor [3:58:2099]) on event NKikimr::TEvKeyValue::TEvExecuteTransaction ! Leader for TabletID 72057594037927937 is [3:58:2099] sender: [3:78:2057] recipient: [3:39:2086] Leader for TabletID 72057594037927937 is [3:58:2099] sender: [3:81:2057] recipient: [3:80:2112] Leader for TabletID 72057594037927937 is [3:82:2113] sender: [3:83:2057] recipient: [3:80:2112] !Reboot 72057594037927937 (actor [3:58:2099]) rebooted! !Reboot 72057594037927937 (actor [3:58:2099]) tablet resolver refreshed! new actor is[3:82:2113] Leader for TabletID 72057594037927937 is [3:82:2113] sender: [3:198:2057] recipient: [3:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [4:56:2057] recipient: [4:53:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [4:56:2057] recipient: [4:53:2097] Leader for TabletID 72057594037927937 is [4:58:2099] sender: [4:59:2057] recipient: [4:53:2097] Leader for TabletID 72057594037927937 is [4:58:2099] sender: [4:76:2057] recipient: [4:14:2061] !Reboot 72057594037927937 (actor [4:58:2099]) on event NKikimr::TEvKeyValue::TEvIntermediate ! Leader for TabletID 72057594037927937 is [4:58:2099] sender: [4:79:2057] recipient: [4:39:2086] Leader for TabletID 72057594037927937 is [4:58:2099] sender: [4:82:2057] recipient: [4:81:2112] Leader for TabletID 72057594037927937 is [4:83:2113] sender: [4:84:2057] recipient: [4:81:2112] !Reboot 72057594037927937 (actor [4:58:2099]) rebooted! !Reboot 72057594037927937 (actor [4:58:2099]) tablet resolver refreshed! new actor is[4:83:2113] Leader for TabletID 72057594037927937 is [4:83:2113] sender: [4:199:2057] recipient: [4:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [5:56:2057] recipient: [5:53:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [5:56:2057] recipient: [5:53:2097] Leader for TabletID 72057594037927937 is [5:58:2099] sender: [5:59:2057] recipient: [5:53:2097] Leader for TabletID 72057594037927937 is [5:58:2099] sender: [5:76:2057] recipient: [5:14:2061] !Reboot 72057594037927937 (actor [5:58:2099]) on event NKikimr::TEvTabletPipe::TEvServerConnected ! Leader for TabletID 72057594037927937 is [5:58:2099] sender: [5:82:2057] recipient: [5:39:2086] Leader for TabletID 72057594037927937 is [5:58:2099] sender: [5:85:2057] recipient: [5:84:2115] Leader for TabletID 72057594037927937 is [5:86:2116] sender: [5:87:2057] recipient: [5:84:2115] !Reboot 72057594037927937 (actor [5:58:2099]) rebooted! !Reboot 72057594037927937 (actor [5:58:2099]) tablet resolver refreshed! 
new actor is[5:86:2116] Leader for TabletID 72057594037927937 is [5:86:2116] sender: [5:202:2057] recipient: [5:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [6:56:2057] recipient: [6:53:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [6:56:2057] recipient: [6:53:2097] Leader for TabletID 72057594037927937 is [6:58:2099] sender: [6:59:2057] recipient: [6:53:2097] Leader for TabletID 72057594037927937 is [6:58:2099] sender: [6:76:2057] recipient: [6:14:2061] !Reboot 72057594037927937 (actor [6:58:2099]) on event NKikimr::TEvKeyValue::TEvRead ! Leader for TabletID 72057594037927937 is [6:58:2099] sender: [6:82:2057] recipient: [6:39:2086] Leader for TabletID 72057594037927937 is [6:58:2099] sender: [6:85:2057] recipient: [6:84:2115] Leader for TabletID 72057594037927937 is [6:86:2116] sender: [6:87:2057] recipient: [6:84:2115] !Reboot 72057594037927937 (actor [6:58:2099]) rebooted! !Reboot 72057594037927937 (actor [6:58:2099]) tablet resolver refreshed! new actor is[6:86:2116] Leader for TabletID 72057594037927937 is [6:86:2116] sender: [6:202:2057] recipient: [6:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [7:56:2057] recipient: [7:53:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [7:56:2057] recipient: [7:53:2097] Leader for TabletID 72057594037927937 is [7:58:2099] sender: [7:59:2057] recipient: [7:53:2097] Leader for TabletID 72057594037927937 is [7:58:2099] sender: [7:76:2057] recipient: [7:14:2061] !Reboot 72057594037927937 (actor [7:58:2099]) on event NKikimr::TEvKeyValue::TEvNotify ! Leader for TabletID 72057594037927937 is [7:58:2099] sender: [7:83:2057] recipient: [7:39:2086] Leader for TabletID 72057594037927937 is [7:58:2099] sender: [7:86:2057] recipient: [7:85:2115] Leader for TabletID 72057594037927937 is [7:87:2116] sender: [7:88:2057] recipient: [7:85:2115] !Reboot 72057594037927937 (actor [7:58:2099]) rebooted! !Reboot 72057594037927937 (actor [7:58:2099]) tablet resolver refreshed! new actor is[7:87:2116] Leader for TabletID 72057594037927937 is [0:0:0] sender: [8:56:2057] recipient: [8:52:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [8:56:2057] recipient: [8:52:2097] Leader for TabletID 72057594037927937 is [8:58:2099] sender: [8:59:2057] recipient: [8:52:2097] Leader for TabletID 72057594037927937 is [8:58:2099] sender: [8:76:2057] recipient: [8:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [9:56:2057] recipient: [9:54:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [9:56:2057] recipient: [9:54:2097] Leader for TabletID 72057594037927937 is [9:58:2099] sender: [9:59:2057] recipient: [9:54:2097] Leader for TabletID 72057594037927937 is [9:58:2099] sender: [9:76:2057] recipient: [9:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [10:56:2057] recipient: [10:53:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [10:56:2057] recipient: [10:53:2097] Leader for TabletID 72057594037927937 is [10:58:2099] sender: [10:59:2057] recipient: [10:53:2097] Leader for TabletID 72057594037927937 is [10:58:2099] sender: [10:76:2057] recipient: [10:14:2061] !Reboot 72057594037927937 (actor [10:58:2099]) on event NKikimr::TEvTabletPipe::TEvServerConnected ! 
Leader for TabletID 72057594037927937 is [10:58:2099] sender: [10:78:2057] recipient: [10:39:2086] Leader for TabletID 72057594037927937 is [10:58:2099] sender: [10:81:2057] recipient: [10:80:2112] Leader for TabletID 72057594037927937 is [10:82:2113] sender: [10:83:2057] recipient: [10:80:2112] !Reboot 72057594037927937 (actor [10:58:2099]) rebooted! !Reboot 72057594037927937 (actor [10:58:2099]) tablet resolver refreshed! new actor is[10:82:2113] Leader for TabletID 72057594037927937 is [10:82:2113] sender: [10:198:2057] recipient: [10:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [11:56:2057] recipient: [11:53:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [11:56:2057] recipient: [11:53:2097] Leader for TabletID 72057594037927937 is [11:58:2099] sender: [11:59:2057] recipient: [11:53:2097] Leader for TabletID 72057594037927937 is [11:58:2099] sender: [11:76:2057] recipient: [11:14:2061] !Reboot 72057594037927937 (actor [11:58:2099]) on event NKikimr::TEvKeyValue::TEvExecuteTransaction ! Leader for TabletID 72057594037927937 is [11:58:2099] sender: [11:78:2057] recipient: [11:39:2086] Leader for TabletID 72057594037927937 is [11:58:2099] sender: [11:81:2057] recipient: [11:80:2112] Leader for TabletID 72057594037927937 is [11:82:2113] sender: [11:83:2057] recipient: [11:80:2112] !Reboot 72057594037927937 (actor [11:58:2099]) rebooted! !Reboot 72057594037927937 (actor [11:58:2099]) tablet resolver refreshed! new actor is[11:82:2113] Leader for TabletID 72057594037927937 is [11:82:2113] sender: [11:198:2057] recipient: [11:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [12:56:2057] recipient: [12:53:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [12:56:2057] recipient: [12:53:2097] Leader for TabletID 72057594037927937 is [12:58:2099] sender: [12:59:2057] recipient: [12:53:2097] Leader for TabletID 72057594037927937 is [12:58:2099] sender: [12:76:2057] recipient: [12:14:2061] !Reboot 72057594037927937 (actor [12:58:2099]) on event NKikimr::TEvKeyValue::TEvIntermediate ! Leader for TabletID 72057594037927937 is [12:58:2099] sender: [12:79:2057] recipient: [12:39:2086] Leader for TabletID 72057594037927937 is [12:58:2099] sender: [12:82:2057] recipient: [12:81:2112] Leader for TabletID 72057594037927937 is [12:83:2113] sender: [12:84:2057] recipient: [12:81:2112] !Reboot 72057594037927937 (actor [12:58:2099]) rebooted! !Reboot 72057594037927937 (actor [12:58:2099]) tablet resolver refreshed! new actor is[12:83:2113] Leader for TabletID 72057594037927937 is [12:83:2113] sender: [12:199:2057] recipient: [12:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [13:56:2057] recipient: [13:52:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [13:56:2057] recipient: [13:52:2097] Leader for TabletID 72057594037927937 is [13:58:2099] sender: [13:59:2057] recipient: [13:52:2097] Leader for TabletID 72057594037927937 is [13:58:2099] sender: [13:76:2057] recipient: [13:14:2061] !Reboot 72057594037927937 (actor [13:58:2099]) on event NKikimr::TEvTabletPipe::TEvServerConnected ! Leader for TabletID 72057594037927937 is [13:58:2099] sender: [13:82:2057] recipient: [13:39:2086] Leader for TabletID 72057594037927937 is [13:58:2099] sender: [13:85:2057] recipient: [13:84:2115] Leader for TabletID 72057594037927937 is [13:86:2116] sender: [13:87:2057] recipient: [13:84:2115] !Reboot 72057594037927937 (actor [13:58:2099]) rebooted! 
!Reboot 72057594037927937 (actor [13:58:2099]) tablet resolver refreshed! new actor is[13:86:2116] Leader for TabletID 72057594037927937 is [13:86:2116] sender: [13:202:2057] recipient: [13:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [14:56:2057] recipient: [14:54:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [14:56:2057] recipient: [14:54:2097] Leader for TabletID 72057594037927937 is [14:58:2099] sender: [14:59:2057] recipient: [14:54:2097] Leader for TabletID 72057594037927937 is [14:58:2099] sender: [14:76:2057] recipient: [14:14:2061] !Reboot 72057594037927937 (actor [14:58:2099]) on event NKikimr::TEvKeyValue::TEvRead ! Leader for TabletID 72057594037927937 is [14:58:2099] sender: [14:82:2057] recipient: [14:39:2086] Leader for TabletID 72057594037927937 is [14:58:2099] sender: [14:85:2057] recipient: [14:84:2115] Leader for TabletID 72057594037927937 is [14:86:2116] sender: [14:87:2057] recipient: [14:84:2115] !Reboot 72057594037927937 (actor [14:58:2099]) rebooted! !Reboot 72057594037927937 (actor [14:58:2099]) tablet resolver refreshed! new actor is[14:86:2116] Leader for TabletID 72057594037927937 is [14:86:2116] sender: [14:202:2057] recipient: [14:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [15:56:2057] recipient: [15:52:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [15:56:2057] recipient: [15:52:2097] Leader for TabletID 72057594037927937 is [15:58:2099] sender: [15:59:2057] recipient: [15:52:2097] Leader for TabletID 72057594037927937 is [15:58:2099] sender: [15:76:2057] recipient: [15:14:2061] !Reboot 72057594037927937 (actor [15:58:2099]) on event NKikimr::TEvKeyValue::TEvNotify ! Leader for TabletID 72057594037927937 is [15:58:2099] sender: [15:83:2057] recipient: [15:39:2086] Leader for TabletID 72057594037927937 is [15:58:2099] sender: [15:86:2057] recipient: [15:85:2115] Leader for TabletID 72057594037927937 is [15:87:2116] sender: [15:88:2057] recipient: [15:85:2115] !Reboot 72057594037927937 (actor [15:58:2099]) rebooted! !Reboot 72057594037927937 (actor [15:58:2099]) tablet resolver refreshed! new actor is[15:87:2116] Leader for TabletID 72057594037927937 is [0:0:0] sender: [16:56:2057] recipient: [16:53:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [16:56:2057] recipient: [16:53:2097] Leader for TabletID 72057594037927937 is [16:58:2099] sender: [16:59:2057] recipient: [16:53:2097] Leader for TabletID 72057594037927937 is [16:58:2099] sender: [16:76:2057] recipient: [16:14:2061] 2026-01-08T15:34:16.158808Z node 17 :KEYVALUE ERROR: keyvalue_storage_read_request.cpp:256: {KV323@keyvalue_storage_read_request.cpp:256} Received BLOCKED EvGetResult. 
KeyValue# 72057594037927937 Status# BLOCKED Deadline# 18446744073709551 Now# 0 SentAt# 1970-01-01T00:00:00.000000Z GotAt# 0 ErrorReason# block race detected 2026-01-08T15:34:16.178881Z node 17 :TABLET_MAIN ERROR: tablet_sys.cpp:1006: Tablet: 72057594037927937 HandleBlockBlobStorageResult, msg->Status: ALREADY, not discovered Marker# TSYS21 2026-01-08T15:34:16.178957Z node 17 :TABLET_MAIN ERROR: tablet_sys.cpp:1925: Tablet: 72057594037927937 Type: KeyValue, EReason: ReasonBootBSError, SuggestedGeneration: 0, KnownGeneration: 3 Marker# TSYS31 |87.2%| [TM] {BAZEL_UPLOAD} ydb/core/tx/schemeshard/ut_continuous_backup/unittest |87.2%| [TM] {BAZEL_UPLOAD} ydb/core/keyvalue/ut/unittest >> KqpPg::InsertNoTargetColumns_Alter-useSink [GOOD] >> KqpPg::InsertNoTargetColumns_Serial+useSink |87.2%| [TM] {asan, default-linux-x86_64, release} ydb/services/ext_index/ut/unittest |87.2%| [TM] {BAZEL_UPLOAD} ydb/services/ext_index/ut/unittest |87.2%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/schemeshard/ut_index/ydb-core-tx-schemeshard-ut_index |87.2%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/schemeshard/ut_index/ydb-core-tx-schemeshard-ut_index |87.2%| [LD] {RESULT} $(B)/ydb/core/tx/schemeshard/ut_index/ydb-core-tx-schemeshard-ut_index >> TTxDataShardReshuffleKMeansScan::MainToBuildWithOverlap [GOOD] >> TTxDataShardReshuffleKMeansScan::BuildToPosting |87.3%| [TM] {asan, default-linux-x86_64, release} ydb/services/ext_index/ut/unittest |87.3%| [TM] {BAZEL_UPLOAD} ydb/services/ext_index/ut/unittest >> Yq_1::Create_And_Modify_The_Same_Connection [GOOD] >> KqpRboPg::Bench_Filter [GOOD] >> KqpRboPg::Bench_CrossFilter |87.3%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/services/ydb/table_split_ut/ydb-services-ydb-table_split_ut |87.3%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/services/ydb/table_split_ut/ydb-services-ydb-table_split_ut |87.3%| [LD] {RESULT} $(B)/ydb/services/ydb/table_split_ut/ydb-services-ydb-table_split_ut >> test_s3.py::TestYdbS3TTL::test_s3[table_index_2__SYNC-pk_types7-all_types7-index7---SYNC] [GOOD] >> TxUsage::Sinks_Olap_WriteToTopicAndTable_1_Query [GOOD] |87.3%| [TM] {asan, default-linux-x86_64, release} ydb/services/ext_index/ut/unittest |87.3%| [TM] {asan, default-linux-x86_64, release} ydb/services/ext_index/ut/unittest >> KqpRboPg::FallbackToYqlEnabled [GOOD] >> KqpRboPg::FallbackToYqlDisabled |87.3%| [TM] {BAZEL_UPLOAD} ydb/services/ext_index/ut/unittest |87.3%| [TM] {BAZEL_UPLOAD} ydb/services/ext_index/ut/unittest |87.3%| [TM] {asan, default-linux-x86_64, release} ydb/services/ext_index/ut/unittest |87.3%| [TM] {BAZEL_UPLOAD} ydb/services/ext_index/ut/unittest >> TContinuousBackupTests::TakeIncrementalBackup [GOOD] |87.3%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/schemeshard/ut_continuous_backup_reboots/tx-schemeshard-ut_continuous_backup_reboots |87.3%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/schemeshard/ut_continuous_backup_reboots/tx-schemeshard-ut_continuous_backup_reboots |87.3%| [LD] {RESULT} $(B)/ydb/core/tx/schemeshard/ut_continuous_backup_reboots/tx-schemeshard-ut_continuous_backup_reboots ------- [TM] {asan, default-linux-x86_64, release} ydb/services/fq/ut_integration/unittest >> Yq_1::Create_And_Modify_The_Same_Connection [GOOD] Test command err: 2026-01-08T15:33:12.567374Z node 1 :METADATA_PROVIDER WARN: log.cpp:841: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7593014240421266570:2086];send_to=[0:7307199536658146131:7762515]; 2026-01-08T15:33:12.582909Z node 1 :METADATA_PROVIDER 
ERROR: log.cpp:841: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2026-01-08T15:33:13.119881Z node 1 :YQ_CONTROL_PLANE_STORAGE ERROR: schema.cpp:160: Create directory "Root/yq" error: TRANSPORT_UNAVAILABLE [ {
: Error: GRpc error: (14): connections to all backends failing; last error: UNKNOWN: ipv4:127.0.0.1:19234: Failed to connect to remote host: Connection refused } {
: Error: Grpc error response on endpoint localhost:19234 } ] 2026-01-08T15:33:13.119994Z node 1 :YQ_CONTROL_PLANE_STORAGE WARN: schema.cpp:297: Create table "Root/yq/quotas". Create session error: [ {
: Error: GRpc error: (14): connections to all backends failing; last error: UNKNOWN: ipv4:127.0.0.1:19234: Failed to connect to remote host: Connection refused } {
: Error: Grpc error response on endpoint localhost:19234 } ] E0108 15:33:13.155961109 184681 dns_resolver_ares.cc:452] no server name supplied in dns URI E0108 15:33:13.156086810 184681 channel.cc:120] channel stack builder failed: UNKNOWN: the target uri is not valid: dns:/// 2026-01-08T15:33:13.295049Z node 1 :YQ_CONTROL_PLANE_STORAGE WARN: schema.cpp:297: Create table "Root/yq/nodes". Create session error: [ {
: Error: GRpc error: (14): connections to all backends failing; last error: UNKNOWN: ipv4:127.0.0.1:19234: Failed to connect to remote host: Connection refused } {
: Error: Grpc error response on endpoint localhost:19234 } ] 2026-01-08T15:33:13.598696Z node 1 :TX_CONVEYOR ERROR: log.cpp:841: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2026-01-08T15:33:13.618729Z node 1 :YQ_CONTROL_PLANE_STORAGE WARN: schema.cpp:297: Create table "Root/yq/tenant_acks". Create session error: [ {
: Error: GRpc error: (14): connections to all backends failing; last error: UNKNOWN: ipv4:127.0.0.1:19234: Failed to connect to remote host: Connection refused } {
: Error: Grpc error response on endpoint localhost:19234 } ] 2026-01-08T15:33:13.618959Z node 1 :YQ_CONTROL_PLANE_STORAGE ERROR: schema.cpp:160: Create directory "Root/yq" error: TRANSPORT_UNAVAILABLE [ {
: Error: GRpc error: (14): connections to all backends failing; last error: UNKNOWN: ipv4:127.0.0.1:19234: Failed to connect to remote host: Connection refused } {
: Error: Grpc error response on endpoint localhost:19234 } ] 2026-01-08T15:33:13.618997Z node 1 :YQ_CONTROL_PLANE_STORAGE WARN: schema.cpp:297: Create table "Root/yq/compute_databases". Create session error: [ {
: Error: GRpc error: (14): connections to all backends failing; last error: UNKNOWN: ipv4:127.0.0.1:19234: Failed to connect to remote host: Connection refused } {
: Error: Grpc error response on endpoint localhost:19234 } ] 2026-01-08T15:33:13.619292Z node 1 :YQ_CONTROL_PLANE_STORAGE WARN: schema.cpp:297: Create table "Root/yq/pending_small". Create session error: [ {
: Error: GRpc error: (14): connections to all backends failing; last error: UNKNOWN: ipv4:127.0.0.1:19234: Failed to connect to remote host: Connection refused } {
: Error: Grpc error response on endpoint localhost:19234 } ] 2026-01-08T15:33:13.619338Z node 1 :YQ_CONTROL_PLANE_STORAGE WARN: schema.cpp:297: Create table "Root/yq/tenants". Create session error: [ {
: Error: GRpc error: (14): connections to all backends failing; last error: UNKNOWN: ipv4:127.0.0.1:19234: Failed to connect to remote host: Connection refused } {
: Error: Grpc error response on endpoint localhost:19234 } ] 2026-01-08T15:33:13.723181Z node 1 :YQ_CONTROL_PLANE_STORAGE WARN: schema.cpp:297: Create table "Root/yq/jobs". Create session error: [ {
: Error: GRpc error: (14): connections to all backends failing; last error: UNKNOWN: ipv4:127.0.0.1:19234: Failed to connect to remote host: Connection refused } {
: Error: Grpc error response on endpoint localhost:19234 } ] 2026-01-08T15:33:13.789422Z node 1 :YQ_CONTROL_PLANE_STORAGE WARN: schema.cpp:297: Create table "Root/yq/mappings". Create session error: [ {
: Error: GRpc error: (14): connections to all backends failing; last error: UNKNOWN: ipv4:127.0.0.1:19234: Failed to connect to remote host: Connection refused } {
: Error: Grpc error response on endpoint localhost:19234 } ] 2026-01-08T15:33:13.799665Z node 1 :YQ_CONTROL_PLANE_STORAGE WARN: schema.cpp:297: Create table "Root/yq/result_sets". Create session error: [ {
: Error: GRpc error: (14): connections to all backends failing; last error: UNKNOWN: ipv4:127.0.0.1:19234: Failed to connect to remote host: Connection refused } {
: Error: Grpc error response on endpoint localhost:19234 } ] 2026-01-08T15:33:13.799757Z node 1 :YQ_CONTROL_PLANE_STORAGE WARN: schema.cpp:297: Create table "Root/yq/queries". Create session error: [ {
: Error: GRpc error: (14): connections to all backends failing; last error: UNKNOWN: ipv4:127.0.0.1:19234: Failed to connect to remote host: Connection refused } {
: Error: Grpc error response on endpoint localhost:19234 } ] 2026-01-08T15:33:13.800957Z node 1 :YQ_CONTROL_PLANE_STORAGE ERROR: schema.cpp:160: Create directory "Root/yq" error: TRANSPORT_UNAVAILABLE [ {
: Error: GRpc error: (14): connections to all backends failing; last error: UNKNOWN: ipv4:127.0.0.1:19234: Failed to connect to remote host: Connection refused } {
: Error: Grpc error response on endpoint localhost:19234 } ] 2026-01-08T15:33:13.801096Z node 1 :YQ_CONTROL_PLANE_STORAGE WARN: schema.cpp:297: Create table "Root/yq/idempotency_keys". Create session error: [ {
: Error: GRpc error: (14): connections to all backends failing; last error: UNKNOWN: ipv4:127.0.0.1:19234: Failed to connect to remote host: Connection refused } {
: Error: Grpc error response on endpoint localhost:19234 } ] 2026-01-08T15:33:13.803641Z node 1 :YQ_CONTROL_PLANE_STORAGE WARN: schema.cpp:297: Create table "Root/yq/quotas". Create session error: [ {
: Error: GRpc error: (14): connections to all backends failing; last error: UNKNOWN: ipv4:127.0.0.1:19234: Failed to connect to remote host: Connection refused } {
: Error: Grpc error response on endpoint localhost:19234 } ] 2026-01-08T15:33:13.807092Z node 1 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: LookupError, path: Root/.metadata/script_executions 2026-01-08T15:33:13.807254Z node 1 :METADATA_PROVIDER ERROR: log.cpp:841: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2026-01-08T15:33:13.814094Z node 1 :YQ_CONTROL_PLANE_STORAGE WARN: schema.cpp:297: Create table "Root/yq/bindings". Create session error: [ {
: Error: GRpc error: (14): connections to all backends failing; last error: UNKNOWN: ipv4:127.0.0.1:19234: Failed to connect to remote host: Connection refused } {
: Error: Grpc error response on endpoint localhost:19234 } ] 2026-01-08T15:33:13.825085Z node 1 :YQL_NODES_MANAGER ERROR: nodes_manager.cpp:395: ydb/core/fq/libs/actors/nodes_manager.cpp:353: TRANSPORT_UNAVAILABLE
: Error: GRpc error: (14): connections to all backends failing; last error: UNKNOWN: ipv4:127.0.0.1:19234: Failed to connect to remote host: Connection refused
: Error: Grpc error response on endpoint localhost:19234 2026-01-08T15:33:13.830382Z node 1 :YQ_CONTROL_PLANE_STORAGE WARN: schema.cpp:297: Create table "Root/yq/connections". Create session error: [ {
: Error: GRpc error: (14): connections to all backends failing; last error: UNKNOWN: ipv4:127.0.0.1:19234: Failed to connect to remote host: Connection refused } {
: Error: Grpc error response on endpoint localhost:19234 } ] 2026-01-08T15:33:13.846647Z node 1 :YQ_CONTROL_PLANE_STORAGE WARN: schema.cpp:297: Create table "Root/yq/nodes". Create session error: [ {
: Error: GRpc error: (14): connections to all backends failing; last error: UNKNOWN: ipv4:127.0.0.1:19234: Failed to connect to remote host: Connection refused } {
: Error: Grpc error response on endpoint localhost:19234 } ] 2026-01-08T15:33:13.870000Z node 1 :YQ_CONTROL_PLANE_STORAGE ERROR: schema.cpp:160: Create directory "Root/yq" error: TRANSPORT_UNAVAILABLE [ {
: Error: GRpc error: (14): connections to all backends failing; last error: UNKNOWN: ipv4:127.0.0.1:19234: Failed to connect to remote host: Connection refused } {
: Error: Grpc error response on endpoint localhost:19234 } ] 2026-01-08T15:33:13.870124Z node 1 :YQ_CONTROL_PLANE_STORAGE WARN: schema.cpp:297: Create table "Root/yq/pending_small". Create session error: [ {
: Error: GRpc error: (14): connections to all backends failing; last error: UNKNOWN: ipv4:127.0.0.1:19234: Failed to connect to remote host: Connection refused } {
: Error: Grpc error response on endpoint localhost:19234 } ] 2026-01-08T15:33:13.885116Z node 1 :YQ_CONTROL_PLANE_STORAGE WARN: schema.cpp:297: Create table "Root/yq/tenant_acks". Create session error: [ {
: Error: GRpc error: (14): connections to all backends failing; last error: UNKNOWN: ipv4:127.0.0.1:19234: Failed to connect to remote host: Connection refused } {
: Error: Grpc error response on endpoint localhost:19234 } ] 2026-01-08T15:33:13.901004Z node 1 :YQ_CONTROL_PLANE_STORAGE WARN: schema.cpp:297: Create table "Root/yq/tenants". Create session error: [ {
: Error: GRpc error: (14): connections to all backends failing; last error: UNKNOWN: ipv4:127.0.0.1:19234: Failed to connect to remote host: Connection refused } {
: Error: Grpc error response on endpoint localhost:19234 } ] 2026-01-08T15:33:13.918449Z node 1 :YQ_CONTROL_PLANE_STORAGE WARN: schema.cpp:297: Create table "Root/yq/compute_databases". Create session error: [ {
: Error: GRpc error: (14): connections to all backends failing; last error: UNKNOWN: ipv4:127.0.0.1:19234: Failed to connect to remote host: Connection refused } {
: Error: Grpc error response on endpoint localhost:19234 } ] 2026-01-08T15:33:13.941730Z node 1 :YQ_CONTROL_PLANE_STORAGE WARN: schema.cpp:297: Create table "Root/yq/jobs". Create session error: [ {
: Error: GRpc error: (14): connections to all backends failing; last error: UNKNOWN: ipv4:127.0.0.1:19234: Failed to connect to remote host: Connection refused } {
: Error: Grpc error response on endpoint localhost:19234 } ] 2026-01-08T15:33:13.941824Z node 1 :YQ_CONTROL_PLANE_STORAGE WARN: schema.cpp:297: Create table "Root/yq/mappings". Create session error: [ {
: Error: GRpc error: (14): connections to all backends failing; last error: UNKNOWN: ipv4:127.0.0.1:19234: Failed to connect to remote host: Connection refused } {
: Error: Grpc error response on endpoint localhost:19234 } ] 2026-01-08T15:33:13.943343Z node 1 :YQ_CONTROL_PLANE_STORAGE WARN: schema.cpp:297: Create table "Root/yq/bindings". Create session error: [ {
: Error: GRpc error: (14): connections to all backends failing; last error: UNKNOWN: ipv4:127.0.0.1:19234: Failed to connect to remote host: Connection refused } {
: Error: Grpc error response on endpoint localhost:19234 } ] 2026-01-08T15:33:13.943455Z node 1 :YQ_CONTROL_PLANE_STORAGE ERROR: schema.cpp:160: Create directory "Root/yq" error: TRANSPORT_UNAVAILABLE [ {
: Error: GRpc error: (14): connections to all backends failing; last error: UNKNOWN: ipv4:127.0.0.1:19234: Failed to connect to remote host: Connection refused } {
: Error: Grpc error response on endpoint local ... 5.788252Z node 4 :KQP_COMPUTE DEBUG: kqp_read_actor.cpp:1401: TxId: 281474976715772, task: 1, CA Id [4:7593014509231076969:3096]. returned 1 rows; processed 1 rows 2026-01-08T15:34:15.788281Z node 4 :KQP_COMPUTE DEBUG: kqp_read_actor.cpp:1441: TxId: 281474976715772, task: 1, CA Id [4:7593014509231076969:3096]. dropping batch for read #0 2026-01-08T15:34:15.788289Z node 4 :KQP_COMPUTE DEBUG: kqp_read_actor.cpp:468: TxId: 281474976715772, task: 1, CA Id [4:7593014509231076969:3096]. effective maxinflight 1024 sorted 0 2026-01-08T15:34:15.788297Z node 4 :KQP_COMPUTE DEBUG: kqp_read_actor.cpp:490: TxId: 281474976715772, task: 1, CA Id [4:7593014509231076969:3096]. Scheduled table scans, in flight: 0 shards. pending shards to read: 0, 2026-01-08T15:34:15.788310Z node 4 :KQP_COMPUTE DEBUG: kqp_read_actor.cpp:1466: TxId: 281474976715772, task: 1, CA Id [4:7593014509231076969:3096]. returned async data processed rows 1 left freeSpace 8388557 received rows 1 running reads 0 pending shards 0 finished = 1 has limit 0 limit reached 0 2026-01-08T15:34:15.788408Z node 4 :KQP_COMPUTE DEBUG: dq_compute_actor_impl.h:546: SelfId: [4:7593014509231076969:3096], TxId: 281474976715772, task: 1. Ctx: { CheckpointId : . TraceId : 01kef3x0kb1xnzqnrt90gkqtk6. CustomerSuppliedId : . RunScriptActorId : [0:0:0]. PoolId : default. SessionId : ydb://session/3?node_id=4&id=YTUyY2I1MjMtMjUyYzdjNjUtMmZiYjIxOGEtMWJjMmE0ZjA=. CurrentExecutionId : . Database : . DatabaseId : /Root. }. Compute state finished. All channels and sinks finished 2026-01-08T15:34:15.788554Z node 4 :KQP_COMPUTE DEBUG: log.cpp:841: fline=kqp_compute_actor_factory.cpp:60;problem=finish_compute_actor;tx_id=281474976715772;task_id=1;success=1;message={
: Error: COMPUTE_STATE_FINISHED }; 2026-01-08T15:34:15.788696Z node 4 :KQP_COMPUTE DEBUG: kqp_pure_compute_actor.cpp:160: SelfId: [4:7593014509231076970:3097], TxId: 281474976715772, task: 2. Ctx: { CheckpointId : . TraceId : 01kef3x0kb1xnzqnrt90gkqtk6. RunScriptActorId : [0:0:0]. CustomerSuppliedId : . PoolId : default. SessionId : ydb://session/3?node_id=4&id=YTUyY2I1MjMtMjUyYzdjNjUtMmZiYjIxOGEtMWJjMmE0ZjA=. CurrentExecutionId : . Database : . DatabaseId : /Root. }. CA StateFunc 271646922 2026-01-08T15:34:15.788774Z node 4 :KQP_COMPUTE DEBUG: dq_sync_compute_actor_base.h:413: SelfId: [4:7593014509231076970:3097], TxId: 281474976715772, task: 2. Ctx: { CheckpointId : . TraceId : 01kef3x0kb1xnzqnrt90gkqtk6. RunScriptActorId : [0:0:0]. CustomerSuppliedId : . PoolId : default. SessionId : ydb://session/3?node_id=4&id=YTUyY2I1MjMtMjUyYzdjNjUtMmZiYjIxOGEtMWJjMmE0ZjA=. CurrentExecutionId : . Database : . DatabaseId : /Root. }. About to drain async output 0. FreeSpace: 67108864, allowedOvercommit: 4194304, toSend: 71303168, finished: 0 2026-01-08T15:34:15.788853Z node 4 :KQP_COMPUTE DEBUG: kqp_write_actor.cpp:5019: SelfId: [4:7593014509231076972:3097], TxId: 281474976715772, task: 2. Add data: 233 / 233 2026-01-08T15:34:15.788889Z node 4 :KQP_COMPUTE DEBUG: kqp_write_actor.cpp:4987: SelfId: [4:7593014509231076972:3097], TxId: 281474976715772, task: 2. Send data=233, closed=1, bufferActorId=[4:7593014509231076964:2360] 2026-01-08T15:34:15.788905Z node 4 :KQP_COMPUTE DEBUG: dq_sync_compute_actor_base.h:427: SelfId: [4:7593014509231076970:3097], TxId: 281474976715772, task: 2. Ctx: { CheckpointId : . TraceId : 01kef3x0kb1xnzqnrt90gkqtk6. RunScriptActorId : [0:0:0]. CustomerSuppliedId : . PoolId : default. SessionId : ydb://session/3?node_id=4&id=YTUyY2I1MjMtMjUyYzdjNjUtMmZiYjIxOGEtMWJjMmE0ZjA=. CurrentExecutionId : . Database : . DatabaseId : /Root. }. Drain async output 0. Free space decreased: -9223372036787666944, sent data from buffer: 233 2026-01-08T15:34:15.788916Z node 4 :KQP_COMPUTE DEBUG: dq_compute_actor_impl.h:2128: SelfId: [4:7593014509231076970:3097], TxId: 281474976715772, task: 2. Ctx: { CheckpointId : . TraceId : 01kef3x0kb1xnzqnrt90gkqtk6. RunScriptActorId : [0:0:0]. CustomerSuppliedId : . PoolId : default. SessionId : ydb://session/3?node_id=4&id=YTUyY2I1MjMtMjUyYzdjNjUtMmZiYjIxOGEtMWJjMmE0ZjA=. CurrentExecutionId : . Database : . DatabaseId : /Root. }. Waiting finish of sink[0] 2026-01-08T15:34:15.789076Z node 4 :KQP_COMPUTE DEBUG: kqp_write_actor.cpp:2941: SelfId: [4:7593014509231076964:2360], SessionActorId: [4:7593014393266955181:2360], Create new TableWriteActor for table `Root/yq/connections` ([72057594046644480:40:1]). lockId=281474976715765. 
ActorId=[4:7593014509231076974:2360] 2026-01-08T15:34:15.789129Z node 4 :KQP_COMPUTE DEBUG: kqp_write_actor.cpp:481: Table: `Root/yq/connections` ([72057594046644480:40:1]), SessionActorId: [4:7593014393266955181:2360]Open: token=0 2026-01-08T15:34:15.789314Z node 4 :KQP_COMPUTE DEBUG: kqp_write_actor.cpp:489: SelfId: [4:7593014509231076974:2360], Table: `Root/yq/connections` ([72057594046644480:40:1]), SessionActorId: [4:7593014393266955181:2360]Write: token=0 2026-01-08T15:34:15.789414Z node 4 :KQP_COMPUTE DEBUG: kqp_write_actor.cpp:496: SelfId: [4:7593014509231076974:2360], Table: `Root/yq/connections` ([72057594046644480:40:1]), SessionActorId: [4:7593014393266955181:2360]Close: token=0 2026-01-08T15:34:15.789459Z node 4 :KQP_COMPUTE DEBUG: kqp_write_actor.cpp:4892: SelfId: [4:7593014509231076972:3097], TxId: 281474976715772, task: 2. TKqpForwardWriteActor recieve EvBufferWriteResult from [4:7593014509231076964:2360] 2026-01-08T15:34:15.789473Z node 4 :KQP_COMPUTE DEBUG: kqp_write_actor.cpp:4910: SelfId: [4:7593014509231076972:3097], TxId: 281474976715772, task: 2. Finished 2026-01-08T15:34:15.789492Z node 4 :KQP_COMPUTE DEBUG: kqp_pure_compute_actor.cpp:160: SelfId: [4:7593014509231076970:3097], TxId: 281474976715772, task: 2. Ctx: { CheckpointId : . TraceId : 01kef3x0kb1xnzqnrt90gkqtk6. RunScriptActorId : [0:0:0]. CustomerSuppliedId : . PoolId : default. SessionId : ydb://session/3?node_id=4&id=YTUyY2I1MjMtMjUyYzdjNjUtMmZiYjIxOGEtMWJjMmE0ZjA=. CurrentExecutionId : . Database : . DatabaseId : /Root. }. CA StateFunc 271646922 2026-01-08T15:34:15.789515Z node 4 :KQP_COMPUTE DEBUG: dq_compute_actor_impl.h:546: SelfId: [4:7593014509231076970:3097], TxId: 281474976715772, task: 2. Ctx: { CheckpointId : . TraceId : 01kef3x0kb1xnzqnrt90gkqtk6. RunScriptActorId : [0:0:0]. CustomerSuppliedId : . PoolId : default. SessionId : ydb://session/3?node_id=4&id=YTUyY2I1MjMtMjUyYzdjNjUtMmZiYjIxOGEtMWJjMmE0ZjA=. CurrentExecutionId : . Database : . DatabaseId : /Root. }. Compute state finished. All channels and sinks finished 2026-01-08T15:34:15.789615Z node 4 :KQP_COMPUTE DEBUG: log.cpp:841: fline=kqp_compute_actor_factory.cpp:60;problem=finish_compute_actor;tx_id=281474976715772;task_id=2;success=1;message={
: Error: COMPUTE_STATE_FINISHED }; 2026-01-08T15:34:15.789992Z node 4 :KQP_COMPUTE DEBUG: kqp_write_actor.cpp:3486: SelfId: [4:7593014509231076964:2360], SessionActorId: [4:7593014393266955181:2360], Start prepare for distributed commit 2026-01-08T15:34:15.790006Z node 4 :KQP_COMPUTE DEBUG: kqp_write_actor.cpp:1067: SelfId: [4:7593014509231076974:2360], Table: `Root/yq/connections` ([72057594046644480:40:1]), SessionActorId: [4:7593014393266955181:2360]SetPrepare; txId=281474976715772 2026-01-08T15:34:15.790021Z node 4 :KQP_COMPUTE DEBUG: kqp_write_actor.cpp:3409: SelfId: [4:7593014509231076964:2360], SessionActorId: [4:7593014393266955181:2360], Flush data 2026-01-08T15:34:15.790156Z node 4 :KQP_COMPUTE DEBUG: kqp_write_actor.cpp:1235: SelfId: [4:7593014509231076974:2360], Table: `Root/yq/connections` ([72057594046644480:40:1]), SessionActorId: [4:7593014393266955181:2360]Send EvWrite to ShardID=72075186224037890, isPrepare=1, isImmediateCommit=0, TxId=281474976715772, LockTxId=0, LockNodeId=0, Locks= LockId: 281474976715765 DataShard: 72075186224037890 Generation: 1 Counter: 2 SchemeShard: 72057594046644480 PathId: 40, Size=320, Cookie=1, OperationsCount=1, IsFinal=1, Attempts=0, Mode=1, BufferMemory=320 2026-01-08T15:34:15.790253Z node 4 :KQP_COMPUTE DEBUG: kqp_write_actor.cpp:3620: SelfId: [4:7593014509231076964:2360], SessionActorId: [4:7593014393266955181:2360], Send EvWrite (external) to ShardID=72075186224037897, isPrepare=1, isRollback=0, TxId=281474976715772, LockTxId=0, LockNodeId=0, Locks= LockId: 281474976715765 DataShard: 72075186224037897 Generation: 1 Counter: 2 SchemeShard: 72057594046644480 PathId: 41, Size=0, Cookie=0, OperationsCount=0, IsFinal=1, Attempts=0 2026-01-08T15:34:15.796186Z node 4 :KQP_COMPUTE DEBUG: kqp_write_actor.cpp:740: SelfId: [4:7593014509231076974:2360], Table: `Root/yq/connections` ([72057594046644480:40:1]), SessionActorId: [4:7593014393266955181:2360]Recv EvWriteResult from ShardID=72075186224037890, Status=STATUS_PREPARED, TxId=281474976715772, Locks= , Cookie=1 2026-01-08T15:34:15.796282Z node 4 :KQP_COMPUTE DEBUG: kqp_write_actor.cpp:3409: SelfId: [4:7593014509231076964:2360], SessionActorId: [4:7593014393266955181:2360], Flush data 2026-01-08T15:34:15.796330Z node 4 :KQP_COMPUTE DEBUG: kqp_write_actor.cpp:4150: SelfId: [4:7593014509231076964:2360], SessionActorId: [4:7593014393266955181:2360], Recv EvWriteResult (external) from ShardID=72075186224037897, Status=STATUS_PREPARED, TxId=281474976715772, Locks= , Cookie=0 2026-01-08T15:34:15.796347Z node 4 :KQP_COMPUTE DEBUG: kqp_write_actor.cpp:4468: SelfId: [4:7593014509231076964:2360], SessionActorId: [4:7593014393266955181:2360], Got prepared result TxId=281474976715772, TabletId=72075186224037897, Cookie=0 2026-01-08T15:34:15.796373Z node 4 :KQP_COMPUTE DEBUG: kqp_write_actor.cpp:3532: SelfId: [4:7593014509231076964:2360], SessionActorId: [4:7593014393266955181:2360], Start distributed commit with TxId=281474976715772 2026-01-08T15:34:15.796386Z node 4 :KQP_COMPUTE DEBUG: kqp_write_actor.cpp:1075: SelfId: [4:7593014509231076974:2360], Table: `Root/yq/connections` ([72057594046644480:40:1]), SessionActorId: [4:7593014393266955181:2360]SetDistributedCommit; txId=281474976715772 2026-01-08T15:34:15.796413Z node 4 :KQP_COMPUTE DEBUG: kqp_write_actor.cpp:3719: SelfId: [4:7593014509231076964:2360], SessionActorId: [4:7593014393266955181:2360], Execute planned transaction, coordinator: 72057594046316545, volitale: 1, shards: 2 2026-01-08T15:34:15.797777Z node 4 :KQP_COMPUTE DEBUG: 
kqp_write_actor.cpp:3796: SelfId: [4:7593014509231076964:2360], SessionActorId: [4:7593014393266955181:2360], Got transaction status, status: 16 2026-01-08T15:34:15.804806Z node 4 :KQP_COMPUTE DEBUG: kqp_write_actor.cpp:3796: SelfId: [4:7593014509231076964:2360], SessionActorId: [4:7593014393266955181:2360], Got transaction status, status: 17 2026-01-08T15:34:15.816504Z node 4 :KQP_COMPUTE DEBUG: kqp_write_actor.cpp:4182: SelfId: [4:7593014509231076964:2360], SessionActorId: [4:7593014393266955181:2360], Recv EvWriteResult (external) from ShardID=72075186224037897, Status=STATUS_COMPLETED, TxId=281474976715772, Locks= , Cookie=0 2026-01-08T15:34:15.816555Z node 4 :KQP_COMPUTE DEBUG: kqp_write_actor.cpp:4497: SelfId: [4:7593014509231076964:2360], SessionActorId: [4:7593014393266955181:2360], Got completed result TxId=281474976715772, TabletId=72075186224037897, Cookie=0, Locks= 2026-01-08T15:34:15.818221Z node 4 :KQP_COMPUTE DEBUG: kqp_write_actor.cpp:740: SelfId: [4:7593014509231076974:2360], Table: `Root/yq/connections` ([72057594046644480:40:1]), SessionActorId: [4:7593014393266955181:2360]Recv EvWriteResult from ShardID=72075186224037890, Status=STATUS_COMPLETED, TxId=281474976715772, Locks= , Cookie=0 2026-01-08T15:34:15.818265Z node 4 :KQP_COMPUTE DEBUG: kqp_write_actor.cpp:1023: SelfId: [4:7593014509231076974:2360], Table: `Root/yq/connections` ([72057594046644480:40:1]), SessionActorId: [4:7593014393266955181:2360]Got completed result TxId=281474976715772, TabletId=72075186224037890, Cookie=0, Mode=2, Locks= 2026-01-08T15:34:15.818300Z node 4 :KQP_COMPUTE DEBUG: kqp_write_actor.cpp:4535: SelfId: [4:7593014509231076964:2360], SessionActorId: [4:7593014393266955181:2360], Committed TxId=281474976715772 |87.3%| [TM] {BAZEL_UPLOAD} ydb/services/fq/ut_integration/unittest >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-LocalUser-DropUser-57 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-LocalUser-DropUser-58 |87.3%| [TM] {asan, default-linux-x86_64, release} ydb/services/ext_index/ut/unittest |87.3%| [TM] {BAZEL_UPLOAD} ydb/services/ext_index/ut/unittest >> test_s3.py::TestYdbS3TTL::test_s3[table_index_3__SYNC-pk_types6-all_types6-index6---SYNC] [GOOD] >> KqpRboYql::Filter-ColumnStore [GOOD] >> KqpRboYql::ExpressionSubquery >> BasicUsage::ReadMirrored [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_continuous_backup/unittest >> TContinuousBackupTests::TakeIncrementalBackup [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2143] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2143] Leader for TabletID 72057594046678944 is [1:129:2154] sender: [1:130:2058] recipient: [1:112:2143] 2026-01-08T15:34:14.356246Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7938: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2026-01-08T15:34:14.356341Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7966: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2026-01-08T15:34:14.356380Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7852: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2026-01-08T15:34:14.356422Z node 1 
:FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7868: OperationsProcessing config: using default configuration 2026-01-08T15:34:14.356466Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7874: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2026-01-08T15:34:14.356534Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7874: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2026-01-08T15:34:14.356614Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7998: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2026-01-08T15:34:14.356711Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_shred_manager.cpp:39: [RootShredManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, ShredInterval# 604800.000000s, ShredBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2026-01-08T15:34:14.357696Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:8069: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2026-01-08T15:34:14.358027Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2026-01-08T15:34:14.446061Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7755: Cannot subscribe to console configs 2026-01-08T15:34:14.446141Z node 1 :IMPORT WARN: schemeshard_import.cpp:440: Table profiles were not loaded 2026-01-08T15:34:14.458868Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2026-01-08T15:34:14.459261Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2026-01-08T15:34:14.459460Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2026-01-08T15:34:14.472354Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2026-01-08T15:34:14.472819Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:464: Clear TempDirsState with owners number: 0 2026-01-08T15:34:14.473566Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1383: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2026-01-08T15:34:14.474358Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2026-01-08T15:34:14.478035Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:160: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2026-01-08T15:34:14.478268Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_shred_manager.cpp:84: [RootShredManager] Stop 2026-01-08T15:34:14.479463Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2026-01-08T15:34:14.479526Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2026-01-08T15:34:14.479620Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2026-01-08T15:34:14.479659Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: 
[OwnerId: 72057594046678944, LocalPathId: 1] 2026-01-08T15:34:14.479691Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:213: TTxServerlessStorageBilling.Complete 2026-01-08T15:34:14.479861Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:7111: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2026-01-08T15:34:14.651992Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot" OperationType: ESchemeOpMkDir MkDir { Name: ".sys" } Internal: true FailOnExist: false } TxId: 281474976710657 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:34:14.653326Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "auth_permissions" Type: EAuthPermissions } } TxId: 281474976710658 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:34:14.653522Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "auth_effective_permissions" Type: EAuthEffectivePermissions } } TxId: 281474976710659 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:34:14.653621Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "auth_users" Type: EAuthUsers } } TxId: 281474976710660 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:34:14.653705Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "pg_tables" Type: EPgTables } } TxId: 281474976710661 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:34:14.653783Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "ds_pdisks" Type: EPDisks } } TxId: 281474976710662 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:34:14.653859Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "auth_group_members" Type: EAuthGroupMembers } } TxId: 281474976710663 Owner: "metadata@system" 
UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:34:14.653974Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "top_partitions_one_minute" Type: ETopPartitionsByCpuOneMinute } } TxId: 281474976710664 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:34:14.654121Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "top_queries_by_request_units_one_hour" Type: ETopQueriesByRequestUnitsOneHour } } TxId: 281474976710665 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:34:14.654241Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "ds_vslots" Type: EVSlots } } TxId: 281474976710666 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:34:14.654362Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "top_partitions_by_tli_one_hour" Type: ETopPartitionsByTliOneHour } } TxId: 281474976710667 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:34:14.654449Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "nodes" Type: ENodes } } TxId: 281474976710668 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:34:14.654550Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "top_queries_by_cpu_time_one_hour" Type: ETopQueriesByCpuTimeOneHour } } TxId: 281474976710669 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:34:14.654618Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "top_queries_by_read_bytes_one_hour" Type: ETopQueriesByReadBytesOneHour } } TxId: 281474976710670 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:34:14.654723Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "top_queries_by_read_bytes_one_minute" Type: ETopQueriesByR ... l shard schema changes has been received, operationId: 281474976710757:0, at schemeshard: 72057594046678944 2026-01-08T15:34:17.828266Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:722: send schema changes ack message, operation: 281474976710757:0, datashard: 72075186233409546, at schemeshard: 72057594046678944 2026-01-08T15:34:17.828319Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2700: Change state for txid 281474976710757:0 129 -> 240 2026-01-08T15:34:17.832617Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:656: TTxOperationReply complete, operationId: 281474976710757:0, at schemeshard: 72057594046678944 2026-01-08T15:34:17.833265Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:656: TTxOperationReply complete, operationId: 281474976710757:0, at schemeshard: 72057594046678944 2026-01-08T15:34:17.833451Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:494: TTxOperationProgress Execute, operationId: 281474976710757:0, at schemeshard: 72057594046678944 2026-01-08T15:34:17.833507Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:497: [72057594046678944] TDone opId# 281474976710757:0 ProgressState 2026-01-08T15:34:17.833713Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:928: Part operation is done id#281474976710757:0 progress is 3/3 2026-01-08T15:34:17.833770Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1714: TOperation IsReadyToDone TxId: 281474976710757 ready parts: 3/3 2026-01-08T15:34:17.833832Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:928: Part operation is done id#281474976710757:0 progress is 3/3 2026-01-08T15:34:17.833876Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1714: TOperation IsReadyToDone TxId: 281474976710757 ready parts: 3/3 2026-01-08T15:34:17.833955Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1679: TOperation IsReadyToNotify, TxId: 281474976710757, ready parts: 3/3, is published: true 2026-01-08T15:34:17.834049Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1702: TOperation DoNotify send TEvNotifyTxCompletionResult to actorId: [1:1368:3188] message: TxId: 281474976710757 2026-01-08T15:34:17.834114Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1714: TOperation IsReadyToDone TxId: 281474976710757 ready parts: 3/3 2026-01-08T15:34:17.834163Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:995: Operation and all the parts is done, operation id: 281474976710757:0 2026-01-08T15:34:17.834201Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5577: RemoveTx for txid 281474976710757:0 2026-01-08T15:34:17.834407Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:611: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 38] was 5 2026-01-08T15:34:17.834461Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:995: Operation and all the parts is done, operation id: 281474976710757:1 2026-01-08T15:34:17.834485Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5577: RemoveTx for txid 
281474976710757:1 2026-01-08T15:34:17.834519Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:611: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 39] was 2 2026-01-08T15:34:17.834544Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:995: Operation and all the parts is done, operation id: 281474976710757:2 2026-01-08T15:34:17.834566Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5577: RemoveTx for txid 281474976710757:2 2026-01-08T15:34:17.834675Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:611: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 40] was 1 2026-01-08T15:34:17.835389Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:123: TTxCleanDroppedPaths Execute, 1 paths in candidate queue, at schemeshard: 72057594046678944 2026-01-08T15:34:17.835453Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:137: TTxCleanDroppedPaths: PersistRemovePath for PathId# [OwnerId: 72057594046678944, LocalPathId: 40], at schemeshard: 72057594046678944 2026-01-08T15:34:17.835551Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:611: DecrementPathDbRefCount reason remove path for pathId [OwnerId: 72057594046678944, LocalPathId: 39] was 1 2026-01-08T15:34:17.835602Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:137: TTxCleanDroppedPaths: PersistRemovePath for PathId# [OwnerId: 72057594046678944, LocalPathId: 39], at schemeshard: 72057594046678944 2026-01-08T15:34:17.835646Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:611: DecrementPathDbRefCount reason remove path for pathId [OwnerId: 72057594046678944, LocalPathId: 38] was 4 2026-01-08T15:34:17.840596Z node 1 :CONTINUOUS_BACKUP ERROR: schemeshard_backup_incremental__progress.cpp:189: TIncrementalBackup::TTxProgress: Incremental backup with id# 0 not found 2026-01-08T15:34:17.840969Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:160: TTxCleanDroppedPaths Complete, done PersistRemovePath for 2 paths, skipped 0, left 0 candidates, at schemeshard: 72057594046678944 2026-01-08T15:34:20.212362Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/Table/0_continuousBackupImpl" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: true }, at schemeshard: 72057594046678944 2026-01-08T15:34:20.212656Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/Table/0_continuousBackupImpl" took 343us result status StatusPathDoesNotExist 2026-01-08T15:34:20.212874Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusPathDoesNotExist Reason: "Check failed: path: \'/MyRoot/Table/0_continuousBackupImpl\', error: path hasn\'t been resolved, nearest resolved path: \'/MyRoot/Table\' (id: [OwnerId: 72057594046678944, LocalPathId: 38])" Path: "/MyRoot/Table/0_continuousBackupImpl" PathId: 18446744073709551615 LastExistedPrefixPath: "/MyRoot/Table" LastExistedPrefixPathId: 38 LastExistedPrefixDescription { Self { Name: "Table" PathId: 38 SchemeshardId: 72057594046678944 PathType: EPathTypeTable CreateFinished: true CreateTxId: 101 CreateStep: 5000038 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" 
EffectiveACL: "" PathSubType: EPathSubTypeEmpty ChildrenExist: true } } PathOwnerId: 18446744073709551615, at schemeshard: 72057594046678944 2026-01-08T15:34:20.213537Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/Table/0_continuousBackupImpl/streamImpl" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: true }, at schemeshard: 72057594046678944 2026-01-08T15:34:20.213793Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/Table/0_continuousBackupImpl/streamImpl" took 224us result status StatusPathDoesNotExist 2026-01-08T15:34:20.213939Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusPathDoesNotExist Reason: "Check failed: path: \'/MyRoot/Table/0_continuousBackupImpl/streamImpl\', error: path hasn\'t been resolved, nearest resolved path: \'/MyRoot/Table\' (id: [OwnerId: 72057594046678944, LocalPathId: 38])" Path: "/MyRoot/Table/0_continuousBackupImpl/streamImpl" PathId: 18446744073709551615 LastExistedPrefixPath: "/MyRoot/Table" LastExistedPrefixPathId: 38 LastExistedPrefixDescription { Self { Name: "Table" PathId: 38 SchemeshardId: 72057594046678944 PathType: EPathTypeTable CreateFinished: true CreateTxId: 101 CreateStep: 5000038 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathSubType: EPathSubTypeEmpty ChildrenExist: true } } PathOwnerId: 18446744073709551615, at schemeshard: 72057594046678944 2026-01-08T15:34:20.214565Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/IncrBackupImpl" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: true }, at schemeshard: 72057594046678944 2026-01-08T15:34:20.214827Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/IncrBackupImpl" took 270us result status StatusSuccess 2026-01-08T15:34:20.215325Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/IncrBackupImpl" PathDescription { Self { Name: "IncrBackupImpl" PathId: 42 SchemeshardId: 72057594046678944 PathType: EPathTypeTable CreateFinished: true CreateTxId: 103 CreateStep: 5000040 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "IncrBackupImpl" Columns { Name: "key" Type: "Uint64" TypeId: 4 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "value" Type: "Uint64" TypeId: 4 Id: 2 NotNull: false IsBuildInProgress: false } Columns { Name: "__ydb_incrBackupImpl_changeMetadata" Type: "String" TypeId: 4097 Id: 3 NotNull: false IsBuildInProgress: false } KeyColumnNames: "key" KeyColumnIds: 1 TableSchemaVersion: 1 IsBackup: false IncrementalBackupConfig { Mode: RESTORE_MODE_INCREMENTAL_BACKUP Consistency: CONSISTENCY_WEAK } IsRestore: false } TableStats { DataSize: 0 RowCount: 0 IndexSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 
PlannedTxCompleted: 0 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 1 RangeReadRows: 0 StoragePools { } ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 0 Memory: 0 Network: 0 Storage: 0 ReadThroughput: 0 WriteThroughput: 0 ReadIops: 0 WriteIops: 0 } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 40 PathsLimit: 10000 ShardsInside: 4 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 1 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } UserAttributes { Key: "__incremental_backup" Value: "{}" } } PathId: 42 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 |87.3%| [TM] {BAZEL_UPLOAD} ydb/core/tx/schemeshard/ut_continuous_backup/unittest |87.3%| [TM] {asan, default-linux-x86_64, release} ydb/services/ext_index/ut/unittest |87.3%| [TM] {BAZEL_UPLOAD} ydb/services/ext_index/ut/unittest >> KqpRboYql::ConstantFolding-ColumnStore [GOOD] >> KqpRboYql::BasicJoins >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-LocalUser-CreateUser-10 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-LocalUser-CreateUser-11 >> DataShardVolatile::UpsertNoLocksArbiterRestart+UseSink [GOOD] >> DataShardVolatile::UpsertNoLocksArbiterRestart-UseSink |87.3%| [TA] $(B)/ydb/core/tx/schemeshard/ut_continuous_backup/test-results/unittest/{meta.json ... results_accumulator.log} |87.3%| [TM] {asan, default-linux-x86_64, release} ydb/services/ext_index/ut/unittest |87.3%| [TM] {asan, default-linux-x86_64, release} ydb/services/ext_index/ut/unittest |87.3%| [TM] {BAZEL_UPLOAD} ydb/services/ext_index/ut/unittest |87.3%| [TM] {BAZEL_UPLOAD} ydb/services/ext_index/ut/unittest |87.3%| [TA] {BAZEL_UPLOAD} $(B)/ydb/core/tx/schemeshard/ut_continuous_backup/test-results/unittest/{meta.json ... 
results_accumulator.log} |87.3%| [TM] {asan, default-linux-x86_64, release} ydb/services/ext_index/ut/unittest >> KqpPg::ValuesInsert+useSink [GOOD] >> KqpPg::ValuesInsert-useSink >> BasicUsage::AlterTopicWithSharedConsumer_DeleteDeadLetterPolicy_AlterMoveDeadLetterPolicy [GOOD] >> BasicUsage::AlterDeadLetterPolicy_StreamingConsumer |87.3%| [TM] {BAZEL_UPLOAD} ydb/services/ext_index/ut/unittest |87.3%| [TM] {asan, default-linux-x86_64, release} ydb/services/ext_index/ut/unittest >> BasicUsage::WaitEventBlocksBeforeDiscovery [GOOD] >> BasicUsage::SimpleHandlers |87.3%| [TM] {BAZEL_UPLOAD} ydb/services/ext_index/ut/unittest ------- [TM] {asan, default-linux-x86_64, release} ydb/public/sdk/cpp/src/client/federated_topic/ut/unittest >> BasicUsage::ReadMirrored [GOOD] Test command err: 2026-01-08T15:33:31.166059Z :PropagateSessionClosed INFO: Random seed for debugging is 1767886411166020 2026-01-08T15:33:31.882520Z node 1 :METADATA_PROVIDER WARN: log.cpp:841: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7593014321836654596:2086];send_to=[0:7307199536658146131:7762515]; 2026-01-08T15:33:31.932854Z node 1 :METADATA_PROVIDER ERROR: log.cpp:841: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2026-01-08T15:33:32.103247Z node 1 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:125: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, got undelivered to scheme cache: ActorUnknown 2026-01-08T15:33:32.138608Z node 1 :PQ_READ_PROXY DEBUG: caching_service.cpp:44: Direct read cache: : Created 2026-01-08T15:33:32.178964Z node 2 :METADATA_PROVIDER WARN: log.cpp:841: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7593014326060300615:2099];send_to=[0:7307199536658146131:7762515]; 2026-01-08T15:33:32.179037Z node 2 :METADATA_PROVIDER ERROR: log.cpp:841: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2026-01-08T15:33:32.186575Z node 1 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:125: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, got undelivered to scheme cache: ActorUnknown 2026-01-08T15:33:32.293660Z node 2 :PQ_READ_PROXY DEBUG: caching_service.cpp:44: Direct read cache: : Created 2026-01-08T15:33:32.605142Z node 2 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: LookupError, path: Root/.metadata/script_executions 2026-01-08T15:33:32.655860Z node 1 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: LookupError, path: Root/.metadata/script_executions 2026-01-08T15:33:32.801975Z node 2 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: LookupError, path: Root/.metadata/script_executions 2026-01-08T15:33:32.812006Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2026-01-08T15:33:32.812126Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 
2026-01-08T15:33:32.821916Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2026-01-08T15:33:32.822017Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2026-01-08T15:33:32.845605Z node 1 :HIVE WARN: hive_impl.cpp:814: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 2 Cookie 2 2026-01-08T15:33:32.845810Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2026-01-08T15:33:32.847042Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2026-01-08T15:33:33.060250Z node 1 :TX_CONVEYOR ERROR: log.cpp:841: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2026-01-08T15:33:33.069410Z node 1 :IMPORT WARN: schemeshard_import.cpp:440: Table profiles were not loaded 2026-01-08T15:33:33.092387Z node 1 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: PathErrorUnknown, path: Root/.metadata/script_executions 2026-01-08T15:33:33.266578Z node 2 :TX_CONVEYOR ERROR: log.cpp:841: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2026-01-08T15:33:33.412549Z node 2 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: PathErrorUnknown, path: Root/.metadata/script_executions 2026-01-08T15:33:33.438818Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: /home/runner/.ya/build/build_root/663n/0014a8/r3tmp/yandex8sJoqQ.tmp 2026-01-08T15:33:33.438859Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: /home/runner/.ya/build/build_root/663n/0014a8/r3tmp/yandex8sJoqQ.tmp 2026-01-08T15:33:33.439019Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:202: successfully initialized from file: /home/runner/.ya/build/build_root/663n/0014a8/r3tmp/yandex8sJoqQ.tmp 2026-01-08T15:33:33.439098Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2026-01-08T15:33:33.562876Z INFO: TTestServer started on Port 8182 GrpcPort 30145 PQClient connected to localhost:30145 2026-01-08T15:33:34.008099Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2026-01-08T15:33:36.883774Z node 1 :METADATA_PROVIDER ERROR: log.cpp:841: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7593014321836654596:2086];send_to=[0:7307199536658146131:7762515]; 2026-01-08T15:33:36.883831Z node 1 :METADATA_PROVIDER ERROR: log.cpp:841: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2026-01-08T15:33:37.179344Z node 2 :METADATA_PROVIDER ERROR: log.cpp:841: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[2:7593014326060300615:2099];send_to=[0:7307199536658146131:7762515]; 
2026-01-08T15:33:37.179417Z node 2 :METADATA_PROVIDER ERROR: log.cpp:841: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2026-01-08T15:33:37.294939Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7593014347606459711:2332], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:33:37.295002Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7593014347606459719:2335], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:33:37.295043Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:263: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:33:37.295737Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7593014347606459726:2337], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:33:37.295801Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:263: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:33:37.298855Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715661:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2026-01-08T15:33:37.331509Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7593014347606459725:2336], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715661 completed, doublechecking } 2026-01-08T15:33:37.728600Z node 1 :TX_PROXY ERROR: schemereq.cpp:624: Actor# [1:7593014347606459819:2994] txid# 281474976715662, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 44], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2026-01-08T15:33:37.760382Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:33:37.777693Z node 2 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:625: Compilation failed, self: [2:7593014347535137395:2309], status: SCHEME_ERROR, issues:
: Error: Type annotation, code: 1030
:3:16: Error: At function: KiReadTable!
:3:16: Error: Cannot find table 'db.[/Root/PQ/Config/V2/Cluster]' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions., code: 2003 2026-01-08T15:33:37.778782Z node 2 :KQP_SESSION WARN: {KQPSA@kqp_session_actor.cpp:2679} SessionId: ydb://session/3?node_id=2&id=NjllZTFkMmQtZjlhZWRhMTYtN2M2MmU1MjYtZWYzMzlhMjM=, ActorId: [2:7593014347535137363:2302], ActorState: ExecuteState, LegacyTraceId: 01kef3vve8f6ax1c9q37yyjcza, ReplyQueryCompileError, remove tx status# SCHEME_ERROR issues# { message: "Type annotation" issue_code: 1030 severity: 1 issues { position { row: 3 column: 16 } message: "At function: KiReadTable!" end_position { row: 3 column: 16 } severity: 1 issues { position { row: 3 column: 16 } message: "Cannot find table \'db.[/Root/PQ/Config/V2/Cluster]\' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions." end_position { row: 3 column: 16 } issue_code: 2003 severity: 1 } } } tx_id# trace_id# 2026-01-08T15:33:37.783759Z node 1 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:625: Compilation failed, self: [1:7593014347606459832:2344], status: SCHEME_ERROR, issues:
: Error: Type annotation, code: 1030
:3:16: Error: At function: KiReadTable!
:3:16: Error: Cannot find table 'db.[/Root/PQ/Config/V2/Cluster]' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions., code: 2003 2026-01-08T15:33:37.784136Z node 1 :KQP_SESSION WARN: {KQPSA@kqp_session_actor.cpp:2679} SessionId: ydb://session/3?node_id=1&id=ZGRmMTFkN2MtOGY2ODIyNS04YmYyMjJiMi05M2I1NmIxNQ==, ActorId: [1:7593014347606459709:2331], ActorState: ExecuteState, LegacyTraceId: 01kef3vvcdawhske94z6rzxaw0, ReplyQueryCompileError, remove tx status# SCHEME_ERROR issues# { message: "Type annotation" issue_code: 1030 severity: 1 issues { position { row: 3 column: 16 } message: "At function: KiReadTable!" end_position { row: 3 column: 16 } severity: 1 issues { position { row: 3 column: 16 } message: "Cannot find table \'db.[/Root/PQ/Config/V2/Cluster]\' because it does not exist or y ... : SessionClosed { Status: ABORTED Issues: "
: Error: Aborted " } 2026-01-08T15:34:19.829706Z node 3 :PQ_WRITE_PROXY DEBUG: write_session_actor.cpp:254: session v1 cookie: 4 sessionId: src_id|30d94de-60285f01-ae497050-e7b1f2a5_0 grpc read done: success: 0 data: 2026-01-08T15:34:19.829734Z node 3 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:256: session v1 cookie: 4 sessionId: src_id|30d94de-60285f01-ae497050-e7b1f2a5_0 grpc read failed 2026-01-08T15:34:19.829764Z node 3 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:232: session v1 cookie: 4 sessionId: src_id|30d94de-60285f01-ae497050-e7b1f2a5_0 grpc closed 2026-01-08T15:34:19.829783Z node 3 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:304: session v1 cookie: 4 sessionId: src_id|30d94de-60285f01-ae497050-e7b1f2a5_0 is DEAD 2026-01-08T15:34:19.829943Z node 3 :PQ_READ_PROXY DEBUG: read_session_actor.cpp:122: session cookie 1 consumer shared/user session shared/user_3_1_17414571000877631973_v1 grpc read done: success# 0, data# { } 2026-01-08T15:34:19.829960Z node 3 :PQ_READ_PROXY INFO: read_session_actor.cpp:125: session cookie 1 consumer shared/user session shared/user_3_1_17414571000877631973_v1 grpc read failed 2026-01-08T15:34:19.829985Z node 3 :PQ_READ_PROXY INFO: read_session_actor.cpp:92: session cookie 1 consumer shared/user session shared/user_3_1_17414571000877631973_v1 grpc closed 2026-01-08T15:34:19.830032Z node 3 :PQ_READ_PROXY INFO: read_session_actor.cpp:383: session cookie 1 consumer shared/user session shared/user_3_1_17414571000877631973_v1 is DEAD 2026-01-08T15:34:19.830726Z node 3 :PQ_WRITE_PROXY DEBUG: writer.cpp:562: TPartitionWriter 72075186224037896 (partition=0) Received event: NActors::TEvents::TEvPoison 2026-01-08T15:34:19.831446Z node 4 :PERSQUEUE DEBUG: pq_impl.cpp:2321: [PQ: 72075186224037894] Destroy direct read session shared/user_3_1_17414571000877631973_v1 2026-01-08T15:34:19.831485Z node 4 :PERSQUEUE DEBUG: pq_impl.cpp:2809: [PQ: 72075186224037894] server disconnected, pipe [3:7593014526778846635:2529] destroyed 2026-01-08T15:34:19.831518Z node 4 :PERSQUEUE DEBUG: pq_impl.cpp:2809: [PQ: 72075186224037896] server disconnected, pipe [3:7593014526778846760:2538] destroyed 2026-01-08T15:34:19.831533Z node 4 :PERSQUEUE DEBUG: pq_impl.cpp:2321: [PQ: 72075186224037896] Destroy direct read session shared/user_3_1_17414571000877631973_v1 2026-01-08T15:34:19.831549Z node 4 :PERSQUEUE DEBUG: pq_impl.cpp:2809: [PQ: 72075186224037896] server disconnected, pipe [3:7593014526778846636:2527] destroyed 2026-01-08T15:34:19.831580Z node 4 :PERSQUEUE DEBUG: partition_write.cpp:140: [72075186224037896][Partition][0][StateIdle] TPartition::DropOwner. 
2026-01-08T15:34:19.831608Z node 4 :PERSQUEUE DEBUG: partition.cpp:2329: [72075186224037896][Partition][0][StateIdle] Process user action and tx events 2026-01-08T15:34:19.831622Z node 4 :PERSQUEUE DEBUG: partition.cpp:2388: [72075186224037896][Partition][0][StateIdle] Events: 0, PendingCommits: 0, PendingWrites: 0 2026-01-08T15:34:19.831633Z node 4 :PERSQUEUE DEBUG: partition.cpp:2337: [72075186224037896][Partition][0][StateIdle] Process user action and tx pending commits 2026-01-08T15:34:19.831648Z node 4 :PERSQUEUE DEBUG: partition.cpp:2388: [72075186224037896][Partition][0][StateIdle] Events: 0, PendingCommits: 0, PendingWrites: 0 2026-01-08T15:34:19.831659Z node 4 :PERSQUEUE DEBUG: partition.cpp:2347: [72075186224037896][Partition][0][StateIdle] Try persist 2026-01-08T15:34:19.831697Z node 4 :PERSQUEUE DEBUG: pq_impl.cpp:2321: [PQ: 72075186224037892] Destroy direct read session shared/user_3_1_17414571000877631973_v1 2026-01-08T15:34:19.831728Z node 4 :PERSQUEUE DEBUG: pq_impl.cpp:2809: [PQ: 72075186224037892] server disconnected, pipe [3:7593014526778846634:2528] destroyed 2026-01-08T15:34:19.831771Z node 4 :PQ_READ_PROXY DEBUG: caching_service.cpp:139: Direct read cache: server session deregistered: shared/user_3_1_17414571000877631973_v1 2026-01-08T15:34:19.831786Z node 4 :PQ_READ_PROXY DEBUG: caching_service.cpp:139: Direct read cache: server session deregistered: shared/user_3_1_17414571000877631973_v1 2026-01-08T15:34:19.831798Z node 4 :PQ_READ_PROXY DEBUG: caching_service.cpp:139: Direct read cache: server session deregistered: shared/user_3_1_17414571000877631973_v1 2026-01-08T15:34:19.832223Z node 3 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer__balancing.cpp:1661: [72075186224037893][rt3.dc1--test-topic] pipe [3:7593014526778846626:2524] disconnected. 2026-01-08T15:34:19.832256Z node 3 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1672: [72075186224037893][rt3.dc1--test-topic] pipe [3:7593014526778846626:2524] disconnected; active server actors: 1 2026-01-08T15:34:19.832274Z node 3 :PERSQUEUE_READ_BALANCER NOTICE: read_balancer__balancing.cpp:1681: [72075186224037893][rt3.dc1--test-topic] pipe [3:7593014526778846626:2524] client user disconnected session shared/user_3_1_17414571000877631973_v1 2026-01-08T15:34:19.832350Z node 3 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer__balancing.cpp:1661: [72075186224037895][rt3.dc1--test-topic-mirrored-from-dc2] pipe [3:7593014526778846627:2524] disconnected. 2026-01-08T15:34:19.832367Z node 3 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1672: [72075186224037895][rt3.dc1--test-topic-mirrored-from-dc2] pipe [3:7593014526778846627:2524] disconnected; active server actors: 1 2026-01-08T15:34:19.832388Z node 3 :PERSQUEUE_READ_BALANCER NOTICE: read_balancer__balancing.cpp:1681: [72075186224037895][rt3.dc1--test-topic-mirrored-from-dc2] pipe [3:7593014526778846627:2524] client user disconnected session shared/user_3_1_17414571000877631973_v1 2026-01-08T15:34:19.832450Z node 3 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer__balancing.cpp:1661: [72075186224037897][rt3.dc1--test-topic-mirrored-from-dc3] pipe [3:7593014526778846625:2524] disconnected. 
2026-01-08T15:34:19.832474Z node 3 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1672: [72075186224037897][rt3.dc1--test-topic-mirrored-from-dc3] pipe [3:7593014526778846625:2524] disconnected; active server actors: 1 2026-01-08T15:34:19.832486Z node 3 :PERSQUEUE_READ_BALANCER NOTICE: read_balancer__balancing.cpp:1681: [72075186224037897][rt3.dc1--test-topic-mirrored-from-dc3] pipe [3:7593014526778846625:2524] client user disconnected session shared/user_3_1_17414571000877631973_v1 2026-01-08T15:34:19.852767Z node 4 :PERSQUEUE DEBUG: partition.cpp:2329: [72075186224037892][Partition][0][StateIdle] Process user action and tx events 2026-01-08T15:34:19.852797Z node 4 :PERSQUEUE DEBUG: partition.cpp:2388: [72075186224037892][Partition][0][StateIdle] Events: 0, PendingCommits: 0, PendingWrites: 0 2026-01-08T15:34:19.852811Z node 4 :PERSQUEUE DEBUG: partition.cpp:2337: [72075186224037892][Partition][0][StateIdle] Process user action and tx pending commits 2026-01-08T15:34:19.852830Z node 4 :PERSQUEUE DEBUG: partition.cpp:2388: [72075186224037892][Partition][0][StateIdle] Events: 0, PendingCommits: 0, PendingWrites: 0 2026-01-08T15:34:19.852842Z node 4 :PERSQUEUE DEBUG: partition.cpp:2347: [72075186224037892][Partition][0][StateIdle] Try persist 2026-01-08T15:34:19.897429Z node 4 :PERSQUEUE DEBUG: partition.cpp:2329: [72075186224037894][Partition][0][StateIdle] Process user action and tx events 2026-01-08T15:34:19.897465Z node 4 :PERSQUEUE DEBUG: partition.cpp:2388: [72075186224037894][Partition][0][StateIdle] Events: 0, PendingCommits: 0, PendingWrites: 0 2026-01-08T15:34:19.897479Z node 4 :PERSQUEUE DEBUG: partition.cpp:2337: [72075186224037894][Partition][0][StateIdle] Process user action and tx pending commits 2026-01-08T15:34:19.897518Z node 4 :PERSQUEUE DEBUG: partition.cpp:2388: [72075186224037894][Partition][0][StateIdle] Events: 0, PendingCommits: 0, PendingWrites: 0 2026-01-08T15:34:19.897530Z node 4 :PERSQUEUE DEBUG: partition.cpp:2347: [72075186224037894][Partition][0][StateIdle] Try persist 2026-01-08T15:34:19.921949Z node 4 :PERSQUEUE DEBUG: partition.cpp:2329: [72075186224037896][Partition][0][StateIdle] Process user action and tx events 2026-01-08T15:34:19.921979Z node 4 :PERSQUEUE DEBUG: partition.cpp:2388: [72075186224037896][Partition][0][StateIdle] Events: 0, PendingCommits: 0, PendingWrites: 0 2026-01-08T15:34:19.921992Z node 4 :PERSQUEUE DEBUG: partition.cpp:2337: [72075186224037896][Partition][0][StateIdle] Process user action and tx pending commits 2026-01-08T15:34:19.922012Z node 4 :PERSQUEUE DEBUG: partition.cpp:2388: [72075186224037896][Partition][0][StateIdle] Events: 0, PendingCommits: 0, PendingWrites: 0 2026-01-08T15:34:19.922022Z node 4 :PERSQUEUE DEBUG: partition.cpp:2347: [72075186224037896][Partition][0][StateIdle] Try persist 2026-01-08T15:34:19.953521Z node 4 :PERSQUEUE DEBUG: partition.cpp:2329: [72075186224037892][Partition][0][StateIdle] Process user action and tx events 2026-01-08T15:34:19.953555Z node 4 :PERSQUEUE DEBUG: partition.cpp:2388: [72075186224037892][Partition][0][StateIdle] Events: 0, PendingCommits: 0, PendingWrites: 0 2026-01-08T15:34:19.953581Z node 4 :PERSQUEUE DEBUG: partition.cpp:2337: [72075186224037892][Partition][0][StateIdle] Process user action and tx pending commits 2026-01-08T15:34:19.953601Z node 4 :PERSQUEUE DEBUG: partition.cpp:2388: [72075186224037892][Partition][0][StateIdle] Events: 0, PendingCommits: 0, PendingWrites: 0 2026-01-08T15:34:19.953612Z node 4 :PERSQUEUE DEBUG: partition.cpp:2347: 
[72075186224037892][Partition][0][StateIdle] Try persist 2026-01-08T15:34:19.998531Z node 4 :PERSQUEUE DEBUG: partition.cpp:2329: [72075186224037894][Partition][0][StateIdle] Process user action and tx events 2026-01-08T15:34:19.998577Z node 4 :PERSQUEUE DEBUG: partition.cpp:2388: [72075186224037894][Partition][0][StateIdle] Events: 0, PendingCommits: 0, PendingWrites: 0 2026-01-08T15:34:19.998594Z node 4 :PERSQUEUE DEBUG: partition.cpp:2337: [72075186224037894][Partition][0][StateIdle] Process user action and tx pending commits 2026-01-08T15:34:19.998615Z node 4 :PERSQUEUE DEBUG: partition.cpp:2388: [72075186224037894][Partition][0][StateIdle] Events: 0, PendingCommits: 0, PendingWrites: 0 2026-01-08T15:34:19.998629Z node 4 :PERSQUEUE DEBUG: partition.cpp:2347: [72075186224037894][Partition][0][StateIdle] Try persist 2026-01-08T15:34:20.023931Z node 4 :PERSQUEUE DEBUG: partition.cpp:2329: [72075186224037896][Partition][0][StateIdle] Process user action and tx events 2026-01-08T15:34:20.023963Z node 4 :PERSQUEUE DEBUG: partition.cpp:2388: [72075186224037896][Partition][0][StateIdle] Events: 0, PendingCommits: 0, PendingWrites: 0 2026-01-08T15:34:20.023979Z node 4 :PERSQUEUE DEBUG: partition.cpp:2337: [72075186224037896][Partition][0][StateIdle] Process user action and tx pending commits 2026-01-08T15:34:20.023999Z node 4 :PERSQUEUE DEBUG: partition.cpp:2388: [72075186224037896][Partition][0][StateIdle] Events: 0, PendingCommits: 0, PendingWrites: 0 2026-01-08T15:34:20.024013Z node 4 :PERSQUEUE DEBUG: partition.cpp:2347: [72075186224037896][Partition][0][StateIdle] Try persist 2026-01-08T15:34:20.591655Z node 3 :KQP_EXECUTER ERROR: {KQPEX@kqp_executer_impl.h:1259} ActorId: [3:7593014531073814108:2545] TxId: 281474976715687. Ctx: { TraceId: 01kef3x57y5cgeh41bzpxxkpd6, Database: /Root, SessionId: ydb://session/3?node_id=3&id=MTBlY2I1N2MtODc1Yzg4NjgtZjVmNWRkNjktNWNlNjU5MTM=, PoolId: default, IsStreamingQuery: 0}. UNAVAILABLE: Failed to send EvStartKqpTasksRequest because node is unavailable: 4 trace_id# 2026-01-08T15:34:20.592474Z node 3 :KQP_COMPUTE ERROR: dq_compute_actor_impl.h:1264: SelfId: [3:7593014531073814115:2545], TxId: 281474976715687, task: 3. Ctx: { CheckpointId : . TraceId : 01kef3x57y5cgeh41bzpxxkpd6. RunScriptActorId : [0:0:0]. CustomerSuppliedId : . PoolId : default. SessionId : ydb://session/3?node_id=3&id=MTBlY2I1N2MtODc1Yzg4NjgtZjVmNWRkNjktNWNlNjU5MTM=. CurrentExecutionId : . Database : /Root. DatabaseId : /Root. }. Handle abort execution event from: [3:7593014531073814108:2545], status: UNAVAILABLE, reason: {
: Error: Terminate execution } |87.3%| [TM] {BAZEL_UPLOAD} ydb/public/sdk/cpp/src/client/federated_topic/ut/unittest >> DataShardVolatile::DistributedUpsertRestartBeforePrepare+UseSink [GOOD] >> DataShardVolatile::DistributedUpsertRestartBeforePrepare-UseSink >> TxUsage::Sinks_Olap_WriteToTopicAndTable_2_Table >> TxUsage::WriteToTopic_Demo_18_RestartBeforeCommit_Table [GOOD] >> KqpRboYql::OrderBy [GOOD] >> KqpRboYql::LeftJoins >> TxUsage::WriteToTopic_Demo_20_RestartBeforeCommit_Table [GOOD] >> BasicUsage::CloseWriteSessionImmediately [GOOD] |87.3%| [TM] {asan, default-linux-x86_64, release} ydb/services/ext_index/ut/unittest |87.3%| [TM] {BAZEL_UPLOAD} ydb/services/ext_index/ut/unittest |87.3%| [TM] {asan, default-linux-x86_64, release} ydb/services/ext_index/ut/unittest |87.3%| [TM] {BAZEL_UPLOAD} ydb/services/ext_index/ut/unittest |87.3%| [TM] {asan, default-linux-x86_64, release} ydb/services/ext_index/ut/unittest |87.3%| [TM] {BAZEL_UPLOAD} ydb/services/ext_index/ut/unittest |87.3%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/public/lib/ydb_cli/topic/ut/ydb-public-lib-ydb_cli-topic-ut |87.3%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/public/lib/ydb_cli/topic/ut/ydb-public-lib-ydb_cli-topic-ut >> TxUsage::WriteToTopic_Demo_18_RestartBeforeCommit_Query |87.3%| [TA] {RESULT} $(B)/ydb/core/tx/schemeshard/ut_continuous_backup/test-results/unittest/{meta.json ... results_accumulator.log} |87.3%| [LD] {RESULT} $(B)/ydb/public/lib/ydb_cli/topic/ut/ydb-public-lib-ydb_cli-topic-ut >> TxUsage::Sinks_Oltp_WriteToTopics_1_Table [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/datashard/ut_stats/unittest >> DataShardStats::CollectKeySampleFollower [GOOD] Test command err: ... waiting for SysViewsRoster update finished 2026-01-08T15:27:49.562252Z node 1 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:125: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, got undelivered to scheme cache: ActorUnknown 2026-01-08T15:27:51.597684Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:114:2161], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2026-01-08T15:27:51.603614Z node 1 :METADATA_PROVIDER ERROR: log.cpp:841: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2026-01-08T15:27:51.606159Z node 1 :METADATA_PROVIDER ERROR: log.cpp:841: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2026-01-08T15:27:56.816344Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2026-01-08T15:27:56.816453Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2026-01-08T15:27:57.231444Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1374: Notification cookie mismatch for subscription [1:34:2081] 1767886049314501 != 1767886049314505 2026-01-08T15:27:57.547608Z node 1 :IMPORT WARN: schemeshard_import.cpp:440: Table profiles were not loaded 2026-01-08T15:27:58.033253Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2026-01-08T15:27:58.299805Z node 1 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: PathErrorUnknown, path: Root/.metadata/script_executions ... waiting for SysViewsRoster update finished (done) 2026-01-08T15:28:00.513296Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2026-01-08T15:28:00.668737Z node 1 :TX_CONVEYOR ERROR: log.cpp:841: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2026-01-08T15:28:01.213267Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:28:01.606145Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3119: StateInit, received event# 268828672, Sender [1:877:2759], Recipient [1:886:2765]: NKikimr::TEvTablet::TEvBoot 2026-01-08T15:28:01.618978Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3119: StateInit, received event# 268828673, Sender [1:877:2759], Recipient [1:886:2765]: NKikimr::TEvTablet::TEvRestored 2026-01-08T15:28:01.619227Z node 1 :TX_DATASHARD INFO: datashard.cpp:374: TDataShard::OnActivateExecutor: tablet 72075186224037888 actor [1:886:2765] 2026-01-08T15:28:01.619433Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:661: TxInitSchema.Execute 2026-01-08T15:28:02.159014Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateInactive, received event# 268828684, Sender [1:877:2759], Recipient [1:886:2765]: NKikimr::TEvTablet::TEvFollowerSyncComplete 2026-01-08T15:28:02.166164Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:727: TxInitSchema.Complete 2026-01-08T15:28:02.167984Z node 1 :TX_DATASHARD DEBUG: 
datashard__init.cpp:50: TDataShard::TTxInit::Execute 2026-01-08T15:28:02.211837Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1324: LoadChangeRecords: QueueSize: 0, at tablet: 72075186224037888 2026-01-08T15:28:02.212934Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1380: LoadLockChangeRecords at tablet: 72075186224037888 2026-01-08T15:28:02.213828Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1429: LoadChangeRecordCommits at tablet: 72075186224037888 2026-01-08T15:28:02.217858Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:94: TDataShard::TTxInit::Complete 2026-01-08T15:28:02.218513Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:102: TDataShard::TTxInitRestored::Execute 2026-01-08T15:28:02.219885Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:142: DataShard 72075186224037888 persisting started state actor id [1:901:2765] in generation 1 2026-01-08T15:28:02.236054Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:151: TDataShard::TTxInitRestored::Complete 2026-01-08T15:28:02.633946Z node 1 :TX_DATASHARD INFO: datashard.cpp:418: Switched to work state WaitScheme tabletId 72075186224037888 2026-01-08T15:28:02.635516Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:458: 72075186224037888 not sending time cast registration request in state WaitScheme: missing processing params 2026-01-08T15:28:02.637486Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1259: Change sender created: at tablet: 72075186224037888, actorId: [1:903:2775] 2026-01-08T15:28:02.646799Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1264: Trying to activate change sender: at tablet: 72075186224037888 2026-01-08T15:28:02.647296Z node 1 :TX_DATASHARD INFO: datashard.cpp:1281: Cannot activate change sender: at tablet: 72075186224037888, state: WaitScheme 2026-01-08T15:28:02.648160Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2026-01-08T15:28:02.650290Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3153: StateWork, received event# 2146435072, Sender [1:886:2765], Recipient [1:886:2765]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvProgressTransaction 2026-01-08T15:28:02.651732Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3178: StateWork, processing event TEvPrivate::TEvProgressTransaction 2026-01-08T15:28:02.654122Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 72075186224037888 2026-01-08T15:28:02.655057Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 72075186224037888 2026-01-08T15:28:02.655536Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2026-01-08T15:28:02.656473Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 0 2026-01-08T15:28:02.656935Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:385: Check unit PlanQueue at 72075186224037888 2026-01-08T15:28:02.656971Z node 1 :TX_DATASHARD TRACE: plan_queue_unit.cpp:59: TPlanQueueUnit at 72075186224037888 has no attached operations 2026-01-08T15:28:02.657002Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:403: Unit PlanQueue has no ready operations at 72075186224037888 2026-01-08T15:28:02.657035Z node 1 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186224037888 TxInFly 0 2026-01-08T15:28:02.657078Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 
72075186224037888 2026-01-08T15:28:02.661426Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3153: StateWork, received event# 269877761, Sender [1:887:2766], Recipient [1:886:2765]: NKikimr::TEvTabletPipe::TEvServerConnected 2026-01-08T15:28:02.661869Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3189: StateWork, processing event TEvTabletPipe::TEvServerConnected 2026-01-08T15:28:02.662881Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3740: Server connected at leader tablet# 72075186224037888, clientId# [1:882:2762], serverId# [1:887:2766], sessionId# [0:0:0] 2026-01-08T15:28:02.664655Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3153: StateWork, received event# 269549568, Sender [1:400:2399], Recipient [1:887:2766] 2026-01-08T15:28:02.665501Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3159: StateWork, processing event TEvDataShard::TEvProposeTransaction 2026-01-08T15:28:02.666833Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037888 2026-01-08T15:28:02.670649Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1848: Trying to execute [0:281474976715657] at 72075186224037888 on unit CheckSchemeTx 2026-01-08T15:28:02.671664Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:133: Propose scheme transaction at tablet 72075186224037888 txId 281474976715657 ssId 72057594046644480 seqNo 2:1 2026-01-08T15:28:02.672210Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:221: Prepared scheme transaction txId 281474976715657 at tablet 72075186224037888 2026-01-08T15:28:02.673020Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1883: Execution status for [0:281474976715657] at 72075186224037888 is ExecutedNoMoreRestarts 2026-01-08T15:28:02.674219Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1931: Advance execution plan for [0:281474976715657] at 72075186224037888 executing on unit CheckSchemeTx 2026-01-08T15:28:02.675162Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1937: Add [0:281474976715657] at 72075186224037888 to execution unit StoreSchemeTx 2026-01-08T15:28:02.675655Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1848: Trying to execute [0:281474976715657] at 72075186224037888 on unit StoreSchemeTx 2026-01-08T15:28:02.683516Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1883: Execution status for [0:281474976715657] at 72075186224037888 is DelayCompleteNoMoreRestarts 2026-01-08T15:28:02.683562Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1931: Advance execution plan for [0:281474976715657] at 72075186224037888 executing on unit StoreSchemeTx 2026-01-08T15:28:02.684508Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1937: Add [0:281474976715657] at 72075186224037888 to execution unit FinishPropose 2026-01-08T15:28:02.685011Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1848: Trying to execute [0:281474976715657] at 72075186224037888 on unit FinishPropose 2026-01-08T15:28:02.685890Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1883: Execution status for [0:281474976715657] at 72075186224037888 is DelayComplete 2026-01-08T15:28:02.685915Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1931: Advance execution plan for [0:281474976715657] at 72075186224037888 executing on unit FinishPropose 2026-01-08T15:28:02.686370Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1937: Add [0:281474976715657] at 72075186224037888 to execution unit WaitForPlan 2026-01-08T15:28:02.686859Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1848: Trying to execute [0:281474976715657] at 
72075186224037888 on unit WaitForPlan 2026-01-08T15:28:02.687706Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1853: Operation [0:281474976715657] at 72075186224037888 is not ready to execute on unit WaitForPlan 2026-01-08T15:28:02.710274Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3153: StateWork, received event# 269746185, Sender [1:904:2776], Recipient [1:886:2765]: NKikimr::TEvTxProxySchemeCache::TEvWatchNotifyUpdated 2026-01-08T15:28:02.710341Z node 1 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:180: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2026-01-08T15:28:02.729033Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 72075186224037888 2026-01-08T15:28:02.729977Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1954: Complete execution for [0:281474 ... 46644480 FollowerId: 1 2026-01-08T15:34:09.563619Z node 16 :TX_DATASHARD TRACE: datashard_impl.h:3153: StateWork, received event# 2146435079, Sender [0:0:0], Recipient [16:886:2765]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvPeriodicWakeup 2026-01-08T15:34:09.574147Z node 16 :TX_DATASHARD TRACE: datashard_impl.h:3295: StateWorkAsFollower, received event# 2146435079, Sender [0:0:0], Recipient [16:924:2786]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvPeriodicWakeup 2026-01-08T15:34:09.574249Z node 16 :TX_DATASHARD TRACE: datashard_impl.h:3312: StateWorkAsFollower, processing event TEvPrivate::TEvPeriodicWakeup 2026-01-08T15:34:10.428210Z node 16 :TX_DATASHARD TRACE: datashard_impl.h:3153: StateWork, received event# 2146435079, Sender [0:0:0], Recipient [16:886:2765]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvPeriodicWakeup 2026-01-08T15:34:10.428516Z node 16 :TX_DATASHARD TRACE: datashard_impl.h:3473: TEvPeriodicTableStats from datashard 72075186224037888, FollowerId 0, tableId 38 2026-01-08T15:34:10.432848Z node 16 :TX_DATASHARD DEBUG: datashard__compaction.cpp:203: CompactionComplete of tablet# 72075186224037888, table# 1001, finished edge# 0, ts 1970-01-01T00:00:00.000000Z 2026-01-08T15:34:10.432922Z node 16 :TX_DATASHARD DEBUG: datashard__compaction.cpp:240: ReplyCompactionWaiters of tablet# 72075186224037888, table# 1001, finished edge# 0, front# 0 2026-01-08T15:34:10.433518Z node 16 :TX_DATASHARD TRACE: datashard_impl.h:3295: StateWorkAsFollower, received event# 268828680, Sender [16:916:2782], Recipient [16:924:2786]: NKikimr::TEvTablet::TEvFUpdate 2026-01-08T15:34:10.433894Z node 16 :TX_DATASHARD TRACE: datashard_impl.h:3295: StateWorkAsFollower, received event# 268828680, Sender [16:916:2782], Recipient [16:924:2786]: NKikimr::TEvTablet::TEvFUpdate 2026-01-08T15:34:10.434668Z node 16 :TX_DATASHARD TRACE: datashard_impl.h:3153: StateWork, received event# 268828683, Sender [16:877:2759], Recipient [16:886:2765]: NKikimr::TEvTablet::TEvFollowerGcApplied 2026-01-08T15:34:10.445716Z node 16 :TX_DATASHARD TRACE: datashard_impl.h:3295: StateWorkAsFollower, received event# 2146435079, Sender [0:0:0], Recipient [16:924:2786]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvPeriodicWakeup 2026-01-08T15:34:10.445821Z node 16 :TX_DATASHARD TRACE: datashard_impl.h:3312: StateWorkAsFollower, processing event TEvPrivate::TEvPeriodicWakeup 2026-01-08T15:34:10.446006Z node 16 :TX_DATASHARD TRACE: datashard_impl.h:3473: TEvPeriodicTableStats from datashard 72075186224037888, FollowerId 1, tableId 38 2026-01-08T15:34:11.359816Z node 16 :TX_DATASHARD TRACE: datashard_impl.h:3153: 
StateWork, received event# 2146435073, Sender [0:0:0], Recipient [16:886:2765]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvCleanupTransaction 2026-01-08T15:34:11.359924Z node 16 :TX_DATASHARD TRACE: datashard_impl.h:3179: StateWork, processing event TEvPrivate::TEvCleanupTransaction 2026-01-08T15:34:11.360058Z node 16 :TX_DATASHARD TRACE: datashard_pipeline.cpp:214: No cleanup at 72075186224037888 outdated step 76000 last cleanup 0 2026-01-08T15:34:11.360131Z node 16 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 (dry run) active 0 active planned 0 immediate 0 planned 0 2026-01-08T15:34:11.360186Z node 16 :TX_DATASHARD TRACE: datashard_pipeline.cpp:385: Check unit PlanQueue at 72075186224037888 2026-01-08T15:34:11.360266Z node 16 :TX_DATASHARD TRACE: plan_queue_unit.cpp:59: TPlanQueueUnit at 72075186224037888 has no attached operations 2026-01-08T15:34:11.360327Z node 16 :TX_DATASHARD TRACE: datashard_pipeline.cpp:403: Unit PlanQueue has no ready operations at 72075186224037888 2026-01-08T15:34:11.360518Z node 16 :TX_DATASHARD TRACE: datashard_impl.h:3153: StateWork, received event# 2146435079, Sender [0:0:0], Recipient [16:886:2765]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvPeriodicWakeup 2026-01-08T15:34:11.372382Z node 16 :TX_DATASHARD TRACE: datashard_impl.h:3295: StateWorkAsFollower, received event# 2146435079, Sender [0:0:0], Recipient [16:924:2786]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvPeriodicWakeup 2026-01-08T15:34:11.372465Z node 16 :TX_DATASHARD TRACE: datashard_impl.h:3312: StateWorkAsFollower, processing event TEvPrivate::TEvPeriodicWakeup 2026-01-08T15:34:12.715680Z node 16 :TX_DATASHARD TRACE: datashard_impl.h:3295: StateWorkAsFollower, received event# 269553215, Sender [16:1626:3417], Recipient [16:924:2786]: NKikimrTxDataShard.TEvRead ReadId: 0 TableId { OwnerId: 72057594046644480 TableId: 38 SchemaVersion: 1 } Columns: 1 ResultFormat: FORMAT_CELLVEC MaxRows: 1001 MaxBytes: 5242880 Reverse: false TotalRowsLimit: 1001 KeysSize: 3 2026-01-08T15:34:12.715823Z node 16 :TX_DATASHARD TRACE: datashard_impl.h:3305: StateWorkAsFollower, processing event TEvDataShard::TEvRead 2026-01-08T15:34:12.716013Z node 16 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2697: TTxReadViaPipeline execute: at tablet# 72075186224037888, FollowerId 1 2026-01-08T15:34:12.716098Z node 16 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2800: 72075186224037888 changed HEAD read to repeatable v2500/18446744073709551615 2026-01-08T15:34:12.716201Z node 16 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1848: Trying to execute [0:5] at 72075186224037888 on unit CheckRead 2026-01-08T15:34:12.716361Z node 16 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1883: Execution status for [0:5] at 72075186224037888 is Executed 2026-01-08T15:34:12.716442Z node 16 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1931: Advance execution plan for [0:5] at 72075186224037888 executing on unit CheckRead 2026-01-08T15:34:12.716505Z node 16 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1937: Add [0:5] at 72075186224037888 to execution unit BuildAndWaitDependencies 2026-01-08T15:34:12.716560Z node 16 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1848: Trying to execute [0:5] at 72075186224037888 on unit BuildAndWaitDependencies 2026-01-08T15:34:12.716612Z node 16 :TX_DATASHARD TRACE: datashard_pipeline.cpp:483: Activated operation [0:5] at 72075186224037888 2026-01-08T15:34:12.716676Z node 16 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1883: Execution status for [0:5] at 
72075186224037888 is Executed 2026-01-08T15:34:12.716710Z node 16 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1931: Advance execution plan for [0:5] at 72075186224037888 executing on unit BuildAndWaitDependencies 2026-01-08T15:34:12.716735Z node 16 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1937: Add [0:5] at 72075186224037888 to execution unit ExecuteRead 2026-01-08T15:34:12.716762Z node 16 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1848: Trying to execute [0:5] at 72075186224037888 on unit ExecuteRead 2026-01-08T15:34:12.716912Z node 16 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:1748: 72075186224037888 Execute read# 1, request: { ReadId: 0 TableId { OwnerId: 72057594046644480 TableId: 38 SchemaVersion: 1 } Columns: 1 ResultFormat: FORMAT_CELLVEC MaxRows: 1001 MaxBytes: 5242880 Reverse: false TotalRowsLimit: 1001 } 2026-01-08T15:34:12.723177Z node 16 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1883: Execution status for [0:5] at 72075186224037888 is Restart 2026-01-08T15:34:12.723275Z node 16 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2932: TTxReadViaPipeline(69) Execute with status# Restart at tablet# 72075186224037888 2026-01-08T15:34:12.727839Z node 16 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2697: TTxReadViaPipeline execute: at tablet# 72075186224037888, FollowerId 1 2026-01-08T15:34:12.727934Z node 16 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1848: Trying to execute [0:5] at 72075186224037888 on unit ExecuteRead 2026-01-08T15:34:12.728093Z node 16 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:1748: 72075186224037888 Execute read# 2, request: { ReadId: 0 TableId { OwnerId: 72057594046644480 TableId: 38 SchemaVersion: 1 } Columns: 1 ResultFormat: FORMAT_CELLVEC MaxRows: 1001 MaxBytes: 5242880 Reverse: false TotalRowsLimit: 1001 } 2026-01-08T15:34:12.728467Z node 16 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2396: 72075186224037888 Complete read# {[16:1626:3417], 0} after executionsCount# 2 2026-01-08T15:34:12.728573Z node 16 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2370: 72075186224037888 read iterator# {[16:1626:3417], 0} sends rowCount# 3, bytes# 48, quota rows left# 998, quota bytes left# 5242832, hasUnreadQueries# 0, total queries# 3, firstUnprocessed# 0 2026-01-08T15:34:12.728723Z node 16 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2421: 72075186224037888 read iterator# {[16:1626:3417], 0} finished in read 2026-01-08T15:34:12.728841Z node 16 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1883: Execution status for [0:5] at 72075186224037888 is Executed 2026-01-08T15:34:12.728883Z node 16 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1931: Advance execution plan for [0:5] at 72075186224037888 executing on unit ExecuteRead 2026-01-08T15:34:12.728919Z node 16 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1937: Add [0:5] at 72075186224037888 to execution unit CompletedOperations 2026-01-08T15:34:12.728958Z node 16 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1848: Trying to execute [0:5] at 72075186224037888 on unit CompletedOperations 2026-01-08T15:34:12.729014Z node 16 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1883: Execution status for [0:5] at 72075186224037888 is Executed 2026-01-08T15:34:12.729044Z node 16 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1931: Advance execution plan for [0:5] at 72075186224037888 executing on unit CompletedOperations 2026-01-08T15:34:12.729086Z node 16 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1943: Execution plan for [0:5] at 72075186224037888 has finished 2026-01-08T15:34:12.729151Z node 16 
:TX_DATASHARD TRACE: datashard__read_iterator.cpp:2932: TTxReadViaPipeline(69) Execute with status# Executed at tablet# 72075186224037888 2026-01-08T15:34:12.729324Z node 16 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2981: TTxReadViaPipeline(69) Complete: at tablet# 72075186224037888 2026-01-08T15:34:12.730893Z node 16 :TX_DATASHARD TRACE: datashard_impl.h:3295: StateWorkAsFollower, received event# 269553219, Sender [16:1626:3417], Recipient [16:924:2786]: NKikimrTxDataShard.TEvReadCancel ReadId: 0 2026-01-08T15:34:12.730968Z node 16 :TX_DATASHARD TRACE: datashard_impl.h:3308: StateWorkAsFollower, processing event TEvDataShard::TEvReadCancel 2026-01-08T15:34:12.731077Z node 16 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:3668: 72075186224037888 ReadCancel: { ReadId: 0 } { items { uint32_value: 42 } }, { items { uint32_value: 44 } }, { items { uint32_value: 46 } } TEST 9: EvGetTableStats(collectKeySample=false) after the collected key sample becomes invalid TEST Sending the EvGetTableStats message to the tablet 72075186224037888, tableId=38, collectKeySample=0, toFollower=1 2026-01-08T15:34:12.733729Z node 16 :TX_DATASHARD TRACE: datashard_impl.h:3295: StateWorkAsFollower, received event# 269877761, Sender [16:1629:3421], Recipient [16:924:2786]: NKikimr::TEvTabletPipe::TEvServerConnected 2026-01-08T15:34:12.733827Z node 16 :TX_DATASHARD TRACE: datashard_impl.h:3303: StateWorkAsFollower, processing event TEvTabletPipe::TEvServerConnected 2026-01-08T15:34:12.733933Z node 16 :TX_DATASHARD DEBUG: datashard.cpp:3740: Server connected at follower 1 tablet# 72075186224037888, clientId# [16:1627:3419], serverId# [16:1629:3421], sessionId# [0:0:0] 2026-01-08T15:34:12.734163Z node 16 :TX_DATASHARD TRACE: datashard_impl.h:3295: StateWorkAsFollower, received event# 269553160, Sender [16:829:2724], Recipient [16:924:2786]: NKikimrTxDataShard.TEvGetTableStats TableId: 38 CollectKeySample: false TEST Received the TEvGetTableStatsResult response from the tablet 72075186224037888, tableId=38, collectKeySample=0, toFollower=1 DatashardId: 72075186224037888 TableLocalId: 38 TableStats { InMemSize: 0 LastAccessTime: 80536 LastUpdateTime: 0 } TabletMetrics { CPU: 186 } FullStatsReady: false TableOwnerId: 72057594046644480 FollowerId: 1 |87.3%| [TM] {BAZEL_UPLOAD} ydb/core/tx/datashard/ut_stats/unittest >> TxUsage::WriteToTopic_Demo_20_RestartBeforeCommit_Query >> YdbTableSplit::SplitByLoadWithReads >> YdbTableSplit::SplitByLoadWithDeletes >> YdbTableSplit::SplitByLoadWithNonEmptyRangeReads >> KqpRboPg::FallbackToYqlDisabled [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/public/sdk/cpp/src/client/federated_topic/ut/unittest >> BasicUsage::CloseWriteSessionImmediately [GOOD] Test command err: 2026-01-08T15:33:50.592034Z :BasicWriteSession INFO: Random seed for debugging is 1767886430591986 2026-01-08T15:33:51.074299Z node 1 :METADATA_PROVIDER WARN: log.cpp:841: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7593014406709498761:2208];send_to=[0:7307199536658146131:7762515]; 2026-01-08T15:33:51.079849Z node 1 :METADATA_PROVIDER ERROR: log.cpp:841: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2026-01-08T15:33:51.102604Z node 1 :BS_NODE WARN: {NWDC01@distconf.cpp:423} StateFunc too long Type# 268639238 Duration# 0.005336s 2026-01-08T15:33:51.130343Z node 1 :PQ_READ_PROXY DEBUG: caching_service.cpp:44: Direct read cache: : Created 
2026-01-08T15:33:51.143713Z node 2 :METADATA_PROVIDER WARN: log.cpp:841: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7593014405576392979:2077];send_to=[0:7307199536658146131:7762515]; 2026-01-08T15:33:51.143794Z node 2 :METADATA_PROVIDER ERROR: log.cpp:841: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2026-01-08T15:33:51.420828Z node 2 :PQ_READ_PROXY DEBUG: caching_service.cpp:44: Direct read cache: : Created 2026-01-08T15:33:51.681770Z node 2 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: LookupError, path: Root/.metadata/script_executions 2026-01-08T15:33:51.680730Z node 1 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: LookupError, path: Root/.metadata/script_executions 2026-01-08T15:33:51.886651Z node 1 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: LookupError, path: Root/.metadata/script_executions 2026-01-08T15:33:51.926867Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2026-01-08T15:33:51.926967Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2026-01-08T15:33:51.972177Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2026-01-08T15:33:51.972247Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2026-01-08T15:33:52.060917Z node 2 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: LookupError, path: Root/.metadata/script_executions 2026-01-08T15:33:52.067338Z node 1 :TX_CONVEYOR ERROR: log.cpp:841: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2026-01-08T15:33:52.069700Z node 1 :IMPORT WARN: schemeshard_import.cpp:440: Table profiles were not loaded 2026-01-08T15:33:52.184287Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2026-01-08T15:33:52.215704Z node 1 :HIVE WARN: hive_impl.cpp:814: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 2 Cookie 2 2026-01-08T15:33:52.230815Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2026-01-08T15:33:52.316120Z node 2 :TX_CONVEYOR ERROR: log.cpp:841: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2026-01-08T15:33:52.434716Z node 1 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: PathErrorUnknown, path: Root/.metadata/script_executions 2026-01-08T15:33:52.541555Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: /home/runner/.ya/build/build_root/663n/000215/r3tmp/yandexXOA7UU.tmp 2026-01-08T15:33:52.541585Z 
node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: /home/runner/.ya/build/build_root/663n/000215/r3tmp/yandexXOA7UU.tmp 2026-01-08T15:33:52.541776Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:202: successfully initialized from file: /home/runner/.ya/build/build_root/663n/000215/r3tmp/yandexXOA7UU.tmp 2026-01-08T15:33:52.541878Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2026-01-08T15:33:52.663387Z node 2 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: PathErrorUnknown, path: Root/.metadata/script_executions 2026-01-08T15:33:52.697277Z INFO: TTestServer started on Port 23556 GrpcPort 20748 PQClient connected to localhost:20748 2026-01-08T15:33:53.214043Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2026-01-08T15:33:56.074437Z node 1 :METADATA_PROVIDER ERROR: log.cpp:841: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7593014406709498761:2208];send_to=[0:7307199536658146131:7762515]; 2026-01-08T15:33:56.074508Z node 1 :METADATA_PROVIDER ERROR: log.cpp:841: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2026-01-08T15:33:56.144244Z node 2 :METADATA_PROVIDER ERROR: log.cpp:841: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[2:7593014405576392979:2077];send_to=[0:7307199536658146131:7762515]; 2026-01-08T15:33:56.144325Z node 2 :METADATA_PROVIDER ERROR: log.cpp:841: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2026-01-08T15:33:57.002881Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7593014427051229790:2304], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:33:57.002906Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7593014427051229801:2307], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:33:57.003077Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:263: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:33:57.005332Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7593014431346197103:2309], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:33:57.005447Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:263: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:33:57.013151Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976720657:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2026-01-08T15:33:57.038267Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7593014431346197102:2308], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976720657 completed, doublechecking } 2026-01-08T15:33:57.190843Z node 2 :TX_PROXY ERROR: schemereq.cpp:624: Actor# [2:7593014431346197132:2141] txid# 281474976720658, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 44], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2026-01-08T15:33:57.629878Z node 1 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:625: Compilation failed, self: [1:7593014432479303796:2339], status: SCHEME_ERROR, issues:
: Error: Type annotation, code: 1030
:3:16: Error: At function: KiReadTable!
:3:16: Error: Cannot find table 'db.[/Root/PQ/Config/V2/Cluster]' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions., code: 2003 2026-01-08T15:33:57.630783Z node 1 :KQP_SESSION WARN: {KQPSA@kqp_session_actor.cpp:2679} SessionId: ydb://session/3?node_id=1&id=NjQxNTU2OGItM2I3Y2MyOWUtZTdhOTMwYi1mMmJmZjU4MQ==, ActorId: [1:7593014432479303743:2329], ActorState: ExecuteState, LegacyTraceId: 01kef3wepb9gr6c2k01fwsxmqe, ReplyQueryCompileError, remove tx status# SCHEME_ERROR issues# { message: "Type annotation" issue_code: 1030 severity: 1 issues { position { row: 3 column: 16 } message: "At function: KiReadTable!" end_position { row: 3 column: 16 } severity: 1 issues { position { row: 3 column: 16 } message: "Cannot find table \'db.[/Root/PQ/Config/V2/Cluster]\' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions." end_position { row: 3 column: 16 } issue_code: 2003 severity: 1 } } } tx_id# trace_id# 2026-01-08T15:33:57.633034Z node 1 :PERSQUEUE_CLUSTER_TRACKER ERROR: cluster_tracker.cpp:167: failed to list clusters: { Response { QueryIssues { message: "Type annotation" issue_code: 1030 severity: 1 issues { position { row: 3 column: 16 } message: "At function: KiReadTable!" end_position { row: 3 column: 16 } severity: 1 issues { position { row: 3 column: 16 } message: "Cannot find table \'db.[/Root/PQ/Config/V2/Cluster]\' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions." end_position { row: 3 column: 16 } issue_code: 2003 severity: 1 } } } TxMeta { } } YdbStatus: SCHEME_ERROR ConsumedRu: 1 } 2026-01-08T15:33:57.632637Z node 2 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:625: Compilation failed, self: [2:7593014431346197141:2314], status: SCHEME_ERROR, issues:
: Error: Type annotation, code: 1030
:3:16: Error: At function: KiReadTable!
:3:16: Error: Cannot find table 'db.[/Root/PQ/Config/V2/Cluster]' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions., code: 2003 2026-01-08T15:33:57.634115Z node 2 :KQP_SESSION WARN: {KQPSA@kqp_session_actor.cpp:2679} SessionId: ydb://session/3?node_id=2&id=OTMwNjRiZDAtMmU4OWVhOGItMzk5YzlhZGMtZjJhYmYzMmM=, ActorId: [2:7593014427051229782:2303], ActorState: ExecuteState, LegacyTraceId: 01kef3wem2e9kj0errq0jmdbjg, ReplyQueryCompileError, remove tx status# SCHEME_ERROR issues# { message: "Type annotation" issue_code: 1030 severity: 1 issues { position { row: 3 column: 16 ... UE_READ_BALANCER INFO: read_balancer__balancing.cpp:1672: [72075186224037893][rt3.dc1--test-topic] pipe [3:7593014541064660828:2467] disconnected; active server actors: 1 2026-01-08T15:34:22.260465Z node 3 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1697: [72075186224037893][rt3.dc1--test-topic] pipe [3:7593014541064660828:2467] disconnected no session 2026-01-08T15:34:22.314628Z node 4 :PERSQUEUE DEBUG: partition.cpp:2329: [72075186224037892][Partition][0][StateIdle] Process user action and tx events 2026-01-08T15:34:22.314664Z node 4 :PERSQUEUE DEBUG: partition.cpp:2388: [72075186224037892][Partition][0][StateIdle] Events: 0, PendingCommits: 0, PendingWrites: 0 2026-01-08T15:34:22.314679Z node 4 :PERSQUEUE DEBUG: partition.cpp:2337: [72075186224037892][Partition][0][StateIdle] Process user action and tx pending commits 2026-01-08T15:34:22.314703Z node 4 :PERSQUEUE DEBUG: partition.cpp:2388: [72075186224037892][Partition][0][StateIdle] Events: 0, PendingCommits: 0, PendingWrites: 0 2026-01-08T15:34:22.314728Z node 4 :PERSQUEUE DEBUG: partition.cpp:2347: [72075186224037892][Partition][0][StateIdle] Try persist 2026-01-08T15:34:22.376098Z node 3 :PQ_PARTITION_CHOOSER DEBUG: partition_chooser_impl__abstract_chooser_actor.h:183: TPartitionChooser [3:7593014541064660788:2467] (SourceId=src, PreferedPartition=(NULL)) HandleUpdate PartitionPersisted=0 Status=SUCCESS 2026-01-08T15:34:22.376139Z node 3 :PQ_PARTITION_CHOOSER DEBUG: partition_chooser_impl__abstract_chooser_actor.h:305: TPartitionChooser [3:7593014541064660788:2467] (SourceId=src, PreferedPartition=(NULL)) ReplyResult: Partition=0, SeqNo=(NULL) 2026-01-08T15:34:22.376157Z node 3 :PQ_PARTITION_CHOOSER DEBUG: partition_chooser_impl__abstract_chooser_actor.h:268: TPartitionChooser [3:7593014541064660788:2467] (SourceId=src, PreferedPartition=(NULL)) Start idle 2026-01-08T15:34:22.376185Z node 3 :PQ_WRITE_PROXY DEBUG: write_session_actor.cpp:707: ProceedPartition. 
session cookie: 1 sessionId: partition: 0 expectedGeneration: (NULL) 2026-01-08T15:34:22.378498Z node 3 :PQ_WRITE_PROXY DEBUG: writer.cpp:839: TPartitionWriter 72075186224037892 (partition=0) TEvClientConnected Status OK, TabletId: 72075186224037892, NodeId 4, Generation: 1 2026-01-08T15:34:22.380114Z node 3 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:884: session inited cookie: 1 partition: 0 MaxSeqNo: 0 sessionId: src|46dfbf0a-f864e96c-41541516-33cc10dd_0 2026-01-08T15:34:22.378239Z node 4 :PERSQUEUE DEBUG: pq_impl.cpp:2784: [PQ: 72075186224037892] server connected, pipe [3:7593014541064660853:2467], now have 1 active actors on pipe 2026-01-08T15:34:22.378852Z node 4 :PERSQUEUE DEBUG: pq_impl.cpp:147: Handle TEvRequest topic: 'rt3.dc1--test-topic' requestId: 2026-01-08T15:34:22.378878Z node 4 :PERSQUEUE DEBUG: pq_impl.cpp:2698: [PQ: 72075186224037892] got client message batch for topic 'rt3.dc1--test-topic' partition 0 2026-01-08T15:34:22.378954Z node 4 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie src|46dfbf0a-f864e96c-41541516-33cc10dd_0 generated for partition 0 topic 'rt3.dc1--test-topic' owner src 2026-01-08T15:34:22.378994Z node 4 :PERSQUEUE DEBUG: partition.cpp:2329: [72075186224037892][Partition][0][StateIdle] Process user action and tx events 2026-01-08T15:34:22.379009Z node 4 :PERSQUEUE DEBUG: partition.cpp:2423: [72075186224037892][Partition][0][StateIdle] TPartition::ProcessUserActionAndTxEvent(TMessage) 2026-01-08T15:34:22.382474Z :INFO: [] MessageGroupId [src] SessionId [] Counters: { Errors: 0 CurrentSessionLifetimeMs: 1767886462382 BytesWritten: 0 MessagesWritten: 0 BytesWrittenCompressed: 0 BytesInflightUncompressed: 0 BytesInflightCompressed: 0 BytesInflightTotal: 0 MessagesInflight: 0 } 2026-01-08T15:34:22.379032Z node 4 :PERSQUEUE DEBUG: partition.cpp:2388: [72075186224037892][Partition][0][StateIdle] Events: 0, PendingCommits: 1, PendingWrites: 0 2026-01-08T15:34:22.379044Z node 4 :PERSQUEUE DEBUG: partition.cpp:2337: [72075186224037892][Partition][0][StateIdle] Process user action and tx pending commits 2026-01-08T15:34:22.379066Z node 4 :PERSQUEUE DEBUG: partition.cpp:2487: [72075186224037892][Partition][0][StateIdle] TPartition::ProcessUserActionAndTxPendingCommit(TMessage) 2026-01-08T15:34:22.379095Z node 4 :PERSQUEUE DEBUG: partition.cpp:2388: [72075186224037892][Partition][0][StateIdle] Events: 0, PendingCommits: 0, PendingWrites: 1 2026-01-08T15:34:22.382601Z :INFO: [] MessageGroupId [src] SessionId [] Write session established. Init response: session_id: "src|46dfbf0a-f864e96c-41541516-33cc10dd_0" topic: "test-topic" cluster: "dc1" supported_codecs: CODEC_RAW supported_codecs: CODEC_GZIP supported_codecs: CODEC_LZOP 2026-01-08T15:34:22.379108Z node 4 :PERSQUEUE DEBUG: partition.cpp:2342: [72075186224037892][Partition][0][StateIdle] Batch completed (1) 2026-01-08T15:34:22.379119Z node 4 :PERSQUEUE DEBUG: partition.cpp:2347: [72075186224037892][Partition][0][StateIdle] Try persist 2026-01-08T15:34:22.382760Z :INFO: [] MessageGroupId [src] SessionId [src|46dfbf0a-f864e96c-41541516-33cc10dd_0] Write session: close. 
Timeout = 0 ms 2026-01-08T15:34:22.382809Z :INFO: [] MessageGroupId [src] SessionId [src|46dfbf0a-f864e96c-41541516-33cc10dd_0] Write session will now close 2026-01-08T15:34:22.382838Z :DEBUG: [] MessageGroupId [src] SessionId [src|46dfbf0a-f864e96c-41541516-33cc10dd_0] Write session: aborting 2026-01-08T15:34:22.383165Z :INFO: [] MessageGroupId [src] SessionId [src|46dfbf0a-f864e96c-41541516-33cc10dd_0] Write session: gracefully shut down, all writes complete 2026-01-08T15:34:22.383206Z :DEBUG: [] MessageGroupId [src] SessionId [src|46dfbf0a-f864e96c-41541516-33cc10dd_0] Write session: destroy 2026-01-08T15:34:22.383818Z node 3 :PQ_WRITE_PROXY DEBUG: write_session_actor.cpp:254: session v1 cookie: 1 sessionId: src|46dfbf0a-f864e96c-41541516-33cc10dd_0 grpc read done: success: 0 data: 2026-01-08T15:34:22.383837Z node 3 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:256: session v1 cookie: 1 sessionId: src|46dfbf0a-f864e96c-41541516-33cc10dd_0 grpc read failed 2026-01-08T15:34:22.383862Z node 3 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:232: session v1 cookie: 1 sessionId: src|46dfbf0a-f864e96c-41541516-33cc10dd_0 grpc closed 2026-01-08T15:34:22.383877Z node 3 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:304: session v1 cookie: 1 sessionId: src|46dfbf0a-f864e96c-41541516-33cc10dd_0 is DEAD 2026-01-08T15:34:22.384637Z node 3 :PQ_WRITE_PROXY DEBUG: writer.cpp:562: TPartitionWriter 72075186224037892 (partition=0) Received event: NActors::TEvents::TEvPoison 2026-01-08T15:34:22.379160Z node 4 :PERSQUEUE DEBUG: partition_write.cpp:37: [72075186224037892][Partition][0][StateIdle] TPartition::ReplyOwnerOk. Partition: 0 2026-01-08T15:34:22.379207Z node 4 :PERSQUEUE DEBUG: pq_impl.cpp:182: Answer ok topic: 'rt3.dc1--test-topic' partition: 0 messageNo: 0 requestId: cookie: 0 2026-01-08T15:34:22.379753Z node 4 :PERSQUEUE DEBUG: pq_impl.cpp:147: Handle TEvRequest topic: 'rt3.dc1--test-topic' requestId: 2026-01-08T15:34:22.379772Z node 4 :PERSQUEUE DEBUG: pq_impl.cpp:2698: [PQ: 72075186224037892] got client message batch for topic 'rt3.dc1--test-topic' partition 0 2026-01-08T15:34:22.379844Z node 4 :PERSQUEUE DEBUG: pq_impl.cpp:182: Answer ok topic: 'rt3.dc1--test-topic' partition: 0 messageNo: 0 requestId: cookie: 0 2026-01-08T15:34:22.386187Z node 4 :PERSQUEUE DEBUG: pq_impl.cpp:2809: [PQ: 72075186224037892] server disconnected, pipe [3:7593014541064660853:2467] destroyed 2026-01-08T15:34:22.386222Z node 4 :PERSQUEUE DEBUG: partition_write.cpp:140: [72075186224037892][Partition][0][StateIdle] TPartition::DropOwner. 
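The write-session trace above (TPartitionChooser picking partition 0, TPartitionWriter connecting to tablet 72075186224037892, "session inited", then "Write session: close. Timeout = 0 ms" and TPartition::DropOwner) is the server-side view of an ordinary topic write followed by an immediate close. A minimal client-side sketch that would drive this sequence, assuming the in-tree NYdb::NTopic C++ client; the header path, the helper function, and the payload are illustrative and may differ between SDK revisions:

    #include <ydb/public/sdk/cpp/client/ydb_topic/topic.h>

    using namespace NYdb;
    using namespace NYdb::NTopic;

    // Hedged sketch, not taken from the test source: writes one message to
    // "test-topic" (resolved server-side to rt3.dc1--test-topic) under message
    // group "src", then closes with a zero timeout, which yields the
    // "Write session: close. Timeout = 0 ms" / "aborting" lines above.
    void WriteOnce(TDriver& driver) {
        TTopicClient client(driver);
        auto session = client.CreateSimpleBlockingWriteSession(
            TWriteSessionSettings()
                .Path("test-topic")
                .MessageGroupId("src"));   // server generates owner cookie "src|..._0"
        session->Write("payload");         // drives partition choice and TPartitionWriter
        session->Close(TDuration::Zero()); // zero timeout: abort pending writes, as logged
    }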
2026-01-08T15:34:22.386246Z node 4 :PERSQUEUE DEBUG: partition.cpp:2329: [72075186224037892][Partition][0][StateIdle] Process user action and tx events 2026-01-08T15:34:22.386260Z node 4 :PERSQUEUE DEBUG: partition.cpp:2388: [72075186224037892][Partition][0][StateIdle] Events: 0, PendingCommits: 0, PendingWrites: 0 2026-01-08T15:34:22.386272Z node 4 :PERSQUEUE DEBUG: partition.cpp:2337: [72075186224037892][Partition][0][StateIdle] Process user action and tx pending commits 2026-01-08T15:34:22.386288Z node 4 :PERSQUEUE DEBUG: partition.cpp:2388: [72075186224037892][Partition][0][StateIdle] Events: 0, PendingCommits: 0, PendingWrites: 0 2026-01-08T15:34:22.386298Z node 4 :PERSQUEUE DEBUG: partition.cpp:2347: [72075186224037892][Partition][0][StateIdle] Try persist 2026-01-08T15:34:22.418581Z node 4 :PERSQUEUE DEBUG: partition.cpp:2329: [72075186224037892][Partition][0][StateIdle] Process user action and tx events 2026-01-08T15:34:22.418621Z node 4 :PERSQUEUE DEBUG: partition.cpp:2388: [72075186224037892][Partition][0][StateIdle] Events: 0, PendingCommits: 0, PendingWrites: 0 2026-01-08T15:34:22.418645Z node 4 :PERSQUEUE DEBUG: partition.cpp:2337: [72075186224037892][Partition][0][StateIdle] Process user action and tx pending commits 2026-01-08T15:34:22.418706Z node 4 :PERSQUEUE DEBUG: partition.cpp:2388: [72075186224037892][Partition][0][StateIdle] Events: 0, PendingCommits: 0, PendingWrites: 0 2026-01-08T15:34:22.418724Z node 4 :PERSQUEUE DEBUG: partition.cpp:2347: [72075186224037892][Partition][0][StateIdle] Try persist Session was created 2026-01-08T15:34:22.521908Z node 4 :PERSQUEUE DEBUG: partition.cpp:2329: [72075186224037892][Partition][0][StateIdle] Process user action and tx events 2026-01-08T15:34:22.521946Z node 4 :PERSQUEUE DEBUG: partition.cpp:2388: [72075186224037892][Partition][0][StateIdle] Events: 0, PendingCommits: 0, PendingWrites: 0 2026-01-08T15:34:22.521960Z node 4 :PERSQUEUE DEBUG: partition.cpp:2337: [72075186224037892][Partition][0][StateIdle] Process user action and tx pending commits 2026-01-08T15:34:22.521984Z node 4 :PERSQUEUE DEBUG: partition.cpp:2388: [72075186224037892][Partition][0][StateIdle] Events: 0, PendingCommits: 0, PendingWrites: 0 2026-01-08T15:34:22.521998Z node 4 :PERSQUEUE DEBUG: partition.cpp:2347: [72075186224037892][Partition][0][StateIdle] Try persist 2026-01-08T15:34:22.622432Z node 4 :PERSQUEUE DEBUG: partition.cpp:2329: [72075186224037892][Partition][0][StateIdle] Process user action and tx events 2026-01-08T15:34:22.622462Z node 4 :PERSQUEUE DEBUG: partition.cpp:2388: [72075186224037892][Partition][0][StateIdle] Events: 0, PendingCommits: 0, PendingWrites: 0 2026-01-08T15:34:22.622475Z node 4 :PERSQUEUE DEBUG: partition.cpp:2337: [72075186224037892][Partition][0][StateIdle] Process user action and tx pending commits 2026-01-08T15:34:22.622493Z node 4 :PERSQUEUE DEBUG: partition.cpp:2388: [72075186224037892][Partition][0][StateIdle] Events: 0, PendingCommits: 0, PendingWrites: 0 2026-01-08T15:34:22.622503Z node 4 :PERSQUEUE DEBUG: partition.cpp:2347: [72075186224037892][Partition][0][StateIdle] Try persist 2026-01-08T15:34:23.139860Z node 3 :KQP_EXECUTER ERROR: {KQPEX@kqp_executer_impl.h:1259} ActorId: [3:7593014545359628197:2479] TxId: 281474976710676. Ctx: { TraceId: 01kef3x7mxdcvmkmsxzvey1bj5, Database: /Root, SessionId: ydb://session/3?node_id=3&id=MWNmYmUzMzAtMzllNThmMDgtY2JkMTlkNS1mOTZiNzZlMw==, PoolId: default, IsStreamingQuery: 0}. 
UNAVAILABLE: Failed to send EvStartKqpTasksRequest because node is unavailable: 4 trace_id# 2026-01-08T15:34:23.139999Z node 3 :KQP_COMPUTE ERROR: dq_compute_actor_impl.h:1264: SelfId: [3:7593014545359628201:2479], TxId: 281474976710676, task: 3. Ctx: { CheckpointId : . TraceId : 01kef3x7mxdcvmkmsxzvey1bj5. CustomerSuppliedId : . RunScriptActorId : [0:0:0]. PoolId : default. SessionId : ydb://session/3?node_id=3&id=MWNmYmUzMzAtMzllNThmMDgtY2JkMTlkNS1mOTZiNzZlMw==. CurrentExecutionId : . Database : /Root. DatabaseId : /Root. }. Handle abort execution event from: [3:7593014545359628197:2479], status: UNAVAILABLE, reason: {
: Error: Terminate execution } 2026-01-08T15:34:23.380061Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7747: Cannot get console configs 2026-01-08T15:34:23.380116Z node 3 :IMPORT WARN: schemeshard_import.cpp:440: Table profiles were not loaded |87.3%| [TM] {BAZEL_UPLOAD} ydb/public/sdk/cpp/src/client/federated_topic/ut/unittest >> TxUsage::Sinks_Oltp_WriteToTopics_1_Query >> TContinuousBackupWithRebootsTests::TakeSeveralIncrementalBackups >> KqpRboPg::Bench_CrossFilter [GOOD] >> KqpRboPg::Bench_JoinFilter |87.3%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/s3/py3test >> test_s3.py::TestYdbS3TTL::test_s3[table_index_3_UNIQUE_SYNC-pk_types1-all_types1-index1--UNIQUE-SYNC] [GOOD] |87.3%| [TM] {BAZEL_UPLOAD} ydb/tests/datashard/s3/py3test >> TxUsage::Sinks_Oltp_WriteToTopicAndTable_4_Table [GOOD] >> TxUsage::WriteToTopic_Demo_46_Query [GOOD] >> KqpBatchDelete::ColumnTable >> KqpPg::InsertNoTargetColumns_Serial+useSink [GOOD] |87.3%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/public/sdk/cpp/src/client/topic/ut/slow/ydb-public-sdk-cpp-src-client-topic-ut-slow |87.3%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/public/sdk/cpp/src/client/topic/ut/slow/ydb-public-sdk-cpp-src-client-topic-ut-slow >> KqpBatchDelete::SimpleOnePartition >> TxUsage::Sinks_Oltp_WriteToTopicAndTable_4_Query >> TxUsage::WriteToTopic_Demo_47_Table >> TTxDataShardReshuffleKMeansScan::BuildToPosting [GOOD] |87.3%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/schemeshard/ut_column_build/ydb-core-tx-schemeshard-ut_column_build |87.3%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/schemeshard/ut_column_build/ydb-core-tx-schemeshard-ut_column_build |87.3%| [LD] {RESULT} $(B)/ydb/public/sdk/cpp/src/client/topic/ut/slow/ydb-public-sdk-cpp-src-client-topic-ut-slow >> TTxDataShardReshuffleKMeansScan::BuildToBuild >> KqpBatchDelete::ManyPartitions_3 >> KqpBatchUpdate::SimplePartitions >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-LocalUser-DropUser-58 [GOOD] |87.3%| [LD] {RESULT} $(B)/ydb/core/tx/schemeshard/ut_column_build/ydb-core-tx-schemeshard-ut_column_build >> TKeyValueTest::TestEmptyWriteReadDeleteWithRestartsThenResponseOk [GOOD] >> TKeyValueTest::TestEmptyWriteReadDeleteWithRestartsThenResponseOkNewApi >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-LocalUser-DropUser-59 |87.3%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_continuous_backup_reboots/unittest ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/rbo/unittest >> KqpRboPg::FallbackToYqlDisabled [GOOD] Test command err: Trying to start YDB, gRPC: 18281, MsgBus: 12541 2026-01-08T15:34:00.105445Z node 1 :METADATA_PROVIDER WARN: log.cpp:841: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7593014445217570312:2069];send_to=[0:7307199536658146131:7762515]; 2026-01-08T15:34:00.105532Z node 1 :METADATA_PROVIDER ERROR: log.cpp:841: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2026-01-08T15:34:00.489388Z node 1 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: LookupError, path: Root/.metadata/script_executions 2026-01-08T15:34:00.496011Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2026-01-08T15:34:00.496159Z node 1 :HIVE WARN: node_info.cpp:25: 
HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2026-01-08T15:34:00.537075Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2026-01-08T15:34:00.593140Z node 1 :IMPORT WARN: schemeshard_import.cpp:440: Table profiles were not loaded 2026-01-08T15:34:00.595659Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1374: Notification cookie mismatch for subscription [1:7593014445217570275:2081] 1767886440101517 != 1767886440101520 2026-01-08T15:34:00.656126Z node 1 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: PathErrorUnknown, path: Root/.metadata/script_executions 2026-01-08T15:34:00.731901Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2026-01-08T15:34:00.731934Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2026-01-08T15:34:00.731948Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2026-01-08T15:34:00.732039Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2026-01-08T15:34:01.146508Z node 1 :TX_CONVEYOR ERROR: log.cpp:841: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2026-01-08T15:34:01.349586Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2026-01-08T15:34:01.361676Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:123: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2026-01-08T15:34:03.991572Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7593014458102473057:2324], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:34:03.991711Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:263: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:34:03.992363Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7593014458102473067:2325], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:34:03.992428Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:263: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:34:04.270183Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:34:04.489924Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7593014462397440468:2338], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:34:04.490021Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:263: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:34:04.490377Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7593014462397440473:2341], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:34:04.490484Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7593014462397440474:2342], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:34:04.490604Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:263: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:34:04.495797Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710659:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2026-01-08T15:34:04.512153Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:123: NotifyTxCompletion, unknown transaction, txId: 281474976710659, at schemeshard: 72057594046644480 2026-01-08T15:34:04.513066Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7593014462397440477:2343], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710659 completed, doublechecking } 2026-01-08T15:34:04.635245Z node 1 :TX_PROXY ERROR: schemereq.cpp:624: Actor# [1:7593014462397440528:2609] txid# 281474976710660, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 42], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2026-01-08T15:34:05.104386Z node 1 :METADATA_PROVIDER ERROR: log.cpp:841: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7593014445217570312:2069];send_to=[0:7307199536658146131:7762515]; 2026-01-08T15:34:05.104443Z node 1 :METADATA_PROVIDER ERROR: log.cpp:841: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; Trying to start YDB, gRPC: 4452, MsgBus: 3284 2026-01-08T15:34:05.752414Z node 2 :METADATA_PROVIDER WARN: log.cpp:841: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7593014465893397022:2065];send_to=[0:7307199536658146131:7762515]; 2026-01-08T15:34:05.760256Z node 2 :METADATA_PROVIDER ERROR: log.cpp:841: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2026-01-08T15:34:05.849322Z node 2 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: LookupError, path: Root/.metadata/script_executions 2026-01-08T15:34:06.036662Z node 2 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: LookupError, path: Root/.metadata/script_executions 2026-01-08T15:34:06.060646Z node 2 :IMPORT WARN: schemeshard_import.cpp:440: Table profiles were not loaded 2026-01-08T15:34:06.063080Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2026-01-08T15:34:06.063146Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2026-01-08T15:34:06.067873Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1374: Notification cookie mismatch for subscription [2:7593014465893396997:2081] 1767886445750894 != 1767886445750897 2026-01-08T15:34:06.124167Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2026-01-08T15:34:06.350983Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2026-01-08T15:34:06.351014Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2026-01-08T15:34:06.351026Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2026-01-08T15:34:06.351115Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2026-01-08T15:34:06.412668Z node 2 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: PathErrorUnknown, path: Root/.metadata/script_executions 2026-01-08T15:34:06.777047Z node 2 :TX_CONVEYOR ERROR: log.cpp:841: fline=service.h:53;problem=unexpected event for task 
executor;ev_type=NActors::TEvents::TEvWakeup; 2026-01-08T15:34:07.006749Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2026-01-08T15:34:09.782860Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7593014483073267074:2324], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:34:09.783085Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:263: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:34:09.792980Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7593014483073267104:2329], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:34:09.793078Z node 2 :KQP_WORKLOAD_SER ... und or you don't have access permissions } 2026-01-08T15:34:16.349305Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710659:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2026-01-08T15:34:16.361590Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:123: NotifyTxCompletion, unknown transaction, txId: 281474976710659, at schemeshard: 72057594046644480 2026-01-08T15:34:16.362444Z node 3 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [3:7593014514715322985:2343], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710659 completed, doublechecking } 2026-01-08T15:34:16.457498Z node 3 :TX_PROXY ERROR: schemereq.cpp:624: Actor# [3:7593014514715323036:2600] txid# 281474976710660, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 42], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2026-01-08T15:34:16.504217Z node 3 :METADATA_PROVIDER ERROR: log.cpp:841: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[3:7593014493240485672:2177];send_to=[0:7307199536658146131:7762515]; 2026-01-08T15:34:16.504290Z node 3 :METADATA_PROVIDER ERROR: log.cpp:841: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2026-01-08T15:34:16.539537Z node 3 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:662: Compilation with new RBO failed, retrying with YQL optimizer, self: [3:7593014514715323045:2347], database: /Root, text: "\n --!syntax_pg\n SET TablePathPrefix = \"/Root/\";\n select sum(distinct t1.c), sum(distinct t1.a) from t1 group by t1.b order by t1.b;\n " 2026-01-08T15:34:18.945773Z node 3 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:662: Compilation with new RBO failed, retrying with YQL optimizer, self: [3:7593014523305257708:2374], database: /Root, text: "\n INSERT INTO `/Root/t1` (a, b, c) VALUES (1, 2, 3);\n " Trying to start YDB, gRPC: 29116, MsgBus: 30056 2026-01-08T15:34:20.397533Z node 4 :METADATA_PROVIDER WARN: log.cpp:841: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[4:7593014529417139170:2075];send_to=[0:7307199536658146131:7762515]; 2026-01-08T15:34:20.398331Z node 4 :METADATA_PROVIDER ERROR: log.cpp:841: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2026-01-08T15:34:20.433137Z node 4 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:125: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, got undelivered to scheme cache: ActorUnknown 2026-01-08T15:34:20.654404Z node 4 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: LookupError, path: Root/.metadata/script_executions 2026-01-08T15:34:20.658495Z node 4 :IMPORT WARN: schemeshard_import.cpp:440: Table profiles were not loaded 2026-01-08T15:34:20.700510Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2026-01-08T15:34:20.700600Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2026-01-08T15:34:20.712072Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Connecting -> Connected 2026-01-08T15:34:20.870183Z node 4 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: PathErrorUnknown, path: Root/.metadata/script_executions 2026-01-08T15:34:20.911110Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2026-01-08T15:34:20.911136Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize 
from file: (empty maybe) 2026-01-08T15:34:20.911147Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2026-01-08T15:34:20.911242Z node 4 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2026-01-08T15:34:21.430664Z node 4 :TX_CONVEYOR ERROR: log.cpp:841: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2026-01-08T15:34:21.691456Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2026-01-08T15:34:21.697946Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:123: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2026-01-08T15:34:24.990505Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7593014546597009205:2325], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:34:24.990675Z node 4 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:263: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:34:24.996053Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7593014546597009215:2326], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:34:24.996188Z node 4 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:263: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:34:25.020725Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:34:25.202184Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7593014550891976607:2338], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:34:25.202329Z node 4 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:263: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:34:25.203182Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7593014550891976612:2341], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:34:25.203248Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7593014550891976613:2342], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:34:25.203623Z node 4 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:263: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:34:25.208243Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710659:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2026-01-08T15:34:25.222588Z node 4 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [4:7593014550891976616:2343], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710659 completed, doublechecking } 2026-01-08T15:34:25.289004Z node 4 :TX_PROXY ERROR: schemereq.cpp:624: Actor# [4:7593014550891976667:2596] txid# 281474976710660, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 42], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2026-01-08T15:34:25.352522Z node 4 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:625: Compilation failed, self: [4:7593014550891976676:2347], status: INTERNAL_ERROR, issues:
: Fatal: Execution, code: 1060
:4:1: Fatal: ydb/core/kqp/opt/rbo/kqp_rewrite_select.cpp:1110: Multiple distinct is not supported, code: 1 2026-01-08T15:34:25.353123Z node 4 :KQP_SESSION WARN: {KQPSA@kqp_session_actor.cpp:2679} SessionId: ydb://session/3?node_id=4&id=OWZhNDBmNzctOTA3OTZlYTctZDQxMTc5NDItZjQ5ZWZjY2U=, ActorId: [4:7593014550891976604:2336], ActorState: ExecuteState, LegacyTraceId: 01kef3xa5hdwjryfrkhwkhcyxv, ReplyQueryCompileError, remove tx status# INTERNAL_ERROR issues# { message: "Execution" issue_code: 1060 issues { position { row: 4 column: 1 } message: "ydb/core/kqp/opt/rbo/kqp_rewrite_select.cpp:1110: Multiple distinct is not supported" end_position { row: 4 column: 1 } issue_code: 1 } } tx_id# trace_id# 2026-01-08T15:34:25.401774Z node 4 :METADATA_PROVIDER ERROR: log.cpp:841: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[4:7593014529417139170:2075];send_to=[0:7307199536658146131:7762515]; 2026-01-08T15:34:25.401857Z node 4 :METADATA_PROVIDER ERROR: log.cpp:841: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2026-01-08T15:34:25.424296Z node 4 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:625: Compilation failed, self: [4:7593014550891976701:2357], status: INTERNAL_ERROR, issues:
: Fatal: Execution, code: 1060
: Fatal: yql/essentials/core/expr_nodes_gen/yql_expr_nodes_gen.h:294 TCallable(): requirement Match(node.Get()) failed, code: 1 2026-01-08T15:34:25.424882Z node 4 :KQP_SESSION WARN: {KQPSA@kqp_session_actor.cpp:2679} SessionId: ydb://session/3?node_id=4&id=NzAyODc4NDEtNzYyOGIzYmItZTZhNTkxNDAtOWQ0NjczNDE=, ActorId: [4:7593014550891976693:2352], ActorState: ExecuteState, LegacyTraceId: 01kef3xabea3rdpzfbene2nsz5, ReplyQueryCompileError, remove tx status# INTERNAL_ERROR issues# { message: "Execution" issue_code: 1060 issues { message: "yql/essentials/core/expr_nodes_gen/yql_expr_nodes_gen.h:294 TCallable(): requirement Match(node.Get()) failed" issue_code: 1 } } tx_id# trace_id# |87.3%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/s3/py3test >> test_s3.py::TestYdbS3TTL::test_s3[table_index_4__SYNC-pk_types5-all_types5-index5---SYNC] [GOOD] |87.4%| [TM] {BAZEL_UPLOAD} ydb/core/tx/schemeshard/ut_continuous_backup_reboots/unittest >> test_s3.py::TestYdbS3TTL::test_s3[table_ttl_DyNumber-pk_types13-all_types13-index13-DyNumber--] |87.4%| [TM] {BAZEL_UPLOAD} ydb/core/kqp/ut/rbo/unittest >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-LocalUser-CreateUser-11 [FAIL] >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-LocalUser-CreateUser-12 >> BasicUsage::PreferredDatabaseNoFallback [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/pg/unittest >> KqpPg::InsertNoTargetColumns_Serial+useSink [GOOD] Test command err: Trying to start YDB, gRPC: 29113, MsgBus: 26123 2026-01-08T15:31:28.744825Z node 1 :METADATA_PROVIDER WARN: log.cpp:841: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7593013792870845286:2064];send_to=[0:7307199536658146131:7762515]; 2026-01-08T15:31:28.744876Z node 1 :METADATA_PROVIDER ERROR: log.cpp:841: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2026-01-08T15:31:29.179554Z node 1 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: LookupError, path: Root/.metadata/script_executions 2026-01-08T15:31:29.187124Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2026-01-08T15:31:29.187252Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2026-01-08T15:31:29.256991Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2026-01-08T15:31:29.300801Z node 1 :IMPORT WARN: schemeshard_import.cpp:440: Table profiles were not loaded 2026-01-08T15:31:29.302305Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1374: Notification cookie mismatch for subscription [1:7593013792870845263:2081] 1767886288739182 != 1767886288739185 2026-01-08T15:31:29.352156Z node 1 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: PathErrorUnknown, path: Root/.metadata/script_executions 2026-01-08T15:31:29.373888Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2026-01-08T15:31:29.373911Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 
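Both failure shapes shown in the blocks above (SCHEME_ERROR with issue 2003 for the missing db.[/Root/PQ/Config/V2/Cluster] table earlier in this section, and INTERNAL_ERROR with issue 1060 when the new RBO rejects multiple DISTINCT aggregates) reach the caller as a TStatus carrying the same nested issue tree the session actor prints. A hedged sketch of surfacing them with the NYdb::NTable C++ client; the query text and function name are illustrative, not from the test source:

    #include <ydb/public/sdk/cpp/client/ydb_table/table.h>

    using namespace NYdb;
    using namespace NYdb::NTable;

    // Hedged sketch: run a query and dump the issue tree on failure. For the
    // missing-table case this yields SCHEME_ERROR / issue 2003; for the
    // unsupported multiple-DISTINCT RBO query, INTERNAL_ERROR / issue 1060.
    void RunAndReport(TSession session) {
        auto result = session.ExecuteDataQuery(
            "SELECT * FROM `/Root/PQ/Config/V2/Cluster`;",  // illustrative path
            TTxControl::BeginTx().CommitTx()).GetValueSync();
        if (!result.IsSuccess()) {
            Cerr << result.GetIssues().ToString();  // same nested issues as logged
        }
    }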
2026-01-08T15:31:29.373918Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2026-01-08T15:31:29.374020Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2026-01-08T15:31:29.834995Z node 1 :TX_CONVEYOR ERROR: log.cpp:841: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2026-01-08T15:31:30.223663Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2026-01-08T15:31:30.241941Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:123: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 16 2026-01-08T15:31:33.121183Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:31:33.382161Z node 1 :READ_TABLE_API WARN: rpc_read_table.cpp:116: ForgetAction occurred, send TEvPoisonPill 2026-01-08T15:31:33.408103Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:31:33.414701Z node 1 :TX_DATASHARD ERROR: finish_propose_unit.cpp:245: Prepare transaction failed. txid 281474976710660 at tablet 72075186224037888 errors: WRONG_SHARD_STATE (Interrupted operation [0:281474976710660] at 72075186224037888 while waiting for scan finish) | 2026-01-08T15:31:33.420931Z node 1 :TX_DATASHARD ERROR: finish_propose_unit.cpp:174: Errors while proposing transaction txid 281474976710660 at tablet 72075186224037888 status: ERROR errors: WRONG_SHARD_STATE (Interrupted operation [0:281474976710660] at 72075186224037888 while waiting for scan finish) | 2026-01-08T15:31:33.530759Z node 1 :READ_TABLE_API WARN: rpc_read_table.cpp:116: ForgetAction occurred, send TEvPoisonPill 2026-01-08T15:31:33.587624Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7593013814345682819:2343], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:31:33.587736Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:263: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:31:33.588514Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7593013814345682831:2346], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:31:33.588573Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7593013814345682832:2347], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:31:33.588903Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:263: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:31:33.593176Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710663:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2026-01-08T15:31:33.619926Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7593013814345682835:2348], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710663 completed, doublechecking } 2026-01-08T15:31:33.750266Z node 1 :METADATA_PROVIDER ERROR: log.cpp:841: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7593013792870845286:2064];send_to=[0:7307199536658146131:7762515]; 2026-01-08T15:31:33.750362Z node 1 :METADATA_PROVIDER ERROR: log.cpp:841: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2026-01-08T15:31:33.755139Z node 1 :TX_PROXY ERROR: schemereq.cpp:624: Actor# [1:7593013814345682886:2658] txid# 281474976710664, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 43], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } f f t t 18 2026-01-08T15:31:34.536422Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:31:34.623496Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:31:34.696316Z node 1 :READ_TABLE_API WARN: rpc_read_table.cpp:116: ForgetAction occurred, send TEvPoisonPill 0 0 1 1 2 2 3 3 4 4 5 5 6 6 7 7 8 8 9 9 21 2026-01-08T15:31:35.251282Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:31:35.358406Z node 1 :READ_TABLE_API WARN: rpc_read_table.cpp:116: ForgetAction occurred, send TEvPoisonPill 2026-01-08T15:31:35.364587Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710674:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 0 0 1 1 2 2 3 3 4 4 5 5 6 6 7 7 8 8 9 9 23 2026-01-08T15:31:35.944727Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710677:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:31:36.023709Z node 1 :READ_TABLE_API WARN: rpc_read_table.cpp:116: ForgetAction occurred, send TEvPoisonPill 2026-01-08T15:31:36.028540Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710679:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at 
ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 0 0 1 1 2 2 3 3 4 4 5 5 6 6 7 7 8 8 9 9 20 2026-01-08T15:31:36.606892Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710682:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:31:36.677041Z node 1 :READ_TABLE_API WARN: rpc_read_table.cpp:116: ForgetAction occurred, send TEvPoisonPill 2026-01-08T15:31:36.684619Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710684:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:31:36.795708Z node 1 :READ_TABLE_API WARN: rpc_read_table.cpp:116: ForgetAction occurred, send TEvPoisonPill 0 0 1 1 2 2 3 3 4 4 5 5 6 6 7 7 8 8 9 9 700 2026-01-08T15:31:37.461085Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710688:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:31:37.561071Z node 1 :READ_TABLE_API WARN: rpc_read_table.cpp:116: ForgetAction occurred, send TEvPoisonPill 2026-01-08T15:31: ... bscription [11:7593014474330570420:2081] 1767886447156635 != 1767886447156638 2026-01-08T15:34:07.570527Z node 11 :IMPORT WARN: schemeshard_import.cpp:440: Table profiles were not loaded 2026-01-08T15:34:07.577227Z node 11 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(11, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2026-01-08T15:34:07.577359Z node 11 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(11, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2026-01-08T15:34:07.579690Z node 11 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: LookupError, path: Root/.metadata/script_executions 2026-01-08T15:34:07.616827Z node 11 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(11, (0,0,0,0)) VolatileState: Connecting -> Connected 2026-01-08T15:34:07.867259Z node 11 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2026-01-08T15:34:07.867297Z node 11 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2026-01-08T15:34:07.867310Z node 11 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2026-01-08T15:34:07.867458Z node 11 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2026-01-08T15:34:07.905169Z node 11 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: PathErrorUnknown, path: Root/.metadata/script_executions 2026-01-08T15:34:08.193993Z node 11 :TX_CONVEYOR ERROR: log.cpp:841: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2026-01-08T15:34:08.680110Z node 
11 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2026-01-08T15:34:08.695253Z node 11 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:123: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2026-01-08T15:34:12.190204Z node 11 :METADATA_PROVIDER ERROR: log.cpp:841: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[11:7593014474330570648:2247];send_to=[0:7307199536658146131:7762515]; 2026-01-08T15:34:12.190326Z node 11 :METADATA_PROVIDER ERROR: log.cpp:841: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2026-01-08T15:34:13.929371Z node 11 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [11:7593014500100375091:2327], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:34:13.929465Z node 11 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:263: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:34:13.929934Z node 11 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [11:7593014500100375104:2331], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:34:13.930006Z node 11 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [11:7593014500100375103:2330], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:34:13.930011Z node 11 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:263: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:34:13.936507Z node 11 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2026-01-08T15:34:13.954495Z node 11 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [11:7593014500100375107:2332], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710658 completed, doublechecking } 2026-01-08T15:34:14.049188Z node 11 :TX_PROXY ERROR: schemereq.cpp:624: Actor# [11:7593014504395342454:2546] txid# 281474976710659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 41], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2026-01-08T15:34:14.101550Z node 11 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:34:14.982672Z node 11 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_alter_table.cpp:187) Trying to start YDB, gRPC: 18716, MsgBus: 22346 2026-01-08T15:34:17.972450Z node 12 :METADATA_PROVIDER WARN: log.cpp:841: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[12:7593014517444530047:2081];send_to=[0:7307199536658146131:7762515]; 2026-01-08T15:34:17.973145Z node 12 :METADATA_PROVIDER ERROR: log.cpp:841: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2026-01-08T15:34:18.106719Z node 12 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: LookupError, path: Root/.metadata/script_executions 2026-01-08T15:34:18.373866Z node 12 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: LookupError, path: Root/.metadata/script_executions 2026-01-08T15:34:18.377850Z node 12 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(12, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2026-01-08T15:34:18.378016Z node 12 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(12, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2026-01-08T15:34:18.384584Z node 12 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(12, (0,0,0,0)) VolatileState: Connecting -> Connected 2026-01-08T15:34:18.420837Z node 12 :IMPORT WARN: schemeshard_import.cpp:440: Table profiles were not loaded 2026-01-08T15:34:18.744334Z node 12 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2026-01-08T15:34:18.744364Z node 12 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2026-01-08T15:34:18.744379Z node 12 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2026-01-08T15:34:18.744505Z node 12 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2026-01-08T15:34:18.831019Z node 12 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: PathErrorUnknown, path: 
Root/.metadata/script_executions 2026-01-08T15:34:19.031057Z node 12 :TX_CONVEYOR ERROR: log.cpp:841: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2026-01-08T15:34:19.928022Z node 12 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2026-01-08T15:34:22.975859Z node 12 :METADATA_PROVIDER ERROR: log.cpp:841: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[12:7593014517444530047:2081];send_to=[0:7307199536658146131:7762515]; 2026-01-08T15:34:22.975957Z node 12 :METADATA_PROVIDER ERROR: log.cpp:841: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2026-01-08T15:34:25.933409Z node 12 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [12:7593014551804269266:2329], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:34:25.933571Z node 12 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:263: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:34:25.934109Z node 12 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [12:7593014551804269279:2332], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:34:25.940073Z node 12 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2026-01-08T15:34:25.964546Z node 12 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [12:7593014551804269281:2333], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710658 completed, doublechecking }
2026-01-08T15:34:26.021129Z node 12 :TX_PROXY ERROR: schemereq.cpp:624: Actor# [12:7593014556099236643:2550] txid# 281474976710659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 41], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 }
2026-01-08T15:34:26.153133Z node 12 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689)
>> DataShardVolatile::DistributedUpsertRestartBeforePrepare-UseSink [GOOD]
>> TKeyValueTest::TestVacuumOnEmptyTablet [GOOD]
>> DataShardVolatile::UpsertNoLocksArbiterRestart-UseSink [GOOD]
>> DataShardVolatile::UpsertBrokenLockArbiterRestart+UseSink
>> DataShardVolatile::DistributedUpsertRestartAfterPrepare+UseSink
>> TKeyValueTest::TestVacuumOnEmptyTabletResetGeneration
>> TKeyValueTest::TestVacuumOnEmptyTabletResetGeneration [GOOD]
|87.3%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/tiering/ut/ydb-core-tx-tiering-ut
|87.3%| [TM] {BAZEL_UPLOAD} ydb/tests/datashard/s3/py3test
|87.3%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/tiering/ut/ydb-core-tx-tiering-ut
|87.3%| [TM] {RESULT} ydb/core/tx/datashard/ut_stats/unittest
|87.3%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_continuous_backup_reboots/unittest
|87.3%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_continuous_backup_reboots/unittest
|87.4%| [LD] {RESULT} $(B)/ydb/core/tx/tiering/ut/ydb-core-tx-tiering-ut
|87.4%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_continuous_backup_reboots/unittest >> KqpRboPg::Bench_JoinFilter [GOOD]
|87.4%| [TM] {BAZEL_UPLOAD} ydb/core/tx/schemeshard/ut_continuous_backup_reboots/unittest
|87.4%| [TM] {BAZEL_UPLOAD} ydb/core/kqp/ut/pg/unittest
|87.4%| [TM] {BAZEL_UPLOAD} ydb/core/tx/schemeshard/ut_continuous_backup_reboots/unittest
|87.4%| [TM] {BAZEL_UPLOAD} ydb/core/tx/schemeshard/ut_continuous_backup_reboots/unittest
------- [TM] {asan, default-linux-x86_64, release} ydb/core/keyvalue/ut/unittest >> TKeyValueTest::TestVacuumOnEmptyTabletResetGeneration [GOOD]
Test command err:
Leader for TabletID 72057594037927937 is [0:0:0] sender: [1:56:2057] recipient: [1:52:2097]
IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [1:56:2057] recipient: [1:52:2097]
Leader for TabletID 72057594037927937 is [1:58:2099] sender: [1:59:2057] recipient: [1:52:2097]
Leader for TabletID 72057594037927937 is [1:58:2099] sender: [1:76:2057] recipient: [1:14:2061]
Leader for TabletID 72057594037927937 is [0:0:0] sender: [2:56:2057] recipient: [2:54:2097]
IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [2:56:2057] recipient: [2:54:2097]
Leader for TabletID 72057594037927937 is [2:58:2099] sender: [2:59:2057] recipient: [2:54:2097]
Leader for TabletID 72057594037927937 is [2:58:2099] sender: [2:76:2057] recipient: [2:14:2061]
!Reboot 72057594037927937 (actor [2:58:2099]) on event NKikimr::TEvTabletPipe::TEvServerConnected !
Leader for TabletID 72057594037927937 is [2:58:2099] sender: [2:78:2057] recipient: [2:39:2086] Leader for TabletID 72057594037927937 is [2:58:2099] sender: [2:81:2057] recipient: [2:80:2112] Leader for TabletID 72057594037927937 is [2:82:2113] sender: [2:83:2057] recipient: [2:80:2112] !Reboot 72057594037927937 (actor [2:58:2099]) rebooted! !Reboot 72057594037927937 (actor [2:58:2099]) tablet resolver refreshed! new actor is[2:82:2113] Leader for TabletID 72057594037927937 is [2:82:2113] sender: [2:198:2057] recipient: [2:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [3:56:2057] recipient: [3:52:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [3:56:2057] recipient: [3:52:2097] Leader for TabletID 72057594037927937 is [3:58:2099] sender: [3:59:2057] recipient: [3:52:2097] Leader for TabletID 72057594037927937 is [3:58:2099] sender: [3:76:2057] recipient: [3:14:2061] !Reboot 72057594037927937 (actor [3:58:2099]) on event NKikimr::TEvKeyValue::TEvAcquireLock ! Leader for TabletID 72057594037927937 is [3:58:2099] sender: [3:78:2057] recipient: [3:39:2086] Leader for TabletID 72057594037927937 is [3:58:2099] sender: [3:81:2057] recipient: [3:80:2112] Leader for TabletID 72057594037927937 is [3:82:2113] sender: [3:83:2057] recipient: [3:80:2112] !Reboot 72057594037927937 (actor [3:58:2099]) rebooted! !Reboot 72057594037927937 (actor [3:58:2099]) tablet resolver refreshed! new actor is[3:82:2113] Leader for TabletID 72057594037927937 is [3:82:2113] sender: [3:198:2057] recipient: [3:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [4:56:2057] recipient: [4:53:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [4:56:2057] recipient: [4:53:2097] Leader for TabletID 72057594037927937 is [4:58:2099] sender: [4:59:2057] recipient: [4:53:2097] Leader for TabletID 72057594037927937 is [4:58:2099] sender: [4:76:2057] recipient: [4:14:2061] !Reboot 72057594037927937 (actor [4:58:2099]) on event NKikimr::TEvKeyValue::TEvIntermediate ! Leader for TabletID 72057594037927937 is [4:58:2099] sender: [4:79:2057] recipient: [4:39:2086] Leader for TabletID 72057594037927937 is [4:58:2099] sender: [4:82:2057] recipient: [4:81:2112] Leader for TabletID 72057594037927937 is [4:83:2113] sender: [4:84:2057] recipient: [4:81:2112] !Reboot 72057594037927937 (actor [4:58:2099]) rebooted! !Reboot 72057594037927937 (actor [4:58:2099]) tablet resolver refreshed! new actor is[4:83:2113] Leader for TabletID 72057594037927937 is [4:83:2113] sender: [4:199:2057] recipient: [4:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [5:56:2057] recipient: [5:53:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [5:56:2057] recipient: [5:53:2097] Leader for TabletID 72057594037927937 is [5:58:2099] sender: [5:59:2057] recipient: [5:53:2097] Leader for TabletID 72057594037927937 is [5:58:2099] sender: [5:76:2057] recipient: [5:14:2061] !Reboot 72057594037927937 (actor [5:58:2099]) on event NKikimr::TEvTabletPipe::TEvServerConnected ! Leader for TabletID 72057594037927937 is [5:58:2099] sender: [5:82:2057] recipient: [5:39:2086] Leader for TabletID 72057594037927937 is [5:58:2099] sender: [5:85:2057] recipient: [5:84:2115] Leader for TabletID 72057594037927937 is [5:86:2116] sender: [5:87:2057] recipient: [5:84:2115] !Reboot 72057594037927937 (actor [5:58:2099]) rebooted! !Reboot 72057594037927937 (actor [5:58:2099]) tablet resolver refreshed! 
new actor is[5:86:2116] Leader for TabletID 72057594037927937 is [5:86:2116] sender: [5:202:2057] recipient: [5:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [6:56:2057] recipient: [6:53:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [6:56:2057] recipient: [6:53:2097] Leader for TabletID 72057594037927937 is [6:58:2099] sender: [6:59:2057] recipient: [6:53:2097] Leader for TabletID 72057594037927937 is [6:58:2099] sender: [6:76:2057] recipient: [6:14:2061] !Reboot 72057594037927937 (actor [6:58:2099]) on event NKikimr::TEvKeyValue::TEvVacuumRequest ! Leader for TabletID 72057594037927937 is [6:58:2099] sender: [6:82:2057] recipient: [6:39:2086] Leader for TabletID 72057594037927937 is [6:58:2099] sender: [6:85:2057] recipient: [6:84:2115] Leader for TabletID 72057594037927937 is [6:86:2116] sender: [6:87:2057] recipient: [6:84:2115] !Reboot 72057594037927937 (actor [6:58:2099]) rebooted! !Reboot 72057594037927937 (actor [6:58:2099]) tablet resolver refreshed! new actor is[6:86:2116] Leader for TabletID 72057594037927937 is [6:86:2116] sender: [6:202:2057] recipient: [6:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [7:56:2057] recipient: [7:53:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [7:56:2057] recipient: [7:53:2097] Leader for TabletID 72057594037927937 is [7:58:2099] sender: [7:59:2057] recipient: [7:53:2097] Leader for TabletID 72057594037927937 is [7:58:2099] sender: [7:76:2057] recipient: [7:14:2061] !Reboot 72057594037927937 (actor [7:58:2099]) on event NKikimr::TEvKeyValue::TEvForceTabletVacuum ! Leader for TabletID 72057594037927937 is [7:58:2099] sender: [7:82:2057] recipient: [7:39:2086] Leader for TabletID 72057594037927937 is [7:58:2099] sender: [7:85:2057] recipient: [7:84:2115] Leader for TabletID 72057594037927937 is [7:86:2116] sender: [7:87:2057] recipient: [7:84:2115] !Reboot 72057594037927937 (actor [7:58:2099]) rebooted! !Reboot 72057594037927937 (actor [7:58:2099]) tablet resolver refreshed! new actor is[7:86:2116] Leader for TabletID 72057594037927937 is [7:86:2116] sender: [7:202:2057] recipient: [7:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [8:56:2057] recipient: [8:52:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [8:56:2057] recipient: [8:52:2097] Leader for TabletID 72057594037927937 is [8:58:2099] sender: [8:59:2057] recipient: [8:52:2097] Leader for TabletID 72057594037927937 is [8:58:2099] sender: [8:76:2057] recipient: [8:14:2061] !Reboot 72057594037927937 (actor [8:58:2099]) on event NKikimr::TEvTablet::TEvFollowerGcApplied ! Leader for TabletID 72057594037927937 is [8:58:2099] sender: [8:87:2057] recipient: [8:39:2086] Leader for TabletID 72057594037927937 is [8:58:2099] sender: [8:90:2057] recipient: [8:89:2119] Leader for TabletID 72057594037927937 is [8:91:2120] sender: [8:92:2057] recipient: [8:89:2119] !Reboot 72057594037927937 (actor [8:58:2099]) rebooted! !Reboot 72057594037927937 (actor [8:58:2099]) tablet resolver refreshed! 
new actor is[8:91:2120] Leader for TabletID 72057594037927937 is [8:91:2120] sender: [8:207:2057] recipient: [8:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [9:56:2057] recipient: [9:54:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [9:56:2057] recipient: [9:54:2097] Leader for TabletID 72057594037927937 is [9:58:2099] sender: [9:59:2057] recipient: [9:54:2097] Leader for TabletID 72057594037927937 is [9:58:2099] sender: [9:76:2057] recipient: [9:14:2061] !Reboot 72057594037927937 (actor [9:58:2099]) on event NKikimr::TEvTabletPipe::TEvServerConnected ! Leader for TabletID 72057594037927937 is [9:58:2099] sender: [9:91:2057] recipient: [9:39:2086] Leader for TabletID 72057594037927937 is [9:58:2099] sender: [9:94:2057] recipient: [9:93:2123] Leader for TabletID 72057594037927937 is [9:95:2124] sender: [9:96:2057] recipient: [9:93:2123] !Reboot 72057594037927937 (actor [9:58:2099]) rebooted! !Reboot 72057594037927937 (actor [9:58:2099]) tablet resolver refreshed! new actor is[9:95:2124] Leader for TabletID 72057594037927937 is [9:95:2124] sender: [9:211:2057] recipient: [9:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [10:56:2057] recipient: [10:53:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [10:56:2057] recipient: [10:53:2097] Leader for TabletID 72057594037927937 is [10:58:2099] sender: [10:59:2057] recipient: [10:53:2097] Leader for TabletID 72057594037927937 is [10:58:2099] sender: [10:76:2057] recipient: [10:14:2061] !Reboot 72057594037927937 (actor [10:58:2099]) on event NKikimr::TEvKeyValue::TEvVacuumRequest ! Leader for TabletID 72057594037927937 is [10:58:2099] sender: [10:91:2057] recipient: [10:39:2086] Leader for TabletID 72057594037927937 is [10:58:2099] sender: [10:94:2057] recipient: [10:93:2123] Leader for TabletID 72057594037927937 is [10:95:2124] sender: [10:96:2057] recipient: [10:93:2123] !Reboot 72057594037927937 (actor [10:58:2099]) rebooted! !Reboot 72057594037927937 (actor [10:58:2099]) tablet resolver refreshed! new actor is[10:95:2124] Leader for TabletID 72057594037927937 is [10:95:2124] sender: [10:211:2057] recipient: [10:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [11:56:2057] recipient: [11:53:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [11:56:2057] recipient: [11:53:2097] Leader for TabletID 72057594037927937 is [11:58:2099] sender: [11:59:2057] recipient: [11:53:2097] Leader for TabletID 72057594037927937 is [11:58:2099] sender: [11:76:2057] recipient: [11:14:2061] !Reboot 72057594037927937 (actor [11:58:2099]) on event NKikimr::TEvTabletPipe::TEvServerConnected ! Leader for TabletID 72057594037927937 is [11:58:2099] sender: [11:93:2057] recipient: [11:39:2086] Leader for TabletID 72057594037927937 is [11:58:2099] sender: [11:96:2057] recipient: [11:95:2125] Leader for TabletID 72057594037927937 is [11:97:2126] sender: [11:98:2057] recipient: [11:95:2125] !Reboot 72057594037927937 (actor [11:58:2099]) rebooted! !Reboot 72057594037927937 (actor [11:58:2099]) tablet resolver refreshed! 
new actor is[11:97:2126] Leader for TabletID 72057594037927937 is [11:97:2126] sender: [11:213:2057] recipient: [11:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [12:56:2057] recipient: [12:53:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [12:56:2057] recipient: [12:53:2097] Leader for TabletID 72057594037927937 is [12:58:2099] sender: [12:59:2057] recipient: [12:53:2097] Leader for TabletID 72057594037927937 is [12:58:2099] sender: [12:76:2057] recipient: [12:14:2061] !Reboot 72057594037927937 (actor [12:58:2099]) on event NKikimr::TEvKeyValue::TEvVacuumRequest ! Leader for TabletID 72057594037927937 is [12:58:2099] sender: [12:93:2057] recipient: [12:39:2086] Leader for TabletID 72057594037927937 is [12:58:2099] sender: [12:96:2057] recipient: [12:95:2125] Leader for TabletID 72057594037927937 is [12:97:2126] sender: [12:98:2057] recipient: [12:95:2125] !Reboot 72057594037927937 (actor [12:58:2099]) rebooted! !Reboot 72057594037927937 (actor [12:58:2099]) tablet resolver refreshed! new actor is[12:97:2126] Leader for TabletID 72057594037927937 is [12:97:2126] sender: [12:213:2057] recipient: [12:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [13:56:2057] recipient: [13:52:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [13:56:2057] recipient: [13:52:2097] Leader for TabletID 72057594037927937 is [13:58:2099] sender: [13:59:2057] recipient: [13:52:2097] Leader for TabletID 72057594037927937 is [13:58:2099] sender: [13:76:2057] recipient: [13:14:2061] !Reboot 72057594037927937 (actor [13:58:2099]) on event NKikimr::TEvKeyValue::TEvForceTabletVacuum ! Leader for TabletID 72057594037927937 is [13:58:2099] sender: [13:93:2057] recipient: [13:39:2086] Leader for TabletID 72057594037927937 is [13:58:2099] sender: [13:96:2057] recipient: [13:95:2125] Leader for TabletID 72057594037927937 is [13:97:2126] sender: [13:98:2057] recipient: [13:95:2125] !Reboot 72057594037927937 (actor [13:58:2099]) rebooted! !Reboot 72057594037927937 (actor [13:58:2099]) tablet resolver refreshed! new actor is[13:97:2126] Leader for TabletID 72057594037927937 is [13:97:2126] sender: [13:213:2057] recipient: [13:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [14:56:2057] recipient: [14:54:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [14:56:2057] recipient: [14:54:2097] Leader for TabletID 72057594037927937 is [14:58:2099] sender: [14:59:2057] recipient: [14:54:2097] Leader for TabletID 72057594037927937 is [14:58:2099] sender: [14:76:2057] recipient: [14:14:2061] !Reboot 72057594037927937 (actor [14:58:2099]) on event NKikimr::TEvTablet::TEvFollowerGcApplied ! Leader for TabletID 72057594037927937 is [14:58:2099] sender: [14:98:2057] recipient: [14:39:2086] Leader for TabletID 72057594037927937 is [14:58:2099] sender: [14:101:2057] recipient: [14:100:2129] Leader for TabletID 72057594037927937 is [14:102:2130] sender: [14:103:2057] recipient: [14:100:2129] !Reboot 72057594037927937 (actor [14:58:2099]) rebooted! !Reboot 72057594037927937 (actor [14:58:2099]) tablet resolver refreshed! 
new actor is[14:102:2130] Leader for TabletID 72057594037927937 is [14:102:2130] sender: [14:218:2057] recipient: [14:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [15:56:2057] recipient: [15:52:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [15:56:2057] recipient: [15:52:2097] Leader for TabletID 72057594037927937 is [15:58:2099] sender: [15:59:2057] recipient: [15:52:2097] Leader for TabletID 72057594037927937 is [15:58:2099] sender: [15:76:2057] recipient: [15:14:2061] !Reboot 72057594037927937 (actor [15:58:2099]) on event NKikimr::TEvTabletPipe::TEvServerConnected ! Leader for TabletID 72057594037927937 is [15:58:2099] sender: [15:102:2057] recipient: [15:39:2086] Leader for TabletID 72057594037927937 is [15:58:2099] sender: [15:105:2057] recipient: [15:104:2133] Leader for TabletID 72057594037927937 is [15:106:2134] sender: [15:107:2057] recipient: [15:104:2133] !Reboot 72057594037927937 (actor [15:58:2099]) rebooted! !Reboot 72057594037927937 (actor [15:58:2099]) tablet resolver refreshed! new actor is[15:106:2134] Leader for TabletID 72057594037927937 is [15:106:2134] sender: [15:222:2057] recipient: [15:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [16:56:2057] recipient: [16:53:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [16:56:2057] recipient: [16:53:2097] Leader for TabletID 72057594037927937 is [16:58:2099] sender: [16:59:2057] recipient: [16:53:2097] Leader for TabletID 72057594037927937 is [16:58:2099] sender: [16:76:2057] recipient: [16:14:2061] !Reboot 72057594037927937 (actor [16:58:2099]) on event NKikimr::TEvKeyValue::TEvVacuumRequest ! Leader for TabletID 72057594037927937 is [16:58:2099] sender: [16:102:2057] recipient: [16:39:2086] Leader for TabletID 72057594037927937 is [16:58:2099] sender: [16:105:2057] recipient: [16:104:2133] Leader for TabletID 72057594037927937 is [16:106:2134] sender: [16:107:2057] recipient: [16:104:2133] !Reboot 72057594037927937 (actor [16:58:2099]) rebooted! !Reboot 72057594037927937 (actor [16:58:2099]) tablet resolver refreshed! new actor is[16:106:2134] Leader for TabletID 72057594037927937 is [16:106:2134] sender: [16:222:2057] recipient: [16:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [17:56:2057] recipient: [17:53:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [17:56:2057] recipient: [17:53:2097] Leader for TabletID 72057594037927937 is [17:58:2099] sender: [17:59:2057] recipient: [17:53:2097] Leader for TabletID 72057594037927937 is [17:58:2099] sender: [17:76:2057] recipient: [17:14:2061] !Reboot 72057594037927937 (actor [17:58:2099]) on event NKikimr::TEvTabletPipe::TEvServerConnected ! Leader for TabletID 72057594037927937 is [17:58:2099] sender: [17:104:2057] recipient: [17:39:2086] Leader for TabletID 72057594037927937 is [17:58:2099] sender: [17:107:2057] recipient: [17:106:2135] Leader for TabletID 72057594037927937 is [17:108:2136] sender: [17:109:2057] recipient: [17:106:2135] !Reboot 72057594037927937 (actor [17:58:2099]) rebooted! !Reboot 72057594037927937 (actor [17:58:2099]) tablet resolver refreshed! 
new actor is[17:108:2136]
Leader for TabletID 72057594037927937 is [17:108:2136] sender: [17:224:2057] recipient: [17:14:2061]
Leader for TabletID 72057594037927937 is [0:0:0] sender: [18:56:2057] recipient: [18:52:2097]
IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [18:56:2057] recipient: [18:52:2097]
Leader for TabletID 72057594037927937 is [18:58:2099] sender: [18:59:2057] recipient: [18:52:2097]
Leader for TabletID 72057594037927937 is [18:58:2099] sender: [18:76:2057] recipient: [18:14:2061]
!Reboot 72057594037927937 (actor [18:58:2099]) on event NKikimr::TEvKeyValue::TEvVacuumRequest !
Leader for TabletID 72057594037927937 is [18:58:2099] sender: [18:104:2057] recipient: [18:39:2086]
Leader for TabletID 72057594037927937 is [18:58:2099] sender: [18:107:2057] recipient: [18:106:2135]
Leader for TabletID 72057594037927937 is [18:108:2136] sender: [18:109:2057] recipient: [18:106:2135]
!Reboot 72057594037927937 (actor [18:58:2099]) rebooted!
!Reboot 72057594037927937 (actor [18:58:2099]) tablet resolver refreshed!
new actor is[18:108:2136]
Leader for TabletID 72057594037927937 is [18:108:2136] sender: [18:224:2057] recipient: [18:14:2061]
Leader for TabletID 72057594037927937 is [0:0:0] sender: [19:56:2057] recipient: [19:53:2097]
IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [19:56:2057] recipient: [19:53:2097]
Leader for TabletID 72057594037927937 is [19:58:2099] sender: [19:59:2057] recipient: [19:53:2097]
Leader for TabletID 72057594037927937 is [19:58:2099] sender: [19:76:2057] recipient: [19:14:2061]
|87.4%| [TM] {BAZEL_UPLOAD} ydb/core/keyvalue/ut/unittest
------- [TM] {asan, default-linux-x86_64, release} ydb/public/sdk/cpp/src/client/federated_topic/ut/unittest >> BasicUsage::PreferredDatabaseNoFallback [GOOD]
Test command err:
2026-01-08T15:33:31.804717Z :GetAllStartPartitionSessions INFO: Random seed for debugging is 1767886411804671
2026-01-08T15:33:32.515753Z node 1 :METADATA_PROVIDER WARN: log.cpp:841: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7593014325080710845:2089];send_to=[0:7307199536658146131:7762515];
2026-01-08T15:33:32.515812Z node 1 :METADATA_PROVIDER ERROR: log.cpp:841: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message;
2026-01-08T15:33:32.661323Z node 1 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:125: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, got undelivered to scheme cache: ActorUnknown
2026-01-08T15:33:32.729532Z node 1 :PQ_READ_PROXY DEBUG: caching_service.cpp:44: Direct read cache: : Created
2026-01-08T15:33:32.853769Z node 2 :PQ_READ_PROXY DEBUG: caching_service.cpp:44: Direct read cache: : Created
2026-01-08T15:33:33.016041Z node 1 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: LookupError, path: Root/.metadata/script_executions
2026-01-08T15:33:33.139565Z node 2 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: LookupError, path: Root/.metadata/script_executions
2026-01-08T15:33:33.139704Z node 2 :METADATA_PROVIDER ERROR: log.cpp:841: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: 
LookupError; 2026-01-08T15:33:33.202588Z node 1 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: LookupError, path: Root/.metadata/script_executions 2026-01-08T15:33:33.298733Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2026-01-08T15:33:33.298844Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2026-01-08T15:33:33.300859Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2026-01-08T15:33:33.300926Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2026-01-08T15:33:33.386717Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2026-01-08T15:33:33.394597Z node 1 :HIVE WARN: hive_impl.cpp:814: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 2 Cookie 2 2026-01-08T15:33:33.407818Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2026-01-08T15:33:33.409852Z node 1 :IMPORT WARN: schemeshard_import.cpp:440: Table profiles were not loaded 2026-01-08T15:33:33.431671Z node 2 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: PathErrorUnknown, path: Root/.metadata/script_executions 2026-01-08T15:33:33.467027Z node 2 :BS_NODE WARN: {NWDC01@distconf.cpp:423} StateFunc too long Type# 268639239 Duration# 0.021459s 2026-01-08T15:33:33.624082Z node 1 :TX_CONVEYOR ERROR: log.cpp:841: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2026-01-08T15:33:33.754428Z node 1 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: PathErrorUnknown, path: Root/.metadata/script_executions 2026-01-08T15:33:33.864186Z node 2 :TX_CONVEYOR ERROR: log.cpp:841: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2026-01-08T15:33:33.922575Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: /home/runner/.ya/build/build_root/663n/00136c/r3tmp/yandexZQtPzY.tmp 2026-01-08T15:33:33.922606Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: /home/runner/.ya/build/build_root/663n/00136c/r3tmp/yandexZQtPzY.tmp 2026-01-08T15:33:33.922766Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:202: successfully initialized from file: /home/runner/.ya/build/build_root/663n/00136c/r3tmp/yandexZQtPzY.tmp 2026-01-08T15:33:33.922859Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2026-01-08T15:33:34.007201Z INFO: TTestServer started on Port 7790 GrpcPort 20063 PQClient connected to localhost:20063 2026-01-08T15:33:34.534337Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976720657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at 
ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2026-01-08T15:33:37.518697Z node 1 :METADATA_PROVIDER ERROR: log.cpp:841: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7593014325080710845:2089];send_to=[0:7307199536658146131:7762515]; 2026-01-08T15:33:37.518767Z node 1 :METADATA_PROVIDER ERROR: log.cpp:841: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2026-01-08T15:33:37.755052Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7593014346555548636:2332], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:33:37.755184Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:263: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:33:37.755731Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7593014346555548649:2336], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:33:37.755819Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7593014346555548650:2337], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:33:37.755904Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:263: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:33:37.760930Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976720661:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2026-01-08T15:33:37.763161Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7593014345231605369:2306], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:33:37.763240Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7593014345231605358:2303], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:33:37.763446Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:263: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:33:37.773428Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7593014345231605374:2308], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:33:37.773530Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:263: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:33:37.846715Z node 2 :TX_PROXY ERROR: schemereq.cpp:624: Actor# [2:7593014345231605373:2135] txid# 281474976715657, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 44], type: EPathTypeResourcePool, state: EPathStateCreate)" severity: 1 } 2026-01-08T15:33:37.854515Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7593014346555548653:2338], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976720661 completed, doublechecking } 2026-01-08T15:33:37.931673Z node 1 :TX_PROXY ERROR: schemereq.cpp:624: Actor# [1:7593014346555548747:2987] txid# 281474976720662, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 44], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2026-01-08T15:33:38.092798Z node 2 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:625: Compilation failed, self: [2:7593014345231605412:2311], status: SCHEME_ERROR, issues:
: Error: Type annotation, code: 1030
:3:16: Error: At function: KiReadTable!
:3:16: Error: Cannot find table 'db.[/Root/PQ/Config/V2/Cluster]' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions., code: 2003 2026-01-08T15:33:38.093105Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976720663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:33:38.093585Z node 2 :KQP_SESSION WARN: {KQPSA@kqp_session_actor.cpp:2679} SessionId: ydb://session/3?node_id=2&id=M2M3NDU1N2ItMTU4NWFiZGYtNmY5MTQ2NWItMTYwMDQ5MWE=, ActorId: [2:7593014345231605355:2302], ActorState: ExecuteState, LegacyTraceId: 01kef3vvv150pp35p8v66fvqgw, ReplyQueryCompileError, remove tx status# SCHEME_ERROR issues# { message: "Type annotation" issue_code: 1030 severity: 1 issues { position { row: 3 column: 16 } message: "At function: KiReadTable!" end_position { row: 3 column: 16 } severity: 1 issues { position { row: 3 column: 16 } message: "Cannot find table \'db.[/Root/PQ/Config/V2/Cluster]\' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions." end_position { row: 3 column ... 037892][Partition][0][StateIdle] Try persist === Waiting for repair >>> Ready to answer: ok === Closing the session 2026-01-08T15:34:32.037260Z :INFO: [/Root] [] [] Start federated write session to database 'dc2' (previous was ) FederationState: { Status: SUCCESS SelfLocation: "fancy_datacenter" DbInfos: [ { name: "dc1" path: "/Root" id: "account-dc1" endpoint: "localhost:22710" location: "dc1" status: AVAILABLE weight: 1000 } { name: "dc2" path: "/Root" id: "account-dc2" endpoint: "localhost:22710" location: "dc2" status: AVAILABLE weight: 500 } { name: "dc3" path: "/Root" id: "account-dc3" endpoint: "localhost:22710" location: "dc3" status: AVAILABLE weight: 500 } ] ControlPlaneEndpoint: cp.logbroker-federation:2135 } 2026-01-08T15:34:32.050531Z node 3 :PERSQUEUE DEBUG: partition.cpp:2329: [72075186224037892][Partition][0][StateIdle] Process user action and tx events 2026-01-08T15:34:32.050564Z node 3 :PERSQUEUE DEBUG: partition.cpp:2388: [72075186224037892][Partition][0][StateIdle] Events: 0, PendingCommits: 0, PendingWrites: 0 2026-01-08T15:34:32.050577Z node 3 :PERSQUEUE DEBUG: partition.cpp:2337: [72075186224037892][Partition][0][StateIdle] Process user action and tx pending commits 2026-01-08T15:34:32.050595Z node 3 :PERSQUEUE DEBUG: partition.cpp:2388: [72075186224037892][Partition][0][StateIdle] Events: 0, PendingCommits: 0, PendingWrites: 0 2026-01-08T15:34:32.050607Z node 3 :PERSQUEUE DEBUG: partition.cpp:2347: [72075186224037892][Partition][0][StateIdle] Try persist 2026-01-08T15:34:32.055030Z :DEBUG: [/Root] TraceId [] SessionId [] MessageGroupId [src_id] Write session: try to update token 2026-01-08T15:34:32.055507Z :INFO: [/Root] TraceId [] SessionId [] MessageGroupId [src_id] Start write session. 
Will connect to nodeId: 0 2026-01-08T15:34:32.061637Z :DEBUG: [/Root] TraceId [] SessionId [] MessageGroupId [src_id] Write session: write to message_group: src_id 2026-01-08T15:34:32.061777Z :DEBUG: [/Root] TraceId [] SessionId [] MessageGroupId [src_id] Write session: send init request: init_request { path: "test-topic" message_group_id: "src_id" } 2026-01-08T15:34:32.062139Z :DEBUG: [/Root] TraceId [] SessionId [] MessageGroupId [src_id] Write session: OnWriteDone gRpcStatusCode: 0 2026-01-08T15:34:32.062535Z :INFO: [/Root] TraceId [] SessionId [] MessageGroupId [src_id] Write session: close. Timeout 0.000000s 2026-01-08T15:34:32.062574Z :INFO: [/Root] TraceId [] SessionId [] MessageGroupId [src_id] Write session will now close 2026-01-08T15:34:32.062640Z :DEBUG: [/Root] TraceId [] SessionId [] MessageGroupId [src_id] Write session: aborting 2026-01-08T15:34:32.063062Z :INFO: [/Root] TraceId [] SessionId [] MessageGroupId [src_id] Write session: gracefully shut down, all writes complete 2026-01-08T15:34:32.063122Z :DEBUG: [/Root] TraceId [] SessionId [] MessageGroupId [src_id] Write session: destroy 2026-01-08T15:34:32.061990Z node 3 :PQ_WRITE_PROXY DEBUG: grpc_pq_write.h:107: new grpc connection 2026-01-08T15:34:32.062030Z node 3 :PQ_WRITE_PROXY DEBUG: grpc_pq_write.h:141: new session created cookie 2 2026-01-08T15:34:32.064836Z node 3 :PQ_WRITE_PROXY DEBUG: write_session_actor.cpp:254: session v1 cookie: 2 sessionId: grpc read done: success: 1 data: init_request { path: "test-topic" message_group_id: "src_id" } 2026-01-08T15:34:32.065016Z node 3 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:460: session request cookie: 2 path: "test-topic" message_group_id: "src_id" from ipv6:[::1]:50580 2026-01-08T15:34:32.065042Z node 3 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:1551: write session: cookie=2 sessionId= userAgent="topic server" ip=ipv6:[::1]:50580 proto=topic topic=test-topic durationSec=0 2026-01-08T15:34:32.065054Z node 3 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:584: init check schema 2026-01-08T15:34:32.066745Z node 3 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:645: session v1 cookie: 2 sessionId: describe result for acl check 2026-01-08T15:34:32.066851Z node 3 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:232: session v1 cookie: 2 sessionId: grpc closed 2026-01-08T15:34:32.066865Z node 3 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:304: session v1 cookie: 2 sessionId: is DEAD 2026-01-08T15:34:32.067753Z node 3 :PQ_PARTITION_CHOOSER DEBUG: partition_chooser_impl__table_helper.h:62: TTableHelper SelectQuery: --!syntax_v1 DECLARE $Hash AS Uint32; DECLARE $Topic AS Utf8; DECLARE $SourceId AS Utf8; SELECT Partition, CreateTime, AccessTime, SeqNo FROM `/Root/PQ/SourceIdMeta2` WHERE Hash == $Hash AND Topic == $Topic AND SourceId == $SourceId; 2026-01-08T15:34:32.067768Z node 3 :PQ_PARTITION_CHOOSER DEBUG: partition_chooser_impl__table_helper.h:63: TTableHelper UpdateQuery: --!syntax_v1 DECLARE $SourceId AS Utf8; DECLARE $Topic AS Utf8; DECLARE $Hash AS Uint32; DECLARE $Partition AS Uint32; DECLARE $CreateTime AS Uint64; DECLARE $AccessTime AS Uint64;DECLARE $SeqNo AS Uint64; UPSERT INTO `/Root/PQ/SourceIdMeta2` (Hash, Topic, SourceId, CreateTime, AccessTime, Partition, SeqNo) VALUES ($Hash, $Topic, $SourceId, $CreateTime, $AccessTime, $Partition, $SeqNo); 2026-01-08T15:34:32.067777Z node 3 :PQ_PARTITION_CHOOSER DEBUG: partition_chooser_impl__table_helper.h:64: TTableHelper UpdateAccessTimeQuery: --!syntax_v1 DECLARE $SourceId AS Utf8; DECLARE $Topic AS Utf8; DECLARE $Hash AS 
Uint32; DECLARE $Partition AS Uint32; DECLARE $CreateTime AS Uint64; DECLARE $AccessTime AS Uint64; UPDATE `/Root/PQ/SourceIdMeta2` SET AccessTime = $AccessTime WHERE Hash = $Hash AND Topic = $Topic AND SourceId = $SourceId AND Partition = $Partition; 2026-01-08T15:34:32.067796Z node 3 :PQ_PARTITION_CHOOSER DEBUG: partition_chooser_impl__abstract_chooser_actor.h:111: TPartitionChooser [3:7593014581589887330:2804] (SourceId=src_id, PreferedPartition=(NULL)) StartKqpSession 2026-01-08T15:34:32.153015Z node 3 :PERSQUEUE DEBUG: partition.cpp:2329: [72075186224037892][Partition][0][StateIdle] Process user action and tx events 2026-01-08T15:34:32.153054Z node 3 :PERSQUEUE DEBUG: partition.cpp:2388: [72075186224037892][Partition][0][StateIdle] Events: 0, PendingCommits: 0, PendingWrites: 0 2026-01-08T15:34:32.153068Z node 3 :PERSQUEUE DEBUG: partition.cpp:2337: [72075186224037892][Partition][0][StateIdle] Process user action and tx pending commits 2026-01-08T15:34:32.153092Z node 3 :PERSQUEUE DEBUG: partition.cpp:2388: [72075186224037892][Partition][0][StateIdle] Events: 0, PendingCommits: 0, PendingWrites: 0 2026-01-08T15:34:32.153107Z node 3 :PERSQUEUE DEBUG: partition.cpp:2347: [72075186224037892][Partition][0][StateIdle] Try persist 2026-01-08T15:34:32.256207Z node 3 :PERSQUEUE DEBUG: partition.cpp:2329: [72075186224037892][Partition][0][StateIdle] Process user action and tx events 2026-01-08T15:34:32.256245Z node 3 :PERSQUEUE DEBUG: partition.cpp:2388: [72075186224037892][Partition][0][StateIdle] Events: 0, PendingCommits: 0, PendingWrites: 0 2026-01-08T15:34:32.256257Z node 3 :PERSQUEUE DEBUG: partition.cpp:2337: [72075186224037892][Partition][0][StateIdle] Process user action and tx pending commits 2026-01-08T15:34:32.256276Z node 3 :PERSQUEUE DEBUG: partition.cpp:2388: [72075186224037892][Partition][0][StateIdle] Events: 0, PendingCommits: 0, PendingWrites: 0 2026-01-08T15:34:32.256288Z node 3 :PERSQUEUE DEBUG: partition.cpp:2347: [72075186224037892][Partition][0][StateIdle] Try persist 2026-01-08T15:34:32.356139Z node 3 :PERSQUEUE DEBUG: partition.cpp:2329: [72075186224037892][Partition][0][StateIdle] Process user action and tx events 2026-01-08T15:34:32.356176Z node 3 :PERSQUEUE DEBUG: partition.cpp:2388: [72075186224037892][Partition][0][StateIdle] Events: 0, PendingCommits: 0, PendingWrites: 0 2026-01-08T15:34:32.356191Z node 3 :PERSQUEUE DEBUG: partition.cpp:2337: [72075186224037892][Partition][0][StateIdle] Process user action and tx pending commits 2026-01-08T15:34:32.356210Z node 3 :PERSQUEUE DEBUG: partition.cpp:2388: [72075186224037892][Partition][0][StateIdle] Events: 0, PendingCommits: 0, PendingWrites: 0 2026-01-08T15:34:32.356224Z node 3 :PERSQUEUE DEBUG: partition.cpp:2347: [72075186224037892][Partition][0][StateIdle] Try persist 2026-01-08T15:34:32.458442Z node 3 :PERSQUEUE DEBUG: partition.cpp:2329: [72075186224037892][Partition][0][StateIdle] Process user action and tx events 2026-01-08T15:34:32.458486Z node 3 :PERSQUEUE DEBUG: partition.cpp:2388: [72075186224037892][Partition][0][StateIdle] Events: 0, PendingCommits: 0, PendingWrites: 0 2026-01-08T15:34:32.458504Z node 3 :PERSQUEUE DEBUG: partition.cpp:2337: [72075186224037892][Partition][0][StateIdle] Process user action and tx pending commits 2026-01-08T15:34:32.458524Z node 3 :PERSQUEUE DEBUG: partition.cpp:2388: [72075186224037892][Partition][0][StateIdle] Events: 0, PendingCommits: 0, PendingWrites: 0 2026-01-08T15:34:32.458537Z node 3 :PERSQUEUE DEBUG: partition.cpp:2347: 
[72075186224037892][Partition][0][StateIdle] Try persist
2026-01-08T15:34:32.558476Z node 3 :PERSQUEUE DEBUG: partition.cpp:2329: [72075186224037892][Partition][0][StateIdle] Process user action and tx events
2026-01-08T15:34:32.558515Z node 3 :PERSQUEUE DEBUG: partition.cpp:2388: [72075186224037892][Partition][0][StateIdle] Events: 0, PendingCommits: 0, PendingWrites: 0
2026-01-08T15:34:32.558531Z node 3 :PERSQUEUE DEBUG: partition.cpp:2337: [72075186224037892][Partition][0][StateIdle] Process user action and tx pending commits
2026-01-08T15:34:32.558552Z node 3 :PERSQUEUE DEBUG: partition.cpp:2388: [72075186224037892][Partition][0][StateIdle] Events: 0, PendingCommits: 0, PendingWrites: 0
2026-01-08T15:34:32.558567Z node 3 :PERSQUEUE DEBUG: partition.cpp:2347: [72075186224037892][Partition][0][StateIdle] Try persist
2026-01-08T15:34:32.659102Z node 3 :PERSQUEUE DEBUG: partition.cpp:2329: [72075186224037892][Partition][0][StateIdle] Process user action and tx events
2026-01-08T15:34:32.659135Z node 3 :PERSQUEUE DEBUG: partition.cpp:2388: [72075186224037892][Partition][0][StateIdle] Events: 0, PendingCommits: 0, PendingWrites: 0
2026-01-08T15:34:32.659149Z node 3 :PERSQUEUE DEBUG: partition.cpp:2337: [72075186224037892][Partition][0][StateIdle] Process user action and tx pending commits
2026-01-08T15:34:32.659170Z node 3 :PERSQUEUE DEBUG: partition.cpp:2388: [72075186224037892][Partition][0][StateIdle] Events: 0, PendingCommits: 0, PendingWrites: 0
2026-01-08T15:34:32.659185Z node 3 :PERSQUEUE DEBUG: partition.cpp:2347: [72075186224037892][Partition][0][StateIdle] Try persist
2026-01-08T15:34:32.762192Z node 3 :PERSQUEUE DEBUG: partition.cpp:2329: [72075186224037892][Partition][0][StateIdle] Process user action and tx events
2026-01-08T15:34:32.762231Z node 3 :PERSQUEUE DEBUG: partition.cpp:2388: [72075186224037892][Partition][0][StateIdle] Events: 0, PendingCommits: 0, PendingWrites: 0
2026-01-08T15:34:32.762243Z node 3 :PERSQUEUE DEBUG: partition.cpp:2337: [72075186224037892][Partition][0][StateIdle] Process user action and tx pending commits
2026-01-08T15:34:32.762275Z node 3 :PERSQUEUE DEBUG: partition.cpp:2388: [72075186224037892][Partition][0][StateIdle] Events: 0, PendingCommits: 0, PendingWrites: 0
2026-01-08T15:34:32.762289Z node 3 :PERSQUEUE DEBUG: partition.cpp:2347: [72075186224037892][Partition][0][StateIdle] Try persist
2026-01-08T15:34:32.862494Z node 3 :PERSQUEUE DEBUG: partition.cpp:2329: [72075186224037892][Partition][0][StateIdle] Process user action and tx events
2026-01-08T15:34:32.862528Z node 3 :PERSQUEUE DEBUG: partition.cpp:2388: [72075186224037892][Partition][0][StateIdle] Events: 0, PendingCommits: 0, PendingWrites: 0
2026-01-08T15:34:32.862543Z node 3 :PERSQUEUE DEBUG: partition.cpp:2337: [72075186224037892][Partition][0][StateIdle] Process user action and tx pending commits
2026-01-08T15:34:32.862565Z node 3 :PERSQUEUE DEBUG: partition.cpp:2388: [72075186224037892][Partition][0][StateIdle] Events: 0, PendingCommits: 0, PendingWrites: 0
2026-01-08T15:34:32.862581Z node 3 :PERSQUEUE DEBUG: partition.cpp:2347: [72075186224037892][Partition][0][StateIdle] Try persist
|87.4%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/s3/py3test >> test_s3.py::TestYdbS3TTL::test_s3[table_index_3__SYNC-pk_types6-all_types6-index6---SYNC] [GOOD]
|87.4%| [TM] {BAZEL_UPLOAD} ydb/public/sdk/cpp/src/client/federated_topic/ut/unittest
|87.4%| [TM] {BAZEL_UPLOAD} ydb/tests/datashard/s3/py3test >> test_s3.py::TestYdbS3TTL::test_s3[table_index_4_UNIQUE_SYNC-pk_types0-all_types0-index0--UNIQUE-SYNC] [GOOD]
------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/rbo/unittest >> KqpRboPg::Bench_JoinFilter [GOOD]
Test command err:
Trying to start YDB, gRPC: 9803, MsgBus: 16934
2026-01-08T15:34:05.966931Z node 1 :METADATA_PROVIDER WARN: log.cpp:841: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7593014466168337362:2070];send_to=[0:7307199536658146131:7762515];
2026-01-08T15:34:05.968272Z node 1 :METADATA_PROVIDER ERROR: log.cpp:841: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message;
2026-01-08T15:34:06.231452Z node 1 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: LookupError, path: Root/.metadata/script_executions
2026-01-08T15:34:06.242038Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected
2026-01-08T15:34:06.242171Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting
2026-01-08T15:34:06.259856Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected
2026-01-08T15:34:06.371985Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1374: Notification cookie mismatch for subscription [1:7593014466168337324:2081] 1767886445961487 != 1767886445961490
2026-01-08T15:34:06.378988Z node 1 :IMPORT WARN: schemeshard_import.cpp:440: Table profiles were not loaded
2026-01-08T15:34:06.462427Z node 1 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: PathErrorUnknown, path: Root/.metadata/script_executions
2026-01-08T15:34:06.512231Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe)
2026-01-08T15:34:06.512252Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe)
2026-01-08T15:34:06.512258Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe)
2026-01-08T15:34:06.512351Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration
2026-01-08T15:34:06.974884Z node 1 :TX_CONVEYOR ERROR: log.cpp:841: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup;
2026-01-08T15:34:07.188795Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311)
2026-01-08T15:34:10.212159Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7593014487643174697:2325], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:34:10.212264Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7593014487643174705:2328], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:34:10.212338Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:263: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:34:10.217135Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7593014487643174712:2330], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:34:10.217217Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:263: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:34:10.217830Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2026-01-08T15:34:10.235777Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7593014487643174711:2329], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710658 completed, doublechecking } 2026-01-08T15:34:10.384024Z node 1 :TX_PROXY ERROR: schemereq.cpp:624: Actor# [1:7593014487643174764:2538] txid# 281474976710659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 41], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2026-01-08T15:34:10.963678Z node 1 :METADATA_PROVIDER ERROR: log.cpp:841: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7593014466168337362:2070];send_to=[0:7307199536658146131:7762515]; 2026-01-08T15:34:10.970675Z node 1 :METADATA_PROVIDER ERROR: log.cpp:841: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; Trying to start YDB, gRPC: 6329, MsgBus: 12367 2026-01-08T15:34:12.597258Z node 2 :METADATA_PROVIDER WARN: log.cpp:841: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7593014496201535049:2072];send_to=[0:7307199536658146131:7762515]; 2026-01-08T15:34:12.597310Z node 2 :METADATA_PROVIDER ERROR: log.cpp:841: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2026-01-08T15:34:12.617416Z node 2 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: LookupError, path: Root/.metadata/script_executions 2026-01-08T15:34:12.710651Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1374: Notification cookie mismatch for subscription [2:7593014496201535007:2081] 1767886452594964 != 1767886452594967 2026-01-08T15:34:12.718284Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2026-01-08T15:34:12.718387Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2026-01-08T15:34:12.744539Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2026-01-08T15:34:12.756937Z node 2 :IMPORT WARN: schemeshard_import.cpp:440: Table profiles were not loaded 2026-01-08T15:34:12.882567Z node 2 :GRPC_SERVER WARN: grpc_request_proxy.cpp:575: SchemeBoardDelete /Root Strong=0 2026-01-08T15:34:12.898578Z node 2 :GRPC_SERVER WARN: grpc_request_proxy.cpp:575: SchemeBoardDelete /Root Strong=0 2026-01-08T15:34:12.907984Z node 2 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: LookupError, path: Root/.metadata/script_executions 2026-01-08T15:34:12.932614Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2026-01-08T15:34:12.932642Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2026-01-08T15:34:12.932651Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2026-01-08T15:34:12.932718Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2026-01-08T15:34:13.348668Z node 2 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check 
script execution tables existence, scheme status: PathErrorUnknown, path: Root/.metadata/script_executions 2026-01-08T15:34:13.595898Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2026-01-08T15:34:13.603389Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:123: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2026-01-08T15:34:13.612864Z node 2 :TX_CONVEYOR ERROR: log.cpp:841: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2026-01-08T15:34:16.729471Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7593014513381405082:2325], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:34:16.729571Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:263: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:34:16.730730Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7593014513381405092:2326], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:34:16.730816Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:263: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:34:16.985233Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:34:17.151173Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7593014517676372483:2337], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:34:17.151269Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:263: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:34:17.151441Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7593014517676372488:2340], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T ... 480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:34:24.435593Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:34:24.510749Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7593014547551440523:2344], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:34:24.510871Z node 3 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:263: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:34:24.511201Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7593014547551440528:2347], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:34:24.511245Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7593014547551440529:2348], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:34:24.511366Z node 3 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:263: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:34:24.516149Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710660:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2026-01-08T15:34:24.531893Z node 3 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [3:7593014547551440532:2349], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710660 completed, doublechecking } 2026-01-08T15:34:24.605240Z node 3 :TX_PROXY ERROR: schemereq.cpp:624: Actor# [3:7593014547551440583:2641] txid# 281474976710661, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 43], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2026-01-08T15:34:24.814422Z node 3 :METADATA_PROVIDER ERROR: log.cpp:841: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[3:7593014526076603209:2254];send_to=[0:7307199536658146131:7762515]; 2026-01-08T15:34:24.814500Z node 3 :METADATA_PROVIDER ERROR: log.cpp:841: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; Trying to start YDB, gRPC: 15488, MsgBus: 16329 2026-01-08T15:34:27.442219Z node 4 :METADATA_PROVIDER WARN: log.cpp:841: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[4:7593014560943378032:2202];send_to=[0:7307199536658146131:7762515]; 2026-01-08T15:34:27.442274Z node 4 :METADATA_PROVIDER ERROR: log.cpp:841: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2026-01-08T15:34:27.482105Z node 4 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:125: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, got undelivered to scheme cache: ActorUnknown 2026-01-08T15:34:27.649691Z node 4 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: LookupError, path: Root/.metadata/script_executions 2026-01-08T15:34:27.654857Z node 4 :IMPORT WARN: schemeshard_import.cpp:440: Table profiles were not loaded 2026-01-08T15:34:27.671266Z node 4 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1374: Notification cookie mismatch for subscription [4:7593014560943377857:2081] 1767886467414780 != 1767886467414783 2026-01-08T15:34:27.675092Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2026-01-08T15:34:27.675375Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2026-01-08T15:34:27.697617Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Connecting -> Connected 2026-01-08T15:34:27.732661Z node 4 :BS_NODE WARN: {NWDC01@distconf.cpp:423} StateFunc too long Type# 268639257 Duration# 0.005418s 2026-01-08T15:34:27.890425Z node 4 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: PathErrorUnknown, path: Root/.metadata/script_executions 2026-01-08T15:34:27.930998Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2026-01-08T15:34:27.931025Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2026-01-08T15:34:27.931032Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2026-01-08T15:34:27.931119Z node 4 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 
2026-01-08T15:34:28.465205Z node 4 :TX_CONVEYOR ERROR: log.cpp:841: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2026-01-08T15:34:28.907214Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2026-01-08T15:34:28.915770Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:123: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2026-01-08T15:34:32.442903Z node 4 :METADATA_PROVIDER ERROR: log.cpp:841: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[4:7593014560943378032:2202];send_to=[0:7307199536658146131:7762515]; 2026-01-08T15:34:32.445308Z node 4 :METADATA_PROVIDER ERROR: log.cpp:841: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2026-01-08T15:34:33.178487Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7593014586713182525:2326], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:34:33.178616Z node 4 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:263: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:34:33.179441Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7593014586713182535:2327], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:34:33.179547Z node 4 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:263: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:34:33.204450Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:34:33.314471Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:34:33.571126Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7593014586713182706:2347], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:34:33.571213Z node 4 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:263: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:34:33.571526Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7593014586713182711:2350], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:34:33.571558Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7593014586713182712:2351], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:34:33.571581Z node 4 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:263: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:34:33.575738Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715660:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2026-01-08T15:34:33.586950Z node 4 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [4:7593014586713182715:2352], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715660 completed, doublechecking } 2026-01-08T15:34:33.659971Z node 4 :TX_PROXY ERROR: schemereq.cpp:624: Actor# [4:7593014586713182766:2645] txid# 281474976715661, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 43], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } |87.4%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/schemeshard/ut_export_reboots_s3/ydb-core-tx-schemeshard-ut_export_reboots_s3 |87.4%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/schemeshard/ut_export_reboots_s3/ydb-core-tx-schemeshard-ut_export_reboots_s3 |87.4%| [LD] {RESULT} $(B)/ydb/core/tx/schemeshard/ut_export_reboots_s3/ydb-core-tx-schemeshard-ut_export_reboots_s3 |87.4%| [TM] {BAZEL_UPLOAD} ydb/core/kqp/ut/rbo/unittest >> KqpBatchDelete::ColumnTable [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-LocalUser-DropUser-59 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-LocalUser-DropUser-60 |87.4%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_continuous_backup_reboots/unittest |87.4%| [TM] {BAZEL_UPLOAD} ydb/core/tx/schemeshard/ut_continuous_backup_reboots/unittest |87.4%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_continuous_backup_reboots/unittest |87.4%| [TM] {BAZEL_UPLOAD} ydb/core/tx/schemeshard/ut_continuous_backup_reboots/unittest |87.4%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_continuous_backup_reboots/unittest |87.4%| [TM] {BAZEL_UPLOAD} ydb/core/tx/schemeshard/ut_continuous_backup_reboots/unittest |87.4%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_continuous_backup_reboots/unittest |87.4%| [TM] {BAZEL_UPLOAD} ydb/core/tx/schemeshard/ut_continuous_backup_reboots/unittest |87.4%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_continuous_backup_reboots/unittest |87.4%| [TM] {BAZEL_UPLOAD} ydb/core/tx/schemeshard/ut_continuous_backup_reboots/unittest |87.4%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_continuous_backup_reboots/unittest |87.4%| [TM] {BAZEL_UPLOAD} ydb/core/tx/schemeshard/ut_continuous_backup_reboots/unittest >> TContinuousBackupWithRebootsTests::Basic >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-LocalUser-CreateUser-12 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-LocalUser-CreateUser-13 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/batch_operations/unittest >> KqpBatchDelete::ColumnTable [GOOD] Test command err: Trying to start YDB, gRPC: 61059, MsgBus: 25843 2026-01-08T15:34:28.564869Z node 1 :METADATA_PROVIDER WARN: log.cpp:841: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7593014565241885195:2213];send_to=[0:7307199536658146131:7762515]; 2026-01-08T15:34:28.565334Z node 1 :METADATA_PROVIDER ERROR: log.cpp:841: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2026-01-08T15:34:28.839890Z node 1 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: LookupError, path: Root/.metadata/script_executions 2026-01-08T15:34:28.858721Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> 
Disconnected 2026-01-08T15:34:28.858892Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2026-01-08T15:34:28.899698Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2026-01-08T15:34:29.008647Z node 1 :IMPORT WARN: schemeshard_import.cpp:440: Table profiles were not loaded 2026-01-08T15:34:29.066301Z node 1 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: PathErrorUnknown, path: Root/.metadata/script_executions 2026-01-08T15:34:29.317813Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2026-01-08T15:34:29.317836Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2026-01-08T15:34:29.317842Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2026-01-08T15:34:29.317908Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2026-01-08T15:34:29.570638Z node 1 :TX_CONVEYOR ERROR: log.cpp:841: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2026-01-08T15:34:30.114678Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2026-01-08T15:34:32.627828Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7593014582421755064:2326], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:34:32.628020Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:263: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:34:32.628683Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7593014582421755076:2329], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:34:32.628731Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7593014582421755077:2330], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:34:32.628899Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:263: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:34:32.635020Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2026-01-08T15:34:32.666603Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7593014582421755080:2331], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715658 completed, doublechecking } 2026-01-08T15:34:32.815161Z node 1 :TX_PROXY ERROR: schemereq.cpp:624: Actor# [1:7593014582421755131:2538] txid# 281474976715659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 41], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2026-01-08T15:34:33.183405Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateColumnTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/olap/operations/create_table.cpp:814) 2026-01-08T15:34:33.566597Z node 1 :METADATA_PROVIDER ERROR: log.cpp:841: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7593014565241885195:2213];send_to=[0:7307199536658146131:7762515]; 2026-01-08T15:34:33.566712Z node 1 :METADATA_PROVIDER ERROR: log.cpp:841: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2026-01-08T15:34:33.764250Z node 1 :TX_COLUMNSHARD WARN: log.cpp:841: tablet_id=72075186224037903;self_id=[1:7593014586716722829:2338];tablet_id=72075186224037903;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2026-01-08T15:34:33.764477Z node 1 :TX_COLUMNSHARD WARN: log.cpp:841: tablet_id=72075186224037903;self_id=[1:7593014586716722829:2338];tablet_id=72075186224037903;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2026-01-08T15:34:33.764665Z node 1 :TX_COLUMNSHARD WARN: log.cpp:841: tablet_id=72075186224037903;self_id=[1:7593014586716722829:2338];tablet_id=72075186224037903;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2026-01-08T15:34:33.764729Z node 1 :TX_COLUMNSHARD WARN: log.cpp:841: tablet_id=72075186224037919;self_id=[1:7593014586716722831:2340];tablet_id=72075186224037919;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2026-01-08T15:34:33.764754Z node 1 :TX_COLUMNSHARD WARN: log.cpp:841: tablet_id=72075186224037919;self_id=[1:7593014586716722831:2340];tablet_id=72075186224037919;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2026-01-08T15:34:33.764795Z node 1 :TX_COLUMNSHARD WARN: log.cpp:841: tablet_id=72075186224037903;self_id=[1:7593014586716722829:2338];tablet_id=72075186224037903;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2026-01-08T15:34:33.764916Z node 1 :TX_COLUMNSHARD WARN: log.cpp:841: tablet_id=72075186224037919;self_id=[1:7593014586716722831:2340];tablet_id=72075186224037919;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2026-01-08T15:34:33.764990Z node 1 :TX_COLUMNSHARD WARN: log.cpp:841: tablet_id=72075186224037919;self_id=[1:7593014586716722831:2340];tablet_id=72075186224037919;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2026-01-08T15:34:33.765067Z node 1 :TX_COLUMNSHARD WARN: log.cpp:841: 
tablet_id=72075186224037919;self_id=[1:7593014586716722831:2340];tablet_id=72075186224037919;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2026-01-08T15:34:33.765156Z node 1 :TX_COLUMNSHARD WARN: log.cpp:841: tablet_id=72075186224037919;self_id=[1:7593014586716722831:2340];tablet_id=72075186224037919;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2026-01-08T15:34:33.765288Z node 1 :TX_COLUMNSHARD WARN: log.cpp:841: tablet_id=72075186224037919;self_id=[1:7593014586716722831:2340];tablet_id=72075186224037919;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2026-01-08T15:34:33.765397Z node 1 :TX_COLUMNSHARD WARN: log.cpp:841: tablet_id=72075186224037919;self_id=[1:7593014586716722831:2340];tablet_id=72075186224037919;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2026-01-08T15:34:33.765542Z node 1 :TX_COLUMNSHARD WARN: log.cpp:841: tablet_id=72075186224037919;self_id=[1:7593014586716722831:2340];tablet_id=72075186224037919;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2026-01-08T15:34:33.765657Z node 1 :TX_COLUMNSHARD WARN: log.cpp:841: tablet_id=72075186224037919;self_id=[1:7593014586716722831:2340];tablet_id=72075186224037919;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2026-01-08T15:34:33.765765Z node 1 :TX_COLUMNSHARD WARN: log.cpp:841: tablet_id=72075186224037919;self_id=[1:7593014586716722831:2340];tablet_id=72075186224037919;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2026-01-08T15:34:33.765864Z node 1 :TX_COLUMNSHARD WARN: log.cpp:841: tablet_id=72075186224037919;self_id=[1:7593014586716722831:2340];tablet_id=72075186224037919;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CopyBlobIdsToV2; 2026-01-08T15:34:33.765953Z node 1 :TX_COLUMNSHARD WARN: log.cpp:841: tablet_id=72075186224037919;self_id=[1:7593014586716722831:2340];tablet_id=72075186224037919;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreAppearanceSnapshot; 2026-01-08T15:34:33.766169Z node 1 :TX_COLUMNSHARD WARN: log.cpp:841: tablet_id=72075186224037903;self_id=[1:7593014586716722829:2338];tablet_id=72075186224037903;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2026-01-08T15:34:33.766270Z node 1 :TX_COLUMNSHARD WARN: log.cpp:841: tablet_id=72075186224037903;self_id=[1:7593014586716722829:2338];tablet_id=72075186224037903;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2026-01-08T15:34:33.766383Z node 1 :TX_COLUMNSHARD WARN: log.cpp:841: tablet_id=72075186224037903;self_id=[1:7593014586716722829:2338];tablet_id=72075186224037903;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2026-01-08T15:34:33.766476Z node 1 :TX_COLUMNSHARD WARN: log.cpp:841: 
tablet_id=72075186224037903;self_id=[1:7593014586716722829:2338];tablet_id=72075186224037903;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2026-01-08T15:34:33.766599Z node 1 ... pp:57;method=resolve_internal_path_id;ss_local=42;result=not_found; 2026-01-08T15:34:35.825272Z node 1 :TX_COLUMNSHARD WARN: log.cpp:841: tablet_id=72075186224037898;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715660;fline=tables_manager.cpp:57;method=resolve_internal_path_id;ss_local=42;result=not_found; 2026-01-08T15:34:35.825320Z node 1 :TX_COLUMNSHARD WARN: log.cpp:841: tablet_id=72075186224037898;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715660;fline=tables_manager.cpp:57;method=resolve_internal_path_id;ss_local=42;result=not_found; 2026-01-08T15:34:35.825329Z node 1 :TX_COLUMNSHARD WARN: log.cpp:841: tablet_id=72075186224037898;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715660;fline=tables_manager.cpp:57;method=resolve_internal_path_id;ss_local=42;result=not_found; 2026-01-08T15:34:35.829534Z node 1 :TX_COLUMNSHARD WARN: log.cpp:841: tablet_id=72075186224037907;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715660;fline=tables_manager.cpp:57;method=resolve_internal_path_id;ss_local=42;result=not_found; 2026-01-08T15:34:35.829593Z node 1 :TX_COLUMNSHARD WARN: log.cpp:841: tablet_id=72075186224037907;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715660;fline=tables_manager.cpp:57;method=resolve_internal_path_id;ss_local=42;result=not_found; 2026-01-08T15:34:35.829607Z node 1 :TX_COLUMNSHARD WARN: log.cpp:841: tablet_id=72075186224037907;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715660;fline=tables_manager.cpp:57;method=resolve_internal_path_id;ss_local=42;result=not_found; 2026-01-08T15:34:35.830957Z node 1 :TX_COLUMNSHARD WARN: log.cpp:841: tablet_id=72075186224037895;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715660;fline=tables_manager.cpp:57;method=resolve_internal_path_id;ss_local=42;result=not_found; 2026-01-08T15:34:35.831017Z node 1 :TX_COLUMNSHARD WARN: log.cpp:841: tablet_id=72075186224037895;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715660;fline=tables_manager.cpp:57;method=resolve_internal_path_id;ss_local=42;result=not_found; 2026-01-08T15:34:35.831032Z node 1 :TX_COLUMNSHARD WARN: log.cpp:841: tablet_id=72075186224037895;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715660;fline=tables_manager.cpp:57;method=resolve_internal_path_id;ss_local=42;result=not_found; 2026-01-08T15:34:35.837822Z node 1 :TX_COLUMNSHARD WARN: log.cpp:841: tablet_id=72075186224037919;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715660;fline=tables_manager.cpp:57;method=resolve_internal_path_id;ss_local=42;result=not_found; 2026-01-08T15:34:35.837894Z node 1 :TX_COLUMNSHARD WARN: log.cpp:841: tablet_id=72075186224037919;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715660;fline=tables_manager.cpp:57;method=resolve_internal_path_id;ss_local=42;result=not_found; 2026-01-08T15:34:35.837915Z node 1 :TX_COLUMNSHARD WARN: log.cpp:841: tablet_id=72075186224037919;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715660;fline=tables_manager.cpp:57;method=resolve_internal_path_id;ss_local=42;result=not_found; 2026-01-08T15:34:35.838707Z node 1 :TX_COLUMNSHARD WARN: log.cpp:841: 
tablet_id=72075186224037897;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715660;fline=tables_manager.cpp:57;method=resolve_internal_path_id;ss_local=42;result=not_found; 2026-01-08T15:34:35.838761Z node 1 :TX_COLUMNSHARD WARN: log.cpp:841: tablet_id=72075186224037897;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715660;fline=tables_manager.cpp:57;method=resolve_internal_path_id;ss_local=42;result=not_found; 2026-01-08T15:34:35.838777Z node 1 :TX_COLUMNSHARD WARN: log.cpp:841: tablet_id=72075186224037897;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715660;fline=tables_manager.cpp:57;method=resolve_internal_path_id;ss_local=42;result=not_found; 2026-01-08T15:34:35.844510Z node 1 :TX_COLUMNSHARD WARN: log.cpp:841: tablet_id=72075186224037893;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715660;fline=tables_manager.cpp:57;method=resolve_internal_path_id;ss_local=42;result=not_found; 2026-01-08T15:34:35.844573Z node 1 :TX_COLUMNSHARD WARN: log.cpp:841: tablet_id=72075186224037893;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715660;fline=tables_manager.cpp:57;method=resolve_internal_path_id;ss_local=42;result=not_found; 2026-01-08T15:34:35.844595Z node 1 :TX_COLUMNSHARD WARN: log.cpp:841: tablet_id=72075186224037893;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715660;fline=tables_manager.cpp:57;method=resolve_internal_path_id;ss_local=42;result=not_found; 2026-01-08T15:34:35.850218Z node 1 :TX_COLUMNSHARD WARN: log.cpp:841: tablet_id=72075186224037903;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715660;fline=tables_manager.cpp:57;method=resolve_internal_path_id;ss_local=42;result=not_found; 2026-01-08T15:34:35.850298Z node 1 :TX_COLUMNSHARD WARN: log.cpp:841: tablet_id=72075186224037903;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715660;fline=tables_manager.cpp:57;method=resolve_internal_path_id;ss_local=42;result=not_found; 2026-01-08T15:34:35.850575Z node 1 :TX_COLUMNSHARD WARN: log.cpp:841: tablet_id=72075186224037903;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715660;fline=tables_manager.cpp:57;method=resolve_internal_path_id;ss_local=42;result=not_found; 2026-01-08T15:34:35.850820Z node 1 :TX_COLUMNSHARD WARN: log.cpp:841: tablet_id=72075186224037927;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715660;fline=tables_manager.cpp:57;method=resolve_internal_path_id;ss_local=42;result=not_found; 2026-01-08T15:34:35.850845Z node 1 :TX_COLUMNSHARD WARN: log.cpp:841: tablet_id=72075186224037927;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715660;fline=tables_manager.cpp:57;method=resolve_internal_path_id;ss_local=42;result=not_found; 2026-01-08T15:34:35.850854Z node 1 :TX_COLUMNSHARD WARN: log.cpp:841: tablet_id=72075186224037927;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715660;fline=tables_manager.cpp:57;method=resolve_internal_path_id;ss_local=42;result=not_found; 2026-01-08T15:34:35.856355Z node 1 :TX_COLUMNSHARD WARN: log.cpp:841: tablet_id=72075186224037905;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715660;fline=tables_manager.cpp:57;method=resolve_internal_path_id;ss_local=42;result=not_found; 2026-01-08T15:34:35.856415Z node 1 :TX_COLUMNSHARD WARN: log.cpp:841: tablet_id=72075186224037905;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715660;fline=tables_manager.cpp:57;method=resolve_internal_path_id;ss_local=42;result=not_found; 2026-01-08T15:34:35.856425Z node 1 :TX_COLUMNSHARD 
WARN: log.cpp:841: tablet_id=72075186224037905;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715660;fline=tables_manager.cpp:57;method=resolve_internal_path_id;ss_local=42;result=not_found; 2026-01-08T15:34:35.858992Z node 1 :TX_COLUMNSHARD WARN: log.cpp:841: tablet_id=72075186224037899;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715660;fline=tables_manager.cpp:57;method=resolve_internal_path_id;ss_local=42;result=not_found; 2026-01-08T15:34:35.859042Z node 1 :TX_COLUMNSHARD WARN: log.cpp:841: tablet_id=72075186224037899;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715660;fline=tables_manager.cpp:57;method=resolve_internal_path_id;ss_local=42;result=not_found; 2026-01-08T15:34:35.859056Z node 1 :TX_COLUMNSHARD WARN: log.cpp:841: tablet_id=72075186224037899;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715660;fline=tables_manager.cpp:57;method=resolve_internal_path_id;ss_local=42;result=not_found; 2026-01-08T15:34:35.864795Z node 1 :TX_COLUMNSHARD WARN: log.cpp:841: tablet_id=72075186224037925;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715660;fline=tables_manager.cpp:57;method=resolve_internal_path_id;ss_local=42;result=not_found; 2026-01-08T15:34:35.864864Z node 1 :TX_COLUMNSHARD WARN: log.cpp:841: tablet_id=72075186224037925;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715660;fline=tables_manager.cpp:57;method=resolve_internal_path_id;ss_local=42;result=not_found; 2026-01-08T15:34:35.864879Z node 1 :TX_COLUMNSHARD WARN: log.cpp:841: tablet_id=72075186224037925;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715660;fline=tables_manager.cpp:57;method=resolve_internal_path_id;ss_local=42;result=not_found; 2026-01-08T15:34:35.867109Z node 1 :TX_COLUMNSHARD WARN: log.cpp:841: tablet_id=72075186224037923;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715660;fline=tables_manager.cpp:57;method=resolve_internal_path_id;ss_local=42;result=not_found; 2026-01-08T15:34:35.867163Z node 1 :TX_COLUMNSHARD WARN: log.cpp:841: tablet_id=72075186224037923;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715660;fline=tables_manager.cpp:57;method=resolve_internal_path_id;ss_local=42;result=not_found; 2026-01-08T15:34:35.867175Z node 1 :TX_COLUMNSHARD WARN: log.cpp:841: tablet_id=72075186224037923;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715660;fline=tables_manager.cpp:57;method=resolve_internal_path_id;ss_local=42;result=not_found; 2026-01-08T15:34:35.873153Z node 1 :TX_COLUMNSHARD WARN: log.cpp:841: tablet_id=72075186224037911;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715660;fline=tables_manager.cpp:57;method=resolve_internal_path_id;ss_local=42;result=not_found; 2026-01-08T15:34:35.873233Z node 1 :TX_COLUMNSHARD WARN: log.cpp:841: tablet_id=72075186224037911;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715660;fline=tables_manager.cpp:57;method=resolve_internal_path_id;ss_local=42;result=not_found; 2026-01-08T15:34:35.873248Z node 1 :TX_COLUMNSHARD WARN: log.cpp:841: tablet_id=72075186224037911;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715660;fline=tables_manager.cpp:57;method=resolve_internal_path_id;ss_local=42;result=not_found; 2026-01-08T15:34:35.879976Z node 1 :TX_COLUMNSHARD WARN: log.cpp:841: tablet_id=72075186224037933;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715660;fline=tables_manager.cpp:57;method=resolve_internal_path_id;ss_local=42;result=not_found; 2026-01-08T15:34:35.880054Z node 1 
:TX_COLUMNSHARD WARN: log.cpp:841: tablet_id=72075186224037933;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715660;fline=tables_manager.cpp:57;method=resolve_internal_path_id;ss_local=42;result=not_found; 2026-01-08T15:34:35.880069Z node 1 :TX_COLUMNSHARD WARN: log.cpp:841: tablet_id=72075186224037933;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715660;fline=tables_manager.cpp:57;method=resolve_internal_path_id;ss_local=42;result=not_found; 2026-01-08T15:34:35.880636Z node 1 :TX_COLUMNSHARD WARN: log.cpp:841: tablet_id=72075186224037931;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715660;fline=tables_manager.cpp:57;method=resolve_internal_path_id;ss_local=42;result=not_found; 2026-01-08T15:34:35.880679Z node 1 :TX_COLUMNSHARD WARN: log.cpp:841: tablet_id=72075186224037931;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715660;fline=tables_manager.cpp:57;method=resolve_internal_path_id;ss_local=42;result=not_found; 2026-01-08T15:34:35.880693Z node 1 :TX_COLUMNSHARD WARN: log.cpp:841: tablet_id=72075186224037931;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715660;fline=tables_manager.cpp:57;method=resolve_internal_path_id;ss_local=42;result=not_found; 2026-01-08T15:34:37.473794Z node 1 :KQP_SESSION WARN: {KQPSA@kqp_session_actor.cpp:3171} SessionId: ydb://session/3?node_id=1&id=OGE0Y2ZiOTYtNDM5MmVhYTEtYmViYjVhNTUtYTZmZTU0OWY=, ActorId: [1:7593014582421755032:2319], ActorState: ExecuteState, LegacyTraceId: 01kef3xnke151f2pjm2y0z2ah6, Create QueryResponse for error on request, msg: BATCH operations are not supported for column tables at the current time. status# PRECONDITION_FAILED issues# trace_id# |87.4%| [TM] {BAZEL_UPLOAD} ydb/core/kqp/ut/batch_operations/unittest |87.4%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/s3/py3test >> test_s3.py::TestYdbS3TTL::test_s3[table_index_2__SYNC-pk_types7-all_types7-index7---SYNC] [GOOD] |87.4%| [TM] {BAZEL_UPLOAD} ydb/tests/datashard/s3/py3test >> BasicUsage::AlterDeadLetterPolicy_StreamingConsumer [GOOD] >> BasicUsage::ConflictingWrites >> ColumnBuildTest::Rejecting >> TTxDataShardReshuffleKMeansScan::BuildToBuild [GOOD] >> TTxDataShardReshuffleKMeansScan::BuildToBuildWithOverlap ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/pg/unittest >> KqpPg::CheckPgAutoParams-useSink [GOOD] Test command err: Trying to start YDB, gRPC: 8486, MsgBus: 27545 2026-01-08T15:31:31.212720Z node 1 :METADATA_PROVIDER WARN: log.cpp:841: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7593013804896397750:2155];send_to=[0:7307199536658146131:7762515]; 2026-01-08T15:31:31.212940Z node 1 :METADATA_PROVIDER ERROR: log.cpp:841: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2026-01-08T15:31:31.623196Z node 1 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: LookupError, path: Root/.metadata/script_executions 2026-01-08T15:31:31.636347Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2026-01-08T15:31:31.636496Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2026-01-08T15:31:31.667492Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) 
VolatileState: Connecting -> Connected 2026-01-08T15:31:31.746492Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1374: Notification cookie mismatch for subscription [1:7593013804896397627:2081] 1767886291180299 != 1767886291180302 2026-01-08T15:31:31.756914Z node 1 :IMPORT WARN: schemeshard_import.cpp:440: Table profiles were not loaded 2026-01-08T15:31:31.830638Z node 1 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: PathErrorUnknown, path: Root/.metadata/script_executions 2026-01-08T15:31:32.118928Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2026-01-08T15:31:32.118951Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2026-01-08T15:31:32.118959Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2026-01-08T15:31:32.119034Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2026-01-08T15:31:32.232608Z node 1 :TX_CONVEYOR ERROR: log.cpp:841: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2026-01-08T15:31:33.103921Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2026-01-08T15:31:36.173508Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:31:36.202072Z node 1 :METADATA_PROVIDER ERROR: log.cpp:841: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7593013804896397750:2155];send_to=[0:7307199536658146131:7762515]; 2026-01-08T15:31:36.202426Z node 1 :METADATA_PROVIDER ERROR: log.cpp:841: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; \x62797465612030 \x62797465612030 \x62797465612031 \x62797465612031 \x62797465612032 \x62797465612032 \x62797465612033 \x62797465612033 \x62797465612034 \x62797465612034 \x62797465612035 \x62797465612035 \x62797465612036 \x62797465612036 \x62797465612037 \x62797465612037 \x62797465612038 \x62797465612038 \x62797465612039 \x62797465612039 2026-01-08T15:31:36.520715Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:31:36.602887Z node 1 :READ_TABLE_API WARN: rpc_read_table.cpp:116: ForgetAction occurred, send TEvPoisonPill \x62797465612030 \x62797465612030 \x62797465612031 \x62797465612031 \x62797465612032 \x62797465612032 \x62797465612033 \x62797465612033 \x62797465612034 \x62797465612034 \x62797465612035 \x62797465612035 \x62797465612036 
\x62797465612036 \x62797465612037 \x62797465612037 \x62797465612038 \x62797465612038 \x62797465612039 \x62797465612039 2026-01-08T15:31:36.665913Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) {"\\x6130","\\x623130"} {"\\x6130","\\x623130"} {"\\x6131","\\x623131"} {"\\x6131","\\x623131"} {"\\x6132","\\x623132"} {"\\x6132","\\x623132"} {"\\x6133","\\x623133"} {"\\x6133","\\x623133"} {"\\x6134","\\x623134"} {"\\x6134","\\x623134"} {"\\x6135","\\x623135"} {"\\x6135","\\x623135"} {"\\x6136","\\x623136"} {"\\x6136","\\x623136"} {"\\x6137","\\x623137"} {"\\x6137","\\x623137"} {"\\x6138","\\x623138"} {"\\x6138","\\x623138"} {"\\x6139","\\x623139"} {"\\x6139","\\x623139"} 2026-01-08T15:31:36.884340Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:31:36.984756Z node 1 :READ_TABLE_API WARN: rpc_read_table.cpp:116: ForgetAction occurred, send TEvPoisonPill {"\\x6130","\\x623130"} {"\\x6130","\\x623130"} {"\\x6131","\\x623131"} {"\\x6131","\\x623131"} {"\\x6132","\\x623132"} {"\\x6132","\\x623132"} {"\\x6133","\\x623133"} {"\\x6133","\\x623133"} {"\\x6134","\\x623134"} {"\\x6134","\\x623134"} {"\\x6135","\\x623135"} {"\\x6135","\\x623135"} {"\\x6136","\\x623136"} {"\\x6136","\\x623136"} {"\\x6137","\\x623137"} {"\\x6137","\\x623137"} {"\\x6138","\\x623138"} {"\\x6138","\\x623138"} {"\\x6139","\\x623139"} {"\\x6139","\\x623139"} 2026-01-08T15:31:37.093622Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:31:37.212991Z node 1 :READ_TABLE_API WARN: rpc_read_table.cpp:116: ForgetAction occurred, send TEvPoisonPill f f t t 2026-01-08T15:31:37.394286Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710676:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:31:37.495744Z node 1 :READ_TABLE_API WARN: rpc_read_table.cpp:116: ForgetAction occurred, send TEvPoisonPill f f t t 2026-01-08T15:31:37.557187Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710680:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:31:37.673225Z node 1 :READ_TABLE_API WARN: rpc_read_table.cpp:116: ForgetAction occurred, send TEvPoisonPill {f,f} {f,f} {t,t} {t,t} 2026-01-08T15:31:37.749784Z node 1 :FLAT_TX_SCHEMESHARD WARN: 
schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710684:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:31:37.849386Z node 1 :READ_TABLE_API WARN: rpc_read_table.cpp:116: ForgetAction occurred, send TEvPoisonPill {f,f} {f,f} {t,t} {t,t} 2026-01-08T15:31:37.906168Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710688:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 0 0 1 1 2 2 3 3 4 4 5 5 6 6 7 7 8 8 9 9 2026-01-08T15:31:38.055978Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710691:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:31:38.132762Z node 1 :READ_TABLE_API WARN: rpc_read_table.cpp:116: ForgetAction occurred, send TEvPoisonPill 0 0 1 1 2 2 3 3 4 4 5 5 6 6 7 7 8 8 9 9 2026-01-08T15:31:38.193264Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710695:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) {0,0} {0,0} {1,1} {1,1} {2,2} {2,2} {3,3} {3,3} {4,4} {4,4} {5,5} {5,5} {6,6} {6,6} {7,7} {7,7} {8,8} {8,8} {9,9} {9,9} 2026-01-08T15:31:38.335715Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710698:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:31:38.443786Z node 1 :READ_TABLE_API WARN: rpc_read_table.cpp:116: ForgetAction occurred, send TEvPoisonPill {0,0} {0,0} {1,1} {1,1} {2,2} {2,2} {3,3} {3,3} {4,4} {4,4} {5,5} {5,5} {6,6} {6,6} {7,7} {7,7} {8,8} {8,8} {9,9} {9,9} 2026-01-08T15:31:38.504699Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710702:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:31:38.585306Z node 1 :READ_TABLE_API WARN: rpc_read_table.cpp:116: ForgetAction occurred, send TEvPoisonPill 0 0 1 1 2 2 3 3 4 4 5 5 6 6 7 7 8 8 9 9 2026-01-08T15:31:38.665395Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710706:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 0 0 1 1 2 2 3 3 4 4 5 5 6 6 7 7 8 8 9 9 2026-01-08T15:31:38.856191Z node 1 :FLAT_TX_SCHEMESHARD WARN: 
schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710709:0, at schemeshard: 72057594046644480, first G ... RROR: schemereq.cpp:624: Actor# [14:7593014456417399297:2547] txid# 281474976710659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 41], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2026-01-08T15:34:03.566900Z node 14 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:34:04.033117Z node 14 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:34:04.973277Z node 14 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:504: Get parsing result with error, self: [14:7593014460712366922:2396], owner: [14:7593014456417399190:2314], statement id: 0 2026-01-08T15:34:04.973786Z node 14 :KQP_SESSION WARN: {KQPSA@kqp_session_actor.cpp:2679} SessionId: ydb://session/3?node_id=14&id=ZGMzNzdhNjMtZjg0YzUxNDQtOWFjYTYwZGQtNjg0MzU2ODU=, ActorId: [14:7593014460712366920:2395], ActorState: ExecuteState, LegacyTraceId: 01kef3wpd61x6wdwdy15axpazw, ReplyQueryCompileError, remove tx status# GENERIC_ERROR issues# { message: "Error while parsing query." severity: 1 issues { position { row: 1 column: 1 } message: "alternative is not implemented yet : 34" end_position { row: 1 column: 1 } severity: 1 } } tx_id# trace_id# 2026-01-08T15:34:05.389531Z node 14 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:625: Compilation failed, self: [14:7593014465007334247:2408], status: GENERIC_ERROR, issues:
: Error: Type annotation, code: 1030
:1:1: Error: At function: RemovePrefixMembers, At function: PgSelect, At tuple, At tuple, At tuple, At function: PgSetItem, At tuple
: Error: At tuple
:1:1: Error: At function: PgWhere, At lambda
:2:56: Error: At function: PgOp
:2:56: Error: Unable to find an overload for operator = with given argument type(s): (text,int4) 2026-01-08T15:34:05.391553Z node 14 :KQP_SESSION WARN: {KQPSA@kqp_session_actor.cpp:2679} SessionId: ydb://session/3?node_id=14&id=MzllMTU1ZjMtNDdmNzE2OTUtZWE3NTJkYjItODJmNjRiZjg=, ActorId: [14:7593014465007334244:2406], ActorState: ExecuteState, LegacyTraceId: 01kef3wps8103n6kgsq9q2bh9r, ReplyQueryCompileError, remove tx status# GENERIC_ERROR issues# { message: "Type annotation" issue_code: 1030 severity: 1 issues { position { row: 1 column: 1 } message: "At function: RemovePrefixMembers, At function: PgSelect, At tuple, At tuple, At tuple, At function: PgSetItem, At tuple" end_position { row: 1 column: 1 } severity: 1 issues { message: "At tuple" severity: 1 issues { position { row: 1 column: 1 } message: "At function: PgWhere, At lambda" end_position { row: 1 column: 1 } severity: 1 issues { position { row: 2 column: 56 } message: "At function: PgOp" end_position { row: 2 column: 56 } severity: 1 issues { position { row: 2 column: 56 } message: "Unable to find an overload for operator = with given argument type(s): (text,int4)" end_position { row: 2 column: 56 } severity: 1 } } } } } } tx_id# trace_id# 2026-01-08T15:34:05.455859Z node 14 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:625: Compilation failed, self: [14:7593014465007334261:2415], status: GENERIC_ERROR, issues:
: Error: Type annotation, code: 1030
:1:1: Error: At function: RemovePrefixMembers, At function: PgSelect, At tuple, At tuple, At tuple, At function: PgSetItem, At tuple
: Error: At tuple
:1:1: Error: At function: PgWhere, At lambda
:2:58: Error: At function: PgAnd
:2:68: Error: At function: PgOp
:2:68: Error: Unable to find an overload for operator = with given argument type(s): (text,int4) 2026-01-08T15:34:05.458932Z node 14 :KQP_SESSION WARN: {KQPSA@kqp_session_actor.cpp:2679} SessionId: ydb://session/3?node_id=14&id=ZGVmMThhZWMtOWQwNThhM2MtMjJjNjA2NWEtYTY4ZmY2YjY=, ActorId: [14:7593014465007334258:2413], ActorState: ExecuteState, LegacyTraceId: 01kef3wpv2ew8tx26wzwnvr4t0, ReplyQueryCompileError, remove tx status# GENERIC_ERROR issues# { message: "Type annotation" issue_code: 1030 severity: 1 issues { position { row: 1 column: 1 } message: "At function: RemovePrefixMembers, At function: PgSelect, At tuple, At tuple, At tuple, At function: PgSetItem, At tuple" end_position { row: 1 column: 1 } severity: 1 issues { message: "At tuple" severity: 1 issues { position { row: 1 column: 1 } message: "At function: PgWhere, At lambda" end_position { row: 1 column: 1 } severity: 1 issues { position { row: 2 column: 58 } message: "At function: PgAnd" end_position { row: 2 column: 58 } severity: 1 issues { position { row: 2 column: 68 } message: "At function: PgOp" end_position { row: 2 column: 68 } severity: 1 issues { position { row: 2 column: 68 } message: "Unable to find an overload for operator = with given argument type(s): (text,int4)" end_position { row: 2 column: 68 } severity: 1 } } } } } } } tx_id# trace_id# 2026-01-08T15:34:05.500377Z node 14 :KQP_EXECUTER CRIT: {KQPLIT@kqp_literal_executer.cpp:118} ActorId: [0:0:0] TxId: 0. Ctx: { TraceId: 01kef3wpx89maxh1j71gnb0680, Database: /Root, SessionId: ydb://session/3?node_id=14&id=M2Q5MGExNjMtOWFkYmEyYmEtZmI3NjAzMTctYTkzMjE5NDk=, PoolId: default, IsStreamingQuery: 0}. TKqpLiteralExecuter, unexpected exception caught: (NKikimr::NMiniKQL::TTerminateException) Terminate was called, reason(51): ERROR: invalid input syntax for type integer: "a" trace_id# 2026-01-08T15:34:05.500820Z node 14 :KQP_SESSION WARN: {KQPSA@kqp_session_actor.cpp:3171} SessionId: ydb://session/3?node_id=14&id=M2Q5MGExNjMtOWFkYmEyYmEtZmI3NjAzMTctYTkzMjE5NDk=, ActorId: [14:7593014465007334270:2419], ActorState: ExecuteState, LegacyTraceId: 01kef3wpx89maxh1j71gnb0680, Create QueryResponse for error on request, msg: status# PRECONDITION_FAILED issues# { message: "(NKikimr::NMiniKQL::TTerminateException) Terminate was called, reason(51): ERROR: invalid input syntax for type integer: \"a\"\n\n" issue_code: 2029 severity: 1 } trace_id# 2026-01-08T15:34:05.571907Z node 14 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710673:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:34:05.718024Z node 14 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710674:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:34:05.867613Z node 14 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:625: Compilation failed, self: [14:7593014465007334440:2444], status: GENERIC_ERROR, issues:
: Error: Type annotation, code: 1030
:1:1: Error: At function: KiWriteTable!
:1:1: Error: values have 3 columns, INSERT INTO expects: 2 2026-01-08T15:34:05.871894Z node 14 :KQP_SESSION WARN: {KQPSA@kqp_session_actor.cpp:2679} SessionId: ydb://session/3?node_id=14&id=MmU4MzgwMC02MGQwMDQwMC02Yjg5ODc1Yi0xNjJhNzg1ZA==, ActorId: [14:7593014465007334437:2442], ActorState: ExecuteState, LegacyTraceId: 01kef3wq805epavrp814za0eb9, ReplyQueryCompileError, remove tx status# GENERIC_ERROR issues# { message: "Type annotation" issue_code: 1030 severity: 1 issues { position { row: 1 column: 1 } message: "At function: KiWriteTable!" end_position { row: 1 column: 1 } severity: 1 issues { position { row: 1 column: 1 } message: "values have 3 columns, INSERT INTO expects: 2" end_position { row: 1 column: 1 } severity: 1 } } } tx_id# trace_id# 2026-01-08T15:34:05.929639Z node 14 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:625: Compilation failed, self: [14:7593014465007334452:2450], status: GENERIC_ERROR, issues:
: Error: Type annotation, code: 1030
:1:1: Error: At function: KiWriteTable!
:1:1: Error: Failed to convert type: List> to List>
:1:1: Error: Failed to convert 'id': pgunknown to Optional
:1:1: Error: Row type mismatch for table: db.[/Root/PgTable2] 2026-01-08T15:34:05.930336Z node 14 :KQP_SESSION WARN: {KQPSA@kqp_session_actor.cpp:2679} SessionId: ydb://session/3?node_id=14&id=Y2MzMzIzZDItZTlmNTdkNzQtZWI1YmNiOWYtZjRhMDE0ZmI=, ActorId: [14:7593014465007334449:2448], ActorState: ExecuteState, LegacyTraceId: 01kef3wqa7b4jfd2vr9k0t46qn, ReplyQueryCompileError, remove tx status# GENERIC_ERROR issues# { message: "Type annotation" issue_code: 1030 severity: 1 issues { position { row: 1 column: 1 } message: "At function: KiWriteTable!" end_position { row: 1 column: 1 } severity: 1 issues { position { row: 1 column: 1 } message: "Failed to convert type: List> to List>" end_position { row: 1 column: 1 } severity: 1 issues { position { row: 1 column: 1 } message: "Failed to convert \'id\': pgunknown to Optional" end_position { row: 1 column: 1 } severity: 1 } } issues { position { row: 1 column: 1 } message: "Row type mismatch for table: db.[/Root/PgTable2]" end_position { row: 1 column: 1 } severity: 1 } } } tx_id# trace_id# 2026-01-08T15:34:06.606836Z node 14 :KQP_EXECUTER CRIT: {KQPLIT@kqp_literal_executer.cpp:118} ActorId: [0:0:0] TxId: 0. Ctx: { TraceId: 01kef3wqc114bpym349jdntvwb, Database: /Root, SessionId: ydb://session/3?node_id=14&id=M2IxZDQ0OTEtN2RhY2FhZTQtZjU4NWRlY2MtYTMxMmRmYjA=, PoolId: default, IsStreamingQuery: 0}. TKqpLiteralExecuter, unexpected exception caught: (NKikimr::NMiniKQL::TTerminateException) Terminate was called, reason(51): ERROR: invalid input syntax for type integer: "a" trace_id# 2026-01-08T15:34:06.737034Z node 14 :KQP_SESSION WARN: {KQPSA@kqp_session_actor.cpp:3171} SessionId: ydb://session/3?node_id=14&id=M2IxZDQ0OTEtN2RhY2FhZTQtZjU4NWRlY2MtYTMxMmRmYjA=, ActorId: [14:7593014465007334462:2454], ActorState: ExecuteState, LegacyTraceId: 01kef3wqc114bpym349jdntvwb, Create QueryResponse for error on request, msg: status# PRECONDITION_FAILED issues# { message: "(NKikimr::NMiniKQL::TTerminateException) Terminate was called, reason(51): ERROR: invalid input syntax for type integer: \"a\"\n\n" issue_code: 2029 severity: 1 } trace_id# 2026-01-08T15:34:06.864531Z node 14 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710676:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:34:07.894887Z node 14 :HIVE WARN: hive_impl.cpp:519: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 14, TabletId: 72075186224037892 not found 2026-01-08T15:34:07.983126Z node 14 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710681:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) |87.4%| [TM] {BAZEL_UPLOAD} ydb/core/kqp/ut/pg/unittest >> TxUsage::Sinks_Oltp_WriteToTopicAndTable_4_Query [GOOD] >> KqpPg::TableInsert+useSink [GOOD] >> KqpPg::TableInsert-useSink |87.4%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/tiering/ut/unittest |87.4%| [TM] {BAZEL_UPLOAD} ydb/core/tx/tiering/ut/unittest >> ColumnBuildTest::DoNotRestoreDeletedRows |87.4%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/tiering/ut/unittest |87.4%| [TM] {BAZEL_UPLOAD} ydb/core/tx/tiering/ut/unittest >> 
ColumnBuildTest::Initiating_Failed >> TxUsage::WriteToTopic_Demo_20_RestartBeforeCommit_Query [GOOD] |87.4%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/tiering/ut/unittest |87.4%| [TM] {BAZEL_UPLOAD} ydb/core/tx/tiering/ut/unittest |87.4%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/tiering/ut/unittest |87.4%| [TM] {BAZEL_UPLOAD} ydb/core/tx/tiering/ut/unittest >> KqpRboYql::ExpressionSubquery [GOOD] >> S3SettingsConversion::FoldersStrictStyle [GOOD] >> TxUsage::Sinks_Oltp_WriteToTopicAndTable_5_Table >> TxUsage::WriteToTopic_Demo_18_RestartBeforeCommit_Query [GOOD] >> KqpRboYql::Aggregation+ColumnStore [GOOD] >> KqpRboYql::Aggregation-ColumnStore |87.4%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/tiering/ut/unittest |87.4%| [TM] {BAZEL_UPLOAD} ydb/core/tx/tiering/ut/unittest >> BasicUsage::SimpleHandlers [GOOD] >> TxUsage::WriteToTopic_Demo_20_RestartAfterCommit_Table |87.4%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/tiering/ut/unittest >> S3SettingsConversion::FoldersStrictStyle [GOOD] |87.4%| [TM] {BAZEL_UPLOAD} ydb/core/tx/tiering/ut/unittest |87.4%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/tiering/ut/unittest |87.4%| [TM] {BAZEL_UPLOAD} ydb/core/tx/tiering/ut/unittest >> S3SettingsConversion::Basic >> S3SettingsConversion::Basic [GOOD] |87.4%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/tiering/ut/unittest |87.4%| [TM] {BAZEL_UPLOAD} ydb/core/tx/tiering/ut/unittest >> DataShardVolatile::DistributedUpsertRestartAfterPrepare+UseSink [GOOD] >> DataShardVolatile::DistributedUpsertRestartAfterPrepare-UseSink ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/rbo/unittest >> KqpRboYql::ExpressionSubquery [GOOD] Test command err: Trying to start YDB, gRPC: 24321, MsgBus: 14057 2026-01-08T15:34:02.295685Z node 1 :METADATA_PROVIDER WARN: log.cpp:841: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7593014453043015722:2064];send_to=[0:7307199536658146131:7762515]; 2026-01-08T15:34:02.295736Z node 1 :METADATA_PROVIDER ERROR: log.cpp:841: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2026-01-08T15:34:02.583438Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2026-01-08T15:34:02.583560Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2026-01-08T15:34:02.587404Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2026-01-08T15:34:02.717872Z node 1 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: LookupError, path: Root/.metadata/script_executions 2026-01-08T15:34:02.764784Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1374: Notification cookie mismatch for subscription [1:7593014453043015698:2081] 1767886442294853 != 1767886442294856 2026-01-08T15:34:02.775930Z node 1 :IMPORT WARN: schemeshard_import.cpp:440: Table profiles were not loaded 2026-01-08T15:34:02.905856Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2026-01-08T15:34:02.905881Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 
2026-01-08T15:34:02.905894Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2026-01-08T15:34:02.905952Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2026-01-08T15:34:02.914486Z node 1 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: PathErrorUnknown, path: Root/.metadata/script_executions 2026-01-08T15:34:03.310570Z node 1 :TX_CONVEYOR ERROR: log.cpp:841: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2026-01-08T15:34:03.420154Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2026-01-08T15:34:06.068470Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7593014470222885770:2324], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:34:06.068619Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:263: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:34:06.069370Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7593014470222885780:2325], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:34:06.069443Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:263: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:34:06.333770Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateColumnTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/olap/operations/create_table.cpp:814) 2026-01-08T15:34:07.004884Z node 1 :TX_COLUMNSHARD WARN: log.cpp:841: tablet_id=72075186224037891;self_id=[1:7593014470222886353:2336];tablet_id=72075186224037891;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2026-01-08T15:34:07.005211Z node 1 :TX_COLUMNSHARD WARN: log.cpp:841: tablet_id=72075186224037891;self_id=[1:7593014470222886353:2336];tablet_id=72075186224037891;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2026-01-08T15:34:07.005496Z node 1 :TX_COLUMNSHARD WARN: log.cpp:841: tablet_id=72075186224037891;self_id=[1:7593014470222886353:2336];tablet_id=72075186224037891;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2026-01-08T15:34:07.005621Z node 1 :TX_COLUMNSHARD WARN: log.cpp:841: tablet_id=72075186224037891;self_id=[1:7593014470222886353:2336];tablet_id=72075186224037891;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2026-01-08T15:34:07.005735Z node 1 :TX_COLUMNSHARD WARN: log.cpp:841: tablet_id=72075186224037891;self_id=[1:7593014470222886353:2336];tablet_id=72075186224037891;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2026-01-08T15:34:07.005852Z node 1 :TX_COLUMNSHARD WARN: log.cpp:841: tablet_id=72075186224037891;self_id=[1:7593014470222886353:2336];tablet_id=72075186224037891;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2026-01-08T15:34:07.005969Z node 1 :TX_COLUMNSHARD WARN: log.cpp:841: tablet_id=72075186224037891;self_id=[1:7593014470222886353:2336];tablet_id=72075186224037891;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2026-01-08T15:34:07.006071Z node 1 :TX_COLUMNSHARD WARN: log.cpp:841: tablet_id=72075186224037891;self_id=[1:7593014470222886353:2336];tablet_id=72075186224037891;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2026-01-08T15:34:07.006204Z node 1 :TX_COLUMNSHARD WARN: log.cpp:841: tablet_id=72075186224037891;self_id=[1:7593014470222886353:2336];tablet_id=72075186224037891;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2026-01-08T15:34:07.006208Z node 1 :TX_COLUMNSHARD WARN: log.cpp:841: tablet_id=72075186224037919;self_id=[1:7593014470222886355:2338];tablet_id=72075186224037919;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2026-01-08T15:34:07.006305Z node 1 :TX_COLUMNSHARD WARN: log.cpp:841: tablet_id=72075186224037919;self_id=[1:7593014470222886355:2338];tablet_id=72075186224037919;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2026-01-08T15:34:07.006514Z node 1 :TX_COLUMNSHARD WARN: 
log.cpp:841: tablet_id=72075186224037919;self_id=[1:7593014470222886355:2338];tablet_id=72075186224037919;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2026-01-08T15:34:07.006660Z node 1 :TX_COLUMNSHARD WARN: log.cpp:841: tablet_id=72075186224037919;self_id=[1:7593014470222886355:2338];tablet_id=72075186224037919;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2026-01-08T15:34:07.006787Z node 1 :TX_COLUMNSHARD WARN: log.cpp:841: tablet_id=72075186224037919;self_id=[1:7593014470222886355:2338];tablet_id=72075186224037919;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2026-01-08T15:34:07.006878Z node 1 :TX_COLUMNSHARD WARN: log.cpp:841: tablet_id=72075186224037919;self_id=[1:7593014470222886355:2338];tablet_id=72075186224037919;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2026-01-08T15:34:07.006969Z node 1 :TX_COLUMNSHARD WARN: log.cpp:841: tablet_id=72075186224037919;self_id=[1:7593014470222886355:2338];tablet_id=72075186224037919;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2026-01-08T15:34:07.007105Z node 1 :TX_COLUMNSHARD WARN: log.cpp:841: tablet_id=72075186224037919;self_id=[1:7593014470222886355:2338];tablet_id=72075186224037919;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2026-01-08T15:34:07.007199Z node 1 :TX_COLUMNSHARD WARN: log.cpp:841: tablet_id=72075186224037919;self_id=[1:7593014470222886355:2338];tablet_id=72075186224037919;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2026-01-08T15:34:07.007327Z node 1 :TX_COLUMNSHARD WARN: log.cpp:841: tablet_id=72075186224037919;self_id=[1:7593014470222886355:2338];tablet_id=72075186224037919;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2026-01-08T15:34:07.007382Z node 1 :TX_COLUMNSHARD WARN: log.cpp:841: tablet_id=72075186224037891;self_id=[1:7593014470222886353:2336];tablet_id=72075186224037891;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2026-01-08T15:34:07.007440Z node 1 :TX_COLUMNSHARD WARN: log.cpp:841: tablet_id=72075186224037919;self_id=[1:7593014470222886355:2338];tablet_id=72075186224037919;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2026-01-08T15:34:07.007567Z node 1 :TX_COLUMNSHARD WARN: log.cpp:841: tablet_id=72075186224037919;self_id=[1:7593014470222886355:2338];tablet_id=72075186224037919;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CopyBlobIdsToV2; 2026-01-08T15:34:07.007618Z node 1 :TX_COLUMNSHARD WARN: log.cpp:841: tablet_id=72075186224037891;self_id=[1:7593014470222886353:2336];tablet_id=72075186224037891;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2026-01-08T15:34:07.007708Z node 1 :TX_COLUMNSHARD WARN: log.cpp:841: 
tablet_id=72075186224037919;self_id=[1:7593014470222886355:2338];tablet_id=72075186224037919;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreAppearanceSnapshot; 2026-01-08T15:34:07.007754Z node 1 :TX_COLUMNSHARD WARN: log.cpp:841: tablet_id=72075186224037891;self_id=[1:7593014470222886353:2336];tablet_id=72075186224037891;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CopyBlobIdsToV2; 2026-01-08T15:34:07.007901Z node 1 :TX_COLUMNSHARD WARN: log.cpp:841: tablet_id=72075186224037891;self_id=[1:7593014470222886353:2336];tablet_id=72075186224037891;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreAppearanceSnapshot; 2026-01-08T15:34:07.058107Z node 1 :TX_COLUMNSHARD WARN: log.cpp:841: tablet_id=72075186224037903;self_id=[1:7593014470222886367:2339];tablet_id=72075186224037903;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event ... X_COLUMNSHARD WARN: log.cpp:841: tablet_id=72075186224038000;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715659;fline=tables_manager.cpp:57;method=resolve_internal_path_id;ss_local=39;result=not_found; 2026-01-08T15:34:32.524294Z node 3 :TX_COLUMNSHARD WARN: log.cpp:841: tablet_id=72075186224038000;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715659;fline=tables_manager.cpp:57;method=resolve_internal_path_id;ss_local=39;result=not_found; 2026-01-08T15:34:32.524316Z node 3 :TX_COLUMNSHARD WARN: log.cpp:841: tablet_id=72075186224038000;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715659;fline=tables_manager.cpp:57;method=resolve_internal_path_id;ss_local=39;result=not_found; 2026-01-08T15:34:32.529929Z node 3 :TX_COLUMNSHARD WARN: log.cpp:841: tablet_id=72075186224037955;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715659;fline=tables_manager.cpp:57;method=resolve_internal_path_id;ss_local=39;result=not_found; 2026-01-08T15:34:32.530034Z node 3 :TX_COLUMNSHARD WARN: log.cpp:841: tablet_id=72075186224037955;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715659;fline=tables_manager.cpp:57;method=resolve_internal_path_id;ss_local=39;result=not_found; 2026-01-08T15:34:32.530057Z node 3 :TX_COLUMNSHARD WARN: log.cpp:841: tablet_id=72075186224037955;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715659;fline=tables_manager.cpp:57;method=resolve_internal_path_id;ss_local=39;result=not_found; 2026-01-08T15:34:32.539544Z node 3 :TX_COLUMNSHARD WARN: log.cpp:841: tablet_id=72075186224037983;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715659;fline=tables_manager.cpp:57;method=resolve_internal_path_id;ss_local=39;result=not_found; 2026-01-08T15:34:32.539622Z node 3 :TX_COLUMNSHARD WARN: log.cpp:841: tablet_id=72075186224037983;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715659;fline=tables_manager.cpp:57;method=resolve_internal_path_id;ss_local=39;result=not_found; 2026-01-08T15:34:32.539643Z node 3 :TX_COLUMNSHARD WARN: log.cpp:841: tablet_id=72075186224037983;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715659;fline=tables_manager.cpp:57;method=resolve_internal_path_id;ss_local=39;result=not_found; 2026-01-08T15:34:32.540358Z node 3 :TX_COLUMNSHARD WARN: log.cpp:841: tablet_id=72075186224037969;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715659;fline=tables_manager.cpp:57;method=resolve_internal_path_id;ss_local=39;result=not_found; 
2026-01-08T15:34:32.540465Z node 3 :TX_COLUMNSHARD WARN: log.cpp:841: tablet_id=72075186224037969;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715659;fline=tables_manager.cpp:57;method=resolve_internal_path_id;ss_local=39;result=not_found; 2026-01-08T15:34:32.540574Z node 3 :TX_COLUMNSHARD WARN: log.cpp:841: tablet_id=72075186224037969;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715659;fline=tables_manager.cpp:57;method=resolve_internal_path_id;ss_local=39;result=not_found; 2026-01-08T15:34:32.550602Z node 3 :TX_COLUMNSHARD WARN: log.cpp:841: tablet_id=72075186224037978;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715659;fline=tables_manager.cpp:57;method=resolve_internal_path_id;ss_local=39;result=not_found; 2026-01-08T15:34:32.550619Z node 3 :TX_COLUMNSHARD WARN: log.cpp:841: tablet_id=72075186224038008;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715659;fline=tables_manager.cpp:57;method=resolve_internal_path_id;ss_local=39;result=not_found; 2026-01-08T15:34:32.550688Z node 3 :TX_COLUMNSHARD WARN: log.cpp:841: tablet_id=72075186224037978;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715659;fline=tables_manager.cpp:57;method=resolve_internal_path_id;ss_local=39;result=not_found; 2026-01-08T15:34:32.550692Z node 3 :TX_COLUMNSHARD WARN: log.cpp:841: tablet_id=72075186224038008;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715659;fline=tables_manager.cpp:57;method=resolve_internal_path_id;ss_local=39;result=not_found; 2026-01-08T15:34:32.550710Z node 3 :TX_COLUMNSHARD WARN: log.cpp:841: tablet_id=72075186224038008;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715659;fline=tables_manager.cpp:57;method=resolve_internal_path_id;ss_local=39;result=not_found; 2026-01-08T15:34:32.550710Z node 3 :TX_COLUMNSHARD WARN: log.cpp:841: tablet_id=72075186224037978;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715659;fline=tables_manager.cpp:57;method=resolve_internal_path_id;ss_local=39;result=not_found; 2026-01-08T15:34:32.561626Z node 3 :TX_COLUMNSHARD WARN: log.cpp:841: tablet_id=72075186224037975;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715659;fline=tables_manager.cpp:57;method=resolve_internal_path_id;ss_local=39;result=not_found; 2026-01-08T15:34:32.561651Z node 3 :TX_COLUMNSHARD WARN: log.cpp:841: tablet_id=72075186224037977;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715659;fline=tables_manager.cpp:57;method=resolve_internal_path_id;ss_local=39;result=not_found; 2026-01-08T15:34:32.561701Z node 3 :TX_COLUMNSHARD WARN: log.cpp:841: tablet_id=72075186224037975;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715659;fline=tables_manager.cpp:57;method=resolve_internal_path_id;ss_local=39;result=not_found; 2026-01-08T15:34:32.561702Z node 3 :TX_COLUMNSHARD WARN: log.cpp:841: tablet_id=72075186224037977;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715659;fline=tables_manager.cpp:57;method=resolve_internal_path_id;ss_local=39;result=not_found; 2026-01-08T15:34:32.561722Z node 3 :TX_COLUMNSHARD WARN: log.cpp:841: tablet_id=72075186224037975;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715659;fline=tables_manager.cpp:57;method=resolve_internal_path_id;ss_local=39;result=not_found; 2026-01-08T15:34:32.561722Z node 3 :TX_COLUMNSHARD WARN: log.cpp:841: 
tablet_id=72075186224037977;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715659;fline=tables_manager.cpp:57;method=resolve_internal_path_id;ss_local=39;result=not_found; 2026-01-08T15:34:32.572693Z node 3 :TX_COLUMNSHARD WARN: log.cpp:841: tablet_id=72075186224037989;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715659;fline=tables_manager.cpp:57;method=resolve_internal_path_id;ss_local=39;result=not_found; 2026-01-08T15:34:32.572748Z node 3 :TX_COLUMNSHARD WARN: log.cpp:841: tablet_id=72075186224037979;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715659;fline=tables_manager.cpp:57;method=resolve_internal_path_id;ss_local=39;result=not_found; 2026-01-08T15:34:32.572771Z node 3 :TX_COLUMNSHARD WARN: log.cpp:841: tablet_id=72075186224037989;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715659;fline=tables_manager.cpp:57;method=resolve_internal_path_id;ss_local=39;result=not_found; 2026-01-08T15:34:32.572794Z node 3 :TX_COLUMNSHARD WARN: log.cpp:841: tablet_id=72075186224037989;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715659;fline=tables_manager.cpp:57;method=resolve_internal_path_id;ss_local=39;result=not_found; 2026-01-08T15:34:32.572796Z node 3 :TX_COLUMNSHARD WARN: log.cpp:841: tablet_id=72075186224037979;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715659;fline=tables_manager.cpp:57;method=resolve_internal_path_id;ss_local=39;result=not_found; 2026-01-08T15:34:32.572812Z node 3 :TX_COLUMNSHARD WARN: log.cpp:841: tablet_id=72075186224037979;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715659;fline=tables_manager.cpp:57;method=resolve_internal_path_id;ss_local=39;result=not_found; 2026-01-08T15:34:32.583784Z node 3 :TX_COLUMNSHARD WARN: log.cpp:841: tablet_id=72075186224038003;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715659;fline=tables_manager.cpp:57;method=resolve_internal_path_id;ss_local=39;result=not_found; 2026-01-08T15:34:32.583866Z node 3 :TX_COLUMNSHARD WARN: log.cpp:841: tablet_id=72075186224038003;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715659;fline=tables_manager.cpp:57;method=resolve_internal_path_id;ss_local=39;result=not_found; 2026-01-08T15:34:32.583888Z node 3 :TX_COLUMNSHARD WARN: log.cpp:841: tablet_id=72075186224038003;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715659;fline=tables_manager.cpp:57;method=resolve_internal_path_id;ss_local=39;result=not_found; 2026-01-08T15:34:32.584965Z node 3 :TX_COLUMNSHARD WARN: log.cpp:841: tablet_id=72075186224037991;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715659;fline=tables_manager.cpp:57;method=resolve_internal_path_id;ss_local=39;result=not_found; 2026-01-08T15:34:32.585034Z node 3 :TX_COLUMNSHARD WARN: log.cpp:841: tablet_id=72075186224037991;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715659;fline=tables_manager.cpp:57;method=resolve_internal_path_id;ss_local=39;result=not_found; 2026-01-08T15:34:32.585052Z node 3 :TX_COLUMNSHARD WARN: log.cpp:841: tablet_id=72075186224037991;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715659;fline=tables_manager.cpp:57;method=resolve_internal_path_id;ss_local=39;result=not_found; 2026-01-08T15:34:32.780381Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7593014583349102087:3009], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:34:32.780556Z node 3 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:263: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:34:32.781481Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7593014583349102092:3012], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:34:32.781555Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7593014583349102093:3013], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:34:32.781705Z node 3 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:263: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:34:32.791645Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715660:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2026-01-08T15:34:32.813567Z node 3 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [3:7593014583349102096:3014], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715660 completed, doublechecking } 2026-01-08T15:34:32.893704Z node 3 :TX_PROXY ERROR: schemereq.cpp:624: Actor# [3:7593014583349102149:5240] txid# 281474976715661, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 43], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2026-01-08T15:34:36.796666Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7747: Cannot get console configs 2026-01-08T15:34:36.796706Z node 3 :IMPORT WARN: schemeshard_import.cpp:440: Table profiles were not loaded |87.4%| [TM] {BAZEL_UPLOAD} ydb/core/kqp/ut/rbo/unittest |87.4%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/tiering/ut/unittest >> S3SettingsConversion::Basic [GOOD] |87.4%| [TM] {BAZEL_UPLOAD} ydb/core/tx/tiering/ut/unittest ------- [TM] {asan, default-linux-x86_64, release} ydb/public/sdk/cpp/src/client/federated_topic/ut/unittest >> BasicUsage::SimpleHandlers [GOOD] Test command err: 2026-01-08T15:33:32.051472Z :WaitEventBlocksBeforeDiscovery INFO: Random seed for debugging is 1767886412051429 2026-01-08T15:33:32.742519Z node 1 :METADATA_PROVIDER WARN: log.cpp:841: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7593014322630945432:2263];send_to=[0:7307199536658146131:7762515]; 2026-01-08T15:33:32.742606Z node 1 :METADATA_PROVIDER ERROR: log.cpp:841: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2026-01-08T15:33:32.740913Z node 2 :BS_NODE WARN: {NWDC01@distconf.cpp:423} StateFunc too long Type# 268639238 Duration# 0.008173s 2026-01-08T15:33:32.797323Z node 1 :PQ_READ_PROXY DEBUG: caching_service.cpp:44: Direct read cache: : Created 2026-01-08T15:33:32.899417Z node 2 :PQ_READ_PROXY DEBUG: caching_service.cpp:44: Direct read cache: : Created 2026-01-08T15:33:33.341311Z node 2 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: LookupError, path: Root/.metadata/script_executions 2026-01-08T15:33:33.341535Z node 2 :METADATA_PROVIDER ERROR: log.cpp:841: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2026-01-08T15:33:33.422444Z node 1 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: LookupError, path: Root/.metadata/script_executions 2026-01-08T15:33:33.481048Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2026-01-08T15:33:33.481144Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2026-01-08T15:33:33.488482Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2026-01-08T15:33:33.488552Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2026-01-08T15:33:33.524007Z node 1 :HIVE WARN: hive_impl.cpp:814: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 2 Cookie 2 2026-01-08T15:33:33.524132Z node 1 :HIVE WARN: node_info.cpp:25: 
HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2026-01-08T15:33:33.555539Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2026-01-08T15:33:33.718822Z node 1 :IMPORT WARN: schemeshard_import.cpp:440: Table profiles were not loaded 2026-01-08T15:33:33.739999Z node 2 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: PathErrorUnknown, path: Root/.metadata/script_executions 2026-01-08T15:33:33.742568Z node 1 :TX_CONVEYOR ERROR: log.cpp:841: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2026-01-08T15:33:33.778532Z node 1 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: PathErrorUnknown, path: Root/.metadata/script_executions 2026-01-08T15:33:33.891568Z node 2 :TX_CONVEYOR ERROR: log.cpp:841: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2026-01-08T15:33:34.143023Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: /home/runner/.ya/build/build_root/663n/0013a4/r3tmp/yandexaOkuTI.tmp 2026-01-08T15:33:34.143051Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: /home/runner/.ya/build/build_root/663n/0013a4/r3tmp/yandexaOkuTI.tmp 2026-01-08T15:33:34.143204Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:202: successfully initialized from file: /home/runner/.ya/build/build_root/663n/0013a4/r3tmp/yandexaOkuTI.tmp 2026-01-08T15:33:34.143287Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2026-01-08T15:33:34.287599Z INFO: TTestServer started on Port 7622 GrpcPort 65348 PQClient connected to localhost:65348 2026-01-08T15:33:34.821577Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2026-01-08T15:33:37.743152Z node 1 :METADATA_PROVIDER ERROR: log.cpp:841: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7593014322630945432:2263];send_to=[0:7307199536658146131:7762515]; 2026-01-08T15:33:37.743242Z node 1 :METADATA_PROVIDER ERROR: log.cpp:841: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2026-01-08T15:33:38.517053Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7593014352484544407:2304], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:33:38.517154Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:263: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:33:38.517354Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7593014352484544441:2307], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:33:38.516619Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7593014348400750345:2335], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:33:38.517489Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7593014348400750336:2332], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:33:38.517597Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:263: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:33:38.518132Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7593014348400750361:2338], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:33:38.518218Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:263: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:33:38.521118Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710661:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2026-01-08T15:33:38.524419Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7593014352484544443:2308], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:33:38.525829Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:263: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:33:38.604812Z node 2 :TX_PROXY ERROR: schemereq.cpp:624: Actor# [2:7593014352484544446:2139] txid# 281474976720657, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exists but creating right now (id: [OwnerId: 72057594046644480, LocalPathId: 44], type: EPathTypeResourcePool, state: EPathStateCreate)" severity: 1 } 2026-01-08T15:33:38.618768Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7593014348400750351:2337], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710661 completed, doublechecking } 2026-01-08T15:33:38.618901Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7593014352484544444:2309], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710661 completed, doublechecking } 2026-01-08T15:33:38.688907Z node 1 :TX_PROXY ERROR: schemereq.cpp:624: Actor# [1:7593014348400750455:3007] txid# 281474976710662, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 44], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2026-01-08T15:33:38.695975Z node 2 :TX_PROXY ERROR: schemereq.cpp:624: Actor# [2:7593014352484544473:2145] txid# 281474976720658, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 44], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2026-01-08T15:33:38.934835Z node 1 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:625: Compilation failed, self: [1:7593014348400750469:2344], status: SCHEME_ERROR, issues:
: Error: Type annotation, code: 1030
:3:16: Error: At function: KiReadTable!
:3:16: Error: Cannot find table 'db.[/Root/PQ/Config/V2/Cluster]' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions., code: 2003 2026-01-08T15:33:38.935141Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:33:38.937414Z node 1 :KQP_SESSION WARN: {KQPSA@kqp_session_actor.cpp:2679} SessionId: ydb://session/3?node_id=1&id=N2EwMjM4NzctOTYwZGQ3ZmMtZmQyMTNmMmItOWZiNjhiNGQ=, ActorId: [1:7593014348400750333:2331], ActorState: ExecuteState, LegacyTraceId: 01kef3vwj72ceh80nsqk2xsayy, ReplyQueryCompileError, remove tx status# SCHEME_ERROR issues# { message: "Type annotation" issue_code: 1030 severity: 1 issues { position { row: 3 column: 16 } message: "At function: KiReadTable!" end_position { row: 3 column: 16 } severity: 1 issues { position { row: 3 column: 16 } message: "Cannot find tabl ... Read/commit by partition streams (cluster:topic:partition:stream-id:read-offset:committed-offset): 2026-01-08T15:34:42.414147Z :INFO: [/Root] [/Root] [c9d60255-bc8ab67c-b84a24-8f9de371] Counters: { Errors: 0 CurrentSessionLifetimeMs: 1195 BytesRead: 0 MessagesRead: 0 BytesReadCompressed: 0 BytesInflightUncompressed: 0 BytesInflightCompressed: 0 BytesInflightTotal: 0 MessagesInflight: 0 } 2026-01-08T15:34:42.414181Z :INFO: [/Root] [/Root] [c9d60255-bc8ab67c-b84a24-8f9de371] Closing read session. Close timeout: 0.000000s 2026-01-08T15:34:42.414211Z :INFO: [/Root] Read/commit by partition streams (cluster:topic:partition:stream-id:read-offset:committed-offset): 2026-01-08T15:34:42.414249Z :INFO: [/Root] [/Root] [c9d60255-bc8ab67c-b84a24-8f9de371] Counters: { Errors: 0 CurrentSessionLifetimeMs: 1195 BytesRead: 0 MessagesRead: 0 BytesReadCompressed: 0 BytesInflightUncompressed: 0 BytesInflightCompressed: 0 BytesInflightTotal: 0 MessagesInflight: 0 } 2026-01-08T15:34:42.415876Z :NOTICE: [/Root] [/Root] [c9d60255-bc8ab67c-b84a24-8f9de371] Aborting read session. Description: SessionClosed { Status: ABORTED Issues: "
: Error: Aborted " } 2026-01-08T15:34:42.414579Z node 3 :PQ_READ_PROXY DEBUG: read_session_actor.cpp:122: session cookie 3 consumer shared/user session shared/user_3_3_16449802368185773326_v1 grpc read done: success# 0, data# { } 2026-01-08T15:34:42.414619Z node 3 :PQ_READ_PROXY INFO: read_session_actor.cpp:125: session cookie 3 consumer shared/user session shared/user_3_3_16449802368185773326_v1 grpc read failed 2026-01-08T15:34:42.414657Z node 3 :PQ_READ_PROXY INFO: read_session_actor.cpp:92: session cookie 3 consumer shared/user session shared/user_3_3_16449802368185773326_v1 grpc closed 2026-01-08T15:34:42.414699Z node 3 :PQ_READ_PROXY INFO: read_session_actor.cpp:383: session cookie 3 consumer shared/user session shared/user_3_3_16449802368185773326_v1 is DEAD 2026-01-08T15:34:42.415397Z node 3 :PQ_READ_PROXY DEBUG: read_session_actor.cpp:122: session cookie 2 consumer shared/user session shared/user_3_2_10177658620911296086_v1 grpc read done: success# 0, data# { } 2026-01-08T15:34:42.415411Z node 3 :PQ_READ_PROXY INFO: read_session_actor.cpp:125: session cookie 2 consumer shared/user session shared/user_3_2_10177658620911296086_v1 grpc read failed 2026-01-08T15:34:42.415435Z node 3 :PQ_READ_PROXY INFO: read_session_actor.cpp:92: session cookie 2 consumer shared/user session shared/user_3_2_10177658620911296086_v1 grpc closed 2026-01-08T15:34:42.415469Z node 3 :PQ_READ_PROXY INFO: read_session_actor.cpp:383: session cookie 2 consumer shared/user session shared/user_3_2_10177658620911296086_v1 is DEAD 2026-01-08T15:34:42.416024Z node 3 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer__balancing.cpp:1661: [72075186224037893][rt3.dc1--test-topic] pipe [3:7593014621760337236:2482] disconnected. 2026-01-08T15:34:42.416047Z node 3 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1672: [72075186224037893][rt3.dc1--test-topic] pipe [3:7593014621760337236:2482] disconnected; active server actors: 1 2026-01-08T15:34:42.416078Z node 3 :PERSQUEUE_READ_BALANCER NOTICE: read_balancer__balancing.cpp:1681: [72075186224037893][rt3.dc1--test-topic] pipe [3:7593014621760337236:2482] client user disconnected session shared/user_3_3_16449802368185773326_v1 2026-01-08T15:34:42.416131Z node 3 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer__balancing.cpp:1187: [72075186224037893][rt3.dc1--test-topic] consumer user rebalancing was scheduled 2026-01-08T15:34:42.416212Z node 3 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer__balancing.cpp:1259: [72075186224037893][rt3.dc1--test-topic] consumer user balancing. Sessions=2, Families=1, UnreadableFamilies=0 [], RequireBalancing=0 [] 2026-01-08T15:34:42.416241Z node 3 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer__balancing.cpp:1326: [72075186224037893][rt3.dc1--test-topic] consumer user start rebalancing. familyCount=1, sessionCount=2, desiredFamilyCount=0, allowPlusOne=1 2026-01-08T15:34:42.416266Z node 3 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer__balancing.cpp:1404: [72075186224037893][rt3.dc1--test-topic] consumer user balancing duration: 0.000035s 2026-01-08T15:34:42.416766Z :INFO: [/Root] [/Root] [b470438e-6f4ac9b7-38274382-ca43568f] Closing read session. 
Close timeout: 0.000000s 2026-01-08T15:34:42.416807Z :INFO: [/Root] Read/commit by partition streams (cluster:topic:partition:stream-id:read-offset:committed-offset): -:test-topic:0:1:299:0 2026-01-08T15:34:42.416839Z :INFO: [/Root] [/Root] [b470438e-6f4ac9b7-38274382-ca43568f] Counters: { Errors: 0 CurrentSessionLifetimeMs: 1204 BytesRead: 4936800 MessagesRead: 300 BytesReadCompressed: 4936800 BytesInflightUncompressed: 0 BytesInflightCompressed: 0 BytesInflightTotal: 0 MessagesInflight: 0 } 2026-01-08T15:34:42.416915Z :NOTICE: [/Root] [/Root] [b470438e-6f4ac9b7-38274382-ca43568f] Aborting read session. Description: SessionClosed { Status: ABORTED Issues: "
: Error: Aborted " } 2026-01-08T15:34:42.417388Z :INFO: [/Root] [/Root] [99ad5d28-91f10670-a2b68d34-368feb6c] Closing read session. Close timeout: 0.000000s 2026-01-08T15:34:42.417418Z :INFO: [/Root] Read/commit by partition streams (cluster:topic:partition:stream-id:read-offset:committed-offset): 2026-01-08T15:34:42.417445Z :INFO: [/Root] [/Root] [99ad5d28-91f10670-a2b68d34-368feb6c] Counters: { Errors: 0 CurrentSessionLifetimeMs: 1236 BytesRead: 0 MessagesRead: 0 BytesReadCompressed: 0 BytesInflightUncompressed: 0 BytesInflightCompressed: 0 BytesInflightTotal: 0 MessagesInflight: 0 } 2026-01-08T15:34:42.417491Z :NOTICE: [/Root] [/Root] [99ad5d28-91f10670-a2b68d34-368feb6c] Aborting read session. Description: SessionClosed { Status: ABORTED Issues: "
: Error: Aborted " } 2026-01-08T15:34:42.416842Z node 3 :PQ_READ_PROXY DEBUG: read_session_actor.cpp:122: session cookie 1 consumer shared/user session shared/user_3_1_7880598906194444323_v1 grpc read done: success# 0, data# { } 2026-01-08T15:34:42.416857Z node 3 :PQ_READ_PROXY INFO: read_session_actor.cpp:125: session cookie 1 consumer shared/user session shared/user_3_1_7880598906194444323_v1 grpc read failed 2026-01-08T15:34:42.416874Z node 3 :PQ_READ_PROXY INFO: read_session_actor.cpp:92: session cookie 1 consumer shared/user session shared/user_3_1_7880598906194444323_v1 grpc closed 2026-01-08T15:34:42.416888Z node 3 :PQ_READ_PROXY INFO: read_session_actor.cpp:383: session cookie 1 consumer shared/user session shared/user_3_1_7880598906194444323_v1 is DEAD 2026-01-08T15:34:42.417227Z node 3 :PQ_WRITE_PROXY DEBUG: write_session_actor.cpp:254: session v1 cookie: 2 sessionId: src_id|c9e90ec5-c3c51aad-7c9e0ccd-b7bf1069_0 grpc read done: success: 0 data: 2026-01-08T15:34:42.417237Z node 3 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:256: session v1 cookie: 2 sessionId: src_id|c9e90ec5-c3c51aad-7c9e0ccd-b7bf1069_0 grpc read failed 2026-01-08T15:34:42.417260Z node 3 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:232: session v1 cookie: 2 sessionId: src_id|c9e90ec5-c3c51aad-7c9e0ccd-b7bf1069_0 grpc closed 2026-01-08T15:34:42.417275Z node 3 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:304: session v1 cookie: 2 sessionId: src_id|c9e90ec5-c3c51aad-7c9e0ccd-b7bf1069_0 is DEAD 2026-01-08T15:34:42.417689Z node 3 :PQ_WRITE_PROXY DEBUG: writer.cpp:562: TPartitionWriter 72075186224037892 (partition=0) Received event: NActors::TEvents::TEvPoison 2026-01-08T15:34:42.420343Z node 4 :PERSQUEUE DEBUG: pq_impl.cpp:2321: [PQ: 72075186224037892] Destroy direct read session shared/user_3_2_10177658620911296086_v1 2026-01-08T15:34:42.420403Z node 4 :PERSQUEUE DEBUG: pq_impl.cpp:2809: [PQ: 72075186224037892] server disconnected, pipe [3:7593014621760337252:2495] destroyed 2026-01-08T15:34:42.420439Z node 4 :PERSQUEUE DEBUG: pq_impl.cpp:2809: [PQ: 72075186224037892] server disconnected, pipe [3:7593014621760337285:2492] destroyed 2026-01-08T15:34:42.420471Z node 4 :PERSQUEUE DEBUG: partition_write.cpp:140: [72075186224037892][Partition][0][StateIdle] TPartition::DropOwner. 2026-01-08T15:34:42.420505Z node 4 :PERSQUEUE DEBUG: partition.cpp:2329: [72075186224037892][Partition][0][StateIdle] Process user action and tx events 2026-01-08T15:34:42.420520Z node 4 :PERSQUEUE DEBUG: partition.cpp:2388: [72075186224037892][Partition][0][StateIdle] Events: 0, PendingCommits: 0, PendingWrites: 0 2026-01-08T15:34:42.420534Z node 4 :PERSQUEUE DEBUG: partition.cpp:2337: [72075186224037892][Partition][0][StateIdle] Process user action and tx pending commits 2026-01-08T15:34:42.420550Z node 4 :PERSQUEUE DEBUG: partition.cpp:2388: [72075186224037892][Partition][0][StateIdle] Events: 0, PendingCommits: 0, PendingWrites: 0 2026-01-08T15:34:42.420562Z node 4 :PERSQUEUE DEBUG: partition.cpp:2347: [72075186224037892][Partition][0][StateIdle] Try persist 2026-01-08T15:34:42.420642Z node 4 :PQ_READ_PROXY DEBUG: caching_service.cpp:139: Direct read cache: server session deregistered: shared/user_3_2_10177658620911296086_v1 2026-01-08T15:34:42.424823Z node 3 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer__balancing.cpp:1661: [72075186224037893][rt3.dc1--test-topic] pipe [3:7593014621760337240:2481] disconnected. 
2026-01-08T15:34:42.424873Z node 3 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1672: [72075186224037893][rt3.dc1--test-topic] pipe [3:7593014621760337240:2481] disconnected; active server actors: 1 2026-01-08T15:34:42.424896Z node 3 :PERSQUEUE_READ_BALANCER NOTICE: read_balancer__balancing.cpp:1681: [72075186224037893][rt3.dc1--test-topic] pipe [3:7593014621760337240:2481] client user disconnected session shared/user_3_2_10177658620911296086_v1 2026-01-08T15:34:42.424970Z node 3 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer__balancing.cpp:1187: [72075186224037893][rt3.dc1--test-topic] consumer user rebalancing was scheduled 2026-01-08T15:34:42.425008Z node 3 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer__balancing.cpp:1661: [72075186224037893][rt3.dc1--test-topic] pipe [3:7593014621760337241:2480] disconnected. 2026-01-08T15:34:42.425023Z node 3 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1672: [72075186224037893][rt3.dc1--test-topic] pipe [3:7593014621760337241:2480] disconnected; active server actors: 1 2026-01-08T15:34:42.425039Z node 3 :PERSQUEUE_READ_BALANCER NOTICE: read_balancer__balancing.cpp:1681: [72075186224037893][rt3.dc1--test-topic] pipe [3:7593014621760337241:2480] client user disconnected session shared/user_3_1_7880598906194444323_v1 2026-01-08T15:34:42.466688Z node 4 :PERSQUEUE DEBUG: partition.cpp:2329: [72075186224037892][Partition][0][StateIdle] Process user action and tx events 2026-01-08T15:34:42.466741Z node 4 :PERSQUEUE DEBUG: partition.cpp:2388: [72075186224037892][Partition][0][StateIdle] Events: 0, PendingCommits: 0, PendingWrites: 0 2026-01-08T15:34:42.466754Z node 4 :PERSQUEUE DEBUG: partition.cpp:2337: [72075186224037892][Partition][0][StateIdle] Process user action and tx pending commits 2026-01-08T15:34:42.466774Z node 4 :PERSQUEUE DEBUG: partition.cpp:2388: [72075186224037892][Partition][0][StateIdle] Events: 0, PendingCommits: 0, PendingWrites: 0 2026-01-08T15:34:42.466787Z node 4 :PERSQUEUE DEBUG: partition.cpp:2347: [72075186224037892][Partition][0][StateIdle] Try persist 2026-01-08T15:34:42.566605Z node 4 :PERSQUEUE DEBUG: partition.cpp:2329: [72075186224037892][Partition][0][StateIdle] Process user action and tx events 2026-01-08T15:34:42.566655Z node 4 :PERSQUEUE DEBUG: partition.cpp:2388: [72075186224037892][Partition][0][StateIdle] Events: 0, PendingCommits: 0, PendingWrites: 0 2026-01-08T15:34:42.566672Z node 4 :PERSQUEUE DEBUG: partition.cpp:2337: [72075186224037892][Partition][0][StateIdle] Process user action and tx pending commits 2026-01-08T15:34:42.566698Z node 4 :PERSQUEUE DEBUG: partition.cpp:2388: [72075186224037892][Partition][0][StateIdle] Events: 0, PendingCommits: 0, PendingWrites: 0 2026-01-08T15:34:42.566715Z node 4 :PERSQUEUE DEBUG: partition.cpp:2347: [72075186224037892][Partition][0][StateIdle] Try persist >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-LocalUser-DropUser-60 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-LocalUser-DropUser-61 |87.4%| [TM] {BAZEL_UPLOAD} ydb/public/sdk/cpp/src/client/federated_topic/ut/unittest |87.4%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/tiering/ut/unittest |87.4%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/tiering/ut/unittest |87.4%| [TM] {BAZEL_UPLOAD} ydb/core/tx/tiering/ut/unittest |87.4%| [TM] {BAZEL_UPLOAD} ydb/core/tx/tiering/ut/unittest >> DataShardVolatile::UpsertBrokenLockArbiterRestart+UseSink [GOOD] >> DataShardVolatile::UpsertBrokenLockArbiterRestart-UseSink >> TSyncComputeActorTest::Basic [GOOD] >> 
ColumnShardTiers::DSConfigsStub >> TSyncComputeActorTest::StatsMode >> TxUsage::WriteToTopic_Demo_18_RestartAfterCommit_Table >> TSyncComputeActorTest::StatsMode [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-LocalUser-CreateUser-13 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-LocalUser-CreateUser-14 |87.4%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/cms/ut_sentinel/ydb-core-cms-ut_sentinel |87.4%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/cms/ut_sentinel/ydb-core-cms-ut_sentinel |87.4%| [LD] {RESULT} $(B)/ydb/core/cms/ut_sentinel/ydb-core-cms-ut_sentinel >> TxUsage::Sinks_Oltp_WriteToTopics_1_Query [GOOD] >> TSyncComputeActorTest::InputTransformMultichannel |87.5%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/tiering/ut/unittest |87.5%| [TM] {BAZEL_UPLOAD} ydb/core/tx/tiering/ut/unittest |87.5%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/engine/ut/ydb-core-engine-ut |87.5%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/engine/ut/ydb-core-engine-ut |87.5%| [LD] {RESULT} $(B)/ydb/core/engine/ut/ydb-core-engine-ut >> ColumnBuildTest::Initiating_Failed [GOOD] >> ColumnBuildTest::Initiating_DisableFlag >> S3SettingsConversion::Port >> S3SettingsConversion::Port [GOOD] |87.5%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/kqp/provider/ut/ydb-core-kqp-provider-ut |87.5%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/kqp/provider/ut/ydb-core-kqp-provider-ut |87.5%| [LD] {RESULT} $(B)/ydb/core/kqp/provider/ut/ydb-core-kqp-provider-ut >> TxUsage::Sinks_Oltp_WriteToTopics_2_Table >> TTopicApiDescribes::GetLocalDescribe |87.5%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/tiering/ut/unittest |87.5%| [TM] {BAZEL_UPLOAD} ydb/core/tx/tiering/ut/unittest |87.5%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/tiering/ut/unittest |87.5%| [TM] {BAZEL_UPLOAD} ydb/core/tx/tiering/ut/unittest |87.5%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/tiering/ut/unittest |87.5%| [TM] {BAZEL_UPLOAD} ydb/core/tx/tiering/ut/unittest |87.5%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/tiering/ut/unittest |87.5%| [TM] {BAZEL_UPLOAD} ydb/core/tx/tiering/ut/unittest >> TxUsage::WriteToTopic_Demo_47_Table [GOOD] |87.5%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/tiering/ut/unittest >> S3SettingsConversion::Port [GOOD] |87.5%| [TM] {BAZEL_UPLOAD} ydb/core/tx/tiering/ut/unittest >> ColumnShardTiers::DSConfigsWithQueryServiceDdl |87.5%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/tiering/ut/unittest |87.5%| [TM] {BAZEL_UPLOAD} ydb/core/tx/tiering/ut/unittest |87.5%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/tiering/ut/unittest |87.5%| [TM] {BAZEL_UPLOAD} ydb/core/tx/tiering/ut/unittest |87.5%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/tiering/ut/unittest >> ColumnBuildTest::Rejecting [GOOD] |87.5%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/services/ydb/ut/ydb-services-ydb-ut |87.5%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/services/ydb/ut/ydb-services-ydb-ut |87.5%| [LD] {RESULT} $(B)/ydb/services/ydb/ut/ydb-services-ydb-ut |87.5%| [TM] {BAZEL_UPLOAD} ydb/core/tx/tiering/ut/unittest |87.5%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/tiering/ut/unittest |87.6%| [TM] {BAZEL_UPLOAD} ydb/core/tx/tiering/ut/unittest >> ColumnBuildTest::Unlocking_DisableFlag |87.6%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/tiering/ut/unittest |87.6%| [TM] {BAZEL_UPLOAD} ydb/core/tx/tiering/ut/unittest >> ColumnBuildTest::Initiating_DisableFlag [GOOD] |87.6%| [TM] 
{asan, default-linux-x86_64, release} ydb/core/tx/tiering/ut/unittest |87.6%| [TM] {BAZEL_UPLOAD} ydb/core/tx/tiering/ut/unittest >> TxUsage::WriteToTopic_Demo_47_Query >> ColumnBuildTest::DoNotRestoreDeletedRows [GOOD] >> ColumnBuildTest::DisabledAndEnabledFlag |87.6%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/tiering/ut/unittest |87.6%| [TM] {BAZEL_UPLOAD} ydb/core/tx/tiering/ut/unittest |87.6%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/tiering/ut/unittest |87.6%| [TM] {BAZEL_UPLOAD} ydb/core/tx/tiering/ut/unittest >> KqpRboYql::Aggregation-ColumnStore [GOOD] >> TxUsage::Sinks_Olap_WriteToTopicAndTable_2_Table [GOOD] >> TTxDataShardReshuffleKMeansScan::BuildToBuildWithOverlap [GOOD] >> TTxDataShardSampleKScan::BadRequest >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-LocalUser-DropUser-61 [FAIL] >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-LocalUser-DropUser-62 |87.5%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/tiering/ut/unittest |87.6%| [TM] {BAZEL_UPLOAD} ydb/core/tx/tiering/ut/unittest ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_column_build/unittest >> ColumnBuildTest::Initiating_DisableFlag [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2143] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2143] Leader for TabletID 72057594046678944 is [1:129:2154] sender: [1:130:2058] recipient: [1:112:2143] 2026-01-08T15:34:43.686597Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7938: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2026-01-08T15:34:43.686705Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7966: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2026-01-08T15:34:43.686759Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7852: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2026-01-08T15:34:43.686819Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7868: OperationsProcessing config: using default configuration 2026-01-08T15:34:43.686859Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7874: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2026-01-08T15:34:43.686906Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7874: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2026-01-08T15:34:43.686970Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7998: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2026-01-08T15:34:43.687032Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_shred_manager.cpp:39: [RootShredManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, ShredInterval# 604800.000000s, ShredBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2026-01-08T15:34:43.687921Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:8069: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2026-01-08T15:34:43.688236Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2026-01-08T15:34:43.854884Z node 1 :FLAT_TX_SCHEMESHARD WARN: 
schemeshard_impl.cpp:7755: Cannot subscribe to console configs 2026-01-08T15:34:43.854941Z node 1 :IMPORT WARN: schemeshard_import.cpp:440: Table profiles were not loaded 2026-01-08T15:34:43.869373Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2026-01-08T15:34:43.869728Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2026-01-08T15:34:43.869961Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2026-01-08T15:34:43.885425Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2026-01-08T15:34:43.885906Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:464: Clear TempDirsState with owners number: 0 2026-01-08T15:34:43.886692Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1383: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2026-01-08T15:34:43.888922Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2026-01-08T15:34:43.905131Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:160: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2026-01-08T15:34:43.905331Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_shred_manager.cpp:84: [RootShredManager] Stop 2026-01-08T15:34:43.906532Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2026-01-08T15:34:43.906610Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2026-01-08T15:34:43.906732Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2026-01-08T15:34:43.906778Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2026-01-08T15:34:43.906879Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:213: TTxServerlessStorageBilling.Complete 2026-01-08T15:34:43.907065Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:7111: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2026-01-08T15:34:44.071095Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot" OperationType: ESchemeOpMkDir MkDir { Name: ".sys" } Internal: true FailOnExist: false } TxId: 281474976710657 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:34:44.072138Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "auth_permissions" Type: EAuthPermissions } } TxId: 281474976710658 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:34:44.072279Z node 1 
:FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "auth_effective_permissions" Type: EAuthEffectivePermissions } } TxId: 281474976710659 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:34:44.072390Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "auth_users" Type: EAuthUsers } } TxId: 281474976710660 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:34:44.072463Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "pg_tables" Type: EPgTables } } TxId: 281474976710661 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:34:44.072528Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "ds_pdisks" Type: EPDisks } } TxId: 281474976710662 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:34:44.072595Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "auth_group_members" Type: EAuthGroupMembers } } TxId: 281474976710663 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:34:44.072678Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "top_partitions_one_minute" Type: ETopPartitionsByCpuOneMinute } } TxId: 281474976710664 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:34:44.072784Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "top_queries_by_request_units_one_hour" Type: ETopQueriesByRequestUnitsOneHour } } TxId: 281474976710665 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:34:44.072859Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: 
"/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "ds_vslots" Type: EVSlots } } TxId: 281474976710666 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:34:44.072938Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "top_partitions_by_tli_one_hour" Type: ETopPartitionsByTliOneHour } } TxId: 281474976710667 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:34:44.073016Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "nodes" Type: ENodes } } TxId: 281474976710668 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:34:44.073100Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "top_queries_by_cpu_time_one_hour" Type: ETopQueriesByCpuTimeOneHour } } TxId: 281474976710669 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:34:44.073184Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "top_queries_by_read_bytes_one_hour" Type: ETopQueriesByReadBytesOneHour } } TxId: 281474976710670 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:34:44.073260Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "top_queries_by_read_bytes_one_minute" Type: ETopQueriesByR ... 
1474976730758 ready parts: 1/1 2026-01-08T15:34:52.984119Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:928: Part operation is done id#281474976730758:0 progress is 1/1 2026-01-08T15:34:52.984145Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1714: TOperation IsReadyToDone TxId: 281474976730758 ready parts: 1/1 2026-01-08T15:34:52.984176Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1679: TOperation IsReadyToNotify, TxId: 281474976730758, ready parts: 1/1, is published: true 2026-01-08T15:34:52.984238Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1702: TOperation DoNotify send TEvNotifyTxCompletionResult to actorId: [2:1766:3633] message: TxId: 281474976730758 2026-01-08T15:34:52.984283Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1714: TOperation IsReadyToDone TxId: 281474976730758 ready parts: 1/1 2026-01-08T15:34:52.984315Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:995: Operation and all the parts is done, operation id: 281474976730758:0 2026-01-08T15:34:52.984342Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5577: RemoveTx for txid 281474976730758:0 2026-01-08T15:34:52.984409Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:611: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72075186233409549, LocalPathId: 32] was 3 2026-01-08T15:34:52.993034Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:7196: Handle: TEvNotifyTxCompletionResult: txId# 281474976730758 2026-01-08T15:34:52.993119Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:7198: Message: TxId: 281474976730758 2026-01-08T15:34:52.993199Z node 2 :BUILD_INDEX INFO: schemeshard_build_index__progress.cpp:3019: TIndexBuilder::TXTYPE_PROGRESS_INDEX_BUILD: TTxReply : TEvNotifyTxCompletionResult, id# 105, txId# 281474976730758 2026-01-08T15:34:52.993347Z node 2 :BUILD_INDEX DEBUG: schemeshard_build_index__progress.cpp:3022: TIndexBuilder::TXTYPE_PROGRESS_INDEX_BUILD: TTxReply : TEvNotifyTxCompletionResult, TIndexBuildInfo: TBuildInfo{ IndexBuildId: 105, Uid: , DomainPathId: [OwnerId: 72075186233409549, LocalPathId: 1], TablePathId: [OwnerId: 72075186233409549, LocalPathId: 32], IndexType: EIndexTypeInvalid, IndexName: , State: Rejection_Unlocking, SubState: None, IsBroken: 0, IsCancellationRequested: 0, Issue: At Initiating state got unsuccess propose result, status: StatusPreconditionFailed, reason: Adding columns with defaults is disabled, SubscribersCount: 1, CreateSender: [0:0:0], AlterMainTableTxId: 281474976725758, AlterMainTableTxStatus: StatusAccepted, AlterMainTableTxDone: 1, LockTxId: 281474976725757, LockTxStatus: StatusAccepted, LockTxDone: 1, InitiateTxId: 281474976725759, InitiateTxStatus: StatusPreconditionFailed, InitiateTxDone: 0, SnapshotStepId: 0, ApplyTxId: 0, ApplyTxStatus: StatusSuccess, ApplyTxDone: 0, DropColumnsTxId: 281474976730757, DropColumnsTxStatus: StatusAccepted, DropColumnsTxDone: 1, UnlockTxId: 281474976730758, UnlockTxStatus: StatusAccepted, UnlockTxDone: 0, ToUploadShards: 0, DoneShards: 0, Processed: UploadRows: 0 UploadBytes: 0 ReadRows: 0 ReadBytes: 0 CpuTimeUs: 0, Billed: UploadRows: 0 UploadBytes: 0 ReadRows: 0 ReadBytes: 0 CpuTimeUs: 0}, txId# 281474976730758 2026-01-08T15:34:53.002810Z node 2 :BUILD_INDEX NOTICE: schemeshard_build_index__progress.cpp:1950: TIndexBuilder::TXTYPE_PROGRESS_INDEX_BUILD: TTxBuildProgress: Execute: 105 Rejection_Unlocking 2026-01-08T15:34:53.003002Z node 2 :BUILD_INDEX DEBUG: 
schemeshard_build_index__progress.cpp:1951: TIndexBuilder::TXTYPE_PROGRESS_INDEX_BUILD: TTxBuildProgress: Execute: 105 Rejection_Unlocking TBuildInfo{ IndexBuildId: 105, Uid: , DomainPathId: [OwnerId: 72075186233409549, LocalPathId: 1], TablePathId: [OwnerId: 72075186233409549, LocalPathId: 32], IndexType: EIndexTypeInvalid, IndexName: , State: Rejection_Unlocking, SubState: None, IsBroken: 0, IsCancellationRequested: 0, Issue: At Initiating state got unsuccess propose result, status: StatusPreconditionFailed, reason: Adding columns with defaults is disabled, SubscribersCount: 1, CreateSender: [0:0:0], AlterMainTableTxId: 281474976725758, AlterMainTableTxStatus: StatusAccepted, AlterMainTableTxDone: 1, LockTxId: 281474976725757, LockTxStatus: StatusAccepted, LockTxDone: 1, InitiateTxId: 281474976725759, InitiateTxStatus: StatusPreconditionFailed, InitiateTxDone: 0, SnapshotStepId: 0, ApplyTxId: 0, ApplyTxStatus: StatusSuccess, ApplyTxDone: 0, DropColumnsTxId: 281474976730757, DropColumnsTxStatus: StatusAccepted, DropColumnsTxDone: 1, UnlockTxId: 281474976730758, UnlockTxStatus: StatusAccepted, UnlockTxDone: 1, ToUploadShards: 0, DoneShards: 0, Processed: UploadRows: 0 UploadBytes: 0 ReadRows: 0 ReadBytes: 0 CpuTimeUs: 0, Billed: UploadRows: 0 UploadBytes: 0 ReadRows: 0 ReadBytes: 0 CpuTimeUs: 0} 2026-01-08T15:34:53.003077Z node 2 :BUILD_INDEX INFO: schemeshard_build_index_tx_base.cpp:24: TIndexBuilder::TXTYPE_PROGRESS_INDEX_BUILD: Change state from Rejection_Unlocking to Rejected 2026-01-08T15:34:53.009325Z node 2 :BUILD_INDEX NOTICE: schemeshard_build_index__progress.cpp:1950: TIndexBuilder::TXTYPE_PROGRESS_INDEX_BUILD: TTxBuildProgress: Execute: 105 Rejected 2026-01-08T15:34:53.009507Z node 2 :BUILD_INDEX DEBUG: schemeshard_build_index__progress.cpp:1951: TIndexBuilder::TXTYPE_PROGRESS_INDEX_BUILD: TTxBuildProgress: Execute: 105 Rejected TBuildInfo{ IndexBuildId: 105, Uid: , DomainPathId: [OwnerId: 72075186233409549, LocalPathId: 1], TablePathId: [OwnerId: 72075186233409549, LocalPathId: 32], IndexType: EIndexTypeInvalid, IndexName: , State: Rejected, SubState: None, IsBroken: 0, IsCancellationRequested: 0, Issue: At Initiating state got unsuccess propose result, status: StatusPreconditionFailed, reason: Adding columns with defaults is disabled, SubscribersCount: 1, CreateSender: [0:0:0], AlterMainTableTxId: 281474976725758, AlterMainTableTxStatus: StatusAccepted, AlterMainTableTxDone: 1, LockTxId: 281474976725757, LockTxStatus: StatusAccepted, LockTxDone: 1, InitiateTxId: 281474976725759, InitiateTxStatus: StatusPreconditionFailed, InitiateTxDone: 0, SnapshotStepId: 0, ApplyTxId: 0, ApplyTxStatus: StatusSuccess, ApplyTxDone: 0, DropColumnsTxId: 281474976730757, DropColumnsTxStatus: StatusAccepted, DropColumnsTxDone: 1, UnlockTxId: 281474976730758, UnlockTxStatus: StatusAccepted, UnlockTxDone: 1, ToUploadShards: 0, DoneShards: 0, Processed: UploadRows: 0 UploadBytes: 0 ReadRows: 0 ReadBytes: 0 CpuTimeUs: 0, Billed: UploadRows: 0 UploadBytes: 0 ReadRows: 0 ReadBytes: 0 CpuTimeUs: 0} 2026-01-08T15:34:53.009579Z node 2 :BUILD_INDEX TRACE: schemeshard_build_index_tx_base.cpp:339: TIndexBuilder::TXTYPE_PROGRESS_INDEX_BUILD: TIndexBuildInfo SendNotifications: : id# 105, subscribers count# 1 2026-01-08T15:34:53.009818Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:230: tests -- TTxNotificationSubscriber for txId 105: got EvNotifyTxCompletionResult 2026-01-08T15:34:53.009875Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:239: tests -- TTxNotificationSubscriber for txId 105: satisfy waiter 
[2:1826:3681] TestWaitNotification: OK eventTxId 105 2026-01-08T15:34:53.010704Z node 2 :BUILD_INDEX DEBUG: schemeshard_build_index__get.cpp:19: TIndexBuilder::TXTYPE_GET_INDEX_BUILD: DoExecute DatabaseName: "/MyRoot/ServerLessDB" IndexBuildId: 105 2026-01-08T15:34:53.011117Z node 2 :BUILD_INDEX DEBUG: schemeshard_build_index_tx_base.h:104: TIndexBuilder::TXTYPE_GET_INDEX_BUILD: Reply Status: SUCCESS IndexBuild { Id: 105 Issues { message: "At Initiating state got unsuccess propose result, status: StatusPreconditionFailed, reason: Adding columns with defaults is disabled" severity: 1 } State: STATE_REJECTED Settings { source_path: "/MyRoot/ServerLessDB/Table" max_shards_in_flight: 2 column_build_operation { column { ColumnName: "default_value" default_from_literal { type { type_id: UINT64 } value { uint64_value: 10 } } } } ScanSettings { MaxBatchRows: 1 MaxBatchBytes: 8388608 MaxBatchRetries: 50 } } Progress: 0 EndTime { } } BUILDINDEX RESPONSE Get: NKikimrIndexBuilder.TEvGetResponse Status: SUCCESS IndexBuild { Id: 105 Issues { message: "At Initiating state got unsuccess propose result, status: StatusPreconditionFailed, reason: Adding columns with defaults is disabled" severity: 1 } State: STATE_REJECTED Settings { source_path: "/MyRoot/ServerLessDB/Table" max_shards_in_flight: 2 column_build_operation { column { ColumnName: "default_value" default_from_literal { type { type_id: UINT64 } value { uint64_value: 10 } } } } ScanSettings { MaxBatchRows: 1 MaxBatchBytes: 8388608 MaxBatchRetries: 50 } } Progress: 0 EndTime { } } 2026-01-08T15:34:53.012300Z node 2 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/ServerLessDB/Table" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72075186233409549 2026-01-08T15:34:53.012567Z node 2 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72075186233409549 describe path "/MyRoot/ServerLessDB/Table" took 289us result status StatusSuccess 2026-01-08T15:34:53.013041Z node 2 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/ServerLessDB/Table" PathDescription { Self { Name: "Table" PathId: 32 SchemeshardId: 72075186233409549 PathType: EPathTypeTable CreateFinished: true CreateTxId: 105 CreateStep: 300 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 5 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 5 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 3 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "Table" Columns { Name: "key" Type: "Uint32" TypeId: 2 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "value" Type: "Utf8" TypeId: 4608 Id: 2 NotNull: false IsBuildInProgress: false } KeyColumnNames: "key" KeyColumnIds: 1 TableSchemaVersion: 3 IsBackup: false IsRestore: false } TableStats { DataSize: 0 RowCount: 0 IndexSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 0 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 1 RangeReadRows: 0 StoragePools { } ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 0 Memory: 0 Network: 0 Storage: 0 
ReadThroughput: 0 WriteThroughput: 0 ReadIops: 0 WriteIops: 0 } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 39 ProcessingParams { Version: 2 PlanResolution: 50 Coordinators: 72075186233409550 TimeCastBucketsPerMediator: 2 Mediators: 72075186233409551 SchemeShard: 72075186233409549 } DomainKey { SchemeShard: 72057594046678944 PathId: 39 } PathsInside: 31 PathsLimit: 10000 ShardsInside: 4 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 38 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SharedHive: 72057594037968897 ServerlessComputeResourcesMode: EServerlessComputeResourcesModeShared SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 32 PathOwnerId: 72075186233409549, at schemeshard: 72075186233409549 |87.6%| [TM] {BAZEL_UPLOAD} ydb/core/tx/schemeshard/ut_column_build/unittest >> TSentinelTests::PDiskErrorState >> KikimrIcGateway::TestLoadExternalTable |87.6%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/tiering/ut/unittest |87.6%| [TM] {BAZEL_UPLOAD} ydb/core/tx/tiering/ut/unittest |87.6%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/s3/py3test >> test_s3.py::TestYdbS3TTL::test_s3[table_index_4_UNIQUE_SYNC-pk_types0-all_types0-index0--UNIQUE-SYNC] [GOOD] |87.6%| [TM] {BAZEL_UPLOAD} ydb/tests/datashard/s3/py3test >> TxUsage::Sinks_Olap_WriteToTopicAndTable_2_Query >> YdbTableSplit::SplitByLoadWithDeletes [GOOD] >> KqpRboYql::LeftJoins [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-LocalUser-CreateUser-14 [FAIL] >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-LocalUser-CreateUser-15 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/rbo/unittest >> KqpRboYql::Aggregation-ColumnStore [GOOD] Test command err: Trying to start YDB, gRPC: 12398, MsgBus: 26685 2026-01-08T15:33:58.619326Z node 1 :METADATA_PROVIDER WARN: log.cpp:841: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7593014437795953057:2067];send_to=[0:7307199536658146131:7762515]; 2026-01-08T15:33:58.619409Z node 1 :METADATA_PROVIDER ERROR: log.cpp:841: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2026-01-08T15:33:59.026571Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2026-01-08T15:33:59.026680Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2026-01-08T15:33:59.031651Z node 1 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: LookupError, path: Root/.metadata/script_executions 2026-01-08T15:33:59.050125Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2026-01-08T15:33:59.072792Z node 1 
:IMPORT WARN: schemeshard_import.cpp:440: Table profiles were not loaded 2026-01-08T15:33:59.075588Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1374: Notification cookie mismatch for subscription [1:7593014437795953030:2081] 1767886438616333 != 1767886438616336 2026-01-08T15:33:59.306408Z node 1 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: PathErrorUnknown, path: Root/.metadata/script_executions 2026-01-08T15:33:59.306935Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2026-01-08T15:33:59.306948Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2026-01-08T15:33:59.306953Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2026-01-08T15:33:59.307016Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2026-01-08T15:33:59.634536Z node 1 :TX_CONVEYOR ERROR: log.cpp:841: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2026-01-08T15:33:59.824547Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2026-01-08T15:34:02.415123Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7593014454975823103:2324], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:34:02.415260Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:263: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:34:02.418038Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7593014454975823113:2325], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:34:02.418158Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:263: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:34:02.719697Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:34:02.938890Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7593014454975823210:2335], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:34:02.938982Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:263: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:34:02.939481Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7593014454975823213:2336], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:34:02.939542Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:263: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:34:02.957274Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:34:03.091642Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7593014459270790599:2349], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:34:03.091746Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:263: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:34:03.092425Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7593014459270790605:2353], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:34:03.092479Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7593014459270790603:2351], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:34:03.092674Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:263: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:34:03.098146Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710660:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2026-01-08T15:34:03.119494Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7593014459270790608:2354], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710660 completed, doublechecking } 2026-01-08T15:34:03.212139Z node 1 :TX_PROXY ERROR: schemereq.cpp:624: Actor# [1:7593014459270790659:2657] txid# 281474976710661, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 43], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2026-01-08T15:34:03.621126Z node 1 :METADATA_PROVIDER ERROR: log.cpp:841: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7593014437795953057:2067];send_to=[0:7307199536658146131:7762515]; 2026-01-08T15:34:03.621238Z node 1 :METADATA_PROVIDER ERROR: log.cpp:841: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; Trying to start YDB, gRPC: 8875, MsgBus: 24886 2026-01-08T15:34:04.849777Z node 2 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: LookupError, path: Root/.metadata/script_executions 2026-01-08T15:34:04.849874Z node 2 :METADATA_PROVIDER ERROR: log.cpp:841: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2026-01-08T15:34:04.939400Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2026-01-08T15:34:04.939490Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2026-01-08T15:34:04.954470Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1374: Notification cookie mismatch for subscription [2:7593014460311852827:2081] 1767886444747959 != 1767886444747962 2026-01-08T15:34:04.970869Z node 2 :IMPORT WARN: schemeshard_import.cpp:440: Table profiles were not loaded 2026-01-08T15:34:04.983033Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2026-01-08T15:34:05.032838Z node 2 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: PathErrorUnknown, path: Root/.metadata/script_executions 2026-01-08T15:34:05.133653Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2026-01-08T15:34:05.133677Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2026-01-08T15:34:05.133683Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2026-01-08T15:34:05.133760Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2026-01-08T15:34:05.615151Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2026-01-08T15:34:05.850494Z node 2 :TX_CONVEYOR ERROR: log.cpp:841: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2026-01-08T15:34:08.323864Z node 2 
:KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7593014477491722896:2324], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, iss ... _service.cpp:263: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:34:20.933077Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7593014529588261323:3126], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:34:20.933151Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7593014529588261322:3125], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:34:20.933214Z node 3 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:263: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:34:20.937599Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710660:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2026-01-08T15:34:20.971496Z node 3 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [3:7593014529588261326:3127], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710660 completed, doublechecking } 2026-01-08T15:34:21.065642Z node 3 :TX_PROXY ERROR: schemereq.cpp:624: Actor# [3:7593014533883228675:5245] txid# 281474976710661, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 43], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2026-01-08T15:34:25.610460Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7747: Cannot get console configs 2026-01-08T15:34:25.610492Z node 3 :IMPORT WARN: schemeshard_import.cpp:440: Table profiles were not loaded Trying to start YDB, gRPC: 10924, MsgBus: 16441 2026-01-08T15:34:44.176697Z node 4 :METADATA_PROVIDER WARN: log.cpp:841: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[4:7593014635743020778:2064];send_to=[0:7307199536658146131:7762515]; 2026-01-08T15:34:44.177471Z node 4 :METADATA_PROVIDER ERROR: log.cpp:841: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2026-01-08T15:34:44.251645Z node 4 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:125: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, got undelivered to scheme cache: ActorUnknown 2026-01-08T15:34:44.404362Z node 4 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: LookupError, path: Root/.metadata/script_executions 2026-01-08T15:34:44.599900Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2026-01-08T15:34:44.600025Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2026-01-08T15:34:44.603008Z node 4 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1374: Notification cookie mismatch for subscription [4:7593014635743020754:2081] 1767886484172060 != 1767886484172063 2026-01-08T15:34:44.608494Z node 4 :IMPORT WARN: schemeshard_import.cpp:440: Table profiles were not loaded 2026-01-08T15:34:44.649820Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Connecting -> Connected 2026-01-08T15:34:44.714589Z node 4 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: PathErrorUnknown, path: Root/.metadata/script_executions 2026-01-08T15:34:44.778971Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2026-01-08T15:34:44.779002Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2026-01-08T15:34:44.779011Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2026-01-08T15:34:44.779104Z node 4 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2026-01-08T15:34:45.168899Z node 4 :TX_CONVEYOR ERROR: log.cpp:841: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2026-01-08T15:34:45.387445Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: 
ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2026-01-08T15:34:45.396365Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:123: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2026-01-08T15:34:48.867843Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7593014652922890823:2325], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:34:48.867941Z node 4 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:263: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:34:48.868410Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7593014652922890833:2326], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:34:48.868466Z node 4 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:263: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:34:48.892614Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:34:49.070741Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7593014657217858221:2335], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:34:49.070832Z node 4 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:263: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:34:49.071271Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7593014657217858223:2336], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:34:49.071332Z node 4 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:263: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:34:49.103053Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:34:49.177119Z node 4 :METADATA_PROVIDER ERROR: log.cpp:841: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[4:7593014635743020778:2064];send_to=[0:7307199536658146131:7762515]; 2026-01-08T15:34:49.177202Z node 4 :METADATA_PROVIDER ERROR: log.cpp:841: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2026-01-08T15:34:49.186683Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7593014657217858301:2348], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:34:49.186787Z node 4 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:263: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:34:49.187206Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7593014657217858308:2352], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:34:49.187302Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7593014657217858307:2351], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:34:49.187481Z node 4 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:263: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:34:49.193301Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710660:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2026-01-08T15:34:49.226478Z node 4 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [4:7593014657217858311:2353], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710660 completed, doublechecking } 2026-01-08T15:34:49.288120Z node 4 :TX_PROXY ERROR: schemereq.cpp:624: Actor# [4:7593014657217858362:2645] txid# 281474976710661, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 43], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } |87.6%| [TM] {BAZEL_UPLOAD} ydb/core/kqp/ut/rbo/unittest >> KikimrIcGateway::TestSecretsExistingValidation-UseSchemaSecrets >> ColumnBuildTest::Unlocking_DisableFlag [GOOD] >> KikimrIcGateway::TestLoadServiceAccountSecretValueFromExternalDataSourceMetadata-UseSchemaSecrets-UseAuthToken >> KikimrIcGateway::TestListPath >> TxUsage::Sinks_Oltp_WriteToTopicAndTable_5_Table [GOOD] >> ColumnBuildTest::DisabledAndEnabledFlag [GOOD] |87.6%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/kqp/ut/close_with_load/ydb-core-kqp-ut-close_with_load |87.6%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/kqp/ut/close_with_load/ydb-core-kqp-ut-close_with_load |87.6%| [LD] {RESULT} $(B)/ydb/core/kqp/ut/close_with_load/ydb-core-kqp-ut-close_with_load >> KikimrIcGateway::TestDropExternalTable |87.6%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/schemeshard/ut_replication_reboots/ydb-core-tx-schemeshard-ut_replication_reboots |87.6%| [LD] {RESULT} $(B)/ydb/core/tx/schemeshard/ut_replication_reboots/ydb-core-tx-schemeshard-ut_replication_reboots |87.6%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/schemeshard/ut_replication_reboots/ydb-core-tx-schemeshard-ut_replication_reboots >> DataShardVolatile::DistributedUpsertRestartAfterPrepare-UseSink [GOOD] >> DataShardVolatile::DistributedUpsertRestartAfterPlan+UseSink >> TxUsage::Sinks_Oltp_WriteToTopicAndTable_5_Query >> KikimrIcGateway::TestCreateExternalTable ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_column_build/unittest >> ColumnBuildTest::Unlocking_DisableFlag [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2143] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2143] Leader for TabletID 72057594046678944 is [1:129:2154] sender: [1:130:2058] recipient: [1:112:2143] 2026-01-08T15:34:41.974301Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7938: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2026-01-08T15:34:41.978925Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7966: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2026-01-08T15:34:41.979020Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7852: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2026-01-08T15:34:41.979091Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7868: OperationsProcessing config: using default configuration 2026-01-08T15:34:41.979142Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7874: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2026-01-08T15:34:41.979183Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7874: OperationsProcessing config: type TxMergeTablePartition, limit 10000 
2026-01-08T15:34:41.979281Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7998: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2026-01-08T15:34:41.979369Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_shred_manager.cpp:39: [RootShredManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, ShredInterval# 604800.000000s, ShredBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2026-01-08T15:34:41.980395Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:8069: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2026-01-08T15:34:41.980810Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2026-01-08T15:34:42.168832Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7755: Cannot subscribe to console configs 2026-01-08T15:34:42.168905Z node 1 :IMPORT WARN: schemeshard_import.cpp:440: Table profiles were not loaded 2026-01-08T15:34:42.194350Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2026-01-08T15:34:42.194807Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2026-01-08T15:34:42.195072Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2026-01-08T15:34:42.229263Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2026-01-08T15:34:42.229831Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:464: Clear TempDirsState with owners number: 0 2026-01-08T15:34:42.230761Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1383: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2026-01-08T15:34:42.232125Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2026-01-08T15:34:42.241304Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:160: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2026-01-08T15:34:42.241582Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_shred_manager.cpp:84: [RootShredManager] Stop 2026-01-08T15:34:42.243023Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2026-01-08T15:34:42.243119Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2026-01-08T15:34:42.243286Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2026-01-08T15:34:42.243345Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2026-01-08T15:34:42.243458Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:213: TTxServerlessStorageBilling.Complete 2026-01-08T15:34:42.243672Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:7111: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2026-01-08T15:34:42.425696Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot" OperationType: ESchemeOpMkDir MkDir { Name: ".sys" } Internal: true FailOnExist: false } TxId: 281474976710657 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:34:42.426983Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "auth_permissions" Type: EAuthPermissions } } TxId: 281474976710658 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:34:42.427171Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "auth_effective_permissions" Type: EAuthEffectivePermissions } } TxId: 281474976710659 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:34:42.427270Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "auth_users" Type: EAuthUsers } } TxId: 281474976710660 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:34:42.427348Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "pg_tables" Type: EPgTables } } TxId: 281474976710661 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:34:42.427420Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "ds_pdisks" Type: EPDisks } } TxId: 281474976710662 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:34:42.427484Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "auth_group_members" Type: EAuthGroupMembers } } TxId: 281474976710663 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:34:42.427554Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "top_partitions_one_minute" Type: 
ETopPartitionsByCpuOneMinute } } TxId: 281474976710664 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:34:42.427650Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "top_queries_by_request_units_one_hour" Type: ETopQueriesByRequestUnitsOneHour } } TxId: 281474976710665 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:34:42.427774Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "ds_vslots" Type: EVSlots } } TxId: 281474976710666 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:34:42.427886Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "top_partitions_by_tli_one_hour" Type: ETopPartitionsByTliOneHour } } TxId: 281474976710667 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:34:42.427984Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "nodes" Type: ENodes } } TxId: 281474976710668 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:34:42.428050Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "top_queries_by_cpu_time_one_hour" Type: ETopQueriesByCpuTimeOneHour } } TxId: 281474976710669 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:34:42.428134Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "top_queries_by_read_bytes_one_hour" Type: ETopQueriesByReadBytesOneHour } } TxId: 281474976710670 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:34:42.428227Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "top_queries_by_read_bytes_one_minute" Type: ETopQueriesByR ... 
BUG: schemeshard__operation.cpp:1714: TOperation IsReadyToDone TxId: 281474976725761 ready parts: 1/1 2026-01-08T15:34:57.010591Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:928: Part operation is done id#281474976725761:0 progress is 1/1 2026-01-08T15:34:57.010627Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1714: TOperation IsReadyToDone TxId: 281474976725761 ready parts: 1/1 2026-01-08T15:34:57.010671Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1679: TOperation IsReadyToNotify, TxId: 281474976725761, ready parts: 1/1, is published: true 2026-01-08T15:34:57.010763Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1702: TOperation DoNotify send TEvNotifyTxCompletionResult to actorId: [2:1895:3760] message: TxId: 281474976725761 2026-01-08T15:34:57.010831Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1714: TOperation IsReadyToDone TxId: 281474976725761 ready parts: 1/1 2026-01-08T15:34:57.010878Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:995: Operation and all the parts is done, operation id: 281474976725761:0 2026-01-08T15:34:57.010918Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5577: RemoveTx for txid 281474976725761:0 2026-01-08T15:34:57.011034Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:611: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72075186233409549, LocalPathId: 32] was 3 2026-01-08T15:34:57.015399Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:7196: Handle: TEvNotifyTxCompletionResult: txId# 281474976725761 2026-01-08T15:34:57.015529Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:7198: Message: TxId: 281474976725761 2026-01-08T15:34:57.015628Z node 2 :BUILD_INDEX INFO: schemeshard_build_index__progress.cpp:3019: TIndexBuilder::TXTYPE_PROGRESS_INDEX_BUILD: TTxReply : TEvNotifyTxCompletionResult, id# 105, txId# 281474976725761 2026-01-08T15:34:57.015814Z node 2 :BUILD_INDEX DEBUG: schemeshard_build_index__progress.cpp:3022: TIndexBuilder::TXTYPE_PROGRESS_INDEX_BUILD: TTxReply : TEvNotifyTxCompletionResult, TIndexBuildInfo: TBuildInfo{ IndexBuildId: 105, Uid: , DomainPathId: [OwnerId: 72075186233409549, LocalPathId: 1], TablePathId: [OwnerId: 72075186233409549, LocalPathId: 32], IndexType: EIndexTypeInvalid, IndexName: , State: Unlocking, SubState: None, IsBroken: 0, IsCancellationRequested: 0, Issue: , SubscribersCount: 1, CreateSender: [0:0:0], AlterMainTableTxId: 281474976725758, AlterMainTableTxStatus: StatusAccepted, AlterMainTableTxDone: 1, LockTxId: 281474976725757, LockTxStatus: StatusAccepted, LockTxDone: 1, InitiateTxId: 281474976725759, InitiateTxStatus: StatusAccepted, InitiateTxDone: 1, SnapshotStepId: 0, ApplyTxId: 281474976725760, ApplyTxStatus: StatusAccepted, ApplyTxDone: 1, DropColumnsTxId: 0, DropColumnsTxStatus: StatusSuccess, DropColumnsTxDone: 0, UnlockTxId: 281474976725761, UnlockTxStatus: StatusAccepted, UnlockTxDone: 0, ToUploadShards: 0, DoneShards: 0, Processed: UploadRows: 0 UploadBytes: 0 ReadRows: 0 ReadBytes: 0 CpuTimeUs: 0, Billed: UploadRows: 0 UploadBytes: 0 ReadRows: 0 ReadBytes: 0 CpuTimeUs: 0}, txId# 281474976725761 2026-01-08T15:34:57.023513Z node 2 :BUILD_INDEX NOTICE: schemeshard_build_index__progress.cpp:1950: TIndexBuilder::TXTYPE_PROGRESS_INDEX_BUILD: TTxBuildProgress: Execute: 105 Unlocking 2026-01-08T15:34:57.023727Z node 2 :BUILD_INDEX DEBUG: schemeshard_build_index__progress.cpp:1951: TIndexBuilder::TXTYPE_PROGRESS_INDEX_BUILD: 
TTxBuildProgress: Execute: 105 Unlocking TBuildInfo{ IndexBuildId: 105, Uid: , DomainPathId: [OwnerId: 72075186233409549, LocalPathId: 1], TablePathId: [OwnerId: 72075186233409549, LocalPathId: 32], IndexType: EIndexTypeInvalid, IndexName: , State: Unlocking, SubState: None, IsBroken: 0, IsCancellationRequested: 0, Issue: , SubscribersCount: 1, CreateSender: [0:0:0], AlterMainTableTxId: 281474976725758, AlterMainTableTxStatus: StatusAccepted, AlterMainTableTxDone: 1, LockTxId: 281474976725757, LockTxStatus: StatusAccepted, LockTxDone: 1, InitiateTxId: 281474976725759, InitiateTxStatus: StatusAccepted, InitiateTxDone: 1, SnapshotStepId: 0, ApplyTxId: 281474976725760, ApplyTxStatus: StatusAccepted, ApplyTxDone: 1, DropColumnsTxId: 0, DropColumnsTxStatus: StatusSuccess, DropColumnsTxDone: 0, UnlockTxId: 281474976725761, UnlockTxStatus: StatusAccepted, UnlockTxDone: 1, ToUploadShards: 0, DoneShards: 0, Processed: UploadRows: 0 UploadBytes: 0 ReadRows: 0 ReadBytes: 0 CpuTimeUs: 0, Billed: UploadRows: 0 UploadBytes: 0 ReadRows: 0 ReadBytes: 0 CpuTimeUs: 0} 2026-01-08T15:34:57.023822Z node 2 :BUILD_INDEX INFO: schemeshard_build_index_tx_base.cpp:24: TIndexBuilder::TXTYPE_PROGRESS_INDEX_BUILD: Change state from Unlocking to Done 2026-01-08T15:34:57.032131Z node 2 :BUILD_INDEX NOTICE: schemeshard_build_index__progress.cpp:1950: TIndexBuilder::TXTYPE_PROGRESS_INDEX_BUILD: TTxBuildProgress: Execute: 105 Done 2026-01-08T15:34:57.032336Z node 2 :BUILD_INDEX DEBUG: schemeshard_build_index__progress.cpp:1951: TIndexBuilder::TXTYPE_PROGRESS_INDEX_BUILD: TTxBuildProgress: Execute: 105 Done TBuildInfo{ IndexBuildId: 105, Uid: , DomainPathId: [OwnerId: 72075186233409549, LocalPathId: 1], TablePathId: [OwnerId: 72075186233409549, LocalPathId: 32], IndexType: EIndexTypeInvalid, IndexName: , State: Done, SubState: None, IsBroken: 0, IsCancellationRequested: 0, Issue: , SubscribersCount: 1, CreateSender: [0:0:0], AlterMainTableTxId: 281474976725758, AlterMainTableTxStatus: StatusAccepted, AlterMainTableTxDone: 1, LockTxId: 281474976725757, LockTxStatus: StatusAccepted, LockTxDone: 1, InitiateTxId: 281474976725759, InitiateTxStatus: StatusAccepted, InitiateTxDone: 1, SnapshotStepId: 0, ApplyTxId: 281474976725760, ApplyTxStatus: StatusAccepted, ApplyTxDone: 1, DropColumnsTxId: 0, DropColumnsTxStatus: StatusSuccess, DropColumnsTxDone: 0, UnlockTxId: 281474976725761, UnlockTxStatus: StatusAccepted, UnlockTxDone: 1, ToUploadShards: 0, DoneShards: 0, Processed: UploadRows: 0 UploadBytes: 0 ReadRows: 0 ReadBytes: 0 CpuTimeUs: 0, Billed: UploadRows: 0 UploadBytes: 0 ReadRows: 0 ReadBytes: 0 CpuTimeUs: 0} 2026-01-08T15:34:57.032392Z node 2 :BUILD_INDEX TRACE: schemeshard_build_index_tx_base.cpp:339: TIndexBuilder::TXTYPE_PROGRESS_INDEX_BUILD: TIndexBuildInfo SendNotifications: : id# 105, subscribers count# 1 2026-01-08T15:34:57.032591Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:230: tests -- TTxNotificationSubscriber for txId 105: got EvNotifyTxCompletionResult 2026-01-08T15:34:57.032648Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:239: tests -- TTxNotificationSubscriber for txId 105: satisfy waiter [2:1954:3808] TestWaitNotification: OK eventTxId 105 2026-01-08T15:34:57.033594Z node 2 :BUILD_INDEX DEBUG: schemeshard_build_index__get.cpp:19: TIndexBuilder::TXTYPE_GET_INDEX_BUILD: DoExecute DatabaseName: "/MyRoot/ServerLessDB" IndexBuildId: 105 2026-01-08T15:34:57.034014Z node 2 :BUILD_INDEX DEBUG: schemeshard_build_index_tx_base.h:104: TIndexBuilder::TXTYPE_GET_INDEX_BUILD: Reply Status: SUCCESS IndexBuild { Id: 
105 Issues { message: "TShardStatus { ShardIdx: 72075186233409549:4 Status: DONE UploadStatus: STATUS_CODE_UNSPECIFIED DebugMessage:
: Error: Shard or requested range is empty\n SeqNoRound: 0 Processed: UploadRows: 0 UploadBytes: 0 ReadRows: 0 ReadBytes: 0 CpuTimeUs: 0 }" severity: 1 } State: STATE_DONE Settings { source_path: "/MyRoot/ServerLessDB/Table" max_shards_in_flight: 2 column_build_operation { column { ColumnName: "default_value" default_from_literal { type { type_id: UINT64 } value { uint64_value: 10 } } } } ScanSettings { MaxBatchRows: 1 MaxBatchBytes: 8388608 MaxBatchRetries: 50 } } Progress: 100 EndTime { } } BUILDINDEX RESPONSE Get: NKikimrIndexBuilder.TEvGetResponse Status: SUCCESS IndexBuild { Id: 105 Issues { message: "TShardStatus { ShardIdx: 72075186233409549:4 Status: DONE UploadStatus: STATUS_CODE_UNSPECIFIED DebugMessage:
: Error: Shard or requested range is empty\n SeqNoRound: 0 Processed: UploadRows: 0 UploadBytes: 0 ReadRows: 0 ReadBytes: 0 CpuTimeUs: 0 }" severity: 1 } State: STATE_DONE Settings { source_path: "/MyRoot/ServerLessDB/Table" max_shards_in_flight: 2 column_build_operation { column { ColumnName: "default_value" default_from_literal { type { type_id: UINT64 } value { uint64_value: 10 } } } } ScanSettings { MaxBatchRows: 1 MaxBatchBytes: 8388608 MaxBatchRetries: 50 } } Progress: 100 EndTime { } } 2026-01-08T15:34:57.035252Z node 2 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/ServerLessDB/Table" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72075186233409549 2026-01-08T15:34:57.035576Z node 2 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72075186233409549 describe path "/MyRoot/ServerLessDB/Table" took 332us result status StatusSuccess 2026-01-08T15:34:57.036120Z node 2 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/ServerLessDB/Table" PathDescription { Self { Name: "Table" PathId: 32 SchemeshardId: 72075186233409549 PathType: EPathTypeTable CreateFinished: true CreateTxId: 105 CreateStep: 300 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 6 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 6 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 4 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "Table" Columns { Name: "key" Type: "Uint32" TypeId: 2 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "value" Type: "Utf8" TypeId: 4608 Id: 2 NotNull: false IsBuildInProgress: false } Columns { Name: "default_value" Type: "Uint64" TypeId: 4 Id: 3 NotNull: false DefaultFromLiteral { type { type_id: UINT64 } value { uint64_value: 10 } } IsBuildInProgress: false } KeyColumnNames: "key" KeyColumnIds: 1 TableSchemaVersion: 4 IsBackup: false IsRestore: false } TableStats { DataSize: 0 RowCount: 0 IndexSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 0 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 1 RangeReadRows: 0 StoragePools { } ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 0 Memory: 0 Network: 0 Storage: 0 ReadThroughput: 0 WriteThroughput: 0 ReadIops: 0 WriteIops: 0 } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 39 ProcessingParams { Version: 2 PlanResolution: 50 Coordinators: 72075186233409550 TimeCastBucketsPerMediator: 2 Mediators: 72075186233409551 SchemeShard: 72075186233409549 } DomainKey { SchemeShard: 72057594046678944 PathId: 39 } PathsInside: 31 PathsLimit: 10000 ShardsInside: 4 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 38 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SharedHive: 72057594037968897 ServerlessComputeResourcesMode: EServerlessComputeResourcesModeShared SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 
MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 32 PathOwnerId: 72075186233409549, at schemeshard: 72075186233409549 |87.6%| [TM] {BAZEL_UPLOAD} ydb/core/tx/schemeshard/ut_column_build/unittest >> YdbTableSplit::SplitByLoadWithReads [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/services/ydb/table_split_ut/unittest >> YdbTableSplit::SplitByLoadWithDeletes [GOOD] Test command err: 2026-01-08T15:34:27.424782Z node 1 :METADATA_PROVIDER WARN: log.cpp:841: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7593014562353588052:2160];send_to=[0:7307199536658146131:7762515]; 2026-01-08T15:34:27.424908Z node 1 :METADATA_PROVIDER ERROR: log.cpp:841: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2026-01-08T15:34:27.433472Z node 2 :BS_NODE WARN: {NWDC01@distconf.cpp:423} StateFunc too long Type# 268639239 Duration# 0.026360s 2026-01-08T15:34:28.064073Z node 1 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: LookupError, path: Root/.metadata/script_executions 2026-01-08T15:34:28.125890Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2026-01-08T15:34:28.130498Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2026-01-08T15:34:28.154682Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2026-01-08T15:34:28.248159Z node 1 :IMPORT WARN: schemeshard_import.cpp:440: Table profiles were not loaded 2026-01-08T15:34:28.274461Z node 1 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: PathErrorUnknown, path: Root/.metadata/script_executions 2026-01-08T15:34:28.439352Z node 1 :TX_CONVEYOR ERROR: log.cpp:841: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2026-01-08T15:34:28.509017Z node 2 :BS_NODE WARN: {NWDC01@distconf.cpp:423} StateFunc too long Type# 268639238 Duration# 0.022499s 2026-01-08T15:34:28.541771Z node 3 :BS_NODE WARN: {NWDC01@distconf.cpp:423} StateFunc too long Type# 268639239 Duration# 0.008826s 2026-01-08T15:34:28.951144Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2026-01-08T15:34:28.951185Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2026-01-08T15:34:28.951194Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2026-01-08T15:34:28.951312Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2026-01-08T15:34:29.283503Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: 
ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2026-01-08T15:34:32.426470Z node 1 :METADATA_PROVIDER ERROR: log.cpp:841: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7593014562353588052:2160];send_to=[0:7307199536658146131:7762515]; 2026-01-08T15:34:32.426556Z node 1 :METADATA_PROVIDER ERROR: log.cpp:841: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2026-01-08T15:34:32.840981Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7593014583828425831:2332], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:34:32.841079Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:263: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:34:32.842664Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7593014583828425841:2333], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:34:32.842758Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:263: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:34:33.272246Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:34:33.535542Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7593014588123393302:2348], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:34:33.535618Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:263: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:34:33.543097Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7593014588123393305:2349], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:34:33.543185Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:263: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:34:33.592439Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_alter_table.cpp:187) Table has 1 shards 2026-01-08T15:34:33.947661Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7593014588123393423:2381], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:34:33.949775Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:263: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:34:33.951317Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7593014588123393446:2396], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:34:33.953770Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7593014588123393440:2391], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:34:33.953842Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7593014588123393441:2392], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:34:33.953877Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7593014588123393443:2394], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:34:33.954172Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:263: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:34:33.957177Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7593014588123393461:2403], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:34:33.957383Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7593014588123393463:2405], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:34:33.957430Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7593014588123393457:2400], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:34:33.957489Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:263: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:34:33.963199Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7593014588123393439:2390], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:34:33.988570Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7593014588123393503:2410], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:34:33.988650Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7593014588123393505:2412], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:34:33.988743Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:263: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:34:33.991844Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_mkdir.cpp:115: TMkDir Propose, path: /Root/.metadata, operationId: 281474976710660:0, at schemeshard: 72057594046644480 2026-01-08T15:34:33.992077Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:127: IgniteOperation, opId: 281474976710660:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046644480 2026-01-08T15:34:33.992108Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_mkdir.cpp:115: TMkDir Propose, path ... 3 state SplitSrcSendingSnapshot (wrong shard state)" issue_code: 2006 severity: 1 } } trace_id# 2026-01-08T15:34:53.633675Z node 1 :TX_DATASHARD INFO: datashard.cpp:1302: Change sender activated: at tablet: 72075186224037889 2026-01-08T15:34:53.633932Z node 1 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186224037889 TxInFly 0 2026-01-08T15:34:53.635966Z node 1 :TX_DATASHARD INFO: datashard.cpp:418: Switched to work state Ready tabletId 72075186224037890 2026-01-08T15:34:53.636087Z node 1 :TX_DATASHARD INFO: datashard.cpp:476: Send registration request to time cast Ready tabletId 72075186224037890 mediators count is 1 coordinators count is 1 buckets per mediator 2 2026-01-08T15:34:53.636127Z node 1 :TX_DATASHARD INFO: datashard.cpp:1310: Change sender killed: at tablet: 72075186224037890 2026-01-08T15:34:53.636151Z node 1 :TX_DATASHARD INFO: datashard.cpp:1302: Change sender activated: at tablet: 72075186224037890 2026-01-08T15:34:53.636400Z node 1 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186224037890 TxInFly 0 2026-01-08T15:34:53.644114Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_split_merge.cpp:207: TSplitMerge TTransferData operationId# 281474976715693:0 HandleReply TEvSplitAck, at schemeshard: 72057594046644480, message: OperationCookie: 281474976715693 TabletId: 72075186224037888 2026-01-08T15:34:53.644442Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2700: Change state for txid 281474976715693:0 131 -> 132 2026-01-08T15:34:53.645912Z node 1 :GLOBAL WARN: log.cpp:841: fline=events.h:105;event=ev_write_error;status=STATUS_OVERLOADED;details=Rejecting data TxId 0 because datashard 72075186224037888: is in process of split opId 281474976715693 state SplitSrcWaitForPartitioningChanged (wrong shard state);tx_id=0; 2026-01-08T15:34:53.645947Z node 1 :TX_DATASHARD NOTICE: datashard.cpp:3149: Rejecting data TxId 0 because datashard 72075186224037888: is in process of split opId 281474976715693 state SplitSrcWaitForPartitioningChanged (wrong shard state) 2026-01-08T15:34:53.646008Z node 1 :GLOBAL WARN: log.cpp:841: fline=events.h:105;event=ev_write_error;status=STATUS_OVERLOADED;details=Rejecting data TxId 0 because datashard 72075186224037888: is in process of split opId 281474976715693 state SplitSrcWaitForPartitioningChanged (wrong shard state);tx_id=0; 2026-01-08T15:34:53.646024Z node 1 :TX_DATASHARD NOTICE: datashard.cpp:3149: Rejecting data TxId 0 because datashard 72075186224037888: is in process of split opId 281474976715693 state SplitSrcWaitForPartitioningChanged (wrong shard state) 2026-01-08T15:34:53.646110Z node 1 :KQP_COMPUTE WARN: kqp_write_actor.cpp:880: SelfId: [1:7593014674022787696:2359], Table: `/Root/Foo` ([72057594046644480:38:2]), SessionActorId: [1:7593014588123393395:2359]Got OVERLOADED for table `/Root/Foo`. 
ShardID=72075186224037888, Sink=[1:7593014674022787696:2359]. Ignored this error.{
: Error: Rejecting data TxId 0 because datashard 72075186224037888: is in process of split opId 281474976715693 state SplitSrcWaitForPartitioningChanged (wrong shard state), code: 2006 } 2026-01-08T15:34:53.646142Z node 1 :KQP_COMPUTE WARN: kqp_write_actor.cpp:880: SelfId: [1:7593014674022787699:2375], Table: `/Root/Foo` ([72057594046644480:38:2]), SessionActorId: [1:7593014588123393416:2375]Got OVERLOADED for table `/Root/Foo`. ShardID=72075186224037888, Sink=[1:7593014674022787699:2375]. Ignored this error.{
: Error: Rejecting data TxId 0 because datashard 72075186224037888: is in process of split opId 281474976715693 state SplitSrcWaitForPartitioningChanged (wrong shard state), code: 2006 } 2026-01-08T15:34:53.646189Z node 1 :KQP_COMPUTE ERROR: kqp_write_actor.cpp:4656: SelfId: [1:7593014674022787686:2359], SessionActorId: [1:7593014588123393395:2359], statusCode=OVERLOADED. Issue=
: Error: Kikimr cluster or one of its subsystems is overloaded. Tablet 72075186224037888 is overloaded. Table `/Root/Foo`., code: 2006
: Error: Rejecting data TxId 0 because datashard 72075186224037888: is in process of split opId 281474976715693 state SplitSrcWaitForPartitioningChanged (wrong shard state), code: 2006 . sessionActorId=[1:7593014588123393395:2359]. 2026-01-08T15:34:53.646218Z node 1 :KQP_COMPUTE ERROR: kqp_write_actor.cpp:4656: SelfId: [1:7593014674022787688:2375], SessionActorId: [1:7593014588123393416:2375], statusCode=OVERLOADED. Issue=
: Error: Kikimr cluster or one of its subsystems is overloaded. Tablet 72075186224037888 is overloaded. Table `/Root/Foo`., code: 2006
: Error: Rejecting data TxId 0 because datashard 72075186224037888: is in process of split opId 281474976715693 state SplitSrcWaitForPartitioningChanged (wrong shard state), code: 2006 . sessionActorId=[1:7593014588123393416:2375]. 2026-01-08T15:34:53.646389Z node 1 :KQP_SESSION WARN: {KQPSA@kqp_session_actor.cpp:2328} SessionId: ydb://session/3?node_id=1&id=ZGVjMWJjZjQtODVmNWIxYTItOTk2MzFmYzYtMjFjNmRmNzU=, ActorId: [1:7593014588123393395:2359], ActorState: ExecuteState, LegacyTraceId: 01kef3y5y60cpr8m2fcm44pfpx, got TEvKqpBuffer::TEvError in ExecuteState, status: OVERLOADED send to: [1:7593014674022787687:2359] from: [1:7593014674022787686:2359] trace_id# 2026-01-08T15:34:53.646419Z node 1 :KQP_SESSION WARN: {KQPSA@kqp_session_actor.cpp:2328} SessionId: ydb://session/3?node_id=1&id=NmUyNmJlZjEtMTBlMzNhNjgtODNjODY5NTgtNmM1MTYwZWY=, ActorId: [1:7593014588123393416:2375], ActorState: ExecuteState, LegacyTraceId: 01kef3y5y682nh18443ddyck8v, got TEvKqpBuffer::TEvError in ExecuteState, status: OVERLOADED send to: [1:7593014674022787689:2375] from: [1:7593014674022787688:2375] trace_id# 2026-01-08T15:34:53.646470Z node 1 :KQP_EXECUTER ERROR: {KQPEX@kqp_executer_impl.h:1269} ActorId: [1:7593014674022787687:2359] TxId: 281474976714964. Ctx: { TraceId: 01kef3y5y60cpr8m2fcm44pfpx, Database: , SessionId: ydb://session/3?node_id=1&id=ZGVjMWJjZjQtODVmNWIxYTItOTk2MzFmYzYtMjFjNmRmNzU=, PoolId: default, DatabaseId: /Root, IsStreamingQuery: 0}. Runtime error Status# OVERLOADED Issues# {
: Error: Kikimr cluster or one of its subsystems is overloaded. Tablet 72075186224037888 is overloaded. Table `/Root/Foo`., code: 2006 subissue: {
: Error: Rejecting data TxId 0 because datashard 72075186224037888: is in process of split opId 281474976715693 state SplitSrcWaitForPartitioningChanged (wrong shard state), code: 2006 } } trace_id# 2026-01-08T15:34:53.646493Z node 1 :KQP_EXECUTER ERROR: {KQPEX@kqp_executer_impl.h:1269} ActorId: [1:7593014674022787689:2375] TxId: 281474976714965. Ctx: { TraceId: 01kef3y5y682nh18443ddyck8v, Database: , SessionId: ydb://session/3?node_id=1&id=NmUyNmJlZjEtMTBlMzNhNjgtODNjODY5NTgtNmM1MTYwZWY=, PoolId: default, DatabaseId: /Root, IsStreamingQuery: 0}. Runtime error Status# OVERLOADED Issues# {
: Error: Kikimr cluster or one of its subsystems is overloaded. Tablet 72075186224037888 is overloaded. Table `/Root/Foo`., code: 2006 subissue: {
: Error: Rejecting data TxId 0 because datashard 72075186224037888: is in process of split opId 281474976715693 state SplitSrcWaitForPartitioningChanged (wrong shard state), code: 2006 } } trace_id# 2026-01-08T15:34:53.646757Z node 1 :KQP_SESSION WARN: {KQPSA@kqp_session_actor.cpp:3171} SessionId: ydb://session/3?node_id=1&id=ZGVjMWJjZjQtODVmNWIxYTItOTk2MzFmYzYtMjFjNmRmNzU=, ActorId: [1:7593014588123393395:2359], ActorState: ExecuteState, LegacyTraceId: 01kef3y5y60cpr8m2fcm44pfpx, Create QueryResponse for error on request, msg: status# OVERLOADED issues# { message: "Kikimr cluster or one of its subsystems is overloaded. Tablet 72075186224037888 is overloaded. Table `/Root/Foo`." issue_code: 2006 severity: 1 issues { message: "Rejecting data TxId 0 because datashard 72075186224037888: is in process of split opId 281474976715693 state SplitSrcWaitForPartitioningChanged (wrong shard state)" issue_code: 2006 severity: 1 } } trace_id# 2026-01-08T15:34:53.646776Z node 1 :KQP_SESSION WARN: {KQPSA@kqp_session_actor.cpp:3171} SessionId: ydb://session/3?node_id=1&id=NmUyNmJlZjEtMTBlMzNhNjgtODNjODY5NTgtNmM1MTYwZWY=, ActorId: [1:7593014588123393416:2375], ActorState: ExecuteState, LegacyTraceId: 01kef3y5y682nh18443ddyck8v, Create QueryResponse for error on request, msg: status# OVERLOADED issues# { message: "Kikimr cluster or one of its subsystems is overloaded. Tablet 72075186224037888 is overloaded. Table `/Root/Foo`." issue_code: 2006 severity: 1 issues { message: "Rejecting data TxId 0 because datashard 72075186224037888: is in process of split opId 281474976715693 state SplitSrcWaitForPartitioningChanged (wrong shard state)" issue_code: 2006 severity: 1 } } trace_id# 2026-01-08T15:34:53.648923Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046644480 2026-01-08T15:34:53.649204Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046644480 2026-01-08T15:34:53.649280Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_split_merge.cpp:437: TSplitMerge TNotifySrc, operationId: 281474976715693:0 ProgressState, at schemeshard: 72057594046644480 2026-01-08T15:34:53.651227Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6274: Handle TEvUpdateAck, at schemeshard: 72057594046644480, msg: Owner: 72057594046644480 Generation: 2 LocalPathId: 38 Version: 5 PathOwnerId: 72057594046644480, cookie: 281474976715693 2026-01-08T15:34:53.651270Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:108: Operation in-flight, at schemeshard: 72057594046644480, txId: 281474976715693 2026-01-08T15:34:53.651288Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046644480, txId: 281474976715693, pathId: [OwnerId: 72057594046644480, LocalPathId: 38], version: 5 2026-01-08T15:34:53.662092Z node 1 :TX_DATASHARD INFO: datashard_loans.cpp:177: 72075186224037888 Initiating switch from PreOffline to Offline state 2026-01-08T15:34:53.662146Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_split_merge.cpp:392: TSplitMerge TNotifySrc, operationId: 281474976715693:0 HandleReply TEvSplitPartitioningChangedAck, from datashard: 72075186224037888, at schemeshard: 72057594046644480 2026-01-08T15:34:53.662235Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:928: Part operation is done id#281474976715693:0 progress is 1/1 
2026-01-08T15:34:53.662262Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:928: Part operation is done id#281474976715693:0 progress is 1/1 2026-01-08T15:34:53.662311Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:995: Operation and all the parts is done, operation id: 281474976715693:0 2026-01-08T15:34:53.664937Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:270: Unable to activate 281474976715693:0 2026-01-08T15:34:53.667129Z node 1 :TX_DATASHARD INFO: datashard_impl.h:3339: 72075186224037888 Reporting state Offline to schemeshard 72057594046644480 2026-01-08T15:34:53.674168Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__state_changed_reply.cpp:78: TTxShardStateChanged DoExecute, datashard informs about state changing, datashardId: 72075186224037888, state: Offline, at schemeshard: 72057594046644480 2026-01-08T15:34:53.688891Z node 1 :TX_DATASHARD INFO: datashard.cpp:196: OnTabletStop: 72075186224037888 reason = ReasonStop 2026-01-08T15:34:53.689766Z node 1 :HIVE WARN: hive_impl.cpp:519: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037888 not found 2026-01-08T15:34:53.690995Z node 1 :TX_DATASHARD INFO: datashard.cpp:256: OnTabletDead: 72075186224037888 2026-01-08T15:34:53.691119Z node 1 :TX_DATASHARD INFO: datashard.cpp:1310: Change sender killed: at tablet: 72075186224037888 Table has 2 shards |87.6%| [TM] {BAZEL_UPLOAD} ydb/services/ydb/table_split_ut/unittest >> DataShardVolatile::UpsertBrokenLockArbiterRestart-UseSink [GOOD] >> DataShardVolatile::UpsertDependenciesShardsRestart+UseSink >> KikimrProvider::TestFillAuthPropertiesNone [GOOD] >> KikimrProvider::TestFillAuthPropertiesServiceAccount [GOOD] >> KikimrProvider::TestFillAuthPropertiesMdbBasic [GOOD] >> ReadAttributesUtils::AttributesGatheringEmpry [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_column_build/unittest >> ColumnBuildTest::DisabledAndEnabledFlag [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2143] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2143] Leader for TabletID 72057594046678944 is [1:129:2154] sender: [1:130:2058] recipient: [1:112:2143] 2026-01-08T15:34:43.345923Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7938: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2026-01-08T15:34:43.346016Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7966: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2026-01-08T15:34:43.346071Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7852: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2026-01-08T15:34:43.346131Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7868: OperationsProcessing config: using default configuration 2026-01-08T15:34:43.346181Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7874: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2026-01-08T15:34:43.346220Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7874: OperationsProcessing config: type TxSplitTablePartition, limit 10000 
2026-01-08T15:34:43.346289Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7998: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2026-01-08T15:34:43.346396Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_shred_manager.cpp:39: [RootShredManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, ShredInterval# 604800.000000s, ShredBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2026-01-08T15:34:43.347234Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:8069: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2026-01-08T15:34:43.347560Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2026-01-08T15:34:43.482868Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7755: Cannot subscribe to console configs 2026-01-08T15:34:43.482933Z node 1 :IMPORT WARN: schemeshard_import.cpp:440: Table profiles were not loaded 2026-01-08T15:34:43.494559Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2026-01-08T15:34:43.494910Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2026-01-08T15:34:43.495136Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2026-01-08T15:34:43.521733Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2026-01-08T15:34:43.522290Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:464: Clear TempDirsState with owners number: 0 2026-01-08T15:34:43.523121Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1383: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2026-01-08T15:34:43.524399Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2026-01-08T15:34:43.528398Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:160: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2026-01-08T15:34:43.528659Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_shred_manager.cpp:84: [RootShredManager] Stop 2026-01-08T15:34:43.529908Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2026-01-08T15:34:43.529976Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2026-01-08T15:34:43.530132Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2026-01-08T15:34:43.530180Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2026-01-08T15:34:43.530285Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:213: TTxServerlessStorageBilling.Complete 2026-01-08T15:34:43.530510Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:7111: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2026-01-08T15:34:43.693713Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot" OperationType: ESchemeOpMkDir MkDir { Name: ".sys" } Internal: true FailOnExist: false } TxId: 281474976710657 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:34:43.694959Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "auth_permissions" Type: EAuthPermissions } } TxId: 281474976710658 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:34:43.695148Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "auth_effective_permissions" Type: EAuthEffectivePermissions } } TxId: 281474976710659 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:34:43.695223Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "auth_users" Type: EAuthUsers } } TxId: 281474976710660 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:34:43.695319Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "pg_tables" Type: EPgTables } } TxId: 281474976710661 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:34:43.695389Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "ds_pdisks" Type: EPDisks } } TxId: 281474976710662 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:34:43.695452Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "auth_group_members" Type: EAuthGroupMembers } } TxId: 281474976710663 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:34:43.695532Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "top_partitions_one_minute" Type: 
ETopPartitionsByCpuOneMinute } } TxId: 281474976710664 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:34:43.695635Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "top_queries_by_request_units_one_hour" Type: ETopQueriesByRequestUnitsOneHour } } TxId: 281474976710665 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:34:43.695764Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "ds_vslots" Type: EVSlots } } TxId: 281474976710666 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:34:43.695831Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "top_partitions_by_tli_one_hour" Type: ETopPartitionsByTliOneHour } } TxId: 281474976710667 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:34:43.695912Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "nodes" Type: ENodes } } TxId: 281474976710668 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:34:43.695996Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "top_queries_by_cpu_time_one_hour" Type: ETopQueriesByCpuTimeOneHour } } TxId: 281474976710669 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:34:43.696079Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "top_queries_by_read_bytes_one_hour" Type: ETopQueriesByReadBytesOneHour } } TxId: 281474976710670 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:34:43.696156Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "top_queries_by_read_bytes_one_minute" Type: ETopQueriesByR ... 
11Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1714: TOperation IsReadyToDone TxId: 281474976730761 ready parts: 1/1 2026-01-08T15:34:57.520648Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:928: Part operation is done id#281474976730761:0 progress is 1/1 2026-01-08T15:34:57.520677Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1714: TOperation IsReadyToDone TxId: 281474976730761 ready parts: 1/1 2026-01-08T15:34:57.520713Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1679: TOperation IsReadyToNotify, TxId: 281474976730761, ready parts: 1/1, is published: true 2026-01-08T15:34:57.520791Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1702: TOperation DoNotify send TEvNotifyTxCompletionResult to actorId: [2:1680:3548] message: TxId: 281474976730761 2026-01-08T15:34:57.520839Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1714: TOperation IsReadyToDone TxId: 281474976730761 ready parts: 1/1 2026-01-08T15:34:57.520871Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:995: Operation and all the parts is done, operation id: 281474976730761:0 2026-01-08T15:34:57.520898Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5577: RemoveTx for txid 281474976730761:0 2026-01-08T15:34:57.520990Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:611: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72075186233409549, LocalPathId: 32] was 3 2026-01-08T15:34:57.524291Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:7196: Handle: TEvNotifyTxCompletionResult: txId# 281474976730761 2026-01-08T15:34:57.524400Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:7198: Message: TxId: 281474976730761 2026-01-08T15:34:57.524469Z node 2 :BUILD_INDEX INFO: schemeshard_build_index__progress.cpp:3019: TIndexBuilder::TXTYPE_PROGRESS_INDEX_BUILD: TTxReply : TEvNotifyTxCompletionResult, id# 105, txId# 281474976730761 2026-01-08T15:34:57.524588Z node 2 :BUILD_INDEX DEBUG: schemeshard_build_index__progress.cpp:3022: TIndexBuilder::TXTYPE_PROGRESS_INDEX_BUILD: TTxReply : TEvNotifyTxCompletionResult, TIndexBuildInfo: TBuildInfo{ IndexBuildId: 105, Uid: , DomainPathId: [OwnerId: 72075186233409549, LocalPathId: 1], TablePathId: [OwnerId: 72075186233409549, LocalPathId: 32], IndexType: EIndexTypeInvalid, IndexName: , State: Unlocking, SubState: None, IsBroken: 0, IsCancellationRequested: 0, Issue: , SubscribersCount: 1, CreateSender: [2:1744:3600], AlterMainTableTxId: 281474976730758, AlterMainTableTxStatus: StatusAccepted, AlterMainTableTxDone: 1, LockTxId: 281474976730757, LockTxStatus: StatusAccepted, LockTxDone: 1, InitiateTxId: 281474976730759, InitiateTxStatus: StatusAccepted, InitiateTxDone: 1, SnapshotStepId: 500, ApplyTxId: 281474976730760, ApplyTxStatus: StatusAccepted, ApplyTxDone: 1, DropColumnsTxId: 0, DropColumnsTxStatus: StatusSuccess, DropColumnsTxDone: 0, UnlockTxId: 281474976730761, UnlockTxStatus: StatusAccepted, UnlockTxDone: 0, ToUploadShards: 0, DoneShards: 0, Processed: UploadRows: 0 UploadBytes: 0 ReadRows: 0 ReadBytes: 0 CpuTimeUs: 0, Billed: UploadRows: 0 UploadBytes: 0 ReadRows: 0 ReadBytes: 0 CpuTimeUs: 0}, txId# 281474976730761 2026-01-08T15:34:57.526739Z node 2 :BUILD_INDEX NOTICE: schemeshard_build_index__progress.cpp:1950: TIndexBuilder::TXTYPE_PROGRESS_INDEX_BUILD: TTxBuildProgress: Execute: 105 Unlocking 2026-01-08T15:34:57.526901Z node 2 :BUILD_INDEX DEBUG: schemeshard_build_index__progress.cpp:1951: 
TIndexBuilder::TXTYPE_PROGRESS_INDEX_BUILD: TTxBuildProgress: Execute: 105 Unlocking TBuildInfo{ IndexBuildId: 105, Uid: , DomainPathId: [OwnerId: 72075186233409549, LocalPathId: 1], TablePathId: [OwnerId: 72075186233409549, LocalPathId: 32], IndexType: EIndexTypeInvalid, IndexName: , State: Unlocking, SubState: None, IsBroken: 0, IsCancellationRequested: 0, Issue: , SubscribersCount: 1, CreateSender: [2:1744:3600], AlterMainTableTxId: 281474976730758, AlterMainTableTxStatus: StatusAccepted, AlterMainTableTxDone: 1, LockTxId: 281474976730757, LockTxStatus: StatusAccepted, LockTxDone: 1, InitiateTxId: 281474976730759, InitiateTxStatus: StatusAccepted, InitiateTxDone: 1, SnapshotStepId: 500, ApplyTxId: 281474976730760, ApplyTxStatus: StatusAccepted, ApplyTxDone: 1, DropColumnsTxId: 0, DropColumnsTxStatus: StatusSuccess, DropColumnsTxDone: 0, UnlockTxId: 281474976730761, UnlockTxStatus: StatusAccepted, UnlockTxDone: 1, ToUploadShards: 0, DoneShards: 0, Processed: UploadRows: 0 UploadBytes: 0 ReadRows: 0 ReadBytes: 0 CpuTimeUs: 0, Billed: UploadRows: 0 UploadBytes: 0 ReadRows: 0 ReadBytes: 0 CpuTimeUs: 0} 2026-01-08T15:34:57.527088Z node 2 :BUILD_INDEX INFO: schemeshard_build_index_tx_base.cpp:24: TIndexBuilder::TXTYPE_PROGRESS_INDEX_BUILD: Change state from Unlocking to Done 2026-01-08T15:34:57.529052Z node 2 :BUILD_INDEX NOTICE: schemeshard_build_index__progress.cpp:1950: TIndexBuilder::TXTYPE_PROGRESS_INDEX_BUILD: TTxBuildProgress: Execute: 105 Done 2026-01-08T15:34:57.529188Z node 2 :BUILD_INDEX DEBUG: schemeshard_build_index__progress.cpp:1951: TIndexBuilder::TXTYPE_PROGRESS_INDEX_BUILD: TTxBuildProgress: Execute: 105 Done TBuildInfo{ IndexBuildId: 105, Uid: , DomainPathId: [OwnerId: 72075186233409549, LocalPathId: 1], TablePathId: [OwnerId: 72075186233409549, LocalPathId: 32], IndexType: EIndexTypeInvalid, IndexName: , State: Done, SubState: None, IsBroken: 0, IsCancellationRequested: 0, Issue: , SubscribersCount: 1, CreateSender: [2:1744:3600], AlterMainTableTxId: 281474976730758, AlterMainTableTxStatus: StatusAccepted, AlterMainTableTxDone: 1, LockTxId: 281474976730757, LockTxStatus: StatusAccepted, LockTxDone: 1, InitiateTxId: 281474976730759, InitiateTxStatus: StatusAccepted, InitiateTxDone: 1, SnapshotStepId: 500, ApplyTxId: 281474976730760, ApplyTxStatus: StatusAccepted, ApplyTxDone: 1, DropColumnsTxId: 0, DropColumnsTxStatus: StatusSuccess, DropColumnsTxDone: 0, UnlockTxId: 281474976730761, UnlockTxStatus: StatusAccepted, UnlockTxDone: 1, ToUploadShards: 0, DoneShards: 0, Processed: UploadRows: 0 UploadBytes: 0 ReadRows: 0 ReadBytes: 0 CpuTimeUs: 0, Billed: UploadRows: 0 UploadBytes: 0 ReadRows: 0 ReadBytes: 0 CpuTimeUs: 0} 2026-01-08T15:34:57.529256Z node 2 :BUILD_INDEX TRACE: schemeshard_build_index_tx_base.cpp:339: TIndexBuilder::TXTYPE_PROGRESS_INDEX_BUILD: TIndexBuildInfo SendNotifications: : id# 105, subscribers count# 1 2026-01-08T15:34:57.529400Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:230: tests -- TTxNotificationSubscriber for txId 105: got EvNotifyTxCompletionResult 2026-01-08T15:34:57.529446Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:239: tests -- TTxNotificationSubscriber for txId 105: satisfy waiter [2:1761:3616] TestWaitNotification: OK eventTxId 105 2026-01-08T15:34:57.530130Z node 2 :BUILD_INDEX DEBUG: schemeshard_build_index__get.cpp:19: TIndexBuilder::TXTYPE_GET_INDEX_BUILD: DoExecute DatabaseName: "/MyRoot/ServerLessDB" IndexBuildId: 105 2026-01-08T15:34:57.530573Z node 2 :BUILD_INDEX DEBUG: schemeshard_build_index_tx_base.h:104: 
TIndexBuilder::TXTYPE_GET_INDEX_BUILD: Reply Status: SUCCESS IndexBuild { Id: 105 Issues { message: "TShardStatus { ShardIdx: 72075186233409549:4 Status: DONE UploadStatus: STATUS_CODE_UNSPECIFIED DebugMessage:
: Error: Shard or requested range is empty\n SeqNoRound: 1 Processed: UploadRows: 0 UploadBytes: 0 ReadRows: 0 ReadBytes: 0 CpuTimeUs: 0 }" severity: 1 } State: STATE_DONE Settings { source_path: "/MyRoot/ServerLessDB/Table" max_shards_in_flight: 2 column_build_operation { column { ColumnName: "default_value" default_from_literal { type { type_id: UINT64 } value { uint64_value: 10 } } } } ScanSettings { MaxBatchRows: 1 } } Progress: 100 StartTime { } EndTime { } } BUILDINDEX RESPONSE Get: NKikimrIndexBuilder.TEvGetResponse Status: SUCCESS IndexBuild { Id: 105 Issues { message: "TShardStatus { ShardIdx: 72075186233409549:4 Status: DONE UploadStatus: STATUS_CODE_UNSPECIFIED DebugMessage:
: Error: Shard or requested range is empty\n SeqNoRound: 1 Processed: UploadRows: 0 UploadBytes: 0 ReadRows: 0 ReadBytes: 0 CpuTimeUs: 0 }" severity: 1 } State: STATE_DONE Settings { source_path: "/MyRoot/ServerLessDB/Table" max_shards_in_flight: 2 column_build_operation { column { ColumnName: "default_value" default_from_literal { type { type_id: UINT64 } value { uint64_value: 10 } } } } ScanSettings { MaxBatchRows: 1 } } Progress: 100 StartTime { } EndTime { } } 2026-01-08T15:34:57.531379Z node 2 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/ServerLessDB/Table" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72075186233409549 2026-01-08T15:34:57.531741Z node 2 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72075186233409549 describe path "/MyRoot/ServerLessDB/Table" took 358us result status StatusSuccess 2026-01-08T15:34:57.532250Z node 2 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/ServerLessDB/Table" PathDescription { Self { Name: "Table" PathId: 32 SchemeshardId: 72075186233409549 PathType: EPathTypeTable CreateFinished: true CreateTxId: 105 CreateStep: 300 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 6 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 6 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 4 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "Table" Columns { Name: "key" Type: "Uint32" TypeId: 2 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "value" Type: "Utf8" TypeId: 4608 Id: 2 NotNull: false IsBuildInProgress: false } Columns { Name: "default_value" Type: "Uint64" TypeId: 4 Id: 3 NotNull: false DefaultFromLiteral { type { type_id: UINT64 } value { uint64_value: 10 } } IsBuildInProgress: false } KeyColumnNames: "key" KeyColumnIds: 1 TableSchemaVersion: 4 IsBackup: false IsRestore: false } TableStats { DataSize: 0 RowCount: 0 IndexSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 0 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 1 RangeReadRows: 0 StoragePools { } ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 0 Memory: 0 Network: 0 Storage: 0 ReadThroughput: 0 WriteThroughput: 0 ReadIops: 0 WriteIops: 0 } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 39 ProcessingParams { Version: 2 PlanResolution: 50 Coordinators: 72075186233409550 TimeCastBucketsPerMediator: 2 Mediators: 72075186233409551 SchemeShard: 72075186233409549 } DomainKey { SchemeShard: 72057594046678944 PathId: 39 } PathsInside: 31 PathsLimit: 10000 ShardsInside: 4 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 38 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SharedHive: 72057594037968897 ServerlessComputeResourcesMode: EServerlessComputeResourcesModeShared SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 
MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 32 PathOwnerId: 72075186233409549, at schemeshard: 72075186233409549 |87.6%| [TM] {BAZEL_UPLOAD} ydb/core/tx/schemeshard/ut_column_build/unittest >> TKeyValueTest::TestEmptyWriteReadDeleteWithRestartsThenResponseOkNewApi [GOOD] >> TKeyValueTest::TestGetStatusWorks >> KikimrIcGateway::TestLoadBasicSecretValueFromExternalDataSourceMetadata+UseSchemaSecrets |87.6%| [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/provider/ut/unittest >> ReadAttributesUtils::AttributesGatheringEmpry [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/rbo/unittest >> KqpRboYql::LeftJoins [GOOD] Test command err: Trying to start YDB, gRPC: 18202, MsgBus: 14264 2026-01-08T15:33:57.943065Z node 1 :METADATA_PROVIDER WARN: log.cpp:841: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7593014432041822144:2074];send_to=[0:7307199536658146131:7762515]; 2026-01-08T15:33:57.944078Z node 1 :METADATA_PROVIDER ERROR: log.cpp:841: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2026-01-08T15:33:58.007884Z node 1 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:125: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, got undelivered to scheme cache: ActorUnknown 2026-01-08T15:33:58.439811Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2026-01-08T15:33:58.439925Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2026-01-08T15:33:58.509001Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2026-01-08T15:33:58.600903Z node 1 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: LookupError, path: Root/.metadata/script_executions 2026-01-08T15:33:58.601036Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1374: Notification cookie mismatch for subscription [1:7593014432041822109:2081] 1767886437923335 != 1767886437923338 2026-01-08T15:33:58.618533Z node 1 :IMPORT WARN: schemeshard_import.cpp:440: Table profiles were not loaded 2026-01-08T15:33:58.858841Z node 1 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: PathErrorUnknown, path: Root/.metadata/script_executions 2026-01-08T15:33:58.902999Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2026-01-08T15:33:58.903029Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2026-01-08T15:33:58.903035Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2026-01-08T15:33:58.903115Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 
2026-01-08T15:33:58.962480Z node 1 :TX_CONVEYOR ERROR: log.cpp:841: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2026-01-08T15:33:59.604053Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2026-01-08T15:33:59.610437Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:123: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2026-01-08T15:34:02.432563Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7593014453516659481:2325], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:34:02.432715Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:263: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:34:02.433156Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7593014453516659491:2326], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:34:02.433257Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:263: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:34:02.743417Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateColumnTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/olap/operations/create_table.cpp:814) 2026-01-08T15:34:02.932092Z node 1 :METADATA_PROVIDER ERROR: log.cpp:841: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7593014432041822144:2074];send_to=[0:7307199536658146131:7762515]; 2026-01-08T15:34:02.932174Z node 1 :METADATA_PROVIDER ERROR: log.cpp:841: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2026-01-08T15:34:03.525894Z node 1 :TX_COLUMNSHARD WARN: log.cpp:841: tablet_id=72075186224037918;self_id=[1:7593014457811627384:2365];tablet_id=72075186224037918;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2026-01-08T15:34:03.525894Z node 1 :TX_COLUMNSHARD WARN: log.cpp:841: tablet_id=72075186224037919;self_id=[1:7593014457811627349:2332];tablet_id=72075186224037919;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2026-01-08T15:34:03.526258Z node 1 :TX_COLUMNSHARD WARN: log.cpp:841: tablet_id=72075186224037918;self_id=[1:7593014457811627384:2365];tablet_id=72075186224037918;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2026-01-08T15:34:03.526500Z node 1 :TX_COLUMNSHARD WARN: log.cpp:841: tablet_id=72075186224037918;self_id=[1:7593014457811627384:2365];tablet_id=72075186224037918;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2026-01-08T15:34:03.526604Z node 1 :TX_COLUMNSHARD WARN: log.cpp:841: tablet_id=72075186224037918;self_id=[1:7593014457811627384:2365];tablet_id=72075186224037918;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2026-01-08T15:34:03.526664Z node 1 :TX_COLUMNSHARD WARN: log.cpp:841: tablet_id=72075186224037919;self_id=[1:7593014457811627349:2332];tablet_id=72075186224037919;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2026-01-08T15:34:03.526699Z node 1 :TX_COLUMNSHARD WARN: log.cpp:841: tablet_id=72075186224037918;self_id=[1:7593014457811627384:2365];tablet_id=72075186224037918;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2026-01-08T15:34:03.526800Z node 1 :TX_COLUMNSHARD WARN: log.cpp:841: tablet_id=72075186224037919;self_id=[1:7593014457811627349:2332];tablet_id=72075186224037919;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2026-01-08T15:34:03.526850Z node 1 :TX_COLUMNSHARD WARN: log.cpp:841: tablet_id=72075186224037918;self_id=[1:7593014457811627384:2365];tablet_id=72075186224037918;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2026-01-08T15:34:03.526854Z node 1 :TX_COLUMNSHARD WARN: log.cpp:841: 
tablet_id=72075186224037919;self_id=[1:7593014457811627349:2332];tablet_id=72075186224037919;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2026-01-08T15:34:03.526917Z node 1 :TX_COLUMNSHARD WARN: log.cpp:841: tablet_id=72075186224037919;self_id=[1:7593014457811627349:2332];tablet_id=72075186224037919;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2026-01-08T15:34:03.526938Z node 1 :TX_COLUMNSHARD WARN: log.cpp:841: tablet_id=72075186224037918;self_id=[1:7593014457811627384:2365];tablet_id=72075186224037918;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2026-01-08T15:34:03.526974Z node 1 :TX_COLUMNSHARD WARN: log.cpp:841: tablet_id=72075186224037919;self_id=[1:7593014457811627349:2332];tablet_id=72075186224037919;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2026-01-08T15:34:03.527027Z node 1 :TX_COLUMNSHARD WARN: log.cpp:841: tablet_id=72075186224037918;self_id=[1:7593014457811627384:2365];tablet_id=72075186224037918;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2026-01-08T15:34:03.527043Z node 1 :TX_COLUMNSHARD WARN: log.cpp:841: tablet_id=72075186224037919;self_id=[1:7593014457811627349:2332];tablet_id=72075186224037919;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2026-01-08T15:34:03.527098Z node 1 :TX_COLUMNSHARD WARN: log.cpp:841: tablet_id=72075186224037919;self_id=[1:7593014457811627349:2332];tablet_id=72075186224037919;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2026-01-08T15:34:03.527122Z node 1 :TX_COLUMNSHARD WARN: log.cpp:841: tablet_id=72075186224037918;self_id=[1:7593014457811627384:2365];tablet_id=72075186224037918;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2026-01-08T15:34:03.527170Z node 1 :TX_COLUMNSHARD WARN: log.cpp:841: tablet_id=72075186224037919;self_id=[1:7593014457811627349:2332];tablet_id=72075186224037919;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2026-01-08T15:34:03.527218Z node 1 :TX_COLUMNSHARD WARN: log.cpp:841: tablet_id=72075186224037919;self_id=[1:7593014457811627349:2332];tablet_id=72075186224037919;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2026-01-08T15:34:03.527222Z node 1 :TX_COLUMNSHARD WARN: log.cpp:841: tablet_id=72075186224037918;self_id=[1:7593014457811627384:2365];tablet_id=72075186224037918;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2026-01-08T15:34:03.527292Z node 1 :TX_COLUMNSHARD WARN: log.cpp:841: tablet_id=72075186224037919;self_id=[1:7593014457811627349:2332];tablet_id=72075186224037919;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2026-01-08T15:34:03.527311Z node 1 :TX_COLUMNSHARD WARN: log.cpp:841: 
tablet_id=72075186224037918;self_id=[1:7593014457811627384:2365];tablet_id=72075186224037918;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2026-01-08T15:34:03.527375Z node 1 :TX_COLUMNSHARD WARN: log.cpp:841: tablet_id=72075186224037919;self_id=[1:7593014457811627349:2332];tablet_id=72075186224037919;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CopyBlobIdsToV2; 2026-01-08T15:34:03.527401Z node 1 :TX_COLUMNSHARD WARN: log.cpp:841: tablet_id=72075186224037918;self_id=[1:7593014457811627384:2365];tablet_id=72075186224037918;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_registe ... s_manager.cpp:57;method=resolve_internal_path_id;ss_local=41;result=not_found; 2026-01-08T15:34:46.078304Z node 3 :TX_COLUMNSHARD WARN: log.cpp:841: tablet_id=72075186224038109;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715661;fline=tables_manager.cpp:57;method=resolve_internal_path_id;ss_local=41;result=not_found; 2026-01-08T15:34:46.078383Z node 3 :TX_COLUMNSHARD WARN: log.cpp:841: tablet_id=72075186224038109;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715661;fline=tables_manager.cpp:57;method=resolve_internal_path_id;ss_local=41;result=not_found; 2026-01-08T15:34:46.078398Z node 3 :TX_COLUMNSHARD WARN: log.cpp:841: tablet_id=72075186224038095;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715661;fline=tables_manager.cpp:57;method=resolve_internal_path_id;ss_local=41;result=not_found; 2026-01-08T15:34:46.078403Z node 3 :TX_COLUMNSHARD WARN: log.cpp:841: tablet_id=72075186224038109;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715661;fline=tables_manager.cpp:57;method=resolve_internal_path_id;ss_local=41;result=not_found; 2026-01-08T15:34:46.078417Z node 3 :TX_COLUMNSHARD WARN: log.cpp:841: tablet_id=72075186224038095;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715661;fline=tables_manager.cpp:57;method=resolve_internal_path_id;ss_local=41;result=not_found; 2026-01-08T15:34:46.091056Z node 3 :TX_COLUMNSHARD WARN: log.cpp:841: tablet_id=72075186224038136;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715661;fline=tables_manager.cpp:57;method=resolve_internal_path_id;ss_local=41;result=not_found; 2026-01-08T15:34:46.091056Z node 3 :TX_COLUMNSHARD WARN: log.cpp:841: tablet_id=72075186224038123;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715661;fline=tables_manager.cpp:57;method=resolve_internal_path_id;ss_local=41;result=not_found; 2026-01-08T15:34:46.091126Z node 3 :TX_COLUMNSHARD WARN: log.cpp:841: tablet_id=72075186224038136;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715661;fline=tables_manager.cpp:57;method=resolve_internal_path_id;ss_local=41;result=not_found; 2026-01-08T15:34:46.091126Z node 3 :TX_COLUMNSHARD WARN: log.cpp:841: tablet_id=72075186224038123;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715661;fline=tables_manager.cpp:57;method=resolve_internal_path_id;ss_local=41;result=not_found; 2026-01-08T15:34:46.091147Z node 3 :TX_COLUMNSHARD WARN: log.cpp:841: tablet_id=72075186224038136;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715661;fline=tables_manager.cpp:57;method=resolve_internal_path_id;ss_local=41;result=not_found; 2026-01-08T15:34:46.091185Z node 3 :TX_COLUMNSHARD WARN: log.cpp:841: 
tablet_id=72075186224038123;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715661;fline=tables_manager.cpp:57;method=resolve_internal_path_id;ss_local=41;result=not_found; 2026-01-08T15:34:46.102996Z node 3 :TX_COLUMNSHARD WARN: log.cpp:841: tablet_id=72075186224038115;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715661;fline=tables_manager.cpp:57;method=resolve_internal_path_id;ss_local=41;result=not_found; 2026-01-08T15:34:46.103084Z node 3 :TX_COLUMNSHARD WARN: log.cpp:841: tablet_id=72075186224038115;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715661;fline=tables_manager.cpp:57;method=resolve_internal_path_id;ss_local=41;result=not_found; 2026-01-08T15:34:46.103105Z node 3 :TX_COLUMNSHARD WARN: log.cpp:841: tablet_id=72075186224038115;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715661;fline=tables_manager.cpp:57;method=resolve_internal_path_id;ss_local=41;result=not_found; 2026-01-08T15:34:46.110654Z node 3 :TX_COLUMNSHARD WARN: log.cpp:841: tablet_id=72075186224038106;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715661;fline=tables_manager.cpp:57;method=resolve_internal_path_id;ss_local=41;result=not_found; 2026-01-08T15:34:46.110740Z node 3 :TX_COLUMNSHARD WARN: log.cpp:841: tablet_id=72075186224038106;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715661;fline=tables_manager.cpp:57;method=resolve_internal_path_id;ss_local=41;result=not_found; 2026-01-08T15:34:46.110760Z node 3 :TX_COLUMNSHARD WARN: log.cpp:841: tablet_id=72075186224038106;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715661;fline=tables_manager.cpp:57;method=resolve_internal_path_id;ss_local=41;result=not_found; 2026-01-08T15:34:46.114670Z node 3 :TX_COLUMNSHARD WARN: log.cpp:841: tablet_id=72075186224038104;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715661;fline=tables_manager.cpp:57;method=resolve_internal_path_id;ss_local=41;result=not_found; 2026-01-08T15:34:46.114740Z node 3 :TX_COLUMNSHARD WARN: log.cpp:841: tablet_id=72075186224038104;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715661;fline=tables_manager.cpp:57;method=resolve_internal_path_id;ss_local=41;result=not_found; 2026-01-08T15:34:46.114762Z node 3 :TX_COLUMNSHARD WARN: log.cpp:841: tablet_id=72075186224038104;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715661;fline=tables_manager.cpp:57;method=resolve_internal_path_id;ss_local=41;result=not_found; 2026-01-08T15:34:46.125658Z node 3 :TX_COLUMNSHARD WARN: log.cpp:841: tablet_id=72075186224038113;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715661;fline=tables_manager.cpp:57;method=resolve_internal_path_id;ss_local=41;result=not_found; 2026-01-08T15:34:46.125749Z node 3 :TX_COLUMNSHARD WARN: log.cpp:841: tablet_id=72075186224038113;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715661;fline=tables_manager.cpp:57;method=resolve_internal_path_id;ss_local=41;result=not_found; 2026-01-08T15:34:46.125770Z node 3 :TX_COLUMNSHARD WARN: log.cpp:841: tablet_id=72075186224038113;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715661;fline=tables_manager.cpp:57;method=resolve_internal_path_id;ss_local=41;result=not_found; 2026-01-08T15:34:46.130383Z node 3 :TX_COLUMNSHARD WARN: log.cpp:841: tablet_id=72075186224038140;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715661;fline=tables_manager.cpp:57;method=resolve_internal_path_id;ss_local=41;result=not_found; 2026-01-08T15:34:46.130460Z node 3 :TX_COLUMNSHARD 
WARN: log.cpp:841: tablet_id=72075186224038140;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715661;fline=tables_manager.cpp:57;method=resolve_internal_path_id;ss_local=41;result=not_found; 2026-01-08T15:34:46.130483Z node 3 :TX_COLUMNSHARD WARN: log.cpp:841: tablet_id=72075186224038140;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715661;fline=tables_manager.cpp:57;method=resolve_internal_path_id;ss_local=41;result=not_found; 2026-01-08T15:34:46.136367Z node 3 :TX_COLUMNSHARD WARN: log.cpp:841: tablet_id=72075186224038129;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715661;fline=tables_manager.cpp:57;method=resolve_internal_path_id;ss_local=41;result=not_found; 2026-01-08T15:34:46.136453Z node 3 :TX_COLUMNSHARD WARN: log.cpp:841: tablet_id=72075186224038129;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715661;fline=tables_manager.cpp:57;method=resolve_internal_path_id;ss_local=41;result=not_found; 2026-01-08T15:34:46.136475Z node 3 :TX_COLUMNSHARD WARN: log.cpp:841: tablet_id=72075186224038129;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715661;fline=tables_manager.cpp:57;method=resolve_internal_path_id;ss_local=41;result=not_found; 2026-01-08T15:34:46.148237Z node 3 :TX_COLUMNSHARD WARN: log.cpp:841: tablet_id=72075186224038139;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715661;fline=tables_manager.cpp:57;method=resolve_internal_path_id;ss_local=41;result=not_found; 2026-01-08T15:34:46.148311Z node 3 :TX_COLUMNSHARD WARN: log.cpp:841: tablet_id=72075186224038139;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715661;fline=tables_manager.cpp:57;method=resolve_internal_path_id;ss_local=41;result=not_found; 2026-01-08T15:34:46.148331Z node 3 :TX_COLUMNSHARD WARN: log.cpp:841: tablet_id=72075186224038139;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715661;fline=tables_manager.cpp:57;method=resolve_internal_path_id;ss_local=41;result=not_found; 2026-01-08T15:34:46.184951Z node 3 :KQP_SLOW_LOG WARN: kqp_worker_common.cpp:132: TraceId: "01kef3xey38sk86b7kv2a4knp1", SessionId: ydb://session/3?node_id=3&id=NDFhOGRmYzAtYjkyODBmMWItNzdjMDYxZmUtNmIzMTQxZDg=, Slow query, duration: 16.098987s, status: SUCCESS, user: UNAUTHENTICATED, results: 0b, text: "\n CREATE TABLE `/Root/t1` (\n a Int64 NOT NULL,\n b String,\n c Int64,\n primary key(a)\n ) with (Store = Column);\n\n CREATE TABLE `/Root/t2` (\n a Int64\tNOT NULL,\n b String,\n c Int64,\n primary key(a)\n ) with (Store = Column);\n\n CREATE TABLE `/Root/t3` (\n a Int64 NOT NULL,\n b String,\n c Int64,\n primary key(a)\n ) with (Store = Column);\n\n CREATE TABLE `/Root/t4` (\n a Int64 NOT NULL,\n b String,\n c Int64,\n primary key(a)\n ) with (Store = Column);\n ", parameters: 0b 2026-01-08T15:34:46.393796Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7593014641273075398:3715], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:34:46.393913Z node 3 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:263: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:34:46.394389Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7593014641273075403:3718], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:34:46.394444Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7593014641273075404:3719], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:34:46.394482Z node 3 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:263: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:34:46.400513Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715662:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2026-01-08T15:34:46.435122Z node 3 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [3:7593014641273075407:3720], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715662 completed, doublechecking } 2026-01-08T15:34:46.492394Z node 3 :TX_PROXY ERROR: schemereq.cpp:624: Actor# [3:7593014641273075458:7994] txid# 281474976715663, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 45], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } |87.6%| [TM] {BAZEL_UPLOAD} ydb/core/kqp/provider/ut/unittest |87.6%| [TM] {BAZEL_UPLOAD} ydb/core/kqp/ut/rbo/unittest |87.6%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/schemeshard/ut_consistent_copy_tables/ydb-core-tx-schemeshard-ut_consistent_copy_tables |87.6%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/schemeshard/ut_consistent_copy_tables/ydb-core-tx-schemeshard-ut_consistent_copy_tables |87.6%| [LD] {RESULT} $(B)/ydb/core/tx/schemeshard/ut_consistent_copy_tables/ydb-core-tx-schemeshard-ut_consistent_copy_tables >> KikimrIcGateway::TestLoadTableMetadata ------- [TM] {asan, default-linux-x86_64, release} ydb/services/ydb/table_split_ut/unittest >> YdbTableSplit::SplitByLoadWithReads [GOOD] Test command err: 2026-01-08T15:34:26.715458Z node 1 :METADATA_PROVIDER WARN: log.cpp:841: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7593014555993145344:2079];send_to=[0:7307199536658146131:7762515]; 2026-01-08T15:34:26.715513Z node 1 :METADATA_PROVIDER ERROR: log.cpp:841: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2026-01-08T15:34:27.234541Z node 1 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: LookupError, path: Root/.metadata/script_executions 2026-01-08T15:34:27.268275Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2026-01-08T15:34:27.268385Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2026-01-08T15:34:27.302782Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2026-01-08T15:34:27.473193Z node 1 :IMPORT WARN: schemeshard_import.cpp:440: Table profiles were not loaded 2026-01-08T15:34:27.595352Z node 1 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: PathErrorUnknown, path: Root/.metadata/script_executions 2026-01-08T15:34:27.766904Z node 1 :TX_CONVEYOR ERROR: log.cpp:841: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2026-01-08T15:34:27.950810Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2026-01-08T15:34:27.950832Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2026-01-08T15:34:27.950839Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2026-01-08T15:34:27.950917Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2026-01-08T15:34:28.385333Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is 
undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2026-01-08T15:34:31.173573Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7593014577467983235:2331], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:34:31.173717Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:263: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:34:31.174155Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7593014577467983244:2332], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:34:31.174199Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:263: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:34:31.649506Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:34:31.716360Z node 1 :METADATA_PROVIDER ERROR: log.cpp:841: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7593014555993145344:2079];send_to=[0:7307199536658146131:7762515]; 2026-01-08T15:34:31.716436Z node 1 :METADATA_PROVIDER ERROR: log.cpp:841: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2026-01-08T15:34:31.901862Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7593014577467983415:2346], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:34:31.901984Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:263: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:34:31.902304Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7593014577467983418:2347], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:34:31.902364Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:263: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:34:31.928845Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_alter_table.cpp:187) Table has 1 shards 2026-01-08T15:34:32.082781Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7593014581762950820:2377], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:34:32.082933Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:263: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:34:32.087017Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7593014581762950833:2385], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:34:32.087145Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7593014581762950834:2386], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:34:32.087187Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7593014581762950832:2384], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:34:32.087296Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:263: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:34:32.092434Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7593014581762950841:2390], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:34:32.094856Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7593014581762950852:2392], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:34:32.095087Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:263: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:34:32.095091Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7593014581762950854:2394], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:34:32.095392Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7593014581762950875:2396], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:34:32.095510Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:263: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:34:32.097163Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_mkdir.cpp:115: TMkDir Propose, path: /Root/.metadata, operationId: 281474976715661:0, at schemeshard: 72057594046644480 2026-01-08T15:34:32.097390Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7593014581762950884:2400], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:34:32.097722Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:127: IgniteOperation, opId: 281474976715661:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046644480 2026-01-08T15:34:32.097752Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_mkdir.cpp:115: TMkDir Propose, path: /Root/.metadata/workload_manager, operationId: 281474976715661:1, at schemeshard: 72057594046644480 2026-01-08T15:34:32.097880Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:127: IgniteOperation, opId: 281474976715661:2, propose status:StatusAccepted, reason: , at schemeshard: 72057594046644480 2026-01-08T15:34:32.097897Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_mkdir.cpp:115: TMkDir Propose, path: /Root/.metadata/workload_manager/pools, operationId: 281474976715661:2, at schemeshard: 72057594046644480 2026-01-08T15:34:32.097953Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:127: IgniteOperation, opId: 281474976715661:3, propose status:StatusAccepted, reason: , at schemeshard: 72057594046644480 2026-01-08T15:34:32.098033Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_create_resource_pool.cpp:148: [72057594046644480] TCreateResourcePool Propose: opId# 281474976715661:3, path# /Root/.metadata/workload_manager/pools/default 2026-01-08T15:34:32.098297Z node 1 :FLAT_TX_SCHEMESHARD I ... orkingDir: "/Root" OperationType: ESchemeOpSplitMergeTablePartitions SplitMergeTablePartitions { TablePath: "/Root/Foo" SourceTabletId: 72075186224037888 SplitBoundary { SerializedKeyPrefix: "\002\000\004\000\000\000\"\214\303\202\000\000\000\200" } SchemeshardId: 72057594046644480 } Internal: true FailOnExist: false } TxId: 281474976710693 TabletId: 72057594046644480 2026-01-08T15:34:51.859513Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_split_merge.cpp:804: TSplitMerge Propose, tableStr: /Root/Foo, tableId: , opId: 281474976710693:0, at schemeshard: 72057594046644480, request: TablePath: "/Root/Foo" SourceTabletId: 72075186224037888 SplitBoundary { SerializedKeyPrefix: "\002\000\004\000\000\000\"\214\303\202\000\000\000\200" } SchemeshardId: 72057594046644480 2026-01-08T15:34:51.860155Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_split_merge.cpp:1083: TSplitMerge Propose accepted, tableStr: /Root/Foo, tableId: , opId: 281474976710693:0, at schemeshard: 72057594046644480, op: SourceRanges { KeyRangeBegin: "\002\000\000\000\000\200\000\000\000\200" KeyRangeEnd: "" TabletID: 72075186224037888 ShardIdx: 1 } DestinationRanges { KeyRangeBegin: "\002\000\000\000\000\200\000\000\000\200" KeyRangeEnd: "\002\000\004\000\000\000\"\214\303\202\000\000\000\200" ShardIdx: 2 } DestinationRanges { KeyRangeBegin: "\002\000\004\000\000\000\"\214\303\202\000\000\000\200" KeyRangeEnd: "" ShardIdx: 3 }, request: TablePath: "/Root/Foo" SourceTabletId: 72075186224037888 SplitBoundary { SerializedKeyPrefix: "\002\000\004\000\000\000\"\214\303\202\000\000\000\200" } SchemeshardId: 72057594046644480 2026-01-08T15:34:51.860191Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:127: IgniteOperation, opId: 281474976710693:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046644480 2026-01-08T15:34:51.871544Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 281474976710693:0 ProgressState, operation type: TxSplitTablePartition, at tablet# 72057594046644480 
2026-01-08T15:34:51.887686Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:177: TCreateParts opId# 281474976710693:0 HandleReply TEvCreateTabletReply, at tabletId: 72057594046644480 2026-01-08T15:34:51.888016Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:177: TCreateParts opId# 281474976710693:0 HandleReply TEvCreateTabletReply, at tabletId: 72057594046644480 2026-01-08T15:34:51.888056Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2700: Change state for txid 281474976710693:0 2 -> 3 2026-01-08T15:34:51.891767Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_split_merge.cpp:84: TSplitMerge TConfigureDestination ProgressState, operationId: 281474976710693:0, at schemeshard: 72057594046644480 2026-01-08T15:34:51.905967Z node 1 :TX_DATASHARD INFO: datashard.cpp:374: TDataShard::OnActivateExecutor: tablet 72075186224037889 actor [1:7593014663367397683:9229] 2026-01-08T15:34:51.916774Z node 1 :TX_DATASHARD INFO: datashard.cpp:374: TDataShard::OnActivateExecutor: tablet 72075186224037890 actor [1:7593014663367397689:9230] 2026-01-08T15:34:51.932288Z node 1 :TX_DATASHARD INFO: datashard.cpp:418: Switched to work state WaitScheme tabletId 72075186224037889 2026-01-08T15:34:51.932378Z node 1 :TX_DATASHARD INFO: datashard.cpp:1281: Cannot activate change sender: at tablet: 72075186224037889, state: WaitScheme 2026-01-08T15:34:51.932524Z node 1 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186224037889 TxInFly 0 2026-01-08T15:34:51.940814Z node 1 :TX_DATASHARD INFO: datashard.cpp:418: Switched to work state WaitScheme tabletId 72075186224037890 2026-01-08T15:34:51.940902Z node 1 :TX_DATASHARD INFO: datashard.cpp:1281: Cannot activate change sender: at tablet: 72075186224037890, state: WaitScheme 2026-01-08T15:34:51.941041Z node 1 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186224037890 TxInFly 0 2026-01-08T15:34:51.945730Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_split_merge.cpp:38: TSplitMerge TConfigureDestination operationId# 281474976710693:0 HandleReply TEvInitSplitMergeDestinationAck, operationId: 281474976710693:0, at schemeshard: 72057594046644480 message# OperationCookie: 281474976710693 TabletId: 72075186224037889 2026-01-08T15:34:51.949624Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_split_merge.cpp:38: TSplitMerge TConfigureDestination operationId# 281474976710693:0 HandleReply TEvInitSplitMergeDestinationAck, operationId: 281474976710693:0, at schemeshard: 72057594046644480 message# OperationCookie: 281474976710693 TabletId: 72075186224037890 2026-01-08T15:34:51.949662Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2700: Change state for txid 281474976710693:0 3 -> 131 2026-01-08T15:34:51.951165Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_split_merge.cpp:334: TSplitMerge TTransferData operationId# 281474976710693:0 ProgressState, at schemeshard: 72057594046644480 2026-01-08T15:34:51.971044Z node 1 :TX_DATASHARD INFO: datashard.cpp:418: Switched to work state Ready tabletId 72075186224037889 2026-01-08T15:34:51.971169Z node 1 :TX_DATASHARD INFO: datashard.cpp:476: Send registration request to time cast Ready tabletId 72075186224037889 mediators count is 1 coordinators count is 1 buckets per mediator 2 2026-01-08T15:34:51.971228Z node 1 :TX_DATASHARD INFO: datashard.cpp:1310: Change sender killed: at tablet: 72075186224037889 2026-01-08T15:34:51.971255Z node 1 :TX_DATASHARD INFO: datashard.cpp:1302: Change sender 
activated: at tablet: 72075186224037889 2026-01-08T15:34:51.971487Z node 1 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186224037889 TxInFly 0 2026-01-08T15:34:51.971607Z node 1 :TX_DATASHARD INFO: datashard.cpp:418: Switched to work state Ready tabletId 72075186224037890 2026-01-08T15:34:51.971678Z node 1 :TX_DATASHARD INFO: datashard.cpp:476: Send registration request to time cast Ready tabletId 72075186224037890 mediators count is 1 coordinators count is 1 buckets per mediator 2 2026-01-08T15:34:51.971729Z node 1 :TX_DATASHARD INFO: datashard.cpp:1310: Change sender killed: at tablet: 72075186224037890 2026-01-08T15:34:51.971748Z node 1 :TX_DATASHARD INFO: datashard.cpp:1302: Change sender activated: at tablet: 72075186224037890 2026-01-08T15:34:51.971946Z node 1 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186224037890 TxInFly 0 2026-01-08T15:34:51.975585Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_split_merge.cpp:207: TSplitMerge TTransferData operationId# 281474976710693:0 HandleReply TEvSplitAck, at schemeshard: 72057594046644480, message: OperationCookie: 281474976710693 TabletId: 72075186224037888 2026-01-08T15:34:51.975936Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2700: Change state for txid 281474976710693:0 131 -> 132 2026-01-08T15:34:51.978436Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046644480 2026-01-08T15:34:51.978720Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046644480 2026-01-08T15:34:51.978794Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_split_merge.cpp:437: TSplitMerge TNotifySrc, operationId: 281474976710693:0 ProgressState, at schemeshard: 72057594046644480 2026-01-08T15:34:51.979383Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6274: Handle TEvUpdateAck, at schemeshard: 72057594046644480, msg: Owner: 72057594046644480 Generation: 2 LocalPathId: 38 Version: 5 PathOwnerId: 72057594046644480, cookie: 281474976710693 2026-01-08T15:34:51.979416Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:108: Operation in-flight, at schemeshard: 72057594046644480, txId: 281474976710693 2026-01-08T15:34:51.979428Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046644480, txId: 281474976710693, pathId: [OwnerId: 72057594046644480, LocalPathId: 38], version: 5 2026-01-08T15:34:51.985111Z node 1 :TX_DATASHARD INFO: datashard_loans.cpp:177: 72075186224037888 Initiating switch from PreOffline to Offline state 2026-01-08T15:34:51.985126Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_split_merge.cpp:392: TSplitMerge TNotifySrc, operationId: 281474976710693:0 HandleReply TEvSplitPartitioningChangedAck, from datashard: 72075186224037888, at schemeshard: 72057594046644480 2026-01-08T15:34:51.985222Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:928: Part operation is done id#281474976710693:0 progress is 1/1 2026-01-08T15:34:51.985247Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:928: Part operation is done id#281474976710693:0 progress is 1/1 2026-01-08T15:34:51.985288Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:995: Operation and all the parts is done, operation id: 281474976710693:0 
2026-01-08T15:34:51.987317Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:270: Unable to activate 281474976710693:0 2026-01-08T15:34:51.988177Z node 1 :TX_DATASHARD INFO: datashard_impl.h:3339: 72075186224037888 Reporting state Offline to schemeshard 72057594046644480 2026-01-08T15:34:51.993225Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__state_changed_reply.cpp:78: TTxShardStateChanged DoExecute, datashard informs about state changing, datashardId: 72075186224037888, state: Offline, at schemeshard: 72057594046644480 2026-01-08T15:34:52.002747Z node 1 :TX_DATASHARD INFO: datashard.cpp:196: OnTabletStop: 72075186224037888 reason = ReasonStop 2026-01-08T15:34:52.003536Z node 1 :TX_DATASHARD INFO: datashard.cpp:256: OnTabletDead: 72075186224037888 2026-01-08T15:34:52.003657Z node 1 :TX_DATASHARD INFO: datashard.cpp:1310: Change sender killed: at tablet: 72075186224037888 2026-01-08T15:34:52.003770Z node 1 :HIVE WARN: hive_impl.cpp:519: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037888 not found 2026-01-08T15:34:52.003937Z node 1 :KQP_COMPUTE WARN: kqp_read_actor.cpp:1107: TxId: 281474976722411, task: 1, CA Id [1:7593014663367397899:2368]. Got EvDeliveryProblem, TabletId: 72075186224037888, NotDelivered: 0 2026-01-08T15:34:52.110179Z node 1 :KQP_COMPUTE WARN: kqp_read_actor.cpp:1107: TxId: 281474976722411, task: 1, CA Id [1:7593014663367397899:2368]. Got EvDeliveryProblem, TabletId: 72075186224037888, NotDelivered: 1 2026-01-08T15:34:52.318354Z node 1 :KQP_COMPUTE WARN: kqp_read_actor.cpp:1107: TxId: 281474976722411, task: 1, CA Id [1:7593014663367397899:2368]. Got EvDeliveryProblem, TabletId: 72075186224037888, NotDelivered: 1 2026-01-08T15:34:52.718805Z node 1 :KQP_COMPUTE WARN: kqp_read_actor.cpp:1107: TxId: 281474976722411, task: 1, CA Id [1:7593014663367397899:2368]. Got EvDeliveryProblem, TabletId: 72075186224037888, NotDelivered: 1 2026-01-08T15:34:53.414398Z node 1 :KQP_COMPUTE WARN: kqp_read_actor.cpp:1107: TxId: 281474976722411, task: 1, CA Id [1:7593014663367397899:2368]. Got EvDeliveryProblem, TabletId: 72075186224037888, NotDelivered: 1 2026-01-08T15:34:54.458440Z node 1 :KQP_COMPUTE WARN: kqp_read_actor.cpp:1107: TxId: 281474976722411, task: 1, CA Id [1:7593014663367397899:2368]. Got EvDeliveryProblem, TabletId: 72075186224037888, NotDelivered: 1 2026-01-08T15:34:55.139623Z node 1 :KQP_COMPUTE WARN: kqp_read_actor.cpp:1107: TxId: 281474976722411, task: 1, CA Id [1:7593014663367397899:2368]. Got EvDeliveryProblem, TabletId: 72075186224037888, NotDelivered: 1 2026-01-08T15:34:55.897556Z node 1 :KQP_COMPUTE WARN: kqp_read_actor.cpp:1107: TxId: 281474976722411, task: 1, CA Id [1:7593014663367397899:2368]. 
Got EvDeliveryProblem, TabletId: 72075186224037888, NotDelivered: 1 Table has 2 shards |87.6%| [TM] {BAZEL_UPLOAD} ydb/services/ydb/table_split_ut/unittest >> TMiniKQLEngineFlatTest::TestPureProgram >> TMiniKQLEngineFlatTest::TestPureProgram [GOOD] >> TMiniKQLEngineFlatTest::TestSelectRangeFullExists >> TMiniKQLEngineFlatTest::TestSelectRangeFullExists [GOOD] >> TMiniKQLEngineFlatTest::TestSelectRangeFromInclusive >> ReadAttributesUtils::AttributesGatheringFilter [GOOD] >> ReadAttributesUtils::AttributesGatheringRecursive [GOOD] >> ReadAttributesUtils::ReplaceAttributesEmpty [GOOD] >> ReadAttributesUtils::ReplaceAttributesFilter [GOOD] >> TMiniKQLEngineFlatTest::TestSelectRangeFromInclusive [GOOD] >> TMiniKQLEngineFlatTest::TestSelectRangeFromExclusive >> TMiniKQLEngineFlatTest::TestSelectRangeFromExclusive [GOOD] >> TMiniKQLEngineFlatTest::TestSelectRangeBothIncFromIncTo >> TMiniKQLEngineFlatTest::TestSelectRangeBothIncFromIncTo [GOOD] >> TMiniKQLEngineFlatTest::TestSelectRangeBothExcFromIncTo [GOOD] >> TMiniKQLEngineFlatTest::TestSelectRangeBothIncFromExcTo >> TMiniKQLEngineFlatTest::TestSelectRangeBothIncFromExcTo [GOOD] >> TMiniKQLEngineFlatTest::TestSelectRangeBothExcFromExcTo [GOOD] >> TMiniKQLEngineFlatTest::TestMapsPushdown >> TMiniKQLProtoTestYdb::TestExportVoidTypeYdb >> TMiniKQLEngineFlatTest::TestMapsPushdown [GOOD] >> TMiniKQLEngineFlatTest::TestNoOrderedTakePushdown >> TMiniKQLProtoTestYdb::TestExportVoidTypeYdb [GOOD] >> TMiniKQLProtoTestYdb::TestExportUuidTypeYdb [GOOD] >> TMiniKQLProtoTestYdb::TestExportTupleTypeYdb >> TMiniKQLEngineFlatTest::TestNoOrderedTakePushdown [GOOD] >> TMiniKQLEngineFlatTest::TestNoAggregatedPushdown >> TMiniKQLProtoTestYdb::TestExportTupleTypeYdb [GOOD] >> TMiniKQLProtoTestYdb::TestExportStructTypeYdb >> TMiniKQLEngineFlatTest::TestNoAggregatedPushdown [GOOD] >> TMiniKQLEngineFlatTest::TestNoPartialSortPushdown >> TMiniKQLProtoTestYdb::TestExportStructTypeYdb [GOOD] >> TMiniKQLProtoTestYdb::TestExportVariantTupleTypeYdb >> TMiniKQLEngineFlatTest::TestNoPartialSortPushdown [GOOD] >> TMiniKQLEngineFlatTest::TestMultiRSPerDestination |87.6%| [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/provider/ut/unittest >> ReadAttributesUtils::ReplaceAttributesFilter [GOOD] |87.6%| [TM] {BAZEL_UPLOAD} ydb/core/kqp/provider/ut/unittest >> TMiniKQLProtoTestYdb::TestExportVariantTupleTypeYdb [GOOD] >> TMiniKQLProtoTestYdb::TestExportVariantStructTypeYdb >> TMiniKQLEngineFlatTest::TestMultiRSPerDestination [GOOD] >> TMiniKQLProtoTestYdb::TestExportVariantStructTypeYdb [GOOD] >> TMiniKQLProtoTestYdb::TestExportVoidYdb >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-LocalUser-DropUser-62 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-LocalUser-DropUser-63 |87.6%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/mind/bscontroller/ut_bscontroller/ydb-core-mind-bscontroller-ut_bscontroller |87.6%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/mind/bscontroller/ut_bscontroller/ydb-core-mind-bscontroller-ut_bscontroller |87.6%| [LD] {RESULT} $(B)/ydb/core/mind/bscontroller/ut_bscontroller/ydb-core-mind-bscontroller-ut_bscontroller >> TMiniKQLProtoTestYdb::TestExportVoidYdb [GOOD] >> TMiniKQLProtoTestYdb::TestExportStringYdb >> TMiniKQLEngineFlatTest::TestSelectRangeFullWithoutColumnsNotExists >> TMiniKQLProtoTestYdb::TestExportStringYdb [GOOD] >> TMiniKQLProtoTestYdb::TestExportUuidYdb >> TMiniKQLEngineFlatTest::TestSelectRangeFullWithoutColumnsNotExists [GOOD] >> TMiniKQLEngineFlatTest::TestSelectRangeFullWithoutColumnsNotExistsNullKey >> 
TMiniKQLProtoTestYdb::TestExportUuidYdb [GOOD] >> TMiniKQLProtoTestYdb::TestExportTupleYdb >> TMiniKQLEngineFlatTest::TestSelectRangeFullWithoutColumnsNotExistsNullKey [GOOD] >> TMiniKQLEngineFlatTest::TestSelectRangeFullExistsTruncatedByItems [GOOD] >> TMiniKQLProtoTestYdb::TestExportTupleYdb [GOOD] >> TMiniKQLProtoTestYdb::TestExportStructYdb >> TMiniKQLEngineFlatTest::TestSelectRangeFullExistsTruncatedByItemsFromNull [GOOD] >> TMiniKQLEngineFlatTest::TestSelectRangeFullExistsTruncatedByBytes >> TxUsage::WriteToTopic_Demo_20_RestartAfterCommit_Table [GOOD] >> TMiniKQLProgramBuilderTest::TestEraseRowStaticKey >> TMiniKQLEngineFlatTest::TestSelectRangeFullExistsTruncatedByBytes [GOOD] >> TMiniKQLEngineFlatTest::TestSelectRangeNullNull >> TMiniKQLProtoTestYdb::TestExportStructYdb [GOOD] >> TMiniKQLProtoTestYdb::TestExportVariantYdb >> TMiniKQLProgramBuilderTest::TestEraseRowStaticKey [GOOD] >> TMiniKQLProgramBuilderTest::TestEraseRowPartialDynamicKey >> TMiniKQLEngineFlatTest::TestSelectRangeNullNull [GOOD] >> TMiniKQLEngineFlatTest::TestSelectRangeToExclusive >> TMiniKQLProtoTestYdb::TestExportVariantYdb [GOOD] >> TMiniKQLEngineFlatTest::TestSelectRowWithoutColumnsNotExists [GOOD] >> TMiniKQLEngineFlatTest::TestSelectRowWithoutColumnsExists ------- [TM] {asan, default-linux-x86_64, release} ydb/core/engine/ut/unittest >> TMiniKQLEngineFlatTest::TestMultiRSPerDestination [GOOD] Test command err: PrepareShardPrograms (491): too many shard readsets (2 > 1), src tables: [200:301:0], dst tables: [200:301:0] Type { Kind: Struct } >> TMiniKQLProgramBuilderTest::TestEraseRowPartialDynamicKey [GOOD] >> TMiniKQLProgramBuilderTest::TestSelectRow >> TMiniKQLEngineFlatTest::TestSelectRangeToExclusive [GOOD] >> TMiniKQLEngineFlatTest::TestSelectRangeNoShards [GOOD] >> KikimrIcGateway::TestDropExternalTable [GOOD] >> KikimrIcGateway::TestDropExternalDataSource >> TMiniKQLEngineFlatTest::TestSelectRangeReverseWithPartitions >> TMiniKQLEngineFlatTest::TestSelectRowWithoutColumnsExists [GOOD] >> TMiniKQLEngineFlatTest::TestSelectRowPayload >> KikimrIcGateway::TestListPath [GOOD] >> KikimrIcGateway::TestDropTable >> TMiniKQLProgramBuilderTest::TestSelectRow [GOOD] >> TMiniKQLProgramBuilderTest::TestUpdateRowDynamicKey >> TMiniKQLEngineFlatTest::TestSelectRangeReverseWithPartitions [GOOD] >> TMiniKQLEngineFlatTest::TestSelectRangeReverseWithPartitionsTruncatedByItems1 |87.6%| [TM] {BAZEL_UPLOAD} ydb/core/engine/ut/unittest >> TMiniKQLEngineFlatTest::TestSelectRowPayload [GOOD] >> TMiniKQLEngineFlatTest::TestSelectRowPayloadNullKey >> KikimrIcGateway::TestCreateExternalTable [GOOD] >> KikimrIcGateway::TestCreateSameExternalTable >> TMiniKQLProgramBuilderTest::TestUpdateRowDynamicKey [GOOD] >> TMiniKQLProgramBuilderTest::TestSelectFromInclusiveRange >> TMiniKQLEngineFlatTest::TestSelectRangeReverseWithPartitionsTruncatedByItems1 [GOOD] >> TMiniKQLEngineFlatTest::TestSelectRangeReverseWithPartitionsTruncatedByItems2 >> TMiniKQLEngineFlatTest::TestSelectRowPayloadNullKey [GOOD] >> TMiniKQLEngineFlatTest::TestSelectRangeToInclusive >> TMiniKQLProgramBuilderTest::TestSelectFromInclusiveRange [GOOD] >> TMiniKQLProgramBuilderTest::TestSelectFromExclusiveRange >> KikimrIcGateway::TestLoadExternalTable [GOOD] >> KikimrIcGateway::TestLoadMdbBasicSecretValueFromExternalDataSourceMetadata+UseSchemaSecrets >> TMiniKQLEngineFlatTest::TestSelectRangeReverseWithPartitionsTruncatedByItems2 [GOOD] >> TMiniKQLEngineFlatTest::TestSelectRangeReverseWithPartitionsTruncatedByItems3 >> 
TMiniKQLEngineFlatTest::TestSelectRangeToInclusive [GOOD] >> TMiniKQLEngineFlatTest::TestSelectRowManyShards >> TMiniKQLProgramBuilderTest::TestSelectFromExclusiveRange [GOOD] >> TMiniKQLProgramBuilderTest::TestSelectToInclusiveRange [GOOD] >> TMiniKQLProgramBuilderTest::TestSelectToExclusiveRange >> TMiniKQLEngineFlatTest::TestSelectRangeReverseWithPartitionsTruncatedByItems3 [GOOD] >> TMiniKQLEngineFlatTest::TestSelectRangeNoColumns >> TMiniKQLEngineFlatTest::TestSelectRowManyShards [GOOD] >> TMiniKQLEngineFlatTest::TestSelectRowNoShards >> TMiniKQLEngineFlatTest::TestSelectRangeNoColumns [GOOD] >> TMiniKQLEngineFlatTest::TestSelectRowNoShards [GOOD] >> TMiniKQLEngineFlatTest::TestSelectRangeWithPartitions >> TMiniKQLProgramBuilderTest::TestSelectToExclusiveRange [GOOD] >> TMiniKQLProgramBuilderTest::TestSelectBothFromInclusiveToInclusiveRange [GOOD] >> TMiniKQLProgramBuilderTest::TestSelectBothFromExclusiveToExclusiveRange >> TMiniKQLEngineFlatTest::TestSelectRangeWithPartitions [GOOD] >> TMiniKQLEngineFlatTest::TestSelectRangeWithPartitionsTruncatedByItems >> TMiniKQLEngineFlatTest::TestSelectRangeWithPartitionsTruncatedByItems [GOOD] >> TMiniKQLEngineFlatTest::TestSelectRangeWithPartitionsTruncatedByBytes >> TMiniKQLProgramBuilderTest::TestSelectBothFromExclusiveToExclusiveRange [GOOD] >> TMiniKQLProgramBuilderTest::TestInvalidParameterName >> TMiniKQLEngineFlatTest::TestSelectRangeWithPartitionsTruncatedByBytes [GOOD] >> TMiniKQLEngineFlatTest::TestSomePushDown >> TMiniKQLProgramBuilderTest::TestInvalidParameterName [GOOD] >> TMiniKQLProgramBuilderTest::TestInvalidParameterType >> TMiniKQLEngineFlatTest::TestSomePushDown [GOOD] >> TMiniKQLEngineFlatTest::TestTakePushdown >> TMiniKQLProgramBuilderTest::TestInvalidParameterType [GOOD] >> TxUsage::WriteToTopic_Demo_20_RestartAfterCommit_Query |87.6%| [TM] {asan, default-linux-x86_64, release} ydb/core/engine/ut/unittest >> TMiniKQLProtoTestYdb::TestExportVariantYdb [GOOD] |87.6%| [TM] {BAZEL_UPLOAD} ydb/core/engine/ut/unittest >> TMiniKQLEngineFlatTest::TestTakePushdown [GOOD] >> TMiniKQLEngineFlatTest::TestTopSortNonImmediatePushdown |87.6%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/datashard/ut_change_collector/ydb-core-tx-datashard-ut_change_collector |87.6%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/datashard/ut_change_collector/ydb-core-tx-datashard-ut_change_collector |87.6%| [LD] {RESULT} $(B)/ydb/core/tx/datashard/ut_change_collector/ydb-core-tx-datashard-ut_change_collector >> TMiniKQLEngineFlatTest::TestTopSortNonImmediatePushdown [GOOD] >> KqpRboYql::BasicJoins [GOOD] >> TTxDataShardSampleKScan::BadRequest [GOOD] >> TTxDataShardSampleKScan::RunScan |87.6%| [TM] {asan, default-linux-x86_64, release} ydb/core/engine/ut/unittest >> TMiniKQLEngineFlatTest::TestSelectRangeNoColumns [GOOD] |87.6%| [TM] {BAZEL_UPLOAD} ydb/core/engine/ut/unittest >> TxUsage::WriteToTopic_Demo_18_RestartAfterCommit_Table [GOOD] |87.6%| [TM] {asan, default-linux-x86_64, release} ydb/core/engine/ut/unittest >> TMiniKQLProgramBuilderTest::TestInvalidParameterType [GOOD] |87.6%| [TM] {BAZEL_UPLOAD} ydb/core/engine/ut/unittest |87.6%| [TM] {asan, default-linux-x86_64, release} ydb/core/engine/ut/unittest >> TMiniKQLEngineFlatTest::TestTopSortNonImmediatePushdown [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-LocalUser-CreateUser-15 [FAIL] >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-LocalUser-CreateUser-16 |87.6%| [TM] {BAZEL_UPLOAD} ydb/core/engine/ut/unittest >> TMiniKQLProtoTestYdb::TestExportOptionalTypeYdb >> 
TMiniKQLProtoTestYdb::TestExportOptionalTypeYdb [GOOD] >> TMiniKQLProtoTestYdb::TestExportListTypeYdb >> TMiniKQLProtoTestYdb::TestExportListTypeYdb [GOOD] >> TMiniKQLProtoTestYdb::TestExportIntegralYdb >> KikimrIcGateway::TestSecretsExistingValidation-UseSchemaSecrets [GOOD] >> KikimrProvider::TestFillAuthPropertiesBasic [GOOD] >> KikimrProvider::TestFillAuthPropertiesAws [GOOD] >> KikimrProvider::AlterTableAddIndexWithTableSettings >> TMiniKQLProtoTestYdb::TestExportIntegralYdb [GOOD] >> TMiniKQLProtoTestYdb::TestExportEmptyOptionalYdb >> KikimrProvider::AlterTableAddIndexWithTableSettings [GOOD] >> TMiniKQLProtoTestYdb::TestExportEmptyOptionalYdb [GOOD] >> KikimrIcGateway::TestLoadTableMetadata [GOOD] >> KikimrIcGateway::TestLoadTokenSecretValueFromExternalDataSourceMetadata+UseSchemaSecrets >> TMiniKQLProtoTestYdb::TestExportMultipleOptionalNotEmptyYdb >> TMiniKQLProtoTestYdb::TestExportMultipleOptionalNotEmptyYdb [GOOD] >> TMiniKQLProtoTestYdb::TestExportOptionalYdb >> TMiniKQLProtoTestYdb::TestExportOptionalYdb [GOOD] >> TMiniKQLProtoTestYdb::TestExportListYdb >> TMiniKQLProtoTestYdb::TestExportListYdb [GOOD] >> TMiniKQLProtoTestYdb::TestExportMultipleOptionalVariantNotNullYdb >> TMiniKQLProtoTestYdb::TestExportMultipleOptionalVariantNotNullYdb [GOOD] >> TMiniKQLProtoTestYdb::TestExportOptionalVariantOptionalNullYdb >> TMiniKQLProtoTestYdb::TestExportOptionalVariantOptionalNullYdb [GOOD] >> TMiniKQLProtoTestYdb::TestExportMultipleOptionalVariantOptionalNullYdb >> TMiniKQLProtoTestYdb::TestExportMultipleOptionalVariantOptionalNullYdb [GOOD] >> TMiniKQLProtoTestYdb::TestExportMultipleOptionalVariantOptionalNotNullYdb >> TMiniKQLProtoTestYdb::TestExportMultipleOptionalVariantOptionalNotNullYdb [GOOD] >> TMiniKQLProtoTestYdb::TestExportOptionalVariantOptionalYdbType >> TMiniKQLProtoTestYdb::TestExportOptionalVariantOptionalYdbType [GOOD] >> TMiniKQLProgramBuilderTest::TestUpdateRowStaticKey >> TMiniKQLProgramBuilderTest::TestUpdateRowStaticKey [GOOD] >> TMiniKQLProtoTestYdb::TestExportDataTypeYdb >> TMiniKQLProtoTestYdb::TestExportDataTypeYdb [GOOD] >> TMiniKQLProtoTestYdb::TestExportDecimalTypeYdb >> TMiniKQLProtoTestYdb::TestExportDecimalTypeYdb [GOOD] >> TMiniKQLProtoTestYdb::TestExportDictTypeYdb [GOOD] >> TMiniKQLProtoTestYdb::TestExportBoolYdb >> KikimrIcGateway::TestLoadBasicSecretValueFromExternalDataSourceMetadata+UseSchemaSecrets [GOOD] >> KikimrIcGateway::TestLoadBasicSecretValueFromExternalDataSourceMetadata-UseSchemaSecrets >> TMiniKQLProtoTestYdb::TestExportBoolYdb [GOOD] >> TMiniKQLProtoTestYdb::TestExportDoubleYdb >> TMiniKQLEngineFlatTest::TestEmptyProgram >> TMiniKQLEngineFlatTest::TestEmptyProgram [GOOD] >> TMiniKQLEngineFlatTest::TestEraseRow [GOOD] >> TMiniKQLEngineFlatTest::TestEraseRowNullKey ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/rbo/unittest >> KqpRboYql::BasicJoins [GOOD] Test command err: Trying to start YDB, gRPC: 13640, MsgBus: 10098 2026-01-08T15:34:05.019257Z node 1 :METADATA_PROVIDER WARN: log.cpp:841: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7593014464867660425:2068];send_to=[0:7307199536658146131:7762515]; 2026-01-08T15:34:05.019315Z node 1 :METADATA_PROVIDER ERROR: log.cpp:841: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2026-01-08T15:34:05.298924Z node 1 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check 
script execution tables existence, scheme status: LookupError, path: Root/.metadata/script_executions 2026-01-08T15:34:05.407387Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2026-01-08T15:34:05.407485Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2026-01-08T15:34:05.488504Z node 1 :IMPORT WARN: schemeshard_import.cpp:440: Table profiles were not loaded 2026-01-08T15:34:05.498955Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1374: Notification cookie mismatch for subscription [1:7593014460572693093:2081] 1767886445017810 != 1767886445017813 2026-01-08T15:34:05.532905Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2026-01-08T15:34:05.551278Z node 1 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: PathErrorUnknown, path: Root/.metadata/script_executions 2026-01-08T15:34:05.900740Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2026-01-08T15:34:05.900770Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2026-01-08T15:34:05.900776Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2026-01-08T15:34:05.900889Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2026-01-08T15:34:06.042764Z node 1 :TX_CONVEYOR ERROR: log.cpp:841: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2026-01-08T15:34:06.571388Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2026-01-08T15:34:06.595500Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:123: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2026-01-08T15:34:08.951064Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7593014477752563167:2324], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:34:08.951219Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:263: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:34:08.952876Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7593014477752563177:2325], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:34:08.952960Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:263: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:34:09.296435Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateColumnTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/olap/operations/create_table.cpp:814) 2026-01-08T15:34:09.959109Z node 1 :TX_COLUMNSHARD WARN: log.cpp:841: tablet_id=72075186224037946;self_id=[1:7593014482047531086:2341];tablet_id=72075186224037946;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2026-01-08T15:34:09.959333Z node 1 :TX_COLUMNSHARD WARN: log.cpp:841: tablet_id=72075186224037946;self_id=[1:7593014482047531086:2341];tablet_id=72075186224037946;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2026-01-08T15:34:09.959537Z node 1 :TX_COLUMNSHARD WARN: log.cpp:841: tablet_id=72075186224037946;self_id=[1:7593014482047531086:2341];tablet_id=72075186224037946;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2026-01-08T15:34:09.959627Z node 1 :TX_COLUMNSHARD WARN: log.cpp:841: tablet_id=72075186224037946;self_id=[1:7593014482047531086:2341];tablet_id=72075186224037946;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2026-01-08T15:34:09.959714Z node 1 :TX_COLUMNSHARD WARN: log.cpp:841: tablet_id=72075186224037946;self_id=[1:7593014482047531086:2341];tablet_id=72075186224037946;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2026-01-08T15:34:09.959814Z node 1 :TX_COLUMNSHARD WARN: log.cpp:841: tablet_id=72075186224037946;self_id=[1:7593014482047531086:2341];tablet_id=72075186224037946;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2026-01-08T15:34:09.959894Z node 1 :TX_COLUMNSHARD WARN: log.cpp:841: tablet_id=72075186224037946;self_id=[1:7593014482047531086:2341];tablet_id=72075186224037946;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2026-01-08T15:34:09.959976Z node 1 :TX_COLUMNSHARD WARN: log.cpp:841: tablet_id=72075186224037946;self_id=[1:7593014482047531086:2341];tablet_id=72075186224037946;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2026-01-08T15:34:09.960055Z node 1 :TX_COLUMNSHARD WARN: log.cpp:841: tablet_id=72075186224037946;self_id=[1:7593014482047531086:2341];tablet_id=72075186224037946;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2026-01-08T15:34:09.960155Z node 1 :TX_COLUMNSHARD WARN: log.cpp:841: tablet_id=72075186224037946;self_id=[1:7593014482047531086:2341];tablet_id=72075186224037946;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2026-01-08T15:34:09.960245Z node 1 :TX_COLUMNSHARD WARN: log.cpp:841: tablet_id=72075186224037946;self_id=[1:7593014482047531086:2341];tablet_id=72075186224037946;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 
2026-01-08T15:34:09.960321Z node 1 :TX_COLUMNSHARD WARN: log.cpp:841: tablet_id=72075186224037946;self_id=[1:7593014482047531086:2341];tablet_id=72075186224037946;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CopyBlobIdsToV2; 2026-01-08T15:34:09.960403Z node 1 :TX_COLUMNSHARD WARN: log.cpp:841: tablet_id=72075186224037946;self_id=[1:7593014482047531086:2341];tablet_id=72075186224037946;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreAppearanceSnapshot; 2026-01-08T15:34:10.007570Z node 1 :TX_COLUMNSHARD WARN: log.cpp:841: tablet_id=72075186224037919;self_id=[1:7593014482047531087:2342];tablet_id=72075186224037919;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2026-01-08T15:34:10.007692Z node 1 :TX_COLUMNSHARD WARN: log.cpp:841: tablet_id=72075186224037919;self_id=[1:7593014482047531087:2342];tablet_id=72075186224037919;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2026-01-08T15:34:10.007874Z node 1 :TX_COLUMNSHARD WARN: log.cpp:841: tablet_id=72075186224037919;self_id=[1:7593014482047531087:2342];tablet_id=72075186224037919;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2026-01-08T15:34:10.007962Z node 1 :TX_COLUMNSHARD WARN: log.cpp:841: tablet_id=72075186224037919;self_id=[1:7593014482047531087:2342];tablet_id=72075186224037919;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2026-01-08T15:34:10.008055Z node 1 :TX_COLUMNSHARD WARN: log.cpp:841: tablet_id=72075186224037919;self_id=[1:7593014482047531087:2342];tablet_id=72075186224037919;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2026-01-08T15:34:10.008147Z node 1 :TX_COLUMNSHARD WARN: log.cpp:841: tablet_id=72075186224037919;self_id=[1:7593014482047531087:2342];tablet_id=72075186224037919;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2026-01-08T15:34:10.008228Z node 1 :TX_COLUMNSHARD WARN: log.cpp:841: tablet_id=72075186224037919;self_id=[1:7593014482047531087:2342];tablet_id=72075186224037919;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2026-01-08T15:34:10.008313Z node 1 :TX_COLUMNSHARD WARN: log.cpp:841: tablet_id=72075186224037919;self_id=[1:7593014482047531087:2342];tablet_id=72075186224037919;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2026-01-08T15:34:10.008445Z node 1 :TX_COLUMNSHARD WARN: log.cpp:841: tablet_id=72075186224037919;self_id=[1:7593014482047531087:2342];tablet_id=72075186224037919;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2026-01-08T15:34:10.008554Z node 1 :TX_COLUMNSHARD WARN: log.cpp:841: tablet_id=72075186224037919;self_id=[1:7593014482047531087:2342];tablet_id=72075186224037919;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2026-01-08T15:34:10.008643Z node 1 :TX_COLUMNSHARD WARN: log.cpp:841: 
tablet_id=72075186224037919;self_id=[1:7593014482047531087:2342];tablet_id=72075186224037919;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2026-01-08T15:34:10.008725Z node 1 :TX_COLUMNSHARD WARN: log.cpp:841: tablet_id=72075186224037919;self_id=[1:7593014482047531087:2342];tablet_id=72075186224037919;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CopyBlobIdsToV2; 2026-01-08T15:34:10.008810Z node 1 :TX_COLUMNSHARD WARN: log.cpp:841: tablet_id=72075186224037919;self_id=[1:7593014482047531087:2342];tablet_id=72075186224037919;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreAppearanceSnapshot; 2026-01-08T15:34:10.018490Z node ... 81474976715660;fline=tables_manager.cpp:57;method=resolve_internal_path_id;ss_local=40;result=not_found; 2026-01-08T15:34:38.398523Z node 3 :TX_COLUMNSHARD WARN: log.cpp:841: tablet_id=72075186224038023;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715660;fline=tables_manager.cpp:57;method=resolve_internal_path_id;ss_local=40;result=not_found; 2026-01-08T15:34:38.404152Z node 3 :TX_COLUMNSHARD WARN: log.cpp:841: tablet_id=72075186224038030;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715660;fline=tables_manager.cpp:57;method=resolve_internal_path_id;ss_local=40;result=not_found; 2026-01-08T15:34:38.404230Z node 3 :TX_COLUMNSHARD WARN: log.cpp:841: tablet_id=72075186224038030;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715660;fline=tables_manager.cpp:57;method=resolve_internal_path_id;ss_local=40;result=not_found; 2026-01-08T15:34:38.404249Z node 3 :TX_COLUMNSHARD WARN: log.cpp:841: tablet_id=72075186224038030;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715660;fline=tables_manager.cpp:57;method=resolve_internal_path_id;ss_local=40;result=not_found; 2026-01-08T15:34:38.417350Z node 3 :TX_COLUMNSHARD WARN: log.cpp:841: tablet_id=72075186224038032;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715660;fline=tables_manager.cpp:57;method=resolve_internal_path_id;ss_local=40;result=not_found; 2026-01-08T15:34:38.417559Z node 3 :TX_COLUMNSHARD WARN: log.cpp:841: tablet_id=72075186224038032;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715660;fline=tables_manager.cpp:57;method=resolve_internal_path_id;ss_local=40;result=not_found; 2026-01-08T15:34:38.417581Z node 3 :TX_COLUMNSHARD WARN: log.cpp:841: tablet_id=72075186224038032;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715660;fline=tables_manager.cpp:57;method=resolve_internal_path_id;ss_local=40;result=not_found; 2026-01-08T15:34:38.430231Z node 3 :TX_COLUMNSHARD WARN: log.cpp:841: tablet_id=72075186224038072;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715660;fline=tables_manager.cpp:57;method=resolve_internal_path_id;ss_local=40;result=not_found; 2026-01-08T15:34:38.430309Z node 3 :TX_COLUMNSHARD WARN: log.cpp:841: tablet_id=72075186224038072;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715660;fline=tables_manager.cpp:57;method=resolve_internal_path_id;ss_local=40;result=not_found; 2026-01-08T15:34:38.432609Z node 3 :TX_COLUMNSHARD WARN: log.cpp:841: tablet_id=72075186224038072;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715660;fline=tables_manager.cpp:57;method=resolve_internal_path_id;ss_local=40;result=not_found; 2026-01-08T15:34:38.441290Z node 3 :TX_COLUMNSHARD WARN: log.cpp:841: 
tablet_id=72075186224038035;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715660;fline=tables_manager.cpp:57;method=resolve_internal_path_id;ss_local=40;result=not_found; 2026-01-08T15:34:38.441363Z node 3 :TX_COLUMNSHARD WARN: log.cpp:841: tablet_id=72075186224038035;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715660;fline=tables_manager.cpp:57;method=resolve_internal_path_id;ss_local=40;result=not_found; 2026-01-08T15:34:38.441381Z node 3 :TX_COLUMNSHARD WARN: log.cpp:841: tablet_id=72075186224038035;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715660;fline=tables_manager.cpp:57;method=resolve_internal_path_id;ss_local=40;result=not_found; 2026-01-08T15:34:38.448123Z node 3 :TX_COLUMNSHARD WARN: log.cpp:841: tablet_id=72075186224038055;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715660;fline=tables_manager.cpp:57;method=resolve_internal_path_id;ss_local=40;result=not_found; 2026-01-08T15:34:38.448197Z node 3 :TX_COLUMNSHARD WARN: log.cpp:841: tablet_id=72075186224038055;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715660;fline=tables_manager.cpp:57;method=resolve_internal_path_id;ss_local=40;result=not_found; 2026-01-08T15:34:38.448214Z node 3 :TX_COLUMNSHARD WARN: log.cpp:841: tablet_id=72075186224038055;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715660;fline=tables_manager.cpp:57;method=resolve_internal_path_id;ss_local=40;result=not_found; 2026-01-08T15:34:38.456480Z node 3 :TX_COLUMNSHARD WARN: log.cpp:841: tablet_id=72075186224038025;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715660;fline=tables_manager.cpp:57;method=resolve_internal_path_id;ss_local=40;result=not_found; 2026-01-08T15:34:38.456551Z node 3 :TX_COLUMNSHARD WARN: log.cpp:841: tablet_id=72075186224038025;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715660;fline=tables_manager.cpp:57;method=resolve_internal_path_id;ss_local=40;result=not_found; 2026-01-08T15:34:38.456568Z node 3 :TX_COLUMNSHARD WARN: log.cpp:841: tablet_id=72075186224038025;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715660;fline=tables_manager.cpp:57;method=resolve_internal_path_id;ss_local=40;result=not_found; 2026-01-08T15:34:38.457397Z node 3 :TX_COLUMNSHARD WARN: log.cpp:841: tablet_id=72075186224038059;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715660;fline=tables_manager.cpp:57;method=resolve_internal_path_id;ss_local=40;result=not_found; 2026-01-08T15:34:38.457439Z node 3 :TX_COLUMNSHARD WARN: log.cpp:841: tablet_id=72075186224038059;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715660;fline=tables_manager.cpp:57;method=resolve_internal_path_id;ss_local=40;result=not_found; 2026-01-08T15:34:38.457452Z node 3 :TX_COLUMNSHARD WARN: log.cpp:841: tablet_id=72075186224038059;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715660;fline=tables_manager.cpp:57;method=resolve_internal_path_id;ss_local=40;result=not_found; 2026-01-08T15:34:38.466826Z node 3 :TX_COLUMNSHARD WARN: log.cpp:841: tablet_id=72075186224038019;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715660;fline=tables_manager.cpp:57;method=resolve_internal_path_id;ss_local=40;result=not_found; 2026-01-08T15:34:38.466902Z node 3 :TX_COLUMNSHARD WARN: log.cpp:841: tablet_id=72075186224038019;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715660;fline=tables_manager.cpp:57;method=resolve_internal_path_id;ss_local=40;result=not_found; 2026-01-08T15:34:38.466920Z node 3 :TX_COLUMNSHARD 
WARN: log.cpp:841: tablet_id=72075186224038019;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715660;fline=tables_manager.cpp:57;method=resolve_internal_path_id;ss_local=40;result=not_found; 2026-01-08T15:34:38.489116Z node 3 :KQP_SLOW_LOG WARN: kqp_worker_common.cpp:132: TraceId: "01kef3xbyde0fmhgv805x8xfdh", SessionId: ydb://session/3?node_id=3&id=YmQ2M2Y5NjAtZTM4NzA5ZjYtNTdlMTI0YjktNjBjMGQ3ZjU=, Slow query, duration: 11.465532s, status: SUCCESS, user: UNAUTHENTICATED, results: 0b, text: "\n CREATE TABLE `/Root/t1` (\n a Int64\tNOT NULL,\n\t b Int64,\n primary key(a)\n ) WITH (Store = Column);\n\n CREATE TABLE `/Root/t2` (\n a Int64\tNOT NULL,\n\t b Int64,\n primary key(a)\n ) WITH (Store = Column);\n\n CREATE TABLE `/Root/t3` (\n a Int64\tNOT NULL,\n\t b Int64,\n primary key(a)\n ) WITH (Store = Column);\n ", parameters: 0b 2026-01-08T15:34:38.638534Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7593014608364745519:3351], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:34:38.638645Z node 3 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:263: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:34:38.639160Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7593014608364745524:3354], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:34:38.639212Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7593014608364745525:3355], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:34:38.639326Z node 3 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:263: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:34:38.644457Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715661:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2026-01-08T15:34:38.688215Z node 3 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [3:7593014608364745528:3356], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715661 completed, doublechecking } 2026-01-08T15:34:38.785821Z node 3 :TX_PROXY ERROR: schemereq.cpp:624: Actor# [3:7593014608364745579:6610] txid# 281474976715662, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 44], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2026-01-08T15:35:03.297022Z node 3 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:625: Compilation failed, self: [3:7593014715738936734:11442], status: INTERNAL_ERROR, issues:
: Fatal: Execution, code: 1060
:3:135: Fatal: ydb/core/kqp/opt/rbo/kqp_rewrite_select.cpp:609: Condition violated: `(isLeftSidePredicate && !isRightSidePredicate) || (isRightSidePredicate && !isLeftSidePredicate)', code: 1 2026-01-08T15:35:03.298776Z node 3 :KQP_SESSION WARN: {KQPSA@kqp_session_actor.cpp:2679} SessionId: ydb://session/3?node_id=3&id=NDdjYTVmZGEtOTI2MGQ3MWQtNDNhMzliNjgtNTRjMjUxZjA=, ActorId: [3:7593014715738936731:11440], ActorState: ExecuteState, LegacyTraceId: 01kef3yf8h795thnatn3qk53d3, ReplyQueryCompileError, remove tx status# INTERNAL_ERROR issues# { message: "Execution" issue_code: 1060 issues { position { row: 3 column: 135 } message: "ydb/core/kqp/opt/rbo/kqp_rewrite_select.cpp:609: Condition violated: `(isLeftSidePredicate && !isRightSidePredicate) || (isRightSidePredicate && !isLeftSidePredicate)\'" end_position { row: 3 column: 135 } issue_code: 1 } } tx_id# trace_id# 2026-01-08T15:35:03.447964Z node 3 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:625: Compilation failed, self: [3:7593014715738936751:11451], status: INTERNAL_ERROR, issues:
: Fatal: Execution, code: 1060
:3:143: Fatal: ydb/core/kqp/opt/rbo/kqp_rewrite_select.cpp:548: OR in join predicate is not supported, code: 1 2026-01-08T15:35:03.458221Z node 3 :KQP_SESSION WARN: {KQPSA@kqp_session_actor.cpp:2679} SessionId: ydb://session/3?node_id=3&id=ZTI5OGJjNTQtNWY0ZDZmMDYtOGU2MTA0M2ItZjZjYzY2MDE=, ActorId: [3:7593014715738936748:11449], ActorState: ExecuteState, LegacyTraceId: 01kef3yfd2ahcv24q4q5qybfbx, ReplyQueryCompileError, remove tx status# INTERNAL_ERROR issues# { message: "Execution" issue_code: 1060 issues { position { row: 3 column: 143 } message: "ydb/core/kqp/opt/rbo/kqp_rewrite_select.cpp:548: OR in join predicate is not supported" end_position { row: 3 column: 143 } issue_code: 1 } } tx_id# trace_id# |87.6%| [TM] {BAZEL_UPLOAD} ydb/core/kqp/ut/rbo/unittest >> TMiniKQLProtoTestYdb::TestExportDoubleYdb [GOOD] >> TMiniKQLProtoTestYdb::TestExportDecimalYdb >> TMiniKQLEngineFlatTest::TestEraseRowNullKey [GOOD] >> TMiniKQLEngineFlatTest::TestEraseRowManyShards >> TMiniKQLProtoTestYdb::TestExportDecimalYdb [GOOD] >> TMiniKQLProtoTestYdb::TestExportDecimalNegativeYdb ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/provider/ut/unittest >> KikimrProvider::AlterTableAddIndexWithTableSettings [GOOD] Test command err: Trying to start YDB, gRPC: 11179, MsgBus: 10125 2026-01-08T15:34:57.495892Z node 1 :METADATA_PROVIDER WARN: log.cpp:841: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7593014687813030345:2170];send_to=[0:7307199536658146131:7762515]; 2026-01-08T15:34:57.496156Z node 1 :METADATA_PROVIDER ERROR: log.cpp:841: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2026-01-08T15:34:58.026422Z node 1 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: LookupError, path: Root/.metadata/script_executions 2026-01-08T15:34:58.047982Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2026-01-08T15:34:58.048103Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2026-01-08T15:34:58.294683Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1374: Notification cookie mismatch for subscription [1:7593014687813030202:2081] 1767886497475575 != 1767886497475578 2026-01-08T15:34:58.305979Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2026-01-08T15:34:58.314365Z node 1 :IMPORT WARN: schemeshard_import.cpp:440: Table profiles were not loaded 2026-01-08T15:34:58.335560Z node 1 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: PathErrorUnknown, path: Root/.metadata/script_executions 2026-01-08T15:34:58.558533Z node 1 :TX_CONVEYOR ERROR: log.cpp:841: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2026-01-08T15:34:58.654080Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2026-01-08T15:34:58.654102Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2026-01-08T15:34:58.654166Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: 
failed to initialize from file: (empty maybe) 2026-01-08T15:34:58.654256Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2026-01-08T15:34:59.340255Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2026-01-08T15:34:59.354971Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:123: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2026-01-08T15:34:59.416909Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:34:59.621613Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:34:59.844944Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:34:59.932897Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:35:02.497060Z node 1 :METADATA_PROVIDER ERROR: log.cpp:841: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7593014687813030345:2170];send_to=[0:7307199536658146131:7762515]; 2026-01-08T15:35:02.500180Z node 1 :METADATA_PROVIDER ERROR: log.cpp:841: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2026-01-08T15:35:02.954980Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7593014709287868599:2401], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:35:02.955152Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:263: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:35:02.955879Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7593014709287868609:2402], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:35:02.955954Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:263: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:35:03.360630Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:35:03.436226Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:35:03.512914Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:35:03.577366Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:35:03.652774Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:35:03.749504Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:35:03.830565Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:35:03.987995Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710669:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:35:04.148917Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7593014717877804089:2487], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:35:04.149011Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:263: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:35:04.149464Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7593014717877804094:2490], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:35:04.149503Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7593014717877804095:2491], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:35:04.149613Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:263: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:35:04.154045Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710670:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2026-01-08T15:35:04.173129Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7593014717877804098:2492], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710670 completed, doublechecking } 2026-01-08T15:35:04.273419Z node 1 :TX_PROXY ERROR: schemereq.cpp:624: Actor# [1:7593014717877804149:3791] txid# 281474976710671, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 53], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } |87.6%| [TM] {BAZEL_UPLOAD} ydb/core/kqp/provider/ut/unittest >> TMiniKQLEngineFlatTest::TestEraseRowManyShards [GOOD] >> TMiniKQLEngineFlatTest::TestCASBoth2Success >> TMiniKQLProtoTestYdb::TestExportDecimalNegativeYdb [GOOD] >> TMiniKQLProtoTestYdb::TestExportDecimalHugeYdb |87.6%| [TM] {asan, default-linux-x86_64, release} ydb/core/engine/ut/unittest >> TMiniKQLProtoTestYdb::TestExportOptionalVariantOptionalYdbType [GOOD] |87.6%| [TM] {BAZEL_UPLOAD} ydb/core/engine/ut/unittest >> TMiniKQLProtoTestYdb::TestExportDecimalHugeYdb [GOOD] >> TMiniKQLProtoTestYdb::TestExportEmptyOptionalOptionalYdb [GOOD] >> TMiniKQLProtoTestYdb::TestExportDictYdb >> TMiniKQLEngineFlatTest::TestCASBoth2Success [GOOD] >> TMiniKQLEngineFlatTest::TestEraseRowNoShards >> TMiniKQLProtoTestYdb::TestExportDictYdb [GOOD] >> TMiniKQLProtoTestYdb::TestCellsFromTuple [GOOD] >> TMiniKQLEngineFlatTest::TestEraseRowNoShards [GOOD] >> TMiniKQLEngineFlatTest::TestDiagnostics [GOOD] >> TMiniKQLEngineFlatTest::TestCombineByKeyPushdown |87.6%| [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/close_with_load/unittest >> KikimrIcGateway::TestCreateSameExternalTable [GOOD] >> KikimrIcGateway::TestCreateResourcePool >> TMiniKQLEngineFlatTest::TestCombineByKeyPushdown [GOOD] >> TMiniKQLEngineFlatTest::TestCombineByKeyNoPushdown |87.6%| [TM] {BAZEL_UPLOAD} ydb/core/kqp/ut/close_with_load/unittest |87.6%| [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/close_with_load/unittest |87.6%| [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/close_with_load/unittest |87.6%| [TM] {BAZEL_UPLOAD} ydb/core/kqp/ut/close_with_load/unittest >> TMiniKQLEngineFlatTest::TestCombineByKeyNoPushdown [GOOD] >> TMiniKQLEngineFlatTest::TestLengthPushdown >> KikimrIcGateway::TestDropExternalDataSource [GOOD] >> KikimrIcGateway::TestDropResourcePool |87.6%| [TM] {BAZEL_UPLOAD} ydb/core/kqp/ut/close_with_load/unittest >> TxUsage::WriteToTopic_Demo_18_RestartAfterCommit_Query >> TMiniKQLEngineFlatTest::TestLengthPushdown [GOOD] >> TMiniKQLEngineFlatTest::TestInternalResult >> TMiniKQLEngineFlatTest::TestInternalResult [GOOD] >> TMiniKQLEngineFlatTest::TestIndependentSelects >> TMiniKQLEngineFlatTest::TestIndependentSelects [GOOD] >> TMiniKQLEngineFlatTest::TestCrossTableRs >> TMiniKQLEngineFlatTest::TestCrossTableRs [GOOD] |87.6%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/schemeshard/ut_resource_pool/ydb-core-tx-schemeshard-ut_resource_pool |87.6%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/schemeshard/ut_resource_pool/ydb-core-tx-schemeshard-ut_resource_pool |87.7%| [LD] {RESULT} $(B)/ydb/core/tx/schemeshard/ut_resource_pool/ydb-core-tx-schemeshard-ut_resource_pool |87.7%| [TM] {asan, default-linux-x86_64, release} ydb/core/engine/ut/unittest >> TMiniKQLProtoTestYdb::TestCellsFromTuple [GOOD] |87.7%| [TM] {BAZEL_UPLOAD} ydb/core/engine/ut/unittest >> KikimrIcGateway::TestDropTable [GOOD] >> KikimrIcGateway::TestLoadAwsSecretValueFromExternalDataSourceMetadata+UseSchemaSecrets |87.7%| [LD] {default-linux-x86_64, release, asan} 
$(B)/ydb/core/tx/schemeshard/ut_secret_reboots/ydb-core-tx-schemeshard-ut_secret_reboots |87.7%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/schemeshard/ut_secret_reboots/ydb-core-tx-schemeshard-ut_secret_reboots |87.7%| [LD] {RESULT} $(B)/ydb/core/tx/schemeshard/ut_secret_reboots/ydb-core-tx-schemeshard-ut_secret_reboots ------- [TM] {asan, default-linux-x86_64, release} ydb/core/engine/ut/unittest >> TMiniKQLEngineFlatTest::TestCrossTableRs [GOOD] Test command err: SetProgram (370): ydb/core/engine/mkql_engine_flat.cpp:183: ExtractResultType(): requirement !label.StartsWith(TxInternalResultPrefix) failed. Label can't be used in SetResult as it's reserved for internal purposes: __cantuse PrepareShardPrograms (491): too many shard readsets (1 > 0), src tables: [200:301:0], dst tables: [200:302:0] Type { Kind: Struct } |87.7%| [TM] {BAZEL_UPLOAD} ydb/core/engine/ut/unittest >> BsControllerConfig::Basic >> KikimrIcGateway::TestLoadServiceAccountSecretValueFromExternalDataSourceMetadata-UseSchemaSecrets-UseAuthToken [GOOD] >> KikimrIcGateway::TestLoadServiceAccountSecretValueFromExternalDataSourceMetadata+UseSchemaSecrets-UseAuthToken >> BasicUsage::RetryDiscoveryWithCancel [GOOD] >> BasicUsage::RecreateObserver |87.7%| [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/close_with_load/unittest |87.7%| [TM] {BAZEL_UPLOAD} ydb/core/kqp/ut/close_with_load/unittest |87.7%| [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/close_with_load/unittest |87.7%| [TM] {BAZEL_UPLOAD} ydb/core/kqp/ut/close_with_load/unittest >> DataShardVolatile::UpsertDependenciesShardsRestart+UseSink [GOOD] >> DataShardVolatile::UpsertDependenciesShardsRestart-UseSink >> TSchemeShardConsistentCopyTablesTest::ConsistentCopyTableWithGlobalSyncIndex |87.7%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_consistent_copy_tables/unittest |87.7%| [TM] {BAZEL_UPLOAD} ydb/core/tx/schemeshard/ut_consistent_copy_tables/unittest >> BsControllerConfig::OverlayMap >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-LocalUser-DropUser-63 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-LocalUser-DropUser-64 >> KqpBatchDelete::SimpleOnePartition [GOOD] >> TxUsage::Sinks_Oltp_WriteToTopicAndTable_5_Query [GOOD] >> KikimrIcGateway::TestLoadMdbBasicSecretValueFromExternalDataSourceMetadata+UseSchemaSecrets [GOOD] >> KikimrIcGateway::TestLoadMdbBasicSecretValueFromExternalDataSourceMetadata-UseSchemaSecrets >> DataShardVolatile::DistributedUpsertRestartAfterPlan+UseSink [GOOD] >> DataShardVolatile::DistributedUpsertRestartAfterPlan-UseSink >> TKeyValueTest::TestGetStatusWorks [GOOD] >> AsyncIndexChangeCollector::UpsertSingleRow >> BsControllerConfig::OverlayMap [GOOD] >> BsControllerConfig::OverlayMapCrossReferences >> TxUsage::WriteToTopic_Demo_47_Query [GOOD] >> TTopicApiDescribes::GetLocalDescribe [GOOD] >> TxUsage::Sinks_Oltp_WriteToTopicAndTable_6_Table >> AsyncIndexChangeCollector::InsertSingleRow |87.7%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/datashard/ut_minstep/ydb-core-tx-datashard-ut_minstep |87.7%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/datashard/ut_minstep/ydb-core-tx-datashard-ut_minstep |87.7%| [LD] {RESULT} $(B)/ydb/core/tx/datashard/ut_minstep/ydb-core-tx-datashard-ut_minstep ------- [TM] {asan, default-linux-x86_64, release} ydb/core/keyvalue/ut/unittest >> TKeyValueTest::TestGetStatusWorks [GOOD] Test command err: Leader for TabletID 72057594037927937 is [0:0:0] sender: [1:56:2057] recipient: [1:52:2097] IGNORE Leader for TabletID 
72057594037927937 is [0:0:0] sender: [1:56:2057] recipient: [1:52:2097] Leader for TabletID 72057594037927937 is [1:58:2099] sender: [1:59:2057] recipient: [1:52:2097] Leader for TabletID 72057594037927937 is [1:58:2099] sender: [1:76:2057] recipient: [1:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [2:56:2057] recipient: [2:54:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [2:56:2057] recipient: [2:54:2097] Leader for TabletID 72057594037927937 is [2:58:2099] sender: [2:59:2057] recipient: [2:54:2097] Leader for TabletID 72057594037927937 is [2:58:2099] sender: [2:76:2057] recipient: [2:14:2061] !Reboot 72057594037927937 (actor [2:58:2099]) on event NKikimr::TEvTabletPipe::TEvServerConnected ! Leader for TabletID 72057594037927937 is [2:58:2099] sender: [2:78:2057] recipient: [2:39:2086] Leader for TabletID 72057594037927937 is [2:58:2099] sender: [2:81:2057] recipient: [2:80:2112] Leader for TabletID 72057594037927937 is [2:82:2113] sender: [2:83:2057] recipient: [2:80:2112] !Reboot 72057594037927937 (actor [2:58:2099]) rebooted! !Reboot 72057594037927937 (actor [2:58:2099]) tablet resolver refreshed! new actor is[2:82:2113] Leader for TabletID 72057594037927937 is [2:82:2113] sender: [2:198:2057] recipient: [2:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [3:56:2057] recipient: [3:52:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [3:56:2057] recipient: [3:52:2097] Leader for TabletID 72057594037927937 is [3:58:2099] sender: [3:59:2057] recipient: [3:52:2097] Leader for TabletID 72057594037927937 is [3:58:2099] sender: [3:76:2057] recipient: [3:14:2061] !Reboot 72057594037927937 (actor [3:58:2099]) on event NKikimr::TEvKeyValue::TEvRequest ! Leader for TabletID 72057594037927937 is [3:58:2099] sender: [3:78:2057] recipient: [3:39:2086] Leader for TabletID 72057594037927937 is [3:58:2099] sender: [3:81:2057] recipient: [3:80:2112] Leader for TabletID 72057594037927937 is [3:82:2113] sender: [3:83:2057] recipient: [3:80:2112] !Reboot 72057594037927937 (actor [3:58:2099]) rebooted! !Reboot 72057594037927937 (actor [3:58:2099]) tablet resolver refreshed! new actor is[3:82:2113] Leader for TabletID 72057594037927937 is [3:82:2113] sender: [3:198:2057] recipient: [3:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [4:56:2057] recipient: [4:53:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [4:56:2057] recipient: [4:53:2097] Leader for TabletID 72057594037927937 is [4:58:2099] sender: [4:59:2057] recipient: [4:53:2097] Leader for TabletID 72057594037927937 is [4:58:2099] sender: [4:76:2057] recipient: [4:14:2061] !Reboot 72057594037927937 (actor [4:58:2099]) on event NKikimr::TEvKeyValue::TEvIntermediate ! Leader for TabletID 72057594037927937 is [4:58:2099] sender: [4:79:2057] recipient: [4:39:2086] Leader for TabletID 72057594037927937 is [4:58:2099] sender: [4:82:2057] recipient: [4:81:2112] Leader for TabletID 72057594037927937 is [4:83:2113] sender: [4:84:2057] recipient: [4:81:2112] !Reboot 72057594037927937 (actor [4:58:2099]) rebooted! !Reboot 72057594037927937 (actor [4:58:2099]) tablet resolver refreshed! 
new actor is[4:83:2113] Leader for TabletID 72057594037927937 is [4:83:2113] sender: [4:199:2057] recipient: [4:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [5:56:2057] recipient: [5:53:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [5:56:2057] recipient: [5:53:2097] Leader for TabletID 72057594037927937 is [5:58:2099] sender: [5:59:2057] recipient: [5:53:2097] Leader for TabletID 72057594037927937 is [5:58:2099] sender: [5:76:2057] recipient: [5:14:2061] !Reboot 72057594037927937 (actor [5:58:2099]) on event NKikimr::TEvTabletPipe::TEvServerConnected ! Leader for TabletID 72057594037927937 is [5:58:2099] sender: [5:82:2057] recipient: [5:39:2086] Leader for TabletID 72057594037927937 is [5:58:2099] sender: [5:85:2057] recipient: [5:84:2115] Leader for TabletID 72057594037927937 is [5:86:2116] sender: [5:87:2057] recipient: [5:84:2115] !Reboot 72057594037927937 (actor [5:58:2099]) rebooted! !Reboot 72057594037927937 (actor [5:58:2099]) tablet resolver refreshed! new actor is[5:86:2116] Leader for TabletID 72057594037927937 is [5:86:2116] sender: [5:202:2057] recipient: [5:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [6:56:2057] recipient: [6:53:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [6:56:2057] recipient: [6:53:2097] Leader for TabletID 72057594037927937 is [6:58:2099] sender: [6:59:2057] recipient: [6:53:2097] Leader for TabletID 72057594037927937 is [6:58:2099] sender: [6:76:2057] recipient: [6:14:2061] !Reboot 72057594037927937 (actor [6:58:2099]) on event NKikimr::TEvKeyValue::TEvRequest ! Leader for TabletID 72057594037927937 is [6:58:2099] sender: [6:82:2057] recipient: [6:39:2086] Leader for TabletID 72057594037927937 is [6:58:2099] sender: [6:85:2057] recipient: [6:84:2115] Leader for TabletID 72057594037927937 is [6:86:2116] sender: [6:87:2057] recipient: [6:84:2115] !Reboot 72057594037927937 (actor [6:58:2099]) rebooted! !Reboot 72057594037927937 (actor [6:58:2099]) tablet resolver refreshed! new actor is[6:86:2116] Leader for TabletID 72057594037927937 is [6:86:2116] sender: [6:202:2057] recipient: [6:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [7:56:2057] recipient: [7:53:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [7:56:2057] recipient: [7:53:2097] Leader for TabletID 72057594037927937 is [7:58:2099] sender: [7:59:2057] recipient: [7:53:2097] Leader for TabletID 72057594037927937 is [7:58:2099] sender: [7:76:2057] recipient: [7:14:2061] !Reboot 72057594037927937 (actor [7:58:2099]) on event NKikimr::TEvKeyValue::TEvIntermediate ! Leader for TabletID 72057594037927937 is [7:58:2099] sender: [7:83:2057] recipient: [7:39:2086] Leader for TabletID 72057594037927937 is [7:58:2099] sender: [7:86:2057] recipient: [7:85:2115] Leader for TabletID 72057594037927937 is [7:87:2116] sender: [7:88:2057] recipient: [7:85:2115] !Reboot 72057594037927937 (actor [7:58:2099]) rebooted! !Reboot 72057594037927937 (actor [7:58:2099]) tablet resolver refreshed! 
new actor is[7:87:2116] Leader for TabletID 72057594037927937 is [7:87:2116] sender: [7:203:2057] recipient: [7:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [8:56:2057] recipient: [8:52:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [8:56:2057] recipient: [8:52:2097] Leader for TabletID 72057594037927937 is [8:58:2099] sender: [8:59:2057] recipient: [8:52:2097] Leader for TabletID 72057594037927937 is [8:58:2099] sender: [8:76:2057] recipient: [8:14:2061] !Reboot 72057594037927937 (actor [8:58:2099]) on event NKikimr::TEvTabletPipe::TEvServerConnected ! Leader for TabletID 72057594037927937 is [8:58:2099] sender: [8:85:2057] recipient: [8:39:2086] Leader for TabletID 72057594037927937 is [8:58:2099] sender: [8:88:2057] recipient: [8:87:2117] Leader for TabletID 72057594037927937 is [8:89:2118] sender: [8:90:2057] recipient: [8:87:2117] !Reboot 72057594037927937 (actor [8:58:2099]) rebooted! !Reboot 72057594037927937 (actor [8:58:2099]) tablet resolver refreshed! new actor is[8:89:2118] Leader for TabletID 72057594037927937 is [8:89:2118] sender: [8:205:2057] recipient: [8:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [9:56:2057] recipient: [9:54:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [9:56:2057] recipient: [9:54:2097] Leader for TabletID 72057594037927937 is [9:58:2099] sender: [9:59:2057] recipient: [9:54:2097] Leader for TabletID 72057594037927937 is [9:58:2099] sender: [9:76:2057] recipient: [9:14:2061] !Reboot 72057594037927937 (actor [9:58:2099]) on event NKikimr::TEvKeyValue::TEvRequest ! Leader for TabletID 72057594037927937 is [9:58:2099] sender: [9:85:2057] recipient: [9:39:2086] Leader for TabletID 72057594037927937 is [9:58:2099] sender: [9:88:2057] recipient: [9:87:2117] Leader for TabletID 72057594037927937 is [9:89:2118] sender: [9:90:2057] recipient: [9:87:2117] !Reboot 72057594037927937 (actor [9:58:2099]) rebooted! !Reboot 72057594037927937 (actor [9:58:2099]) tablet resolver refreshed! new actor is[9:89:2118] Leader for TabletID 72057594037927937 is [9:89:2118] sender: [9:205:2057] recipient: [9:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [10:56:2057] recipient: [10:53:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [10:56:2057] recipient: [10:53:2097] Leader for TabletID 72057594037927937 is [10:58:2099] sender: [10:59:2057] recipient: [10:53:2097] Leader for TabletID 72057594037927937 is [10:58:2099] sender: [10:76:2057] recipient: [10:14:2061] !Reboot 72057594037927937 (actor [10:58:2099]) on event NKikimr::TEvKeyValue::TEvIntermediate ! Leader for TabletID 72057594037927937 is [10:58:2099] sender: [10:86:2057] recipient: [10:39:2086] Leader for TabletID 72057594037927937 is [10:58:2099] sender: [10:89:2057] recipient: [10:88:2117] Leader for TabletID 72057594037927937 is [10:90:2118] sender: [10:91:2057] recipient: [10:88:2117] !Reboot 72057594037927937 (actor [10:58:2099]) rebooted! !Reboot 72057594037927937 (actor [10:58:2099]) tablet resolver refreshed! 
new actor is[10:90:2118] Leader for TabletID 72057594037927937 is [10:90:2118] sender: [10:206:2057] recipient: [10:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [11:56:2057] recipient: [11:53:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [11:56:2057] recipient: [11:53:2097] Leader for TabletID 72057594037927937 is [11:58:2099] sender: [11:59:2057] recipient: [11:53:2097] Leader for TabletID 72057594037927937 is [11:58:2099] sender: [11:76:2057] recipient: [11:14:2061] !Reboot 72057594037927937 (actor [11:58:2099]) on event NKikimr::TEvTabletPipe::TEvServerConnected ! Leader for TabletID 72057594037927937 is [11:58:2099] sender: [11:88:2057] recipient: [11:39:2086] Leader for TabletID 72057594037927937 is [11:58:2099] sender: [11:91:2057] recipient: [11:90:2119] Leader for TabletID 72057594037927937 is [11:92:2120] sender: [11:93:2057] recipient: [11:90:2119] !Reboot 72057594037927937 (actor [11:58:2099]) rebooted! !Reboot 72057594037927937 (actor [11:58:2099]) tablet resolver refreshed! new actor is[11:92:2120] Leader for TabletID 72057594037927937 is [11:92:2120] sender: [11:208:2057] recipient: [11:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [12:56:2057] recipient: [12:53:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [12:56:2057] recipient: [12:53:2097] Leader for TabletID 72057594037927937 is [12:58:2099] sender: [12:59:2057] recipient: [12:53:2097] Leader for TabletID 72057594037927937 is [12:58:2099] sender: [12:76:2057] recipient: [12:14:2061] !Reboot 72057594037927937 (actor [12:58:2099]) on event NKikimr::TEvKeyValue::TEvRequest ! Leader for TabletID 72057594037927937 is [12:58:2099] sender: [12:88:2057] recipient: [12:39:2086] Leader for TabletID 72057594037927937 is [12:58:2099] sender: [12:91:2057] recipient: [12:90:2119] Leader for TabletID 72057594037927937 is [12:92:2120] sender: [12:93:2057] recipient: [12:90:2119] !Reboot 72057594037927937 (actor [12:58:2099]) rebooted! !Reboot 72057594037927937 (actor [12:58:2099]) tablet resolver refreshed! new actor is[12:92:2120] Leader for TabletID 72057594037927937 is [12:92:2120] sender: [12:208:2057] recipient: [12:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [13:56:2057] recipient: [13:52:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [13:56:2057] recipient: [13:52:2097] Leader for TabletID 72057594037927937 is [13:58:2099] sender: [13:59:2057] recipient: [13:52:2097] Leader for TabletID 72057594037927937 is [13:58:2099] send ... 1:2057] recipient: [28:90:2119] Leader for TabletID 72057594037927937 is [28:92:2120] sender: [28:93:2057] recipient: [28:90:2119] !Reboot 72057594037927937 (actor [28:58:2099]) rebooted! !Reboot 72057594037927937 (actor [28:58:2099]) tablet resolver refreshed! new actor is[28:92:2120] Leader for TabletID 72057594037927937 is [28:92:2120] sender: [28:208:2057] recipient: [28:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [29:56:2057] recipient: [29:53:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [29:56:2057] recipient: [29:53:2097] Leader for TabletID 72057594037927937 is [29:58:2099] sender: [29:59:2057] recipient: [29:53:2097] Leader for TabletID 72057594037927937 is [29:58:2099] sender: [29:76:2057] recipient: [29:14:2061] !Reboot 72057594037927937 (actor [29:58:2099]) on event NKikimr::TEvKeyValue::TEvExecuteTransaction ! 
Leader for TabletID 72057594037927937 is [29:58:2099] sender: [29:88:2057] recipient: [29:39:2086] Leader for TabletID 72057594037927937 is [29:58:2099] sender: [29:91:2057] recipient: [29:90:2119] Leader for TabletID 72057594037927937 is [29:92:2120] sender: [29:93:2057] recipient: [29:90:2119] !Reboot 72057594037927937 (actor [29:58:2099]) rebooted! !Reboot 72057594037927937 (actor [29:58:2099]) tablet resolver refreshed! new actor is[29:92:2120] Leader for TabletID 72057594037927937 is [29:92:2120] sender: [29:208:2057] recipient: [29:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [30:56:2057] recipient: [30:52:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [30:56:2057] recipient: [30:52:2097] Leader for TabletID 72057594037927937 is [30:58:2099] sender: [30:59:2057] recipient: [30:52:2097] Leader for TabletID 72057594037927937 is [30:58:2099] sender: [30:76:2057] recipient: [30:14:2061] !Reboot 72057594037927937 (actor [30:58:2099]) on event NKikimr::TEvKeyValue::TEvIntermediate ! Leader for TabletID 72057594037927937 is [30:58:2099] sender: [30:89:2057] recipient: [30:39:2086] Leader for TabletID 72057594037927937 is [30:58:2099] sender: [30:92:2057] recipient: [30:91:2119] Leader for TabletID 72057594037927937 is [30:93:2120] sender: [30:94:2057] recipient: [30:91:2119] !Reboot 72057594037927937 (actor [30:58:2099]) rebooted! !Reboot 72057594037927937 (actor [30:58:2099]) tablet resolver refreshed! new actor is[30:93:2120] Leader for TabletID 72057594037927937 is [30:93:2120] sender: [30:209:2057] recipient: [30:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [31:56:2057] recipient: [31:54:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [31:56:2057] recipient: [31:54:2097] Leader for TabletID 72057594037927937 is [31:58:2099] sender: [31:59:2057] recipient: [31:54:2097] Leader for TabletID 72057594037927937 is [31:58:2099] sender: [31:76:2057] recipient: [31:14:2061] !Reboot 72057594037927937 (actor [31:58:2099]) on event NKikimr::TEvTabletPipe::TEvServerConnected ! Leader for TabletID 72057594037927937 is [31:58:2099] sender: [31:92:2057] recipient: [31:39:2086] Leader for TabletID 72057594037927937 is [31:58:2099] sender: [31:95:2057] recipient: [31:94:2122] Leader for TabletID 72057594037927937 is [31:96:2123] sender: [31:97:2057] recipient: [31:94:2122] !Reboot 72057594037927937 (actor [31:58:2099]) rebooted! !Reboot 72057594037927937 (actor [31:58:2099]) tablet resolver refreshed! new actor is[31:96:2123] Leader for TabletID 72057594037927937 is [31:96:2123] sender: [31:212:2057] recipient: [31:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [32:56:2057] recipient: [32:52:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [32:56:2057] recipient: [32:52:2097] Leader for TabletID 72057594037927937 is [32:58:2099] sender: [32:59:2057] recipient: [32:52:2097] Leader for TabletID 72057594037927937 is [32:58:2099] sender: [32:76:2057] recipient: [32:14:2061] !Reboot 72057594037927937 (actor [32:58:2099]) on event NKikimr::TEvKeyValue::TEvRead ! Leader for TabletID 72057594037927937 is [32:58:2099] sender: [32:92:2057] recipient: [32:39:2086] Leader for TabletID 72057594037927937 is [32:58:2099] sender: [32:95:2057] recipient: [32:94:2122] Leader for TabletID 72057594037927937 is [32:96:2123] sender: [32:97:2057] recipient: [32:94:2122] !Reboot 72057594037927937 (actor [32:58:2099]) rebooted! !Reboot 72057594037927937 (actor [32:58:2099]) tablet resolver refreshed! 
new actor is[32:96:2123] Leader for TabletID 72057594037927937 is [32:96:2123] sender: [32:212:2057] recipient: [32:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [33:56:2057] recipient: [33:53:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [33:56:2057] recipient: [33:53:2097] Leader for TabletID 72057594037927937 is [33:58:2099] sender: [33:59:2057] recipient: [33:53:2097] Leader for TabletID 72057594037927937 is [33:58:2099] sender: [33:76:2057] recipient: [33:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [34:56:2057] recipient: [34:53:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [34:56:2057] recipient: [34:53:2097] Leader for TabletID 72057594037927937 is [34:58:2099] sender: [34:59:2057] recipient: [34:53:2097] Leader for TabletID 72057594037927937 is [34:58:2099] sender: [34:76:2057] recipient: [34:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [35:56:2057] recipient: [35:53:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [35:56:2057] recipient: [35:53:2097] Leader for TabletID 72057594037927937 is [35:58:2099] sender: [35:59:2057] recipient: [35:53:2097] Leader for TabletID 72057594037927937 is [35:58:2099] sender: [35:76:2057] recipient: [35:14:2061] !Reboot 72057594037927937 (actor [35:58:2099]) on event NKikimr::TEvTabletPipe::TEvServerConnected ! Leader for TabletID 72057594037927937 is [35:58:2099] sender: [35:78:2057] recipient: [35:39:2086] Leader for TabletID 72057594037927937 is [35:58:2099] sender: [35:81:2057] recipient: [35:80:2112] Leader for TabletID 72057594037927937 is [35:82:2113] sender: [35:83:2057] recipient: [35:80:2112] !Reboot 72057594037927937 (actor [35:58:2099]) rebooted! !Reboot 72057594037927937 (actor [35:58:2099]) tablet resolver refreshed! new actor is[35:82:2113] Leader for TabletID 72057594037927937 is [35:82:2113] sender: [35:198:2057] recipient: [35:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [36:56:2057] recipient: [36:53:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [36:56:2057] recipient: [36:53:2097] Leader for TabletID 72057594037927937 is [36:58:2099] sender: [36:59:2057] recipient: [36:53:2097] Leader for TabletID 72057594037927937 is [36:58:2099] sender: [36:76:2057] recipient: [36:14:2061] !Reboot 72057594037927937 (actor [36:58:2099]) on event NKikimr::TEvKeyValue::TEvRequest ! Leader for TabletID 72057594037927937 is [36:58:2099] sender: [36:78:2057] recipient: [36:39:2086] Leader for TabletID 72057594037927937 is [36:58:2099] sender: [36:81:2057] recipient: [36:80:2112] Leader for TabletID 72057594037927937 is [36:82:2113] sender: [36:83:2057] recipient: [36:80:2112] !Reboot 72057594037927937 (actor [36:58:2099]) rebooted! !Reboot 72057594037927937 (actor [36:58:2099]) tablet resolver refreshed! new actor is[36:82:2113] Leader for TabletID 72057594037927937 is [36:82:2113] sender: [36:198:2057] recipient: [36:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [37:56:2057] recipient: [37:52:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [37:56:2057] recipient: [37:52:2097] Leader for TabletID 72057594037927937 is [37:58:2099] sender: [37:59:2057] recipient: [37:52:2097] Leader for TabletID 72057594037927937 is [37:58:2099] sender: [37:76:2057] recipient: [37:14:2061] !Reboot 72057594037927937 (actor [37:58:2099]) on event NKikimr::TEvKeyValue::TEvIntermediate ! 
Leader for TabletID 72057594037927937 is [37:58:2099] sender: [37:79:2057] recipient: [37:39:2086] Leader for TabletID 72057594037927937 is [37:58:2099] sender: [37:82:2057] recipient: [37:81:2112] Leader for TabletID 72057594037927937 is [37:83:2113] sender: [37:84:2057] recipient: [37:81:2112] !Reboot 72057594037927937 (actor [37:58:2099]) rebooted! !Reboot 72057594037927937 (actor [37:58:2099]) tablet resolver refreshed! new actor is[37:83:2113] Leader for TabletID 72057594037927937 is [37:83:2113] sender: [37:199:2057] recipient: [37:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [38:56:2057] recipient: [38:54:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [38:56:2057] recipient: [38:54:2097] Leader for TabletID 72057594037927937 is [38:58:2099] sender: [38:59:2057] recipient: [38:54:2097] Leader for TabletID 72057594037927937 is [38:58:2099] sender: [38:76:2057] recipient: [38:14:2061] !Reboot 72057594037927937 (actor [38:58:2099]) on event NKikimr::TEvTabletPipe::TEvServerConnected ! Leader for TabletID 72057594037927937 is [38:58:2099] sender: [38:81:2057] recipient: [38:39:2086] Leader for TabletID 72057594037927937 is [38:58:2099] sender: [38:84:2057] recipient: [38:83:2114] Leader for TabletID 72057594037927937 is [38:85:2115] sender: [38:86:2057] recipient: [38:83:2114] !Reboot 72057594037927937 (actor [38:58:2099]) rebooted! !Reboot 72057594037927937 (actor [38:58:2099]) tablet resolver refreshed! new actor is[38:85:2115] Leader for TabletID 72057594037927937 is [38:85:2115] sender: [38:201:2057] recipient: [38:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [39:56:2057] recipient: [39:53:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [39:56:2057] recipient: [39:53:2097] Leader for TabletID 72057594037927937 is [39:58:2099] sender: [39:59:2057] recipient: [39:53:2097] Leader for TabletID 72057594037927937 is [39:58:2099] sender: [39:76:2057] recipient: [39:14:2061] !Reboot 72057594037927937 (actor [39:58:2099]) on event NKikimr::TEvKeyValue::TEvRequest ! Leader for TabletID 72057594037927937 is [39:58:2099] sender: [39:81:2057] recipient: [39:39:2086] Leader for TabletID 72057594037927937 is [39:58:2099] sender: [39:84:2057] recipient: [39:83:2114] Leader for TabletID 72057594037927937 is [39:85:2115] sender: [39:86:2057] recipient: [39:83:2114] !Reboot 72057594037927937 (actor [39:58:2099]) rebooted! !Reboot 72057594037927937 (actor [39:58:2099]) tablet resolver refreshed! new actor is[39:85:2115] Leader for TabletID 72057594037927937 is [39:85:2115] sender: [39:201:2057] recipient: [39:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [40:56:2057] recipient: [40:53:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [40:56:2057] recipient: [40:53:2097] Leader for TabletID 72057594037927937 is [40:58:2099] sender: [40:59:2057] recipient: [40:53:2097] Leader for TabletID 72057594037927937 is [40:58:2099] sender: [40:76:2057] recipient: [40:14:2061] !Reboot 72057594037927937 (actor [40:58:2099]) on event NKikimr::TEvKeyValue::TEvIntermediate ! Leader for TabletID 72057594037927937 is [40:58:2099] sender: [40:82:2057] recipient: [40:39:2086] Leader for TabletID 72057594037927937 is [40:58:2099] sender: [40:85:2057] recipient: [40:84:2114] Leader for TabletID 72057594037927937 is [40:86:2115] sender: [40:87:2057] recipient: [40:84:2114] !Reboot 72057594037927937 (actor [40:58:2099]) rebooted! !Reboot 72057594037927937 (actor [40:58:2099]) tablet resolver refreshed! 
new actor is[40:86:2115] Leader for TabletID 72057594037927937 is [40:86:2115] sender: [40:202:2057] recipient: [40:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [41:56:2057] recipient: [41:53:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [41:56:2057] recipient: [41:53:2097] Leader for TabletID 72057594037927937 is [41:58:2099] sender: [41:59:2057] recipient: [41:53:2097] Leader for TabletID 72057594037927937 is [41:58:2099] sender: [41:76:2057] recipient: [41:14:2061] |87.7%| [TM] {BAZEL_UPLOAD} ydb/core/keyvalue/ut/unittest ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/batch_operations/unittest >> KqpBatchDelete::SimpleOnePartition [GOOD] Test command err: Trying to start YDB, gRPC: 28375, MsgBus: 18807 2026-01-08T15:34:28.919989Z node 1 :METADATA_PROVIDER WARN: log.cpp:841: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7593014564496544344:2075];send_to=[0:7307199536658146131:7762515]; 2026-01-08T15:34:28.926469Z node 1 :METADATA_PROVIDER ERROR: log.cpp:841: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2026-01-08T15:34:29.466444Z node 1 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: LookupError, path: Root/.metadata/script_executions 2026-01-08T15:34:29.503111Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2026-01-08T15:34:29.503239Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2026-01-08T15:34:29.580825Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2026-01-08T15:34:29.758882Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1374: Notification cookie mismatch for subscription [1:7593014564496544300:2081] 1767886468881700 != 1767886468881703 2026-01-08T15:34:29.763983Z node 1 :IMPORT WARN: schemeshard_import.cpp:440: Table profiles were not loaded 2026-01-08T15:34:29.785663Z node 1 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: PathErrorUnknown, path: Root/.metadata/script_executions 2026-01-08T15:34:29.809587Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2026-01-08T15:34:29.809618Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2026-01-08T15:34:29.809644Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2026-01-08T15:34:29.809729Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2026-01-08T15:34:29.951050Z node 1 :TX_CONVEYOR ERROR: log.cpp:841: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2026-01-08T15:34:30.565415Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at 
ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2026-01-08T15:34:30.687874Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:34:30.898022Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:34:31.138260Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:34:31.250985Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:34:33.521620Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7593014585971382663:2400], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:34:33.521733Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:263: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:34:33.522563Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7593014585971382673:2401], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:34:33.522658Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:263: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:34:33.884858Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:34:33.905180Z node 1 :METADATA_PROVIDER ERROR: log.cpp:841: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7593014564496544344:2075];send_to=[0:7307199536658146131:7762515]; 2026-01-08T15:34:33.905242Z node 1 :METADATA_PROVIDER ERROR: log.cpp:841: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2026-01-08T15:34:33.940168Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:34:33.990375Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:34:34.037210Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:34:34.101327Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:34:34.172498Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:34:34.241275Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:34:34.307842Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710669:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:34:34.403503Z node 1 :KQP_WORKLOAD_SERVICE WARN: 
scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7593014590266350844:2484], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:34:34.403632Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:263: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:34:34.404183Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7593014590266350850:2488], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:34:34.404203Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7593014590266350849:2487], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:34:34.404237Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:263: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:34:34.408713Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710670:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2026-01-08T15:34:34.423101Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7593014590266350853:2489], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710670 completed, doublechecking } 2026-01-08T15:34:34.492879Z node 1 :TX_PROXY ERROR: schemereq.cpp:624: Actor# [1:7593014590266350906:3773] txid# 281474976710671, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 53], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } Trying to start YDB, gRPC: 18422, MsgBus: 19033 2026-01-08T15:34:40.783557Z node 2 :IMPORT WARN: schemeshard_import.cpp:440: Table profiles were not loaded 2026-01-08T15:34:40.785069Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1374: Notification cookie mismatch for subscription [2:7593014615847514927:2081] 1767886480612022 != 176788648061202 ... TADATA_PROVIDER WARN: log.cpp:841: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[4:7593014711615867760:2143];send_to=[0:7307199536658146131:7762515]; 2026-01-08T15:35:02.421464Z node 4 :METADATA_PROVIDER ERROR: log.cpp:841: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2026-01-08T15:35:02.431576Z node 4 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: LookupError, path: Root/.metadata/script_executions 2026-01-08T15:35:02.605740Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2026-01-08T15:35:02.605836Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2026-01-08T15:35:02.629639Z node 4 :IMPORT WARN: schemeshard_import.cpp:440: Table profiles were not loaded 2026-01-08T15:35:02.635934Z node 4 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1374: Notification cookie mismatch for subscription [4:7593014711615867647:2081] 1767886502387872 != 1767886502387875 2026-01-08T15:35:02.646442Z node 4 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: LookupError, path: Root/.metadata/script_executions 2026-01-08T15:35:02.665991Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Connecting -> Connected 2026-01-08T15:35:02.773330Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2026-01-08T15:35:02.773353Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2026-01-08T15:35:02.773361Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2026-01-08T15:35:02.773431Z node 4 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2026-01-08T15:35:03.181007Z node 4 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: PathErrorUnknown, path: Root/.metadata/script_executions 2026-01-08T15:35:03.259605Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB 
called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2026-01-08T15:35:03.272866Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:123: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2026-01-08T15:35:03.282990Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:35:03.404232Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:35:03.422525Z node 4 :TX_CONVEYOR ERROR: log.cpp:841: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2026-01-08T15:35:03.765903Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:35:03.897065Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:35:06.981151Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7593014728795738698:2400], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:35:06.981249Z node 4 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:263: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:35:06.981739Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7593014728795738708:2401], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:35:06.981792Z node 4 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:263: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:35:07.240474Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:35:07.299401Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:35:07.347196Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:35:07.381954Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:35:07.394433Z node 4 :METADATA_PROVIDER ERROR: log.cpp:841: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[4:7593014711615867760:2143];send_to=[0:7307199536658146131:7762515]; 2026-01-08T15:35:07.394496Z node 4 :METADATA_PROVIDER ERROR: log.cpp:841: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2026-01-08T15:35:07.435583Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:35:07.509438Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:35:07.663248Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:35:07.741621Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710669:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:35:07.844595Z node 4 :KQP_WORKLOAD_SERVICE WARN: 
scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7593014733090706877:2484], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:35:07.844690Z node 4 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:263: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:35:07.845023Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7593014733090706882:2487], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:35:07.845083Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7593014733090706883:2488], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:35:07.845185Z node 4 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:263: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:35:07.850081Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710670:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2026-01-08T15:35:07.866931Z node 4 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [4:7593014733090706886:2489], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710670 completed, doublechecking } 2026-01-08T15:35:07.945689Z node 4 :TX_PROXY ERROR: schemereq.cpp:624: Actor# [4:7593014733090706939:3771] txid# 281474976710671, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 53], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } >> KikimrIcGateway::TestCreateResourcePool [GOOD] >> KikimrIcGateway::TestAlterResourcePool >> KikimrIcGateway::TestDropResourcePool [GOOD] >> KikimrIcGateway::TestCreateStreamingQuery |87.7%| [TM] {BAZEL_UPLOAD} ydb/core/kqp/ut/batch_operations/unittest >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-LocalUser-CreateUser-16 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-LocalUser-CreateUser-17 >> CdcStreamChangeCollector::UpsertManyRows >> BsControllerConfig::Basic [GOOD] >> BsControllerConfig::CommandRollbackWhenAlone >> TSchemeShardConsistentCopyTablesTest::ConsistentCopyTableWithGlobalSyncIndex [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/library/yql/dq/actors/compute/ut/unittest >> TSyncComputeActorTest::InputTransformMultichannel 2026-01-08 15:35:11,706 WARNING devtools.ya.test.programs.test_tool.run_test.run_test: Wrapper execution timed out 2026-01-08 15:35:12,097 WARNING devtools.ya.test.programs.test_tool.run_test.run_test: Wrapper has overrun 600 secs timeout. Process tree before termination: pid rss ref pdirt 45964 58.6M 58.2M 32.6M test_tool run_ut @/home/runner/.ya/build/build_root/663n/00412e/ydb/library/yql/dq/actors/compute/ut/test-results/unittest/testing_out_stuff/test_tool.args 46491 1.3G 1.2G 1.2G └─ ydb-library-yql-dq-actors-compute-ut --trace-path-append /home/runner/.ya/build/build_root/663n/00412e/ydb/library/yql/dq/actors/compute/ut/test-results/unittest/ytest.re Test command err: 2026-01-08T15:25:20.595725Z :KQP_COMPUTE ERROR: dq_async_compute_actor_ut.cpp:831: Square Test for: packets=1 watermarkPeriod=1 waitIntermediateAcks=0 channels=1 Assert !!watermark failed 2026-01-08T15:25:20.595739Z :KQP_COMPUTE ERROR: dq_async_compute_actor_ut.cpp:837: Square Test for: packets=1 watermarkPeriod=1 waitIntermediateAcks=0 channels=1 NO WATERMARK 2026-01-08T15:25:20.601259Z :KQP_COMPUTE ERROR: dq_async_compute_actor_ut.cpp:831: Square Test for: packets=1 watermarkPeriod=1 waitIntermediateAcks=0 channels=3 Assert !!watermark failed 2026-01-08T15:25:20.601271Z :KQP_COMPUTE ERROR: dq_async_compute_actor_ut.cpp:837: Square Test for: packets=1 watermarkPeriod=1 waitIntermediateAcks=0 channels=3 NO WATERMARK 2026-01-08T15:25:20.613963Z :KQP_COMPUTE ERROR: dq_async_compute_actor_ut.cpp:831: Square Test for: packets=1 watermarkPeriod=1 waitIntermediateAcks=0 channels=7 Assert !!watermark failed 2026-01-08T15:25:20.613976Z :KQP_COMPUTE ERROR: dq_async_compute_actor_ut.cpp:837: Square Test for: packets=1 watermarkPeriod=1 waitIntermediateAcks=0 channels=7 NO WATERMARK 2026-01-08T15:25:20.654623Z :KQP_COMPUTE ERROR: dq_async_compute_actor_ut.cpp:831: Square Test for: packets=1 watermarkPeriod=1 waitIntermediateAcks=0 channels=16 Assert !!watermark failed 2026-01-08T15:25:20.654644Z :KQP_COMPUTE ERROR: dq_async_compute_actor_ut.cpp:837: Square Test for: packets=1 watermarkPeriod=1 waitIntermediateAcks=0 channels=16 NO WATERMARK 2026-01-08T15:25:20.662538Z :KQP_COMPUTE ERROR: dq_async_compute_actor_ut.cpp:834: Square Test for: packets=2 watermarkPeriod=1 waitIntermediateAcks=0 channels=1 Assert *watermark == 
expectedWatermark failed Expected 1970-01-01T00:00:01.000000Z == 1970-01-01T00:00:02.000000Z, Watermark Delay is 1.000000s 2026-01-08T15:25:20.686549Z :KQP_COMPUTE ERROR: dq_async_compute_actor_ut.cpp:831: Square Test for: packets=2 watermarkPeriod=1 waitIntermediateAcks=0 channels=3 Assert !!watermark failed 2026-01-08T15:25:20.686562Z :KQP_COMPUTE ERROR: dq_async_compute_actor_ut.cpp:837: Square Test for: packets=2 watermarkPeriod=1 waitIntermediateAcks=0 channels=3 NO WATERMARK 2026-01-08T15:25:20.698627Z :KQP_COMPUTE ERROR: dq_async_compute_actor_ut.cpp:831: Square Test for: packets=2 watermarkPeriod=1 waitIntermediateAcks=0 channels=7 Assert !!watermark failed 2026-01-08T15:25:20.698639Z :KQP_COMPUTE ERROR: dq_async_compute_actor_ut.cpp:837: Square Test for: packets=2 watermarkPeriod=1 waitIntermediateAcks=0 channels=7 NO WATERMARK 2026-01-08T15:25:20.719028Z :KQP_COMPUTE ERROR: dq_async_compute_actor_ut.cpp:831: Square Test for: packets=2 watermarkPeriod=1 waitIntermediateAcks=0 channels=16 Assert !!watermark failed 2026-01-08T15:25:20.719037Z :KQP_COMPUTE ERROR: dq_async_compute_actor_ut.cpp:837: Square Test for: packets=2 watermarkPeriod=1 waitIntermediateAcks=0 channels=16 NO WATERMARK 2026-01-08T15:25:20.727990Z :KQP_COMPUTE ERROR: dq_async_compute_actor_ut.cpp:834: Square Test for: packets=3 watermarkPeriod=1 waitIntermediateAcks=0 channels=1 Assert *watermark == expectedWatermark failed Expected 1970-01-01T00:00:02.000000Z == 1970-01-01T00:00:03.000000Z, Watermark Delay is 1.000000s 2026-01-08T15:25:20.733990Z :KQP_COMPUTE ERROR: dq_async_compute_actor_ut.cpp:831: Square Test for: packets=3 watermarkPeriod=1 waitIntermediateAcks=0 channels=3 Assert !!watermark failed 2026-01-08T15:25:20.734001Z :KQP_COMPUTE ERROR: dq_async_compute_actor_ut.cpp:837: Square Test for: packets=3 watermarkPeriod=1 waitIntermediateAcks=0 channels=3 NO WATERMARK 2026-01-08T15:25:20.743232Z :KQP_COMPUTE ERROR: dq_async_compute_actor_ut.cpp:831: Square Test for: packets=3 watermarkPeriod=1 waitIntermediateAcks=0 channels=7 Assert !!watermark failed 2026-01-08T15:25:20.743240Z :KQP_COMPUTE ERROR: dq_async_compute_actor_ut.cpp:837: Square Test for: packets=3 watermarkPeriod=1 waitIntermediateAcks=0 channels=7 NO WATERMARK 2026-01-08T15:25:20.850665Z :KQP_COMPUTE ERROR: dq_async_compute_actor_ut.cpp:831: Square Test for: packets=3 watermarkPeriod=1 waitIntermediateAcks=0 channels=16 Assert !!watermark failed 2026-01-08T15:25:20.850678Z :KQP_COMPUTE ERROR: dq_async_compute_actor_ut.cpp:837: Square Test for: packets=3 watermarkPeriod=1 waitIntermediateAcks=0 channels=16 NO WATERMARK 2026-01-08T15:25:20.866568Z :KQP_COMPUTE ERROR: dq_async_compute_actor_ut.cpp:834: Square Test for: packets=4 watermarkPeriod=1 waitIntermediateAcks=0 channels=1 Assert *watermark == expectedWatermark failed Expected 1970-01-01T00:00:03.000000Z == 1970-01-01T00:00:04.000000Z, Watermark Delay is 1.000000s 2026-01-08T15:25:20.878458Z :KQP_COMPUTE ERROR: dq_async_compute_actor_ut.cpp:834: Square Test for: packets=4 watermarkPeriod=1 waitIntermediateAcks=0 channels=3 Assert *watermark == expectedWatermark failed Expected 1970-01-01T00:00:03.000000Z == 1970-01-01T00:00:04.000000Z, Watermark Delay is 1.000000s 2026-01-08T15:25:20.910560Z :KQP_COMPUTE ERROR: dq_async_compute_actor_ut.cpp:831: Square Test for: packets=4 watermarkPeriod=1 waitIntermediateAcks=0 channels=7 Assert !!watermark failed 2026-01-08T15:25:20.910583Z :KQP_COMPUTE ERROR: dq_async_compute_actor_ut.cpp:837: Square Test for: packets=4 watermarkPeriod=1 
waitIntermediateAcks=0 channels=7 NO WATERMARK 2026-01-08T15:25:20.943547Z :KQP_COMPUTE ERROR: dq_async_compute_actor_ut.cpp:831: Square Test for: packets=4 watermarkPeriod=1 waitIntermediateAcks=0 channels=16 Assert !!watermark failed 2026-01-08T15:25:20.943558Z :KQP_COMPUTE ERROR: dq_async_compute_actor_ut.cpp:837: Square Test for: packets=4 watermarkPeriod=1 waitIntermediateAcks=0 channels=16 NO WATERMARK 2026-01-08T15:25:20.949303Z :KQP_COMPUTE ERROR: dq_async_compute_actor_ut.cpp:834: Square Test for: packets=5 watermarkPeriod=1 waitIntermediateAcks=0 channels=1 Assert *watermark == expectedWatermark failed Expected 1970-01-01T00:00:04.000000Z == 1970-01-01T00:00:05.000000Z, Watermark Delay is 1.000000s 2026-01-08T15:25:20.956711Z :KQP_COMPUTE ERROR: dq_async_compute_actor_ut.cpp:834: Square Test for: packets=5 watermarkPeriod=1 waitIntermediateAcks=0 channels=3 Assert *watermark == expectedWatermark failed Expected 1970-01-01T00:00:04.000000Z == 1970-01-01T00:00:05.000000Z, Watermark Delay is 1.000000s 2026-01-08T15:25:20.965840Z :KQP_COMPUTE ERROR: dq_async_compute_actor_ut.cpp:831: Square Test for: packets=5 watermarkPeriod=1 waitIntermediateAcks=0 channels=7 Assert !!watermark failed 2026-01-08T15:25:20.965859Z :KQP_COMPUTE ERROR: dq_async_compute_actor_ut.cpp:837: Square Test for: packets=5 watermarkPeriod=1 waitIntermediateAcks=0 channels=7 NO WATERMARK 2026-01-08T15:25:20.986920Z :KQP_COMPUTE ERROR: dq_async_compute_actor_ut.cpp:831: Square Test for: packets=5 watermarkPeriod=1 waitIntermediateAcks=0 channels=16 Assert !!watermark failed 2026-01-08T15:25:20.986929Z :KQP_COMPUTE ERROR: dq_async_compute_actor_ut.cpp:837: Square Test for: packets=5 watermarkPeriod=1 waitIntermediateAcks=0 channels=16 NO WATERMARK 2026-01-08T15:25:20.998989Z :KQP_COMPUTE ERROR: dq_async_compute_actor_ut.cpp:834: Square Test for: packets=51 watermarkPeriod=1 waitIntermediateAcks=0 channels=1 Assert *watermark == expectedWatermark failed Expected 1970-01-01T00:00:50.000000Z == 1970-01-01T00:00:51.000000Z, Watermark Delay is 1.000000s 2026-01-08T15:25:21.010985Z :KQP_COMPUTE ERROR: dq_async_compute_actor_ut.cpp:834: Square Test for: packets=51 watermarkPeriod=1 waitIntermediateAcks=0 channels=3 Assert *watermark == expectedWatermark failed Expected 1970-01-01T00:00:48.000000Z == 1970-01-01T00:00:51.000000Z, Watermark Delay is 3.000000s 2026-01-08T15:25:21.027829Z :KQP_COMPUTE ERROR: dq_async_compute_actor_ut.cpp:834: Square Test for: packets=51 watermarkPeriod=1 waitIntermediateAcks=0 channels=7 Assert *watermark == expectedWatermark failed Expected 1970-01-01T00:00:48.000000Z == 1970-01-01T00:00:51.000000Z, Watermark Delay is 3.000000s 2026-01-08T15:25:21.059572Z :KQP_COMPUTE ERROR: dq_async_compute_actor_ut.cpp:834: Square Test for: packets=51 watermarkPeriod=1 waitIntermediateAcks=0 channels=16 Assert *watermark == expectedWatermark failed Expected 1970-01-01T00:00:42.000000Z == 1970-01-01T00:00:51.000000Z, Watermark Delay is 9.000000s 2026-01-08T15:25:21.082332Z :KQP_COMPUTE ERROR: dq_async_compute_actor_ut.cpp:834: Square Test for: packets=128 watermarkPeriod=1 waitIntermediateAcks=0 channels=1 Assert *watermark == expectedWatermark failed Expected 1970-01-01T00:02:07.000000Z == 1970-01-01T00:02:08.000000Z, Watermark Delay is 1.000000s 2026-01-08T15:25:21.112289Z :KQP_COMPUTE ERROR: dq_async_compute_actor_ut.cpp:834: Square Test for: packets=128 watermarkPeriod=1 waitIntermediateAcks=0 channels=3 Assert *watermark == expectedWatermark failed Expected 1970-01-01T00:02:04.000000Z == 
1970-01-01T00:02:08.000000Z, Watermark Delay is 4.000000s 2026-01-08T15:25:21.146515Z :KQP_COMPUTE ERROR: dq_async_compute_actor_ut.cpp:834: Square Test for: packets=128 watermarkPeriod=1 waitIntermediateAcks=0 channels=7 Assert *watermark == expectedWatermark failed Expected 1970-01-01T00:02:07.000000Z == 1970-01-01T00:02:08.000000Z, Watermark Delay is 1.000000s 2026-01-08T15:25:21.190460Z :KQP_COMPUTE ERROR: dq_async_compute_actor_ut.cpp:834: Square Test for: packets=128 watermarkPeriod=1 waitIntermediateAcks=0 channels=16 Assert *watermark == expectedWatermark failed Expected 1970-01-01T00:02:07.000000Z == 1970-01-01T00:02:08.000000Z, Watermark Delay is 1.000000s 2026-01-08T15:25:21.235704Z :KQP_COMPUTE ERROR: dq_async_compute_actor_ut.cpp:834: Square Test for: packets=251 watermarkPeriod=1 waitIntermediateAcks=0 channels=1 Assert *watermark == expectedWatermark failed Expected 1970-01-01T00:04:10.000000Z == 1970-01-01T00:04:11.000000Z, Watermark Delay is 1.000000s 2026-01-08T15:25:21.283269Z :KQP_COMPUTE ERROR: dq_async_compute_actor_ut.cpp:834: Square Test for: packets=251 watermarkPeriod=1 waitIntermediateAcks=0 channels=3 Assert *watermark == expectedWatermark failed Expected 1970-01-01T00:04:10.000000Z == 1970-01-01T00:04:11.000000Z, Watermark Delay is 1.000000s 2026-01-08T15:25:21.346118Z :KQP_COMPUTE ERROR: dq_async_compute_actor_ut.cpp:834: Square Test for: packets=251 watermarkPeriod=1 waitIntermediateAcks=0 channels=7 Assert *watermark == expectedWatermark failed Expected 1970-01-01T00:04:10.000000Z == 1970-01-01T00:04:11.000000Z, Watermark Delay is 1.000000s 2026-01-08T15:25:21.419008Z :KQP_COMPUTE ERROR: dq_async_compute_actor_ut.cpp:834: Square Test for: packets=251 watermarkPeriod=1 waitIntermediateAcks=0 channels=16 Assert *watermark == expectedWatermark failed Expected 1970-01-01T00:04:10.000000Z == 1970-01-01T00:04:11.000000Z, Watermark Delay is 1.000000s 2026-01-08T15:25:21.425094Z :KQP_COMPUTE ERROR: dq_async_compute_actor_ut.cpp:834: Square Test for: packets=7 watermarkPeriod=1 waitIntermediateAcks=0 channels=1 Assert *watermark == expectedWatermark failed Expected 1970-01-01T00:00:06.000000Z == 1970-01-01T00:00:07.000000Z, Watermark Delay is 1.000000s 2026-01-08T15:25:21.431436Z :KQP_COMPUTE ERROR: dq_async_compute_actor_ut.cpp:831: Square Test for: packets=7 watermarkPeriod=1 waitIntermediateAcks=0 channels=3 Assert !!watermark failed 2026-01-08T15:25:21.431447Z :KQP_COMPUTE ERROR: dq_async_compute_actor_ut.cpp:837: Square Test for: packets=7 watermarkPeriod=1 waitIntermediateAcks=0 channels=3 NO WATERMARK 2026-01-08T15:25:21.440799Z :KQP_COMPUTE ERROR: dq_async_compute_actor_ut.cpp:831: Square Test for: packets=7 watermarkPeriod=1 waitIntermediateAcks=0 channels=7 Assert !!watermark failed 2026-01-08T15:25:21.440809Z :KQP_COMPUTE ERROR: dq_async_compute_actor_ut.cpp:837: Square Test for: packets=7 watermarkPeriod=1 waitIntermediateAcks=0 channels=7 NO WATERMARK 2026-01-08T15:25:21.455897Z :KQP_COMPUTE ERROR: dq_async_compute_actor_ut.cpp:831: Square Test for: packets=7 watermarkPeriod=1 waitIntermediateAcks=0 channels=16 Assert !!watermark failed 2026-01-08T15:25:21.455906Z :KQP_COMPUTE ERROR: dq_async_compute_actor_ut.cpp:837: Square Test for: packets=7 watermarkPeriod=1 waitInterme ... 
P_COMPUTE ERROR: dq_sync_compute_actor_ut.cpp:848: Square Test for: packets=673 watermarkPeriod=1 waitIntermediateAcks=1 channels=16 Assert *watermark == expectedWatermark failed Expected 1970-01-01T00:11:02.000000Z == 1970-01-01T00:11:13.000000Z, Watermark Delay is 11.000000s
2026-01-08T15:34:23.457695Z :KQP_COMPUTE ERROR: dq_sync_compute_actor_ut.cpp:848: Square Test for: packets=343 watermarkPeriod=1 waitIntermediateAcks=1 channels=3 Assert *watermark == expectedWatermark failed Expected 1970-01-01T00:05:42.000000Z == 1970-01-01T00:05:43.000000Z, Watermark Delay is 1.000000s
2026-01-08T15:34:24.124696Z :KQP_COMPUTE ERROR: dq_sync_compute_actor_ut.cpp:848: Square Test for: packets=343 watermarkPeriod=1 waitIntermediateAcks=1 channels=16 Assert *watermark == expectedWatermark failed Expected 1970-01-01T00:05:42.000000Z == 1970-01-01T00:05:43.000000Z, Watermark Delay is 1.000000s
2026-01-08T15:34:25.609349Z :KQP_COMPUTE ERROR: dq_sync_compute_actor_ut.cpp:848: Square Test for: packets=567 watermarkPeriod=1 waitIntermediateAcks=1 channels=3 Assert *watermark == expectedWatermark failed Expected 1970-01-01T00:09:25.000000Z == 1970-01-01T00:09:27.000000Z, Watermark Delay is 2.000000s
2026-01-08T15:34:26.878073Z :KQP_COMPUTE ERROR: dq_sync_compute_actor_ut.cpp:848: Square Test for: packets=567 watermarkPeriod=1 waitIntermediateAcks=1 channels=16 Assert *watermark == expectedWatermark failed Expected 1970-01-01T00:09:25.000000Z == 1970-01-01T00:09:27.000000Z, Watermark Delay is 2.000000s
2026-01-08T15:34:28.033125Z :KQP_COMPUTE ERROR: dq_sync_compute_actor_ut.cpp:848: Square Test for: packets=275 watermarkPeriod=1 waitIntermediateAcks=1 channels=16 Assert *watermark == expectedWatermark failed Expected 1970-01-01T00:04:23.000000Z == 1970-01-01T00:04:35.000000Z, Watermark Delay is 12.000000s
2026-01-08T15:34:28.143113Z :KQP_COMPUTE ERROR: dq_sync_compute_actor_ut.cpp:845: Square Test for: packets=3 watermarkPeriod=3 waitIntermediateAcks=1 channels=7 Assert !!watermark failed
2026-01-08T15:34:28.143129Z :KQP_COMPUTE ERROR: dq_sync_compute_actor_ut.cpp:851: Square Test for: packets=3 watermarkPeriod=3 waitIntermediateAcks=1 channels=7 NO WATERMARK
2026-01-08T15:34:28.163944Z :KQP_COMPUTE ERROR: dq_sync_compute_actor_ut.cpp:845: Square Test for: packets=3 watermarkPeriod=3 waitIntermediateAcks=1 channels=16 Assert !!watermark failed
2026-01-08T15:34:28.163961Z :KQP_COMPUTE ERROR: dq_sync_compute_actor_ut.cpp:851: Square Test for: packets=3 watermarkPeriod=3 waitIntermediateAcks=1 channels=16 NO WATERMARK
2026-01-08T15:34:28.190270Z :KQP_COMPUTE ERROR: dq_sync_compute_actor_ut.cpp:845: Square Test for: packets=4 watermarkPeriod=3 waitIntermediateAcks=1 channels=7 Assert !!watermark failed
2026-01-08T15:34:28.190286Z :KQP_COMPUTE ERROR: dq_sync_compute_actor_ut.cpp:851: Square Test for: packets=4 watermarkPeriod=3 waitIntermediateAcks=1 channels=7 NO WATERMARK
2026-01-08T15:34:28.206060Z :KQP_COMPUTE ERROR: dq_sync_compute_actor_ut.cpp:845: Square Test for: packets=4 watermarkPeriod=3 waitIntermediateAcks=1 channels=16 Assert !!watermark failed
2026-01-08T15:34:28.206075Z :KQP_COMPUTE ERROR: dq_sync_compute_actor_ut.cpp:851: Square Test for: packets=4 watermarkPeriod=3 waitIntermediateAcks=1 channels=16 NO WATERMARK
2026-01-08T15:34:28.231843Z :KQP_COMPUTE ERROR: dq_sync_compute_actor_ut.cpp:845: Square Test for: packets=5 watermarkPeriod=3 waitIntermediateAcks=1 channels=7 Assert !!watermark failed
2026-01-08T15:34:28.231859Z :KQP_COMPUTE ERROR: dq_sync_compute_actor_ut.cpp:851: Square Test for: packets=5 watermarkPeriod=3 waitIntermediateAcks=1 channels=7 NO WATERMARK
2026-01-08T15:34:28.248387Z :KQP_COMPUTE ERROR: dq_sync_compute_actor_ut.cpp:845: Square Test for: packets=5 watermarkPeriod=3 waitIntermediateAcks=1 channels=16 Assert !!watermark failed
2026-01-08T15:34:28.248402Z :KQP_COMPUTE ERROR: dq_sync_compute_actor_ut.cpp:851: Square Test for: packets=5 watermarkPeriod=3 waitIntermediateAcks=1 channels=16 NO WATERMARK
2026-01-08T15:34:28.326186Z :KQP_COMPUTE ERROR: dq_sync_compute_actor_ut.cpp:848: Square Test for: packets=51 watermarkPeriod=3 waitIntermediateAcks=1 channels=3 Assert *watermark == expectedWatermark failed Expected 1970-01-01T00:00:48.000000Z == 1970-01-01T00:00:51.000000Z, Watermark Delay is 3.000000s
2026-01-08T15:34:28.379058Z :KQP_COMPUTE ERROR: dq_sync_compute_actor_ut.cpp:848: Square Test for: packets=51 watermarkPeriod=3 waitIntermediateAcks=1 channels=7 Assert *watermark == expectedWatermark failed Expected 1970-01-01T00:00:48.000000Z == 1970-01-01T00:00:51.000000Z, Watermark Delay is 3.000000s
2026-01-08T15:34:28.419732Z :KQP_COMPUTE ERROR: dq_sync_compute_actor_ut.cpp:848: Square Test for: packets=51 watermarkPeriod=3 waitIntermediateAcks=1 channels=16 Assert *watermark == expectedWatermark failed Expected 1970-01-01T00:00:42.000000Z == 1970-01-01T00:00:51.000000Z, Watermark Delay is 9.000000s
2026-01-08T15:34:28.600319Z :KQP_COMPUTE ERROR: dq_sync_compute_actor_ut.cpp:848: Square Test for: packets=128 watermarkPeriod=3 waitIntermediateAcks=1 channels=3 Assert *watermark == expectedWatermark failed Expected 1970-01-01T00:02:03.000000Z == 1970-01-01T00:02:06.000000Z, Watermark Delay is 3.000000s
2026-01-08T15:34:29.703973Z :KQP_COMPUTE ERROR: dq_sync_compute_actor_ut.cpp:845: Square Test for: packets=7 watermarkPeriod=3 waitIntermediateAcks=1 channels=3 Assert !!watermark failed
2026-01-08T15:34:29.703982Z :KQP_COMPUTE ERROR: dq_sync_compute_actor_ut.cpp:851: Square Test for: packets=7 watermarkPeriod=3 waitIntermediateAcks=1 channels=3 NO WATERMARK
2026-01-08T15:34:29.711350Z :KQP_COMPUTE ERROR: dq_sync_compute_actor_ut.cpp:845: Square Test for: packets=7 watermarkPeriod=3 waitIntermediateAcks=1 channels=7 Assert !!watermark failed
2026-01-08T15:34:29.711363Z :KQP_COMPUTE ERROR: dq_sync_compute_actor_ut.cpp:851: Square Test for: packets=7 watermarkPeriod=3 waitIntermediateAcks=1 channels=7 NO WATERMARK
2026-01-08T15:34:29.723309Z :KQP_COMPUTE ERROR: dq_sync_compute_actor_ut.cpp:845: Square Test for: packets=7 watermarkPeriod=3 waitIntermediateAcks=1 channels=16 Assert !!watermark failed
2026-01-08T15:34:29.723318Z :KQP_COMPUTE ERROR: dq_sync_compute_actor_ut.cpp:851: Square Test for: packets=7 watermarkPeriod=3 waitIntermediateAcks=1 channels=16 NO WATERMARK
2026-01-08T15:34:31.540228Z :KQP_COMPUTE ERROR: dq_sync_compute_actor_ut.cpp:848: Square Test for: packets=173 watermarkPeriod=3 waitIntermediateAcks=1 channels=3 Assert *watermark == expectedWatermark failed Expected 1970-01-01T00:02:45.000000Z == 1970-01-01T00:02:51.000000Z, Watermark Delay is 6.000000s
2026-01-08T15:34:32.033222Z :KQP_COMPUTE ERROR: dq_sync_compute_actor_ut.cpp:848: Square Test for: packets=136 watermarkPeriod=3 waitIntermediateAcks=1 channels=7 Assert *watermark == expectedWatermark failed Expected 1970-01-01T00:02:12.000000Z == 1970-01-01T00:02:15.000000Z, Watermark Delay is 3.000000s
2026-01-08T15:34:32.790755Z :KQP_COMPUTE ERROR: dq_sync_compute_actor_ut.cpp:848: Square Test for: packets=294 watermarkPeriod=3 waitIntermediateAcks=1 channels=7 Assert *watermark == expectedWatermark failed Expected 1970-01-01T00:04:39.000000Z == 1970-01-01T00:04:54.000000Z, Watermark Delay is 15.000000s
2026-01-08T15:34:35.340571Z :KQP_COMPUTE ERROR: dq_sync_compute_actor_ut.cpp:848: Square Test for: packets=656 watermarkPeriod=3 waitIntermediateAcks=1 channels=16 Assert *watermark == expectedWatermark failed Expected 1970-01-01T00:10:45.000000Z == 1970-01-01T00:10:54.000000Z, Watermark Delay is 9.000000s
2026-01-08T15:34:36.385713Z :KQP_COMPUTE ERROR: dq_sync_compute_actor_ut.cpp:848: Square Test for: packets=468 watermarkPeriod=3 waitIntermediateAcks=1 channels=7 Assert *watermark == expectedWatermark failed Expected 1970-01-01T00:07:39.000000Z == 1970-01-01T00:07:48.000000Z, Watermark Delay is 9.000000s
2026-01-08T15:34:37.923474Z :KQP_COMPUTE ERROR: dq_sync_compute_actor_ut.cpp:848: Square Test for: packets=434 watermarkPeriod=3 waitIntermediateAcks=1 channels=7 Assert *watermark == expectedWatermark failed Expected 1970-01-01T00:06:42.000000Z == 1970-01-01T00:07:12.000000Z, Watermark Delay is 30.000000s
2026-01-08T15:34:40.433752Z :KQP_COMPUTE ERROR: dq_sync_compute_actor_ut.cpp:848: Square Test for: packets=556 watermarkPeriod=3 waitIntermediateAcks=1 channels=7 Assert *watermark == expectedWatermark failed Expected 1970-01-01T00:09:12.000000Z == 1970-01-01T00:09:15.000000Z, Watermark Delay is 3.000000s
2026-01-08T15:34:40.842663Z :KQP_COMPUTE ERROR: dq_sync_compute_actor_ut.cpp:848: Square Test for: packets=556 watermarkPeriod=3 waitIntermediateAcks=1 channels=16 Assert *watermark == expectedWatermark failed Expected 1970-01-01T00:09:06.000000Z == 1970-01-01T00:09:15.000000Z, Watermark Delay is 9.000000s
2026-01-08T15:34:42.984891Z :KQP_COMPUTE ERROR: dq_sync_compute_actor_ut.cpp:848: Square Test for: packets=673 watermarkPeriod=3 waitIntermediateAcks=1 channels=16 Assert *watermark == expectedWatermark failed Expected 1970-01-01T00:10:06.000000Z == 1970-01-01T00:11:12.000000Z, Watermark Delay is 66.000000s
2026-01-08T15:34:43.607895Z :KQP_COMPUTE ERROR: dq_sync_compute_actor_ut.cpp:848: Square Test for: packets=343 watermarkPeriod=3 waitIntermediateAcks=1 channels=3 Assert *watermark == expectedWatermark failed Expected 1970-01-01T00:05:12.000000Z == 1970-01-01T00:05:42.000000Z, Watermark Delay is 30.000000s
2026-01-08T15:34:44.161932Z :KQP_COMPUTE ERROR: dq_sync_compute_actor_ut.cpp:848: Square Test for: packets=343 watermarkPeriod=3 waitIntermediateAcks=1 channels=16 Assert *watermark == expectedWatermark failed Expected 1970-01-01T00:05:03.000000Z == 1970-01-01T00:05:42.000000Z, Watermark Delay is 39.000000s
2026-01-08T15:34:45.414742Z :KQP_COMPUTE ERROR: dq_sync_compute_actor_ut.cpp:848: Square Test for: packets=567 watermarkPeriod=3 waitIntermediateAcks=1 channels=3 Assert *watermark == expectedWatermark failed Expected 1970-01-01T00:09:24.000000Z == 1970-01-01T00:09:27.000000Z, Watermark Delay is 3.000000s
2026-01-08T15:34:46.478015Z :KQP_COMPUTE ERROR: dq_sync_compute_actor_ut.cpp:848: Square Test for: packets=567 watermarkPeriod=3 waitIntermediateAcks=1 channels=16 Assert *watermark == expectedWatermark failed Expected 1970-01-01T00:09:24.000000Z == 1970-01-01T00:09:27.000000Z, Watermark Delay is 3.000000s
2026-01-08T15:34:47.309741Z :KQP_COMPUTE ERROR: dq_sync_compute_actor_ut.cpp:848: Square Test for: packets=275 watermarkPeriod=3 waitIntermediateAcks=1 channels=16 Assert *watermark == expectedWatermark failed Expected 1970-01-01T00:04:15.000000Z == 1970-01-01T00:04:33.000000Z, Watermark Delay is 18.000000s
Traceback (most recent call last):
  File "library/python/testing/yatest_common/yatest/common/process.py", line 384, in wait
    wait_for(
  File "library/python/testing/yatest_common/yatest/common/process.py", line 765, in wait_for
    raise TimeoutError(truncate(message, MAX_MESSAGE_LEN))
yatest.common.process.TimeoutError: 600 second(s) wait timeout has expired: Command '['/home/runner/.ya/tools/v4/10669990147/test_tool', 'run_ut', '@/home/runner/.ya/build/build_root/663n/00412e/ydb/library/yql/dq/actors/compute/ut/test-results/unittest/testing_out_stuff/test_tool.args']' stopped by 600 seconds timeout

During handling of the above exception, another exception occurred:

Traceback (most recent call last):
  File "devtools/ya/test/programs/test_tool/run_test/run_test.py", line 1772, in main
    res.wait(check_exit_code=False, timeout=current_run_test_timeout, on_timeout=timeout_callback)
  File "library/python/testing/yatest_common/yatest/common/process.py", line 398, in wait
    raise ExecutionTimeoutError(self, str(e))
yatest.common.process.ExecutionTimeoutError: (("600 second(s) wait timeout has expired: Command '['/home/runner/.ya/tools/v4/10669990147/test_tool', 'run_ut', '@/home/runner/.ya/build/build_root/663n/00412e/ydb/library/yql/dq/actors/compute/ut/test-results/unittest/testing_out_stuff/test_tool.args']' stopped by 600 seconds timeout",), {})
|87.7%| [TM] {BAZEL_UPLOAD} ydb/library/yql/dq/actors/compute/ut/unittest
>> CdcStreamChangeCollector::InsertSingleRow
------- [TM] {asan, default-linux-x86_64, release} ydb/services/persqueue_v1/ut/describes_ut/unittest
>> TTopicApiDescribes::GetLocalDescribe [GOOD]
Test command err:
2026-01-08T15:34:50.185507Z node 1 :METADATA_PROVIDER WARN: log.cpp:841: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7593014657809620980:2070];send_to=[0:7307199536658146131:7762515];
2026-01-08T15:34:50.185562Z node 1 :METADATA_PROVIDER ERROR: log.cpp:841: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message;
2026-01-08T15:34:50.226524Z node 2 :BS_NODE WARN: {NWDC01@distconf.cpp:423} StateFunc too long Type# 131077 Duration# 0.008477s
2026-01-08T15:34:50.398916Z node 1 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:125: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, got undelivered to scheme cache: ActorUnknown
2026-01-08T15:34:50.437344Z node 1 :PQ_READ_PROXY DEBUG: caching_service.cpp:44: Direct read cache: : Created
2026-01-08T15:34:50.480420Z node 2 :PQ_READ_PROXY DEBUG: caching_service.cpp:44: Direct read cache: : Created
2026-01-08T15:34:51.058427Z node 2 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: LookupError, path: Root/.metadata/script_executions
2026-01-08T15:34:51.058580Z node 2 :METADATA_PROVIDER ERROR: log.cpp:841: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError;
2026-01-08T15:34:51.091568Z node 1 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: LookupError, path: Root/.metadata/script_executions
2026-01-08T15:34:51.190794Z node 1 :TX_CONVEYOR ERROR: log.cpp:841: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup;
2026-01-08T15:34:51.226599Z node 1 :METADATA_PROVIDER ERROR: log.cpp:841: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError;
2026-01-08T15:34:51.306869Z node 2 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: LookupError, path: Root/.metadata/script_executions
2026-01-08T15:34:51.483592Z node 2 :TX_CONVEYOR ERROR: log.cpp:841: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup;
2026-01-08T15:34:51.562410Z node 1 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: LookupError, path: Root/.metadata/script_executions
2026-01-08T15:34:51.719043Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected
2026-01-08T15:34:51.719161Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting
2026-01-08T15:34:51.752391Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected
2026-01-08T15:34:51.752512Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting
2026-01-08T15:34:51.759334Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected
2026-01-08T15:34:51.768611Z node 1 :HIVE WARN: hive_impl.cpp:814: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 2 Cookie 2
2026-01-08T15:34:51.782842Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected
2026-01-08T15:34:51.808590Z node 2 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: LookupError, path: Root/.metadata/script_executions
2026-01-08T15:34:51.826482Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1374: Notification cookie mismatch for subscription [1:7593014657809620945:2082] 1767886490168798 != 1767886490168801
2026-01-08T15:34:51.931489Z node 1 :IMPORT WARN: schemeshard_import.cpp:440: Table profiles were not loaded
2026-01-08T15:34:52.314539Z node 1 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: PathErrorUnknown, path: Root/.metadata/script_executions
2026-01-08T15:34:52.741657Z node 2 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: PathErrorUnknown, path: Root/.metadata/script_executions
2026-01-08T15:34:52.940071Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: /home/runner/.ya/build/build_root/663n/0022fe/r3tmp/yandexX2AW1e.tmp
2026-01-08T15:34:52.940181Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: /home/runner/.ya/build/build_root/663n/0022fe/r3tmp/yandexX2AW1e.tmp
2026-01-08T15:34:52.946711Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:202: successfully initialized from file: /home/runner/.ya/build/build_root/663n/0022fe/r3tmp/yandexX2AW1e.tmp
2026-01-08T15:34:52.947024Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration
2026-01-08T15:34:53.714441Z INFO: TTestServer started on Port 65186 GrpcPort 8818
PQClient connected to localhost:8818
2026-01-08T15:34:54.309094Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311)
2026-01-08T15:34:54.510645Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689)
2026-01-08T15:34:55.186485Z node 1 :METADATA_PROVIDER ERROR: log.cpp:841: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7593014657809620980:2070];send_to=[0:7307199536658146131:7762515];
2026-01-08T15:34:55.186554Z node 1 :METADATA_PROVIDER ERROR: log.cpp:841: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout;
2026-01-08T15:34:57.378566Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7593014690435697127:2309], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions }
2026-01-08T15:34:57.378701Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:263: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions }
2026-01-08T15:34:57.379292Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7593014690435697136:2310], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions }
2026-01-08T15:34:57.379343Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:263: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions }
2026-01-08T15:34:57.379500Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7593014687874393518:2337], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions }
2026-01-08T15:34:57.379806Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:263: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions }
2026-01-08T15:34:57.380332Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7593014687874393531:2339], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions }
2026-01-08T15:34:57.380400Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:263: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions }
2026-01-08T15:34:57.388987Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7593014690435697141:2313], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions }
2026-01-08T15:34:57.389728Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7593014687874393536:2342], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions }
2026-01-08T15:34:57.429723Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710662:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179)
2026-01-08T15:34:57.436848Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7593014687874393571:2345], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions }
2026-01-08T15:34:57.436940Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:263: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions }
2026-01-08T15:34:57.437357Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7593014687874393574:2346], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions }
2026-01-08T15:34:57.437401Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:263: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions }
2026-01-08T15:34:57.454181Z node 2 :TX_PROXY ERROR: schemereq.cpp:624: Actor# [2:7593014690435697144:2184] txid# 281474976720657, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exists but creating right no ... init complete for topic 'rt3.dc1--topic-x' partition 10 generation 1 [1:7593014739414002507:2481]
2026-01-08T15:35:09.729624Z node 2 :PERSQUEUE INFO: partition.cpp:712: [72075186224037892][Partition][0][StateInit] init complete for topic 'rt3.dc1--topic-x' partition 0 generation 1 [2:7593014741975305255:2387]
2026-01-08T15:35:09.731251Z node 2 :PERSQUEUE INFO: partition_init.cpp:1155: [72075186224037894][Partition][8][StateInit] bootstrapping 8 [2:7593014741975305256:2388]
2026-01-08T15:35:09.733089Z node 1 :PERSQUEUE INFO: partition_init.cpp:1155: [72075186224037898][Partition][14][StateInit] bootstrapping 14 [1:7593014739414002517:2480]
2026-01-08T15:35:09.733405Z node 2 :PERSQUEUE INFO: partition.cpp:712: [72075186224037894][Partition][8][StateInit] init complete for topic 'rt3.dc1--topic-x' partition 8 generation 1 [2:7593014741975305256:2388]
2026-01-08T15:35:09.735169Z node 1 :PERSQUEUE INFO: partition.cpp:712: [72075186224037898][Partition][14][StateInit] init complete for topic 'rt3.dc1--topic-x' partition 14 generation 1 [1:7593014739414002517:2480]
2026-01-08T15:35:09.735983Z node 2 :PERSQUEUE INFO: partition_init.cpp:1155: [72075186224037894][Partition][12][StateInit] bootstrapping 12 [2:7593014741975305257:2388]
2026-01-08T15:35:09.736764Z node 1 :PERSQUEUE INFO: partition_init.cpp:1155: [72075186224037898][Partition][11][StateInit] bootstrapping 11 [1:7593014739414002518:2480]
2026-01-08T15:35:09.737847Z node 2 :PERSQUEUE INFO: partition.cpp:712: [72075186224037894][Partition][12][StateInit] init complete for topic 'rt3.dc1--topic-x' partition 12 generation 1 [2:7593014741975305257:2388]
2026-01-08T15:35:09.739238Z node 1 :PERSQUEUE INFO: partition.cpp:712: [72075186224037898][Partition][11][StateInit] init complete for topic 'rt3.dc1--topic-x' partition 11 generation 1 [1:7593014739414002518:2480]
2026-01-08T15:35:09.746958Z node 1 :PERSQUEUE INFO: partition_init.cpp:1155: [72075186224037893][Partition][1][StateInit] bootstrapping 1 [1:7593014739414002525:2479]
2026-01-08T15:35:09.749095Z node 1 :PERSQUEUE INFO: partition.cpp:712: [72075186224037893][Partition][1][StateInit] init complete for topic 'rt3.dc1--topic-x' partition 1 generation 1 [1:7593014739414002525:2479]
2026-01-08T15:35:09.749288Z node 2 :PERSQUEUE INFO: partition_init.cpp:1155: [72075186224037895][Partition][9][StateInit] bootstrapping 9 [2:7593014741975305258:2389]
2026-01-08T15:35:09.751300Z node 2 :PERSQUEUE INFO: partition.cpp:712: [72075186224037895][Partition][9][StateInit] init complete for topic 'rt3.dc1--topic-x' partition 9 generation 1 [2:7593014741975305258:2389]
===Query complete
2026-01-08T15:35:09.751138Z node 1 :PERSQUEUE INFO: partition_init.cpp:1155: [72075186224037893][Partition][6][StateInit] bootstrapping 6 [1:7593014739414002528:2479]
2026-01-08T15:35:09.753141Z node 1 :PERSQUEUE INFO: partition.cpp:712: [72075186224037893][Partition][6][StateInit] init complete for topic 'rt3.dc1--topic-x' partition 6 generation 1 [1:7593014739414002528:2479]
2026-01-08T15:35:09.760700Z node 2 :PERSQUEUE INFO: partition_init.cpp:1155: [72075186224037897][Partition][13][StateInit] bootstrapping 13 [2:7593014741975305264:2386]
2026-01-08T15:35:09.761832Z node 1
:PERSQUEUE NOTICE: pq_impl.cpp:927: [PQ: 72075186224037896] disable metering: reason# billing is not enabled in BillingMeteringConfig
2026-01-08T15:35:09.762698Z node 2 :PERSQUEUE INFO: partition.cpp:712: [72075186224037897][Partition][13][StateInit] init complete for topic 'rt3.dc1--topic-x' partition 13 generation 1 [2:7593014741975305264:2386]
2026-01-08T15:35:09.764768Z node 1 :PERSQUEUE NOTICE: pq_impl.cpp:927: [PQ: 72075186224037899] disable metering: reason# billing is not enabled in BillingMeteringConfig
2026-01-08T15:35:09.766221Z node 1 :PERSQUEUE NOTICE: pq_impl.cpp:927: [PQ: 72075186224037898] disable metering: reason# billing is not enabled in BillingMeteringConfig
2026-01-08T15:35:09.771514Z node 2 :PERSQUEUE NOTICE: pq_impl.cpp:927: [PQ: 72075186224037892] disable metering: reason# billing is not enabled in BillingMeteringConfig
2026-01-08T15:35:09.772477Z node 2 :PERSQUEUE INFO: partition_init.cpp:1155: [72075186224037895][Partition][2][StateInit] bootstrapping 2 [2:7593014741975305259:2389]
2026-01-08T15:35:09.783547Z node 1 :PERSQUEUE NOTICE: pq_impl.cpp:927: [PQ: 72075186224037893] disable metering: reason# billing is not enabled in BillingMeteringConfig
2026-01-08T15:35:09.789225Z node 2 :PERSQUEUE NOTICE: pq_impl.cpp:927: [PQ: 72075186224037897] disable metering: reason# billing is not enabled in BillingMeteringConfig
2026-01-08T15:35:09.795417Z node 2 :PERSQUEUE INFO: partition.cpp:712: [72075186224037895][Partition][2][StateInit] init complete for topic 'rt3.dc1--topic-x' partition 2 generation 1 [2:7593014741975305259:2389]
2026-01-08T15:35:09.798399Z node 2 :PERSQUEUE NOTICE: pq_impl.cpp:927: [PQ: 72075186224037894] disable metering: reason# billing is not enabled in BillingMeteringConfig
2026-01-08T15:35:09.805781Z node 2 :PERSQUEUE NOTICE: pq_impl.cpp:927: [PQ: 72075186224037895] disable metering: reason# billing is not enabled in BillingMeteringConfig
Create topic result: 1
2026-01-08T15:35:09.881542Z node 1 :PQ_READ_PROXY DEBUG: schema_actors.cpp:668: DescribeTopicImpl [1:7593014739414002614:4089]: Request location
2026-01-08T15:35:09.882063Z node 2 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1657: [72075186224037900][rt3.dc1--topic-x] pipe [1:7593014739414002623:4092] connected; active server actors: 1
2026-01-08T15:35:09.882544Z node 2 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer.cpp:611: [72075186224037900][rt3.dc1--topic-x] The partition location was added to response: TabletId 72075186224037892, PartitionId 0, NodeId 2, Generation 1
2026-01-08T15:35:09.882579Z node 2 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer.cpp:611: [72075186224037900][rt3.dc1--topic-x] The partition location was added to response: TabletId 72075186224037893, PartitionId 1, NodeId 1, Generation 1
2026-01-08T15:35:09.882593Z node 2 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer.cpp:611: [72075186224037900][rt3.dc1--topic-x] The partition location was added to response: TabletId 72075186224037895, PartitionId 2, NodeId 2, Generation 1
2026-01-08T15:35:09.882605Z node 2 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer.cpp:611: [72075186224037900][rt3.dc1--topic-x] The partition location was added to response: TabletId 72075186224037892, PartitionId 3, NodeId 2, Generation 1
2026-01-08T15:35:09.882616Z node 2 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer.cpp:611: [72075186224037900][rt3.dc1--topic-x] The partition location was added to response: TabletId 72075186224037899, PartitionId 4, NodeId 1, Generation 1
2026-01-08T15:35:09.882625Z node 2 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer.cpp:611: [72075186224037900][rt3.dc1--topic-x] The partition location was added to response: TabletId 72075186224037896, PartitionId 5, NodeId 1, Generation 1
2026-01-08T15:35:09.882633Z node 2 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer.cpp:611: [72075186224037900][rt3.dc1--topic-x] The partition location was added to response: TabletId 72075186224037893, PartitionId 6, NodeId 1, Generation 1
2026-01-08T15:35:09.882644Z node 2 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer.cpp:611: [72075186224037900][rt3.dc1--topic-x] The partition location was added to response: TabletId 72075186224037897, PartitionId 7, NodeId 2, Generation 1
2026-01-08T15:35:09.882653Z node 2 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer.cpp:611: [72075186224037900][rt3.dc1--topic-x] The partition location was added to response: TabletId 72075186224037894, PartitionId 8, NodeId 2, Generation 1
2026-01-08T15:35:09.882662Z node 2 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer.cpp:611: [72075186224037900][rt3.dc1--topic-x] The partition location was added to response: TabletId 72075186224037895, PartitionId 9, NodeId 2, Generation 1
2026-01-08T15:35:09.882672Z node 2 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer.cpp:611: [72075186224037900][rt3.dc1--topic-x] The partition location was added to response: TabletId 72075186224037896, PartitionId 10, NodeId 1, Generation 1
2026-01-08T15:35:09.882681Z node 2 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer.cpp:611: [72075186224037900][rt3.dc1--topic-x] The partition location was added to response: TabletId 72075186224037898, PartitionId 11, NodeId 1, Generation 1
2026-01-08T15:35:09.882690Z node 2 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer.cpp:611: [72075186224037900][rt3.dc1--topic-x] The partition location was added to response: TabletId 72075186224037894, PartitionId 12, NodeId 2, Generation 1
2026-01-08T15:35:09.882699Z node 2 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer.cpp:611: [72075186224037900][rt3.dc1--topic-x] The partition location was added to response: TabletId 72075186224037897, PartitionId 13, NodeId 2, Generation 1
2026-01-08T15:35:09.882709Z node 2 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer.cpp:611: [72075186224037900][rt3.dc1--topic-x] The partition location was added to response: TabletId 72075186224037898, PartitionId 14, NodeId 1, Generation 1
2026-01-08T15:35:09.891875Z node 1 :PQ_READ_PROXY DEBUG: schema_actors.cpp:761: DescribeTopicImpl [1:7593014739414002614:4089]: Got location
2026-01-08T15:35:09.892370Z node 2 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer__balancing.cpp:1661: [72075186224037900][rt3.dc1--topic-x] pipe [1:7593014739414002623:4092] disconnected.
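The balancer response above spells out the location of all fifteen partitions, one log line each. When triaging such runs it can help to collapse those lines into a partition-to-tablet/node map; below is a small standalone helper that assumes only the line format visible above and is not part of the ya tooling:

import re

# Matches lines like:
# "... The partition location was added to response: TabletId 72075186224037892, PartitionId 0, NodeId 2, Generation 1"
LOCATION = re.compile(r"TabletId (\d+), PartitionId (\d+), NodeId (\d+), Generation (\d+)")

def partition_map(log_text):
    # partition id -> (tablet id, node id); the last occurrence wins.
    return {
        int(part): (int(tablet), int(node))
        for tablet, part, node, _gen in LOCATION.findall(log_text)
    }

sample = ("The partition location was added to response: "
          "TabletId 72075186224037892, PartitionId 0, NodeId 2, Generation 1")
assert partition_map(sample) == {0: (72075186224037892, 2)}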
2026-01-08T15:35:09.892413Z node 2 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1672: [72075186224037900][rt3.dc1--topic-x] pipe [1:7593014739414002623:4092] disconnected; active server actors: 1
2026-01-08T15:35:09.892435Z node 2 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1697: [72075186224037900][rt3.dc1--topic-x] pipe [1:7593014739414002623:4092] disconnected no session
2026-01-08T15:35:09.895164Z node 1 :PQ_READ_PROXY DEBUG: schema_actors.cpp:668: DescribeTopicImpl [1:7593014739414002624:4093]: Request location
2026-01-08T15:35:09.897710Z node 2 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1657: [72075186224037900][rt3.dc1--topic-x] pipe [1:7593014739414002626:4095] connected; active server actors: 1
2026-01-08T15:35:09.897804Z node 2 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer.cpp:611: [72075186224037900][rt3.dc1--topic-x] The partition location was added to response: TabletId 72075186224037893, PartitionId 1, NodeId 1, Generation 1
2026-01-08T15:35:09.897822Z node 2 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer.cpp:611: [72075186224037900][rt3.dc1--topic-x] The partition location was added to response: TabletId 72075186224037892, PartitionId 3, NodeId 2, Generation 1
2026-01-08T15:35:09.897834Z node 2 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer.cpp:611: [72075186224037900][rt3.dc1--topic-x] The partition location was added to response: TabletId 72075186224037896, PartitionId 5, NodeId 1, Generation 1
2026-01-08T15:35:09.898515Z node 1 :PQ_READ_PROXY DEBUG: schema_actors.cpp:761: DescribeTopicImpl [1:7593014739414002624:4093]: Got location
2026-01-08T15:35:09.902274Z node 1 :PQ_READ_PROXY DEBUG: schema_actors.cpp:668: DescribeTopicImpl [1:7593014739414002627:4096]: Request location
2026-01-08T15:35:09.902558Z node 2 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer__balancing.cpp:1661: [72075186224037900][rt3.dc1--topic-x] pipe [1:7593014739414002626:4095] disconnected.
2026-01-08T15:35:09.902599Z node 2 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1672: [72075186224037900][rt3.dc1--topic-x] pipe [1:7593014739414002626:4095] disconnected; active server actors: 1
2026-01-08T15:35:09.902615Z node 2 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1697: [72075186224037900][rt3.dc1--topic-x] pipe [1:7593014739414002626:4095] disconnected no session
2026-01-08T15:35:09.903023Z node 2 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1657: [72075186224037900][rt3.dc1--topic-x] pipe [1:7593014739414002629:4098] connected; active server actors: 1
|87.7%| [TM] {BAZEL_UPLOAD} ydb/services/persqueue_v1/ut/describes_ut/unittest
>> CdcStreamChangeCollector::UpsertIntoTwoStreams
>> BsControllerConfig::CommandRollbackWhenAlone [GOOD]
>> TSentinelTests::PDiskErrorState [GOOD]
>> TSentinelTests::NodeStatusComputer [GOOD]
>> TSentinelTests::InitialDeploymentGracePeriod
>> TxUsage::Sinks_Oltp_WriteToTopics_2_Table [GOOD]
------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_consistent_copy_tables/unittest
>> TSchemeShardConsistentCopyTablesTest::ConsistentCopyTableWithGlobalSyncIndex [GOOD]
Test command err:
Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2143]
IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2143]
Leader for TabletID 72057594046678944 is [1:129:2154] sender: [1:130:2058] recipient: [1:112:2143]
2026-01-08T15:35:13.986698Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7938: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1
2026-01-08T15:35:13.986799Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7966: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10
2026-01-08T15:35:13.986837Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7852: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s
2026-01-08T15:35:13.986876Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7868: OperationsProcessing config: using default configuration
2026-01-08T15:35:13.986925Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7874: OperationsProcessing config: type TxMergeTablePartition, limit 10000
2026-01-08T15:35:13.986970Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7874: OperationsProcessing config: type TxSplitTablePartition, limit 10000
2026-01-08T15:35:13.987034Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7998: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10
2026-01-08T15:35:13.987108Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_shred_manager.cpp:39: [RootShredManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, ShredInterval# 604800.000000s, ShredBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false
2026-01-08T15:35:13.988039Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:8069: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources#
2026-01-08T15:35:13.988384Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute
2026-01-08T15:35:14.126417Z node 1 :FLAT_TX_SCHEMESHARD WARN:
schemeshard_impl.cpp:7755: Cannot subscribe to console configs 2026-01-08T15:35:14.126488Z node 1 :IMPORT WARN: schemeshard_import.cpp:440: Table profiles were not loaded 2026-01-08T15:35:14.144810Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2026-01-08T15:35:14.145182Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2026-01-08T15:35:14.145331Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2026-01-08T15:35:14.187892Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2026-01-08T15:35:14.188413Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:464: Clear TempDirsState with owners number: 0 2026-01-08T15:35:14.189233Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1383: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2026-01-08T15:35:14.198897Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2026-01-08T15:35:14.205992Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:160: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2026-01-08T15:35:14.206236Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_shred_manager.cpp:84: [RootShredManager] Stop 2026-01-08T15:35:14.207606Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2026-01-08T15:35:14.207677Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2026-01-08T15:35:14.207826Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2026-01-08T15:35:14.207876Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2026-01-08T15:35:14.207941Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:213: TTxServerlessStorageBilling.Complete 2026-01-08T15:35:14.208104Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:7111: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2026-01-08T15:35:14.375886Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot" OperationType: ESchemeOpMkDir MkDir { Name: ".sys" } Internal: true FailOnExist: false } TxId: 281474976710657 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:35:14.376940Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "auth_permissions" Type: EAuthPermissions } } TxId: 281474976710658 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:35:14.377096Z node 1 
:FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "auth_effective_permissions" Type: EAuthEffectivePermissions } } TxId: 281474976710659 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:35:14.377167Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "auth_users" Type: EAuthUsers } } TxId: 281474976710660 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:35:14.377257Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "pg_tables" Type: EPgTables } } TxId: 281474976710661 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:35:14.377330Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "ds_pdisks" Type: EPDisks } } TxId: 281474976710662 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:35:14.377386Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "auth_group_members" Type: EAuthGroupMembers } } TxId: 281474976710663 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:35:14.377485Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "top_partitions_one_minute" Type: ETopPartitionsByCpuOneMinute } } TxId: 281474976710664 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:35:14.377569Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "top_queries_by_request_units_one_hour" Type: ETopQueriesByRequestUnitsOneHour } } TxId: 281474976710665 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:35:14.377668Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: 
"/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "ds_vslots" Type: EVSlots } } TxId: 281474976710666 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:35:14.377748Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "top_partitions_by_tli_one_hour" Type: ETopPartitionsByTliOneHour } } TxId: 281474976710667 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:35:14.377840Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "nodes" Type: ENodes } } TxId: 281474976710668 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:35:14.377958Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "top_queries_by_cpu_time_one_hour" Type: ETopQueriesByCpuTimeOneHour } } TxId: 281474976710669 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:35:14.378057Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "top_queries_by_read_bytes_one_hour" Type: ETopQueriesByReadBytesOneHour } } TxId: 281474976710670 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:35:14.378130Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "top_queries_by_read_bytes_one_minute" Type: ETopQueriesByR ... 
s: true ShowPrivateTable: true } 2026-01-08T15:35:16.090871Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5260: StateWork, processing event TEvSchemeShard::TEvDescribeScheme 2026-01-08T15:35:16.090964Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/TableWithIndexCopy/ValueIndex" Options { ReturnPartitioningInfo: true ReturnPartitionConfig: true BackupInfo: false ReturnBoundaries: true ShowPrivateTable: true }, at schemeshard: 72057594046678944 2026-01-08T15:35:16.091237Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/TableWithIndexCopy/ValueIndex" took 259us result status StatusSuccess 2026-01-08T15:35:16.092090Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/TableWithIndexCopy/ValueIndex" PathDescription { Self { Name: "ValueIndex" PathId: 42 SchemeshardId: 72057594046678944 PathType: EPathTypeTableIndex CreateFinished: true CreateTxId: 102 CreateStep: 5000039 ParentPathId: 41 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 4 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 4 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 2 TableIndexVersion: 1 } ChildrenExist: true } Children { Name: "indexImplTable" PathId: 43 SchemeshardId: 72057594046678944 PathType: EPathTypeTable CreateFinished: true CreateTxId: 102 CreateStep: 5000039 ParentPathId: 42 PathState: EPathStateCreate Owner: "root@builtin" ACL: "" PathSubType: EPathSubTypeSyncIndexImplTable Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 1 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 42 PathsLimit: 10000 ShardsInside: 4 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } TableIndex { Name: "ValueIndex" LocalPathId: 42 Type: EIndexTypeGlobal State: EIndexStateReady KeyColumnNames: "value" SchemaVersion: 1 PathOwnerId: 72057594046678944 DataSize: 0 IndexImplTableDescriptions { Columns { Name: "value" Type: "Utf8" TypeId: 4608 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "key" Type: "Uint32" TypeId: 2 Id: 2 NotNull: false IsBuildInProgress: false } KeyColumnNames: "value" KeyColumnNames: "key" KeyColumnIds: 1 KeyColumnIds: 2 PartitionConfig { CompactionPolicy { InMemSizeToSnapshot: 4194304 InMemStepsToSnapshot: 300 InMemForceStepsToSnapshot: 500 InMemForceSizeToSnapshot: 16777216 
InMemCompactionBrokerQueue: 0 ReadAheadHiThreshold: 67108864 ReadAheadLoThreshold: 16777216 MinDataPageSize: 7168 SnapBrokerQueue: 0 Generation { GenerationId: 0 SizeToCompact: 0 CountToCompact: 8 ForceCountToCompact: 8 ForceSizeToCompact: 134217728 CompactionBrokerQueue: 4294967295 KeepInCache: true BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen1" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 0 UpliftPartSize: 0 } Generation { GenerationId: 1 SizeToCompact: 41943040 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 536870912 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen2" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 8388608 UpliftPartSize: 8388608 } Generation { GenerationId: 2 SizeToCompact: 419430400 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 17179869184 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen3" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 83886080 UpliftPartSize: 83886080 } BackupBrokerQueue: 1 DefaultTaskPriority: 5 BackgroundSnapshotPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } InMemResourceBrokerTask: "compaction_gen0" SnapshotResourceBrokerTask: "compaction_gen0" BackupResourceBrokerTask: "scan" LogOverheadSizeToSnapshot: 16777216 LogOverheadCountToSnapshot: 500 DroppedRowsPercentToCompact: 50 MinBTreeIndexNodeSize: 7168 MinBTreeIndexNodeKeys: 6 } PartitioningPolicy { SizeToSplit: 2147483648 MinPartitionsCount: 1 } } } } } PathId: 42 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 Index impl table name: indexImplTable 2026-01-08T15:35:16.092713Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5248: StateWork, received event# 271122945, Sender [1:1066:3000], Recipient [1:129:2154]: NKikimrSchemeOp.TDescribePath Path: "/MyRoot/TableWithIndexCopy/ValueIndex/indexImplTable" Options { ReturnPartitioningInfo: true ReturnPartitionConfig: true BackupInfo: false ReturnBoundaries: true ShowPrivateTable: true } 2026-01-08T15:35:16.092761Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5260: StateWork, processing event TEvSchemeShard::TEvDescribeScheme 2026-01-08T15:35:16.092853Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/TableWithIndexCopy/ValueIndex/indexImplTable" Options { ReturnPartitioningInfo: true ReturnPartitionConfig: true BackupInfo: false ReturnBoundaries: true ShowPrivateTable: true }, at schemeshard: 72057594046678944 2026-01-08T15:35:16.093175Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/TableWithIndexCopy/ValueIndex/indexImplTable" took 257us result status StatusSuccess 2026-01-08T15:35:16.093947Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: 
"/MyRoot/TableWithIndexCopy/ValueIndex/indexImplTable" PathDescription { Self { Name: "indexImplTable" PathId: 43 SchemeshardId: 72057594046678944 PathType: EPathTypeTable CreateFinished: true CreateTxId: 102 CreateStep: 5000039 ParentPathId: 42 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeSyncIndexImplTable Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "indexImplTable" Columns { Name: "value" Type: "Utf8" TypeId: 4608 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "key" Type: "Uint32" TypeId: 2 Id: 2 NotNull: false IsBuildInProgress: false } KeyColumnNames: "value" KeyColumnNames: "key" KeyColumnIds: 1 KeyColumnIds: 2 PartitionConfig { CompactionPolicy { InMemSizeToSnapshot: 4194304 InMemStepsToSnapshot: 300 InMemForceStepsToSnapshot: 500 InMemForceSizeToSnapshot: 16777216 InMemCompactionBrokerQueue: 0 ReadAheadHiThreshold: 67108864 ReadAheadLoThreshold: 16777216 MinDataPageSize: 7168 SnapBrokerQueue: 0 Generation { GenerationId: 0 SizeToCompact: 0 CountToCompact: 8 ForceCountToCompact: 8 ForceSizeToCompact: 134217728 CompactionBrokerQueue: 4294967295 KeepInCache: true BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen1" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 0 UpliftPartSize: 0 } Generation { GenerationId: 1 SizeToCompact: 41943040 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 536870912 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen2" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 8388608 UpliftPartSize: 8388608 } Generation { GenerationId: 2 SizeToCompact: 419430400 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 17179869184 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen3" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 83886080 UpliftPartSize: 83886080 } BackupBrokerQueue: 1 DefaultTaskPriority: 5 BackgroundSnapshotPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } InMemResourceBrokerTask: "compaction_gen0" SnapshotResourceBrokerTask: "compaction_gen0" BackupResourceBrokerTask: "scan" LogOverheadSizeToSnapshot: 16777216 LogOverheadCountToSnapshot: 500 DroppedRowsPercentToCompact: 50 MinBTreeIndexNodeSize: 7168 MinBTreeIndexNodeKeys: 6 } PartitioningPolicy { SizeToSplit: 2147483648 MinPartitionsCount: 1 } } TableSchemaVersion: 1 IsBackup: false IsRestore: false } TablePartitions { EndOfRangeKeyPrefix: "" IsPoint: false IsInclusive: false DatashardId: 72075186233409548 } TableStats { DataSize: 0 RowCount: 0 IndexSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 0 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 
PartCount: 1 RangeReadRows: 0 StoragePools { } ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 0 Memory: 0 Network: 0 Storage: 0 ReadThroughput: 0 WriteThroughput: 0 ReadIops: 0 WriteIops: 0 } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 42 PathsLimit: 10000 ShardsInside: 4 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 43 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 |87.7%| [TM] {BAZEL_UPLOAD} ydb/core/tx/schemeshard/ut_consistent_copy_tables/unittest ------- [TM] {asan, default-linux-x86_64, release} ydb/core/mind/bscontroller/ut_bscontroller/unittest >> BsControllerConfig::CommandRollbackWhenAlone [GOOD] Test command err: Leader for TabletID 72057594037932033 is [0:0:0] sender: [1:224:2066] recipient: [1:205:2077] IGNORE Leader for TabletID 72057594037932033 is [0:0:0] sender: [1:224:2066] recipient: [1:205:2077] Leader for TabletID 72057594037932033 is [1:226:2079] sender: [1:227:2066] recipient: [1:205:2077] 2026-01-08T15:35:12.571147Z node 1 :BS_CONTROLLER DEBUG: {BSC05@impl.h:2080} StateInit event Type# 268828672 Event# NKikimr::TEvTablet::TEvBoot 2026-01-08T15:35:12.572524Z node 1 :BS_CONTROLLER DEBUG: {BSC05@impl.h:2080} StateInit event Type# 268828673 Event# NKikimr::TEvTablet::TEvRestored 2026-01-08T15:35:12.572947Z node 1 :BS_CONTROLLER DEBUG: {BSC22@console_interaction.cpp:14} Console interaction started 2026-01-08T15:35:12.576032Z node 1 :BS_CONTROLLER DEBUG: {BSC05@impl.h:2080} StateInit event Type# 268828684 Event# NKikimr::TEvTablet::TEvFollowerSyncComplete 2026-01-08T15:35:12.576362Z node 1 :BS_CONTROLLER DEBUG: {BSC05@impl.h:2080} StateInit event Type# 268639244 Event# NKikimr::TEvNodeWardenStorageConfig 2026-01-08T15:35:12.576534Z node 1 :BS_CONTROLLER DEBUG: {BSC05@impl.h:2080} StateInit event Type# 131082 Event# NActors::TEvInterconnect::TEvNodesInfo 2026-01-08T15:35:12.576559Z node 1 :BS_CONTROLLER DEBUG: {BSC01@bsc.cpp:829} Handle TEvInterconnect::TEvNodesInfo 2026-01-08T15:35:12.576779Z node 1 :BS_CONTROLLER DEBUG: {BSCTXIS01@init_scheme.cpp:17} TTxInitScheme Execute 2026-01-08T15:35:12.586926Z node 1 :BS_CONTROLLER DEBUG: {BSCTXIS03@init_scheme.cpp:44} TTxInitScheme Complete 2026-01-08T15:35:12.587056Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM01@migrate.cpp:190} Execute tx 2026-01-08T15:35:12.587335Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM02@migrate.cpp:251} Complete tx IncompatibleData# false 2026-01-08T15:35:12.587466Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxTrimUnusedSlots 
2026-01-08T15:35:12.587582Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxTrimUnusedSlots 2026-01-08T15:35:12.587668Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateSchemaVersion Leader for TabletID 72057594037932033 is [1:226:2079] sender: [1:247:2066] recipient: [1:20:2067] 2026-01-08T15:35:12.599197Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateSchemaVersion 2026-01-08T15:35:12.599366Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxGenerateInstanceId 2026-01-08T15:35:12.630977Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxGenerateInstanceId 2026-01-08T15:35:12.631135Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateStaticPDiskInfo 2026-01-08T15:35:12.631247Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateStaticPDiskInfo 2026-01-08T15:35:12.631355Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxFillInNonNullConfigForPDisk 2026-01-08T15:35:12.631496Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxFillInNonNullConfigForPDisk 2026-01-08T15:35:12.631585Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxDropDriveStatus 2026-01-08T15:35:12.631643Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxDropDriveStatus 2026-01-08T15:35:12.631726Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateCompatibilityInfo 2026-01-08T15:35:12.645737Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateCompatibilityInfo 2026-01-08T15:35:12.645890Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateEnableConfigV2 2026-01-08T15:35:12.659002Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateEnableConfigV2 2026-01-08T15:35:12.659158Z node 1 :BS_CONTROLLER DEBUG: {BSCTXLE01@load_everything.cpp:21} TTxLoadEverything Execute 2026-01-08T15:35:12.660695Z node 1 :BS_CONTROLLER DEBUG: {BSCTXLE03@load_everything.cpp:705} TTxLoadEverything Complete 2026-01-08T15:35:12.660769Z node 1 :BS_CONTROLLER DEBUG: {BSC09@impl.h:2211} LoadFinished 2026-01-08T15:35:12.660994Z node 1 :BS_CONTROLLER DEBUG: {BSC18@console_interaction.cpp:31} Console connection service started 2026-01-08T15:35:12.661061Z node 1 :BS_CONTROLLER DEBUG: 
{BSCTXLE04@load_everything.cpp:710} TTxLoadEverything InitQueue processed 2026-01-08T15:35:12.679629Z node 1 :BS_CONTROLLER DEBUG: {BSCTXCC01@config_cmd.cpp:433} Execute TEvControllerConfigRequest Request# {} Leader for TabletID 72057594037932033 is [0:0:0] sender: [11:224:2066] recipient: [11:208:2077] IGNORE Leader for TabletID 72057594037932033 is [0:0:0] sender: [11:224:2066] recipient: [11:208:2077] Leader for TabletID 72057594037932033 is [11:226:2079] sender: [11:227:2066] recipient: [11:208:2077] 2026-01-08T15:35:14.705211Z node 11 :BS_CONTROLLER DEBUG: {BSC05@impl.h:2080} StateInit event Type# 268828672 Event# NKikimr::TEvTablet::TEvBoot 2026-01-08T15:35:14.706164Z node 11 :BS_CONTROLLER DEBUG: {BSC05@impl.h:2080} StateInit event Type# 268828673 Event# NKikimr::TEvTablet::TEvRestored 2026-01-08T15:35:14.706530Z node 11 :BS_CONTROLLER DEBUG: {BSC22@console_interaction.cpp:14} Console interaction started 2026-01-08T15:35:14.707781Z node 11 :BS_CONTROLLER DEBUG: {BSC05@impl.h:2080} StateInit event Type# 268828684 Event# NKikimr::TEvTablet::TEvFollowerSyncComplete 2026-01-08T15:35:14.708204Z node 11 :BS_CONTROLLER DEBUG: {BSC05@impl.h:2080} StateInit event Type# 268639244 Event# NKikimr::TEvNodeWardenStorageConfig 2026-01-08T15:35:14.708431Z node 11 :BS_CONTROLLER DEBUG: {BSC05@impl.h:2080} StateInit event Type# 131082 Event# NActors::TEvInterconnect::TEvNodesInfo 2026-01-08T15:35:14.708468Z node 11 :BS_CONTROLLER DEBUG: {BSC01@bsc.cpp:829} Handle TEvInterconnect::TEvNodesInfo 2026-01-08T15:35:14.708615Z node 11 :BS_CONTROLLER DEBUG: {BSCTXIS01@init_scheme.cpp:17} TTxInitScheme Execute 2026-01-08T15:35:14.723123Z node 11 :BS_CONTROLLER DEBUG: {BSCTXIS03@init_scheme.cpp:44} TTxInitScheme Complete 2026-01-08T15:35:14.723265Z node 11 :BS_CONTROLLER DEBUG: {BSCTXM01@migrate.cpp:190} Execute tx 2026-01-08T15:35:14.723387Z node 11 :BS_CONTROLLER DEBUG: {BSCTXM02@migrate.cpp:251} Complete tx IncompatibleData# false 2026-01-08T15:35:14.723500Z node 11 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxTrimUnusedSlots 2026-01-08T15:35:14.723603Z node 11 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxTrimUnusedSlots 2026-01-08T15:35:14.723710Z node 11 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateSchemaVersion Leader for TabletID 72057594037932033 is [11:226:2079] sender: [11:247:2066] recipient: [11:20:2067] 2026-01-08T15:35:14.736290Z node 11 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateSchemaVersion 2026-01-08T15:35:14.736459Z node 11 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxGenerateInstanceId 2026-01-08T15:35:14.767339Z node 11 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxGenerateInstanceId 2026-01-08T15:35:14.767475Z node 11 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateStaticPDiskInfo 2026-01-08T15:35:14.767553Z node 11 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# 
NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateStaticPDiskInfo 2026-01-08T15:35:14.767634Z node 11 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxFillInNonNullConfigForPDisk 2026-01-08T15:35:14.767805Z node 11 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxFillInNonNullConfigForPDisk 2026-01-08T15:35:14.767893Z node 11 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxDropDriveStatus 2026-01-08T15:35:14.767956Z node 11 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxDropDriveStatus 2026-01-08T15:35:14.768025Z node 11 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateCompatibilityInfo 2026-01-08T15:35:14.779349Z node 11 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateCompatibilityInfo 2026-01-08T15:35:14.779492Z node 11 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateEnableConfigV2 2026-01-08T15:35:14.790417Z node 11 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateEnableConfigV2 2026-01-08T15:35:14.790568Z node 11 :BS_CONTROLLER DEBUG: {BSCTXLE01@load_everything.cpp:21} TTxLoadEverything Execute 2026-01-08T15:35:14.792014Z node 11 :BS_CONTROLLER DEBUG: {BSCTXLE03@load_everything.cpp:705} TTxLoadEverything Complete 2026-01-08T15:35:14.792069Z node 11 :BS_CONTROLLER DEBUG: {BSC09@impl.h:2211} LoadFinished 2026-01-08T15:35:14.792292Z node 11 :BS_CONTROLLER DEBUG: {BSC18@console_interaction.cpp:31} Console connection service started 2026-01-08T15:35:14.792336Z node 11 :BS_CONTROLLER DEBUG: {BSCTXLE04@load_everything.cpp:710} TTxLoadEverything InitQueue processed 2026-01-08T15:35:14.792939Z node 11 :BS_CONTROLLER DEBUG: {BSCTXCC01@config_cmd.cpp:433} Execute TEvControllerConfigRequest Request# {} Leader for TabletID 72057594037932033 is [0:0:0] sender: [21:26:2057] recipient: [21:23:2068] IGNORE Leader for TabletID 72057594037932033 is [0:0:0] sender: [21:26:2057] recipient: [21:23:2068] Leader for TabletID 72057594037932033 is [21:28:2070] sender: [21:29:2057] recipient: [21:23:2068] 2026-01-08T15:35:16.545916Z node 21 :BS_CONTROLLER DEBUG: {BSC05@impl.h:2080} StateInit event Type# 268828672 Event# NKikimr::TEvTablet::TEvBoot 2026-01-08T15:35:16.546936Z node 21 :BS_CONTROLLER DEBUG: {BSC05@impl.h:2080} StateInit event Type# 268828673 Event# NKikimr::TEvTablet::TEvRestored 2026-01-08T15:35:16.547210Z node 21 :BS_CONTROLLER DEBUG: {BSC22@console_interaction.cpp:14} Console interaction started 2026-01-08T15:35:16.548498Z node 21 :BS_CONTROLLER DEBUG: {BSC05@impl.h:2080} StateInit event Type# 268828684 Event# NKikimr::TEvTablet::TEvFollowerSyncComplete 2026-01-08T15:35:16.549063Z node 21 :BS_CONTROLLER DEBUG: {BSC05@impl.h:2080} StateInit event Type# 268639244 Event# NKikimr::TEvNodeWardenStorageConfig 2026-01-08T15:35:16.549275Z node 21 :BS_CONTROLLER DEBUG: {BSC05@impl.h:2080} StateInit event Type# 131082 Event# 
NActors::TEvInterconnect::TEvNodesInfo 2026-01-08T15:35:16.549306Z node 21 :BS_CONTROLLER DEBUG: {BSC01@bsc.cpp:829} Handle TEvInterconnect::TEvNodesInfo 2026-01-08T15:35:16.549536Z node 21 :BS_CONTROLLER DEBUG: {BSCTXIS01@init_scheme.cpp:17} TTxInitScheme Execute 2026-01-08T15:35:16.561479Z node 21 :BS_CONTROLLER DEBUG: {BSCTXIS03@init_scheme.cpp:44} TTxInitScheme Complete 2026-01-08T15:35:16.561639Z node 21 :BS_CONTROLLER DEBUG: {BSCTXM01@migrate.cpp:190} Execute tx 2026-01-08T15:35:16.561745Z node 21 :BS_CONTROLLER DEBUG: {BSCTXM02@migrate.cpp:251} Complete tx IncompatibleData# false 2026-01-08T15:35:16.561864Z node 21 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxTrimUnusedSlots 2026-01-08T15:35:16.561972Z node 21 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxTrimUnusedSlots 2026-01-08T15:35:16.562068Z node 21 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateSchemaVersion Leader for TabletID 72057594037932033 is [21:28:2070] sender: [21:49:2057] recipient: [21:11:2058] 2026-01-08T15:35:16.578205Z node 21 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateSchemaVersion 2026-01-08T15:35:16.578383Z node 21 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxGenerateInstanceId 2026-01-08T15:35:16.600240Z node 21 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxGenerateInstanceId 2026-01-08T15:35:16.600387Z node 21 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateStaticPDiskInfo 2026-01-08T15:35:16.600469Z node 21 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateStaticPDiskInfo 2026-01-08T15:35:16.600582Z node 21 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxFillInNonNullConfigForPDisk 2026-01-08T15:35:16.600721Z node 21 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxFillInNonNullConfigForPDisk 2026-01-08T15:35:16.600774Z node 21 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxDropDriveStatus 2026-01-08T15:35:16.600826Z node 21 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxDropDriveStatus 2026-01-08T15:35:16.600882Z node 21 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateCompatibilityInfo 2026-01-08T15:35:16.611690Z node 21 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateCompatibilityInfo 2026-01-08T15:35:16.611819Z node 21 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# 
NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateEnableConfigV2 2026-01-08T15:35:16.622596Z node 21 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateEnableConfigV2 2026-01-08T15:35:16.622737Z node 21 :BS_CONTROLLER DEBUG: {BSCTXLE01@load_everything.cpp:21} TTxLoadEverything Execute 2026-01-08T15:35:16.625993Z node 21 :BS_CONTROLLER DEBUG: {BSCTXLE03@load_everything.cpp:705} TTxLoadEverything Complete 2026-01-08T15:35:16.626052Z node 21 :BS_CONTROLLER DEBUG: {BSC09@impl.h:2211} LoadFinished 2026-01-08T15:35:16.626249Z node 21 :BS_CONTROLLER DEBUG: {BSC18@console_interaction.cpp:31} Console connection service started 2026-01-08T15:35:16.626286Z node 21 :BS_CONTROLLER DEBUG: {BSCTXLE04@load_everything.cpp:710} TTxLoadEverything InitQueue processed 2026-01-08T15:35:16.628297Z node 21 :BS_CONTROLLER DEBUG: {BSCTXCC01@config_cmd.cpp:433} Execute TEvControllerConfigRequest Request# {Command { DefineHostConfig { HostConfigId: 1 Name: "TestCommandRollbackWhenAlone" } } Rollback: true } Status { Success: true } ErrorDescription: "transaction rollback" RollbackSuccess: true Leader for TabletID 72057594037932033 is [0:0:0] sender: [22:26:2057] recipient: [22:24:2068] IGNORE Leader for TabletID 72057594037932033 is [0:0:0] sender: [22:26:2057] recipient: [22:24:2068] Leader for TabletID 72057594037932033 is [22:28:2070] sender: [22:29:2057] recipient: [22:24:2068] 2026-01-08T15:35:16.843279Z node 22 :BS_CONTROLLER DEBUG: {BSC05@impl.h:2080} StateInit event Type# 268828672 Event# NKikimr::TEvTablet::TEvBoot 2026-01-08T15:35:16.844440Z node 22 :BS_CONTROLLER DEBUG: {BSC05@impl.h:2080} StateInit event Type# 268828673 Event# NKikimr::TEvTablet::TEvRestored 2026-01-08T15:35:16.844694Z node 22 :BS_CONTROLLER DEBUG: {BSC22@console_interaction.cpp:14} Console interaction started 2026-01-08T15:35:16.845861Z node 22 :BS_CONTROLLER DEBUG: {BSC05@impl.h:2080} StateInit event Type# 268828684 Event# NKikimr::TEvTablet::TEvFollowerSyncComplete 2026-01-08T15:35:16.846424Z node 22 :BS_CONTROLLER DEBUG: {BSC05@impl.h:2080} StateInit event Type# 268639244 Event# NKikimr::TEvNodeWardenStorageConfig 2026-01-08T15:35:16.846779Z node 22 :BS_CONTROLLER DEBUG: {BSC05@impl.h:2080} StateInit event Type# 131082 Event# NActors::TEvInterconnect::TEvNodesInfo 2026-01-08T15:35:16.846815Z node 22 :BS_CONTROLLER DEBUG: {BSC01@bsc.cpp:829} Handle TEvInterconnect::TEvNodesInfo 2026-01-08T15:35:16.847016Z node 22 :BS_CONTROLLER DEBUG: {BSCTXIS01@init_scheme.cpp:17} TTxInitScheme Execute 2026-01-08T15:35:16.861145Z node 22 :BS_CONTROLLER DEBUG: {BSCTXIS03@init_scheme.cpp:44} TTxInitScheme Complete 2026-01-08T15:35:16.861321Z node 22 :BS_CONTROLLER DEBUG: {BSCTXM01@migrate.cpp:190} Execute tx 2026-01-08T15:35:16.861441Z node 22 :BS_CONTROLLER DEBUG: {BSCTXM02@migrate.cpp:251} Complete tx IncompatibleData# false 2026-01-08T15:35:16.861556Z node 22 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxTrimUnusedSlots 2026-01-08T15:35:16.861663Z node 22 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxTrimUnusedSlots 2026-01-08T15:35:16.861713Z node 22 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateSchemaVersion Leader for TabletID 72057594037932033 is 
[22:28:2070] sender: [22:49:2057] recipient: [22:11:2058] 2026-01-08T15:35:16.874897Z node 22 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateSchemaVersion 2026-01-08T15:35:16.875138Z node 22 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxGenerateInstanceId 2026-01-08T15:35:16.896640Z node 22 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxGenerateInstanceId 2026-01-08T15:35:16.896764Z node 22 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateStaticPDiskInfo 2026-01-08T15:35:16.896839Z node 22 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateStaticPDiskInfo 2026-01-08T15:35:16.896912Z node 22 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxFillInNonNullConfigForPDisk 2026-01-08T15:35:16.897051Z node 22 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxFillInNonNullConfigForPDisk 2026-01-08T15:35:16.897117Z node 22 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxDropDriveStatus 2026-01-08T15:35:16.897154Z node 22 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxDropDriveStatus 2026-01-08T15:35:16.897198Z node 22 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateCompatibilityInfo 2026-01-08T15:35:16.907978Z node 22 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateCompatibilityInfo 2026-01-08T15:35:16.908153Z node 22 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateEnableConfigV2 2026-01-08T15:35:16.921189Z node 22 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateEnableConfigV2 2026-01-08T15:35:16.921327Z node 22 :BS_CONTROLLER DEBUG: {BSCTXLE01@load_everything.cpp:21} TTxLoadEverything Execute 2026-01-08T15:35:16.922587Z node 22 :BS_CONTROLLER DEBUG: {BSCTXLE03@load_everything.cpp:705} TTxLoadEverything Complete 2026-01-08T15:35:16.922658Z node 22 :BS_CONTROLLER DEBUG: {BSC09@impl.h:2211} LoadFinished 2026-01-08T15:35:16.922869Z node 22 :BS_CONTROLLER DEBUG: {BSC18@console_interaction.cpp:31} Console connection service started 2026-01-08T15:35:16.922930Z node 22 :BS_CONTROLLER DEBUG: {BSCTXLE04@load_everything.cpp:710} TTxLoadEverything InitQueue processed 2026-01-08T15:35:16.923736Z node 22 :BS_CONTROLLER DEBUG: {BSCTXCC01@config_cmd.cpp:433} Execute TEvControllerConfigRequest Request# {Command { DefineHostConfig { HostConfigId: 1 Name: "TestCommandRollbackWhenAlone" } } Rollback: true } Status { Success: true } ErrorDescription: "transaction rollback" RollbackSuccess: true |87.7%| [TM] 
{BAZEL_UPLOAD} ydb/core/mind/bscontroller/ut_bscontroller/unittest >> KikimrIcGateway::TestLoadTokenSecretValueFromExternalDataSourceMetadata+UseSchemaSecrets [GOOD] >> KikimrIcGateway::TestLoadTokenSecretValueFromExternalDataSourceMetadata-UseSchemaSecrets >> TxUsage::Sinks_Oltp_WriteToTopics_2_Query >> TTxDataShardSampleKScan::RunScan [GOOD] >> TTxDataShardSampleKScan::SkipForeign >> TxUsage::Write_And_Read_Small_Messages_1 >> AsyncIndexChangeCollector::UpsertToSameKey >> AsyncIndexChangeCollector::DeleteNothing >> AsyncIndexChangeCollector::CoveredIndexUpdateCoveredColumn >> BsControllerConfig::AddDriveSerial >> BsControllerConfig::OverlayMapCrossReferences [GOOD] |87.7%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/schemeshard/ut_cdc_stream_reboots/ydb-core-tx-schemeshard-ut_cdc_stream_reboots |87.7%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/schemeshard/ut_cdc_stream_reboots/ydb-core-tx-schemeshard-ut_cdc_stream_reboots |87.7%| [TM] {RESULT} ydb/library/yql/dq/actors/compute/ut/unittest |87.7%| [LD] {RESULT} $(B)/ydb/core/tx/schemeshard/ut_cdc_stream_reboots/ydb-core-tx-schemeshard-ut_cdc_stream_reboots >> TResourcePoolTest::ParallelAlterResourcePool >> AsyncIndexChangeCollector::UpsertSingleRow [GOOD] >> AsyncIndexChangeCollector::UpsertManyRows >> KikimrIcGateway::TestLoadServiceAccountSecretValueFromExternalDataSourceMetadata+UseSchemaSecrets-UseAuthToken [GOOD] >> KikimrIcGateway::TestLoadServiceAccountSecretValueFromExternalDataSourceMetadata-UseSchemaSecrets+UseAuthToken >> TSentinelTests::InitialDeploymentGracePeriod [GOOD] >> TResourcePoolTest::SchemeErrors >> KikimrIcGateway::TestLoadAwsSecretValueFromExternalDataSourceMetadata+UseSchemaSecrets [GOOD] >> KikimrIcGateway::TestDropStreamingQuery >> KikimrIcGateway::TestAlterResourcePool [GOOD] >> KikimrIcGateway::TestAlterStreamingQuery >> AsyncIndexChangeCollector::InsertSingleRow [GOOD] >> AsyncIndexChangeCollector::InsertManyRows |87.7%| [TM] {asan, default-linux-x86_64, release} ydb/core/mind/bscontroller/ut_bscontroller/unittest >> BsControllerConfig::OverlayMapCrossReferences [GOOD] |87.7%| [TM] {BAZEL_UPLOAD} ydb/core/mind/bscontroller/ut_bscontroller/unittest >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-LocalUser-DropUser-64 [FAIL] >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-LocalUser-DropUser-65 >> TResourcePoolTest::ParallelAlterResourcePool [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/cms/ut_sentinel/unittest >> TSentinelTests::InitialDeploymentGracePeriod [GOOD] Test command err: 2026-01-08T15:34:56.147137Z node 1 :CMS DEBUG: sentinel.cpp:1047: [Sentinel] [Main] UpdateConfig 2026-01-08T15:34:56.147191Z node 1 :CMS DEBUG: sentinel.cpp:992: [Sentinel] [Main] Start ConfigUpdater 2026-01-08T15:34:56.147243Z node 1 :CMS DEBUG: sentinel.cpp:1063: [Sentinel] [Main] UpdateState 2026-01-08T15:34:56.147274Z node 1 :CMS INFO: sentinel.cpp:987: [Sentinel] [Main] StateUpdater was delayed 2026-01-08T15:34:56.147321Z node 1 :CMS DEBUG: sentinel.cpp:549: [Sentinel] [ConfigUpdater] Request blobstorage config: attempt# 0 2026-01-08T15:34:56.147396Z node 1 :CMS DEBUG: sentinel.cpp:562: [Sentinel] [ConfigUpdater] Request CMS cluster state: attempt# 0 2026-01-08T15:34:56.148706Z node 1 :CMS DEBUG: sentinel.cpp:571: [Sentinel] [ConfigUpdater] Handle TEvCms::TEvClusterStateResponse: response# Status { Code: OK } State { Hosts { Name: "node-1" State: UNKNOWN Devices { Name: "pdisk-1-4" State: DOWN Timestamp: 0 } Devices { Name: "pdisk-1-5" State: DOWN Timestamp: 0 } Devices { Name: 
"pdisk-1-6" State: DOWN Timestamp: 0 } Devices { Name: "pdisk-1-7" State: DOWN Timestamp: 0 } Timestamp: 0 NodeId: 1 InterconnectPort: 10000 Location { Rack: "rack-1" } StartTimeSeconds: 0 } Hosts { Name: "node-2" State: UNKNOWN Devices { Name: "pdisk-2-8" State: DOWN Timestamp: 0 } Devices { Name: "pdisk-2-9" State: DOWN Timestamp: 0 } Devices { Name: "pdisk-2-10" State: DOWN Timestamp: 0 } Devices { Name: "pdisk-2-11" State: DOWN Timestamp: 0 } Timestamp: 0 NodeId: 2 InterconnectPort: 10000 Location { Rack: "rack-2" } StartTimeSeconds: 0 } Hosts { Name: "node-3" State: UNKNOWN Devices { Name: "pdisk-3-12" State: DOWN Timestamp: 0 } Devices { Name: "pdisk-3-13" State: DOWN Timestamp: 0 } Devices { Name: "pdisk-3-14" State: DOWN Timestamp: 0 } Devices { Name: "pdisk-3-15" State: DOWN Timestamp: 0 } Timestamp: 0 NodeId: 3 InterconnectPort: 10000 Location { Rack: "rack-3" } StartTimeSeconds: 0 } Hosts { Name: "node-4" State: UNKNOWN Devices { Name: "pdisk-4-16" State: DOWN Timestamp: 0 } Devices { Name: "pdisk-4-17" State: DOWN Timestamp: 0 } Devices { Name: "pdisk-4-18" State: DOWN Timestamp: 0 } Devices { Name: "pdisk-4-19" State: DOWN Timestamp: 0 } Timestamp: 0 NodeId: 4 InterconnectPort: 10000 Location { Rack: "rack-4" } StartTimeSeconds: 0 } Hosts { Name: "node-5" State: UNKNOWN Devices { Name: "pdisk-5-20" State: DOWN Timestamp: 0 } Devices { Name: "pdisk-5-21" State: DOWN Timestamp: 0 } Devices { Name: "pdisk-5-22" State: DOWN Timestamp: 0 } Devices { Name: "pdisk-5-23" State: DOWN Timestamp: 0 } Timestamp: 0 NodeId: 5 InterconnectPort: 10000 Location { Rack: "rack-5" } StartTimeSeconds: 0 } Hosts { Name: "node-6" State: UNKNOWN Devices { Name: "pdisk-6-24" State: DOWN Timestamp: 0 } Devices { Name: "pdisk-6-25" State: DOWN Timestamp: 0 } Devices { Name: "pdisk-6-26" State: DOWN Timestamp: 0 } Devices { Name: "pdisk-6-27" State: DOWN Timestamp: 0 } Timestamp: 0 NodeId: 6 InterconnectPort: 10000 Location { Rack: "rack-6" } StartTimeSeconds: 0 } Hosts { Name: "node-7" State: UNKNOWN Devices { Name: "pdisk-7-28" State: DOWN Timestamp: 0 } Devices { Name: "pdisk-7-29" State: DOWN Timestamp: 0 } Devices { Name: "pdisk-7-30" State: DOWN Timestamp: 0 } Devices { Name: "pdisk-7-31" State: DOWN Timestamp: 0 } Timestamp: 0 NodeId: 7 InterconnectPort: 10000 Location { Rack: "rack-7" } StartTimeSeconds: 0 } Hosts { Name: "node-8" State: UNKNOWN Devices { Name: "pdisk-8-32" State: DOWN Timestamp: 0 } Devices { Name: "pdisk-8-33" State: DOWN Timestamp: 0 } Devices { Name: "pdisk-8-34" State: DOWN Timestamp: 0 } Devices { Name: "pdisk-8-35" State: DOWN Timestamp: 0 } Timestamp: 0 NodeId: 8 InterconnectPort: 10000 Location { Rack: "rack-8" } StartTimeSeconds: 0 } } 2026-01-08T15:34:56.154025Z node 1 :CMS DEBUG: sentinel.cpp:624: [Sentinel] [ConfigUpdater] Handle TEvBlobStorage::TEvControllerConfigResponse: response# Status { Success: true BaseConfig { PDisk { NodeId: 1 PDiskId: 4 Path: "/1/pdisk-4.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 1 PDiskId: 5 Path: "/1/pdisk-5.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 1 PDiskId: 6 Path: "/1/pdisk-6.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 1 PDiskId: 7 Path: "/1/pdisk-7.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 2 PDiskId: 8 Path: "/2/pdisk-8.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 2 PDiskId: 9 Path: "/2/pdisk-9.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 2 PDiskId: 10 Path: "/2/pdisk-10.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 2 PDiskId: 11 Path: "/2/pdisk-11.data" Guid: 1 DriveStatus: 
ACTIVE } PDisk { NodeId: 3 PDiskId: 12 Path: "/3/pdisk-12.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 3 PDiskId: 13 Path: "/3/pdisk-13.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 3 PDiskId: 14 Path: "/3/pdisk-14.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 3 PDiskId: 15 Path: "/3/pdisk-15.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 4 PDiskId: 16 Path: "/4/pdisk-16.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 4 PDiskId: 17 Path: "/4/pdisk-17.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 4 PDiskId: 18 Path: "/4/pdisk-18.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 4 PDiskId: 19 Path: "/4/pdisk-19.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 5 PDiskId: 20 Path: "/5/pdisk-20.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 5 PDiskId: 21 Path: "/5/pdisk-21.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 5 PDiskId: 22 Path: "/5/pdisk-22.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 5 PDiskId: 23 Path: "/5/pdisk-23.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 6 PDiskId: 24 Path: "/6/pdisk-24.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 6 PDiskId: 25 Path: "/6/pdisk-25.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 6 PDiskId: 26 Path: "/6/pdisk-26.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 6 PDiskId: 27 Path: "/6/pdisk-27.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 7 PDiskId: 28 Path: "/7/pdisk-28.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 7 PDiskId: 29 Path: "/7/pdisk-29.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 7 PDiskId: 30 Path: "/7/pdisk-30.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 7 PDiskId: 31 Path: "/7/pdisk-31.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 8 PDiskId: 32 Path: "/8/pdisk-32.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 8 PDiskId: 33 Path: "/8/pdisk-33.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 8 PDiskId: 34 Path: "/8/pdisk-34.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 8 PDiskId: 35 Path: "/8/pdisk-35.data" Guid: 1 DriveStatus: ACTIVE } VSlot { VSlotId { NodeId: 1 PDiskId: 4 VSlotId: 1000 } GroupGeneration: 1 } VSlot { VSlotId { NodeId: 1 PDiskId: 4 VSlotId: 1001 } GroupId: 1 GroupGeneration: 1 } VSlot { VSlotId { NodeId: 1 PDiskId: 4 VSlotId: 1002 } GroupId: 2 GroupGeneration: 1 } VSlot { VSlotId { NodeId: 1 PDiskId: 4 VSlotId: 1003 } GroupId: 3 GroupGeneration: 1 } VSlot { VSlotId { NodeId: 1 PDiskId: 5 VSlotId: 1000 } GroupId: 4 GroupGeneration: 1 } VSlot { VSlotId { NodeId: 1 PDiskId: 5 VSlotId: 1001 } GroupId: 5 GroupGeneration: 1 } VSlot { VSlotId { NodeId: 1 PDiskId: 5 VSlotId: 1002 } GroupId: 6 GroupGeneration: 1 } VSlot { VSlotId { NodeId: 1 PDiskId: 5 VSlotId: 1003 } GroupId: 7 GroupGeneration: 1 } VSlot { VSlotId { NodeId: 1 PDiskId: 6 VSlotId: 1000 } GroupId: 8 GroupGeneration: 1 } VSlot { VSlotId { NodeId: 1 PDiskId: 6 VSlotId: 1001 } GroupId: 9 GroupGeneration: 1 } VSlot { VSlotId { NodeId: 1 PDiskId: 6 VSlotId: 1002 } GroupId: 10 GroupGeneration: 1 } VSlot { VSlotId { NodeId: 1 PDiskId: 6 VSlotId: 1003 } GroupId: 11 GroupGeneration: 1 } VSlot { VSlotId { NodeId: 1 PDiskId: 7 VSlotId: 1000 } GroupId: 12 GroupGeneration: 1 } VSlot { VSlotId { NodeId: 1 PDiskId: 7 VSlotId: 1001 } GroupId: 13 GroupGeneration: 1 } VSlot { VSlotId { NodeId: 1 PDiskId: 7 VSlotId: 1002 } GroupId: 14 GroupGeneration: 1 } VSlot { VSlotId { NodeId: 1 PDiskId: 7 VSlotId: 1003 } GroupId: 15 GroupGeneration: 1 } VSlot { VSlotId { NodeId: 2 PDiskId: 8 VSlotId: 1000 } GroupGeneration: 1 FailDomainIdx: 1 } VSlot { VSlotId { NodeId: 
2 PDiskId: 8 VSlotId: 1001 } GroupId: 1 GroupGeneration: 1 FailDomainIdx: 1 } VSlot { VSlotId { NodeId: 2 PDiskId: 8 VSlotId: 1002 } GroupId: 2 GroupGeneration: 1 FailDomainIdx: 1 } VSlot { VSlotId { NodeId: 2 PDiskId: 8 VSlotId: 1003 } GroupId: 3 GroupGeneration: 1 FailDomainIdx: 1 } VSlot { VSlotId { NodeId: 2 PDiskId: 9 VSlotId: 1000 } GroupId: 4 GroupGeneration: 1 FailDomainIdx: 1 } VSlot { VSlotId { NodeId: 2 PDiskId: 9 VSlotId: 1001 } GroupId: 5 GroupGeneration: 1 FailDomainIdx: 1 } VSlot { VSlotId { NodeId: 2 PDiskId: 9 VSlotId: 1002 } GroupId: 6 GroupGeneration: 1 FailDomainIdx: 1 } VSlot { VSlotId { NodeId: 2 PDiskId: 9 VSlotId: 1003 } GroupId: 7 GroupGeneration: 1 FailDomainIdx: 1 } VSlot { VSlotId { NodeId: 2 PDiskId: 10 VSlotId: 1000 } GroupId: 8 GroupGeneration: 1 FailDomainIdx: 1 } VSlot { VSlotId { NodeId: 2 PDiskId: 10 VSlotId: 1001 } GroupId: 9 GroupGeneration: 1 FailDomainIdx: 1 } VSlot { VSlotId { NodeId: 2 PDiskId: 10 VSlotId: 1002 } GroupId: 10 GroupGeneration: 1 FailDomainIdx: 1 } VSlot { VSlotId { NodeId: 2 PDiskId: 10 VSlotId: 1003 } GroupId: 11 GroupGeneration: 1 FailDomainIdx: 1 } VSlot { VSlotId { NodeId: 2 PDiskId: 11 VSlotId: 1000 } GroupId: 12 GroupGeneration: 1 FailDomainIdx: 1 } VSlot { VSlotId { NodeId: 2 PDiskId: 11 VSlotId: 1001 } GroupId: 13 GroupGeneration: 1 FailDomainIdx: 1 } VSlot { VSlotId { NodeId: 2 PDiskId: 11 VSlotId: 1002 } GroupId: 14 GroupGeneration: 1 FailDomainIdx: 1 } VSlot { VSlotId { NodeId: 2 PDiskId: 11 VSlotId: 1003 } GroupId: 15 GroupGeneration: 1 FailDomainIdx: 1 } VSlot { VSlotId { NodeId: 3 PDiskId: 12 VSlotId: 1000 } GroupGeneration: 1 FailDomainIdx: 2 } VSlot { VSlotId { NodeId: 3 PDiskId: 12 VSlotId: 1001 } GroupId: 1 GroupGeneration: 1 FailDomainIdx: 2 } VSlot { VSlotId { NodeId: 3 PDiskId: 12 VSlotId: 1002 } GroupId: 2 GroupGeneration: 1 FailDomainIdx: 2 } VSlot { VSlotId { NodeId: 3 PDiskId: 12 VSlotId: 1003 } GroupId: 3 GroupGeneration: 1 FailDomainIdx: 2 } VSlot { VSlotId { NodeId: 3 PDiskId: 13 VSlotId: 1000 } GroupId: 4 GroupGeneration: 1 FailDomainIdx: 2 } VSlot { VSlotId { NodeId: 3 PDiskId: 13 VSlotId: 1001 } GroupId: 5 GroupGeneration: 1 FailDomainIdx: 2 } VSlot { VSlotId { NodeId: 3 PDiskId: 13 VSlotId: 1002 } GroupId: 6 GroupGeneration: 1 FailDomainIdx: 2 } VSlot { VSlotId { NodeId: 3 PDiskId: 13 VSlotId: 1003 } GroupId: 7 GroupGeneration: 1 FailDomainIdx: 2 } VSlot { VSlotId { NodeId: 3 PDiskId: 14 VSlotId: 1000 } GroupId: 8 GroupGeneration: 1 FailDomainIdx: 2 } VSlot { VSlotId { NodeId: 3 PDiskId: 14 VSlotId: 1001 } GroupId: 9 GroupGeneration: 1 FailDomainIdx: 2 } VSlot { VSlotId { NodeId: 3 PDiskId: 14 VSlotId: 1002 } GroupId: 10 GroupGeneration: 1 FailDomainIdx: 2 } VSlot { VSlotId { NodeId: 3 PDiskId: 14 VSlotId: 1003 } GroupId: 11 GroupGeneration: 1 FailDomainIdx: 2 } VSlot { VSlotId { NodeId: 3 PDiskId: 15 VSlotId: 1000 } GroupId: 12 GroupGeneration: 1 FailDomainIdx: 2 } VSlot { VSlotId { NodeId: 3 PDiskId: 15 VSlotId: 1001 } GroupId: 13 GroupGeneration: 1 FailDomainIdx: 2 } VSlot { VSlotId { NodeId: 3 PDiskId: 15 VSlotId: 1002 } GroupId: 14 GroupGeneration: 1 FailDomainIdx: 2 } VSlot { VSlotId { NodeId: 3 PDiskId: 15 VSlotId: 1003 } GroupId: 15 GroupGeneration: 1 FailDomainIdx: 2 } VSlot { VSlotId { NodeId: 4 PDiskId: 16 VSlotId: 1000 } GroupGeneration: 1 FailDomainIdx: 3 } VSlot { VSlotId { NodeId: 4 PDiskId: 16 VSlotId: 1001 } GroupId: 1 GroupGeneration: 1 FailDomainIdx: 3 } VSlot { VSlotId { NodeId: 4 PDiskId: 16 VSlotId: 1002 } GroupId: 2 GroupGeneration: 1 FailDomainIdx: 3 } VSlot { VSlotId { NodeId: 4 
PDiskId: 16 VSlotId: 1003 } GroupId: 3 GroupGeneration: 1 FailDomainIdx: 3 } VSlot { VSlotId { NodeId: 4 PDiskId: 17 VSlotId: 1000 } GroupId: 4 GroupGeneration: 1 FailDomainIdx: 3 } VSlot { VSlotId { NodeId: 4 PDiskId: 17 VSlotId: 1001 } GroupId: 5 GroupGeneration: 1 FailDomainIdx: 3 } VSlot { VSlotId { NodeId: 4 PDiskId: 17 VSlotId: 100 ... 1 AvailableSize: 107374182400 TotalSize: 214748364800 State: Normal } PDiskStateInfo { PDiskId: 45 CreateTime: 0 ChangeTime: 0 Path: "/11/pdisk-45.data" Guid: 1 AvailableSize: 107374182400 TotalSize: 214748364800 State: Normal } PDiskStateInfo { PDiskId: 46 CreateTime: 0 ChangeTime: 0 Path: "/11/pdisk-46.data" Guid: 1 AvailableSize: 107374182400 TotalSize: 214748364800 State: Normal } PDiskStateInfo { PDiskId: 47 CreateTime: 0 ChangeTime: 0 Path: "/11/pdisk-47.data" Guid: 1 AvailableSize: 107374182400 TotalSize: 214748364800 State: Normal } ResponseTime: 7740110 2026-01-08T15:35:19.701002Z node 9 :CMS DEBUG: sentinel.cpp:796: [Sentinel] [StateUpdater] Handle TEvWhiteboard::TEvPDiskStateResponse: nodeId# 12, response# PDiskStateInfo { PDiskId: 48 CreateTime: 0 ChangeTime: 0 Path: "/12/pdisk-48.data" Guid: 1 AvailableSize: 107374182400 TotalSize: 214748364800 State: Normal } PDiskStateInfo { PDiskId: 49 CreateTime: 0 ChangeTime: 0 Path: "/12/pdisk-49.data" Guid: 1 AvailableSize: 107374182400 TotalSize: 214748364800 State: Normal } PDiskStateInfo { PDiskId: 50 CreateTime: 0 ChangeTime: 0 Path: "/12/pdisk-50.data" Guid: 1 AvailableSize: 107374182400 TotalSize: 214748364800 State: Normal } PDiskStateInfo { PDiskId: 51 CreateTime: 0 ChangeTime: 0 Path: "/12/pdisk-51.data" Guid: 1 AvailableSize: 107374182400 TotalSize: 214748364800 State: Normal } ResponseTime: 7740110 2026-01-08T15:35:19.701123Z node 9 :CMS DEBUG: sentinel.cpp:796: [Sentinel] [StateUpdater] Handle TEvWhiteboard::TEvPDiskStateResponse: nodeId# 13, response# PDiskStateInfo { PDiskId: 52 CreateTime: 0 ChangeTime: 0 Path: "/13/pdisk-52.data" Guid: 1 AvailableSize: 107374182400 TotalSize: 214748364800 State: Normal } PDiskStateInfo { PDiskId: 53 CreateTime: 0 ChangeTime: 0 Path: "/13/pdisk-53.data" Guid: 1 AvailableSize: 107374182400 TotalSize: 214748364800 State: Normal } PDiskStateInfo { PDiskId: 54 CreateTime: 0 ChangeTime: 0 Path: "/13/pdisk-54.data" Guid: 1 AvailableSize: 107374182400 TotalSize: 214748364800 State: Normal } PDiskStateInfo { PDiskId: 55 CreateTime: 0 ChangeTime: 0 Path: "/13/pdisk-55.data" Guid: 1 AvailableSize: 107374182400 TotalSize: 214748364800 State: Normal } ResponseTime: 7740110 2026-01-08T15:35:19.701244Z node 9 :CMS DEBUG: sentinel.cpp:796: [Sentinel] [StateUpdater] Handle TEvWhiteboard::TEvPDiskStateResponse: nodeId# 10, response# PDiskStateInfo { PDiskId: 40 CreateTime: 0 ChangeTime: 0 Path: "/10/pdisk-40.data" Guid: 1 AvailableSize: 107374182400 TotalSize: 214748364800 State: Normal } PDiskStateInfo { PDiskId: 41 CreateTime: 0 ChangeTime: 0 Path: "/10/pdisk-41.data" Guid: 1 AvailableSize: 107374182400 TotalSize: 214748364800 State: Normal } PDiskStateInfo { PDiskId: 42 CreateTime: 0 ChangeTime: 0 Path: "/10/pdisk-42.data" Guid: 1 AvailableSize: 107374182400 TotalSize: 214748364800 State: Normal } PDiskStateInfo { PDiskId: 43 CreateTime: 0 ChangeTime: 0 Path: "/10/pdisk-43.data" Guid: 1 AvailableSize: 107374182400 TotalSize: 214748364800 State: Normal } ResponseTime: 7740110 2026-01-08T15:35:19.701298Z node 9 :CMS DEBUG: sentinel.cpp:1068: [Sentinel] [Main] State was updated in 0.000000s 2026-01-08T15:35:19.712214Z node 9 :CMS DEBUG: sentinel.cpp:1063: [Sentinel] 
[Main] UpdateState 2026-01-08T15:35:19.712282Z node 9 :CMS DEBUG: sentinel.cpp:992: [Sentinel] [Main] Start StateUpdater 2026-01-08T15:35:19.712387Z node 9 :CMS DEBUG: sentinel.cpp:786: [Sentinel] [StateUpdater] Request pdisks state: nodeId# 9, wbId# [9:8388350642965737326:1634689637] 2026-01-08T15:35:19.712434Z node 9 :CMS DEBUG: sentinel.cpp:786: [Sentinel] [StateUpdater] Request pdisks state: nodeId# 10, wbId# [10:8388350642965737326:1634689637] 2026-01-08T15:35:19.712469Z node 9 :CMS DEBUG: sentinel.cpp:786: [Sentinel] [StateUpdater] Request pdisks state: nodeId# 11, wbId# [11:8388350642965737326:1634689637] 2026-01-08T15:35:19.712503Z node 9 :CMS DEBUG: sentinel.cpp:786: [Sentinel] [StateUpdater] Request pdisks state: nodeId# 12, wbId# [12:8388350642965737326:1634689637] 2026-01-08T15:35:19.712533Z node 9 :CMS DEBUG: sentinel.cpp:786: [Sentinel] [StateUpdater] Request pdisks state: nodeId# 13, wbId# [13:8388350642965737326:1634689637] 2026-01-08T15:35:19.712563Z node 9 :CMS DEBUG: sentinel.cpp:786: [Sentinel] [StateUpdater] Request pdisks state: nodeId# 14, wbId# [14:8388350642965737326:1634689637] 2026-01-08T15:35:19.712595Z node 9 :CMS DEBUG: sentinel.cpp:786: [Sentinel] [StateUpdater] Request pdisks state: nodeId# 15, wbId# [15:8388350642965737326:1634689637] 2026-01-08T15:35:19.712630Z node 9 :CMS DEBUG: sentinel.cpp:786: [Sentinel] [StateUpdater] Request pdisks state: nodeId# 16, wbId# [16:8388350642965737326:1634689637] 2026-01-08T15:35:19.713056Z node 9 :CMS DEBUG: sentinel.cpp:796: [Sentinel] [StateUpdater] Handle TEvWhiteboard::TEvPDiskStateResponse: nodeId# 9, response# PDiskStateInfo { PDiskId: 36 CreateTime: 0 ChangeTime: 0 Path: "/9/pdisk-36.data" Guid: 1 AvailableSize: 107374182400 TotalSize: 214748364800 State: Normal } PDiskStateInfo { PDiskId: 37 CreateTime: 0 ChangeTime: 0 Path: "/9/pdisk-37.data" Guid: 1 AvailableSize: 107374182400 TotalSize: 214748364800 State: Normal } PDiskStateInfo { PDiskId: 38 CreateTime: 0 ChangeTime: 0 Path: "/9/pdisk-38.data" Guid: 1 AvailableSize: 107374182400 TotalSize: 214748364800 State: Normal } PDiskStateInfo { PDiskId: 39 CreateTime: 0 ChangeTime: 0 Path: "/9/pdisk-39.data" Guid: 1 AvailableSize: 107374182400 TotalSize: 214748364800 State: Normal } ResponseTime: 7800110 2026-01-08T15:35:19.713556Z node 9 :CMS DEBUG: sentinel.cpp:796: [Sentinel] [StateUpdater] Handle TEvWhiteboard::TEvPDiskStateResponse: nodeId# 16, response# PDiskStateInfo { PDiskId: 64 CreateTime: 0 ChangeTime: 0 Path: "/16/pdisk-64.data" Guid: 1 AvailableSize: 107374182400 TotalSize: 214748364800 State: Normal } PDiskStateInfo { PDiskId: 65 CreateTime: 0 ChangeTime: 0 Path: "/16/pdisk-65.data" Guid: 1 AvailableSize: 107374182400 TotalSize: 214748364800 State: Normal } PDiskStateInfo { PDiskId: 66 CreateTime: 0 ChangeTime: 0 Path: "/16/pdisk-66.data" Guid: 1 AvailableSize: 107374182400 TotalSize: 214748364800 State: Normal } PDiskStateInfo { PDiskId: 67 CreateTime: 0 ChangeTime: 0 Path: "/16/pdisk-67.data" Guid: 1 AvailableSize: 107374182400 TotalSize: 214748364800 State: Normal } ResponseTime: 7800110 2026-01-08T15:35:19.713846Z node 9 :CMS DEBUG: sentinel.cpp:796: [Sentinel] [StateUpdater] Handle TEvWhiteboard::TEvPDiskStateResponse: nodeId# 11, response# PDiskStateInfo { PDiskId: 44 CreateTime: 0 ChangeTime: 0 Path: "/11/pdisk-44.data" Guid: 1 AvailableSize: 107374182400 TotalSize: 214748364800 State: Normal } PDiskStateInfo { PDiskId: 45 CreateTime: 0 ChangeTime: 0 Path: "/11/pdisk-45.data" Guid: 1 AvailableSize: 107374182400 TotalSize: 214748364800 State: 
Normal } PDiskStateInfo { PDiskId: 46 CreateTime: 0 ChangeTime: 0 Path: "/11/pdisk-46.data" Guid: 1 AvailableSize: 107374182400 TotalSize: 214748364800 State: Normal } PDiskStateInfo { PDiskId: 47 CreateTime: 0 ChangeTime: 0 Path: "/11/pdisk-47.data" Guid: 1 AvailableSize: 107374182400 TotalSize: 214748364800 State: Normal } ResponseTime: 7800110 2026-01-08T15:35:19.714006Z node 9 :CMS DEBUG: sentinel.cpp:796: [Sentinel] [StateUpdater] Handle TEvWhiteboard::TEvPDiskStateResponse: nodeId# 12, response# PDiskStateInfo { PDiskId: 48 CreateTime: 0 ChangeTime: 0 Path: "/12/pdisk-48.data" Guid: 1 AvailableSize: 107374182400 TotalSize: 214748364800 State: Normal } PDiskStateInfo { PDiskId: 49 CreateTime: 0 ChangeTime: 0 Path: "/12/pdisk-49.data" Guid: 1 AvailableSize: 107374182400 TotalSize: 214748364800 State: Normal } PDiskStateInfo { PDiskId: 50 CreateTime: 0 ChangeTime: 0 Path: "/12/pdisk-50.data" Guid: 1 AvailableSize: 107374182400 TotalSize: 214748364800 State: Normal } PDiskStateInfo { PDiskId: 51 CreateTime: 0 ChangeTime: 0 Path: "/12/pdisk-51.data" Guid: 1 AvailableSize: 107374182400 TotalSize: 214748364800 State: Normal } ResponseTime: 7800110 2026-01-08T15:35:19.714143Z node 9 :CMS DEBUG: sentinel.cpp:796: [Sentinel] [StateUpdater] Handle TEvWhiteboard::TEvPDiskStateResponse: nodeId# 13, response# PDiskStateInfo { PDiskId: 52 CreateTime: 0 ChangeTime: 0 Path: "/13/pdisk-52.data" Guid: 1 AvailableSize: 107374182400 TotalSize: 214748364800 State: Normal } PDiskStateInfo { PDiskId: 53 CreateTime: 0 ChangeTime: 0 Path: "/13/pdisk-53.data" Guid: 1 AvailableSize: 107374182400 TotalSize: 214748364800 State: Normal } PDiskStateInfo { PDiskId: 54 CreateTime: 0 ChangeTime: 0 Path: "/13/pdisk-54.data" Guid: 1 AvailableSize: 107374182400 TotalSize: 214748364800 State: Normal } PDiskStateInfo { PDiskId: 55 CreateTime: 0 ChangeTime: 0 Path: "/13/pdisk-55.data" Guid: 1 AvailableSize: 107374182400 TotalSize: 214748364800 State: Normal } ResponseTime: 7800110 2026-01-08T15:35:19.714266Z node 9 :CMS DEBUG: sentinel.cpp:796: [Sentinel] [StateUpdater] Handle TEvWhiteboard::TEvPDiskStateResponse: nodeId# 14, response# PDiskStateInfo { PDiskId: 56 CreateTime: 0 ChangeTime: 0 Path: "/14/pdisk-56.data" Guid: 1 AvailableSize: 107374182400 TotalSize: 214748364800 State: Normal } PDiskStateInfo { PDiskId: 57 CreateTime: 0 ChangeTime: 0 Path: "/14/pdisk-57.data" Guid: 1 AvailableSize: 107374182400 TotalSize: 214748364800 State: Normal } PDiskStateInfo { PDiskId: 58 CreateTime: 0 ChangeTime: 0 Path: "/14/pdisk-58.data" Guid: 1 AvailableSize: 107374182400 TotalSize: 214748364800 State: Normal } PDiskStateInfo { PDiskId: 59 CreateTime: 0 ChangeTime: 0 Path: "/14/pdisk-59.data" Guid: 1 AvailableSize: 107374182400 TotalSize: 214748364800 State: Normal } ResponseTime: 7800110 2026-01-08T15:35:19.714415Z node 9 :CMS DEBUG: sentinel.cpp:796: [Sentinel] [StateUpdater] Handle TEvWhiteboard::TEvPDiskStateResponse: nodeId# 15, response# PDiskStateInfo { PDiskId: 60 CreateTime: 0 ChangeTime: 0 Path: "/15/pdisk-60.data" Guid: 1 AvailableSize: 107374182400 TotalSize: 214748364800 State: Normal } PDiskStateInfo { PDiskId: 61 CreateTime: 0 ChangeTime: 0 Path: "/15/pdisk-61.data" Guid: 1 AvailableSize: 107374182400 TotalSize: 214748364800 State: Normal } PDiskStateInfo { PDiskId: 62 CreateTime: 0 ChangeTime: 0 Path: "/15/pdisk-62.data" Guid: 1 AvailableSize: 107374182400 TotalSize: 214748364800 State: Normal } PDiskStateInfo { PDiskId: 63 CreateTime: 0 ChangeTime: 0 Path: "/15/pdisk-63.data" Guid: 1 AvailableSize: 107374182400 
TotalSize: 214748364800 State: Normal } ResponseTime: 7800110 2026-01-08T15:35:19.714547Z node 9 :CMS DEBUG: sentinel.cpp:796: [Sentinel] [StateUpdater] Handle TEvWhiteboard::TEvPDiskStateResponse: nodeId# 10, response# PDiskStateInfo { PDiskId: 40 CreateTime: 0 ChangeTime: 0 Path: "/10/pdisk-40.data" Guid: 1 AvailableSize: 107374182400 TotalSize: 214748364800 State: Normal } PDiskStateInfo { PDiskId: 41 CreateTime: 0 ChangeTime: 0 Path: "/10/pdisk-41.data" Guid: 1 AvailableSize: 107374182400 TotalSize: 214748364800 State: Normal } PDiskStateInfo { PDiskId: 42 CreateTime: 0 ChangeTime: 0 Path: "/10/pdisk-42.data" Guid: 1 AvailableSize: 107374182400 TotalSize: 214748364800 State: Normal } PDiskStateInfo { PDiskId: 43 CreateTime: 0 ChangeTime: 0 Path: "/10/pdisk-43.data" Guid: 1 AvailableSize: 107374182400 TotalSize: 214748364800 State: Normal } ResponseTime: 7800110 2026-01-08T15:35:19.714604Z node 9 :CMS DEBUG: sentinel.cpp:1068: [Sentinel] [Main] State was updated in 0.000000s 2026-01-08T15:35:19.715053Z node 9 :CMS NOTICE: sentinel.cpp:1147: [Sentinel] [Main] PDisk status changed: pdiskId# 13:53, status# INACTIVE, required status# ACTIVE, reason# PrevState# Normal State# Normal StateCounter# 60 StateLimit# 60, dry run# 0 2026-01-08T15:35:19.715140Z node 9 :CMS DEBUG: sentinel.cpp:1212: [Sentinel] [Main] Change pdisk status: requestsSize# 1 2026-01-08T15:35:19.715397Z node 9 :CMS DEBUG: sentinel.cpp:1343: [Sentinel] [Main] Handle TEvBlobStorage::TEvControllerConfigResponse: response# Status { Success: true } Success: true, cookie# 6 2026-01-08T15:35:19.715446Z node 9 :CMS NOTICE: sentinel.cpp:1367: [Sentinel] [Main] PDisk status has been changed: pdiskId# 13:53 |87.7%| [TM] {BAZEL_UPLOAD} ydb/core/cms/ut_sentinel/unittest >> BasicUsage::ConflictingWrites [GOOD] >> CdcStreamChangeCollector::InsertSingleRow [GOOD] >> CdcStreamChangeCollector::InsertSingleUuidRow >> CdcStreamChangeCollector::UpsertManyRows [GOOD] >> CdcStreamChangeCollector::UpsertToSameKey >> BsControllerConfig::AddDriveSerial [GOOD] >> BsControllerConfig::AddDriveSerialMassive >> TContinuousBackupWithRebootsTests::Basic [GOOD] >> CdcStreamChangeCollector::UpsertIntoTwoStreams [GOOD] >> CdcStreamChangeCollector::PageFaults ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_resource_pool/unittest >> TResourcePoolTest::ParallelAlterResourcePool [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2143] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2143] Leader for TabletID 72057594046678944 is [1:129:2154] sender: [1:130:2058] recipient: [1:112:2143] 2026-01-08T15:35:20.854823Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7938: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2026-01-08T15:35:20.854916Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7966: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2026-01-08T15:35:20.854960Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7852: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2026-01-08T15:35:20.854995Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7868: OperationsProcessing config: using default 
configuration 2026-01-08T15:35:20.855074Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7874: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2026-01-08T15:35:20.855121Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7874: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2026-01-08T15:35:20.855187Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7998: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2026-01-08T15:35:20.855247Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_shred_manager.cpp:39: [RootShredManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, ShredInterval# 604800.000000s, ShredBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2026-01-08T15:35:20.856198Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:8069: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2026-01-08T15:35:20.856518Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2026-01-08T15:35:20.955142Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7755: Cannot subscribe to console configs 2026-01-08T15:35:20.955216Z node 1 :IMPORT WARN: schemeshard_import.cpp:440: Table profiles were not loaded 2026-01-08T15:35:20.984331Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2026-01-08T15:35:20.984725Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2026-01-08T15:35:20.984932Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2026-01-08T15:35:21.035746Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2026-01-08T15:35:21.036254Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:464: Clear TempDirsState with owners number: 0 2026-01-08T15:35:21.037082Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1383: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2026-01-08T15:35:21.042645Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2026-01-08T15:35:21.058550Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:160: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2026-01-08T15:35:21.058810Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_shred_manager.cpp:84: [RootShredManager] Stop 2026-01-08T15:35:21.060107Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2026-01-08T15:35:21.060179Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2026-01-08T15:35:21.060302Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2026-01-08T15:35:21.060348Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2026-01-08T15:35:21.060497Z node 1 :FLAT_TX_SCHEMESHARD 
DEBUG: schemeshard__serverless_storage_billing.cpp:213: TTxServerlessStorageBilling.Complete 2026-01-08T15:35:21.060669Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:7111: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2026-01-08T15:35:21.284333Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot" OperationType: ESchemeOpMkDir MkDir { Name: ".sys" } Internal: true FailOnExist: false } TxId: 281474976710657 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:35:21.288225Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "auth_permissions" Type: EAuthPermissions } } TxId: 281474976710658 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:35:21.288451Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "auth_effective_permissions" Type: EAuthEffectivePermissions } } TxId: 281474976710659 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:35:21.288549Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "auth_users" Type: EAuthUsers } } TxId: 281474976710660 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:35:21.288623Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "pg_tables" Type: EPgTables } } TxId: 281474976710661 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:35:21.288686Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "ds_pdisks" Type: EPDisks } } TxId: 281474976710662 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:35:21.288746Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "auth_group_members" Type: EAuthGroupMembers } } TxId: 281474976710663 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:35:21.288808Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "top_partitions_one_minute" Type: ETopPartitionsByCpuOneMinute } } TxId: 281474976710664 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:35:21.288932Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "top_queries_by_request_units_one_hour" Type: ETopQueriesByRequestUnitsOneHour } } TxId: 281474976710665 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:35:21.289025Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "ds_vslots" Type: EVSlots } } TxId: 281474976710666 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:35:21.289095Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "top_partitions_by_tli_one_hour" Type: ETopPartitionsByTliOneHour } } TxId: 281474976710667 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:35:21.289167Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "nodes" Type: ENodes } } TxId: 281474976710668 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:35:21.289265Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "top_queries_by_cpu_time_one_hour" Type: ETopQueriesByCpuTimeOneHour } } TxId: 281474976710669 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:35:21.289347Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "top_queries_by_read_bytes_one_hour" Type: ETopQueriesByReadBytesOneHour } } TxId: 281474976710670 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:35:21.289436Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: 
Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "top_queries_by_read_bytes_one_minute" Type: ETopQueriesByR ... pp:230: tests -- TTxNotificationSubscriber for txId 114: got EvNotifyTxCompletionResult 2026-01-08T15:35:22.698964Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:239: tests -- TTxNotificationSubscriber for txId 114: satisfy waiter [1:835:2825] 2026-01-08T15:35:22.699097Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:123: NotifyTxCompletion, unknown transaction, txId: 121, at schemeshard: 72057594046678944 2026-01-08T15:35:22.699144Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:230: tests -- TTxNotificationSubscriber for txId 115: got EvNotifyTxCompletionResult 2026-01-08T15:35:22.699164Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:239: tests -- TTxNotificationSubscriber for txId 115: satisfy waiter [1:835:2825] 2026-01-08T15:35:22.699237Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:123: NotifyTxCompletion, unknown transaction, txId: 122, at schemeshard: 72057594046678944 2026-01-08T15:35:22.699352Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:123: NotifyTxCompletion, unknown transaction, txId: 123, at schemeshard: 72057594046678944 2026-01-08T15:35:22.699391Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:230: tests -- TTxNotificationSubscriber for txId 116: got EvNotifyTxCompletionResult 2026-01-08T15:35:22.699411Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:239: tests -- TTxNotificationSubscriber for txId 116: satisfy waiter [1:835:2825] 2026-01-08T15:35:22.699548Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:123: NotifyTxCompletion, unknown transaction, txId: 124, at schemeshard: 72057594046678944 2026-01-08T15:35:22.699647Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:123: NotifyTxCompletion, unknown transaction, txId: 125, at schemeshard: 72057594046678944 2026-01-08T15:35:22.699709Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:230: tests -- TTxNotificationSubscriber for txId 117: got EvNotifyTxCompletionResult 2026-01-08T15:35:22.699730Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:239: tests -- TTxNotificationSubscriber for txId 117: satisfy waiter [1:835:2825] 2026-01-08T15:35:22.699878Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:123: NotifyTxCompletion, unknown transaction, txId: 126, at schemeshard: 72057594046678944 2026-01-08T15:35:22.699933Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:230: tests -- TTxNotificationSubscriber for txId 118: got EvNotifyTxCompletionResult 2026-01-08T15:35:22.699954Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:239: tests -- TTxNotificationSubscriber for txId 118: satisfy waiter [1:835:2825] 2026-01-08T15:35:22.700056Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:230: tests -- TTxNotificationSubscriber for txId 119: got EvNotifyTxCompletionResult 2026-01-08T15:35:22.700082Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:239: tests -- TTxNotificationSubscriber for txId 119: satisfy waiter [1:835:2825] 2026-01-08T15:35:22.700151Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:123: NotifyTxCompletion, unknown transaction, txId: 127, at schemeshard: 72057594046678944 2026-01-08T15:35:22.700254Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:230: tests -- TTxNotificationSubscriber for txId 120: got EvNotifyTxCompletionResult 2026-01-08T15:35:22.700279Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:239: tests -- 
TTxNotificationSubscriber for txId 120: satisfy waiter [1:835:2825] 2026-01-08T15:35:22.700320Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:123: NotifyTxCompletion, unknown transaction, txId: 128, at schemeshard: 72057594046678944 2026-01-08T15:35:22.700403Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:230: tests -- TTxNotificationSubscriber for txId 121: got EvNotifyTxCompletionResult 2026-01-08T15:35:22.700423Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:239: tests -- TTxNotificationSubscriber for txId 121: satisfy waiter [1:835:2825] 2026-01-08T15:35:22.700541Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:230: tests -- TTxNotificationSubscriber for txId 122: got EvNotifyTxCompletionResult 2026-01-08T15:35:22.700578Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:239: tests -- TTxNotificationSubscriber for txId 122: satisfy waiter [1:835:2825] 2026-01-08T15:35:22.700703Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:123: NotifyTxCompletion, unknown transaction, txId: 129, at schemeshard: 72057594046678944 2026-01-08T15:35:22.700758Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:230: tests -- TTxNotificationSubscriber for txId 123: got EvNotifyTxCompletionResult 2026-01-08T15:35:22.700778Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:239: tests -- TTxNotificationSubscriber for txId 123: satisfy waiter [1:835:2825] 2026-01-08T15:35:22.700814Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:123: NotifyTxCompletion, unknown transaction, txId: 130, at schemeshard: 72057594046678944 2026-01-08T15:35:22.700897Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:230: tests -- TTxNotificationSubscriber for txId 124: got EvNotifyTxCompletionResult 2026-01-08T15:35:22.700934Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:239: tests -- TTxNotificationSubscriber for txId 124: satisfy waiter [1:835:2825] 2026-01-08T15:35:22.700984Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:123: NotifyTxCompletion, unknown transaction, txId: 131, at schemeshard: 72057594046678944 2026-01-08T15:35:22.701063Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:230: tests -- TTxNotificationSubscriber for txId 125: got EvNotifyTxCompletionResult 2026-01-08T15:35:22.701088Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:239: tests -- TTxNotificationSubscriber for txId 125: satisfy waiter [1:835:2825] 2026-01-08T15:35:22.701186Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:230: tests -- TTxNotificationSubscriber for txId 126: got EvNotifyTxCompletionResult 2026-01-08T15:35:22.701208Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:239: tests -- TTxNotificationSubscriber for txId 126: satisfy waiter [1:835:2825] 2026-01-08T15:35:22.701257Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:123: NotifyTxCompletion, unknown transaction, txId: 132, at schemeshard: 72057594046678944 2026-01-08T15:35:22.701376Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:230: tests -- TTxNotificationSubscriber for txId 127: got EvNotifyTxCompletionResult 2026-01-08T15:35:22.701400Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:239: tests -- TTxNotificationSubscriber for txId 127: satisfy waiter [1:835:2825] 2026-01-08T15:35:22.701490Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:230: tests -- TTxNotificationSubscriber for txId 128: got EvNotifyTxCompletionResult 2026-01-08T15:35:22.701526Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:239: tests -- TTxNotificationSubscriber for txId 128: satisfy waiter [1:835:2825] 2026-01-08T15:35:22.701696Z node 1 
:FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:230: tests -- TTxNotificationSubscriber for txId 129: got EvNotifyTxCompletionResult
2026-01-08T15:35:22.701724Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:239: tests -- TTxNotificationSubscriber for txId 129: satisfy waiter [1:835:2825]
2026-01-08T15:35:22.701822Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:230: tests -- TTxNotificationSubscriber for txId 130: got EvNotifyTxCompletionResult
2026-01-08T15:35:22.701851Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:239: tests -- TTxNotificationSubscriber for txId 130: satisfy waiter [1:835:2825]
2026-01-08T15:35:22.701940Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:230: tests -- TTxNotificationSubscriber for txId 131: got EvNotifyTxCompletionResult
2026-01-08T15:35:22.701959Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:239: tests -- TTxNotificationSubscriber for txId 131: satisfy waiter [1:835:2825]
2026-01-08T15:35:22.702063Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:230: tests -- TTxNotificationSubscriber for txId 132: got EvNotifyTxCompletionResult
2026-01-08T15:35:22.726425Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:239: tests -- TTxNotificationSubscriber for txId 132: satisfy waiter [1:835:2825]
TestWaitNotification: OK eventTxId 103
TestWaitNotification: OK eventTxId 104
TestWaitNotification: OK eventTxId 105
TestWaitNotification: OK eventTxId 106
TestWaitNotification: OK eventTxId 107
TestWaitNotification: OK eventTxId 108
TestWaitNotification: OK eventTxId 109
TestWaitNotification: OK eventTxId 110
TestWaitNotification: OK eventTxId 111
TestWaitNotification: OK eventTxId 112
TestWaitNotification: OK eventTxId 113
TestWaitNotification: OK eventTxId 114
TestWaitNotification: OK eventTxId 115
TestWaitNotification: OK eventTxId 116
TestWaitNotification: OK eventTxId 117
TestWaitNotification: OK eventTxId 118
TestWaitNotification: OK eventTxId 119
TestWaitNotification: OK eventTxId 120
TestWaitNotification: OK eventTxId 121
TestWaitNotification: OK eventTxId 122
TestWaitNotification: OK eventTxId 123
TestWaitNotification: OK eventTxId 124
TestWaitNotification: OK eventTxId 125
TestWaitNotification: OK eventTxId 126
TestWaitNotification: OK eventTxId 127
TestWaitNotification: OK eventTxId 128
TestWaitNotification: OK eventTxId 129
TestWaitNotification: OK eventTxId 130
TestWaitNotification: OK eventTxId 131
TestWaitNotification: OK eventTxId 132
2026-01-08T15:35:22.730573Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/.metadata/workload_manager/pools/MyResourcePool" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944
2026-01-08T15:35:22.730850Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/.metadata/workload_manager/pools/MyResourcePool" took 294us result status StatusSuccess
2026-01-08T15:35:22.731346Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/.metadata/workload_manager/pools/MyResourcePool" PathDescription { Self { Name: "MyResourcePool" PathId: 41 SchemeshardId: 72057594046678944 PathType: EPathTypeResourcePool CreateFinished: true CreateTxId: 102 CreateStep: 5000039 ParentPathId: 40 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 ResourcePoolVersion: 2 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 40 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } ResourcePoolDescription { Name: "MyResourcePool" PathId { OwnerId: 72057594046678944 LocalId: 41 } Version: 2 Properties { Properties { key: "concurrent_query_limit" value: "20" } Properties { key: "query_cancel_after_seconds" value: "60" } Properties { key: "query_count_limit" value: "50" } } } } PathId: 41 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944
|87.7%| [TM] {BAZEL_UPLOAD} ydb/core/tx/schemeshard/ut_resource_pool/unittest
>> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-LocalUser-CreateUser-17 [GOOD]
>> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-LocalUser-CreateUser-18
>> TResourcePoolTest::SchemeErrors [GOOD]
>> TResourcePoolTest::ParallelCreateSameResourcePool
|87.7%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/datashard/ut_upload_rows/ydb-core-tx-datashard-ut_upload_rows
|87.7%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/datashard/ut_upload_rows/ydb-core-tx-datashard-ut_upload_rows
|87.7%| [LD] {RESULT} $(B)/ydb/core/tx/datashard/ut_upload_rows/ydb-core-tx-datashard-ut_upload_rows
>> YdbIndexTable::MultiShardTableOneIndex [GOOD]
>> YdbIndexTable::MultiShardTableOneIndexDataColumn
------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_resource_pool/unittest >> TResourcePoolTest::SchemeErrors [GOOD]
Test command err:
Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2143]
IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2143]
Leader for TabletID 72057594046678944 is [1:129:2154] sender: [1:130:2058] recipient: [1:112:2143]
2026-01-08T15:35:22.189410Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7938: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1
2026-01-08T15:35:22.189502Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7966: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10
2026-01-08T15:35:22.189543Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7852: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s
2026-01-08T15:35:22.189584Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7868: OperationsProcessing config: using default configuration 2026-01-08T15:35:22.189645Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7874: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2026-01-08T15:35:22.189707Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7874: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2026-01-08T15:35:22.189782Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7998: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2026-01-08T15:35:22.189862Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_shred_manager.cpp:39: [RootShredManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, ShredInterval# 604800.000000s, ShredBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2026-01-08T15:35:22.199036Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:8069: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2026-01-08T15:35:22.199469Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2026-01-08T15:35:22.335064Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7755: Cannot subscribe to console configs 2026-01-08T15:35:22.335190Z node 1 :IMPORT WARN: schemeshard_import.cpp:440: Table profiles were not loaded 2026-01-08T15:35:22.361259Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2026-01-08T15:35:22.361613Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2026-01-08T15:35:22.361796Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2026-01-08T15:35:22.392569Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2026-01-08T15:35:22.393032Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:464: Clear TempDirsState with owners number: 0 2026-01-08T15:35:22.393765Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1383: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2026-01-08T15:35:22.397282Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2026-01-08T15:35:22.410744Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:160: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2026-01-08T15:35:22.410952Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_shred_manager.cpp:84: [RootShredManager] Stop 2026-01-08T15:35:22.412260Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2026-01-08T15:35:22.412342Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2026-01-08T15:35:22.412493Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2026-01-08T15:35:22.412542Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 
72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2026-01-08T15:35:22.412650Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:213: TTxServerlessStorageBilling.Complete 2026-01-08T15:35:22.412810Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:7111: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2026-01-08T15:35:22.846449Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot" OperationType: ESchemeOpMkDir MkDir { Name: ".sys" } Internal: true FailOnExist: false } TxId: 281474976710657 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:35:22.847438Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "auth_permissions" Type: EAuthPermissions } } TxId: 281474976710658 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:35:22.847579Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "auth_effective_permissions" Type: EAuthEffectivePermissions } } TxId: 281474976710659 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:35:22.847688Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "auth_users" Type: EAuthUsers } } TxId: 281474976710660 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:35:22.847770Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "pg_tables" Type: EPgTables } } TxId: 281474976710661 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:35:22.847832Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "ds_pdisks" Type: EPDisks } } TxId: 281474976710662 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:35:22.847896Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "auth_group_members" Type: EAuthGroupMembers } } TxId: 281474976710663 
Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:35:22.847972Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "top_partitions_one_minute" Type: ETopPartitionsByCpuOneMinute } } TxId: 281474976710664 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:35:22.848056Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "top_queries_by_request_units_one_hour" Type: ETopQueriesByRequestUnitsOneHour } } TxId: 281474976710665 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:35:22.848162Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "ds_vslots" Type: EVSlots } } TxId: 281474976710666 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:35:22.848231Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "top_partitions_by_tli_one_hour" Type: ETopPartitionsByTliOneHour } } TxId: 281474976710667 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:35:22.848301Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "nodes" Type: ENodes } } TxId: 281474976710668 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:35:22.848398Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "top_queries_by_cpu_time_one_hour" Type: ETopQueriesByCpuTimeOneHour } } TxId: 281474976710669 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:35:22.848519Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "top_queries_by_read_bytes_one_hour" Type: ETopQueriesByReadBytesOneHour } } TxId: 281474976710670 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:35:22.848595Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "top_queries_by_read_bytes_one_minute" Type: ETopQueriesByR ... 977Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:611: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 39] was 2 2026-01-08T15:35:23.971192Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6274: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 40 Version: 3 PathOwnerId: 72057594046678944, cookie: 124 2026-01-08T15:35:23.971276Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 40 Version: 3 PathOwnerId: 72057594046678944, cookie: 124 2026-01-08T15:35:23.971327Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046678944, txId: 124 2026-01-08T15:35:23.971357Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 124, pathId: [OwnerId: 72057594046678944, LocalPathId: 40], version: 3 2026-01-08T15:35:23.971427Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:611: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 40] was 1 2026-01-08T15:35:23.971518Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 124, subscribers: 0 2026-01-08T15:35:23.974906Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 124 2026-01-08T15:35:23.975054Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 124 2026-01-08T15:35:23.977198Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 124 2026-01-08T15:35:23.977327Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 124 TestModificationResult got TxId: 124, wait until txId: 124 TestWaitNotification wait txId: 124 2026-01-08T15:35:23.977578Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:213: tests -- TTxNotificationSubscriber for txId 124: send EvNotifyTxCompletion 2026-01-08T15:35:23.977656Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:259: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 124 2026-01-08T15:35:23.978022Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:123: NotifyTxCompletion, unknown transaction, txId: 124, at schemeshard: 72057594046678944 2026-01-08T15:35:23.978137Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:230: tests -- TTxNotificationSubscriber for txId 124: got EvNotifyTxCompletionResult 2026-01-08T15:35:23.978168Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:239: tests -- TTxNotificationSubscriber for txId 124: satisfy waiter 
[1:717:2707] TestWaitNotification: OK eventTxId 124 TestModificationResults wait txId: 125 2026-01-08T15:35:23.981662Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:377: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/MyRoot" OperationType: ESchemeOpCreateResourcePool CreateResourcePool { Name: "AnotherDir/MyResourcePool" } } TxId: 125 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2026-01-08T15:35:23.981987Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_mkdir.cpp:115: TMkDir Propose, path: /MyRoot/AnotherDir, operationId: 125:0, at schemeshard: 72057594046678944 2026-01-08T15:35:23.982187Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:442: AttachChild: child attached as only one child to the parent, parent id: [OwnerId: 72057594046678944, LocalPathId: 1], parent name: MyRoot, child name: AnotherDir, child id: [OwnerId: 72057594046678944, LocalPathId: 41], at schemeshard: 72057594046678944 2026-01-08T15:35:23.982260Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:600: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 41] was 0 2026-01-08T15:35:23.982331Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5553: CreateTx for txid 125:0 type: TxMkDir target path: [OwnerId: 72057594046678944, LocalPathId: 41] source path: 2026-01-08T15:35:23.982440Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:127: IgniteOperation, opId: 125:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2026-01-08T15:35:23.982513Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_create_resource_pool.cpp:148: [72057594046678944] TCreateResourcePool Propose: opId# 125:1, path# /MyRoot/AnotherDir/MyResourcePool 2026-01-08T15:35:23.982629Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:127: IgniteOperation, opId: 125:2, propose status:StatusSchemeError, reason: Resource pools shoud be placed in /MyRoot/.metadata/workload_manager/pools, at schemeshard: 72057594046678944 2026-01-08T15:35:23.985529Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:148: Abort operation: IgniteOperation fail to propose a part, opId: 125:1, at schemeshard: 72057594046678944, already accepted parts: 1, propose result status: StatusSchemeError, with reason: Resource pools shoud be placed in /MyRoot/.metadata/workload_manager/pools, tx message: Transaction { WorkingDir: "/MyRoot" OperationType: ESchemeOpCreateResourcePool CreateResourcePool { Name: "AnotherDir/MyResourcePool" } } TxId: 125 TabletId: 72057594046678944 2026-01-08T15:35:23.985756Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_mkdir.cpp:275: MkDir AbortPropose, opId: 125:0, at schemeshard: 72057594046678944 2026-01-08T15:35:23.991382Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:460: TTxOperationPropose Complete, txId: 125, response: Status: StatusSchemeError Reason: "Resource pools shoud be placed in /MyRoot/.metadata/workload_manager/pools" TxId: 125 SchemeshardId: 72057594046678944, at schemeshard: 72057594046678944 2026-01-08T15:35:23.991601Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 125, database: /MyRoot, subject: , status: StatusSchemeError, reason: Resource pools shoud be placed in /MyRoot/.metadata/workload_manager/pools, operation: CREATE RESOURCE POOL, path: AnotherDir/MyResourcePool TestModificationResult got TxId: 125, wait until txId: 125 TestModificationResults wait txId: 126 
2026-01-08T15:35:23.995068Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:377: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/MyRoot/.metadata/workload_manager/pools" OperationType: ESchemeOpCreateResourcePool CreateResourcePool { Name: "AnotherDir/MyResourcePool" } } TxId: 126 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2026-01-08T15:35:23.995502Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_mkdir.cpp:115: TMkDir Propose, path: /MyRoot/.metadata/workload_manager/pools/AnotherDir, operationId: 126:0, at schemeshard: 72057594046678944 2026-01-08T15:35:23.995728Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:442: AttachChild: child attached as only one child to the parent, parent id: [OwnerId: 72057594046678944, LocalPathId: 40], parent name: pools, child name: AnotherDir, child id: [OwnerId: 72057594046678944, LocalPathId: 42], at schemeshard: 72057594046678944 2026-01-08T15:35:23.995798Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:600: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 42] was 0 2026-01-08T15:35:23.995843Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5553: CreateTx for txid 126:0 type: TxMkDir target path: [OwnerId: 72057594046678944, LocalPathId: 42] source path: 2026-01-08T15:35:23.995934Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:127: IgniteOperation, opId: 126:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2026-01-08T15:35:23.996005Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_create_resource_pool.cpp:148: [72057594046678944] TCreateResourcePool Propose: opId# 126:1, path# /MyRoot/.metadata/workload_manager/pools/AnotherDir/MyResourcePool 2026-01-08T15:35:23.996169Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:127: IgniteOperation, opId: 126:2, propose status:StatusSchemeError, reason: Resource pools shoud be placed in /MyRoot/.metadata/workload_manager/pools, at schemeshard: 72057594046678944 2026-01-08T15:35:23.999467Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:148: Abort operation: IgniteOperation fail to propose a part, opId: 126:1, at schemeshard: 72057594046678944, already accepted parts: 1, propose result status: StatusSchemeError, with reason: Resource pools shoud be placed in /MyRoot/.metadata/workload_manager/pools, tx message: Transaction { WorkingDir: "/MyRoot/.metadata/workload_manager/pools" OperationType: ESchemeOpCreateResourcePool CreateResourcePool { Name: "AnotherDir/MyResourcePool" } } TxId: 126 TabletId: 72057594046678944 2026-01-08T15:35:23.999648Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_mkdir.cpp:275: MkDir AbortPropose, opId: 126:0, at schemeshard: 72057594046678944 2026-01-08T15:35:24.003545Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:460: TTxOperationPropose Complete, txId: 126, response: Status: StatusSchemeError Reason: "Resource pools shoud be placed in /MyRoot/.metadata/workload_manager/pools" TxId: 126 SchemeshardId: 72057594046678944, at schemeshard: 72057594046678944 2026-01-08T15:35:24.003876Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 126, database: /MyRoot, subject: , status: StatusSchemeError, reason: Resource pools shoud be placed in /MyRoot/.metadata/workload_manager/pools, operation: CREATE RESOURCE POOL, path: AnotherDir/MyResourcePool TestModificationResult got TxId: 126, wait until txId: 126 
TestModificationResults wait txId: 127
2026-01-08T15:35:24.007485Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:377: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/MyRoot/.metadata/workload_manager/pools" OperationType: ESchemeOpCreateResourcePool CreateResourcePool { Name: "" } } TxId: 127 TabletId: 72057594046678944 , at schemeshard: 72057594046678944
2026-01-08T15:35:24.007753Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_create_resource_pool.cpp:148: [72057594046678944] TCreateResourcePool Propose: opId# 127:0, path# /MyRoot/.metadata/workload_manager/pools/
2026-01-08T15:35:24.007984Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:127: IgniteOperation, opId: 127:1, propose status:StatusSchemeError, reason: Check failed: path: '/MyRoot/.metadata/workload_manager/pools/', error: path part shouldn't be empty, at schemeshard: 72057594046678944
2026-01-08T15:35:24.011003Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:460: TTxOperationPropose Complete, txId: 127, response: Status: StatusSchemeError Reason: "Check failed: path: \'/MyRoot/.metadata/workload_manager/pools/\', error: path part shouldn\'t be empty" TxId: 127 SchemeshardId: 72057594046678944, at schemeshard: 72057594046678944
2026-01-08T15:35:24.011308Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 127, database: /MyRoot, subject: , status: StatusSchemeError, reason: Check failed: path: '/MyRoot/.metadata/workload_manager/pools/', error: path part shouldn't be empty, operation: CREATE RESOURCE POOL, path:
TestModificationResult got TxId: 127, wait until txId: 127
|87.7%| [TM] {BAZEL_UPLOAD} ydb/core/tx/schemeshard/ut_resource_pool/unittest
------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_continuous_backup_reboots/unittest >> TContinuousBackupWithRebootsTests::Basic [GOOD]
Test command err:
==== RunWithTabletReboots
=========== RUN: Trace ===========
Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:120:2058] recipient: [1:114:2144]
IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:120:2058] recipient: [1:114:2144]
Leader for TabletID 72057594046678944 is [1:131:2155] sender: [1:132:2058] recipient: [1:114:2144]
2026-01-08T15:34:40.486885Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7938: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1
2026-01-08T15:34:40.486999Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7966: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10
2026-01-08T15:34:40.487039Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7852: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s
2026-01-08T15:34:40.487088Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7868: OperationsProcessing config: using default configuration
2026-01-08T15:34:40.487138Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7874: OperationsProcessing config: type TxSplitTablePartition, limit 10000
2026-01-08T15:34:40.487172Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7874: OperationsProcessing config: type TxMergeTablePartition, limit 10000
2026-01-08T15:34:40.487256Z node 1 :FLAT_TX_SCHEMESHARD NOTICE:
schemeshard_impl.cpp:7998: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2026-01-08T15:34:40.487335Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_shred_manager.cpp:39: [RootShredManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, ShredInterval# 604800.000000s, ShredBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2026-01-08T15:34:40.488274Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:8069: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2026-01-08T15:34:40.488598Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2026-01-08T15:34:40.652805Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:8101: Got new config: QueryServiceConfig { AllExternalDataSourcesAreAvailable: true } 2026-01-08T15:34:40.652904Z node 1 :IMPORT WARN: schemeshard_import.cpp:440: Table profiles were not loaded 2026-01-08T15:34:40.653851Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:8069: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2026-01-08T15:34:40.670584Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2026-01-08T15:34:40.670901Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2026-01-08T15:34:40.671114Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2026-01-08T15:34:40.679833Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2026-01-08T15:34:40.680261Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:464: Clear TempDirsState with owners number: 0 2026-01-08T15:34:40.681014Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1383: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2026-01-08T15:34:40.681649Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2026-01-08T15:34:40.684266Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:160: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2026-01-08T15:34:40.684444Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_shred_manager.cpp:84: [RootShredManager] Stop 2026-01-08T15:34:40.685688Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2026-01-08T15:34:40.685753Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2026-01-08T15:34:40.685853Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2026-01-08T15:34:40.685896Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2026-01-08T15:34:40.685941Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:213: TTxServerlessStorageBilling.Complete 2026-01-08T15:34:40.686116Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:7111: 
Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2026-01-08T15:34:40.861919Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:217:2217] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot" OperationType: ESchemeOpMkDir MkDir { Name: ".sys" } Internal: true FailOnExist: false } TxId: 281474976710657 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:34:40.863068Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:217:2217] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "auth_permissions" Type: EAuthPermissions } } TxId: 281474976710658 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:34:40.863228Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:217:2217] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "auth_effective_permissions" Type: EAuthEffectivePermissions } } TxId: 281474976710659 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:34:40.863307Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:217:2217] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "auth_users" Type: EAuthUsers } } TxId: 281474976710660 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:34:40.863412Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:217:2217] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "pg_tables" Type: EPgTables } } TxId: 281474976710661 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:34:40.863515Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:217:2217] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "ds_pdisks" Type: EPDisks } } TxId: 281474976710662 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:34:40.863609Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:217:2217] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "auth_group_members" Type: EAuthGroupMembers } } TxId: 281474976710663 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:34:40.863713Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:217:2217] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" 
OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "top_partitions_one_minute" Type: ETopPartitionsByCpuOneMinute } } TxId: 281474976710664 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:34:40.863782Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:217:2217] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "top_queries_by_request_units_one_hour" Type: ETopQueriesByRequestUnitsOneHour } } TxId: 281474976710665 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:34:40.863895Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:217:2217] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "ds_vslots" Type: EVSlots } } TxId: 281474976710666 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:34:40.863965Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:217:2217] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "top_partitions_by_tli_one_hour" Type: ETopPartitionsByTliOneHour } } TxId: 281474976710667 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:34:40.864075Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:217:2217] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "nodes" Type: ENodes } } TxId: 281474976710668 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:34:40.864150Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:217:2217] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "top_queries_by_cpu_time_one_hour" Type: ETopQueriesByCpuTimeOneHour } } TxId: 281474976710669 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:34:40.864212Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:217:2217] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "top_queries_by_read_bytes_one_hour" Type: ETopQueriesByReadBytesOneHour } } TxId: 281474976710670 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:34:40.864297Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# ... 
, LocalPathId: 39] was 5 2026-01-08T15:35:23.541697Z node 16 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6274: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 12 PathOwnerId: 72057594046678944, cookie: 104 2026-01-08T15:35:23.541794Z node 16 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 12 PathOwnerId: 72057594046678944, cookie: 104 2026-01-08T15:35:23.541833Z node 16 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:108: Operation in-flight, at schemeshard: 72057594046678944, txId: 104 2026-01-08T15:35:23.541877Z node 16 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 104, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], version: 12 2026-01-08T15:35:23.541920Z node 16 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:611: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 4 2026-01-08T15:35:23.542012Z node 16 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1679: TOperation IsReadyToNotify, TxId: 104, ready parts: 2/3, is published: true 2026-01-08T15:35:23.542512Z node 16 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6274: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 40 Version: 18446744073709551615 PathOwnerId: 72057594046678944, cookie: 104 2026-01-08T15:35:23.542619Z node 16 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 40 Version: 18446744073709551615 PathOwnerId: 72057594046678944, cookie: 104 2026-01-08T15:35:23.542658Z node 16 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:108: Operation in-flight, at schemeshard: 72057594046678944, txId: 104 2026-01-08T15:35:23.542694Z node 16 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1679: TOperation IsReadyToNotify, TxId: 104, ready parts: 2/3, is published: true 2026-01-08T15:35:23.548246Z node 16 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:656: TTxOperationReply complete, operationId: 104:0, at schemeshard: 72057594046678944 2026-01-08T15:35:23.548361Z node 16 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:176: Deleted shardIdx 72057594046678944:3 2026-01-08T15:35:23.548464Z node 16 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 104 2026-01-08T15:35:23.551831Z node 16 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 104 2026-01-08T15:35:23.551937Z node 16 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:176: Deleted shardIdx 72057594046678944:2 2026-01-08T15:35:23.552114Z node 16 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:656: TTxOperationReply complete, operationId: 104:0, at schemeshard: 72057594046678944 2026-01-08T15:35:23.552837Z node 16 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:494: TTxOperationProgress Execute, operationId: 104:0, at schemeshard: 72057594046678944 2026-01-08T15:35:23.552901Z node 16 
:FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:497: [72057594046678944] TDone opId# 104:0 ProgressState 2026-01-08T15:35:23.553065Z node 16 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:928: Part operation is done id#104:0 progress is 3/3 2026-01-08T15:35:23.553126Z node 16 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1714: TOperation IsReadyToDone TxId: 104 ready parts: 3/3 2026-01-08T15:35:23.553187Z node 16 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:928: Part operation is done id#104:0 progress is 3/3 2026-01-08T15:35:23.553237Z node 16 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1714: TOperation IsReadyToDone TxId: 104 ready parts: 3/3 2026-01-08T15:35:23.553286Z node 16 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1679: TOperation IsReadyToNotify, TxId: 104, ready parts: 3/3, is published: true 2026-01-08T15:35:23.553343Z node 16 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1714: TOperation IsReadyToDone TxId: 104 ready parts: 3/3 2026-01-08T15:35:23.553410Z node 16 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:995: Operation and all the parts is done, operation id: 104:0 2026-01-08T15:35:23.553461Z node 16 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5577: RemoveTx for txid 104:0 2026-01-08T15:35:23.553625Z node 16 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:611: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 39] was 4 2026-01-08T15:35:23.553684Z node 16 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:995: Operation and all the parts is done, operation id: 104:1 2026-01-08T15:35:23.553714Z node 16 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5577: RemoveTx for txid 104:1 2026-01-08T15:35:23.553770Z node 16 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:611: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 40] was 2 2026-01-08T15:35:23.553805Z node 16 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:995: Operation and all the parts is done, operation id: 104:2 2026-01-08T15:35:23.553836Z node 16 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5577: RemoveTx for txid 104:2 2026-01-08T15:35:23.553917Z node 16 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:611: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 41] was 1 2026-01-08T15:35:23.554434Z node 16 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 104 2026-01-08T15:35:23.554581Z node 16 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 104 2026-01-08T15:35:23.554631Z node 16 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 104 2026-01-08T15:35:23.554774Z node 16 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:123: TTxCleanDroppedPaths Execute, 1 paths in candidate queue, at schemeshard: 72057594046678944 2026-01-08T15:35:23.554838Z node 16 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:137: TTxCleanDroppedPaths: PersistRemovePath for PathId# [OwnerId: 72057594046678944, LocalPathId: 41], at schemeshard: 72057594046678944 
2026-01-08T15:35:23.554936Z node 16 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:611: DecrementPathDbRefCount reason remove path for pathId [OwnerId: 72057594046678944, LocalPathId: 40] was 1 2026-01-08T15:35:23.555005Z node 16 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:137: TTxCleanDroppedPaths: PersistRemovePath for PathId# [OwnerId: 72057594046678944, LocalPathId: 40], at schemeshard: 72057594046678944 2026-01-08T15:35:23.555053Z node 16 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:611: DecrementPathDbRefCount reason remove path for pathId [OwnerId: 72057594046678944, LocalPathId: 39] was 3 2026-01-08T15:35:23.558612Z node 16 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:160: TTxCleanDroppedPaths Complete, done PersistRemovePath for 2 paths, skipped 0, left 0 candidates, at schemeshard: 72057594046678944 TestModificationResult got TxId: 104, wait until txId: 104 TestWaitNotification wait txId: 104 2026-01-08T15:35:23.559053Z node 16 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:213: tests -- TTxNotificationSubscriber for txId 104: send EvNotifyTxCompletion 2026-01-08T15:35:23.559118Z node 16 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:259: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 104 2026-01-08T15:35:23.559742Z node 16 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:123: NotifyTxCompletion, unknown transaction, txId: 104, at schemeshard: 72057594046678944 2026-01-08T15:35:23.559873Z node 16 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:230: tests -- TTxNotificationSubscriber for txId 104: got EvNotifyTxCompletionResult 2026-01-08T15:35:23.559939Z node 16 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:239: tests -- TTxNotificationSubscriber for txId 104: satisfy waiter [16:1159:3074] TestWaitNotification: OK eventTxId 104 2026-01-08T15:35:23.560748Z node 16 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/Table/0_continuousBackupImpl" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: true }, at schemeshard: 72057594046678944 2026-01-08T15:35:23.561071Z node 16 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/Table/0_continuousBackupImpl" took 337us result status StatusPathDoesNotExist 2026-01-08T15:35:23.561295Z node 16 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusPathDoesNotExist Reason: "Check failed: path: \'/MyRoot/Table/0_continuousBackupImpl\', error: path hasn\'t been resolved, nearest resolved path: \'/MyRoot/Table\' (id: [OwnerId: 72057594046678944, LocalPathId: 39])" Path: "/MyRoot/Table/0_continuousBackupImpl" PathId: 18446744073709551615 LastExistedPrefixPath: "/MyRoot/Table" LastExistedPrefixPathId: 39 LastExistedPrefixDescription { Self { Name: "Table" PathId: 39 SchemeshardId: 72057594046678944 PathType: EPathTypeTable CreateFinished: true CreateTxId: 101 CreateStep: 5000039 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathSubType: EPathSubTypeEmpty ChildrenExist: false } } PathOwnerId: 18446744073709551615, at schemeshard: 72057594046678944 2026-01-08T15:35:23.561957Z node 16 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/Table/0_continuousBackupImpl/streamImpl" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false 
BackupInfo: false ReturnBoundaries: false ShowPrivateTable: true }, at schemeshard: 72057594046678944 2026-01-08T15:35:23.562231Z node 16 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/Table/0_continuousBackupImpl/streamImpl" took 273us result status StatusPathDoesNotExist 2026-01-08T15:35:23.564314Z node 16 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusPathDoesNotExist Reason: "Check failed: path: \'/MyRoot/Table/0_continuousBackupImpl/streamImpl\', error: path hasn\'t been resolved, nearest resolved path: \'/MyRoot/Table\' (id: [OwnerId: 72057594046678944, LocalPathId: 39])" Path: "/MyRoot/Table/0_continuousBackupImpl/streamImpl" PathId: 18446744073709551615 LastExistedPrefixPath: "/MyRoot/Table" LastExistedPrefixPathId: 39 LastExistedPrefixDescription { Self { Name: "Table" PathId: 39 SchemeshardId: 72057594046678944 PathType: EPathTypeTable CreateFinished: true CreateTxId: 101 CreateStep: 5000039 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathSubType: EPathSubTypeEmpty ChildrenExist: false } } PathOwnerId: 18446744073709551615, at schemeshard: 72057594046678944 >> KikimrIcGateway::TestCreateStreamingQuery [GOOD] |87.7%| [TM] {BAZEL_UPLOAD} ydb/core/tx/schemeshard/ut_continuous_backup_reboots/unittest >> TxUsage::WriteToTopic_Demo_20_RestartAfterCommit_Query [GOOD] >> AsyncIndexChangeCollector::CoveredIndexUpdateCoveredColumn [GOOD] >> AsyncIndexChangeCollector::CoveredIndexUpsert >> KikimrIcGateway::TestLoadBasicSecretValueFromExternalDataSourceMetadata-UseSchemaSecrets [GOOD] >> KikimrIcGateway::TestLoadAwsSecretValueFromExternalDataSourceMetadata-UseSchemaSecrets >> TResourcePoolTest::CreateResourcePool >> TxUsage::Sinks_Olap_WriteToTopicAndTable_2_Query [GOOD] >> AsyncIndexChangeCollector::DeleteNothing [GOOD] >> AsyncIndexChangeCollector::DeleteSingleRow >> TResourcePoolTest::ParallelCreateSameResourcePool [GOOD] >> AsyncIndexChangeCollector::UpsertToSameKey [GOOD] >> AsyncIndexChangeCollector::UpsertWithoutIndexedValue >> TResourcePoolTest::DropResourcePoolTwice ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/provider/ut/unittest >> KikimrIcGateway::TestCreateStreamingQuery [GOOD] Test command err: Trying to start YDB, gRPC: 18868, MsgBus: 24384 2026-01-08T15:34:58.659718Z node 1 :METADATA_PROVIDER WARN: log.cpp:841: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7593014695884501768:2256];send_to=[0:7307199536658146131:7762515]; 2026-01-08T15:34:58.659836Z node 1 :METADATA_PROVIDER ERROR: log.cpp:841: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2026-01-08T15:34:59.213448Z node 1 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: LookupError, path: Root/.metadata/script_executions 2026-01-08T15:34:59.366129Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2026-01-08T15:34:59.366261Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2026-01-08T15:34:59.374536Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1374: Notification cookie mismatch for subscription 
[1:7593014695884501549:2081] 1767886498581096 != 1767886498581099 2026-01-08T15:34:59.378197Z node 1 :IMPORT WARN: schemeshard_import.cpp:440: Table profiles were not loaded 2026-01-08T15:34:59.397896Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2026-01-08T15:34:59.593440Z node 1 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: PathErrorUnknown, path: Root/.metadata/script_executions 2026-01-08T15:34:59.646740Z node 1 :TX_CONVEYOR ERROR: log.cpp:841: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2026-01-08T15:34:59.721321Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2026-01-08T15:34:59.721345Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2026-01-08T15:34:59.721363Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2026-01-08T15:34:59.721469Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2026-01-08T15:35:00.755631Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2026-01-08T15:35:00.790159Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:123: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2026-01-08T15:35:00.823401Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateExternalDataSource, opId: 281474976710658:2, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_external_data_source.cpp:268) 2026-01-08T15:35:00.895875Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateExternalTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_external_table.cpp:352) 2026-01-08T15:35:00.910949Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:123: NotifyTxCompletion, unknown transaction, txId: 281474976710659, at schemeshard: 72057594046644480 Trying to start YDB, gRPC: 27777, MsgBus: 13847 2026-01-08T15:35:04.859062Z node 2 :METADATA_PROVIDER WARN: log.cpp:841: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7593014719048006148:2062];send_to=[0:7307199536658146131:7762515]; 2026-01-08T15:35:04.859140Z node 2 :METADATA_PROVIDER ERROR: log.cpp:841: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2026-01-08T15:35:04.933430Z node 2 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: LookupError, path: 
Root/.metadata/script_executions 2026-01-08T15:35:05.114639Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1374: Notification cookie mismatch for subscription [2:7593014719048006127:2081] 1767886504856816 != 1767886504856819 2026-01-08T15:35:05.114722Z node 2 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: LookupError, path: Root/.metadata/script_executions 2026-01-08T15:35:05.123046Z node 2 :IMPORT WARN: schemeshard_import.cpp:440: Table profiles were not loaded 2026-01-08T15:35:05.147587Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2026-01-08T15:35:05.147665Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2026-01-08T15:35:05.186179Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2026-01-08T15:35:05.454371Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2026-01-08T15:35:05.454390Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2026-01-08T15:35:05.454397Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2026-01-08T15:35:05.454498Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2026-01-08T15:35:05.528441Z node 2 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: PathErrorUnknown, path: Root/.metadata/script_executions 2026-01-08T15:35:05.914536Z node 2 :TX_CONVEYOR ERROR: log.cpp:841: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2026-01-08T15:35:06.059791Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2026-01-08T15:35:06.070127Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:123: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2026-01-08T15:35:06.084219Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateExternalDataSource, opId: 281474976710658:2, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_external_data_source.cpp:268) Trying to start YDB, gRPC: 17320, MsgBus: 9623 2026-01-08T15:35:10.459933Z node 3 :METADATA_PROVIDER WARN: log.cpp:841: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[3:7593014745048505964:2234];send_to=[0:7307199536658146131:7762515]; 2026-01-08T15:35:10.460376Z node 3 :METADATA_PROVIDER ERROR: log.cpp:841: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2026-01-08T15:35:10.586542Z node 3 :KQP_PROXY WARN: 
kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: LookupError, path: Root/.metadata/script_executions 2026-01-08T15:35:10.777667Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2026-01-08T15:35:10.782546Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2026-01-08T15:35:10.834535Z node 3 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1374: Notification cookie mismatch for subscription [3:7593014745048505760:2081] 1767886510443164 != 1767886510443167 2026-01-08T15:35:10.839882Z node 3 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: LookupError, path: Root/.metadata/script_executions 2026-01-08T15:35:10.853656Z node 3 :IMPORT WARN: schemeshard_import.cpp:440: Table profiles were not loaded 2026-01-08T15:35:10.856157Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Connecting -> Connected 2026-01-08T15:35:11.082628Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2026-01-08T15:35:11.082678Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2026-01-08T15:35:11.082702Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2026-01-08T15:35:11.082810Z node 3 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2026-01-08T15:35:11.297487Z node 3 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: PathErrorUnknown, path: Root/.metadata/script_executions 2026-01-08T15:35:11.510685Z node 3 :TX_CONVEYOR ERROR: log.cpp:841: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2026-01-08T15:35:12.072426Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2026-01-08T15:35:12.079595Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:123: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2026-01-08T15:35:12.103369Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) Trying to start YDB, gRPC: 2885, MsgBus: 11508 2026-01-08T15:35:16.044214Z node 4 :METADATA_PROVIDER WARN: log.cpp:841: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[4:7593014771958504408:2067];send_to=[0:7307199536658146131:7762515]; 2026-01-08T15:35:16.044257Z node 4 :METADATA_PROVIDER ERROR: log.cpp:841: fline=accessor_snapshot_base.cpp:71;action=cannot detect path 
existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2026-01-08T15:35:16.128520Z node 4 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: LookupError, path: Root/.metadata/script_executions 2026-01-08T15:35:16.310583Z node 4 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: LookupError, path: Root/.metadata/script_executions 2026-01-08T15:35:16.341744Z node 4 :IMPORT WARN: schemeshard_import.cpp:440: Table profiles were not loaded 2026-01-08T15:35:16.362508Z node 4 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1374: Notification cookie mismatch for subscription [4:7593014771958504381:2081] 1767886516029655 != 1767886516029658 2026-01-08T15:35:16.447492Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2026-01-08T15:35:16.447643Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2026-01-08T15:35:16.455615Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Connecting -> Connected 2026-01-08T15:35:16.642714Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2026-01-08T15:35:16.642738Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2026-01-08T15:35:16.642745Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2026-01-08T15:35:16.642832Z node 4 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2026-01-08T15:35:16.816287Z node 4 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: PathErrorUnknown, path: Root/.metadata/script_executions 2026-01-08T15:35:17.074687Z node 4 :TX_CONVEYOR ERROR: log.cpp:841: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2026-01-08T15:35:17.320156Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2026-01-08T15:35:17.332413Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:123: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2026-01-08T15:35:17.404554Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:2, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:35:20.455056Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7593014789138374566:2335], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:35:20.455193Z node 4 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:263: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:35:20.455662Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7593014789138374584:2342], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:35:20.455712Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7593014789138374582:2340], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:35:20.455742Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7593014789138374583:2341], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:35:20.456056Z node 4 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:263: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:35:20.459716Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710660:2, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2026-01-08T15:35:20.473412Z node 4 :TX_PROXY ERROR: schemereq.cpp:624: Actor# [4:7593014789138374590:2577] txid# 281474976710659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exists but creating right now (id: [OwnerId: 72057594046644480, LocalPathId: 43], type: EPathTypeResourcePool, state: EPathStateCreate)" severity: 1 } 2026-01-08T15:35:20.480899Z node 4 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [4:7593014789138374589:2344], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710660 completed, doublechecking } 2026-01-08T15:35:20.481264Z node 4 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [4:7593014789138374588:2343], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710660 completed, doublechecking } 2026-01-08T15:35:20.535883Z node 4 :TX_PROXY ERROR: schemereq.cpp:624: Actor# [4:7593014789138374637:2608] txid# 281474976710661, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 43], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2026-01-08T15:35:20.584077Z node 4 :TX_PROXY ERROR: schemereq.cpp:624: Actor# [4:7593014789138374655:2616] txid# 281474976710662, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 43], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2026-01-08T15:35:21.172798Z node 4 :METADATA_PROVIDER ERROR: log.cpp:841: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[4:7593014771958504408:2067];send_to=[0:7307199536658146131:7762515]; 2026-01-08T15:35:21.173277Z node 4 :METADATA_PROVIDER ERROR: log.cpp:841: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2026-01-08T15:35:21.706142Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) 2026-01-08T15:35:22.550992Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710670:1, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:35:22.689207Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976710675:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) |87.8%| [TM] {BAZEL_UPLOAD} ydb/core/kqp/provider/ut/unittest >> AsyncIndexChangeCollector::UpsertManyRows [GOOD] >> AsyncIndexChangeCollector::MultiIndexedTableUpdateOneIndexedColumn ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_resource_pool/unittest >> TResourcePoolTest::ParallelCreateSameResourcePool [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2143] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2143] Leader for TabletID 72057594046678944 is [1:129:2154] sender: [1:130:2058] recipient: [1:112:2143] 2026-01-08T15:35:25.416900Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7938: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2026-01-08T15:35:25.416986Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7966: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 
2026-01-08T15:35:25.417024Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7852: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2026-01-08T15:35:25.417061Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7868: OperationsProcessing config: using default configuration 2026-01-08T15:35:25.417116Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7874: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2026-01-08T15:35:25.417164Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7874: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2026-01-08T15:35:25.417227Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7998: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2026-01-08T15:35:25.417302Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_shred_manager.cpp:39: [RootShredManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, ShredInterval# 604800.000000s, ShredBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2026-01-08T15:35:25.418205Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:8069: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2026-01-08T15:35:25.421008Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2026-01-08T15:35:25.522966Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7755: Cannot subscribe to console configs 2026-01-08T15:35:25.523047Z node 1 :IMPORT WARN: schemeshard_import.cpp:440: Table profiles were not loaded 2026-01-08T15:35:25.540736Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2026-01-08T15:35:25.541119Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2026-01-08T15:35:25.541333Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2026-01-08T15:35:25.555392Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2026-01-08T15:35:25.555840Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:464: Clear TempDirsState with owners number: 0 2026-01-08T15:35:25.556575Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1383: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2026-01-08T15:35:25.557413Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2026-01-08T15:35:25.560961Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:160: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2026-01-08T15:35:25.561172Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_shred_manager.cpp:84: [RootShredManager] Stop 2026-01-08T15:35:25.562394Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2026-01-08T15:35:25.562470Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2026-01-08T15:35:25.562598Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 
2026-01-08T15:35:25.562639Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2026-01-08T15:35:25.562740Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:213: TTxServerlessStorageBilling.Complete 2026-01-08T15:35:25.562896Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:7111: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2026-01-08T15:35:25.729738Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot" OperationType: ESchemeOpMkDir MkDir { Name: ".sys" } Internal: true FailOnExist: false } TxId: 281474976710657 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:35:25.730740Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "auth_permissions" Type: EAuthPermissions } } TxId: 281474976710658 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:35:25.730871Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "auth_effective_permissions" Type: EAuthEffectivePermissions } } TxId: 281474976710659 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:35:25.730959Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "auth_users" Type: EAuthUsers } } TxId: 281474976710660 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:35:25.731052Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "pg_tables" Type: EPgTables } } TxId: 281474976710661 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:35:25.731132Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "ds_pdisks" Type: EPDisks } } TxId: 281474976710662 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:35:25.731196Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: 
Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "auth_group_members" Type: EAuthGroupMembers } } TxId: 281474976710663 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:35:25.731257Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "top_partitions_one_minute" Type: ETopPartitionsByCpuOneMinute } } TxId: 281474976710664 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:35:25.731348Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "top_queries_by_request_units_one_hour" Type: ETopQueriesByRequestUnitsOneHour } } TxId: 281474976710665 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:35:25.731437Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "ds_vslots" Type: EVSlots } } TxId: 281474976710666 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:35:25.731511Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "top_partitions_by_tli_one_hour" Type: ETopPartitionsByTliOneHour } } TxId: 281474976710667 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:35:25.731570Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "nodes" Type: ENodes } } TxId: 281474976710668 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:35:25.731661Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "top_queries_by_cpu_time_one_hour" Type: ETopQueriesByCpuTimeOneHour } } TxId: 281474976710669 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:35:25.731779Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: 
"top_queries_by_read_bytes_one_hour" Type: ETopQueriesByReadBytesOneHour } } TxId: 281474976710670 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:35:25.731867Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "top_queries_by_read_bytes_one_minute" Type: ETopQueriesByR ... 01-08T15:35:26.637750Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/.metadata/workload_manager/pools/NilNoviSubLuna" took 270us result status StatusSuccess 2026-01-08T15:35:26.638172Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/.metadata/workload_manager/pools/NilNoviSubLuna" PathDescription { Self { Name: "NilNoviSubLuna" PathId: 41 SchemeshardId: 72057594046678944 PathType: EPathTypeResourcePool CreateFinished: true CreateTxId: 125 CreateStep: 5000039 ParentPathId: 40 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 ResourcePoolVersion: 1 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 40 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } ResourcePoolDescription { Name: "NilNoviSubLuna" PathId { OwnerId: 72057594046678944 LocalId: 41 } Version: 1 Properties { } } } PathId: 41 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 2026-01-08T15:35:26.639022Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/.metadata/workload_manager/pools/NilNoviSubLuna" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2026-01-08T15:35:26.639264Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/.metadata/workload_manager/pools/NilNoviSubLuna" took 203us result status StatusSuccess 2026-01-08T15:35:26.639563Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: 
"/MyRoot/.metadata/workload_manager/pools/NilNoviSubLuna" PathDescription { Self { Name: "NilNoviSubLuna" PathId: 41 SchemeshardId: 72057594046678944 PathType: EPathTypeResourcePool CreateFinished: true CreateTxId: 125 CreateStep: 5000039 ParentPathId: 40 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 ResourcePoolVersion: 1 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 40 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } ResourcePoolDescription { Name: "NilNoviSubLuna" PathId { OwnerId: 72057594046678944 LocalId: 41 } Version: 1 Properties { } } } PathId: 41 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 TestWaitNotification wait txId: 125 2026-01-08T15:35:26.639860Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:213: tests -- TTxNotificationSubscriber for txId 125: send EvNotifyTxCompletion 2026-01-08T15:35:26.639899Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:259: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 125 TestWaitNotification wait txId: 126 2026-01-08T15:35:26.640024Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:213: tests -- TTxNotificationSubscriber for txId 126: send EvNotifyTxCompletion 2026-01-08T15:35:26.640061Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:259: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 126 TestWaitNotification wait txId: 127 2026-01-08T15:35:26.640110Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:213: tests -- TTxNotificationSubscriber for txId 127: send EvNotifyTxCompletion 2026-01-08T15:35:26.640129Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:259: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 127 2026-01-08T15:35:26.640623Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:123: NotifyTxCompletion, unknown transaction, txId: 125, at schemeshard: 72057594046678944 2026-01-08T15:35:26.640846Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:230: tests -- TTxNotificationSubscriber for txId 125: got EvNotifyTxCompletionResult 2026-01-08T15:35:26.640885Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:239: tests -- TTxNotificationSubscriber for txId 125: satisfy waiter [1:754:2744] 2026-01-08T15:35:26.641084Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:123: NotifyTxCompletion, unknown transaction, txId: 126, at schemeshard: 72057594046678944 2026-01-08T15:35:26.641213Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:123: 
NotifyTxCompletion, unknown transaction, txId: 127, at schemeshard: 72057594046678944 2026-01-08T15:35:26.641256Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:230: tests -- TTxNotificationSubscriber for txId 126: got EvNotifyTxCompletionResult 2026-01-08T15:35:26.641288Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:239: tests -- TTxNotificationSubscriber for txId 126: satisfy waiter [1:754:2744] 2026-01-08T15:35:26.641394Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:230: tests -- TTxNotificationSubscriber for txId 127: got EvNotifyTxCompletionResult 2026-01-08T15:35:26.641418Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:239: tests -- TTxNotificationSubscriber for txId 127: satisfy waiter [1:754:2744] TestWaitNotification: OK eventTxId 125 TestWaitNotification: OK eventTxId 126 TestWaitNotification: OK eventTxId 127 2026-01-08T15:35:26.641994Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/.metadata/workload_manager/pools/NilNoviSubLuna" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2026-01-08T15:35:26.642189Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/.metadata/workload_manager/pools/NilNoviSubLuna" took 201us result status StatusSuccess 2026-01-08T15:35:26.642654Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/.metadata/workload_manager/pools/NilNoviSubLuna" PathDescription { Self { Name: "NilNoviSubLuna" PathId: 41 SchemeshardId: 72057594046678944 PathType: EPathTypeResourcePool CreateFinished: true CreateTxId: 125 CreateStep: 5000039 ParentPathId: 40 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 ResourcePoolVersion: 1 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 40 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } ResourcePoolDescription { Name: "NilNoviSubLuna" PathId { OwnerId: 72057594046678944 LocalId: 41 } Version: 1 Properties { } } } PathId: 41 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 TestModificationResults wait txId: 128 2026-01-08T15:35:26.645802Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:377: TTxOperationPropose Execute, 
message: Transaction { WorkingDir: "/MyRoot/.metadata/workload_manager/pools" OperationType: ESchemeOpCreateResourcePool CreateResourcePool { Name: "NilNoviSubLuna" } } TxId: 128 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2026-01-08T15:35:26.646090Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_create_resource_pool.cpp:148: [72057594046678944] TCreateResourcePool Propose: opId# 128:0, path# /MyRoot/.metadata/workload_manager/pools/NilNoviSubLuna 2026-01-08T15:35:26.646271Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:127: IgniteOperation, opId: 128:1, propose status:StatusAlreadyExists, reason: Check failed: path: '/MyRoot/.metadata/workload_manager/pools/NilNoviSubLuna', error: path exist, request accepts it (id: [OwnerId: 72057594046678944, LocalPathId: 41], type: EPathTypeResourcePool, state: EPathStateNoChanges), at schemeshard: 72057594046678944 2026-01-08T15:35:26.649094Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:460: TTxOperationPropose Complete, txId: 128, response: Status: StatusAlreadyExists Reason: "Check failed: path: \'/MyRoot/.metadata/workload_manager/pools/NilNoviSubLuna\', error: path exist, request accepts it (id: [OwnerId: 72057594046678944, LocalPathId: 41], type: EPathTypeResourcePool, state: EPathStateNoChanges)" TxId: 128 SchemeshardId: 72057594046678944 PathId: 41 PathCreateTxId: 125, at schemeshard: 72057594046678944 2026-01-08T15:35:26.649370Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 128, database: /MyRoot, subject: , status: StatusAlreadyExists, reason: Check failed: path: '/MyRoot/.metadata/workload_manager/pools/NilNoviSubLuna', error: path exist, request accepts it (id: [OwnerId: 72057594046678944, LocalPathId: 41], type: EPathTypeResourcePool, state: EPathStateNoChanges), operation: CREATE RESOURCE POOL, path: NilNoviSubLuna TestModificationResult got TxId: 128, wait until txId: 128 |87.8%| [TM] {BAZEL_UPLOAD} ydb/core/tx/schemeshard/ut_resource_pool/unittest |87.8%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/schemeshard/ut_streaming_query_reboots/core-tx-schemeshard-ut_streaming_query_reboots |87.8%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/schemeshard/ut_streaming_query_reboots/core-tx-schemeshard-ut_streaming_query_reboots |87.8%| [LD] {RESULT} $(B)/ydb/core/tx/schemeshard/ut_streaming_query_reboots/core-tx-schemeshard-ut_streaming_query_reboots >> DataShardVolatile::UpsertDependenciesShardsRestart-UseSink [GOOD] >> DataShardVolatile::NotCachingAbortingDeletes+UseSink >> TxUsage::Sinks_Olap_WriteToTopicAndTable_3_Table >> BsControllerConfig::AddDriveSerialMassive [GOOD] >> TDataShardMinStepTest::TestDropTableCompletesQuicklyRW+VolatileTxs >> AsyncIndexChangeCollector::InsertManyRows [GOOD] >> AsyncIndexChangeCollector::MultiIndexedTableInsertSingleRow >> DataShardVolatile::DistributedUpsertRestartAfterPlan-UseSink [GOOD] >> DataShardVolatile::CompactedVolatileChangesCommit >> TResourcePoolTest::CreateResourcePool [GOOD] >> TDataShardMinStepTest::TestDropTablePlanComesNotTooEarlyRW+VolatileTxs ------- [TM] {asan, default-linux-x86_64, release} ydb/public/sdk/cpp/src/client/topic/ut/with_direct_read_ut/unittest >> BasicUsage::ConflictingWrites [GOOD] Test command err: 2026-01-08T15:31:41.869434Z node 1 :METADATA_PROVIDER WARN: log.cpp:841: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7593013849932015076:2064];send_to=[0:7307199536658146131:7762515]; 2026-01-08T15:31:41.869468Z node 1 
:METADATA_PROVIDER ERROR: log.cpp:841: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2026-01-08T15:31:42.033548Z node 1 :PQ_READ_PROXY DEBUG: caching_service.cpp:44: Direct read cache: : Created 2026-01-08T15:31:42.690502Z node 1 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: LookupError, path: Root/.metadata/script_executions 2026-01-08T15:31:42.723094Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2026-01-08T15:31:42.723231Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2026-01-08T15:31:42.898284Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2026-01-08T15:31:42.910784Z node 1 :TX_CONVEYOR ERROR: log.cpp:841: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2026-01-08T15:31:42.970535Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1374: Notification cookie mismatch for subscription [1:7593013849932015052:2081] 1767886301866535 != 1767886301866538 2026-01-08T15:31:43.027454Z node 1 :IMPORT WARN: schemeshard_import.cpp:440: Table profiles were not loaded 2026-01-08T15:31:43.033596Z node 1 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: PathErrorUnknown, path: Root/.metadata/script_executions 2026-01-08T15:31:43.276207Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: /home/runner/.ya/build/build_root/663n/0021c3/r3tmp/yandexKtybjF.tmp 2026-01-08T15:31:43.276232Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: /home/runner/.ya/build/build_root/663n/0021c3/r3tmp/yandexKtybjF.tmp 2026-01-08T15:31:43.276367Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:202: successfully initialized from file: /home/runner/.ya/build/build_root/663n/0021c3/r3tmp/yandexKtybjF.tmp 2026-01-08T15:31:43.276438Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2026-01-08T15:31:43.351006Z INFO: TTestServer started on Port 24272 GrpcPort 26333 PQClient connected to localhost:26333 2026-01-08T15:31:43.741820Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2026-01-08T15:31:43.765870Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:123: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2026-01-08T15:31:43.783028Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:123: NotifyTxCompletion, unknown transaction, txId: 281474976710658, at schemeshard: 72057594046644480 2026-01-08T15:31:43.917571Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at 
schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:31:44.188035Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:123: NotifyTxCompletion, unknown transaction, txId: 281474976710660, at schemeshard: 72057594046644480 2026-01-08T15:31:44.206837Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:123: NotifyTxCompletion, unknown transaction, txId: 281474976710661, at schemeshard: 72057594046644480 2026-01-08T15:31:46.871437Z node 1 :METADATA_PROVIDER ERROR: log.cpp:841: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7593013849932015076:2064];send_to=[0:7307199536658146131:7762515]; 2026-01-08T15:31:46.871526Z node 1 :METADATA_PROVIDER ERROR: log.cpp:841: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2026-01-08T15:31:47.083015Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7593013875701819874:2334], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:31:47.083162Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:263: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:31:47.083268Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7593013875701819886:2337], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:31:47.083434Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7593013875701819888:2338], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:31:47.083476Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:263: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:31:47.087209Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710662:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2026-01-08T15:31:47.107147Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7593013875701819889:2339], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710662 completed, doublechecking } 2026-01-08T15:31:47.212508Z node 1 :TX_PROXY ERROR: schemereq.cpp:624: Actor# [1:7593013875701819954:2650] txid# 281474976710663, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 45], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2026-01-08T15:31:47.456584Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:31:47.542788Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:31:47.646140Z node 1 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:625: Compilation failed, self: [1:7593013875701819962:2345], status: SCHEME_ERROR, issues:
: Error: Type annotation, code: 1030
:5:17: Error: At function: KiReadTable!
:5:17: Error: Cannot find table 'db.[/Root/PQ/Config/V2/Versions]' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions., code: 2003 2026-01-08T15:31:47.649827Z node 1 :KQP_SESSION WARN: {KQPSA@kqp_session_actor.cpp:2679} SessionId: ydb://session/3?node_id=1&id=OWQxMWRhNmQtYTZmZjM3NTItYzdiOWIzNDEtMjFmMGM2YjQ=, ActorId: [1:7593013875701819871:2332], ActorState: ExecuteState, LegacyTraceId: 01kef3rfr2az989bdpr99nxdqd, ReplyQueryCompileError, remove tx status# SCHEME_ERROR issues# { message: "Type annotation" issue_code: 1030 severity: 1 issues { position { row: 5 column: 17 } message: "At function: KiReadTable!" end_position { row: 5 column: 17 } severity: 1 issues { position { row: 5 column: 17 } message: "Cannot find table \'db.[/Root/PQ/Config/V2/Versions]\' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions." end_position { row: 5 column: 17 } issue_code: 2003 severity: 1 } } } tx_id# trace_id# 2026-01-08T15:31:47.657991Z node 1 :PERSQUEUE_CLUSTER_TRACKER ERROR: cluster_tracker.cpp:167: failed to list clusters: { Response { QueryIssues { message: "Type annotation" issue_code: 1030 severity: 1 issues { position { row: 5 column: 17 } message: "At function: KiReadTable!" end_position { row: 5 column: 17 } severity: 1 issues { position { row: 5 column: 17 } message: "Cannot find table \'db.[/Root/PQ/Config/V2/Versions]\' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions." end_position { row: 5 column: 17 } issue_code: 2003 severity: 1 } } } TxMeta { } } YdbStatus: SCHEME_ERROR ConsumedRu: 1 } 2026-01-08T15:31:47.692763Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) === Init DC: UPSERT INTO `/Root/PQ/Config/V2/Cluster` (name, balancer, local, enabled, weight) VALUES ("dc1", "localhost", true, true, 1000), ("dc2", "dc2.logbroker.yandex.net", false, true, 1000); === CheckClustersList. Subcribe to ClusterTracker from [1:7593013879996787546:2830] === CheckClustersList. Ok 2026-01-08T15:31:53.074364Z :CreateTopicWithCustomName INFO: TTopicSdkTestSetup started 2026-01-08T15:31:53.096102Z node 1 :PQ_READ_PROXY DEBUG: grpc_pq_schema.cpp:133: new create topic request 2026-01-08T15:31:53.120623Z node 1 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1657: [72075186224037893][] pipe [1:7593013901471624222:2925] connected; active server actors: 1 2026-01-08T15:31:53.122647Z node 1 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer__balancing.cpp:1521: [72075186224037893][test-topic-1767886313] updating configuration. Deleted partitions []. 
Added partitions [0] 2026-01-08T15:31:53.128668Z node 1 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer.cpp:756: [72075186224037893][test-topic-1767886313] Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at RB 72075186224037893 2026-01-08T15:31:53.128853Z node 1 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer.cpp:142: [72075186224037893][test-topic-1767886313] BALANCER INIT DONE for test-topic-1767886313: (0, 72075186224037892) 2026-01-08T15:31:53.133533Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:3023: [PQ: 72075186224037892] Handle TEvInte ... k 2026-01-08T15:35:22.253204Z node 14 :PERSQUEUE DEBUG: partition.cpp:2329: [72075186224037892][Partition][0][StateIdle] Process user action and tx events 2026-01-08T15:35:22.253232Z node 14 :PERSQUEUE DEBUG: partition.cpp:2388: [72075186224037892][Partition][0][StateIdle] Events: 0, PendingCommits: 0, PendingWrites: 0 2026-01-08T15:35:22.253257Z node 14 :PERSQUEUE DEBUG: partition.cpp:2337: [72075186224037892][Partition][0][StateIdle] Process user action and tx pending commits 2026-01-08T15:35:22.253286Z node 14 :PERSQUEUE DEBUG: partition.cpp:2388: [72075186224037892][Partition][0][StateIdle] Events: 0, PendingCommits: 0, PendingWrites: 0 2026-01-08T15:35:22.253311Z node 14 :PERSQUEUE DEBUG: partition.cpp:2347: [72075186224037892][Partition][0][StateIdle] Try persist 2026-01-08T15:35:22.253342Z node 14 :PERSQUEUE DEBUG: partition_compaction.cpp:193: [72075186224037892][Partition][0][StateIdle] No data for blobs compaction 2026-01-08T15:35:22.253395Z node 14 :PERSQUEUE DEBUG: pq_impl.cpp:182: Answer ok topic: 'test-topic' partition: 0 messageNo: 16 requestId: cookie: 7 2026-01-08T15:35:22.253444Z node 14 :PERSQUEUE DEBUG: pq_impl.cpp:182: Answer ok topic: 'test-topic' partition: 0 messageNo: 17 requestId: cookie: 8 2026-01-08T15:35:22.253467Z node 14 :PERSQUEUE DEBUG: pq_impl.cpp:182: Answer ok topic: 'test-topic' partition: 0 messageNo: 18 requestId: cookie: 9 2026-01-08T15:35:22.253490Z node 14 :PERSQUEUE DEBUG: pq_impl.cpp:182: Answer ok topic: 'test-topic' partition: 0 messageNo: 19 requestId: cookie: 10 2026-01-08T15:35:22.253554Z node 14 :PQ_WRITE_PROXY DEBUG: writer.cpp:562: TPartitionWriter 72075186224037892 (partition=0) Received event: NKikimr::TEvPersQueue::TEvResponse 2026-01-08T15:35:22.253588Z node 14 :PQ_WRITE_PROXY DEBUG: writer.cpp:562: TPartitionWriter 72075186224037892 (partition=0) Received event: NKikimr::TEvPersQueue::TEvResponse 2026-01-08T15:35:22.253611Z node 14 :PQ_WRITE_PROXY DEBUG: writer.cpp:562: TPartitionWriter 72075186224037892 (partition=0) Received event: NKikimr::TEvPersQueue::TEvResponse 2026-01-08T15:35:22.253635Z node 14 :PQ_WRITE_PROXY DEBUG: writer.cpp:562: TPartitionWriter 72075186224037892 (partition=0) Received event: NKikimr::TEvPersQueue::TEvResponse 2026-01-08T15:35:22.255533Z :DEBUG: [/Root] TraceId [] SessionId [test-message_group_id|5a6fb3d5-62b317cd-aca1d264-67b98991_0] PartitionId [0] Generation [11] Write session: OnReadDone gRpcStatusCode: 0 2026-01-08T15:35:22.255641Z :DEBUG: [/Root] TraceId [] SessionId [test-message_group_id|5a6fb3d5-62b317cd-aca1d264-67b98991_0] PartitionId [0] Generation [11] Write session got write response: acks { seq_no: 98 written { offset: 97 } } write_statistics { persisting_time { nanos: 4000000 } min_queue_wait_time { nanos: 4000000 } max_queue_wait_time { nanos: 4000000 } partition_quota_wait_time { } topic_quota_wait_time { } } 2026-01-08T15:35:22.255665Z :DEBUG: [/Root] TraceId [] SessionId 
[test-message_group_id|5a6fb3d5-62b317cd-aca1d264-67b98991_0] PartitionId [0] Generation [11] OnAck: seqNo=98, txId=? 2026-01-08T15:35:22.255705Z :DEBUG: [/Root] TraceId [] SessionId [test-message_group_id|5a6fb3d5-62b317cd-aca1d264-67b98991_0] PartitionId [0] Generation [11] Write session: acknoledged message 98 2026-01-08T15:35:22.255851Z :DEBUG: [/Root] TraceId [] SessionId [test-message_group_id|5a6fb3d5-62b317cd-aca1d264-67b98991_0] PartitionId [0] Generation [11] Write session: OnReadDone gRpcStatusCode: 0 2026-01-08T15:35:22.255933Z :DEBUG: [/Root] TraceId [] SessionId [test-message_group_id|5a6fb3d5-62b317cd-aca1d264-67b98991_0] PartitionId [0] Generation [11] Write session got write response: acks { seq_no: 99 written { offset: 98 } } write_statistics { persisting_time { nanos: 4000000 } min_queue_wait_time { nanos: 2000000 } max_queue_wait_time { nanos: 2000000 } partition_quota_wait_time { } topic_quota_wait_time { } } 2026-01-08T15:35:22.255955Z :DEBUG: [/Root] TraceId [] SessionId [test-message_group_id|5a6fb3d5-62b317cd-aca1d264-67b98991_0] PartitionId [0] Generation [11] OnAck: seqNo=99, txId=? 2026-01-08T15:35:22.255976Z :DEBUG: [/Root] TraceId [] SessionId [test-message_group_id|5a6fb3d5-62b317cd-aca1d264-67b98991_0] PartitionId [0] Generation [11] Write session: acknoledged message 99 2026-01-08T15:35:22.256084Z :DEBUG: [/Root] TraceId [] SessionId [test-message_group_id|5a6fb3d5-62b317cd-aca1d264-67b98991_0] PartitionId [0] Generation [11] Write session: OnReadDone gRpcStatusCode: 0 2026-01-08T15:35:22.256187Z :DEBUG: [/Root] TraceId [] SessionId [test-message_group_id|5a6fb3d5-62b317cd-aca1d264-67b98991_0] PartitionId [0] Generation [11] Write session got write response: acks { seq_no: 100 written { offset: 99 } } write_statistics { persisting_time { nanos: 4000000 } min_queue_wait_time { nanos: 2000000 } max_queue_wait_time { nanos: 2000000 } partition_quota_wait_time { } topic_quota_wait_time { } } 2026-01-08T15:35:22.256211Z :DEBUG: [/Root] TraceId [] SessionId [test-message_group_id|5a6fb3d5-62b317cd-aca1d264-67b98991_0] PartitionId [0] Generation [11] OnAck: seqNo=100, txId=? 
2026-01-08T15:35:22.256240Z :DEBUG: [/Root] TraceId [] SessionId [test-message_group_id|5a6fb3d5-62b317cd-aca1d264-67b98991_0] PartitionId [0] Generation [11] Write session: acknoledged message 100 2026-01-08T15:35:22.258436Z :INFO: [/Root] TraceId [] SessionId [test-message_group_id|5a6fb3d5-62b317cd-aca1d264-67b98991_0] PartitionId [0] Generation [11] Write session will now close 2026-01-08T15:35:22.258519Z :DEBUG: [/Root] TraceId [] SessionId [test-message_group_id|5a6fb3d5-62b317cd-aca1d264-67b98991_0] PartitionId [0] Generation [11] Write session: aborting 2026-01-08T15:35:22.260701Z node 14 :PQ_WRITE_PROXY DEBUG: write_session_actor.cpp:254: session v1 cookie: 13 sessionId: test-message_group_id|5a6fb3d5-62b317cd-aca1d264-67b98991_0 grpc read done: success: 0 data: 2026-01-08T15:35:22.260738Z node 14 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:256: session v1 cookie: 13 sessionId: test-message_group_id|5a6fb3d5-62b317cd-aca1d264-67b98991_0 grpc read failed 2026-01-08T15:35:22.260771Z node 14 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:232: session v1 cookie: 13 sessionId: test-message_group_id|5a6fb3d5-62b317cd-aca1d264-67b98991_0 grpc closed 2026-01-08T15:35:22.260790Z node 14 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:304: session v1 cookie: 13 sessionId: test-message_group_id|5a6fb3d5-62b317cd-aca1d264-67b98991_0 is DEAD 2026-01-08T15:35:22.261605Z node 14 :PQ_WRITE_PROXY DEBUG: writer.cpp:562: TPartitionWriter 72075186224037892 (partition=0) Received event: NActors::TEvents::TEvPoison 2026-01-08T15:35:22.262445Z :INFO: [/Root] TraceId [] SessionId [test-message_group_id|5a6fb3d5-62b317cd-aca1d264-67b98991_0] PartitionId [0] Generation [11] Write session: gracefully shut down, all writes complete 2026-01-08T15:35:22.263186Z :DEBUG: [/Root] TraceId [] SessionId [test-message_group_id|5a6fb3d5-62b317cd-aca1d264-67b98991_0] PartitionId [0] Generation [11] Write session: OnReadDone gRpcStatusCode: 1, Msg: CANCELLED, Details: , InternalError: 0 2026-01-08T15:35:22.263243Z :TRACE: [/Root] TRACE_EVENT Error status=CLIENT_CANCELLED 2026-01-08T15:35:22.263314Z :DEBUG: [/Root] TraceId [] SessionId [test-message_group_id|5a6fb3d5-62b317cd-aca1d264-67b98991_0] PartitionId [0] Generation [11] Write session is aborting and will not restart 2026-01-08T15:35:22.263915Z node 14 :PERSQUEUE DEBUG: pq_impl.cpp:2809: [PQ: 72075186224037892] server disconnected, pipe [14:7593014789993300377:2722] destroyed 2026-01-08T15:35:22.263998Z node 14 :PERSQUEUE DEBUG: partition_write.cpp:140: [72075186224037892][Partition][0][StateIdle] TPartition::DropOwner. 
2026-01-08T15:35:22.264067Z node 14 :PERSQUEUE DEBUG: partition.cpp:2329: [72075186224037892][Partition][0][StateIdle] Process user action and tx events 2026-01-08T15:35:22.264100Z node 14 :PERSQUEUE DEBUG: partition.cpp:2388: [72075186224037892][Partition][0][StateIdle] Events: 0, PendingCommits: 0, PendingWrites: 0 2026-01-08T15:35:22.264123Z node 14 :PERSQUEUE DEBUG: partition.cpp:2337: [72075186224037892][Partition][0][StateIdle] Process user action and tx pending commits 2026-01-08T15:35:22.264160Z node 14 :PERSQUEUE DEBUG: partition.cpp:2388: [72075186224037892][Partition][0][StateIdle] Events: 0, PendingCommits: 0, PendingWrites: 0 2026-01-08T15:35:22.264180Z node 14 :PERSQUEUE DEBUG: partition.cpp:2347: [72075186224037892][Partition][0][StateIdle] Try persist 2026-01-08T15:35:22.267751Z node 14 :PQ_READ_PROXY DEBUG: grpc_pq_schema.cpp:149: new Describe topic request 2026-01-08T15:35:22.268015Z node 14 :PQ_READ_PROXY DEBUG: schema_actors.cpp:1197: Describe topic actor for path test-topic 2026-01-08T15:35:22.269712Z node 14 :PERSQUEUE DEBUG: pq_impl.cpp:2784: [PQ: 72075186224037892] server connected, pipe [14:7593014798583235029:2742], now have 1 active actors on pipe 2026-01-08T15:35:22.269788Z node 14 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1657: [72075186224037893][test-topic] pipe [14:7593014798583235030:2743] connected; active server actors: 1 2026-01-08T15:35:22.270342Z node 14 :PERSQUEUE DEBUG: partition.cpp:1025: [72075186224037892][Partition][0][StateIdle] Topic PartitionStatus PartitionSize: 0 UsedReserveSize: 0 ReserveSize: 0 PartitionConfig{ MaxCountInPartition: 2147483647 LifetimeSeconds: 3600 SourceIdLifetimeSeconds: 1382400 WriteSpeedInBytesPerSecond: 1048576 BurstSize: 1048576 TotalPartitions: 1 ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } SourceIdMaxCounts: 6000000 } 2026-01-08T15:35:22.272537Z node 14 :PERSQUEUE DEBUG: pq_impl.cpp:2809: [PQ: 72075186224037892] server disconnected, pipe [14:7593014798583235029:2742] destroyed 2026-01-08T15:35:22.274359Z :DEBUG: [/Root] TraceId [] SessionId [test-message_group_id|5a6fb3d5-62b317cd-aca1d264-67b98991_0] PartitionId [0] Generation [11] Write session: destroy 2026-01-08T15:35:22.350159Z node 14 :PERSQUEUE DEBUG: partition.cpp:2329: [72075186224037892][Partition][0][StateIdle] Process user action and tx events 2026-01-08T15:35:22.350211Z node 14 :PERSQUEUE DEBUG: partition.cpp:2388: [72075186224037892][Partition][0][StateIdle] Events: 0, PendingCommits: 0, PendingWrites: 0 2026-01-08T15:35:22.350250Z node 14 :PERSQUEUE DEBUG: partition.cpp:2337: [72075186224037892][Partition][0][StateIdle] Process user action and tx pending commits 2026-01-08T15:35:22.350287Z node 14 :PERSQUEUE DEBUG: partition.cpp:2388: [72075186224037892][Partition][0][StateIdle] Events: 0, PendingCommits: 0, PendingWrites: 0 2026-01-08T15:35:22.350334Z node 14 :PERSQUEUE DEBUG: partition.cpp:2347: [72075186224037892][Partition][0][StateIdle] Try persist 2026-01-08T15:35:22.456547Z node 14 :PERSQUEUE DEBUG: partition.cpp:2329: 
[72075186224037892][Partition][0][StateIdle] Process user action and tx events 2026-01-08T15:35:22.456605Z node 14 :PERSQUEUE DEBUG: partition.cpp:2388: [72075186224037892][Partition][0][StateIdle] Events: 0, PendingCommits: 0, PendingWrites: 0 2026-01-08T15:35:22.456636Z node 14 :PERSQUEUE DEBUG: partition.cpp:2337: [72075186224037892][Partition][0][StateIdle] Process user action and tx pending commits 2026-01-08T15:35:22.456674Z node 14 :PERSQUEUE DEBUG: partition.cpp:2388: [72075186224037892][Partition][0][StateIdle] Events: 0, PendingCommits: 0, PendingWrites: 0 2026-01-08T15:35:22.456711Z node 14 :PERSQUEUE DEBUG: partition.cpp:2347: [72075186224037892][Partition][0][StateIdle] Try persist |87.8%| [TM] {BAZEL_UPLOAD} ydb/public/sdk/cpp/src/client/topic/ut/with_direct_read_ut/unittest ------- [TM] {asan, default-linux-x86_64, release} ydb/core/mind/bscontroller/ut_bscontroller/unittest >> BsControllerConfig::AddDriveSerialMassive [GOOD] Test command err: Leader for TabletID 72057594037932033 is [0:0:0] sender: [1:224:2066] recipient: [1:205:2077] IGNORE Leader for TabletID 72057594037932033 is [0:0:0] sender: [1:224:2066] recipient: [1:205:2077] Leader for TabletID 72057594037932033 is [1:226:2079] sender: [1:227:2066] recipient: [1:205:2077] 2026-01-08T15:35:20.364132Z node 1 :BS_CONTROLLER DEBUG: {BSC05@impl.h:2080} StateInit event Type# 268828672 Event# NKikimr::TEvTablet::TEvBoot 2026-01-08T15:35:20.365379Z node 1 :BS_CONTROLLER DEBUG: {BSC05@impl.h:2080} StateInit event Type# 268828673 Event# NKikimr::TEvTablet::TEvRestored 2026-01-08T15:35:20.365790Z node 1 :BS_CONTROLLER DEBUG: {BSC22@console_interaction.cpp:14} Console interaction started 2026-01-08T15:35:20.372272Z node 1 :BS_CONTROLLER DEBUG: {BSC05@impl.h:2080} StateInit event Type# 268828684 Event# NKikimr::TEvTablet::TEvFollowerSyncComplete 2026-01-08T15:35:20.372582Z node 1 :BS_CONTROLLER DEBUG: {BSC05@impl.h:2080} StateInit event Type# 268639244 Event# NKikimr::TEvNodeWardenStorageConfig 2026-01-08T15:35:20.372724Z node 1 :BS_CONTROLLER DEBUG: {BSC05@impl.h:2080} StateInit event Type# 131082 Event# NActors::TEvInterconnect::TEvNodesInfo 2026-01-08T15:35:20.372754Z node 1 :BS_CONTROLLER DEBUG: {BSC01@bsc.cpp:829} Handle TEvInterconnect::TEvNodesInfo 2026-01-08T15:35:20.372971Z node 1 :BS_CONTROLLER DEBUG: {BSCTXIS01@init_scheme.cpp:17} TTxInitScheme Execute 2026-01-08T15:35:20.390172Z node 1 :BS_CONTROLLER DEBUG: {BSCTXIS03@init_scheme.cpp:44} TTxInitScheme Complete 2026-01-08T15:35:20.390300Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM01@migrate.cpp:190} Execute tx 2026-01-08T15:35:20.392025Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM02@migrate.cpp:251} Complete tx IncompatibleData# false 2026-01-08T15:35:20.392236Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxTrimUnusedSlots 2026-01-08T15:35:20.392380Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxTrimUnusedSlots 2026-01-08T15:35:20.392471Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateSchemaVersion Leader for TabletID 72057594037932033 is [1:226:2079] sender: [1:247:2066] recipient: [1:20:2067] 2026-01-08T15:35:20.407155Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# 
NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateSchemaVersion 2026-01-08T15:35:20.407319Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxGenerateInstanceId 2026-01-08T15:35:20.433250Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxGenerateInstanceId 2026-01-08T15:35:20.433376Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateStaticPDiskInfo 2026-01-08T15:35:20.433491Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateStaticPDiskInfo 2026-01-08T15:35:20.433574Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxFillInNonNullConfigForPDisk 2026-01-08T15:35:20.433689Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxFillInNonNullConfigForPDisk 2026-01-08T15:35:20.433766Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxDropDriveStatus 2026-01-08T15:35:20.433816Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxDropDriveStatus 2026-01-08T15:35:20.433872Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateCompatibilityInfo 2026-01-08T15:35:20.444714Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateCompatibilityInfo 2026-01-08T15:35:20.444844Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateEnableConfigV2 2026-01-08T15:35:20.457151Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateEnableConfigV2 2026-01-08T15:35:20.457297Z node 1 :BS_CONTROLLER DEBUG: {BSCTXLE01@load_everything.cpp:21} TTxLoadEverything Execute 2026-01-08T15:35:20.458898Z node 1 :BS_CONTROLLER DEBUG: {BSCTXLE03@load_everything.cpp:705} TTxLoadEverything Complete 2026-01-08T15:35:20.458982Z node 1 :BS_CONTROLLER DEBUG: {BSC09@impl.h:2211} LoadFinished 2026-01-08T15:35:20.459225Z node 1 :BS_CONTROLLER DEBUG: {BSC18@console_interaction.cpp:31} Console connection service started 2026-01-08T15:35:20.459279Z node 1 :BS_CONTROLLER DEBUG: {BSCTXLE04@load_everything.cpp:710} TTxLoadEverything InitQueue processed 2026-01-08T15:35:20.473891Z node 1 :BS_CONTROLLER DEBUG: {BSCTXCC01@config_cmd.cpp:433} Execute TEvControllerConfigRequest Request# {Command { AddDriveSerial { Serial: "SN_123" BoxId: 1 } } } 2026-01-08T15:35:20.475511Z node 1 :BS_CONTROLLER DEBUG: {BSCTXCC01@config_cmd.cpp:433} Execute TEvControllerConfigRequest Request# {Command { AddDriveSerial { Serial: "SN_123" BoxId: 1 } } } 2026-01-08T15:35:20.476218Z node 1 :BS_CONTROLLER DEBUG: {BSCTXCC01@config_cmd.cpp:433} Execute TEvControllerConfigRequest Request# {Command { 
AddDriveSerial { Serial: "SN_123" BoxId: 1 } } } Leader for TabletID 72057594037932033 is [0:0:0] sender: [11:224:2066] recipient: [11:208:2077] IGNORE Leader for TabletID 72057594037932033 is [0:0:0] sender: [11:224:2066] recipient: [11:208:2077] Leader for TabletID 72057594037932033 is [11:226:2079] sender: [11:227:2066] recipient: [11:208:2077] 2026-01-08T15:35:22.271759Z node 11 :BS_CONTROLLER DEBUG: {BSC05@impl.h:2080} StateInit event Type# 268828672 Event# NKikimr::TEvTablet::TEvBoot 2026-01-08T15:35:22.272748Z node 11 :BS_CONTROLLER DEBUG: {BSC05@impl.h:2080} StateInit event Type# 268828673 Event# NKikimr::TEvTablet::TEvRestored 2026-01-08T15:35:22.273021Z node 11 :BS_CONTROLLER DEBUG: {BSC22@console_interaction.cpp:14} Console interaction started 2026-01-08T15:35:22.278390Z node 11 :BS_CONTROLLER DEBUG: {BSC05@impl.h:2080} StateInit event Type# 268828684 Event# NKikimr::TEvTablet::TEvFollowerSyncComplete 2026-01-08T15:35:22.279071Z node 11 :BS_CONTROLLER DEBUG: {BSC05@impl.h:2080} StateInit event Type# 268639244 Event# NKikimr::TEvNodeWardenStorageConfig 2026-01-08T15:35:22.279413Z node 11 :BS_CONTROLLER DEBUG: {BSC05@impl.h:2080} StateInit event Type# 131082 Event# NActors::TEvInterconnect::TEvNodesInfo 2026-01-08T15:35:22.279448Z node 11 :BS_CONTROLLER DEBUG: {BSC01@bsc.cpp:829} Handle TEvInterconnect::TEvNodesInfo 2026-01-08T15:35:22.279700Z node 11 :BS_CONTROLLER DEBUG: {BSCTXIS01@init_scheme.cpp:17} TTxInitScheme Execute 2026-01-08T15:35:22.294953Z node 11 :BS_CONTROLLER DEBUG: {BSCTXIS03@init_scheme.cpp:44} TTxInitScheme Complete 2026-01-08T15:35:22.295118Z node 11 :BS_CONTROLLER DEBUG: {BSCTXM01@migrate.cpp:190} Execute tx 2026-01-08T15:35:22.295252Z node 11 :BS_CONTROLLER DEBUG: {BSCTXM02@migrate.cpp:251} Complete tx IncompatibleData# false 2026-01-08T15:35:22.295349Z node 11 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxTrimUnusedSlots 2026-01-08T15:35:22.295449Z node 11 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxTrimUnusedSlots 2026-01-08T15:35:22.295507Z node 11 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateSchemaVersion Leader for TabletID 72057594037932033 is [11:226:2079] sender: [11:247:2066] recipient: [11:20:2067] 2026-01-08T15:35:22.311216Z node 11 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateSchemaVersion 2026-01-08T15:35:22.311371Z node 11 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxGenerateInstanceId 2026-01-08T15:35:22.339007Z node 11 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxGenerateInstanceId 2026-01-08T15:35:22.339155Z node 11 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateStaticPDiskInfo 2026-01-08T15:35:22.339250Z node 11 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateStaticPDiskInfo 2026-01-08T15:35:22.339393Z node 11 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue 
Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxFillInNonNullConfigForPDisk 2026-01-08T15:35:22.339519Z node 11 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxFillInNonNullConfigForPDisk 2026-01-08T15:35:22.339569Z node 11 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxDropDriveStatus 2026-01-08T15:35:22.339606Z node 11 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxDropDriveStatus 2026-01-08T15:35:22.339665Z node 11 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateCompatibilityInfo 2026-01-08T15:35:22.351209Z node 11 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateCompatibilityInfo 2026-01-08T15:35:22.351395Z node 11 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateEnableConfigV2 2026-01-08T15:35:22.366995Z node 11 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateEnableConfigV2 2026-01-08T15:35:22.367133Z node 11 :BS_CONTROLLER DEBUG: {BSCTXLE01@load_everything.cpp:21} TTxLoadEverything Execute 2026-01-08T15:35:22.368747Z node 11 :BS_CONTROLLER DEBUG: {BSCTXLE03@load_everything.cpp:705} TTxLoadEverything Complete 2026-01-08T15:35:22.368805Z node 11 :BS_CONTROLLER DEBUG: {BSC09@impl.h:2211} LoadFinished 2026-01-08T15:35:22.369033Z node 11 :BS_CONTROLLER DEBUG: {BSC18@console_interaction.cpp:31} Console connection service started 2026-01-08T15:35:22.369088Z node 11 :BS_CONTROLLER DEBUG: {BSCTXLE04@load_everything.cpp:710} TTxLoadEverything InitQueue processed 2026-01-08T15:35:22.369684Z node 11 :BS_CONTROLLER DEBUG: {BSCTXCC01@config_cmd.cpp:433} Execute TEvControllerConfigRequest Request# {Command { AddDriveSerial { Serial: "SN_123" BoxId: 1 } } } 2026-01-08T15:35:22.373652Z node 11 :BS_CONTROLLER DEBUG: {BSCTXCC01@config_cmd.cpp:433} Execute TEvControllerConfigRequest Request# ... 
ommand { AddDriveSerial { Serial: "SN_5" BoxId: 1 } } } 2026-01-08T15:35:24.052526Z node 21 :BS_CONTROLLER DEBUG: {BSCTXCC01@config_cmd.cpp:433} Execute TEvControllerConfigRequest Request# {Command { AddDriveSerial { Serial: "SN_6" BoxId: 1 } } } 2026-01-08T15:35:24.053151Z node 21 :BS_CONTROLLER DEBUG: {BSCTXCC01@config_cmd.cpp:433} Execute TEvControllerConfigRequest Request# {Command { AddDriveSerial { Serial: "SN_7" BoxId: 1 } } } 2026-01-08T15:35:24.053896Z node 21 :BS_CONTROLLER DEBUG: {BSCTXCC01@config_cmd.cpp:433} Execute TEvControllerConfigRequest Request# {Command { AddDriveSerial { Serial: "SN_8" BoxId: 1 } } } 2026-01-08T15:35:24.054723Z node 21 :BS_CONTROLLER DEBUG: {BSCTXCC01@config_cmd.cpp:433} Execute TEvControllerConfigRequest Request# {Command { AddDriveSerial { Serial: "SN_9" BoxId: 1 } } } 2026-01-08T15:35:24.055483Z node 21 :BS_CONTROLLER DEBUG: {BSCTXCC01@config_cmd.cpp:433} Execute TEvControllerConfigRequest Request# {Command { RemoveDriveSerial { Serial: "SN_0" } } } 2026-01-08T15:35:24.056278Z node 21 :BS_CONTROLLER DEBUG: {BSCTXCC01@config_cmd.cpp:433} Execute TEvControllerConfigRequest Request# {Command { RemoveDriveSerial { Serial: "SN_1" } } } 2026-01-08T15:35:24.056943Z node 21 :BS_CONTROLLER DEBUG: {BSCTXCC01@config_cmd.cpp:433} Execute TEvControllerConfigRequest Request# {Command { RemoveDriveSerial { Serial: "SN_2" } } } 2026-01-08T15:35:24.057660Z node 21 :BS_CONTROLLER DEBUG: {BSCTXCC01@config_cmd.cpp:433} Execute TEvControllerConfigRequest Request# {Command { RemoveDriveSerial { Serial: "SN_3" } } } 2026-01-08T15:35:24.062634Z node 21 :BS_CONTROLLER DEBUG: {BSCTXCC01@config_cmd.cpp:433} Execute TEvControllerConfigRequest Request# {Command { RemoveDriveSerial { Serial: "SN_4" } } } 2026-01-08T15:35:24.063920Z node 21 :BS_CONTROLLER DEBUG: {BSCTXCC01@config_cmd.cpp:433} Execute TEvControllerConfigRequest Request# {Command { RemoveDriveSerial { Serial: "SN_5" } } } 2026-01-08T15:35:24.064785Z node 21 :BS_CONTROLLER DEBUG: {BSCTXCC01@config_cmd.cpp:433} Execute TEvControllerConfigRequest Request# {Command { RemoveDriveSerial { Serial: "SN_6" } } } 2026-01-08T15:35:24.065533Z node 21 :BS_CONTROLLER DEBUG: {BSCTXCC01@config_cmd.cpp:433} Execute TEvControllerConfigRequest Request# {Command { RemoveDriveSerial { Serial: "SN_7" } } } 2026-01-08T15:35:24.066273Z node 21 :BS_CONTROLLER DEBUG: {BSCTXCC01@config_cmd.cpp:433} Execute TEvControllerConfigRequest Request# {Command { RemoveDriveSerial { Serial: "SN_8" } } } 2026-01-08T15:35:24.071500Z node 21 :BS_CONTROLLER DEBUG: {BSCTXCC01@config_cmd.cpp:433} Execute TEvControllerConfigRequest Request# {Command { RemoveDriveSerial { Serial: "SN_9" } } } Leader for TabletID 72057594037932033 is [0:0:0] sender: [31:224:2066] recipient: [31:205:2077] IGNORE Leader for TabletID 72057594037932033 is [0:0:0] sender: [31:224:2066] recipient: [31:205:2077] Leader for TabletID 72057594037932033 is [31:226:2079] sender: [31:227:2066] recipient: [31:205:2077] 2026-01-08T15:35:26.195467Z node 31 :BS_CONTROLLER DEBUG: {BSC05@impl.h:2080} StateInit event Type# 268828672 Event# NKikimr::TEvTablet::TEvBoot 2026-01-08T15:35:26.196493Z node 31 :BS_CONTROLLER DEBUG: {BSC05@impl.h:2080} StateInit event Type# 268828673 Event# NKikimr::TEvTablet::TEvRestored 2026-01-08T15:35:26.196780Z node 31 :BS_CONTROLLER DEBUG: {BSC22@console_interaction.cpp:14} Console interaction started 2026-01-08T15:35:26.202664Z node 31 :BS_CONTROLLER DEBUG: {BSC05@impl.h:2080} StateInit event Type# 268828684 Event# NKikimr::TEvTablet::TEvFollowerSyncComplete 
2026-01-08T15:35:26.203146Z node 31 :BS_CONTROLLER DEBUG: {BSC05@impl.h:2080} StateInit event Type# 268639244 Event# NKikimr::TEvNodeWardenStorageConfig 2026-01-08T15:35:26.203325Z node 31 :BS_CONTROLLER DEBUG: {BSC05@impl.h:2080} StateInit event Type# 131082 Event# NActors::TEvInterconnect::TEvNodesInfo 2026-01-08T15:35:26.203353Z node 31 :BS_CONTROLLER DEBUG: {BSC01@bsc.cpp:829} Handle TEvInterconnect::TEvNodesInfo 2026-01-08T15:35:26.203575Z node 31 :BS_CONTROLLER DEBUG: {BSCTXIS01@init_scheme.cpp:17} TTxInitScheme Execute 2026-01-08T15:35:26.217253Z node 31 :BS_CONTROLLER DEBUG: {BSCTXIS03@init_scheme.cpp:44} TTxInitScheme Complete 2026-01-08T15:35:26.217396Z node 31 :BS_CONTROLLER DEBUG: {BSCTXM01@migrate.cpp:190} Execute tx 2026-01-08T15:35:26.217592Z node 31 :BS_CONTROLLER DEBUG: {BSCTXM02@migrate.cpp:251} Complete tx IncompatibleData# false 2026-01-08T15:35:26.217807Z node 31 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxTrimUnusedSlots 2026-01-08T15:35:26.217906Z node 31 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxTrimUnusedSlots 2026-01-08T15:35:26.218091Z node 31 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateSchemaVersion Leader for TabletID 72057594037932033 is [31:226:2079] sender: [31:247:2066] recipient: [31:20:2067] 2026-01-08T15:35:26.230152Z node 31 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateSchemaVersion 2026-01-08T15:35:26.230308Z node 31 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxGenerateInstanceId 2026-01-08T15:35:26.263077Z node 31 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxGenerateInstanceId 2026-01-08T15:35:26.263209Z node 31 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateStaticPDiskInfo 2026-01-08T15:35:26.263301Z node 31 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateStaticPDiskInfo 2026-01-08T15:35:26.263395Z node 31 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxFillInNonNullConfigForPDisk 2026-01-08T15:35:26.263533Z node 31 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxFillInNonNullConfigForPDisk 2026-01-08T15:35:26.263589Z node 31 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxDropDriveStatus 2026-01-08T15:35:26.263640Z node 31 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxDropDriveStatus 2026-01-08T15:35:26.263717Z node 31 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateCompatibilityInfo 2026-01-08T15:35:26.274927Z node 31 
:BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateCompatibilityInfo 2026-01-08T15:35:26.275092Z node 31 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateEnableConfigV2 2026-01-08T15:35:26.286896Z node 31 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateEnableConfigV2 2026-01-08T15:35:26.287025Z node 31 :BS_CONTROLLER DEBUG: {BSCTXLE01@load_everything.cpp:21} TTxLoadEverything Execute 2026-01-08T15:35:26.288485Z node 31 :BS_CONTROLLER DEBUG: {BSCTXLE03@load_everything.cpp:705} TTxLoadEverything Complete 2026-01-08T15:35:26.288539Z node 31 :BS_CONTROLLER DEBUG: {BSC09@impl.h:2211} LoadFinished 2026-01-08T15:35:26.288734Z node 31 :BS_CONTROLLER DEBUG: {BSC18@console_interaction.cpp:31} Console connection service started 2026-01-08T15:35:26.288774Z node 31 :BS_CONTROLLER DEBUG: {BSCTXLE04@load_everything.cpp:710} TTxLoadEverything InitQueue processed 2026-01-08T15:35:26.289514Z node 31 :BS_CONTROLLER DEBUG: {BSCTXCC01@config_cmd.cpp:433} Execute TEvControllerConfigRequest Request# {Command { AddDriveSerial { Serial: "SN_0" BoxId: 1 } } } 2026-01-08T15:35:26.301038Z node 31 :BS_CONTROLLER DEBUG: {BSCTXCC01@config_cmd.cpp:433} Execute TEvControllerConfigRequest Request# {Command { AddDriveSerial { Serial: "SN_1" BoxId: 1 } } } 2026-01-08T15:35:26.301995Z node 31 :BS_CONTROLLER DEBUG: {BSCTXCC01@config_cmd.cpp:433} Execute TEvControllerConfigRequest Request# {Command { AddDriveSerial { Serial: "SN_2" BoxId: 1 } } } 2026-01-08T15:35:26.302739Z node 31 :BS_CONTROLLER DEBUG: {BSCTXCC01@config_cmd.cpp:433} Execute TEvControllerConfigRequest Request# {Command { AddDriveSerial { Serial: "SN_3" BoxId: 1 } } } 2026-01-08T15:35:26.303399Z node 31 :BS_CONTROLLER DEBUG: {BSCTXCC01@config_cmd.cpp:433} Execute TEvControllerConfigRequest Request# {Command { AddDriveSerial { Serial: "SN_4" BoxId: 1 } } } 2026-01-08T15:35:26.304100Z node 31 :BS_CONTROLLER DEBUG: {BSCTXCC01@config_cmd.cpp:433} Execute TEvControllerConfigRequest Request# {Command { AddDriveSerial { Serial: "SN_5" BoxId: 1 } } } 2026-01-08T15:35:26.304680Z node 31 :BS_CONTROLLER DEBUG: {BSCTXCC01@config_cmd.cpp:433} Execute TEvControllerConfigRequest Request# {Command { AddDriveSerial { Serial: "SN_6" BoxId: 1 } } } 2026-01-08T15:35:26.305339Z node 31 :BS_CONTROLLER DEBUG: {BSCTXCC01@config_cmd.cpp:433} Execute TEvControllerConfigRequest Request# {Command { AddDriveSerial { Serial: "SN_7" BoxId: 1 } } } 2026-01-08T15:35:26.306026Z node 31 :BS_CONTROLLER DEBUG: {BSCTXCC01@config_cmd.cpp:433} Execute TEvControllerConfigRequest Request# {Command { AddDriveSerial { Serial: "SN_8" BoxId: 1 } } } 2026-01-08T15:35:26.319630Z node 31 :BS_CONTROLLER DEBUG: {BSCTXCC01@config_cmd.cpp:433} Execute TEvControllerConfigRequest Request# {Command { AddDriveSerial { Serial: "SN_9" BoxId: 1 } } } 2026-01-08T15:35:26.321405Z node 31 :BS_CONTROLLER DEBUG: {BSCTXCC01@config_cmd.cpp:433} Execute TEvControllerConfigRequest Request# {Command { RemoveDriveSerial { Serial: "SN_0" } } } 2026-01-08T15:35:26.322362Z node 31 :BS_CONTROLLER DEBUG: {BSCTXCC01@config_cmd.cpp:433} Execute TEvControllerConfigRequest Request# {Command { RemoveDriveSerial { Serial: "SN_1" } } } 2026-01-08T15:35:26.323171Z node 31 :BS_CONTROLLER DEBUG: {BSCTXCC01@config_cmd.cpp:433} Execute TEvControllerConfigRequest 
Request# {Command { RemoveDriveSerial { Serial: "SN_2" } } } 2026-01-08T15:35:26.323961Z node 31 :BS_CONTROLLER DEBUG: {BSCTXCC01@config_cmd.cpp:433} Execute TEvControllerConfigRequest Request# {Command { RemoveDriveSerial { Serial: "SN_3" } } } 2026-01-08T15:35:26.324680Z node 31 :BS_CONTROLLER DEBUG: {BSCTXCC01@config_cmd.cpp:433} Execute TEvControllerConfigRequest Request# {Command { RemoveDriveSerial { Serial: "SN_4" } } } 2026-01-08T15:35:26.325529Z node 31 :BS_CONTROLLER DEBUG: {BSCTXCC01@config_cmd.cpp:433} Execute TEvControllerConfigRequest Request# {Command { RemoveDriveSerial { Serial: "SN_5" } } } 2026-01-08T15:35:26.326298Z node 31 :BS_CONTROLLER DEBUG: {BSCTXCC01@config_cmd.cpp:433} Execute TEvControllerConfigRequest Request# {Command { RemoveDriveSerial { Serial: "SN_6" } } } 2026-01-08T15:35:26.332799Z node 31 :BS_CONTROLLER DEBUG: {BSCTXCC01@config_cmd.cpp:433} Execute TEvControllerConfigRequest Request# {Command { RemoveDriveSerial { Serial: "SN_7" } } } 2026-01-08T15:35:26.333682Z node 31 :BS_CONTROLLER DEBUG: {BSCTXCC01@config_cmd.cpp:433} Execute TEvControllerConfigRequest Request# {Command { RemoveDriveSerial { Serial: "SN_8" } } } 2026-01-08T15:35:26.342997Z node 31 :BS_CONTROLLER DEBUG: {BSCTXCC01@config_cmd.cpp:433} Execute TEvControllerConfigRequest Request# {Command { RemoveDriveSerial { Serial: "SN_9" } } } >> TResourcePoolTest::DropResourcePoolTwice [GOOD] |87.8%| [TM] {BAZEL_UPLOAD} ydb/core/mind/bscontroller/ut_bscontroller/unittest ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_resource_pool/unittest >> TResourcePoolTest::CreateResourcePool [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2143] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2143] Leader for TabletID 72057594046678944 is [1:129:2154] sender: [1:130:2058] recipient: [1:112:2143] 2026-01-08T15:35:26.782576Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7938: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2026-01-08T15:35:26.782685Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7966: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2026-01-08T15:35:26.782747Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7852: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2026-01-08T15:35:26.782803Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7868: OperationsProcessing config: using default configuration 2026-01-08T15:35:26.782856Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7874: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2026-01-08T15:35:26.782893Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7874: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2026-01-08T15:35:26.782968Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7998: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2026-01-08T15:35:26.783047Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_shred_manager.cpp:39: [RootShredManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, ShredInterval# 604800.000000s, 
ShredBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2026-01-08T15:35:26.784121Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:8069: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2026-01-08T15:35:26.784456Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2026-01-08T15:35:26.887063Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7755: Cannot subscribe to console configs 2026-01-08T15:35:26.887194Z node 1 :IMPORT WARN: schemeshard_import.cpp:440: Table profiles were not loaded 2026-01-08T15:35:26.898950Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2026-01-08T15:35:26.899328Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2026-01-08T15:35:26.899522Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2026-01-08T15:35:26.913927Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2026-01-08T15:35:26.914421Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:464: Clear TempDirsState with owners number: 0 2026-01-08T15:35:26.915195Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1383: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2026-01-08T15:35:26.916160Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2026-01-08T15:35:26.922371Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:160: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2026-01-08T15:35:26.922580Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_shred_manager.cpp:84: [RootShredManager] Stop 2026-01-08T15:35:26.923851Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2026-01-08T15:35:26.923934Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2026-01-08T15:35:26.924082Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2026-01-08T15:35:26.924131Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2026-01-08T15:35:26.924253Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:213: TTxServerlessStorageBilling.Complete 2026-01-08T15:35:26.924418Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:7111: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2026-01-08T15:35:27.107828Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot" OperationType: ESchemeOpMkDir MkDir { Name: ".sys" } Internal: true FailOnExist: false } TxId: 281474976710657 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:35:27.108889Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "auth_permissions" Type: EAuthPermissions } } TxId: 281474976710658 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:35:27.109034Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "auth_effective_permissions" Type: EAuthEffectivePermissions } } TxId: 281474976710659 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:35:27.109155Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "auth_users" Type: EAuthUsers } } TxId: 281474976710660 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:35:27.109229Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "pg_tables" Type: EPgTables } } TxId: 281474976710661 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:35:27.109294Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "ds_pdisks" Type: EPDisks } } TxId: 281474976710662 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:35:27.109362Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "auth_group_members" Type: EAuthGroupMembers } } TxId: 281474976710663 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:35:27.109429Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "top_partitions_one_minute" Type: ETopPartitionsByCpuOneMinute } } TxId: 281474976710664 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:35:27.109514Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true 
FailOnExist: false CreateSysView { Name: "top_queries_by_request_units_one_hour" Type: ETopQueriesByRequestUnitsOneHour } } TxId: 281474976710665 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:35:27.109634Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "ds_vslots" Type: EVSlots } } TxId: 281474976710666 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:35:27.109708Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "top_partitions_by_tli_one_hour" Type: ETopPartitionsByTliOneHour } } TxId: 281474976710667 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:35:27.109777Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "nodes" Type: ENodes } } TxId: 281474976710668 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:35:27.109873Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "top_queries_by_cpu_time_one_hour" Type: ETopQueriesByCpuTimeOneHour } } TxId: 281474976710669 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:35:27.110014Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "top_queries_by_read_bytes_one_hour" Type: ETopQueriesByReadBytesOneHour } } TxId: 281474976710670 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:35:27.110103Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "top_queries_by_read_bytes_one_minute" Type: ETopQueriesByR ... 
316545 AckTo { RawX1: 136 RawX2: 4294969454 } } Step: 5000039 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2026-01-08T15:35:28.004981Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_create_resource_pool.cpp:19: [72057594046678944] TCreateResourcePool TPropose, operationId: 102:0, HandleReply TEvOperationPlan: step# 5000039 2026-01-08T15:35:28.005094Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2700: Change state for txid 102:0 128 -> 240 2026-01-08T15:35:28.005218Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:600: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 40] was 1 2026-01-08T15:35:28.005279Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:600: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 41] was 2 2026-01-08T15:35:28.005839Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 102 2026-01-08T15:35:28.008002Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 102 2026-01-08T15:35:28.008095Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 102 FAKE_COORDINATOR: Erasing txId 102 2026-01-08T15:35:28.008561Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2026-01-08T15:35:28.008591Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 102, path id: [OwnerId: 72057594046678944, LocalPathId: 40] 2026-01-08T15:35:28.008698Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 102, path id: [OwnerId: 72057594046678944, LocalPathId: 41] 2026-01-08T15:35:28.008752Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 102, path id: [OwnerId: 72057594046678944, LocalPathId: 41] 2026-01-08T15:35:28.008804Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2026-01-08T15:35:28.008829Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:210:2211], at schemeshard: 72057594046678944, txId: 102, path id: 40 2026-01-08T15:35:28.008877Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:210:2211], at schemeshard: 72057594046678944, txId: 102, path id: 41 2026-01-08T15:35:28.008910Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:210:2211], at schemeshard: 72057594046678944, txId: 102, path id: 41 2026-01-08T15:35:28.009147Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:494: TTxOperationProgress Execute, operationId: 102:0, at schemeshard: 72057594046678944 2026-01-08T15:35:28.009181Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:497: 
[72057594046678944] TDone opId# 102:0 ProgressState 2026-01-08T15:35:28.009305Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:928: Part operation is done id#102:0 progress is 1/1 2026-01-08T15:35:28.009349Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1714: TOperation IsReadyToDone TxId: 102 ready parts: 1/1 2026-01-08T15:35:28.009407Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:928: Part operation is done id#102:0 progress is 1/1 2026-01-08T15:35:28.009431Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1714: TOperation IsReadyToDone TxId: 102 ready parts: 1/1 2026-01-08T15:35:28.009460Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1679: TOperation IsReadyToNotify, TxId: 102, ready parts: 1/1, is published: false 2026-01-08T15:35:28.009488Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1714: TOperation IsReadyToDone TxId: 102 ready parts: 1/1 2026-01-08T15:35:28.009533Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:995: Operation and all the parts is done, operation id: 102:0 2026-01-08T15:35:28.009569Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5577: RemoveTx for txid 102:0 2026-01-08T15:35:28.009619Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:611: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 41] was 3 2026-01-08T15:35:28.009650Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:1004: Publication still in progress, tx: 102, publications: 2, subscribers: 0 2026-01-08T15:35:28.009674Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:1011: Publication details: tx: 102, [OwnerId: 72057594046678944, LocalPathId: 40], 5 2026-01-08T15:35:28.009710Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:1011: Publication details: tx: 102, [OwnerId: 72057594046678944, LocalPathId: 41], 2 2026-01-08T15:35:28.010533Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6274: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 40 Version: 5 PathOwnerId: 72057594046678944, cookie: 102 2026-01-08T15:35:28.010628Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 40 Version: 5 PathOwnerId: 72057594046678944, cookie: 102 2026-01-08T15:35:28.010661Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 2, at schemeshard: 72057594046678944, txId: 102 2026-01-08T15:35:28.010703Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 102, pathId: [OwnerId: 72057594046678944, LocalPathId: 40], version: 5 2026-01-08T15:35:28.010757Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:611: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 40] was 2 2026-01-08T15:35:28.013132Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6274: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 41 Version: 2 PathOwnerId: 72057594046678944, cookie: 102 2026-01-08T15:35:28.013212Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 41 Version: 2 PathOwnerId: 72057594046678944, cookie: 102 2026-01-08T15:35:28.013236Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046678944, txId: 102 2026-01-08T15:35:28.013258Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 102, pathId: [OwnerId: 72057594046678944, LocalPathId: 41], version: 2 2026-01-08T15:35:28.013282Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:611: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 41] was 2 2026-01-08T15:35:28.013348Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 102, subscribers: 0 2026-01-08T15:35:28.016681Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 102 2026-01-08T15:35:28.016897Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 102 TestModificationResult got TxId: 102, wait until txId: 102 TestWaitNotification wait txId: 102 2026-01-08T15:35:28.017044Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:213: tests -- TTxNotificationSubscriber for txId 102: send EvNotifyTxCompletion 2026-01-08T15:35:28.017083Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:259: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 102 2026-01-08T15:35:28.017423Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:123: NotifyTxCompletion, unknown transaction, txId: 102, at schemeshard: 72057594046678944 2026-01-08T15:35:28.017514Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:230: tests -- TTxNotificationSubscriber for txId 102: got EvNotifyTxCompletionResult 2026-01-08T15:35:28.017544Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:239: tests -- TTxNotificationSubscriber for txId 102: satisfy waiter [1:746:2736] TestWaitNotification: OK eventTxId 102 2026-01-08T15:35:28.017890Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/.metadata/workload_manager/pools/MyResourcePool" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2026-01-08T15:35:28.018069Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/.metadata/workload_manager/pools/MyResourcePool" took 174us result status StatusSuccess 2026-01-08T15:35:28.018361Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/.metadata/workload_manager/pools/MyResourcePool" PathDescription { Self { Name: "MyResourcePool" PathId: 41 SchemeshardId: 72057594046678944 PathType: EPathTypeResourcePool CreateFinished: true CreateTxId: 102 CreateStep: 5000039 ParentPathId: 40 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { 
GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 ResourcePoolVersion: 1 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 40 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } ResourcePoolDescription { Name: "MyResourcePool" PathId { OwnerId: 72057594046678944 LocalId: 41 } Version: 1 Properties { } } } PathId: 41 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 |87.8%| [TM] {BAZEL_UPLOAD} ydb/core/tx/schemeshard/ut_resource_pool/unittest >> TTxDataShardSampleKScan::SkipForeign [GOOD] >> TTxDataShardValidateUniqueIndexScan::BadRequest >> CdcStreamChangeCollector::UpsertToSameKey [GOOD] >> CdcStreamChangeCollector::UpsertToSameKeyWithImages >> CdcStreamChangeCollector::InsertSingleUuidRow [GOOD] >> CdcStreamChangeCollector::IndexAndStreamUpsert ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_resource_pool/unittest >> TResourcePoolTest::DropResourcePoolTwice [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2143] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2143] Leader for TabletID 72057594046678944 is [1:129:2154] sender: [1:130:2058] recipient: [1:112:2143] 2026-01-08T15:35:27.365726Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7938: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2026-01-08T15:35:27.365814Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7966: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2026-01-08T15:35:27.365852Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7852: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2026-01-08T15:35:27.365886Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7868: OperationsProcessing config: using default configuration 2026-01-08T15:35:27.365945Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7874: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2026-01-08T15:35:27.366020Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7874: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2026-01-08T15:35:27.366093Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7998: BackgroundCleaningQueue configured: 
Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2026-01-08T15:35:27.366162Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_shred_manager.cpp:39: [RootShredManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, ShredInterval# 604800.000000s, ShredBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2026-01-08T15:35:27.369652Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:8069: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2026-01-08T15:35:27.370045Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2026-01-08T15:35:27.477683Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7755: Cannot subscribe to console configs 2026-01-08T15:35:27.477753Z node 1 :IMPORT WARN: schemeshard_import.cpp:440: Table profiles were not loaded 2026-01-08T15:35:27.489246Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2026-01-08T15:35:27.489603Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2026-01-08T15:35:27.489796Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2026-01-08T15:35:27.509305Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2026-01-08T15:35:27.509772Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:464: Clear TempDirsState with owners number: 0 2026-01-08T15:35:27.510566Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1383: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2026-01-08T15:35:27.512614Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2026-01-08T15:35:27.516501Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:160: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2026-01-08T15:35:27.516692Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_shred_manager.cpp:84: [RootShredManager] Stop 2026-01-08T15:35:27.517920Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2026-01-08T15:35:27.517992Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2026-01-08T15:35:27.518124Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2026-01-08T15:35:27.518168Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2026-01-08T15:35:27.518278Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:213: TTxServerlessStorageBilling.Complete 2026-01-08T15:35:27.518463Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:7111: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2026-01-08T15:35:27.669603Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: 
Transaction { WorkingDir: "/MyRoot" OperationType: ESchemeOpMkDir MkDir { Name: ".sys" } Internal: true FailOnExist: false } TxId: 281474976710657 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:35:27.673404Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "auth_permissions" Type: EAuthPermissions } } TxId: 281474976710658 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:35:27.673610Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "auth_effective_permissions" Type: EAuthEffectivePermissions } } TxId: 281474976710659 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:35:27.673713Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "auth_users" Type: EAuthUsers } } TxId: 281474976710660 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:35:27.673789Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "pg_tables" Type: EPgTables } } TxId: 281474976710661 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:35:27.673860Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "ds_pdisks" Type: EPDisks } } TxId: 281474976710662 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:35:27.673927Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "auth_group_members" Type: EAuthGroupMembers } } TxId: 281474976710663 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:35:27.673992Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "top_partitions_one_minute" Type: ETopPartitionsByCpuOneMinute } } TxId: 281474976710664 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:35:27.674106Z 
node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "top_queries_by_request_units_one_hour" Type: ETopQueriesByRequestUnitsOneHour } } TxId: 281474976710665 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:35:27.674218Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "ds_vslots" Type: EVSlots } } TxId: 281474976710666 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:35:27.674288Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "top_partitions_by_tli_one_hour" Type: ETopPartitionsByTliOneHour } } TxId: 281474976710667 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:35:27.674519Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "nodes" Type: ENodes } } TxId: 281474976710668 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:35:27.674642Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "top_queries_by_cpu_time_one_hour" Type: ETopQueriesByCpuTimeOneHour } } TxId: 281474976710669 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:35:27.674744Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "top_queries_by_read_bytes_one_hour" Type: ETopQueriesByReadBytesOneHour } } TxId: 281474976710670 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:35:27.674823Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "top_queries_by_read_bytes_one_minute" Type: ETopQueriesByR ... 
h_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2026-01-08T15:35:28.674309Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 103, path id: [OwnerId: 72057594046678944, LocalPathId: 39] 2026-01-08T15:35:28.674483Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 103, path id: [OwnerId: 72057594046678944, LocalPathId: 40] 2026-01-08T15:35:28.674546Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 103, path id: [OwnerId: 72057594046678944, LocalPathId: 41] 2026-01-08T15:35:28.674801Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2026-01-08T15:35:28.674860Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:210:2211], at schemeshard: 72057594046678944, txId: 103, path id: 39 2026-01-08T15:35:28.674911Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:210:2211], at schemeshard: 72057594046678944, txId: 103, path id: 40 2026-01-08T15:35:28.674942Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:210:2211], at schemeshard: 72057594046678944, txId: 103, path id: 41 FAKE_COORDINATOR: Erasing txId 103 2026-01-08T15:35:28.675312Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:494: TTxOperationProgress Execute, operationId: 103:0, at schemeshard: 72057594046678944 2026-01-08T15:35:28.675363Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:497: [72057594046678944] TDone opId# 103:0 ProgressState 2026-01-08T15:35:28.675491Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:928: Part operation is done id#103:0 progress is 1/1 2026-01-08T15:35:28.675536Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1714: TOperation IsReadyToDone TxId: 103 ready parts: 1/1 2026-01-08T15:35:28.675579Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:928: Part operation is done id#103:0 progress is 1/1 2026-01-08T15:35:28.675645Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1714: TOperation IsReadyToDone TxId: 103 ready parts: 1/1 2026-01-08T15:35:28.675715Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1679: TOperation IsReadyToNotify, TxId: 103, ready parts: 1/1, is published: false 2026-01-08T15:35:28.675768Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1714: TOperation IsReadyToDone TxId: 103 ready parts: 1/1 2026-01-08T15:35:28.675835Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:995: Operation and all the parts is done, operation id: 103:0 2026-01-08T15:35:28.675878Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5577: RemoveTx for txid 103:0 2026-01-08T15:35:28.675986Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:611: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 41] was 2 2026-01-08T15:35:28.676033Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: 
schemeshard__operation_side_effects.cpp:1004: Publication still in progress, tx: 103, publications: 3, subscribers: 0 2026-01-08T15:35:28.676080Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:1011: Publication details: tx: 103, [OwnerId: 72057594046678944, LocalPathId: 39], 7 2026-01-08T15:35:28.676119Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:1011: Publication details: tx: 103, [OwnerId: 72057594046678944, LocalPathId: 40], 7 2026-01-08T15:35:28.676143Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:1011: Publication details: tx: 103, [OwnerId: 72057594046678944, LocalPathId: 41], 18446744073709551615 2026-01-08T15:35:28.677058Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6274: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 41 Version: 18446744073709551615 PathOwnerId: 72057594046678944, cookie: 103 2026-01-08T15:35:28.677168Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 41 Version: 18446744073709551615 PathOwnerId: 72057594046678944, cookie: 103 2026-01-08T15:35:28.677238Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 3, at schemeshard: 72057594046678944, txId: 103 2026-01-08T15:35:28.677289Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 103, pathId: [OwnerId: 72057594046678944, LocalPathId: 41], version: 18446744073709551615 2026-01-08T15:35:28.677350Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:611: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 41] was 1 2026-01-08T15:35:28.678508Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:123: TTxCleanDroppedPaths Execute, 1 paths in candidate queue, at schemeshard: 72057594046678944 2026-01-08T15:35:28.678585Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:137: TTxCleanDroppedPaths: PersistRemovePath for PathId# [OwnerId: 72057594046678944, LocalPathId: 41], at schemeshard: 72057594046678944 2026-01-08T15:35:28.678726Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:611: DecrementPathDbRefCount reason remove path for pathId [OwnerId: 72057594046678944, LocalPathId: 40] was 2 2026-01-08T15:35:28.679387Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6274: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 39 Version: 7 PathOwnerId: 72057594046678944, cookie: 103 2026-01-08T15:35:28.679513Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 39 Version: 7 PathOwnerId: 72057594046678944, cookie: 103 2026-01-08T15:35:28.679557Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 2, at schemeshard: 72057594046678944, txId: 103 2026-01-08T15:35:28.679596Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 103, pathId: [OwnerId: 72057594046678944, LocalPathId: 39], version: 7 2026-01-08T15:35:28.679650Z node 1 
:FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:611: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 39] was 2 2026-01-08T15:35:28.681495Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6274: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 40 Version: 7 PathOwnerId: 72057594046678944, cookie: 103 2026-01-08T15:35:28.681638Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 40 Version: 7 PathOwnerId: 72057594046678944, cookie: 103 2026-01-08T15:35:28.681707Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046678944, txId: 103 2026-01-08T15:35:28.681746Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 103, pathId: [OwnerId: 72057594046678944, LocalPathId: 40], version: 7 2026-01-08T15:35:28.681791Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:611: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 40] was 1 2026-01-08T15:35:28.681911Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 103, subscribers: 0 2026-01-08T15:35:28.686230Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 103 2026-01-08T15:35:28.686812Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:160: TTxCleanDroppedPaths Complete, done PersistRemovePath for 1 paths, skipped 0, left 0 candidates, at schemeshard: 72057594046678944 2026-01-08T15:35:28.687554Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 103 2026-01-08T15:35:28.688357Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 103 TestModificationResult got TxId: 103, wait until txId: 103 TestWaitNotification wait txId: 103 2026-01-08T15:35:28.688704Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:213: tests -- TTxNotificationSubscriber for txId 103: send EvNotifyTxCompletion 2026-01-08T15:35:28.688753Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:259: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 103 2026-01-08T15:35:28.689246Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:123: NotifyTxCompletion, unknown transaction, txId: 103, at schemeshard: 72057594046678944 2026-01-08T15:35:28.689365Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:230: tests -- TTxNotificationSubscriber for txId 103: got EvNotifyTxCompletionResult 2026-01-08T15:35:28.689434Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:239: tests -- TTxNotificationSubscriber for txId 103: satisfy waiter [1:780:2770] TestWaitNotification: OK eventTxId 103 2026-01-08T15:35:28.690098Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/.metadata/workload_manager/pools/MyResourcePool" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: 
false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2026-01-08T15:35:28.690462Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/.metadata/workload_manager/pools/MyResourcePool" took 354us result status StatusPathDoesNotExist 2026-01-08T15:35:28.690716Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusPathDoesNotExist Reason: "Check failed: path: \'/MyRoot/.metadata/workload_manager/pools/MyResourcePool\', error: path hasn\'t been resolved, nearest resolved path: \'/MyRoot/.metadata/workload_manager/pools\' (id: [OwnerId: 72057594046678944, LocalPathId: 40])" Path: "/MyRoot/.metadata/workload_manager/pools/MyResourcePool" PathId: 18446744073709551615 LastExistedPrefixPath: "/MyRoot/.metadata/workload_manager/pools" LastExistedPrefixPathId: 40 LastExistedPrefixDescription { Self { Name: "pools" PathId: 40 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 101 CreateStep: 5000038 ParentPathId: 39 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathSubType: EPathSubTypeEmpty ChildrenExist: false } } PathOwnerId: 18446744073709551615, at schemeshard: 72057594046678944 |87.8%| [TM] {BAZEL_UPLOAD} ydb/core/tx/schemeshard/ut_resource_pool/unittest >> TResourcePoolTest::ReadOnlyMode >> TxUsage::WriteToTopic_Demo_18_RestartAfterCommit_Query [GOOD] >> BasicUsage::RecreateObserver [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-LocalUser-DropUser-65 [GOOD] >> TResourcePoolTest::ParallelCreateResourcePool >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-LocalUser-DropUser-66 >> KikimrIcGateway::TestDropStreamingQuery [GOOD] >> TResourcePoolTest::DropResourcePool >> KikimrIcGateway::TestAlterStreamingQuery [GOOD] >> TResourcePoolTest::AlterResourcePool >> AsyncIndexChangeCollector::CoveredIndexUpsert [GOOD] >> AsyncIndexChangeCollector::AllColumnsInPk >> KikimrIcGateway::TestLoadMdbBasicSecretValueFromExternalDataSourceMetadata-UseSchemaSecrets [GOOD] >> KikimrIcGateway::TestLoadDataSourceProperties-UseSchemaSecrets |87.8%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/datashard/ut_incremental_restore_scan/ydb-core-tx-datashard-ut_incremental_restore_scan |87.8%| [LD] {RESULT} $(B)/ydb/core/tx/datashard/ut_incremental_restore_scan/ydb-core-tx-datashard-ut_incremental_restore_scan |87.8%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/datashard/ut_incremental_restore_scan/ydb-core-tx-datashard-ut_incremental_restore_scan >> TResourcePoolTest::CreateResourcePoolWithProperties >> AsyncIndexChangeCollector::DeleteSingleRow [GOOD] >> AsyncIndexChangeCollector::IndexedPrimaryKeyDeleteSingleRow ------- [TM] {asan, default-linux-x86_64, release} ydb/public/sdk/cpp/src/client/federated_topic/ut/unittest >> BasicUsage::RecreateObserver [GOOD] Test command err: 2026-01-08T15:33:36.454535Z :RetryDiscoveryWithCancel INFO: Random seed for debugging is 1767886416454497 2026-01-08T15:33:36.966269Z node 1 :METADATA_PROVIDER WARN: log.cpp:841: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7593014342653347533:2076];send_to=[0:7307199536658146131:7762515]; 2026-01-08T15:33:36.975082Z node 1 :METADATA_PROVIDER ERROR: log.cpp:841: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 
2026-01-08T15:33:37.141992Z node 1 :PQ_READ_PROXY DEBUG: caching_service.cpp:44: Direct read cache: : Created 2026-01-08T15:33:37.207105Z node 2 :PQ_READ_PROXY DEBUG: caching_service.cpp:44: Direct read cache: : Created 2026-01-08T15:33:37.739897Z node 2 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: LookupError, path: Root/.metadata/script_executions 2026-01-08T15:33:37.740015Z node 2 :METADATA_PROVIDER ERROR: log.cpp:841: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2026-01-08T15:33:37.833678Z node 1 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: LookupError, path: Root/.metadata/script_executions 2026-01-08T15:33:37.949835Z node 2 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: LookupError, path: Root/.metadata/script_executions 2026-01-08T15:33:38.060689Z node 1 :TX_CONVEYOR ERROR: log.cpp:841: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2026-01-08T15:33:38.062405Z node 1 :IMPORT WARN: schemeshard_import.cpp:440: Table profiles were not loaded 2026-01-08T15:33:38.068707Z node 1 :METADATA_PROVIDER ERROR: log.cpp:841: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2026-01-08T15:33:38.129430Z node 1 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: LookupError, path: Root/.metadata/script_executions 2026-01-08T15:33:38.129862Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2026-01-08T15:33:38.129967Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2026-01-08T15:33:38.150618Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2026-01-08T15:33:38.150692Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2026-01-08T15:33:38.168525Z node 1 :HIVE WARN: hive_impl.cpp:814: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 2 Cookie 2 2026-01-08T15:33:38.168702Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2026-01-08T15:33:38.180076Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2026-01-08T15:33:38.181775Z node 2 :TX_CONVEYOR ERROR: log.cpp:841: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2026-01-08T15:33:38.284246Z node 2 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: PathErrorUnknown, path: Root/.metadata/script_executions 2026-01-08T15:33:38.327925Z node 2 :BS_NODE WARN: {NWDC01@distconf.cpp:423} StateFunc too long Type# 268639238 
Duration# 0.005280s 2026-01-08T15:33:38.446406Z node 2 :BS_NODE WARN: {NWDC01@distconf.cpp:423} StateFunc too long Type# 268639238 Duration# 0.020577s 2026-01-08T15:33:38.573469Z node 1 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: PathErrorUnknown, path: Root/.metadata/script_executions 2026-01-08T15:33:38.661829Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: /home/runner/.ya/build/build_root/663n/00021b/r3tmp/yandexY8jY4U.tmp 2026-01-08T15:33:38.661856Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: /home/runner/.ya/build/build_root/663n/00021b/r3tmp/yandexY8jY4U.tmp 2026-01-08T15:33:38.662002Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:202: successfully initialized from file: /home/runner/.ya/build/build_root/663n/00021b/r3tmp/yandexY8jY4U.tmp 2026-01-08T15:33:38.662080Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2026-01-08T15:33:38.706396Z INFO: TTestServer started on Port 18597 GrpcPort 26035 PQClient connected to localhost:26035 2026-01-08T15:33:39.032984Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2026-01-08T15:33:39.073942Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:123: NotifyTxCompletion, unknown transaction, txId: 281474976710658, at schemeshard: 72057594046644480 2026-01-08T15:33:41.972515Z node 1 :METADATA_PROVIDER ERROR: log.cpp:841: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7593014342653347533:2076];send_to=[0:7307199536658146131:7762515]; 2026-01-08T15:33:41.972638Z node 1 :METADATA_PROVIDER ERROR: log.cpp:841: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2026-01-08T15:33:42.657039Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7593014369481221920:2303], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:33:42.657100Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7593014369481221934:2306], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:33:42.657177Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:263: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:33:42.657554Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7593014369481221937:2308], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:33:42.657622Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:263: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:33:42.683573Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7593014368423152621:2332], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:33:42.683678Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:263: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:33:42.684192Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7593014368423152633:2335], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:33:42.684227Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7593014368423152634:2336], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:33:42.684415Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:263: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:33:42.685838Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976720657:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2026-01-08T15:33:42.730762Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7593014368423152671:2340], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:33:42.730841Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:263: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:33:42.731078Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7593014368423152674:2341], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:33:42.731149Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:263: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:33:42.755228Z node 1 :TX_PROXY ERROR: schemereq.cpp:624: Actor# [1:7593014368423152639:2910] txid# 281474976710661, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exists but creating right now (id: [OwnerId: 72057594046644480, LocalPathId: 44], type: EPathTypeResourcePool, state: EPathStateCreate)" severity: 1 } 2026-01-08T15:33:42.776898Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7593014368423152638:2337], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976720657 completed, doublechecking } 2026-01-08T15:33:42.778783Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7593014369481221936:2307], DatabaseId: /Root, PoolId: default, Scheduled retry for error: { : Error: Close with zero timeout " } 2026-01-08T15:35:28.682408Z :DEBUG: [/Root] [/Root] [9be5433f-25d16c71-90854fad-447664e5] [] Abort session to cluster 2026-01-08T15:35:28.682857Z :INFO: [/Root] [/Root] [cb91e997-750abe59-76fe6dd6-9e2a1b27] Closing read session. Close timeout: 0.000000s 2026-01-08T15:35:28.682902Z :INFO: [/Root] Read/commit by partition streams (cluster:topic:partition:stream-id:read-offset:committed-offset): 2026-01-08T15:35:28.682941Z :INFO: [/Root] [/Root] [cb91e997-750abe59-76fe6dd6-9e2a1b27] Counters: { Errors: 0 CurrentSessionLifetimeMs: 371 BytesRead: 0 MessagesRead: 0 BytesReadCompressed: 0 BytesInflightUncompressed: 0 BytesInflightCompressed: 0 BytesInflightTotal: 0 MessagesInflight: 0 } 2026-01-08T15:35:28.682999Z :NOTICE: [/Root] [/Root] [cb91e997-750abe59-76fe6dd6-9e2a1b27] Aborting read session. Description: SessionClosed { Status: ABORTED Issues: "
: Error: Close with zero timeout " } 2026-01-08T15:35:28.683025Z :DEBUG: [/Root] [/Root] [cb91e997-750abe59-76fe6dd6-9e2a1b27] [] Abort session to cluster 2026-01-08T15:35:28.683284Z :INFO: [/Root] [/Root] [7bd88506-7758e3e4-151f748e-8215b45c] Closing read session. Close timeout: 0.000000s 2026-01-08T15:35:28.683325Z :INFO: [/Root] Read/commit by partition streams (cluster:topic:partition:stream-id:read-offset:committed-offset): 2026-01-08T15:35:28.683359Z :INFO: [/Root] [/Root] [7bd88506-7758e3e4-151f748e-8215b45c] Counters: { Errors: 0 CurrentSessionLifetimeMs: 369 BytesRead: 0 MessagesRead: 0 BytesReadCompressed: 0 BytesInflightUncompressed: 0 BytesInflightCompressed: 0 BytesInflightTotal: 0 MessagesInflight: 0 } 2026-01-08T15:35:28.683407Z :NOTICE: [/Root] [/Root] [7bd88506-7758e3e4-151f748e-8215b45c] Aborting read session. Description: SessionClosed { Status: ABORTED Issues: "
: Error: Close with zero timeout " } 2026-01-08T15:35:28.683432Z :DEBUG: [/Root] [/Root] [7bd88506-7758e3e4-151f748e-8215b45c] [] Abort session to cluster 2026-01-08T15:35:28.683772Z :INFO: [/Root] [/Root] [7bd88506-7758e3e4-151f748e-8215b45c] Closing read session. Close timeout: 0.000000s 2026-01-08T15:35:28.683820Z :INFO: [/Root] Read/commit by partition streams (cluster:topic:partition:stream-id:read-offset:committed-offset): 2026-01-08T15:35:28.683881Z :INFO: [/Root] [/Root] [7bd88506-7758e3e4-151f748e-8215b45c] Counters: { Errors: 0 CurrentSessionLifetimeMs: 370 BytesRead: 0 MessagesRead: 0 BytesReadCompressed: 0 BytesInflightUncompressed: 0 BytesInflightCompressed: 0 BytesInflightTotal: 0 MessagesInflight: 0 } 2026-01-08T15:35:28.683982Z :NOTICE: [/Root] [/Root] [7bd88506-7758e3e4-151f748e-8215b45c] Aborting read session. Description: SessionClosed { Status: ABORTED Issues: "
: Error: Aborted " } 2026-01-08T15:35:28.684142Z :INFO: [/Root] [/Root] [cb91e997-750abe59-76fe6dd6-9e2a1b27] Closing read session. Close timeout: 0.000000s 2026-01-08T15:35:28.684178Z :INFO: [/Root] Read/commit by partition streams (cluster:topic:partition:stream-id:read-offset:committed-offset): 2026-01-08T15:35:28.684209Z :INFO: [/Root] [/Root] [cb91e997-750abe59-76fe6dd6-9e2a1b27] Counters: { Errors: 0 CurrentSessionLifetimeMs: 373 BytesRead: 0 MessagesRead: 0 BytesReadCompressed: 0 BytesInflightUncompressed: 0 BytesInflightCompressed: 0 BytesInflightTotal: 0 MessagesInflight: 0 } 2026-01-08T15:35:28.684262Z :NOTICE: [/Root] [/Root] [cb91e997-750abe59-76fe6dd6-9e2a1b27] Aborting read session. Description: SessionClosed { Status: ABORTED Issues: "
: Error: Aborted " } 2026-01-08T15:35:28.684317Z :INFO: [/Root] [/Root] [9be5433f-25d16c71-90854fad-447664e5] Closing read session. Close timeout: 0.000000s 2026-01-08T15:35:28.684354Z :INFO: [/Root] Read/commit by partition streams (cluster:topic:partition:stream-id:read-offset:committed-offset): -:test-topic:0:1:0:0 2026-01-08T15:35:28.684384Z :INFO: [/Root] [/Root] [9be5433f-25d16c71-90854fad-447664e5] Counters: { Errors: 0 CurrentSessionLifetimeMs: 393 BytesRead: 0 MessagesRead: 0 BytesReadCompressed: 0 BytesInflightUncompressed: 0 BytesInflightCompressed: 0 BytesInflightTotal: 0 MessagesInflight: 0 } 2026-01-08T15:35:28.684429Z :NOTICE: [/Root] [/Root] [9be5433f-25d16c71-90854fad-447664e5] Aborting read session. Description: SessionClosed { Status: ABORTED Issues: "
: Error: Aborted " } 2026-01-08T15:35:28.690597Z node 3 :PQ_READ_PROXY DEBUG: read_session_actor.cpp:122: session cookie 3 consumer shared/user session shared/user_3_3_2735022498803147229_v1 grpc read done: success# 0, data# { } 2026-01-08T15:35:28.690636Z node 3 :PQ_READ_PROXY INFO: read_session_actor.cpp:125: session cookie 3 consumer shared/user session shared/user_3_3_2735022498803147229_v1 grpc read failed 2026-01-08T15:35:28.690670Z node 3 :PQ_READ_PROXY INFO: read_session_actor.cpp:92: session cookie 3 consumer shared/user session shared/user_3_3_2735022498803147229_v1 grpc closed 2026-01-08T15:35:28.690721Z node 3 :PQ_READ_PROXY INFO: read_session_actor.cpp:383: session cookie 3 consumer shared/user session shared/user_3_3_2735022498803147229_v1 is DEAD 2026-01-08T15:35:28.691762Z node 3 :PQ_READ_PROXY DEBUG: read_session_actor.cpp:122: session cookie 1 consumer shared/user session shared/user_3_1_13992370491323882889_v1 grpc read done: success# 0, data# { } 2026-01-08T15:35:28.691776Z node 3 :PQ_READ_PROXY INFO: read_session_actor.cpp:125: session cookie 1 consumer shared/user session shared/user_3_1_13992370491323882889_v1 grpc read failed 2026-01-08T15:35:28.691794Z node 3 :PQ_READ_PROXY INFO: read_session_actor.cpp:92: session cookie 1 consumer shared/user session shared/user_3_1_13992370491323882889_v1 grpc closed 2026-01-08T15:35:28.691814Z node 3 :PQ_READ_PROXY INFO: read_session_actor.cpp:383: session cookie 1 consumer shared/user session shared/user_3_1_13992370491323882889_v1 is DEAD 2026-01-08T15:35:28.693371Z node 3 :PQ_READ_PROXY DEBUG: read_session_actor.cpp:122: session cookie 2 consumer shared/user session shared/user_3_2_5416137306792653692_v1 grpc read done: success# 0, data# { } 2026-01-08T15:35:28.693388Z node 3 :PQ_READ_PROXY INFO: read_session_actor.cpp:125: session cookie 2 consumer shared/user session shared/user_3_2_5416137306792653692_v1 grpc read failed 2026-01-08T15:35:28.693407Z node 3 :PQ_READ_PROXY INFO: read_session_actor.cpp:92: session cookie 2 consumer shared/user session shared/user_3_2_5416137306792653692_v1 grpc closed 2026-01-08T15:35:28.693428Z node 3 :PQ_READ_PROXY INFO: read_session_actor.cpp:383: session cookie 2 consumer shared/user session shared/user_3_2_5416137306792653692_v1 is DEAD 2026-01-08T15:35:28.695381Z node 3 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer__balancing.cpp:1661: [72075186224037893][rt3.dc1--test-topic] pipe [3:7593014822033694667:2485] disconnected. 2026-01-08T15:35:28.695431Z node 3 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1672: [72075186224037893][rt3.dc1--test-topic] pipe [3:7593014822033694667:2485] disconnected; active server actors: 1 2026-01-08T15:35:28.695474Z node 3 :PERSQUEUE_READ_BALANCER NOTICE: read_balancer__balancing.cpp:1681: [72075186224037893][rt3.dc1--test-topic] pipe [3:7593014822033694667:2485] client user disconnected session shared/user_3_3_2735022498803147229_v1 2026-01-08T15:35:28.695565Z node 3 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer__balancing.cpp:1187: [72075186224037893][rt3.dc1--test-topic] consumer user rebalancing was scheduled 2026-01-08T15:35:28.695624Z node 3 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer__balancing.cpp:1661: [72075186224037893][rt3.dc1--test-topic] pipe [3:7593014822033694662:2483] disconnected. 
2026-01-08T15:35:28.695640Z node 3 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1672: [72075186224037893][rt3.dc1--test-topic] pipe [3:7593014822033694662:2483] disconnected; active server actors: 1 2026-01-08T15:35:28.695658Z node 3 :PERSQUEUE_READ_BALANCER NOTICE: read_balancer__balancing.cpp:1681: [72075186224037893][rt3.dc1--test-topic] pipe [3:7593014822033694662:2483] client user disconnected session shared/user_3_1_13992370491323882889_v1 2026-01-08T15:35:28.695702Z node 3 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer__balancing.cpp:1661: [72075186224037893][rt3.dc1--test-topic] pipe [3:7593014822033694661:2484] disconnected. 2026-01-08T15:35:28.695723Z node 3 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1672: [72075186224037893][rt3.dc1--test-topic] pipe [3:7593014822033694661:2484] disconnected; active server actors: 1 2026-01-08T15:35:28.695739Z node 3 :PERSQUEUE_READ_BALANCER NOTICE: read_balancer__balancing.cpp:1681: [72075186224037893][rt3.dc1--test-topic] pipe [3:7593014822033694661:2484] client user disconnected session shared/user_3_2_5416137306792653692_v1 2026-01-08T15:35:28.698671Z node 4 :PERSQUEUE DEBUG: pq_impl.cpp:2321: [PQ: 72075186224037892] Destroy direct read session shared/user_3_3_2735022498803147229_v1 2026-01-08T15:35:28.698719Z node 4 :PERSQUEUE DEBUG: pq_impl.cpp:2809: [PQ: 72075186224037892] server disconnected, pipe [3:7593014822033694670:2494] destroyed 2026-01-08T15:35:28.698773Z node 4 :PQ_READ_PROXY DEBUG: caching_service.cpp:139: Direct read cache: server session deregistered: shared/user_3_3_2735022498803147229_v1 2026-01-08T15:35:28.738161Z node 4 :PERSQUEUE DEBUG: partition.cpp:2329: [72075186224037892][Partition][0][StateIdle] Process user action and tx events 2026-01-08T15:35:28.738204Z node 4 :PERSQUEUE DEBUG: partition.cpp:2388: [72075186224037892][Partition][0][StateIdle] Events: 0, PendingCommits: 0, PendingWrites: 0 2026-01-08T15:35:28.738219Z node 4 :PERSQUEUE DEBUG: partition.cpp:2337: [72075186224037892][Partition][0][StateIdle] Process user action and tx pending commits 2026-01-08T15:35:28.738245Z node 4 :PERSQUEUE DEBUG: partition.cpp:2388: [72075186224037892][Partition][0][StateIdle] Events: 0, PendingCommits: 0, PendingWrites: 0 2026-01-08T15:35:28.738260Z node 4 :PERSQUEUE DEBUG: partition.cpp:2347: [72075186224037892][Partition][0][StateIdle] Try persist 2026-01-08T15:35:28.750564Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7747: Cannot get console configs 2026-01-08T15:35:28.750604Z node 3 :IMPORT WARN: schemeshard_import.cpp:440: Table profiles were not loaded 2026-01-08T15:35:28.840040Z node 4 :PERSQUEUE DEBUG: partition.cpp:2329: [72075186224037892][Partition][0][StateIdle] Process user action and tx events 2026-01-08T15:35:28.840076Z node 4 :PERSQUEUE DEBUG: partition.cpp:2388: [72075186224037892][Partition][0][StateIdle] Events: 0, PendingCommits: 0, PendingWrites: 0 2026-01-08T15:35:28.840089Z node 4 :PERSQUEUE DEBUG: partition.cpp:2337: [72075186224037892][Partition][0][StateIdle] Process user action and tx pending commits 2026-01-08T15:35:28.840111Z node 4 :PERSQUEUE DEBUG: partition.cpp:2388: [72075186224037892][Partition][0][StateIdle] Events: 0, PendingCommits: 0, PendingWrites: 0 2026-01-08T15:35:28.840125Z node 4 :PERSQUEUE DEBUG: partition.cpp:2347: [72075186224037892][Partition][0][StateIdle] Try persist 2026-01-08T15:35:28.942666Z node 4 :PERSQUEUE DEBUG: partition.cpp:2329: [72075186224037892][Partition][0][StateIdle] Process user action and tx events 
2026-01-08T15:35:28.942701Z node 4 :PERSQUEUE DEBUG: partition.cpp:2388: [72075186224037892][Partition][0][StateIdle] Events: 0, PendingCommits: 0, PendingWrites: 0 2026-01-08T15:35:28.942714Z node 4 :PERSQUEUE DEBUG: partition.cpp:2337: [72075186224037892][Partition][0][StateIdle] Process user action and tx pending commits 2026-01-08T15:35:28.942735Z node 4 :PERSQUEUE DEBUG: partition.cpp:2388: [72075186224037892][Partition][0][StateIdle] Events: 0, PendingCommits: 0, PendingWrites: 0 2026-01-08T15:35:28.942750Z node 4 :PERSQUEUE DEBUG: partition.cpp:2347: [72075186224037892][Partition][0][StateIdle] Try persist |87.8%| [TM] {BAZEL_UPLOAD} ydb/public/sdk/cpp/src/client/federated_topic/ut/unittest >> BsControllerConfig::MergeIntersectingBoxes >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-LocalUser-CreateUser-18 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-LocalUser-CreateUser-19 >> TResourcePoolTest::ReadOnlyMode [GOOD] >> AsyncIndexChangeCollector::UpsertWithoutIndexedValue [GOOD] >> CdcStreamChangeCollector::DeleteNothing >> TResourcePoolTest::ParallelCreateResourcePool [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/provider/ut/unittest >> KikimrIcGateway::TestAlterStreamingQuery [GOOD] Test command err: Trying to start YDB, gRPC: 24884, MsgBus: 65317 2026-01-08T15:34:59.229015Z node 1 :METADATA_PROVIDER WARN: log.cpp:841: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7593014698860274812:2240];send_to=[0:7307199536658146131:7762515]; 2026-01-08T15:34:59.229262Z node 1 :METADATA_PROVIDER ERROR: log.cpp:841: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2026-01-08T15:34:59.654395Z node 1 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: LookupError, path: Root/.metadata/script_executions 2026-01-08T15:34:59.677800Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2026-01-08T15:34:59.677910Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2026-01-08T15:34:59.802459Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1374: Notification cookie mismatch for subscription [1:7593014698860274586:2081] 1767886499166579 != 1767886499166582 2026-01-08T15:34:59.813293Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2026-01-08T15:34:59.817500Z node 1 :IMPORT WARN: schemeshard_import.cpp:440: Table profiles were not loaded 2026-01-08T15:34:59.830408Z node 1 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: PathErrorUnknown, path: Root/.metadata/script_executions 2026-01-08T15:35:00.174914Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2026-01-08T15:35:00.174940Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2026-01-08T15:35:00.174946Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2026-01-08T15:35:00.175028Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad 
distributable configuration 2026-01-08T15:35:00.242459Z node 1 :TX_CONVEYOR ERROR: log.cpp:841: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2026-01-08T15:35:01.054265Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2026-01-08T15:35:01.063316Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:123: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2026-01-08T15:35:01.076781Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateExternalDataSource, opId: 281474976710658:2, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_external_data_source.cpp:268) 2026-01-08T15:35:01.132483Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateExternalTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_external_table.cpp:352) 2026-01-08T15:35:01.151919Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:123: NotifyTxCompletion, unknown transaction, txId: 281474976710659, at schemeshard: 72057594046644480 Trying to start YDB, gRPC: 62106, MsgBus: 27860 2026-01-08T15:35:05.106738Z node 2 :METADATA_PROVIDER WARN: log.cpp:841: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7593014725523600168:2079];send_to=[0:7307199536658146131:7762515]; 2026-01-08T15:35:05.107171Z node 2 :METADATA_PROVIDER ERROR: log.cpp:841: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2026-01-08T15:35:05.113232Z node 2 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:125: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, got undelivered to scheme cache: ActorUnknown 2026-01-08T15:35:05.312292Z node 2 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: LookupError, path: Root/.metadata/script_executions 2026-01-08T15:35:05.323013Z node 2 :IMPORT WARN: schemeshard_import.cpp:440: Table profiles were not loaded 2026-01-08T15:35:05.324333Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1374: Notification cookie mismatch for subscription [2:7593014725523600125:2081] 1767886505090851 != 1767886505090854 2026-01-08T15:35:05.335259Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2026-01-08T15:35:05.335395Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2026-01-08T15:35:05.363989Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2026-01-08T15:35:05.557589Z node 2 :KQP_PROXY WARN: 
kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: PathErrorUnknown, path: Root/.metadata/script_executions 2026-01-08T15:35:05.714664Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2026-01-08T15:35:05.714689Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2026-01-08T15:35:05.714695Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2026-01-08T15:35:05.714781Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2026-01-08T15:35:06.102543Z node 2 :TX_CONVEYOR ERROR: log.cpp:841: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2026-01-08T15:35:06.283678Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2026-01-08T15:35:06.290898Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:123: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2026-01-08T15:35:06.313465Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateExternalDataSource, opId: 281474976715658:2, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_external_data_source.cpp:268) 2026-01-08T15:35:06.333541Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateExternalTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_external_table.cpp:352) 2026-01-08T15:35:06.383282Z node 2 :TX_PROXY ERROR: schemereq.cpp:624: Actor# [2:7593014729818568333:2537] txid# 281474976715660, issues: { message: "Check failed: path: \'/Root/f1/f2/external_table\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 41], type: EPathTypeExternalTable, state: EPathStateNoChanges)" severity: 1 }
: Error: Scheme operation failed, status: ExecComplete, reason: Check failed: path: '/Root/f1/f2/external_table', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 41], type: EPathTypeExternalTable, state: EPathStateNoChanges) 2026-01-08T15:35:06.383637Z node 2 :KQP_GATEWAY ERROR: scheme.h:178: Unexpected error on scheme request, TxId: 281474976715660, ProxyStatus: ExecComplete, SchemeShardReason: Check failed: path: '/Root/f1/f2/external_table', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 41], type: EPathTypeExternalTable, state: EPathStateNoChanges) Trying to start YDB, gRPC: 1290, MsgBus: 28835 2026-01-08T15:35:10.153995Z node 3 :METADATA_PROVIDER WARN: log.cpp:841: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[3:7593014746727201424:2146];send_to=[0:7307199536658146131:7762515]; 2026-01-08T15:35:10.159155Z node 3 :METADATA_PROVIDER ERROR: log.cpp:841: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2026-01-08T15:35:10.349661Z node 3 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: LookupError, path: Root/.metadata/script_executions 2026-01-08T15:35:10.378520Z node 3 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1374: Notification cookie mismatch for subscription [3:7593014746727201306:2081] 1767886510139979 != 1767886510139982 2026-01-08T15:35:10.434369Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2026-01-08T15:35:10.434529Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2026-01-08T15:35:10.434926Z node 3 :IMPORT WARN: schemeshard_import.cpp:440: Table profiles were not loaded 2026-01-08T15:35:10.445354Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Connecting -> Connected 2026-01-08T15:35:10.651576Z node 3 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: PathErrorUnknown, path: Root/.metadata/script_executions 2026-01-08T15:35:10.834291Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2026-01-08T15:35:10.834330Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2026-01-08T15:35:10.834346Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2026-01-08T15:35:10.834438Z node 3 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2026-01-08T15:35:11.155821Z node 3 :TX_CONVEYOR ERROR: log.cpp:841: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2026-01-08T15:35:11.651676Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2026-01-08T15:35:11.659548Z node 3 :FLAT_TX_SCHEMESHARD WAR ... 
Failed to check script execution tables existence, scheme status: LookupError, path: Root/.metadata/script_executions 2026-01-08T15:35:16.354224Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2026-01-08T15:35:16.358402Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2026-01-08T15:35:16.431428Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Connecting -> Connected 2026-01-08T15:35:16.434153Z node 4 :IMPORT WARN: schemeshard_import.cpp:440: Table profiles were not loaded 2026-01-08T15:35:16.702924Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2026-01-08T15:35:16.702943Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2026-01-08T15:35:16.702950Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2026-01-08T15:35:16.703046Z node 4 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2026-01-08T15:35:16.785081Z node 4 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: PathErrorUnknown, path: Root/.metadata/script_executions 2026-01-08T15:35:17.088552Z node 4 :TX_CONVEYOR ERROR: log.cpp:841: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2026-01-08T15:35:17.572373Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2026-01-08T15:35:17.611362Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2026-01-08T15:35:17.664193Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterResourcePool, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_alter_resource_pool.cpp:155) Trying to start YDB, gRPC: 6413, MsgBus: 26570 2026-01-08T15:35:22.307910Z node 5 :METADATA_PROVIDER WARN: log.cpp:841: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[5:7593014797026502497:2163];send_to=[0:7307199536658146131:7762515]; 2026-01-08T15:35:22.308003Z node 5 :METADATA_PROVIDER ERROR: log.cpp:841: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2026-01-08T15:35:22.353774Z node 5 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:125: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, got undelivered to scheme cache: ActorUnknown 2026-01-08T15:35:22.605457Z node 5 
:KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: LookupError, path: Root/.metadata/script_executions 2026-01-08T15:35:22.607695Z node 5 :IMPORT WARN: schemeshard_import.cpp:440: Table profiles were not loaded 2026-01-08T15:35:22.610558Z node 5 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1374: Notification cookie mismatch for subscription [5:7593014797026502372:2081] 1767886522273682 != 1767886522273685 2026-01-08T15:35:22.647775Z node 5 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(5, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2026-01-08T15:35:22.647872Z node 5 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(5, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2026-01-08T15:35:22.712389Z node 5 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(5, (0,0,0,0)) VolatileState: Connecting -> Connected 2026-01-08T15:35:22.890662Z node 5 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: PathErrorUnknown, path: Root/.metadata/script_executions 2026-01-08T15:35:22.891067Z node 5 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2026-01-08T15:35:22.891076Z node 5 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2026-01-08T15:35:22.891083Z node 5 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2026-01-08T15:35:22.891163Z node 5 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2026-01-08T15:35:23.311951Z node 5 :TX_CONVEYOR ERROR: log.cpp:841: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2026-01-08T15:35:23.477990Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2026-01-08T15:35:23.486219Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:123: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2026-01-08T15:35:23.541142Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:2, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:35:27.086774Z node 5 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [5:7593014818501339846:2335], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:35:27.086963Z node 5 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:263: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:35:27.087532Z node 5 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [5:7593014818501339864:2342], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:35:27.087918Z node 5 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [5:7593014818501339862:2340], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:35:27.087954Z node 5 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [5:7593014818501339863:2341], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:35:27.087997Z node 5 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:263: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:35:27.092896Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710660:2, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2026-01-08T15:35:27.100933Z node 5 :TX_PROXY ERROR: schemereq.cpp:624: Actor# [5:7593014818501339870:2574] txid# 281474976710659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exists but creating right now (id: [OwnerId: 72057594046644480, LocalPathId: 43], type: EPathTypeResourcePool, state: EPathStateCreate)" severity: 1 } 2026-01-08T15:35:27.111021Z node 5 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [5:7593014818501339868:2343], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710660 completed, doublechecking } 2026-01-08T15:35:27.111085Z node 5 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [5:7593014818501339869:2344], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710660 completed, doublechecking } 2026-01-08T15:35:27.197216Z node 5 :TX_PROXY ERROR: schemereq.cpp:624: Actor# [5:7593014818501339917:2605] txid# 281474976710661, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 43], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2026-01-08T15:35:27.205486Z node 5 :TX_PROXY ERROR: schemereq.cpp:624: Actor# [5:7593014818501339925:2611] txid# 281474976710662, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 43], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2026-01-08T15:35:27.614144Z node 5 :METADATA_PROVIDER ERROR: log.cpp:841: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[5:7593014797026502497:2163];send_to=[0:7307199536658146131:7762515]; 2026-01-08T15:35:27.620972Z node 5 :METADATA_PROVIDER ERROR: log.cpp:841: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2026-01-08T15:35:28.206164Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) 2026-01-08T15:35:29.004774Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710670:1, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:35:29.153166Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976710675:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) |87.8%| [TM] {BAZEL_UPLOAD} ydb/core/kqp/provider/ut/unittest >> AsyncIndexChangeCollector::MultiIndexedTableUpdateOneIndexedColumn [GOOD] >> AsyncIndexChangeCollector::MultiIndexedTableReplaceSingleRow ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/provider/ut/unittest >> KikimrIcGateway::TestDropStreamingQuery [GOOD] Test command err: Trying to start YDB, gRPC: 29656, MsgBus: 21442 2026-01-08T15:34:57.743956Z node 1 :METADATA_PROVIDER WARN: log.cpp:841: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7593014688328997815:2136];send_to=[0:7307199536658146131:7762515]; 2026-01-08T15:34:57.744000Z node 1 :METADATA_PROVIDER ERROR: log.cpp:841: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2026-01-08T15:34:57.858454Z node 1 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:125: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, got undelivered to scheme cache: ActorUnknown 2026-01-08T15:34:58.549543Z node 1 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: 
[ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: LookupError, path: Root/.metadata/script_executions 2026-01-08T15:34:58.608008Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2026-01-08T15:34:58.608168Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2026-01-08T15:34:58.747835Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2026-01-08T15:34:58.805708Z node 1 :TX_CONVEYOR ERROR: log.cpp:841: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2026-01-08T15:34:58.940942Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1374: Notification cookie mismatch for subscription [1:7593014688328997717:2081] 1767886497733068 != 1767886497733071 2026-01-08T15:34:58.948778Z node 1 :IMPORT WARN: schemeshard_import.cpp:440: Table profiles were not loaded 2026-01-08T15:34:58.966902Z node 1 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: PathErrorUnknown, path: Root/.metadata/script_executions 2026-01-08T15:34:59.143782Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2026-01-08T15:34:59.143802Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2026-01-08T15:34:59.143809Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2026-01-08T15:34:59.143891Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2026-01-08T15:34:59.966274Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2026-01-08T15:35:02.669962Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7593014709803835129:2329], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:35:02.670128Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:263: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:35:02.678473Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7593014709803835139:2330], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:35:02.678631Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:263: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:35:02.750454Z node 1 :METADATA_PROVIDER ERROR: log.cpp:841: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7593014688328997815:2136];send_to=[0:7307199536658146131:7762515]; 2026-01-08T15:35:02.750531Z node 1 :METADATA_PROVIDER ERROR: log.cpp:841: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2026-01-08T15:35:02.964965Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:35:03.114768Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:35:03.169443Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:35:03.213889Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:35:03.281255Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7593014714098802742:2361], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:35:03.281407Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:263: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:35:03.282467Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7593014714098802747:2364], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:35:03.282521Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7593014714098802748:2365], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:35:03.282676Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:263: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:35:03.287775Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710664:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2026-01-08T15:35:03.300884Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7593014714098802751:2366], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710664 completed, doublechecking } 2026-01-08T15:35:03.451923Z node 1 :TX_PROXY ERROR: schemereq.cpp:624: Actor# [1:7593014714098802802:2769] txid# 281474976710665, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 47], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } Trying to start YDB, gRPC: 14398, MsgBus: 64309 2026-01-08T15:35:05.203003Z node 2 :METADATA_PROVIDER ERROR: log.cpp:841: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2026-01-08T15:35:05.223124Z node 2 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: LookupError, path: Root/.metadata/script_executions 2026-01-08T15:35:05.438550Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1374: Notification cookie mismatch for subscription [2:7593014725922947518:2081] 1767886505040850 != 1767886505040853 2026-01-08T15:35:05.454092Z node 2 :IMPORT WARN: schemeshard_import.cpp:440: Table profiles were not loaded 2026-01-08T15:35:05.458535Z node 2 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: LookupError, path: Root/.metadata/script_executions 2026-01-08T15:35:05.462636Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2026-01-08T15:35:05.462712Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2026-01-08T15:35:05.491029Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2026-01-08T15:35:05.670911Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2026-01-08T15:35:05.670935Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2026-01-08T15:35:05.670942Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2026-01-08T15:35:05.671024Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2026-01-08T15:35:05.846109Z node 2 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: PathErrorUnknown, path: Root/.metadata/script_executions 2026-01-08T15:35:06.142590Z node 2 :TX_CONVEYOR ERROR: log.cpp:841: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2026-01-08T15:35:06.495299Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2026-01-08T15:35:10.057943Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7593014747397784934:2329], DatabaseId: /Root, PoolId: default, Failed to fetch pool 
info, NOT_FOUND, i ... VICE WARN: kqp_workload_service.cpp:263: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:35:17.641203Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7593014775611683366:2487], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:35:17.641248Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7593014775611683367:2488], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:35:17.641288Z node 3 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:263: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:35:17.646089Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715670:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2026-01-08T15:35:17.658370Z node 3 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [3:7593014775611683370:2489], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715670 completed, doublechecking } 2026-01-08T15:35:17.731585Z node 3 :TX_PROXY ERROR: schemereq.cpp:624: Actor# [3:7593014775611683421:3773] txid# 281474976715671, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 53], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2026-01-08T15:35:20.296217Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateSecret, opId: 281474976715673:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_secret.cpp:267) 2026-01-08T15:35:20.339312Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateSecret, opId: 281474976715674:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_secret.cpp:267) 2026-01-08T15:35:20.392274Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateExternalDataSource, opId: 281474976715675:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_external_data_source.cpp:268) Trying to start YDB, gRPC: 24416, MsgBus: 26739 2026-01-08T15:35:21.823844Z node 4 :METADATA_PROVIDER WARN: log.cpp:841: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[4:7593014794049465541:2254];send_to=[0:7307199536658146131:7762515]; 2026-01-08T15:35:21.824087Z node 4 :METADATA_PROVIDER ERROR: log.cpp:841: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2026-01-08T15:35:21.882716Z node 4 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:125: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, got undelivered to scheme cache: ActorUnknown 2026-01-08T15:35:22.089704Z node 4 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: LookupError, path: Root/.metadata/script_executions 2026-01-08T15:35:22.262531Z node 4 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1374: Notification cookie mismatch for subscription [4:7593014794049465323:2081] 1767886521789897 != 1767886521789900 2026-01-08T15:35:22.276112Z node 4 :IMPORT WARN: schemeshard_import.cpp:440: Table profiles were not loaded 2026-01-08T15:35:22.289138Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2026-01-08T15:35:22.293354Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2026-01-08T15:35:22.416480Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Connecting -> Connected 2026-01-08T15:35:22.422407Z node 4 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: 
PathErrorUnknown, path: Root/.metadata/script_executions 2026-01-08T15:35:22.646406Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2026-01-08T15:35:22.646431Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2026-01-08T15:35:22.646438Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2026-01-08T15:35:22.646528Z node 4 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2026-01-08T15:35:22.790489Z node 4 :TX_CONVEYOR ERROR: log.cpp:841: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2026-01-08T15:35:23.395392Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2026-01-08T15:35:23.404643Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:123: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2026-01-08T15:35:23.435999Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:2, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:35:26.833760Z node 4 :METADATA_PROVIDER ERROR: log.cpp:841: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[4:7593014794049465541:2254];send_to=[0:7307199536658146131:7762515]; 2026-01-08T15:35:26.834557Z node 4 :METADATA_PROVIDER ERROR: log.cpp:841: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2026-01-08T15:35:26.837234Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7593014815524302798:2335], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:35:26.837391Z node 4 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:263: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:35:26.837749Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7593014815524302813:2340], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:35:26.837804Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7593014815524302814:2341], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:35:26.841756Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710659:2, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2026-01-08T15:35:26.846578Z node 4 :TX_PROXY ERROR: schemereq.cpp:624: Actor# [4:7593014815524302822:2575] txid# 281474976710660, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exists but creating right now (id: [OwnerId: 72057594046644480, LocalPathId: 43], type: EPathTypeResourcePool, state: EPathStateCreate)" severity: 1 } 2026-01-08T15:35:26.854957Z node 4 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [4:7593014815524302818:2343], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710659 completed, doublechecking } 2026-01-08T15:35:26.855018Z node 4 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [4:7593014815524302817:2342], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710659 completed, doublechecking } 2026-01-08T15:35:26.928862Z node 4 :TX_PROXY ERROR: schemereq.cpp:624: Actor# [4:7593014815524302877:2606] txid# 281474976710661, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 43], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2026-01-08T15:35:26.947271Z node 4 :TX_PROXY ERROR: schemereq.cpp:624: Actor# [4:7593014815524302887:2613] txid# 281474976710662, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 43], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2026-01-08T15:35:27.690821Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) 2026-01-08T15:35:28.215956Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:1, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:35:28.364018Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976710673:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) |87.8%| [TM] {BAZEL_UPLOAD} ydb/core/kqp/provider/ut/unittest >> TResourcePoolTest::DropResourcePool [GOOD] >> TResourcePoolTest::AlterResourcePool [GOOD] >> TResourcePoolTest::AlterResourcePoolShouldFailIfSuchEntityNotExists >> KikimrIcGateway::TestLoadTokenSecretValueFromExternalDataSourceMetadata-UseSchemaSecrets [GOOD] >> KikimrIcGateway::TestSecretsExistingValidation+UseSchemaSecrets >> TxUsage::WriteToTopic_Demo_21_RestartAfterCommit_Query ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_resource_pool/unittest >> TResourcePoolTest::ParallelCreateResourcePool [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2143] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2143] Leader for TabletID 72057594046678944 is [1:129:2154] sender: [1:130:2058] recipient: [1:112:2143] 2026-01-08T15:35:31.400664Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7938: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2026-01-08T15:35:31.400775Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7966: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2026-01-08T15:35:31.400825Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7852: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, 
StatsMaxExecuteTime# 0.010000s 2026-01-08T15:35:31.400870Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7868: OperationsProcessing config: using default configuration 2026-01-08T15:35:31.400944Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7874: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2026-01-08T15:35:31.401009Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7874: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2026-01-08T15:35:31.401080Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7998: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2026-01-08T15:35:31.401164Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_shred_manager.cpp:39: [RootShredManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, ShredInterval# 604800.000000s, ShredBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2026-01-08T15:35:31.402307Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:8069: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2026-01-08T15:35:31.402760Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2026-01-08T15:35:31.514045Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7755: Cannot subscribe to console configs 2026-01-08T15:35:31.514120Z node 1 :IMPORT WARN: schemeshard_import.cpp:440: Table profiles were not loaded 2026-01-08T15:35:31.529589Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2026-01-08T15:35:31.530021Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2026-01-08T15:35:31.530219Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2026-01-08T15:35:31.556924Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2026-01-08T15:35:31.557451Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:464: Clear TempDirsState with owners number: 0 2026-01-08T15:35:31.558628Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1383: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2026-01-08T15:35:31.565200Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2026-01-08T15:35:31.569345Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:160: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2026-01-08T15:35:31.569571Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_shred_manager.cpp:84: [RootShredManager] Stop 2026-01-08T15:35:31.570932Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2026-01-08T15:35:31.571013Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2026-01-08T15:35:31.571153Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2026-01-08T15:35:31.571209Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not 
a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2026-01-08T15:35:31.571314Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:213: TTxServerlessStorageBilling.Complete 2026-01-08T15:35:31.571479Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:7111: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2026-01-08T15:35:31.751571Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot" OperationType: ESchemeOpMkDir MkDir { Name: ".sys" } Internal: true FailOnExist: false } TxId: 281474976710657 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:35:31.752642Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "auth_permissions" Type: EAuthPermissions } } TxId: 281474976710658 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:35:31.752789Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "auth_effective_permissions" Type: EAuthEffectivePermissions } } TxId: 281474976710659 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:35:31.752889Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "auth_users" Type: EAuthUsers } } TxId: 281474976710660 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:35:31.752961Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "pg_tables" Type: EPgTables } } TxId: 281474976710661 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:35:31.753029Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "ds_pdisks" Type: EPDisks } } TxId: 281474976710662 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:35:31.753094Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "auth_group_members" Type: 
EAuthGroupMembers } } TxId: 281474976710663 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:35:31.753160Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "top_partitions_one_minute" Type: ETopPartitionsByCpuOneMinute } } TxId: 281474976710664 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:35:31.753252Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "top_queries_by_request_units_one_hour" Type: ETopQueriesByRequestUnitsOneHour } } TxId: 281474976710665 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:35:31.753372Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "ds_vslots" Type: EVSlots } } TxId: 281474976710666 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:35:31.753445Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "top_partitions_by_tli_one_hour" Type: ETopPartitionsByTliOneHour } } TxId: 281474976710667 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:35:31.753515Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "nodes" Type: ENodes } } TxId: 281474976710668 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:35:31.753623Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "top_queries_by_cpu_time_one_hour" Type: ETopQueriesByCpuTimeOneHour } } TxId: 281474976710669 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:35:31.753713Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "top_queries_by_read_bytes_one_hour" Type: ETopQueriesByReadBytesOneHour } } TxId: 281474976710670 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:35:31.753824Z 
node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "top_queries_by_read_bytes_one_minute" Type: ETopQueriesByR ... t@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 ResourcePoolVersion: 1 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 41 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } ResourcePoolDescription { Name: "MyResourcePool1" PathId { OwnerId: 72057594046678944 LocalId: 41 } Version: 1 Properties { } } } PathId: 41 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 2026-01-08T15:35:32.726507Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/.metadata/workload_manager/pools/MyResourcePool2" Options { ReturnPartitioningInfo: true ReturnPartitionConfig: true BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2026-01-08T15:35:32.726726Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/.metadata/workload_manager/pools/MyResourcePool2" took 376us result status StatusSuccess 2026-01-08T15:35:32.727057Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/.metadata/workload_manager/pools/MyResourcePool2" PathDescription { Self { Name: "MyResourcePool2" PathId: 42 SchemeshardId: 72057594046678944 PathType: EPathTypeResourcePool CreateFinished: true CreateTxId: 126 CreateStep: 5000039 ParentPathId: 40 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 ResourcePoolVersion: 1 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 41 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } 
DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } ResourcePoolDescription { Name: "MyResourcePool2" PathId { OwnerId: 72057594046678944 LocalId: 42 } Version: 1 Properties { } } } PathId: 42 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 2026-01-08T15:35:32.727893Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/.metadata/workload_manager/pools" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2026-01-08T15:35:32.728098Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/.metadata/workload_manager/pools" took 242us result status StatusSuccess 2026-01-08T15:35:32.728562Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/.metadata/workload_manager/pools" PathDescription { Self { Name: "pools" PathId: 40 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 124 CreateStep: 5000038 ParentPathId: 39 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 7 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 7 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 6 } ChildrenExist: true } Children { Name: "MyResourcePool1" PathId: 41 SchemeshardId: 72057594046678944 PathType: EPathTypeResourcePool CreateFinished: true CreateTxId: 125 CreateStep: 5000040 ParentPathId: 40 PathState: EPathStateCreate Owner: "root@builtin" ACL: "" ChildrenExist: false } Children { Name: "MyResourcePool2" PathId: 42 SchemeshardId: 72057594046678944 PathType: EPathTypeResourcePool CreateFinished: true CreateTxId: 126 CreateStep: 5000039 ParentPathId: 40 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 41 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 
1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 40 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 2026-01-08T15:35:32.729110Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/.metadata/workload_manager/pools/MyResourcePool1" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2026-01-08T15:35:32.729368Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/.metadata/workload_manager/pools/MyResourcePool1" took 221us result status StatusSuccess 2026-01-08T15:35:32.729695Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/.metadata/workload_manager/pools/MyResourcePool1" PathDescription { Self { Name: "MyResourcePool1" PathId: 41 SchemeshardId: 72057594046678944 PathType: EPathTypeResourcePool CreateFinished: true CreateTxId: 125 CreateStep: 5000040 ParentPathId: 40 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 ResourcePoolVersion: 1 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 41 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } ResourcePoolDescription { Name: "MyResourcePool1" PathId { OwnerId: 72057594046678944 LocalId: 41 } Version: 1 Properties { } } } PathId: 41 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 2026-01-08T15:35:32.730232Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/.metadata/workload_manager/pools/MyResourcePool2" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2026-01-08T15:35:32.730473Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/.metadata/workload_manager/pools/MyResourcePool2" took 246us result status StatusSuccess 2026-01-08T15:35:32.730793Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: 
"/MyRoot/.metadata/workload_manager/pools/MyResourcePool2" PathDescription { Self { Name: "MyResourcePool2" PathId: 42 SchemeshardId: 72057594046678944 PathType: EPathTypeResourcePool CreateFinished: true CreateTxId: 126 CreateStep: 5000039 ParentPathId: 40 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 ResourcePoolVersion: 1 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 41 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } ResourcePoolDescription { Name: "MyResourcePool2" PathId { OwnerId: 72057594046678944 LocalId: 42 } Version: 1 Properties { } } } PathId: 42 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_resource_pool/unittest >> TResourcePoolTest::ReadOnlyMode [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2143] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2143] Leader for TabletID 72057594046678944 is [1:129:2154] sender: [1:130:2058] recipient: [1:112:2143] 2026-01-08T15:35:30.621064Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7938: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2026-01-08T15:35:30.621156Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7966: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2026-01-08T15:35:30.621194Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7852: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2026-01-08T15:35:30.621234Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7868: OperationsProcessing config: using default configuration 2026-01-08T15:35:30.621296Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7874: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2026-01-08T15:35:30.621335Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7874: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2026-01-08T15:35:30.621420Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7998: BackgroundCleaningQueue configured: Timeout# 
15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2026-01-08T15:35:30.621499Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_shred_manager.cpp:39: [RootShredManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, ShredInterval# 604800.000000s, ShredBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2026-01-08T15:35:30.622511Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:8069: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2026-01-08T15:35:30.622841Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2026-01-08T15:35:30.731321Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7755: Cannot subscribe to console configs 2026-01-08T15:35:30.731452Z node 1 :IMPORT WARN: schemeshard_import.cpp:440: Table profiles were not loaded 2026-01-08T15:35:30.744923Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2026-01-08T15:35:30.745267Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2026-01-08T15:35:30.745466Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2026-01-08T15:35:30.761766Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2026-01-08T15:35:30.762224Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:464: Clear TempDirsState with owners number: 0 2026-01-08T15:35:30.763045Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1383: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2026-01-08T15:35:30.763966Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2026-01-08T15:35:30.767377Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:160: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2026-01-08T15:35:30.767582Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_shred_manager.cpp:84: [RootShredManager] Stop 2026-01-08T15:35:30.768858Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2026-01-08T15:35:30.768936Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2026-01-08T15:35:30.769094Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2026-01-08T15:35:30.769143Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2026-01-08T15:35:30.769250Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:213: TTxServerlessStorageBilling.Complete 2026-01-08T15:35:30.769413Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:7111: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2026-01-08T15:35:30.936660Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { 
WorkingDir: "/MyRoot" OperationType: ESchemeOpMkDir MkDir { Name: ".sys" } Internal: true FailOnExist: false } TxId: 281474976710657 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:35:30.941199Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "auth_permissions" Type: EAuthPermissions } } TxId: 281474976710658 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:35:30.941397Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "auth_effective_permissions" Type: EAuthEffectivePermissions } } TxId: 281474976710659 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:35:30.941519Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "auth_users" Type: EAuthUsers } } TxId: 281474976710660 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:35:30.941602Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "pg_tables" Type: EPgTables } } TxId: 281474976710661 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:35:30.941680Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "ds_pdisks" Type: EPDisks } } TxId: 281474976710662 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:35:30.941752Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "auth_group_members" Type: EAuthGroupMembers } } TxId: 281474976710663 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:35:30.941827Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "top_partitions_one_minute" Type: ETopPartitionsByCpuOneMinute } } TxId: 281474976710664 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:35:30.941917Z node 1 
:FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "top_queries_by_request_units_one_hour" Type: ETopQueriesByRequestUnitsOneHour } } TxId: 281474976710665 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:35:30.942034Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "ds_vslots" Type: EVSlots } } TxId: 281474976710666 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:35:30.942112Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "top_partitions_by_tli_one_hour" Type: ETopPartitionsByTliOneHour } } TxId: 281474976710667 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:35:30.942181Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "nodes" Type: ENodes } } TxId: 281474976710668 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:35:30.942274Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "top_queries_by_cpu_time_one_hour" Type: ETopQueriesByCpuTimeOneHour } } TxId: 281474976710669 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:35:30.942423Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "top_queries_by_read_bytes_one_hour" Type: ETopQueriesByReadBytesOneHour } } TxId: 281474976710670 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:35:30.942500Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "top_queries_by_read_bytes_one_minute" Type: ETopQueriesByR ... 
schemeshard: 72057594046678944 2026-01-08T15:35:32.543810Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1723: TOperation IsReadyToPropose , TxId: 129 ready parts: 1/1 2026-01-08T15:35:32.543952Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1792: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 129 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2026-01-08T15:35:32.545435Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6274: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 4 LocalPathId: 1 Version: 12 PathOwnerId: 72057594046678944, cookie: 129 2026-01-08T15:35:32.545550Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 4 LocalPathId: 1 Version: 12 PathOwnerId: 72057594046678944, cookie: 129 2026-01-08T15:35:32.545601Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:108: Operation in-flight, at schemeshard: 72057594046678944, txId: 129 2026-01-08T15:35:32.545641Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 129, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], version: 12 2026-01-08T15:35:32.545691Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:611: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 5 2026-01-08T15:35:32.549128Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6274: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 4 LocalPathId: 43 Version: 2 PathOwnerId: 72057594046678944, cookie: 129 2026-01-08T15:35:32.549242Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 4 LocalPathId: 43 Version: 2 PathOwnerId: 72057594046678944, cookie: 129 2026-01-08T15:35:32.549282Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:108: Operation in-flight, at schemeshard: 72057594046678944, txId: 129 2026-01-08T15:35:32.549316Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 129, pathId: [OwnerId: 72057594046678944, LocalPathId: 43], version: 2 2026-01-08T15:35:32.549349Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:611: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 43] was 2 2026-01-08T15:35:32.549442Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1679: TOperation IsReadyToNotify, TxId: 129, ready parts: 0/1, is published: true 2026-01-08T15:35:32.554944Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:665: Send tablet strongly msg operationId: 129:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:129 msg type: 269090816 2026-01-08T15:35:32.555192Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1824: TOperation RegisterRelationByTabletId, TxId: 129, partId: 4294967295, tablet: 72057594046316545 2026-01-08T15:35:32.557374Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 129 FAKE_COORDINATOR: Add transaction: 129 at step: 5000041 FAKE_COORDINATOR: advance: minStep5000041 State->FrontStep: 5000040 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 129 at step: 5000041 2026-01-08T15:35:32.558277Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 129 2026-01-08T15:35:32.559482Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:685: TTxOperationPlanStep Execute, stepId: 5000041, transactions count in step: 1, at schemeshard: 72057594046678944 2026-01-08T15:35:32.559629Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:689: TTxOperationPlanStep Execute, message: Transactions { TxId: 129 Coordinator: 72057594046316545 AckTo { RawX1: 136 RawX2: 4294969454 } } Step: 5000041 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2026-01-08T15:35:32.559718Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_mkdir.cpp:33: MkDir::TPropose operationId# 129:0 HandleReply TEvPrivate::TEvOperationPlan, step: 5000041, at schemeshard: 72057594046678944 2026-01-08T15:35:32.559895Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2700: Change state for txid 129:0 128 -> 240 2026-01-08T15:35:32.560070Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:600: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 4 2026-01-08T15:35:32.560139Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:600: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 43] was 1 FAKE_COORDINATOR: Erasing txId 129 2026-01-08T15:35:32.562652Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2026-01-08T15:35:32.562711Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 129, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2026-01-08T15:35:32.562897Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 129, path id: [OwnerId: 72057594046678944, LocalPathId: 43] 2026-01-08T15:35:32.562985Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2026-01-08T15:35:32.563020Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:901:2859], at schemeshard: 72057594046678944, txId: 129, path id: 1 2026-01-08T15:35:32.563059Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:901:2859], at schemeshard: 72057594046678944, txId: 129, path id: 43 2026-01-08T15:35:32.563368Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:494: TTxOperationProgress Execute, operationId: 129:0, at schemeshard: 72057594046678944 2026-01-08T15:35:32.563436Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:497: [72057594046678944] TDone opId# 129:0 ProgressState 2026-01-08T15:35:32.563554Z node 1 :FLAT_TX_SCHEMESHARD INFO: 
schemeshard__operation_side_effects.cpp:928: Part operation is done id#129:0 progress is 1/1 2026-01-08T15:35:32.563598Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1714: TOperation IsReadyToDone TxId: 129 ready parts: 1/1 2026-01-08T15:35:32.563689Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:928: Part operation is done id#129:0 progress is 1/1 2026-01-08T15:35:32.563734Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1714: TOperation IsReadyToDone TxId: 129 ready parts: 1/1 2026-01-08T15:35:32.563775Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1679: TOperation IsReadyToNotify, TxId: 129, ready parts: 1/1, is published: false 2026-01-08T15:35:32.563812Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1714: TOperation IsReadyToDone TxId: 129 ready parts: 1/1 2026-01-08T15:35:32.563850Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:995: Operation and all the parts is done, operation id: 129:0 2026-01-08T15:35:32.563886Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5577: RemoveTx for txid 129:0 2026-01-08T15:35:32.564005Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:611: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 43] was 2 2026-01-08T15:35:32.564067Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:1004: Publication still in progress, tx: 129, publications: 2, subscribers: 0 2026-01-08T15:35:32.564103Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:1011: Publication details: tx: 129, [OwnerId: 72057594046678944, LocalPathId: 1], 13 2026-01-08T15:35:32.564136Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:1011: Publication details: tx: 129, [OwnerId: 72057594046678944, LocalPathId: 43], 3 2026-01-08T15:35:32.564996Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6274: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 4 LocalPathId: 1 Version: 13 PathOwnerId: 72057594046678944, cookie: 129 2026-01-08T15:35:32.565084Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 4 LocalPathId: 1 Version: 13 PathOwnerId: 72057594046678944, cookie: 129 2026-01-08T15:35:32.565121Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 2, at schemeshard: 72057594046678944, txId: 129 2026-01-08T15:35:32.565176Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 129, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], version: 13 2026-01-08T15:35:32.565245Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:611: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 5 2026-01-08T15:35:32.566287Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6274: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 4 LocalPathId: 43 Version: 3 PathOwnerId: 72057594046678944, cookie: 129 2026-01-08T15:35:32.566627Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 
Generation: 4 LocalPathId: 43 Version: 3 PathOwnerId: 72057594046678944, cookie: 129 2026-01-08T15:35:32.566676Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046678944, txId: 129 2026-01-08T15:35:32.566706Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 129, pathId: [OwnerId: 72057594046678944, LocalPathId: 43], version: 3 2026-01-08T15:35:32.566736Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:611: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 43] was 1 2026-01-08T15:35:32.566814Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 129, subscribers: 0 2026-01-08T15:35:32.569868Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 129 2026-01-08T15:35:32.571028Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 129 TestModificationResult got TxId: 129, wait until txId: 129 |87.8%| [TM] {BAZEL_UPLOAD} ydb/core/tx/schemeshard/ut_resource_pool/unittest |87.8%| [TM] {BAZEL_UPLOAD} ydb/core/tx/schemeshard/ut_resource_pool/unittest >> YdbIndexTable::MultiShardTableOneIndexIndexOverlapDataColumn [GOOD] >> YdbIndexTable::MultiShardTableOneIndexPkOverlap >> AsyncIndexChangeCollector::MultiIndexedTableInsertSingleRow [GOOD] >> AsyncIndexChangeCollector::IndexedPrimaryKeyInsertSingleRow >> TResourcePoolTest::CreateResourcePoolWithProperties [GOOD] >> TxUsage::Write_And_Read_Small_Messages_1 [GOOD] >> KqpPg::TableDeleteAllData+useSink [GOOD] >> KqpPg::TableDeleteAllData-useSink ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_resource_pool/unittest >> TResourcePoolTest::DropResourcePool [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2143] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2143] Leader for TabletID 72057594046678944 is [1:129:2154] sender: [1:130:2058] recipient: [1:112:2143] 2026-01-08T15:35:32.036687Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7938: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2026-01-08T15:35:32.036777Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7966: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2026-01-08T15:35:32.036816Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7852: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2026-01-08T15:35:32.036851Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7868: OperationsProcessing config: using default configuration 2026-01-08T15:35:32.036915Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7874: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2026-01-08T15:35:32.036948Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7874: 
OperationsProcessing config: type TxMergeTablePartition, limit 10000 2026-01-08T15:35:32.037016Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7998: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2026-01-08T15:35:32.037095Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_shred_manager.cpp:39: [RootShredManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, ShredInterval# 604800.000000s, ShredBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2026-01-08T15:35:32.037994Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:8069: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2026-01-08T15:35:32.038302Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2026-01-08T15:35:32.136431Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7755: Cannot subscribe to console configs 2026-01-08T15:35:32.136565Z node 1 :IMPORT WARN: schemeshard_import.cpp:440: Table profiles were not loaded 2026-01-08T15:35:32.164110Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2026-01-08T15:35:32.164500Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2026-01-08T15:35:32.164711Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2026-01-08T15:35:32.191307Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2026-01-08T15:35:32.191810Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:464: Clear TempDirsState with owners number: 0 2026-01-08T15:35:32.192536Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1383: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2026-01-08T15:35:32.193395Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2026-01-08T15:35:32.196680Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:160: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2026-01-08T15:35:32.196876Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_shred_manager.cpp:84: [RootShredManager] Stop 2026-01-08T15:35:32.198100Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2026-01-08T15:35:32.198187Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2026-01-08T15:35:32.198349Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2026-01-08T15:35:32.198393Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2026-01-08T15:35:32.198502Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:213: TTxServerlessStorageBilling.Complete 2026-01-08T15:35:32.198670Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:7111: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 
2026-01-08T15:35:32.374038Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot" OperationType: ESchemeOpMkDir MkDir { Name: ".sys" } Internal: true FailOnExist: false } TxId: 281474976710657 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:35:32.375184Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "auth_permissions" Type: EAuthPermissions } } TxId: 281474976710658 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:35:32.375331Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "auth_effective_permissions" Type: EAuthEffectivePermissions } } TxId: 281474976710659 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:35:32.375423Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "auth_users" Type: EAuthUsers } } TxId: 281474976710660 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:35:32.375492Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "pg_tables" Type: EPgTables } } TxId: 281474976710661 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:35:32.375557Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "ds_pdisks" Type: EPDisks } } TxId: 281474976710662 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:35:32.375619Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "auth_group_members" Type: EAuthGroupMembers } } TxId: 281474976710663 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:35:32.375692Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false 
CreateSysView { Name: "top_partitions_one_minute" Type: ETopPartitionsByCpuOneMinute } } TxId: 281474976710664 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:35:32.375777Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "top_queries_by_request_units_one_hour" Type: ETopQueriesByRequestUnitsOneHour } } TxId: 281474976710665 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:35:32.375884Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "ds_vslots" Type: EVSlots } } TxId: 281474976710666 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:35:32.375956Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "top_partitions_by_tli_one_hour" Type: ETopPartitionsByTliOneHour } } TxId: 281474976710667 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:35:32.376021Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "nodes" Type: ENodes } } TxId: 281474976710668 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:35:32.376091Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "top_queries_by_cpu_time_one_hour" Type: ETopQueriesByCpuTimeOneHour } } TxId: 281474976710669 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:35:32.376237Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "top_queries_by_read_bytes_one_hour" Type: ETopQueriesByReadBytesOneHour } } TxId: 281474976710670 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:35:32.376307Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "top_queries_by_read_bytes_one_minute" Type: ETopQueriesByR ... 
HEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2026-01-08T15:35:33.589601Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 103, path id: [OwnerId: 72057594046678944, LocalPathId: 39] 2026-01-08T15:35:33.589708Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 103, path id: [OwnerId: 72057594046678944, LocalPathId: 40] 2026-01-08T15:35:33.589746Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 103, path id: [OwnerId: 72057594046678944, LocalPathId: 41] 2026-01-08T15:35:33.589858Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2026-01-08T15:35:33.589883Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:210:2211], at schemeshard: 72057594046678944, txId: 103, path id: 39 2026-01-08T15:35:33.589926Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:210:2211], at schemeshard: 72057594046678944, txId: 103, path id: 40 2026-01-08T15:35:33.589944Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:210:2211], at schemeshard: 72057594046678944, txId: 103, path id: 41 2026-01-08T15:35:33.590158Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:494: TTxOperationProgress Execute, operationId: 103:0, at schemeshard: 72057594046678944 2026-01-08T15:35:33.590196Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:497: [72057594046678944] TDone opId# 103:0 ProgressState 2026-01-08T15:35:33.590277Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:928: Part operation is done id#103:0 progress is 1/1 2026-01-08T15:35:33.590304Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1714: TOperation IsReadyToDone TxId: 103 ready parts: 1/1 2026-01-08T15:35:33.590361Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:928: Part operation is done id#103:0 progress is 1/1 2026-01-08T15:35:33.590397Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1714: TOperation IsReadyToDone TxId: 103 ready parts: 1/1 2026-01-08T15:35:33.590517Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1679: TOperation IsReadyToNotify, TxId: 103, ready parts: 1/1, is published: false 2026-01-08T15:35:33.590568Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1714: TOperation IsReadyToDone TxId: 103 ready parts: 1/1 2026-01-08T15:35:33.590609Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:995: Operation and all the parts is done, operation id: 103:0 2026-01-08T15:35:33.590648Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5577: RemoveTx for txid 103:0 2026-01-08T15:35:33.590744Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:611: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 41] was 2 2026-01-08T15:35:33.590798Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: 
schemeshard__operation_side_effects.cpp:1004: Publication still in progress, tx: 103, publications: 3, subscribers: 0 2026-01-08T15:35:33.590856Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:1011: Publication details: tx: 103, [OwnerId: 72057594046678944, LocalPathId: 39], 7 2026-01-08T15:35:33.590892Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:1011: Publication details: tx: 103, [OwnerId: 72057594046678944, LocalPathId: 40], 7 2026-01-08T15:35:33.590915Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:1011: Publication details: tx: 103, [OwnerId: 72057594046678944, LocalPathId: 41], 18446744073709551615 2026-01-08T15:35:33.591612Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6274: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 41 Version: 18446744073709551615 PathOwnerId: 72057594046678944, cookie: 103 2026-01-08T15:35:33.591739Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 41 Version: 18446744073709551615 PathOwnerId: 72057594046678944, cookie: 103 2026-01-08T15:35:33.591780Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 3, at schemeshard: 72057594046678944, txId: 103 2026-01-08T15:35:33.591836Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 103, pathId: [OwnerId: 72057594046678944, LocalPathId: 41], version: 18446744073709551615 2026-01-08T15:35:33.591901Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:611: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 41] was 1 2026-01-08T15:35:33.592366Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:123: TTxCleanDroppedPaths Execute, 1 paths in candidate queue, at schemeshard: 72057594046678944 2026-01-08T15:35:33.592423Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:137: TTxCleanDroppedPaths: PersistRemovePath for PathId# [OwnerId: 72057594046678944, LocalPathId: 41], at schemeshard: 72057594046678944 2026-01-08T15:35:33.592515Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:611: DecrementPathDbRefCount reason remove path for pathId [OwnerId: 72057594046678944, LocalPathId: 40] was 2 2026-01-08T15:35:33.592995Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6274: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 39 Version: 7 PathOwnerId: 72057594046678944, cookie: 103 2026-01-08T15:35:33.593076Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 39 Version: 7 PathOwnerId: 72057594046678944, cookie: 103 2026-01-08T15:35:33.593107Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 2, at schemeshard: 72057594046678944, txId: 103 2026-01-08T15:35:33.593147Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 103, pathId: [OwnerId: 72057594046678944, LocalPathId: 39], version: 7 2026-01-08T15:35:33.593203Z node 1 
:FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:611: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 39] was 2 2026-01-08T15:35:33.594282Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6274: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 40 Version: 7 PathOwnerId: 72057594046678944, cookie: 103 2026-01-08T15:35:33.598536Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 40 Version: 7 PathOwnerId: 72057594046678944, cookie: 103 2026-01-08T15:35:33.598602Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046678944, txId: 103 2026-01-08T15:35:33.598652Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 103, pathId: [OwnerId: 72057594046678944, LocalPathId: 40], version: 7 2026-01-08T15:35:33.598727Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:611: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 40] was 1 2026-01-08T15:35:33.598839Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 103, subscribers: 0 2026-01-08T15:35:33.604898Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 103 2026-01-08T15:35:33.605216Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:160: TTxCleanDroppedPaths Complete, done PersistRemovePath for 1 paths, skipped 0, left 0 candidates, at schemeshard: 72057594046678944 2026-01-08T15:35:33.608977Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 103 2026-01-08T15:35:33.609549Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 103 TestModificationResult got TxId: 103, wait until txId: 103 TestWaitNotification wait txId: 103 2026-01-08T15:35:33.609850Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:213: tests -- TTxNotificationSubscriber for txId 103: send EvNotifyTxCompletion 2026-01-08T15:35:33.609911Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:259: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 103 2026-01-08T15:35:33.610360Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:123: NotifyTxCompletion, unknown transaction, txId: 103, at schemeshard: 72057594046678944 2026-01-08T15:35:33.610464Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:230: tests -- TTxNotificationSubscriber for txId 103: got EvNotifyTxCompletionResult 2026-01-08T15:35:33.610504Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:239: tests -- TTxNotificationSubscriber for txId 103: satisfy waiter [1:777:2767] TestWaitNotification: OK eventTxId 103 2026-01-08T15:35:33.611084Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/.metadata/workload_manager/pools/MyResourcePool" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: 
false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2026-01-08T15:35:33.611367Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/.metadata/workload_manager/pools/MyResourcePool" took 294us result status StatusPathDoesNotExist 2026-01-08T15:35:33.611588Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusPathDoesNotExist Reason: "Check failed: path: \'/MyRoot/.metadata/workload_manager/pools/MyResourcePool\', error: path hasn\'t been resolved, nearest resolved path: \'/MyRoot/.metadata/workload_manager/pools\' (id: [OwnerId: 72057594046678944, LocalPathId: 40])" Path: "/MyRoot/.metadata/workload_manager/pools/MyResourcePool" PathId: 18446744073709551615 LastExistedPrefixPath: "/MyRoot/.metadata/workload_manager/pools" LastExistedPrefixPathId: 40 LastExistedPrefixDescription { Self { Name: "pools" PathId: 40 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 101 CreateStep: 5000038 ParentPathId: 39 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathSubType: EPathSubTypeEmpty ChildrenExist: false } } PathOwnerId: 18446744073709551615, at schemeshard: 72057594046678944 |87.8%| [TM] {BAZEL_UPLOAD} ydb/core/tx/schemeshard/ut_resource_pool/unittest >> TDataShardMinStepTest::TestDropTableCompletesQuicklyRW+VolatileTxs [GOOD] >> TDataShardMinStepTest::TestDropTableCompletesQuicklyRW-VolatileTxs >> TxUsage::Sinks_Oltp_WriteToTopicAndTable_6_Table [GOOD] >> TResourcePoolTest::AlterResourcePoolShouldFailIfSuchEntityNotExists [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_resource_pool/unittest >> TResourcePoolTest::CreateResourcePoolWithProperties [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2143] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2143] Leader for TabletID 72057594046678944 is [1:129:2154] sender: [1:130:2058] recipient: [1:112:2143] 2026-01-08T15:35:33.040618Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7938: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2026-01-08T15:35:33.040721Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7966: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2026-01-08T15:35:33.040769Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7852: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2026-01-08T15:35:33.040812Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7868: OperationsProcessing config: using default configuration 2026-01-08T15:35:33.040872Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7874: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2026-01-08T15:35:33.040917Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7874: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2026-01-08T15:35:33.040973Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7998: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 
1.000000s, InflightLimit# 10 2026-01-08T15:35:33.041037Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_shred_manager.cpp:39: [RootShredManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, ShredInterval# 604800.000000s, ShredBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2026-01-08T15:35:33.041984Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:8069: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2026-01-08T15:35:33.042287Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2026-01-08T15:35:33.190031Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7755: Cannot subscribe to console configs 2026-01-08T15:35:33.190094Z node 1 :IMPORT WARN: schemeshard_import.cpp:440: Table profiles were not loaded 2026-01-08T15:35:33.219937Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2026-01-08T15:35:33.220253Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2026-01-08T15:35:33.220423Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2026-01-08T15:35:33.236464Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2026-01-08T15:35:33.236942Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:464: Clear TempDirsState with owners number: 0 2026-01-08T15:35:33.237687Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1383: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2026-01-08T15:35:33.238444Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2026-01-08T15:35:33.241710Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:160: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2026-01-08T15:35:33.241880Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_shred_manager.cpp:84: [RootShredManager] Stop 2026-01-08T15:35:33.243050Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2026-01-08T15:35:33.243119Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2026-01-08T15:35:33.243252Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2026-01-08T15:35:33.243298Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2026-01-08T15:35:33.243420Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:213: TTxServerlessStorageBilling.Complete 2026-01-08T15:35:33.243571Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:7111: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2026-01-08T15:35:33.418985Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot" OperationType: 
ESchemeOpMkDir MkDir { Name: ".sys" } Internal: true FailOnExist: false } TxId: 281474976710657 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:35:33.420063Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "auth_permissions" Type: EAuthPermissions } } TxId: 281474976710658 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:35:33.420198Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "auth_effective_permissions" Type: EAuthEffectivePermissions } } TxId: 281474976710659 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:35:33.420299Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "auth_users" Type: EAuthUsers } } TxId: 281474976710660 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:35:33.420386Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "pg_tables" Type: EPgTables } } TxId: 281474976710661 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:35:33.420458Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "ds_pdisks" Type: EPDisks } } TxId: 281474976710662 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:35:33.420524Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "auth_group_members" Type: EAuthGroupMembers } } TxId: 281474976710663 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:35:33.420611Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "top_partitions_one_minute" Type: ETopPartitionsByCpuOneMinute } } TxId: 281474976710664 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:35:33.420704Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "top_queries_by_request_units_one_hour" Type: ETopQueriesByRequestUnitsOneHour } } TxId: 281474976710665 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:35:33.420793Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "ds_vslots" Type: EVSlots } } TxId: 281474976710666 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:35:33.420862Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "top_partitions_by_tli_one_hour" Type: ETopPartitionsByTliOneHour } } TxId: 281474976710667 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:35:33.420926Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "nodes" Type: ENodes } } TxId: 281474976710668 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:35:33.421005Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "top_queries_by_cpu_time_one_hour" Type: ETopQueriesByCpuTimeOneHour } } TxId: 281474976710669 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:35:33.421106Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "top_queries_by_read_bytes_one_hour" Type: ETopQueriesByReadBytesOneHour } } TxId: 281474976710670 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:35:33.421201Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "top_queries_by_read_bytes_one_minute" Type: ETopQueriesByR ... 
d: 72057594046678944 2026-01-08T15:35:34.567814Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_create_resource_pool.cpp:19: [72057594046678944] TCreateResourcePool TPropose, operationId: 102:0, HandleReply TEvOperationPlan: step# 5000039 2026-01-08T15:35:34.568007Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2700: Change state for txid 102:0 128 -> 240 2026-01-08T15:35:34.568217Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:600: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 40] was 1 2026-01-08T15:35:34.568293Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:600: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 41] was 2 2026-01-08T15:35:34.569057Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 102 2026-01-08T15:35:34.573637Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 102 2026-01-08T15:35:34.573819Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 102 FAKE_COORDINATOR: Erasing txId 102 2026-01-08T15:35:34.574623Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2026-01-08T15:35:34.574676Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 102, path id: [OwnerId: 72057594046678944, LocalPathId: 40] 2026-01-08T15:35:34.574856Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 102, path id: [OwnerId: 72057594046678944, LocalPathId: 41] 2026-01-08T15:35:34.574936Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 102, path id: [OwnerId: 72057594046678944, LocalPathId: 41] 2026-01-08T15:35:34.575079Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2026-01-08T15:35:34.575123Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:210:2211], at schemeshard: 72057594046678944, txId: 102, path id: 40 2026-01-08T15:35:34.575178Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:210:2211], at schemeshard: 72057594046678944, txId: 102, path id: 41 2026-01-08T15:35:34.575218Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:210:2211], at schemeshard: 72057594046678944, txId: 102, path id: 41 2026-01-08T15:35:34.575522Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:494: TTxOperationProgress Execute, operationId: 102:0, at schemeshard: 72057594046678944 2026-01-08T15:35:34.575587Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:497: [72057594046678944] TDone opId# 102:0 ProgressState 2026-01-08T15:35:34.575813Z node 1 :FLAT_TX_SCHEMESHARD INFO: 
schemeshard__operation_side_effects.cpp:928: Part operation is done id#102:0 progress is 1/1 2026-01-08T15:35:34.575856Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1714: TOperation IsReadyToDone TxId: 102 ready parts: 1/1 2026-01-08T15:35:34.575902Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:928: Part operation is done id#102:0 progress is 1/1 2026-01-08T15:35:34.575941Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1714: TOperation IsReadyToDone TxId: 102 ready parts: 1/1 2026-01-08T15:35:34.575980Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1679: TOperation IsReadyToNotify, TxId: 102, ready parts: 1/1, is published: false 2026-01-08T15:35:34.576029Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1714: TOperation IsReadyToDone TxId: 102 ready parts: 1/1 2026-01-08T15:35:34.576092Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:995: Operation and all the parts is done, operation id: 102:0 2026-01-08T15:35:34.576136Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5577: RemoveTx for txid 102:0 2026-01-08T15:35:34.576235Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:611: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 41] was 3 2026-01-08T15:35:34.576280Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:1004: Publication still in progress, tx: 102, publications: 2, subscribers: 0 2026-01-08T15:35:34.576314Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:1011: Publication details: tx: 102, [OwnerId: 72057594046678944, LocalPathId: 40], 5 2026-01-08T15:35:34.576378Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:1011: Publication details: tx: 102, [OwnerId: 72057594046678944, LocalPathId: 41], 2 2026-01-08T15:35:34.577524Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6274: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 40 Version: 5 PathOwnerId: 72057594046678944, cookie: 102 2026-01-08T15:35:34.577639Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 40 Version: 5 PathOwnerId: 72057594046678944, cookie: 102 2026-01-08T15:35:34.577677Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 2, at schemeshard: 72057594046678944, txId: 102 2026-01-08T15:35:34.577725Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 102, pathId: [OwnerId: 72057594046678944, LocalPathId: 40], version: 5 2026-01-08T15:35:34.577774Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:611: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 40] was 2 2026-01-08T15:35:34.579661Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6274: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 41 Version: 2 PathOwnerId: 72057594046678944, cookie: 102 2026-01-08T15:35:34.579780Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 
Generation: 2 LocalPathId: 41 Version: 2 PathOwnerId: 72057594046678944, cookie: 102 2026-01-08T15:35:34.579814Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046678944, txId: 102 2026-01-08T15:35:34.579846Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 102, pathId: [OwnerId: 72057594046678944, LocalPathId: 41], version: 2 2026-01-08T15:35:34.579883Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:611: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 41] was 2 2026-01-08T15:35:34.579982Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 102, subscribers: 0 2026-01-08T15:35:34.587808Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 102 2026-01-08T15:35:34.588331Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 102 TestModificationResult got TxId: 102, wait until txId: 102 TestWaitNotification wait txId: 102 2026-01-08T15:35:34.588638Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:213: tests -- TTxNotificationSubscriber for txId 102: send EvNotifyTxCompletion 2026-01-08T15:35:34.588689Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:259: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 102 2026-01-08T15:35:34.589097Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:123: NotifyTxCompletion, unknown transaction, txId: 102, at schemeshard: 72057594046678944 2026-01-08T15:35:34.589220Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:230: tests -- TTxNotificationSubscriber for txId 102: got EvNotifyTxCompletionResult 2026-01-08T15:35:34.589282Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:239: tests -- TTxNotificationSubscriber for txId 102: satisfy waiter [1:746:2736] TestWaitNotification: OK eventTxId 102 2026-01-08T15:35:34.589806Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/.metadata/workload_manager/pools/MyResourcePool" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2026-01-08T15:35:34.590117Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/.metadata/workload_manager/pools/MyResourcePool" took 326us result status StatusSuccess 2026-01-08T15:35:34.590687Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/.metadata/workload_manager/pools/MyResourcePool" PathDescription { Self { Name: "MyResourcePool" PathId: 41 SchemeshardId: 72057594046678944 PathType: EPathTypeResourcePool CreateFinished: true CreateTxId: 102 CreateStep: 5000039 ParentPathId: 40 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 ResourcePoolVersion: 1 } ChildrenExist: false } 
DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 40 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } ResourcePoolDescription { Name: "MyResourcePool" PathId { OwnerId: 72057594046678944 LocalId: 41 } Version: 1 Properties { Properties { key: "concurrent_query_limit" value: "10" } Properties { key: "query_cancel_after_seconds" value: "60" } } } } PathId: 41 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 |87.8%| [TM] {BAZEL_UPLOAD} ydb/core/tx/schemeshard/ut_resource_pool/unittest >> BsControllerConfig::DeleteStoragePool >> CdcStreamChangeCollector::UpsertToSameKeyWithImages [GOOD] >> CdcStreamChangeCollector::UpsertModifyDelete >> CdcStreamChangeCollector::IndexAndStreamUpsert [GOOD] >> CdcStreamChangeCollector::NewImage >> TDataShardMinStepTest::TestDropTablePlanComesNotTooEarlyRW+VolatileTxs [GOOD] >> TDataShardMinStepTest::TestDropTablePlanComesNotTooEarlyRW-VolatileTxs >> TTxDataShardUploadRows::TestUploadShadowRowsShadowDataSplitThenPublish >> TTxDataShardUploadRows::TestUploadRows >> TxUsage::Sinks_Oltp_WriteToTopicAndTable_6_Query ------- [TM] {asan, default-linux-x86_64, release} ydb/public/sdk/cpp/src/client/topic/ut/with_direct_read_ut/unittest >> TxUsage::WriteToTopic_Demo_18_RestartAfterCommit_Query [GOOD] Test command err: 2026-01-08T15:31:41.658589Z node 1 :METADATA_PROVIDER WARN: log.cpp:841: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7593013848250706156:2160];send_to=[0:7307199536658146131:7762515]; 2026-01-08T15:31:41.658738Z node 1 :METADATA_PROVIDER ERROR: log.cpp:841: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2026-01-08T15:31:41.751239Z node 1 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:125: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, got undelivered to scheme cache: ActorUnknown 2026-01-08T15:31:41.751352Z node 1 :PQ_READ_PROXY DEBUG: caching_service.cpp:44: Direct read cache: : Created 2026-01-08T15:31:42.324655Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2026-01-08T15:31:42.324777Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2026-01-08T15:31:42.521188Z node 1 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: LookupError, path: Root/.metadata/script_executions 
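[Note] The ResourcePoolDescription printed a few records above (Properties: concurrent_query_limit = 10, query_cancel_after_seconds = 60) is the pool that TResourcePoolTest::CreateResourcePoolWithProperties verifies. For reference, a pool with those properties corresponds to a client-side CREATE RESOURCE POOL statement. A minimal sketch with the YDB C++ SDK query client follows; the endpoint, database and header paths are illustrative assumptions, not values taken from this run.

    #include <iostream>
    // Header layout is an assumption here; it differs between SDK versions.
    #include <ydb-cpp-sdk/client/driver/driver.h>
    #include <ydb-cpp-sdk/client/query/client.h>

    int main() {
        // Connection parameters are placeholders, not values from this test run.
        auto driver = NYdb::TDriver(NYdb::TDriverConfig()
            .SetEndpoint("grpc://localhost:2136")
            .SetDatabase("/Root"));
        NYdb::NQuery::TQueryClient client(driver);

        // Same properties as in the ResourcePoolDescription above:
        // concurrent_query_limit = 10, query_cancel_after_seconds = 60.
        auto result = client.ExecuteQuery(R"(
            CREATE RESOURCE POOL MyResourcePool WITH (
                CONCURRENT_QUERY_LIMIT = 10,
                QUERY_CANCEL_AFTER_SECONDS = 60
            );
        )", NYdb::NQuery::TTxControl::NoTx()).GetValueSync();

        if (!result.IsSuccess()) {
            // DDL failures surface as issues on the result, much like the
            // schemeshard errors logged in this file.
            std::cerr << result.GetIssues().ToString() << std::endl;
            driver.Stop(true);
            return 1;
        }
        driver.Stop(true);
        return 0;
    }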
2026-01-08T15:31:42.544617Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2026-01-08T15:31:42.550495Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1374: Notification cookie mismatch for subscription [1:7593013848250706034:2081] 1767886301626139 != 1767886301626142 2026-01-08T15:31:42.563471Z node 1 :IMPORT WARN: schemeshard_import.cpp:440: Table profiles were not loaded 2026-01-08T15:31:42.685134Z node 1 :TX_CONVEYOR ERROR: log.cpp:841: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2026-01-08T15:31:42.831916Z node 1 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: PathErrorUnknown, path: Root/.metadata/script_executions 2026-01-08T15:31:42.913084Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: /home/runner/.ya/build/build_root/663n/0021d1/r3tmp/yandex3SKMQu.tmp 2026-01-08T15:31:42.913121Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: /home/runner/.ya/build/build_root/663n/0021d1/r3tmp/yandex3SKMQu.tmp 2026-01-08T15:31:42.913289Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:202: successfully initialized from file: /home/runner/.ya/build/build_root/663n/0021d1/r3tmp/yandex3SKMQu.tmp 2026-01-08T15:31:42.913361Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2026-01-08T15:31:43.022638Z INFO: TTestServer started on Port 12666 GrpcPort 8895 PQClient connected to localhost:8895 2026-01-08T15:31:43.407631Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2026-01-08T15:31:43.423032Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:123: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2026-01-08T15:31:43.432026Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:123: NotifyTxCompletion, unknown transaction, txId: 281474976710658, at schemeshard: 72057594046644480 2026-01-08T15:31:43.563952Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:31:43.810975Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:123: NotifyTxCompletion, unknown transaction, txId: 281474976710660, at schemeshard: 72057594046644480 2026-01-08T15:31:46.658137Z node 1 :METADATA_PROVIDER ERROR: log.cpp:841: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7593013848250706156:2160];send_to=[0:7307199536658146131:7762515]; 2026-01-08T15:31:46.658246Z node 1 :METADATA_PROVIDER ERROR: log.cpp:841: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2026-01-08T15:31:47.137110Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] 
[TPoolFetcherActor] ActorId: [1:7593013874020510857:2333], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
<main>: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:31:47.137345Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:263: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
<main>: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:31:47.137848Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7593013874020510869:2336], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
<main>: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:31:47.137892Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7593013874020510870:2337], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
<main>: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:31:47.137937Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:263: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
<main>: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:31:47.148108Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710662:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2026-01-08T15:31:47.168144Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7593013874020510873:2338], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
<main>: Error: Transaction 281474976710662 completed, doublechecking } 2026-01-08T15:31:47.283270Z node 1 :TX_PROXY ERROR: schemereq.cpp:624: Actor# [1:7593013874020510938:2652] txid# 281474976710663, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 45], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2026-01-08T15:31:47.769934Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:31:47.779139Z node 1 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:625: Compilation failed, self: [1:7593013874020510946:2345], status: SCHEME_ERROR, issues:
<main>: Error: Type annotation, code: 1030
<main>:3:16: Error: At function: KiReadTable!
<main>:3:16: Error: Cannot find table 'db.[/Root/PQ/Config/V2/Cluster]' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions., code: 2003 2026-01-08T15:31:47.792238Z node 1 :KQP_SESSION WARN: {KQPSA@kqp_session_actor.cpp:2679} SessionId: ydb://session/3?node_id=1&id=MmVlNjAxZTItOWE1ZWM3MmItZTAxMDYxNDgtNTM1OTg3Mjg=, ActorId: [1:7593013874020510855:2332], ActorState: ExecuteState, LegacyTraceId: 01kef3rfsv0ezv4kq0s6bs6krc, ReplyQueryCompileError, remove tx status# SCHEME_ERROR issues# { message: "Type annotation" issue_code: 1030 severity: 1 issues { position { row: 3 column: 16 } message: "At function: KiReadTable!" end_position { row: 3 column: 16 } severity: 1 issues { position { row: 3 column: 16 } message: "Cannot find table \'db.[/Root/PQ/Config/V2/Cluster]\' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions." end_position { row: 3 column: 16 } issue_code: 2003 severity: 1 } } } tx_id# trace_id# 2026-01-08T15:31:47.827812Z node 1 :PERSQUEUE_CLUSTER_TRACKER ERROR: cluster_tracker.cpp:167: failed to list clusters: { Response { QueryIssues { message: "Type annotation" issue_code: 1030 severity: 1 issues { position { row: 3 column: 16 } message: "At function: KiReadTable!" end_position { row: 3 column: 16 } severity: 1 issues { position { row: 3 column: 16 } message: "Cannot find table \'db.[/Root/PQ/Config/V2/Cluster]\' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions." end_position { row: 3 column: 16 } issue_code: 2003 severity: 1 } } } TxMeta { } } YdbStatus: SCHEME_ERROR ConsumedRu: 1 } 2026-01-08T15:31:47.832543Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:31:47.959344Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) === Init DC: UPSERT INTO `/Root/PQ/Config/V2/Cluster` (name, balancer, local, enabled, weight) VALUES ("dc1", "localhost", true, true, 1000), ("dc2", "dc2.logbroker.yandex.net", false, true, 1000); === CheckClustersList. Subcribe to ClusterTracker from [1:7593013878315478523:2832] === CheckClustersList. Ok 2026-01-08T15:31:53.922530Z :WriteToTopic_Demo_12_Table INFO: TTopicSdkTestSetup started 2026-01-08T15:31:53.953314Z node 1 :PQ_READ_PROXY DEBUG: grpc_pq_schema.cpp:133: new create topic request 2026-01-08T15:31:53.990999Z node 1 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1657: [72075186224037893][] pipe [1:7593013899790315188:2925] connected; active server actors: 1 2026-01-08T15:31:53.991298Z node 1 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer__balancing.cpp:1521: [72075186224037893][test-topic] updating configuration. Deleted partitions [].
Added partitions [0] 2026-01-08T15:31:54.007058Z node 1 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer.cpp:756: [72075186224037893][test-topic] Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at RB 72075186224037893 2026-01-08T15:31:54.007280Z node 1 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer.cpp:142: [72075186224037893][test-topic] BALANCER INIT DONE for test-topic: (0, 72075186224037892) 2026-01-08T15:31:54.012278Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:3023: [PQ: 72075186224037892] Handle TEvIntercon ... 08138Z node 14 :PERSQUEUE DEBUG: partition.cpp:2329: [72075186224037892][Partition][0][StateIdle] Process user action and tx events 2026-01-08T15:35:30.508194Z node 14 :PERSQUEUE DEBUG: partition.cpp:2388: [72075186224037892][Partition][0][StateIdle] Events: 0, PendingCommits: 0, PendingWrites: 0 2026-01-08T15:35:30.508217Z node 14 :PERSQUEUE DEBUG: partition.cpp:2337: [72075186224037892][Partition][0][StateIdle] Process user action and tx pending commits 2026-01-08T15:35:30.508240Z node 14 :PERSQUEUE DEBUG: partition.cpp:2388: [72075186224037892][Partition][0][StateIdle] Events: 0, PendingCommits: 0, PendingWrites: 0 2026-01-08T15:35:30.508258Z node 14 :PERSQUEUE DEBUG: partition.cpp:2347: [72075186224037892][Partition][0][StateIdle] Try persist 2026-01-08T15:35:30.537861Z node 14 :PERSQUEUE DEBUG: partition.cpp:2329: [72075186224037894][Partition][0][StateIdle] Process user action and tx events 2026-01-08T15:35:30.537904Z node 14 :PERSQUEUE DEBUG: partition.cpp:2388: [72075186224037894][Partition][0][StateIdle] Events: 0, PendingCommits: 0, PendingWrites: 0 2026-01-08T15:35:30.537937Z node 14 :PERSQUEUE DEBUG: partition.cpp:2337: [72075186224037894][Partition][0][StateIdle] Process user action and tx pending commits 2026-01-08T15:35:30.537960Z node 14 :PERSQUEUE DEBUG: partition.cpp:2388: [72075186224037894][Partition][0][StateIdle] Events: 0, PendingCommits: 0, PendingWrites: 0 2026-01-08T15:35:30.537979Z node 14 :PERSQUEUE DEBUG: partition.cpp:2347: [72075186224037894][Partition][0][StateIdle] Try persist 2026-01-08T15:35:30.559256Z :INFO: [/Root] [/Root] [7fccd8fc-511bbd77-9be0896c-4c880d5] Closing read session. Close timeout: 0.000000s 2026-01-08T15:35:30.559350Z :INFO: [/Root] Read/commit by partition streams (cluster:topic:partition:stream-id:read-offset:committed-offset): -:topic_A:0:1:21:22 2026-01-08T15:35:30.559417Z :INFO: [/Root] [/Root] [7fccd8fc-511bbd77-9be0896c-4c880d5] Counters: { Errors: 0 CurrentSessionLifetimeMs: 2007 BytesRead: 16000000 MessagesRead: 22 BytesReadCompressed: 16000000 BytesInflightUncompressed: 0 BytesInflightCompressed: 0 BytesInflightTotal: 0 MessagesInflight: 0 } 2026-01-08T15:35:30.559562Z :NOTICE: [/Root] [/Root] [7fccd8fc-511bbd77-9be0896c-4c880d5] Aborting read session. Description: SessionClosed { Status: ABORTED Issues: "
<main>: Error: Close with zero timeout " } 2026-01-08T15:35:30.559637Z :DEBUG: [/Root] [/Root] [7fccd8fc-511bbd77-9be0896c-4c880d5] [] Abort session to cluster 2026-01-08T15:35:30.560340Z :DEBUG: [/Root] 0x00007D37EF782190 TDirectReadSessionManager ServerSessionId=test-consumer_14_1_10534265443994549157_v1 Close 2026-01-08T15:35:30.560809Z :DEBUG: [/Root] 0x00007D37EF782190 TDirectReadSessionManager ServerSessionId=test-consumer_14_1_10534265443994549157_v1 Close 2026-01-08T15:35:30.560970Z :NOTICE: [/Root] [/Root] [7fccd8fc-511bbd77-9be0896c-4c880d5] Aborting read session. Description: SessionClosed { Status: ABORTED Issues: "
<main>: Error: Aborted " } 2026-01-08T15:35:30.563087Z node 14 :PQ_READ_PROXY DEBUG: read_session_actor.cpp:122: session cookie 1 consumer test-consumer session test-consumer_14_1_10534265443994549157_v1 grpc read done: success# 0, data# { } 2026-01-08T15:35:30.563133Z node 14 :PQ_READ_PROXY INFO: read_session_actor.cpp:125: session cookie 1 consumer test-consumer session test-consumer_14_1_10534265443994549157_v1 grpc read failed 2026-01-08T15:35:30.563171Z node 14 :PQ_READ_PROXY INFO: read_session_actor.cpp:92: session cookie 1 consumer test-consumer session test-consumer_14_1_10534265443994549157_v1 grpc closed 2026-01-08T15:35:30.563207Z node 14 :PQ_READ_PROXY INFO: read_session_actor.cpp:383: session cookie 1 consumer test-consumer session test-consumer_14_1_10534265443994549157_v1 is DEAD 2026-01-08T15:35:30.564163Z node 14 :PQ_READ_PROXY DEBUG: direct_read_actor.cpp:84: Direct read proxy [14:7593014822580375753:2548]: session cookie 2 consumer test-consumer session test-consumer_14_1_10534265443994549157_v1 grpc read done: success# 0, data# { } 2026-01-08T15:35:30.564188Z node 14 :PQ_READ_PROXY INFO: direct_read_actor.cpp:87: Direct read proxy [14:7593014822580375753:2548]: session cookie 2 consumer test-consumer session test-consumer_14_1_10534265443994549157_v1grpc read failed 2026-01-08T15:35:30.564226Z node 14 :PQ_READ_PROXY INFO: direct_read_actor.cpp:66: Direct read proxy [14:7593014822580375753:2548]: session cookie 2 consumer test-consumer session test-consumer_14_1_10534265443994549157_v1 grpc closed 2026-01-08T15:35:30.564258Z node 14 :PQ_READ_PROXY INFO: direct_read_actor.cpp:167: Direct read proxy [14:7593014822580375753:2548]: session cookie 2 consumer test-consumer session test-consumer_14_1_10534265443994549157_v1 proxy is DEAD 2026-01-08T15:35:30.565021Z node 14 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer__balancing.cpp:1661: [72075186224037895][topic_A] pipe [14:7593014822580375743:2542] disconnected. 2026-01-08T15:35:30.565057Z node 14 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1672: [72075186224037895][topic_A] pipe [14:7593014822580375743:2542] disconnected; active server actors: 1 2026-01-08T15:35:30.565085Z node 14 :PERSQUEUE_READ_BALANCER NOTICE: read_balancer__balancing.cpp:1681: [72075186224037895][topic_A] pipe [14:7593014822580375743:2542] client test-consumer disconnected session test-consumer_14_1_10534265443994549157_v1 2026-01-08T15:35:30.565194Z node 14 :PERSQUEUE DEBUG: pq_impl.cpp:2321: [PQ: 72075186224037894] Destroy direct read session test-consumer_14_1_10534265443994549157_v1 2026-01-08T15:35:30.565232Z node 14 :PERSQUEUE DEBUG: pq_impl.cpp:2809: [PQ: 72075186224037894] server disconnected, pipe [14:7593014822580375748:2546] destroyed 2026-01-08T15:35:30.565279Z node 14 :PQ_READ_PROXY DEBUG: caching_service.cpp:139: Direct read cache: server session deregistered: test-consumer_14_1_10534265443994549157_v1 2026-01-08T15:35:30.566934Z :INFO: [/Root] TraceId [] SessionId [test-message_group_id|faca1ecd-8e92d4d1-978ce8bd-ccbb48d2_0] PartitionId [0] Generation [2] Write session: close.
Timeout 0.000000s 2026-01-08T15:35:30.566988Z :INFO: [/Root] TraceId [] SessionId [test-message_group_id|faca1ecd-8e92d4d1-978ce8bd-ccbb48d2_0] PartitionId [0] Generation [2] Write session will now close 2026-01-08T15:35:30.567048Z :DEBUG: [/Root] TraceId [] SessionId [test-message_group_id|faca1ecd-8e92d4d1-978ce8bd-ccbb48d2_0] PartitionId [0] Generation [2] Write session: aborting 2026-01-08T15:35:30.567180Z :INFO: [/Root] TraceId [] SessionId [test-message_group_id|faca1ecd-8e92d4d1-978ce8bd-ccbb48d2_0] PartitionId [0] Generation [2] Write session: gracefully shut down, all writes complete 2026-01-08T15:35:30.567221Z :DEBUG: [/Root] TraceId [] SessionId [test-message_group_id|faca1ecd-8e92d4d1-978ce8bd-ccbb48d2_0] PartitionId [0] Generation [2] Write session: destroy 2026-01-08T15:35:30.577115Z node 14 :PQ_WRITE_PROXY DEBUG: write_session_actor.cpp:254: session v1 cookie: 5 sessionId: test-message_group_id|faca1ecd-8e92d4d1-978ce8bd-ccbb48d2_0 grpc read done: success: 0 data: 2026-01-08T15:35:30.577144Z node 14 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:256: session v1 cookie: 5 sessionId: test-message_group_id|faca1ecd-8e92d4d1-978ce8bd-ccbb48d2_0 grpc read failed 2026-01-08T15:35:30.577191Z node 14 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:232: session v1 cookie: 5 sessionId: test-message_group_id|faca1ecd-8e92d4d1-978ce8bd-ccbb48d2_0 grpc closed 2026-01-08T15:35:30.577207Z node 14 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:304: session v1 cookie: 5 sessionId: test-message_group_id|faca1ecd-8e92d4d1-978ce8bd-ccbb48d2_0 is DEAD 2026-01-08T15:35:30.577943Z node 14 :PQ_WRITE_PROXY DEBUG: writer.cpp:562: TPartitionWriter 72075186224037894 (partition=0) Received event: NActors::TEvents::TEvPoison 2026-01-08T15:35:30.580546Z node 14 :PERSQUEUE DEBUG: pq_impl.cpp:2809: [PQ: 72075186224037894] server disconnected, pipe [14:7593014813990441100:2523] destroyed 2026-01-08T15:35:30.580594Z node 14 :PERSQUEUE DEBUG: partition_write.cpp:140: [72075186224037894][Partition][0][StateIdle] TPartition::DropOwner. 
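[Note] The ABORTED "Close with zero timeout" status and the write-session teardown logged above are the normal result of closing SDK sessions without waiting for in-flight traffic. A minimal sketch with the C++ topic SDK is shown below; the header path and client setup are assumptions, and the consumer/topic/producer names simply mirror the log.

    // Header path is an assumption; it differs between SDK versions.
    #include <ydb-cpp-sdk/client/topic/client.h>

    void TearDownSessions(NYdb::TDriver& driver) {
        NYdb::NTopic::TTopicClient topic(driver);

        auto readSession = topic.CreateReadSession(NYdb::NTopic::TReadSessionSettings()
            .ConsumerName("test-consumer")          // names borrowed from the log
            .AppendTopics(std::string("topic_A")));

        // Close(TDuration::Zero()) does not wait for pending data: the SDK aborts
        // the session, producing the "Close with zero timeout" event seen above.
        readSession->Close(TDuration::Zero());

        auto writeSession = topic.CreateWriteSession(NYdb::NTopic::TWriteSessionSettings()
            .Path("topic_A")
            .ProducerId("test-message_group_id"));

        // Likewise force-closes the writer ("Write session: close. Timeout 0.000000s",
        // then "Write session: aborting" in the log above).
        writeSession->Close(TDuration::Zero());
    }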
2026-01-08T15:35:30.580635Z node 14 :PERSQUEUE DEBUG: partition.cpp:2329: [72075186224037894][Partition][0][StateIdle] Process user action and tx events 2026-01-08T15:35:30.580653Z node 14 :PERSQUEUE DEBUG: partition.cpp:2388: [72075186224037894][Partition][0][StateIdle] Events: 0, PendingCommits: 0, PendingWrites: 0 2026-01-08T15:35:30.580667Z node 14 :PERSQUEUE DEBUG: partition.cpp:2337: [72075186224037894][Partition][0][StateIdle] Process user action and tx pending commits 2026-01-08T15:35:30.580687Z node 14 :PERSQUEUE DEBUG: partition.cpp:2388: [72075186224037894][Partition][0][StateIdle] Events: 0, PendingCommits: 0, PendingWrites: 0 2026-01-08T15:35:30.580700Z node 14 :PERSQUEUE DEBUG: partition.cpp:2347: [72075186224037894][Partition][0][StateIdle] Try persist 2026-01-08T15:35:30.610737Z node 14 :PERSQUEUE DEBUG: partition.cpp:2329: [72075186224037892][Partition][0][StateIdle] Process user action and tx events 2026-01-08T15:35:30.610782Z node 14 :PERSQUEUE DEBUG: partition.cpp:2388: [72075186224037892][Partition][0][StateIdle] Events: 0, PendingCommits: 0, PendingWrites: 0 2026-01-08T15:35:30.610801Z node 14 :PERSQUEUE DEBUG: partition.cpp:2337: [72075186224037892][Partition][0][StateIdle] Process user action and tx pending commits 2026-01-08T15:35:30.610821Z node 14 :PERSQUEUE DEBUG: partition.cpp:2388: [72075186224037892][Partition][0][StateIdle] Events: 0, PendingCommits: 0, PendingWrites: 0 2026-01-08T15:35:30.610846Z node 14 :PERSQUEUE DEBUG: partition.cpp:2347: [72075186224037892][Partition][0][StateIdle] Try persist 2026-01-08T15:35:30.639323Z node 14 :PERSQUEUE DEBUG: partition.cpp:2329: [72075186224037894][Partition][0][StateIdle] Process user action and tx events 2026-01-08T15:35:30.639363Z node 14 :PERSQUEUE DEBUG: partition.cpp:2388: [72075186224037894][Partition][0][StateIdle] Events: 0, PendingCommits: 0, PendingWrites: 0 2026-01-08T15:35:30.639381Z node 14 :PERSQUEUE DEBUG: partition.cpp:2337: [72075186224037894][Partition][0][StateIdle] Process user action and tx pending commits 2026-01-08T15:35:30.639401Z node 14 :PERSQUEUE DEBUG: partition.cpp:2388: [72075186224037894][Partition][0][StateIdle] Events: 0, PendingCommits: 0, PendingWrites: 0 2026-01-08T15:35:30.639419Z node 14 :PERSQUEUE DEBUG: partition.cpp:2347: [72075186224037894][Partition][0][StateIdle] Try persist 2026-01-08T15:35:30.712564Z node 14 :PERSQUEUE DEBUG: partition.cpp:2329: [72075186224037892][Partition][0][StateIdle] Process user action and tx events 2026-01-08T15:35:30.712601Z node 14 :PERSQUEUE DEBUG: partition.cpp:2388: [72075186224037892][Partition][0][StateIdle] Events: 0, PendingCommits: 0, PendingWrites: 0 2026-01-08T15:35:30.712619Z node 14 :PERSQUEUE DEBUG: partition.cpp:2337: [72075186224037892][Partition][0][StateIdle] Process user action and tx pending commits 2026-01-08T15:35:30.712639Z node 14 :PERSQUEUE DEBUG: partition.cpp:2388: [72075186224037892][Partition][0][StateIdle] Events: 0, PendingCommits: 0, PendingWrites: 0 2026-01-08T15:35:30.712654Z node 14 :PERSQUEUE DEBUG: partition.cpp:2347: [72075186224037892][Partition][0][StateIdle] Try persist 2026-01-08T15:35:30.739018Z node 14 :PERSQUEUE DEBUG: partition.cpp:2329: [72075186224037894][Partition][0][StateIdle] Process user action and tx events 2026-01-08T15:35:30.739059Z node 14 :PERSQUEUE DEBUG: partition.cpp:2388: [72075186224037894][Partition][0][StateIdle] Events: 0, PendingCommits: 0, PendingWrites: 0 2026-01-08T15:35:30.739080Z node 14 :PERSQUEUE DEBUG: partition.cpp:2337: [72075186224037894][Partition][0][StateIdle] 
Process user action and tx pending commits 2026-01-08T15:35:30.739114Z node 14 :PERSQUEUE DEBUG: partition.cpp:2388: [72075186224037894][Partition][0][StateIdle] Events: 0, PendingCommits: 0, PendingWrites: 0 2026-01-08T15:35:30.739133Z node 14 :PERSQUEUE DEBUG: partition.cpp:2347: [72075186224037894][Partition][0][StateIdle] Try persist |87.8%| [TM] {BAZEL_UPLOAD} ydb/public/sdk/cpp/src/client/topic/ut/with_direct_read_ut/unittest ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_resource_pool/unittest >> TResourcePoolTest::AlterResourcePoolShouldFailIfSuchEntityNotExists [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2143] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2143] Leader for TabletID 72057594046678944 is [1:129:2154] sender: [1:130:2058] recipient: [1:112:2143] 2026-01-08T15:35:32.348819Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7938: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2026-01-08T15:35:32.348930Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7966: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2026-01-08T15:35:32.349009Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7852: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2026-01-08T15:35:32.349052Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7868: OperationsProcessing config: using default configuration 2026-01-08T15:35:32.349095Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7874: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2026-01-08T15:35:32.349144Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7874: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2026-01-08T15:35:32.349220Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7998: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2026-01-08T15:35:32.349293Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_shred_manager.cpp:39: [RootShredManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, ShredInterval# 604800.000000s, ShredBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2026-01-08T15:35:32.350277Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:8069: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2026-01-08T15:35:32.350799Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2026-01-08T15:35:32.461149Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7755: Cannot subscribe to console configs 2026-01-08T15:35:32.461212Z node 1 :IMPORT WARN: schemeshard_import.cpp:440: Table profiles were not loaded 2026-01-08T15:35:32.472817Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2026-01-08T15:35:32.473135Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2026-01-08T15:35:32.473297Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 
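[Note] The TResourcePoolTest::AlterResourcePoolShouldFailIfSuchEntityNotExists run whose schemeshard bootstrap begins above alters a pool that was never created, and the operation is expected to fail with a scheme error. Continuing the query-client sketch from the earlier note, the client-side equivalent would look roughly like the following; the statement shape follows the documented resource-pool DDL, and the exact error surfaced to the client is an assumption.

    // Expected to fail: MyResourcePool does not exist in this database,
    // mirroring the "path hasn't been resolved" StatusPathDoesNotExist above.
    auto alter = client.ExecuteQuery(R"(
        ALTER RESOURCE POOL MyResourcePool SET (CONCURRENT_QUERY_LIMIT = 20);
    )", NYdb::NQuery::TTxControl::NoTx()).GetValueSync();
    Y_ENSURE(!alter.IsSuccess()); // scheme error: entity not found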
2026-01-08T15:35:32.486792Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2026-01-08T15:35:32.487525Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:464: Clear TempDirsState with owners number: 0 2026-01-08T15:35:32.488370Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1383: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2026-01-08T15:35:32.489267Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2026-01-08T15:35:32.492817Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:160: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2026-01-08T15:35:32.492988Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_shred_manager.cpp:84: [RootShredManager] Stop 2026-01-08T15:35:32.494149Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2026-01-08T15:35:32.494230Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2026-01-08T15:35:32.494420Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2026-01-08T15:35:32.494487Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2026-01-08T15:35:32.494935Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:213: TTxServerlessStorageBilling.Complete 2026-01-08T15:35:32.495132Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:7111: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2026-01-08T15:35:32.663546Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot" OperationType: ESchemeOpMkDir MkDir { Name: ".sys" } Internal: true FailOnExist: false } TxId: 281474976710657 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:35:32.664384Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "auth_permissions" Type: EAuthPermissions } } TxId: 281474976710658 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:35:32.664497Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "auth_effective_permissions" Type: EAuthEffectivePermissions } } TxId: 281474976710659 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:35:32.664564Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# 
[1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "auth_users" Type: EAuthUsers } } TxId: 281474976710660 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:35:32.664619Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "pg_tables" Type: EPgTables } } TxId: 281474976710661 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:35:32.664658Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "ds_pdisks" Type: EPDisks } } TxId: 281474976710662 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:35:32.664740Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "auth_group_members" Type: EAuthGroupMembers } } TxId: 281474976710663 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:35:32.664803Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "top_partitions_one_minute" Type: ETopPartitionsByCpuOneMinute } } TxId: 281474976710664 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:35:32.664878Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "top_queries_by_request_units_one_hour" Type: ETopQueriesByRequestUnitsOneHour } } TxId: 281474976710665 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:35:32.664954Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "ds_vslots" Type: EVSlots } } TxId: 281474976710666 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:35:32.665001Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: 
"top_partitions_by_tli_one_hour" Type: ETopPartitionsByTliOneHour } } TxId: 281474976710667 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:35:32.665057Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "nodes" Type: ENodes } } TxId: 281474976710668 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:35:32.665122Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "top_queries_by_cpu_time_one_hour" Type: ETopQueriesByCpuTimeOneHour } } TxId: 281474976710669 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:35:32.665177Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "top_queries_by_read_bytes_one_hour" Type: ETopQueriesByReadBytesOneHour } } TxId: 281474976710670 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:35:32.665243Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "top_queries_by_read_bytes_one_minute" Type: ETopQueriesByR ... 
is done id#101:0 progress is 3/3 2026-01-08T15:35:35.439512Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1714: TOperation IsReadyToDone TxId: 101 ready parts: 3/3 2026-01-08T15:35:35.439541Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:928: Part operation is done id#101:0 progress is 3/3 2026-01-08T15:35:35.439563Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1714: TOperation IsReadyToDone TxId: 101 ready parts: 3/3 2026-01-08T15:35:35.439588Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1679: TOperation IsReadyToNotify, TxId: 101, ready parts: 3/3, is published: false 2026-01-08T15:35:35.439627Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1714: TOperation IsReadyToDone TxId: 101 ready parts: 3/3 2026-01-08T15:35:35.439666Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:995: Operation and all the parts is done, operation id: 101:0 2026-01-08T15:35:35.439718Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5577: RemoveTx for txid 101:0 2026-01-08T15:35:35.439794Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:611: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 38] was 3 2026-01-08T15:35:35.439830Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:995: Operation and all the parts is done, operation id: 101:1 2026-01-08T15:35:35.439852Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5577: RemoveTx for txid 101:1 2026-01-08T15:35:35.439907Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:611: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 39] was 3 2026-01-08T15:35:35.439943Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:995: Operation and all the parts is done, operation id: 101:2 2026-01-08T15:35:35.439963Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5577: RemoveTx for txid 101:2 2026-01-08T15:35:35.439994Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:611: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 40] was 2 2026-01-08T15:35:35.440020Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:1004: Publication still in progress, tx: 101, publications: 4, subscribers: 0 2026-01-08T15:35:35.440083Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:1011: Publication details: tx: 101, [OwnerId: 72057594046678944, LocalPathId: 1], 9 2026-01-08T15:35:35.440116Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:1011: Publication details: tx: 101, [OwnerId: 72057594046678944, LocalPathId: 38], 6 2026-01-08T15:35:35.440137Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:1011: Publication details: tx: 101, [OwnerId: 72057594046678944, LocalPathId: 39], 5 2026-01-08T15:35:35.440163Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:1011: Publication details: tx: 101, [OwnerId: 72057594046678944, LocalPathId: 40], 3 2026-01-08T15:35:35.442372Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6274: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 9 PathOwnerId: 72057594046678944, cookie: 101 2026-01-08T15:35:35.442471Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 9 PathOwnerId: 72057594046678944, cookie: 101 2026-01-08T15:35:35.442542Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 4, at schemeshard: 72057594046678944, txId: 101 2026-01-08T15:35:35.442591Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 101, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], version: 9 2026-01-08T15:35:35.442644Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:611: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 3 2026-01-08T15:35:35.443516Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6274: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 38 Version: 6 PathOwnerId: 72057594046678944, cookie: 101 2026-01-08T15:35:35.443596Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 38 Version: 6 PathOwnerId: 72057594046678944, cookie: 101 2026-01-08T15:35:35.443625Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 3, at schemeshard: 72057594046678944, txId: 101 2026-01-08T15:35:35.443659Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 101, pathId: [OwnerId: 72057594046678944, LocalPathId: 38], version: 6 2026-01-08T15:35:35.443724Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:611: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 38] was 2 2026-01-08T15:35:35.444845Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6274: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 39 Version: 5 PathOwnerId: 72057594046678944, cookie: 101 2026-01-08T15:35:35.444924Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 39 Version: 5 PathOwnerId: 72057594046678944, cookie: 101 2026-01-08T15:35:35.444952Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 2, at schemeshard: 72057594046678944, txId: 101 2026-01-08T15:35:35.445006Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 101, pathId: [OwnerId: 72057594046678944, LocalPathId: 39], version: 5 2026-01-08T15:35:35.445043Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:611: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 39] was 2 2026-01-08T15:35:35.446033Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6274: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 40 Version: 3 PathOwnerId: 72057594046678944, cookie: 101 2026-01-08T15:35:35.446109Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 40 Version: 3 PathOwnerId: 72057594046678944, cookie: 101 2026-01-08T15:35:35.446145Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046678944, txId: 101 2026-01-08T15:35:35.446175Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 101, pathId: [OwnerId: 72057594046678944, LocalPathId: 40], version: 3 2026-01-08T15:35:35.446205Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:611: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 40] was 1 2026-01-08T15:35:35.446286Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 101, subscribers: 0 2026-01-08T15:35:35.449514Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 101 2026-01-08T15:35:35.449642Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 101 2026-01-08T15:35:35.449735Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 101 2026-01-08T15:35:35.449802Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 101 TestModificationResult got TxId: 101, wait until txId: 101 TestWaitNotification wait txId: 101 2026-01-08T15:35:35.450013Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:213: tests -- TTxNotificationSubscriber for txId 101: send EvNotifyTxCompletion 2026-01-08T15:35:35.450061Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:259: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 101 2026-01-08T15:35:35.450465Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:123: NotifyTxCompletion, unknown transaction, txId: 101, at schemeshard: 72057594046678944 2026-01-08T15:35:35.450565Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:230: tests -- TTxNotificationSubscriber for txId 101: got EvNotifyTxCompletionResult 2026-01-08T15:35:35.450606Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:239: tests -- TTxNotificationSubscriber for txId 101: satisfy waiter [2:720:2710] TestWaitNotification: OK eventTxId 101 TestModificationResults wait txId: 102 2026-01-08T15:35:35.453899Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:377: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/MyRoot/.metadata/workload_manager/pools" OperationType: ESchemeOpAlterResourcePool CreateResourcePool { Name: "MyResourcePool" Properties { Properties { key: "concurrent_query_limit" value: "20" } } } } TxId: 102 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2026-01-08T15:35:35.454149Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_resource_pool.cpp:123: [72057594046678944] TAlterResourcePool Propose: opId# 102:0, path# /MyRoot/.metadata/workload_manager/pools/MyResourcePool 2026-01-08T15:35:35.454363Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: 
schemeshard__operation.cpp:127: IgniteOperation, opId: 102:1, propose status:StatusPathDoesNotExist, reason: Check failed: path: '/MyRoot/.metadata/workload_manager/pools/MyResourcePool', error: path hasn't been resolved, nearest resolved path: '/MyRoot/.metadata/workload_manager/pools' (id: [OwnerId: 72057594046678944, LocalPathId: 40]), at schemeshard: 72057594046678944 2026-01-08T15:35:35.456727Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:460: TTxOperationPropose Complete, txId: 102, response: Status: StatusPathDoesNotExist Reason: "Check failed: path: \'/MyRoot/.metadata/workload_manager/pools/MyResourcePool\', error: path hasn\'t been resolved, nearest resolved path: \'/MyRoot/.metadata/workload_manager/pools\' (id: [OwnerId: 72057594046678944, LocalPathId: 40])" TxId: 102 SchemeshardId: 72057594046678944, at schemeshard: 72057594046678944 2026-01-08T15:35:35.457024Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 102, database: /MyRoot, subject: , status: StatusPathDoesNotExist, reason: Check failed: path: '/MyRoot/.metadata/workload_manager/pools/MyResourcePool', error: path hasn't been resolved, nearest resolved path: '/MyRoot/.metadata/workload_manager/pools' (id: [OwnerId: 72057594046678944, LocalPathId: 40]), operation: ALTER RESOURCE POOL, path: MyResourcePool TestModificationResult got TxId: 102, wait until txId: 102 |87.8%| [TM] {BAZEL_UPLOAD} ydb/core/tx/schemeshard/ut_resource_pool/unittest >> TTxDataShardUploadRows::RetryUploadRowsToShard >> TTxDataShardUploadRows::TestUploadRowsDropColumnRace >> KikimrIcGateway::TestLoadServiceAccountSecretValueFromExternalDataSourceMetadata-UseSchemaSecrets+UseAuthToken [GOOD] >> KikimrIcGateway::TestLoadServiceAccountSecretValueFromExternalDataSourceMetadata+UseSchemaSecrets+UseAuthToken >> TTxDataShardUploadRows::TestUploadShadowRowsShadowData >> CdcStreamChangeCollector::PageFaults [GOOD] >> CdcStreamChangeCollector::OldImage >> BsControllerConfig::ManyPDisksRestarts >> AsyncIndexChangeCollector::AllColumnsInPk [GOOD] >> AsyncIndexChangeCollector::CoverIndexedColumn >> AsyncIndexChangeCollector::IndexedPrimaryKeyDeleteSingleRow [GOOD] >> AsyncIndexChangeCollector::ImplicitlyUpdateCoveredColumn >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-LocalUser-DropUser-66 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-LocalUser-DropUser-67 >> BsControllerConfig::SoleCommandRollback >> BsControllerConfig::ExtendByCreatingSeparateBox >> AsyncIndexChangeCollector::MultiIndexedTableReplaceSingleRow [GOOD] >> CdcStreamChangeCollector::DeleteNothing [GOOD] >> CdcStreamChangeCollector::DeleteSingleRow >> BsControllerConfig::SoleCommandRollback [GOOD] >> BsControllerConfig::UnsupportedCommandError >> TTxDataShardValidateUniqueIndexScan::BadRequest [GOOD] >> TTxDataShardValidateUniqueIndexScan::RunScan >> BsControllerConfig::UnsupportedCommandError [GOOD] >> AsyncIndexChangeCollector::IndexedPrimaryKeyInsertSingleRow [GOOD] >> DataShardVolatile::CompactedVolatileChangesCommit [GOOD] >> DataShardVolatile::CompactedVolatileChangesAbort ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/datashard/ut_change_collector/unittest >> AsyncIndexChangeCollector::MultiIndexedTableReplaceSingleRow [GOOD] Test command err: ... 
waiting for SysViewsRoster update finished 2026-01-08T15:35:17.632615Z node 1 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:125: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, got undelivered to scheme cache: ActorUnknown 2026-01-08T15:35:17.766085Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:114:2161], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2026-01-08T15:35:17.766563Z node 1 :METADATA_PROVIDER ERROR: log.cpp:841: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2026-01-08T15:35:17.766739Z node 1 :METADATA_PROVIDER ERROR: log.cpp:841: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2026-01-08T15:35:18.090972Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2026-01-08T15:35:18.091132Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2026-01-08T15:35:18.169244Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1374: Notification cookie mismatch for subscription [1:34:2081] 1767886514516184 != 1767886514516188 2026-01-08T15:35:18.185177Z node 1 :IMPORT WARN: schemeshard_import.cpp:440: Table profiles were not loaded 2026-01-08T15:35:18.234039Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2026-01-08T15:35:18.345602Z node 1 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: PathErrorUnknown, path: Root/.metadata/script_executions ... waiting for SysViewsRoster update finished (done) 2026-01-08T15:35:18.712924Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2026-01-08T15:35:18.728724Z node 1 :TX_CONVEYOR ERROR: log.cpp:841: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2026-01-08T15:35:18.878895Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:35:18.971749Z node 1 :TX_DATASHARD INFO: datashard.cpp:374: TDataShard::OnActivateExecutor: tablet 72075186224037888 actor [1:897:2773] 2026-01-08T15:35:18.972047Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:661: TxInitSchema.Execute 2026-01-08T15:35:19.023411Z node 1 :TX_DATASHARD INFO: datashard.cpp:374: TDataShard::OnActivateExecutor: tablet 72075186224037889 actor [1:899:2775] 2026-01-08T15:35:19.023702Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:661: TxInitSchema.Execute 2026-01-08T15:35:19.034663Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:727: TxInitSchema.Complete 2026-01-08T15:35:19.034845Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:50: TDataShard::TTxInit::Execute 2026-01-08T15:35:19.036876Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1324: LoadChangeRecords: QueueSize: 0, at tablet: 72075186224037888 2026-01-08T15:35:19.036986Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1380: LoadLockChangeRecords at tablet: 72075186224037888 2026-01-08T15:35:19.037050Z node 1 
:TX_DATASHARD DEBUG: datashard.cpp:1429: LoadChangeRecordCommits at tablet: 72075186224037888 2026-01-08T15:35:19.037503Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:94: TDataShard::TTxInit::Complete 2026-01-08T15:35:19.037797Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:102: TDataShard::TTxInitRestored::Execute 2026-01-08T15:35:19.037906Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:142: DataShard 72075186224037888 persisting started state actor id [1:928:2773] in generation 1 2026-01-08T15:35:19.038542Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:727: TxInitSchema.Complete 2026-01-08T15:35:19.038692Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:50: TDataShard::TTxInit::Execute 2026-01-08T15:35:19.040374Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1324: LoadChangeRecords: QueueSize: 0, at tablet: 72075186224037889 2026-01-08T15:35:19.040461Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1380: LoadLockChangeRecords at tablet: 72075186224037889 2026-01-08T15:35:19.040522Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1429: LoadChangeRecordCommits at tablet: 72075186224037889 2026-01-08T15:35:19.040846Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:94: TDataShard::TTxInit::Complete 2026-01-08T15:35:19.040983Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:102: TDataShard::TTxInitRestored::Execute 2026-01-08T15:35:19.041072Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:142: DataShard 72075186224037889 persisting started state actor id [1:929:2775] in generation 1 2026-01-08T15:35:19.052382Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:151: TDataShard::TTxInitRestored::Complete 2026-01-08T15:35:19.111904Z node 1 :TX_DATASHARD INFO: datashard.cpp:418: Switched to work state WaitScheme tabletId 72075186224037888 2026-01-08T15:35:19.112182Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:458: 72075186224037888 not sending time cast registration request in state WaitScheme: missing processing params 2026-01-08T15:35:19.112324Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1259: Change sender created: at tablet: 72075186224037888, actorId: [1:932:2794] 2026-01-08T15:35:19.112368Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1264: Trying to activate change sender: at tablet: 72075186224037888 2026-01-08T15:35:19.112408Z node 1 :TX_DATASHARD INFO: datashard.cpp:1281: Cannot activate change sender: at tablet: 72075186224037888, state: WaitScheme 2026-01-08T15:35:19.112467Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2026-01-08T15:35:19.113096Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:151: TDataShard::TTxInitRestored::Complete 2026-01-08T15:35:19.113152Z node 1 :TX_DATASHARD INFO: datashard.cpp:418: Switched to work state WaitScheme tabletId 72075186224037889 2026-01-08T15:35:19.113231Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:458: 72075186224037889 not sending time cast registration request in state WaitScheme: missing processing params 2026-01-08T15:35:19.113302Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1259: Change sender created: at tablet: 72075186224037889, actorId: [1:933:2795] 2026-01-08T15:35:19.113329Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1264: Trying to activate change sender: at tablet: 72075186224037889 2026-01-08T15:35:19.113381Z node 1 :TX_DATASHARD INFO: datashard.cpp:1281: Cannot activate change sender: at tablet: 72075186224037889, state: WaitScheme 2026-01-08T15:35:19.113429Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 
72075186224037889 2026-01-08T15:35:19.113630Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 72075186224037888 2026-01-08T15:35:19.113739Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 72075186224037888 2026-01-08T15:35:19.113957Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2026-01-08T15:35:19.114013Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 0 2026-01-08T15:35:19.114058Z node 1 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186224037888 TxInFly 0 2026-01-08T15:35:19.114111Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2026-01-08T15:35:19.114273Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3740: Server connected at leader tablet# 72075186224037888, clientId# [1:890:2769], serverId# [1:901:2776], sessionId# [0:0:0] 2026-01-08T15:35:19.114343Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 72075186224037889 2026-01-08T15:35:19.114413Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 72075186224037889 2026-01-08T15:35:19.114974Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037888 2026-01-08T15:35:19.115289Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:133: Propose scheme transaction at tablet 72075186224037888 txId 281474976715657 ssId 72057594046644480 seqNo 2:1 2026-01-08T15:35:19.115483Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:221: Prepared scheme transaction txId 281474976715657 at tablet 72075186224037888 2026-01-08T15:35:19.116000Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037889 2026-01-08T15:35:19.116047Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037889 active 0 active planned 0 immediate 0 planned 0 2026-01-08T15:35:19.116084Z node 1 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186224037889 TxInFly 0 2026-01-08T15:35:19.116129Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037889 2026-01-08T15:35:19.116288Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3740: Server connected at leader tablet# 72075186224037889, clientId# [1:891:2770], serverId# [1:903:2777], sessionId# [0:0:0] 2026-01-08T15:35:19.116629Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037889 2026-01-08T15:35:19.116832Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:133: Propose scheme transaction at tablet 72075186224037889 txId 281474976715657 ssId 72057594046644480 seqNo 2:2 2026-01-08T15:35:19.116907Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:221: Prepared scheme transaction txId 281474976715657 at tablet 72075186224037889 2026-01-08T15:35:19.124749Z node 1 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:180: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2026-01-08T15:35:19.124915Z node 1 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:180: Discovered subdomain [OwnerId: 
72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037889 2026-01-08T15:35:19.139537Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 72075186224037888 2026-01-08T15:35:19.139695Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:468: 72075186224037888 not sending time cast registration request in state WaitScheme 2026-01-08T15:35:19.139810Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 72075186224037889 2026-01-08T15:35:19.139853Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:468: 72075186224037889 not sending time cast registration request in state WaitScheme 2026-01-08T15:35:19.277059Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3740: Server connected at leader tablet# 72075186224037889, clientId# [1:950:2806], serverId# [1:953:2809], sessionId# [0:0:0] 2026- ... , outOfSpace = 0 at datashard 72075186224037888 2026-01-08T15:35:38.442539Z node 4 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:180: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037889 2026-01-08T15:35:38.442601Z node 4 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:180: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037890 2026-01-08T15:35:38.443021Z node 4 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037890 2026-01-08T15:35:38.443075Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:1264: Trying to activate change sender: at tablet: 72075186224037890 2026-01-08T15:35:38.443120Z node 4 :TX_DATASHARD INFO: datashard.cpp:1302: Change sender activated: at tablet: 72075186224037890 2026-01-08T15:35:38.443186Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:810: Complete [2000 : 281474976715657] from 72075186224037890 at tablet 72075186224037890 send result to client [4:400:2399], exec latency: 0 ms, propose latency: 0 ms 2026-01-08T15:35:38.443238Z node 4 :TX_DATASHARD INFO: datashard.cpp:1599: 72075186224037890 Sending notify to schemeshard 72057594046644480 txId 281474976715657 state Ready TxInFly 0 2026-01-08T15:35:38.443310Z node 4 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037890 2026-01-08T15:35:38.458198Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:3790: Got TEvMediatorTimecast::TEvSubscribeReadStepResult at 72075186224037889 coordinator 72057594046316545 last step 0 next step 2000 2026-01-08T15:35:38.459302Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:2965: Handle TEvSchemaChangedResult 281474976715657 datashard 72075186224037889 state Ready 2026-01-08T15:35:38.459399Z node 4 :TX_DATASHARD DEBUG: datashard__schema_changed.cpp:22: 72075186224037889 Got TEvSchemaChangedResult from SS at 72075186224037889 2026-01-08T15:35:38.460053Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:3790: Got TEvMediatorTimecast::TEvSubscribeReadStepResult at 72075186224037888 coordinator 72057594046316545 last step 0 next step 2000 2026-01-08T15:35:38.460859Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:2965: Handle TEvSchemaChangedResult 281474976715657 datashard 72075186224037888 state Ready 2026-01-08T15:35:38.460923Z node 4 :TX_DATASHARD DEBUG: datashard__schema_changed.cpp:22: 72075186224037888 Got TEvSchemaChangedResult from SS at 72075186224037888 2026-01-08T15:35:38.461057Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:3790: Got TEvMediatorTimecast::TEvSubscribeReadStepResult 
at 72075186224037890 coordinator 72057594046316545 last step 0 next step 2000 2026-01-08T15:35:38.461961Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:2965: Handle TEvSchemaChangedResult 281474976715657 datashard 72075186224037890 state Ready 2026-01-08T15:35:38.462016Z node 4 :TX_DATASHARD DEBUG: datashard__schema_changed.cpp:22: 72075186224037890 Got TEvSchemaChangedResult from SS at 72075186224037890 2026-01-08T15:35:38.478771Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:1046:2883], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:35:38.478888Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:1056:2888], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:35:38.478978Z node 4 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:263: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:35:38.480036Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:1062:2893], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:35:38.480177Z node 4 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:263: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:35:38.487225Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2026-01-08T15:35:38.494506Z node 4 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:180: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2026-01-08T15:35:38.494644Z node 4 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:180: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037889 2026-01-08T15:35:38.494698Z node 4 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:180: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037890 2026-01-08T15:35:38.639914Z node 4 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:180: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2026-01-08T15:35:38.640054Z node 4 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:180: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037889 2026-01-08T15:35:38.640112Z node 4 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:180: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037890 2026-01-08T15:35:38.643476Z node 4 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [4:1060:2891], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715658 completed, doublechecking } 2026-01-08T15:35:38.680754Z node 4 :TX_PROXY ERROR: schemereq.cpp:624: Actor# [4:1120:2932] txid# 281474976715659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 46], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2026-01-08T15:35:38.794695Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:3740: Server connected at leader tablet# 72075186224037889, clientId# [4:1224:2975], serverId# [4:1225:2976], sessionId# [0:0:0] 2026-01-08T15:35:38.795136Z node 4 :TX_DATASHARD DEBUG: execute_write_unit.cpp:262: Executing write operation for [0:2] at 72075186224037889 2026-01-08T15:35:38.795453Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:883: PersistChangeRecord: record: { Order: 1 Group: 1767886538795326 Step: 2500 TxId: 18446744073709551615 PathId: [OwnerId: 72057594046644480, LocalPathId: 39] Kind: AsyncIndex Source: Unspecified Body: 28b TableId: [OwnerId: 72057594046644480, LocalPathId: 38] SchemaVersion: 1 LockId: 0 LockOffset: 0 }, at tablet: 72075186224037889 2026-01-08T15:35:38.795633Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:883: PersistChangeRecord: record: { Order: 2 Group: 1767886538795326 Step: 2500 TxId: 18446744073709551615 PathId: [OwnerId: 72057594046644480, LocalPathId: 41] Kind: AsyncIndex Source: Unspecified Body: 28b TableId: [OwnerId: 72057594046644480, LocalPathId: 38] SchemaVersion: 1 LockId: 0 LockOffset: 0 }, at tablet: 72075186224037889 2026-01-08T15:35:38.795781Z node 4 :TX_DATASHARD DEBUG: execute_write_unit.cpp:465: Executed write operation for [0:2] at 72075186224037889, row count=1 2026-01-08T15:35:38.807153Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:1179: EnqueueChangeRecords: at tablet: 72075186224037889, records: { Order: 1 PathId: [OwnerId: 72057594046644480, LocalPathId: 39] BodySize: 28 TableId: [OwnerId: 72057594046644480, LocalPathId: 38] SchemaVersion: 1 }, { Order: 2 PathId: [OwnerId: 72057594046644480, LocalPathId: 41] BodySize: 28 TableId: [OwnerId: 72057594046644480, LocalPathId: 38] SchemaVersion: 1 } 2026-01-08T15:35:38.807267Z node 4 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037889 2026-01-08T15:35:38.903559Z node 4 :TX_DATASHARD DEBUG: execute_write_unit.cpp:262: Executing write operation for [0:3] at 72075186224037889 2026-01-08T15:35:38.903933Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:883: PersistChangeRecord: record: { Order: 3 Group: 1767886538903823 Step: 2500 TxId: 18446744073709551615 PathId: [OwnerId: 72057594046644480, LocalPathId: 39] Kind: AsyncIndex Source: Unspecified Body: 28b TableId: [OwnerId: 72057594046644480, LocalPathId: 38] SchemaVersion: 1 LockId: 0 LockOffset: 0 }, at tablet: 72075186224037889 2026-01-08T15:35:38.904148Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:883: PersistChangeRecord: record: { Order: 4 Group: 1767886538903823 Step: 2500 TxId: 18446744073709551615 PathId: [OwnerId: 72057594046644480, LocalPathId: 39] Kind: AsyncIndex Source: Unspecified Body: 28b TableId: [OwnerId: 72057594046644480, LocalPathId: 38] SchemaVersion: 1 LockId: 0 LockOffset: 0 }, at tablet: 72075186224037889 2026-01-08T15:35:38.904274Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:883: PersistChangeRecord: record: { Order: 5 Group: 1767886538903823 Step: 2500 TxId: 18446744073709551615 PathId: [OwnerId: 72057594046644480, LocalPathId: 41] Kind: AsyncIndex Source: Unspecified 
Body: 28b TableId: [OwnerId: 72057594046644480, LocalPathId: 38] SchemaVersion: 1 LockId: 0 LockOffset: 0 }, at tablet: 72075186224037889 2026-01-08T15:35:38.904406Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:883: PersistChangeRecord: record: { Order: 6 Group: 1767886538903823 Step: 2500 TxId: 18446744073709551615 PathId: [OwnerId: 72057594046644480, LocalPathId: 41] Kind: AsyncIndex Source: Unspecified Body: 24b TableId: [OwnerId: 72057594046644480, LocalPathId: 38] SchemaVersion: 1 LockId: 0 LockOffset: 0 }, at tablet: 72075186224037889 2026-01-08T15:35:38.904515Z node 4 :TX_DATASHARD DEBUG: execute_write_unit.cpp:465: Executed write operation for [0:3] at 72075186224037889, row count=1 2026-01-08T15:35:38.919234Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:1179: EnqueueChangeRecords: at tablet: 72075186224037889, records: { Order: 3 PathId: [OwnerId: 72057594046644480, LocalPathId: 39] BodySize: 28 TableId: [OwnerId: 72057594046644480, LocalPathId: 38] SchemaVersion: 1 }, { Order: 4 PathId: [OwnerId: 72057594046644480, LocalPathId: 39] BodySize: 28 TableId: [OwnerId: 72057594046644480, LocalPathId: 38] SchemaVersion: 1 }, { Order: 5 PathId: [OwnerId: 72057594046644480, LocalPathId: 41] BodySize: 28 TableId: [OwnerId: 72057594046644480, LocalPathId: 38] SchemaVersion: 1 }, { Order: 6 PathId: [OwnerId: 72057594046644480, LocalPathId: 41] BodySize: 24 TableId: [OwnerId: 72057594046644480, LocalPathId: 38] SchemaVersion: 1 } 2026-01-08T15:35:38.919322Z node 4 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037889 2026-01-08T15:35:38.927076Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:3740: Server connected at leader tablet# 72075186224037889, clientId# [4:1272:3015], serverId# [4:1273:3016], sessionId# [0:0:0] 2026-01-08T15:35:38.934211Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:3740: Server connected at leader tablet# 72075186224037889, clientId# [4:1274:3017], serverId# [4:1275:3018], sessionId# [0:0:0] |87.8%| [TM] {BAZEL_UPLOAD} ydb/core/tx/datashard/ut_change_collector/unittest ------- [TM] {asan, default-linux-x86_64, release} ydb/core/mind/bscontroller/ut_bscontroller/unittest >> BsControllerConfig::UnsupportedCommandError [GOOD] Test command err: Leader for TabletID 72057594037932033 is [0:0:0] sender: [1:26:2057] recipient: [1:24:2068] IGNORE Leader for TabletID 72057594037932033 is [0:0:0] sender: [1:26:2057] recipient: [1:24:2068] Leader for TabletID 72057594037932033 is [1:28:2070] sender: [1:29:2057] recipient: [1:24:2068] 2026-01-08T15:35:39.512340Z node 1 :BS_CONTROLLER DEBUG: {BSC05@impl.h:2080} StateInit event Type# 268828672 Event# NKikimr::TEvTablet::TEvBoot 2026-01-08T15:35:39.513421Z node 1 :BS_CONTROLLER DEBUG: {BSC05@impl.h:2080} StateInit event Type# 268828673 Event# NKikimr::TEvTablet::TEvRestored 2026-01-08T15:35:39.513809Z node 1 :BS_CONTROLLER DEBUG: {BSC22@console_interaction.cpp:14} Console interaction started 2026-01-08T15:35:39.514308Z node 1 :BS_CONTROLLER DEBUG: {BSC05@impl.h:2080} StateInit event Type# 268828684 Event# NKikimr::TEvTablet::TEvFollowerSyncComplete 2026-01-08T15:35:39.515905Z node 1 :BS_CONTROLLER DEBUG: {BSC05@impl.h:2080} StateInit event Type# 268639244 Event# NKikimr::TEvNodeWardenStorageConfig 2026-01-08T15:35:39.516097Z node 1 :BS_CONTROLLER DEBUG: {BSC05@impl.h:2080} StateInit event Type# 131082 Event# NActors::TEvInterconnect::TEvNodesInfo 2026-01-08T15:35:39.516116Z node 1 :BS_CONTROLLER DEBUG: {BSC01@bsc.cpp:829} Handle TEvInterconnect::TEvNodesInfo 2026-01-08T15:35:39.516317Z 
node 1 :BS_CONTROLLER DEBUG: {BSCTXIS01@init_scheme.cpp:17} TTxInitScheme Execute 2026-01-08T15:35:39.524767Z node 1 :BS_CONTROLLER DEBUG: {BSCTXIS03@init_scheme.cpp:44} TTxInitScheme Complete 2026-01-08T15:35:39.524888Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM01@migrate.cpp:190} Execute tx 2026-01-08T15:35:39.525063Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM02@migrate.cpp:251} Complete tx IncompatibleData# false 2026-01-08T15:35:39.525183Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxTrimUnusedSlots 2026-01-08T15:35:39.525271Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxTrimUnusedSlots 2026-01-08T15:35:39.525341Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateSchemaVersion Leader for TabletID 72057594037932033 is [1:28:2070] sender: [1:49:2057] recipient: [1:11:2058] 2026-01-08T15:35:39.538883Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateSchemaVersion 2026-01-08T15:35:39.539045Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxGenerateInstanceId 2026-01-08T15:35:39.566929Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxGenerateInstanceId 2026-01-08T15:35:39.567073Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateStaticPDiskInfo 2026-01-08T15:35:39.567168Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateStaticPDiskInfo 2026-01-08T15:35:39.567251Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxFillInNonNullConfigForPDisk 2026-01-08T15:35:39.567368Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxFillInNonNullConfigForPDisk 2026-01-08T15:35:39.567441Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxDropDriveStatus 2026-01-08T15:35:39.567492Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxDropDriveStatus 2026-01-08T15:35:39.567537Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateCompatibilityInfo 2026-01-08T15:35:39.578910Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateCompatibilityInfo 2026-01-08T15:35:39.579088Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateEnableConfigV2 2026-01-08T15:35:39.594877Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# 
NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateEnableConfigV2 2026-01-08T15:35:39.595038Z node 1 :BS_CONTROLLER DEBUG: {BSCTXLE01@load_everything.cpp:21} TTxLoadEverything Execute 2026-01-08T15:35:39.596423Z node 1 :BS_CONTROLLER DEBUG: {BSCTXLE03@load_everything.cpp:705} TTxLoadEverything Complete 2026-01-08T15:35:39.596481Z node 1 :BS_CONTROLLER DEBUG: {BSC09@impl.h:2211} LoadFinished 2026-01-08T15:35:39.596686Z node 1 :BS_CONTROLLER DEBUG: {BSC18@console_interaction.cpp:31} Console connection service started 2026-01-08T15:35:39.596733Z node 1 :BS_CONTROLLER DEBUG: {BSCTXLE04@load_everything.cpp:710} TTxLoadEverything InitQueue processed 2026-01-08T15:35:39.625244Z node 1 :BS_CONTROLLER DEBUG: {BSCTXCC01@config_cmd.cpp:433} Execute TEvControllerConfigRequest Request# {Command { UpdateSettings { PDiskSpaceColorBorder: CYAN } } } Status { Success: true } Success: true 2026-01-08T15:35:39.637573Z node 1 :BS_CONTROLLER DEBUG: {BSCTXCC01@config_cmd.cpp:433} Execute TEvControllerConfigRequest Request# {Command { UpdateSettings { PDiskSpaceColorBorder: YELLOW } } Rollback: true } Status { Success: true } ErrorDescription: "transaction rollback" RollbackSuccess: true 2026-01-08T15:35:39.660924Z node 1 :BS_CONTROLLER DEBUG: {BSCTXCC01@config_cmd.cpp:433} Execute TEvControllerConfigRequest Request# {Command { QueryBaseConfig { } } } Status { Success: true BaseConfig { Node { NodeId: 1 PhysicalLocation: "\n\001\000\000\000\024\001\000\000\000\036\001\000\000\000(\001\000\000\000" HostKey { Fqdn: "::1" IcPort: 12001 } Location { DataCenter: "1" Module: "1" Rack: "1" Unit: "1" } } Settings { DefaultMaxSlots: 16 EnableSelfHeal: true EnableDonorMode: true ScrubPeriodicitySeconds: 2592000 PDiskSpaceMarginPromille: 150 GroupReserveMin: 0 GroupReservePartPPM: 0 MaxScrubbedDisksAtOnce: 4294967295 PDiskSpaceColorBorder: CYAN EnableGroupLayoutSanitizer: false AllowMultipleRealmsOccupation: true UseSelfHealLocalPolicy: false TryToRelocateBrokenDisksLocallyFirst: false } } } Success: true ConfigTxSeqNo: 1 Leader for TabletID 72057594037932033 is [0:0:0] sender: [2:26:2057] recipient: [2:23:2068] IGNORE Leader for TabletID 72057594037932033 is [0:0:0] sender: [2:26:2057] recipient: [2:23:2068] Leader for TabletID 72057594037932033 is [2:28:2070] sender: [2:29:2057] recipient: [2:23:2068] 2026-01-08T15:35:39.863336Z node 2 :BS_CONTROLLER DEBUG: {BSC05@impl.h:2080} StateInit event Type# 268828672 Event# NKikimr::TEvTablet::TEvBoot 2026-01-08T15:35:39.864452Z node 2 :BS_CONTROLLER DEBUG: {BSC05@impl.h:2080} StateInit event Type# 268828673 Event# NKikimr::TEvTablet::TEvRestored 2026-01-08T15:35:39.864782Z node 2 :BS_CONTROLLER DEBUG: {BSC22@console_interaction.cpp:14} Console interaction started 2026-01-08T15:35:39.865313Z node 2 :BS_CONTROLLER DEBUG: {BSC05@impl.h:2080} StateInit event Type# 268828684 Event# NKikimr::TEvTablet::TEvFollowerSyncComplete 2026-01-08T15:35:39.874803Z node 2 :BS_CONTROLLER DEBUG: {BSC05@impl.h:2080} StateInit event Type# 268639244 Event# NKikimr::TEvNodeWardenStorageConfig 2026-01-08T15:35:39.875085Z node 2 :BS_CONTROLLER DEBUG: {BSC05@impl.h:2080} StateInit event Type# 131082 Event# NActors::TEvInterconnect::TEvNodesInfo 2026-01-08T15:35:39.875121Z node 2 :BS_CONTROLLER DEBUG: {BSC01@bsc.cpp:829} Handle TEvInterconnect::TEvNodesInfo 2026-01-08T15:35:39.875341Z node 2 :BS_CONTROLLER DEBUG: {BSCTXIS01@init_scheme.cpp:17} TTxInitScheme Execute 2026-01-08T15:35:39.884946Z node 2 :BS_CONTROLLER DEBUG: {BSCTXIS03@init_scheme.cpp:44} TTxInitScheme Complete 
2026-01-08T15:35:39.885069Z node 2 :BS_CONTROLLER DEBUG: {BSCTXM01@migrate.cpp:190} Execute tx 2026-01-08T15:35:39.885191Z node 2 :BS_CONTROLLER DEBUG: {BSCTXM02@migrate.cpp:251} Complete tx IncompatibleData# false 2026-01-08T15:35:39.885294Z node 2 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxTrimUnusedSlots 2026-01-08T15:35:39.885387Z node 2 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxTrimUnusedSlots 2026-01-08T15:35:39.885463Z node 2 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateSchemaVersion Leader for TabletID 72057594037932033 is [2:28:2070] sender: [2:49:2057] recipient: [2:11:2058] 2026-01-08T15:35:39.898941Z node 2 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateSchemaVersion 2026-01-08T15:35:39.899130Z node 2 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxGenerateInstanceId 2026-01-08T15:35:39.921611Z node 2 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxGenerateInstanceId 2026-01-08T15:35:39.921746Z node 2 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateStaticPDiskInfo 2026-01-08T15:35:39.921819Z node 2 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateStaticPDiskInfo 2026-01-08T15:35:39.921886Z node 2 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxFillInNonNullConfigForPDisk 2026-01-08T15:35:39.921990Z node 2 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxFillInNonNullConfigForPDisk 2026-01-08T15:35:39.922030Z node 2 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxDropDriveStatus 2026-01-08T15:35:39.922077Z node 2 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxDropDriveStatus 2026-01-08T15:35:39.922126Z node 2 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateCompatibilityInfo 2026-01-08T15:35:39.933148Z node 2 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateCompatibilityInfo 2026-01-08T15:35:39.933285Z node 2 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateEnableConfigV2 2026-01-08T15:35:39.945822Z node 2 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateEna ... 
16 EnableSelfHeal: true EnableDonorMode: true ScrubPeriodicitySeconds: 2592000 PDiskSpaceMarginPromille: 150 GroupReserveMin: 0 GroupReservePartPPM: 0 MaxScrubbedDisksAtOnce: 4294967295 PDiskSpaceColorBorder: CYAN EnableGroupLayoutSanitizer: false AllowMultipleRealmsOccupation: true UseSelfHealLocalPolicy: false TryToRelocateBrokenDisksLocallyFirst: false } } } Success: true ConfigTxSeqNo: 1 Leader for TabletID 72057594037932033 is [0:0:0] sender: [3:26:2057] recipient: [3:23:2068] IGNORE Leader for TabletID 72057594037932033 is [0:0:0] sender: [3:26:2057] recipient: [3:23:2068] Leader for TabletID 72057594037932033 is [3:28:2070] sender: [3:29:2057] recipient: [3:23:2068] 2026-01-08T15:35:40.163258Z node 3 :BS_CONTROLLER DEBUG: {BSC05@impl.h:2080} StateInit event Type# 268828672 Event# NKikimr::TEvTablet::TEvBoot 2026-01-08T15:35:40.164237Z node 3 :BS_CONTROLLER DEBUG: {BSC05@impl.h:2080} StateInit event Type# 268828673 Event# NKikimr::TEvTablet::TEvRestored 2026-01-08T15:35:40.164479Z node 3 :BS_CONTROLLER DEBUG: {BSC22@console_interaction.cpp:14} Console interaction started 2026-01-08T15:35:40.165018Z node 3 :BS_CONTROLLER DEBUG: {BSC05@impl.h:2080} StateInit event Type# 268828684 Event# NKikimr::TEvTablet::TEvFollowerSyncComplete 2026-01-08T15:35:40.166243Z node 3 :BS_CONTROLLER DEBUG: {BSC05@impl.h:2080} StateInit event Type# 268639244 Event# NKikimr::TEvNodeWardenStorageConfig 2026-01-08T15:35:40.166460Z node 3 :BS_CONTROLLER DEBUG: {BSC05@impl.h:2080} StateInit event Type# 131082 Event# NActors::TEvInterconnect::TEvNodesInfo 2026-01-08T15:35:40.166506Z node 3 :BS_CONTROLLER DEBUG: {BSC01@bsc.cpp:829} Handle TEvInterconnect::TEvNodesInfo 2026-01-08T15:35:40.166683Z node 3 :BS_CONTROLLER DEBUG: {BSCTXIS01@init_scheme.cpp:17} TTxInitScheme Execute 2026-01-08T15:35:40.175978Z node 3 :BS_CONTROLLER DEBUG: {BSCTXIS03@init_scheme.cpp:44} TTxInitScheme Complete 2026-01-08T15:35:40.176118Z node 3 :BS_CONTROLLER DEBUG: {BSCTXM01@migrate.cpp:190} Execute tx 2026-01-08T15:35:40.176232Z node 3 :BS_CONTROLLER DEBUG: {BSCTXM02@migrate.cpp:251} Complete tx IncompatibleData# false 2026-01-08T15:35:40.176332Z node 3 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxTrimUnusedSlots 2026-01-08T15:35:40.176471Z node 3 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxTrimUnusedSlots 2026-01-08T15:35:40.176550Z node 3 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateSchemaVersion Leader for TabletID 72057594037932033 is [3:28:2070] sender: [3:49:2057] recipient: [3:11:2058] 2026-01-08T15:35:40.188970Z node 3 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateSchemaVersion 2026-01-08T15:35:40.189179Z node 3 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxGenerateInstanceId 2026-01-08T15:35:40.213172Z node 3 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxGenerateInstanceId 2026-01-08T15:35:40.213310Z node 3 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# 
NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateStaticPDiskInfo 2026-01-08T15:35:40.213391Z node 3 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateStaticPDiskInfo 2026-01-08T15:35:40.213465Z node 3 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxFillInNonNullConfigForPDisk 2026-01-08T15:35:40.213597Z node 3 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxFillInNonNullConfigForPDisk 2026-01-08T15:35:40.213652Z node 3 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxDropDriveStatus 2026-01-08T15:35:40.213691Z node 3 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxDropDriveStatus 2026-01-08T15:35:40.213732Z node 3 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateCompatibilityInfo 2026-01-08T15:35:40.224819Z node 3 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateCompatibilityInfo 2026-01-08T15:35:40.224961Z node 3 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateEnableConfigV2 2026-01-08T15:35:40.235898Z node 3 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateEnableConfigV2 2026-01-08T15:35:40.236043Z node 3 :BS_CONTROLLER DEBUG: {BSCTXLE01@load_everything.cpp:21} TTxLoadEverything Execute 2026-01-08T15:35:40.237392Z node 3 :BS_CONTROLLER DEBUG: {BSCTXLE03@load_everything.cpp:705} TTxLoadEverything Complete 2026-01-08T15:35:40.237443Z node 3 :BS_CONTROLLER DEBUG: {BSC09@impl.h:2211} LoadFinished 2026-01-08T15:35:40.237634Z node 3 :BS_CONTROLLER DEBUG: {BSC18@console_interaction.cpp:31} Console connection service started 2026-01-08T15:35:40.237691Z node 3 :BS_CONTROLLER DEBUG: {BSCTXLE04@load_everything.cpp:710} TTxLoadEverything InitQueue processed 2026-01-08T15:35:40.238312Z node 3 :BS_CONTROLLER DEBUG: {BSCTXCC01@config_cmd.cpp:433} Execute TEvControllerConfigRequest Request# {Command { } } Status { ErrorDescription: "unsupported command 0" } ErrorDescription: "unsupported command 0" Leader for TabletID 72057594037932033 is [0:0:0] sender: [4:26:2057] recipient: [4:23:2068] IGNORE Leader for TabletID 72057594037932033 is [0:0:0] sender: [4:26:2057] recipient: [4:23:2068] Leader for TabletID 72057594037932033 is [4:28:2070] sender: [4:29:2057] recipient: [4:23:2068] 2026-01-08T15:35:40.467988Z node 4 :BS_CONTROLLER DEBUG: {BSC05@impl.h:2080} StateInit event Type# 268828672 Event# NKikimr::TEvTablet::TEvBoot 2026-01-08T15:35:40.468957Z node 4 :BS_CONTROLLER DEBUG: {BSC05@impl.h:2080} StateInit event Type# 268828673 Event# NKikimr::TEvTablet::TEvRestored 2026-01-08T15:35:40.469201Z node 4 :BS_CONTROLLER DEBUG: {BSC22@console_interaction.cpp:14} Console interaction started 2026-01-08T15:35:40.470399Z node 4 :BS_CONTROLLER DEBUG: {BSC05@impl.h:2080} StateInit event Type# 268828684 Event# NKikimr::TEvTablet::TEvFollowerSyncComplete 
2026-01-08T15:35:40.470962Z node 4 :BS_CONTROLLER DEBUG: {BSC05@impl.h:2080} StateInit event Type# 268639244 Event# NKikimr::TEvNodeWardenStorageConfig 2026-01-08T15:35:40.471157Z node 4 :BS_CONTROLLER DEBUG: {BSC05@impl.h:2080} StateInit event Type# 131082 Event# NActors::TEvInterconnect::TEvNodesInfo 2026-01-08T15:35:40.471187Z node 4 :BS_CONTROLLER DEBUG: {BSC01@bsc.cpp:829} Handle TEvInterconnect::TEvNodesInfo 2026-01-08T15:35:40.471339Z node 4 :BS_CONTROLLER DEBUG: {BSCTXIS01@init_scheme.cpp:17} TTxInitScheme Execute 2026-01-08T15:35:40.480399Z node 4 :BS_CONTROLLER DEBUG: {BSCTXIS03@init_scheme.cpp:44} TTxInitScheme Complete 2026-01-08T15:35:40.480531Z node 4 :BS_CONTROLLER DEBUG: {BSCTXM01@migrate.cpp:190} Execute tx 2026-01-08T15:35:40.480644Z node 4 :BS_CONTROLLER DEBUG: {BSCTXM02@migrate.cpp:251} Complete tx IncompatibleData# false 2026-01-08T15:35:40.480761Z node 4 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxTrimUnusedSlots 2026-01-08T15:35:40.480898Z node 4 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxTrimUnusedSlots 2026-01-08T15:35:40.480994Z node 4 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateSchemaVersion Leader for TabletID 72057594037932033 is [4:28:2070] sender: [4:49:2057] recipient: [4:11:2058] 2026-01-08T15:35:40.492246Z node 4 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateSchemaVersion 2026-01-08T15:35:40.492402Z node 4 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxGenerateInstanceId 2026-01-08T15:35:40.514722Z node 4 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxGenerateInstanceId 2026-01-08T15:35:40.514865Z node 4 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateStaticPDiskInfo 2026-01-08T15:35:40.514944Z node 4 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateStaticPDiskInfo 2026-01-08T15:35:40.515021Z node 4 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxFillInNonNullConfigForPDisk 2026-01-08T15:35:40.515142Z node 4 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxFillInNonNullConfigForPDisk 2026-01-08T15:35:40.515205Z node 4 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxDropDriveStatus 2026-01-08T15:35:40.515244Z node 4 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxDropDriveStatus 2026-01-08T15:35:40.515284Z node 4 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateCompatibilityInfo 2026-01-08T15:35:40.525961Z node 4 :BS_CONTROLLER DEBUG: 
{BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateCompatibilityInfo 2026-01-08T15:35:40.526101Z node 4 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateEnableConfigV2 2026-01-08T15:35:40.536790Z node 4 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateEnableConfigV2 2026-01-08T15:35:40.536907Z node 4 :BS_CONTROLLER DEBUG: {BSCTXLE01@load_everything.cpp:21} TTxLoadEverything Execute 2026-01-08T15:35:40.538122Z node 4 :BS_CONTROLLER DEBUG: {BSCTXLE03@load_everything.cpp:705} TTxLoadEverything Complete 2026-01-08T15:35:40.538168Z node 4 :BS_CONTROLLER DEBUG: {BSC09@impl.h:2211} LoadFinished 2026-01-08T15:35:40.538374Z node 4 :BS_CONTROLLER DEBUG: {BSC18@console_interaction.cpp:31} Console connection service started 2026-01-08T15:35:40.538417Z node 4 :BS_CONTROLLER DEBUG: {BSCTXLE04@load_everything.cpp:710} TTxLoadEverything InitQueue processed 2026-01-08T15:35:40.539046Z node 4 :BS_CONTROLLER DEBUG: {BSCTXCC01@config_cmd.cpp:433} Execute TEvControllerConfigRequest Request# {Command { } } Status { ErrorDescription: "unsupported command 0" } ErrorDescription: "unsupported command 0" |87.8%| [TM] {BAZEL_UPLOAD} ydb/core/mind/bscontroller/ut_bscontroller/unittest >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-LocalUser-CreateUser-19 [FAIL] >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-LocalUser-CreateUser-20 >> TDataShardMinStepTest::TestDropTableCompletesQuicklyRW-VolatileTxs [GOOD] >> DataShardVolatile::NotCachingAbortingDeletes+UseSink [GOOD] >> DataShardVolatile::NotCachingAbortingDeletes-UseSink |87.8%| [TA] $(B)/ydb/core/tx/schemeshard/ut_resource_pool/test-results/unittest/{meta.json ... results_accumulator.log} |87.8%| [TA] {BAZEL_UPLOAD} $(B)/ydb/core/tx/schemeshard/ut_resource_pool/test-results/unittest/{meta.json ... results_accumulator.log} >> TTxDataShardUploadRows::TestUploadRows [GOOD] >> TTxDataShardUploadRows::ShouldRejectOnChangeQueueOverflowAndRetry ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/datashard/ut_change_collector/unittest >> AsyncIndexChangeCollector::IndexedPrimaryKeyInsertSingleRow [GOOD] Test command err: ... waiting for SysViewsRoster update finished 2026-01-08T15:35:19.054297Z node 1 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:125: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, got undelivered to scheme cache: ActorUnknown 2026-01-08T15:35:19.196965Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:114:2161], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2026-01-08T15:35:19.197340Z node 1 :METADATA_PROVIDER ERROR: log.cpp:841: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2026-01-08T15:35:19.197520Z node 1 :METADATA_PROVIDER ERROR: log.cpp:841: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2026-01-08T15:35:19.513596Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2026-01-08T15:35:19.513766Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2026-01-08T15:35:19.623050Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1374: Notification cookie mismatch for subscription [1:34:2081] 1767886515641682 != 1767886515641686 2026-01-08T15:35:19.639513Z node 1 :IMPORT WARN: schemeshard_import.cpp:440: Table profiles were not loaded 2026-01-08T15:35:19.689988Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2026-01-08T15:35:19.806072Z node 1 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: PathErrorUnknown, path: Root/.metadata/script_executions ... waiting for SysViewsRoster update finished (done) 2026-01-08T15:35:20.156804Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2026-01-08T15:35:20.174970Z node 1 :TX_CONVEYOR ERROR: log.cpp:841: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2026-01-08T15:35:20.301886Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:35:20.393588Z node 1 :TX_DATASHARD INFO: datashard.cpp:374: TDataShard::OnActivateExecutor: tablet 72075186224037888 actor [1:897:2773] 2026-01-08T15:35:20.393949Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:661: TxInitSchema.Execute 2026-01-08T15:35:20.453759Z node 1 :TX_DATASHARD INFO: datashard.cpp:374: TDataShard::OnActivateExecutor: tablet 72075186224037889 actor [1:899:2775] 2026-01-08T15:35:20.454066Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:661: TxInitSchema.Execute 2026-01-08T15:35:20.465442Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:727: TxInitSchema.Complete 2026-01-08T15:35:20.465622Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:50: TDataShard::TTxInit::Execute 2026-01-08T15:35:20.467783Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1324: LoadChangeRecords: QueueSize: 0, at tablet: 72075186224037888 2026-01-08T15:35:20.467906Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1380: LoadLockChangeRecords at tablet: 72075186224037888 2026-01-08T15:35:20.467963Z node 1 
:TX_DATASHARD DEBUG: datashard.cpp:1429: LoadChangeRecordCommits at tablet: 72075186224037888 2026-01-08T15:35:20.468421Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:94: TDataShard::TTxInit::Complete 2026-01-08T15:35:20.468734Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:102: TDataShard::TTxInitRestored::Execute 2026-01-08T15:35:20.468823Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:142: DataShard 72075186224037888 persisting started state actor id [1:928:2773] in generation 1 2026-01-08T15:35:20.469366Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:727: TxInitSchema.Complete 2026-01-08T15:35:20.469496Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:50: TDataShard::TTxInit::Execute 2026-01-08T15:35:20.471333Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1324: LoadChangeRecords: QueueSize: 0, at tablet: 72075186224037889 2026-01-08T15:35:20.471404Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1380: LoadLockChangeRecords at tablet: 72075186224037889 2026-01-08T15:35:20.471465Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1429: LoadChangeRecordCommits at tablet: 72075186224037889 2026-01-08T15:35:20.471813Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:94: TDataShard::TTxInit::Complete 2026-01-08T15:35:20.471937Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:102: TDataShard::TTxInitRestored::Execute 2026-01-08T15:35:20.472017Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:142: DataShard 72075186224037889 persisting started state actor id [1:929:2775] in generation 1 2026-01-08T15:35:20.483090Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:151: TDataShard::TTxInitRestored::Complete 2026-01-08T15:35:20.524128Z node 1 :TX_DATASHARD INFO: datashard.cpp:418: Switched to work state WaitScheme tabletId 72075186224037888 2026-01-08T15:35:20.524507Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:458: 72075186224037888 not sending time cast registration request in state WaitScheme: missing processing params 2026-01-08T15:35:20.524658Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1259: Change sender created: at tablet: 72075186224037888, actorId: [1:932:2794] 2026-01-08T15:35:20.524706Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1264: Trying to activate change sender: at tablet: 72075186224037888 2026-01-08T15:35:20.524746Z node 1 :TX_DATASHARD INFO: datashard.cpp:1281: Cannot activate change sender: at tablet: 72075186224037888, state: WaitScheme 2026-01-08T15:35:20.524799Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2026-01-08T15:35:20.525408Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:151: TDataShard::TTxInitRestored::Complete 2026-01-08T15:35:20.525464Z node 1 :TX_DATASHARD INFO: datashard.cpp:418: Switched to work state WaitScheme tabletId 72075186224037889 2026-01-08T15:35:20.525537Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:458: 72075186224037889 not sending time cast registration request in state WaitScheme: missing processing params 2026-01-08T15:35:20.525603Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1259: Change sender created: at tablet: 72075186224037889, actorId: [1:933:2795] 2026-01-08T15:35:20.525631Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1264: Trying to activate change sender: at tablet: 72075186224037889 2026-01-08T15:35:20.525675Z node 1 :TX_DATASHARD INFO: datashard.cpp:1281: Cannot activate change sender: at tablet: 72075186224037889, state: WaitScheme 2026-01-08T15:35:20.525709Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 
72075186224037889 2026-01-08T15:35:20.525931Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 72075186224037888 2026-01-08T15:35:20.526046Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 72075186224037888 2026-01-08T15:35:20.526264Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2026-01-08T15:35:20.526309Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 0 2026-01-08T15:35:20.526508Z node 1 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186224037888 TxInFly 0 2026-01-08T15:35:20.526554Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2026-01-08T15:35:20.526714Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3740: Server connected at leader tablet# 72075186224037888, clientId# [1:890:2769], serverId# [1:901:2776], sessionId# [0:0:0] 2026-01-08T15:35:20.526761Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 72075186224037889 2026-01-08T15:35:20.526825Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 72075186224037889 2026-01-08T15:35:20.527382Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037888 2026-01-08T15:35:20.527639Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:133: Propose scheme transaction at tablet 72075186224037888 txId 281474976715657 ssId 72057594046644480 seqNo 2:1 2026-01-08T15:35:20.527794Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:221: Prepared scheme transaction txId 281474976715657 at tablet 72075186224037888 2026-01-08T15:35:20.528149Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037889 2026-01-08T15:35:20.528175Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037889 active 0 active planned 0 immediate 0 planned 0 2026-01-08T15:35:20.528200Z node 1 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186224037889 TxInFly 0 2026-01-08T15:35:20.528228Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037889 2026-01-08T15:35:20.528330Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3740: Server connected at leader tablet# 72075186224037889, clientId# [1:891:2770], serverId# [1:903:2777], sessionId# [0:0:0] 2026-01-08T15:35:20.528589Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037889 2026-01-08T15:35:20.528772Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:133: Propose scheme transaction at tablet 72075186224037889 txId 281474976715657 ssId 72057594046644480 seqNo 2:2 2026-01-08T15:35:20.528824Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:221: Prepared scheme transaction txId 281474976715657 at tablet 72075186224037889 2026-01-08T15:35:20.530221Z node 1 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:180: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2026-01-08T15:35:20.530284Z node 1 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:180: Discovered subdomain [OwnerId: 
72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037889 2026-01-08T15:35:20.541237Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 72075186224037888 2026-01-08T15:35:20.541394Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:468: 72075186224037888 not sending time cast registration request in state WaitScheme 2026-01-08T15:35:20.541501Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 72075186224037889 2026-01-08T15:35:20.541541Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:468: 72075186224037889 not sending time cast registration request in state WaitScheme 2026-01-08T15:35:20.663863Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3740: Server connected at leader tablet# 72075186224037889, clientId# [1:950:2806], serverId# [1:953:2809], sessionId# [0:0:0] 2026- ... ransaction::Execute at 72075186224037889 2026-01-08T15:35:39.971138Z node 4 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037889 active 0 active planned 0 immediate 0 planned 1 2026-01-08T15:35:39.971171Z node 4 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:333: Found ready operation [2000:281474976715657] in PlanQueue unit at 72075186224037889 2026-01-08T15:35:39.971351Z node 4 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:634: LoadTxDetails at 72075186224037889 loaded tx from db 2000:281474976715657 keys extracted: 0 2026-01-08T15:35:39.971441Z node 4 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037889 (dry run) active 1 active planned 1 immediate 0 planned 1 2026-01-08T15:35:39.971594Z node 4 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2026-01-08T15:35:39.971684Z node 4 :TX_DATASHARD INFO: create_table_unit.cpp:69: Trying to CREATE TABLE at 72075186224037888 tableId# [OwnerId: 72057594046644480, LocalPathId: 40] schema version# 1 2026-01-08T15:35:39.972241Z node 4 :TX_DATASHARD INFO: datashard.cpp:476: Send registration request to time cast Ready tabletId 72075186224037888 mediators count is 1 coordinators count is 1 buckets per mediator 2 2026-01-08T15:35:39.972706Z node 4 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 (dry run) active 0 active planned 0 immediate 0 planned 0 2026-01-08T15:35:39.973954Z node 4 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037889 2026-01-08T15:35:39.974018Z node 4 :TX_DATASHARD INFO: create_table_unit.cpp:69: Trying to CREATE TABLE at 72075186224037889 tableId# [OwnerId: 72057594046644480, LocalPathId: 38] schema version# 1 2026-01-08T15:35:39.974424Z node 4 :TX_DATASHARD INFO: datashard.cpp:476: Send registration request to time cast Ready tabletId 72075186224037889 mediators count is 1 coordinators count is 1 buckets per mediator 2 2026-01-08T15:35:39.974777Z node 4 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037889 (dry run) active 0 active planned 0 immediate 0 planned 0 2026-01-08T15:35:39.976067Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:3772: Got TEvMediatorTimecast::TEvRegisterTabletResult at 72075186224037888 time 0 2026-01-08T15:35:39.976125Z node 4 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2026-01-08T15:35:39.983234Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:3772: Got TEvMediatorTimecast::TEvRegisterTabletResult at 72075186224037889 
time 0 2026-01-08T15:35:39.983303Z node 4 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037889 2026-01-08T15:35:39.984250Z node 4 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:98: Sending '{TEvPlanStepAccepted TabletId# 72075186224037888 step# 2000} 2026-01-08T15:35:39.984352Z node 4 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2026-01-08T15:35:39.984485Z node 4 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:98: Sending '{TEvPlanStepAccepted TabletId# 72075186224037889 step# 2000} 2026-01-08T15:35:39.984525Z node 4 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037889 2026-01-08T15:35:39.985808Z node 4 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2026-01-08T15:35:39.985882Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:1264: Trying to activate change sender: at tablet: 72075186224037888 2026-01-08T15:35:39.985942Z node 4 :TX_DATASHARD INFO: datashard.cpp:1302: Change sender activated: at tablet: 72075186224037888 2026-01-08T15:35:39.986018Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:810: Complete [2000 : 281474976715657] from 72075186224037888 at tablet 72075186224037888 send result to client [4:400:2399], exec latency: 0 ms, propose latency: 0 ms 2026-01-08T15:35:39.986105Z node 4 :TX_DATASHARD INFO: datashard.cpp:1599: 72075186224037888 Sending notify to schemeshard 72057594046644480 txId 281474976715657 state Ready TxInFly 0 2026-01-08T15:35:39.986203Z node 4 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2026-01-08T15:35:39.988611Z node 4 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:180: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2026-01-08T15:35:39.988746Z node 4 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:180: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037889 2026-01-08T15:35:39.988931Z node 4 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037889 2026-01-08T15:35:39.988987Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:1264: Trying to activate change sender: at tablet: 72075186224037889 2026-01-08T15:35:39.989022Z node 4 :TX_DATASHARD INFO: datashard.cpp:1302: Change sender activated: at tablet: 72075186224037889 2026-01-08T15:35:39.989079Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:810: Complete [2000 : 281474976715657] from 72075186224037889 at tablet 72075186224037889 send result to client [4:400:2399], exec latency: 0 ms, propose latency: 0 ms 2026-01-08T15:35:39.989148Z node 4 :TX_DATASHARD INFO: datashard.cpp:1599: 72075186224037889 Sending notify to schemeshard 72057594046644480 txId 281474976715657 state Ready TxInFly 0 2026-01-08T15:35:39.989219Z node 4 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037889 2026-01-08T15:35:39.992689Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:3790: Got TEvMediatorTimecast::TEvSubscribeReadStepResult at 72075186224037888 coordinator 72057594046316545 last step 0 next step 2000 2026-01-08T15:35:39.992990Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:2965: Handle TEvSchemaChangedResult 281474976715657 datashard 72075186224037888 state Ready 2026-01-08T15:35:39.993054Z node 4 
:TX_DATASHARD DEBUG: datashard__schema_changed.cpp:22: 72075186224037888 Got TEvSchemaChangedResult from SS at 72075186224037888 2026-01-08T15:35:39.993207Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:3790: Got TEvMediatorTimecast::TEvSubscribeReadStepResult at 72075186224037889 coordinator 72057594046316545 last step 0 next step 2000 2026-01-08T15:35:39.993899Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:2965: Handle TEvSchemaChangedResult 281474976715657 datashard 72075186224037889 state Ready 2026-01-08T15:35:39.993956Z node 4 :TX_DATASHARD DEBUG: datashard__schema_changed.cpp:22: 72075186224037889 Got TEvSchemaChangedResult from SS at 72075186224037889 2026-01-08T15:35:40.004991Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:998:2846], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:35:40.005122Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:1008:2851], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:35:40.005576Z node 4 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:263: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:35:40.006340Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:1014:2856], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:35:40.006781Z node 4 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:263: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:35:40.011463Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2026-01-08T15:35:40.018424Z node 4 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:180: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2026-01-08T15:35:40.018571Z node 4 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:180: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037889 2026-01-08T15:35:40.143951Z node 4 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:180: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2026-01-08T15:35:40.144111Z node 4 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:180: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037889 2026-01-08T15:35:40.147390Z node 4 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [4:1012:2854], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715658 completed, doublechecking } 2026-01-08T15:35:40.185672Z node 4 :TX_PROXY ERROR: schemereq.cpp:624: Actor# [4:1071:2894] txid# 281474976715659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 44], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2026-01-08T15:35:40.271197Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:3740: Server connected at leader tablet# 72075186224037889, clientId# [4:1139:2925], serverId# [4:1140:2926], sessionId# [0:0:0] 2026-01-08T15:35:40.271631Z node 4 :TX_DATASHARD DEBUG: execute_write_unit.cpp:262: Executing write operation for [0:2] at 72075186224037889 2026-01-08T15:35:40.271937Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:883: PersistChangeRecord: record: { Order: 1 Group: 1767886540271822 Step: 2500 TxId: 18446744073709551615 PathId: [OwnerId: 72057594046644480, LocalPathId: 39] Kind: AsyncIndex Source: Unspecified Body: 28b TableId: [OwnerId: 72057594046644480, LocalPathId: 38] SchemaVersion: 1 LockId: 0 LockOffset: 0 }, at tablet: 72075186224037889 2026-01-08T15:35:40.272160Z node 4 :TX_DATASHARD DEBUG: execute_write_unit.cpp:465: Executed write operation for [0:2] at 72075186224037889, row count=1 2026-01-08T15:35:40.286159Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:1179: EnqueueChangeRecords: at tablet: 72075186224037889, records: { Order: 1 PathId: [OwnerId: 72057594046644480, LocalPathId: 39] BodySize: 28 TableId: [OwnerId: 72057594046644480, LocalPathId: 38] SchemaVersion: 1 } 2026-01-08T15:35:40.286254Z node 4 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037889 2026-01-08T15:35:40.291366Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:3740: Server connected at leader tablet# 72075186224037889, clientId# [4:1146:2931], serverId# [4:1147:2932], sessionId# [0:0:0] 2026-01-08T15:35:40.297129Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:3740: Server connected at leader tablet# 72075186224037889, clientId# [4:1148:2933], serverId# [4:1149:2934], sessionId# [0:0:0] |87.8%| [TM] {BAZEL_UPLOAD} ydb/core/tx/datashard/ut_change_collector/unittest >> CdcStreamChangeCollector::UpsertModifyDelete [GOOD] >> CdcStreamChangeCollector::NewImage [GOOD] >> TxUsage::Write_And_Read_Small_Messages_2 >> KikimrIcGateway::TestLoadAwsSecretValueFromExternalDataSourceMetadata-UseSchemaSecrets [GOOD] >> KikimrIcGateway::TestLoadDataSourceProperties+UseSchemaSecrets >> TPQTest::TestSourceIdDropBySourceIdCount [GOOD] >> TPQTest::TestTimeRetention >> KikimrIcGateway::TestSecretsExistingValidation+UseSchemaSecrets [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/datashard/ut_minstep/unittest >> TDataShardMinStepTest::TestDropTableCompletesQuicklyRW-VolatileTxs [GOOD] Test command err: ... waiting for SysViewsRoster update finished 2026-01-08T15:35:31.145527Z node 1 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:125: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, got undelivered to scheme cache: ActorUnknown 2026-01-08T15:35:31.274810Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:114:2161], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2026-01-08T15:35:31.275202Z node 1 :METADATA_PROVIDER ERROR: log.cpp:841: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2026-01-08T15:35:31.275365Z node 1 :METADATA_PROVIDER ERROR: log.cpp:841: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2026-01-08T15:35:31.571727Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2026-01-08T15:35:31.571887Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2026-01-08T15:35:31.644111Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1374: Notification cookie mismatch for subscription [1:34:2081] 1767886528219958 != 1767886528219962 2026-01-08T15:35:31.655024Z node 1 :IMPORT WARN: schemeshard_import.cpp:440: Table profiles were not loaded 2026-01-08T15:35:31.703918Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2026-01-08T15:35:31.815445Z node 1 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: PathErrorUnknown, path: Root/.metadata/script_executions ... waiting for SysViewsRoster update finished (done) 2026-01-08T15:35:32.185960Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //Root, opId: 1:0, at schemeshard: 72057594046644480 2026-01-08T15:35:32.186444Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:127: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046644480 2026-01-08T15:35:32.188453Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2026-01-08T15:35:32.189420Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //Root 2026-01-08T15:35:32.191780Z node 1 :TX_COORDINATOR DEBUG: coordinator_impl.cpp:183: tablet# 72057594046316545 txid# 1 HANDLE EvProposeTransaction marker# C0 2026-01-08T15:35:32.191850Z node 1 :TX_COORDINATOR DEBUG: coordinator_impl.cpp:29: tablet# 72057594046316545 txid# 1 step# 1500 Status# 16 SEND to# [1:400:2399] Proxy marker# C1 2026-01-08T15:35:32.205413Z node 1 :TX_CONVEYOR ERROR: log.cpp:841: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2026-01-08T15:35:32.238251Z node 1 :HIVE DEBUG: hive_impl.cpp:2519: HIVE#72057594037968897 ProcessTabletBalancer [(72057594046644480:1,0)] MaxUsage=0.000000000 on #1 MinUsage=0.000000000 on #1 Scatter=0.000000000 2026-01-08T15:35:32.238475Z node 1 :HIVE DEBUG: hive_impl.cpp:406: HIVE#72057594037968897 Handle BalancerOut 2026-01-08T15:35:32.324063Z node 1 :TX_COORDINATOR DEBUG: coordinator__plan_step.cpp:184: Transaction 1 has been planned 2026-01-08T15:35:32.324214Z node 
1 :TX_COORDINATOR DEBUG: coordinator__plan_step.cpp:197: Planned transaction 1 for mediator 72057594046382081 tablet 72057594046644480 2026-01-08T15:35:32.324509Z node 1 :TX_COORDINATOR TRACE: coordinator_impl.cpp:268: Coordinator# 72057594046316545 scheduling step 2000 in 0.500000s at 1.950000s 2026-01-08T15:35:32.325062Z node 1 :TX_COORDINATOR DEBUG: coordinator_impl.cpp:580: Send from# 72057594046316545 to mediator# 72057594046382081, step# 1500, txid# 1 marker# C2 2026-01-08T15:35:32.325146Z node 1 :TX_COORDINATOR DEBUG: coordinator_impl.cpp:424: tablet# 72057594046316545 txid# 1 stepId# 1500 Status# 17 SEND EvProposeTransactionStatus to# [1:400:2399] Proxy 2026-01-08T15:35:32.325711Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:685: TTxOperationPlanStep Execute, stepId: 1500, transactions count in step: 1, at schemeshard: 72057594046644480 2026-01-08T15:35:32.327590Z node 1 :TX_COORDINATOR DEBUG: coordinator_impl.cpp:397: tablet# 72057594046316545 HANDLE EvMediatorQueueConfirmations MediatorId# 72057594046382081 2026-01-08T15:35:32.327734Z node 1 :TX_COORDINATOR DEBUG: coordinator__mediators_confirmations.cpp:84: at tablet# 72057594046316545 [2:10] persistent tx 1 for mediator 72057594046382081 tablet 72057594046644480 removed=1 2026-01-08T15:35:32.327805Z node 1 :TX_COORDINATOR DEBUG: coordinator__mediators_confirmations.cpp:91: at tablet# 72057594046316545 [2:10] persistent tx 1 for mediator 72057594046382081 acknowledged 2026-01-08T15:35:32.327856Z node 1 :TX_COORDINATOR DEBUG: coordinator__mediators_confirmations.cpp:99: at tablet# 72057594046316545 [2:10] persistent tx 1 acknowledged 2026-01-08T15:35:32.328178Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:995: Operation and all the parts is done, operation id: 1:0 2026-01-08T15:35:32.328251Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:1004: Publication still in progress, tx: 1, publications: 1, subscribers: 1 2026-01-08T15:35:32.329208Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046644480, txId: 1, subscribers: 1 2026-01-08T15:35:32.334004Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_create_table.cpp:445: TCreateTable Propose, path: /Root/table-1, opId: 281474976715657:0, at schemeshard: 72057594046644480 2026-01-08T15:35:32.335760Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:127: IgniteOperation, opId: 281474976715657:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046644480 2026-01-08T15:35:32.335870Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:35:32.337012Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 281474976715657, database: /Root, subject: , status: StatusAccepted, operation: CREATE TABLE, path: /Root/table-1 2026-01-08T15:35:32.352668Z node 1 :HIVE DEBUG: hive_impl.cpp:55: HIVE#72057594037968897 Handle TEvHive::TEvCreateTablet(DataShard(72057594046644480,1)) 2026-01-08T15:35:32.385024Z node 1 :HIVE DEBUG: tx__create_tablet.cpp:200: HIVE#72057594037968897 THive::TTxCreateTablet::Execute Owner: 72057594046644480 OwnerIdx: 1 TabletType: DataShard 
ObjectDomain { SchemeShard: 72057594046644480 PathId: 1 } ObjectId: 38 BindedChannels { StoragePoolName: "/Root:test" StoragePoolKind: "test" } BindedChannels { StoragePoolName: "/Root:test" StoragePoolKind: "test" } AllowedDomains { SchemeShard: 72057594046644480 PathId: 1 } 2026-01-08T15:35:32.385175Z node 1 :HIVE DEBUG: tx__create_tablet.cpp:354: HIVE#72057594037968897 Hive 72057594037968897 allocated TabletId 72075186224037888 from TabletIdIndex 65536 2026-01-08T15:35:32.385442Z node 1 :HIVE DEBUG: tx__create_tablet.cpp:446: HIVE#72057594037968897 THive::TTxCreateTablet::Execute; Default resources after merge for type DataShard: {} 2026-01-08T15:35:32.385514Z node 1 :HIVE DEBUG: tx__create_tablet.cpp:449: HIVE#72057594037968897 THive::TTxCreateTablet::Execute; Default resources after merge for object (72057594046644480,38): {} 2026-01-08T15:35:32.385595Z node 1 :HIVE DEBUG: tx__create_tablet.cpp:453: HIVE#72057594037968897 THive::TTxCreateTablet::Execute; Default resources after merge for profile 'default': {Memory: 1048576} 2026-01-08T15:35:32.385791Z node 1 :HIVE DEBUG: hive_impl.cpp:2904: HIVE#72057594037968897 CreateTabletFollowers Tablet DataShard.72075186224037888.Leader.0 2026-01-08T15:35:32.395084Z node 1 :HIVE DEBUG: tx__create_tablet.cpp:173: HIVE#72057594037968897 THive::TTxCreateTablet::Execute TabletId: 72075186224037888 Status: OK 2026-01-08T15:35:32.395340Z node 1 :HIVE DEBUG: hive_impl.cpp:1108: HIVE#72057594037968897 THive::AssignTabletGroups TEvControllerSelectGroups tablet 72075186224037888 GroupParameters { StoragePoolSpecifier { Name: "/Root:test" } } ReturnAllMatchingGroups: true 2026-01-08T15:35:32.396124Z node 1 :HIVE DEBUG: hive_impl.cpp:93: HIVE#72057594037968897 Connected to tablet 72057594037932033 from tablet 72057594037968897 2026-01-08T15:35:32.396551Z node 1 :HIVE DEBUG: hive_impl.cpp:461: HIVE#72057594037968897 THive::Handle TEvControllerSelectGroupsResult: success Status: OK NewStyleQuerySupported: true MatchingGroups { Groups { ErasureSpecies: 0 GroupID: 2181038080 StoragePoolName: "/Root:test" AssuredResources { } CurrentResources { } PhysicalGroup: true Decommitted: false GroupSizeInUnits: 0 } } 2026-01-08T15:35:32.396685Z node 1 :HIVE DEBUG: tx__update_tablet_groups.cpp:63: HIVE#72057594037968897 THive::TTxUpdateTabletGroups::Execute{136944987000736}(72075186224037888,HIVE_REASSIGN_REASON_NO,[]) 2026-01-08T15:35:32.396793Z node 1 :HIVE DEBUG: tx__update_tablet_groups.cpp:151: HIVE#72057594037968897 THive::TTxUpdateTabletGroups::Execute{136944987000736}: tablet 72075186224037888 channel 0 assigned to group 2181038080 2026-01-08T15:35:32.396973Z node 1 :HIVE DEBUG: tx__update_tablet_groups.cpp:151: HIVE#72057594037968897 THive::TTxUpdateTabletGroups::Execute{136944987000736}: tablet 72075186224037888 channel 1 assigned to group 2181038080 2026-01-08T15:35:32.397106Z node 1 :HIVE DEBUG: tablet_info.cpp:125: HIVE#72057594037968897 Tablet(DataShard.72075186224037888.Leader.0) VolatileState: Unknown -> Stopped 2026-01-08T15:35:32.397178Z node 1 :HIVE DEBUG: tablet_info.cpp:125: HIVE#72057594037968897 Tablet(DataShard.72075186224037888.Leader.0) VolatileState: Stopped -> Booting 2026-01-08T15:35:32.397237Z node 1 :HIVE DEBUG: hive_impl.cpp:370: HIVE#72057594037968897 ProcessBootQueue (1) 2026-01-08T15:35:32.397455Z node 1 :HIVE DEBUG: tx__process_boot_queue.cpp:18: HIVE#72057594037968897 THive::TTxProcessBootQueue()::Execute 2026-01-08T15:35:32.397511Z node 1 :HIVE DEBUG: hive_impl.cpp:250: HIVE#72057594037968897 Handle ProcessBootQueue (size: 1) 
2026-01-08T15:35:32.397569Z node 1 :HIVE DEBUG: hive_impl.cpp:1254: HIVE#72057594037968897 [FBN] Finding best node for tablet DataShard.72075186224037888.Leader.0 2026-01-08T15:35:32.397700Z node 1 :HIVE DEBUG: tablet_info.cpp:123: HIVE#72057594037968897 Tablet(DataShard.72075186224037888.Leader.0) VolatileState: Booting -> Starting (Node 1) 2026-01-08T15:35:32.397847Z node 1 :HIVE DEBUG: hive_impl.cpp:330: HIVE#72057594037968897 ProcessBootQueue - BootQueue empty (WaitQueue: 0) 2026-01-08T15:35:32.397995Z node 1 :HIVE DEBUG: tx__start_tablet.cpp:31: HIVE#72057594037968897 THive::TTxStartTablet::Execute Tablet (72075186224037888,0) 2026-01-08T15:35:32.398235Z node 1 :HIVE DEBUG: tx__start_tablet.cpp:73: HIVE#72057594037968897 THive::TTxStartTablet::Execute, Sending TEvBootTablet(DataShard.72075186224037888.Leader.1) to node 1 storage {Version# 1 TabletID# 72075186224037888 T ... reason: , at schemeshard: 72057594046644480 2026-01-08T15:35:41.143423Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 281474976715665, database: /Root, subject: , status: StatusAccepted, operation: DROP TABLE, path: /Root/table-2 2026-01-08T15:35:41.147697Z node 2 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:180: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2026-01-08T15:35:41.148021Z node 2 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037889 2026-01-08T15:35:41.148169Z node 2 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:133: Propose scheme transaction at tablet 72075186224037889 txId 281474976715665 ssId 72057594046644480 seqNo 2:4 2026-01-08T15:35:41.148265Z node 2 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:221: Prepared scheme transaction txId 281474976715665 at tablet 72075186224037889 2026-01-08T15:35:41.148596Z node 2 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:180: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037889 2026-01-08T15:35:41.163287Z node 2 :HIVE DEBUG: tx__delete_tablet.cpp:136: HIVE#72057594037968897 THive::TTxDeleteTablet::Complete() SideEffects: {Notifications: 0x10080003 [2:325:2367] NKikimrLocal.TEvStopTablet TabletId: 72075186224037888 FollowerId: 0 Generation: 1,0x10040206 [2:399:2398] NKikimrHive.TEvDeleteTabletReply Status: OK Origin: 72057594037968897 TxId_Deprecated: 0 ShardOwnerId: 72057594046644480 ShardLocalIdx: 1 Actions: NKikimr::TTabletReqBlockBlobStorage} 2026-01-08T15:35:41.164267Z node 2 :TX_DATASHARD INFO: datashard.cpp:196: OnTabletStop: 72075186224037888 reason = ReasonStop 2026-01-08T15:35:41.165504Z node 2 :TX_DATASHARD INFO: datashard.cpp:256: OnTabletDead: 72075186224037888 2026-01-08T15:35:41.165662Z node 2 :TX_DATASHARD INFO: datashard.cpp:1310: Change sender killed: at tablet: 72075186224037888 2026-01-08T15:35:41.168539Z node 2 :HIVE DEBUG: tx__block_storage_result.cpp:23: HIVE#72057594037968897 THive::TTxBlockStorageResult::Execute(72075186224037888 OK) 2026-01-08T15:35:41.168652Z node 2 :HIVE DEBUG: tx__block_storage_result.cpp:64: HIVE#72057594037968897 THive::TTxBlockStorageResult::Complete(72075186224037888 OK) 2026-01-08T15:35:41.169453Z node 2 :HIVE DEBUG: hive_impl.cpp:925: HIVE#72057594037968897 THive::Handle::TEvInitiateDeleteStorage TabletId=72075186224037888 2026-01-08T15:35:41.169904Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:160: TTxCleanDroppedPaths Complete, done 
PersistRemovePath for 1 paths, skipped 0, left 0 candidates, at schemeshard: 72057594046644480 2026-01-08T15:35:41.170021Z node 2 :HIVE DEBUG: hive_impl.cpp:508: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus, TabletId: 72075186224037888 2026-01-08T15:35:41.170082Z node 2 :HIVE WARN: hive_impl.cpp:519: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 2, TabletId: 72075186224037888 not found 2026-01-08T15:35:41.170475Z node 2 :HIVE DEBUG: tx__delete_tablet_result.cpp:26: HIVE#72057594037968897 THive::TTxDeleteTabletResult::Execute(72075186224037888 OK) 2026-01-08T15:35:41.181805Z node 2 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 72075186224037889 2026-01-08T15:35:41.183481Z node 2 :TX_COORDINATOR DEBUG: coordinator_impl.cpp:183: tablet# 72057594046316545 txid# 281474976715665 HANDLE EvProposeTransaction marker# C0 2026-01-08T15:35:41.183576Z node 2 :TX_COORDINATOR DEBUG: coordinator_impl.cpp:29: tablet# 72057594046316545 txid# 281474976715665 step# 4500 Status# 16 SEND to# [2:399:2398] Proxy marker# C1 2026-01-08T15:35:41.195502Z node 2 :HIVE DEBUG: tx__delete_tablet_result.cpp:72: HIVE#72057594037968897 THive::TTxDeleteTabletResult(72075186224037888)::Complete SideEffects {} 2026-01-08T15:35:41.282980Z node 2 :TX_COORDINATOR DEBUG: coordinator__plan_step.cpp:184: Transaction 281474976715665 has been planned 2026-01-08T15:35:41.283122Z node 2 :TX_COORDINATOR DEBUG: coordinator__plan_step.cpp:197: Planned transaction 281474976715665 for mediator 72057594046382081 tablet 72057594046644480 2026-01-08T15:35:41.283180Z node 2 :TX_COORDINATOR DEBUG: coordinator__plan_step.cpp:197: Planned transaction 281474976715665 for mediator 72057594046382081 tablet 72075186224037889 2026-01-08T15:35:41.283515Z node 2 :TX_COORDINATOR TRACE: coordinator_impl.cpp:268: Coordinator# 72057594046316545 scheduling step 5000 in 0.500000s at 4.950000s 2026-01-08T15:35:41.284030Z node 2 :TX_COORDINATOR DEBUG: coordinator_impl.cpp:580: Send from# 72057594046316545 to mediator# 72057594046382081, step# 4500, txid# 281474976715665 marker# C2 2026-01-08T15:35:41.284128Z node 2 :TX_COORDINATOR DEBUG: coordinator_impl.cpp:424: tablet# 72057594046316545 txid# 281474976715665 stepId# 4500 Status# 17 SEND EvProposeTransactionStatus to# [2:399:2398] Proxy 2026-01-08T15:35:41.284883Z node 2 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:69: Planned transaction txId 281474976715665 at step 4500 at tablet 72075186224037889 { Transactions { TxId: 281474976715665 AckTo { RawX1: 0 RawX2: 0 } } Step: 4500 MediatorID: 72057594046382081 TabletID: 72075186224037889 } 2026-01-08T15:35:41.284952Z node 2 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037889 2026-01-08T15:35:41.285166Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:685: TTxOperationPlanStep Execute, stepId: 4500, transactions count in step: 1, at schemeshard: 72057594046644480 2026-01-08T15:35:41.285655Z node 2 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037889 2026-01-08T15:35:41.285714Z node 2 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037889 active 0 active planned 0 immediate 0 planned 1 2026-01-08T15:35:41.285768Z node 2 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:333: Found ready operation [4500:281474976715665] in PlanQueue unit at 72075186224037889 2026-01-08T15:35:41.285987Z node 2 :TX_DATASHARD DEBUG: 
datashard_pipeline.cpp:634: LoadTxDetails at 72075186224037889 loaded tx from db 4500:281474976715665 keys extracted: 0 2026-01-08T15:35:41.286139Z node 2 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037889 (dry run) active 1 active planned 1 immediate 0 planned 1 2026-01-08T15:35:41.287766Z node 2 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037889 2026-01-08T15:35:41.287937Z node 2 :TX_DATASHARD INFO: drop_table_unit.cpp:72: Trying to DROP TABLE at 72075186224037889 2026-01-08T15:35:41.288532Z node 2 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037889 (dry run) active 0 active planned 0 immediate 0 planned 0 2026-01-08T15:35:41.290997Z node 2 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:98: Sending '{TEvPlanStepAccepted TabletId# 72075186224037889 step# 4500} 2026-01-08T15:35:41.291093Z node 2 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037889 2026-01-08T15:35:41.291488Z node 2 :TX_COORDINATOR DEBUG: coordinator_impl.cpp:397: tablet# 72057594046316545 HANDLE EvMediatorQueueConfirmations MediatorId# 72057594046382081 2026-01-08T15:35:41.291613Z node 2 :TX_COORDINATOR DEBUG: coordinator__mediators_confirmations.cpp:84: at tablet# 72057594046316545 [2:24] persistent tx 281474976715665 for mediator 72057594046382081 tablet 72057594046644480 removed=1 2026-01-08T15:35:41.291704Z node 2 :TX_COORDINATOR DEBUG: coordinator__mediators_confirmations.cpp:84: at tablet# 72057594046316545 [2:24] persistent tx 281474976715665 for mediator 72057594046382081 tablet 72075186224037889 removed=1 2026-01-08T15:35:41.291755Z node 2 :TX_COORDINATOR DEBUG: coordinator__mediators_confirmations.cpp:91: at tablet# 72057594046316545 [2:24] persistent tx 281474976715665 for mediator 72057594046382081 acknowledged 2026-01-08T15:35:41.291816Z node 2 :TX_COORDINATOR DEBUG: coordinator__mediators_confirmations.cpp:99: at tablet# 72057594046316545 [2:24] persistent tx 281474976715665 acknowledged 2026-01-08T15:35:41.292112Z node 2 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037889 2026-01-08T15:35:41.292193Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:810: Complete [4500 : 281474976715665] from 72075186224037889 at tablet 72075186224037889 send result to client [2:399:2398], exec latency: 0 ms, propose latency: 0 ms 2026-01-08T15:35:41.292267Z node 2 :TX_DATASHARD INFO: datashard.cpp:1599: 72075186224037889 Sending notify to schemeshard 72057594046644480 txId 281474976715665 state PreOffline TxInFly 0 2026-01-08T15:35:41.298696Z node 2 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037889 2026-01-08T15:35:41.300305Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:1125: All parts have reached barrier, tx: 281474976715665, done: 0, blocked: 1 2026-01-08T15:35:41.308664Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:2965: Handle TEvSchemaChangedResult 281474976715665 datashard 72075186224037889 state PreOffline 2026-01-08T15:35:41.308793Z node 2 :TX_DATASHARD DEBUG: datashard__schema_changed.cpp:22: 72075186224037889 Got TEvSchemaChangedResult from SS at 72075186224037889 2026-01-08T15:35:41.309636Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:995: Operation and all the parts is done, operation id: 281474976715665:0 2026-01-08T15:35:41.309768Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: 
schemeshard__operation_side_effects.cpp:1004: Publication still in progress, tx: 281474976715665, publications: 1, subscribers: 1 2026-01-08T15:35:41.314814Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046644480, txId: 281474976715665, subscribers: 1 2026-01-08T15:35:41.316318Z node 2 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:180: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037889 2026-01-08T15:35:41.339273Z node 2 :TX_DATASHARD DEBUG: datashard_loans.cpp:220: 72075186224037889 in PreOffline state HasSharedBobs: 0 SchemaOperations: [ ] OutReadSets count: 0 ChangesQueue size: 0 ChangeExchangeSplit: 1 siblings to be activated: wait to activation from: 2026-01-08T15:35:41.339494Z node 2 :TX_DATASHARD INFO: datashard_loans.cpp:177: 72075186224037889 Initiating switch from PreOffline to Offline state 2026-01-08T15:35:41.341713Z node 2 :TX_DATASHARD INFO: datashard_impl.h:3339: 72075186224037889 Reporting state Offline to schemeshard 72057594046644480 2026-01-08T15:35:41.343002Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:2974: Handle TEvStateChangedResult datashard 72075186224037889 state Offline 2026-01-08T15:35:41.343548Z node 2 :HIVE DEBUG: tx__delete_tablet.cpp:74: HIVE#72057594037968897 THive::TTxDeleteTablet::Execute() ShardOwnerId: 72057594046644480 ShardLocalIdx: 2 TxId_Deprecated: 0 TabletID: 72075186224037889 2026-01-08T15:35:41.343638Z node 2 :HIVE DEBUG: tx__delete_tablet.cpp:19: HIVE#72057594037968897 THive::TTxDeleteTablet::Execute Tablet 72075186224037889 2026-01-08T15:35:41.343781Z node 2 :HIVE DEBUG: tablet_info.cpp:123: HIVE#72057594037968897 Tablet(DataShard.72075186224037889.Leader.1) VolatileState: Running -> Stopped (Node 2) 2026-01-08T15:35:41.343963Z node 2 :HIVE DEBUG: tablet_info.cpp:518: HIVE#72057594037968897 Sending TEvStopTablet(DataShard.72075186224037889.Leader.1 gen 1) to node 2 2026-01-08T15:35:41.344106Z node 2 :HIVE DEBUG: tx__delete_tablet.cpp:67: HIVE#72057594037968897 THive::TTxDeleteTablet::Execute() result Status: OK Origin: 72057594037968897 TxId_Deprecated: 0 ShardOwnerId: 72057594046644480 ShardLocalIdx: 2 |87.8%| [TM] {BAZEL_UPLOAD} ydb/core/tx/datashard/ut_minstep/unittest >> TTxDataShardUploadRows::TestUploadRowsDropColumnRace [GOOD] >> TTxDataShardUploadRows::TestUploadRowsLocks >> TTxDataShardUploadRows::TestUploadShadowRowsShadowDataSplitThenPublish [GOOD] >> TTxDataShardUploadRows::UploadRowsToReplicatedTable >> IncrementalRestoreScan::ChangeSenderEmpty |87.8%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/datashard/ut_incremental_restore_scan/unittest |87.8%| [TM] {BAZEL_UPLOAD} ydb/core/tx/datashard/ut_incremental_restore_scan/unittest >> TTxDataShardUploadRows::RetryUploadRowsToShard [GOOD] >> TTxDataShardUploadRows::ShouldRejectOnChangeQueueOverflow >> TTxDataShardUploadRows::TestUploadShadowRowsShadowData [GOOD] >> AsyncIndexChangeCollector::CoverIndexedColumn [GOOD] |87.8%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/ydb_convert/ut/ydb-core-ydb_convert-ut |87.8%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/ydb_convert/ut/ydb-core-ydb_convert-ut >> TxUsage::Sinks_Oltp_WriteToTopics_2_Query [GOOD] >> AsyncIndexChangeCollector::ImplicitlyUpdateCoveredColumn [GOOD] >> CdcStreamChangeCollector::OldImage [GOOD] >> TTxDataShardUploadRows::TestUploadShadowRowsShadowDataPublishThenSplit >> CdcStreamChangeCollector::SchemaChanges >> 
IncrementalRestoreScan::ChangeSenderSimple ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/datashard/ut_change_collector/unittest >> CdcStreamChangeCollector::UpsertModifyDelete [GOOD] Test command err: ... waiting for SysViewsRoster update finished 2026-01-08T15:35:20.335593Z node 1 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:125: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, got undelivered to scheme cache: ActorUnknown 2026-01-08T15:35:20.479041Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:114:2161], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2026-01-08T15:35:20.479443Z node 1 :METADATA_PROVIDER ERROR: log.cpp:841: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2026-01-08T15:35:20.479606Z node 1 :METADATA_PROVIDER ERROR: log.cpp:841: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2026-01-08T15:35:20.787995Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2026-01-08T15:35:20.788161Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2026-01-08T15:35:20.874197Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1374: Notification cookie mismatch for subscription [1:34:2081] 1767886516347530 != 1767886516347534 2026-01-08T15:35:20.890634Z node 1 :IMPORT WARN: schemeshard_import.cpp:440: Table profiles were not loaded 2026-01-08T15:35:20.939363Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2026-01-08T15:35:21.029996Z node 1 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: PathErrorUnknown, path: Root/.metadata/script_executions ... waiting for SysViewsRoster update finished (done) 2026-01-08T15:35:21.446862Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2026-01-08T15:35:21.462897Z node 1 :TX_CONVEYOR ERROR: log.cpp:841: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2026-01-08T15:35:21.588784Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:35:21.657448Z node 1 :TX_DATASHARD INFO: datashard.cpp:374: TDataShard::OnActivateExecutor: tablet 72075186224037888 actor [1:886:2765] 2026-01-08T15:35:21.657769Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:661: TxInitSchema.Execute 2026-01-08T15:35:21.778815Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:727: TxInitSchema.Complete 2026-01-08T15:35:21.778983Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:50: TDataShard::TTxInit::Execute 2026-01-08T15:35:21.781152Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1324: LoadChangeRecords: QueueSize: 0, at tablet: 72075186224037888 2026-01-08T15:35:21.781293Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1380: LoadLockChangeRecords at tablet: 72075186224037888 2026-01-08T15:35:21.781356Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1429: LoadChangeRecordCommits at tablet: 72075186224037888 2026-01-08T15:35:21.781914Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:94: TDataShard::TTxInit::Complete 2026-01-08T15:35:21.782106Z node 1 :TX_DATASHARD 
DEBUG: datashard__init.cpp:102: TDataShard::TTxInitRestored::Execute 2026-01-08T15:35:21.782205Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:142: DataShard 72075186224037888 persisting started state actor id [1:901:2765] in generation 1 2026-01-08T15:35:21.799074Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:151: TDataShard::TTxInitRestored::Complete 2026-01-08T15:35:21.881184Z node 1 :TX_DATASHARD INFO: datashard.cpp:418: Switched to work state WaitScheme tabletId 72075186224037888 2026-01-08T15:35:21.881488Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:458: 72075186224037888 not sending time cast registration request in state WaitScheme: missing processing params 2026-01-08T15:35:21.881665Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1259: Change sender created: at tablet: 72075186224037888, actorId: [1:903:2775] 2026-01-08T15:35:21.881736Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1264: Trying to activate change sender: at tablet: 72075186224037888 2026-01-08T15:35:21.881787Z node 1 :TX_DATASHARD INFO: datashard.cpp:1281: Cannot activate change sender: at tablet: 72075186224037888, state: WaitScheme 2026-01-08T15:35:21.881828Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2026-01-08T15:35:21.882557Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 72075186224037888 2026-01-08T15:35:21.882709Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 72075186224037888 2026-01-08T15:35:21.882831Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2026-01-08T15:35:21.882882Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 0 2026-01-08T15:35:21.882935Z node 1 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186224037888 TxInFly 0 2026-01-08T15:35:21.882985Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2026-01-08T15:35:21.883469Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3740: Server connected at leader tablet# 72075186224037888, clientId# [1:882:2762], serverId# [1:887:2766], sessionId# [0:0:0] 2026-01-08T15:35:21.883656Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037888 2026-01-08T15:35:21.884007Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:133: Propose scheme transaction at tablet 72075186224037888 txId 281474976715657 ssId 72057594046644480 seqNo 2:1 2026-01-08T15:35:21.884146Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:221: Prepared scheme transaction txId 281474976715657 at tablet 72075186224037888 2026-01-08T15:35:21.886283Z node 1 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:180: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2026-01-08T15:35:21.907065Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 72075186224037888 2026-01-08T15:35:21.907188Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:468: 72075186224037888 not sending time cast registration request in state WaitScheme 2026-01-08T15:35:22.032849Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3740: Server connected at leader tablet# 72075186224037888, clientId# [1:917:2783], serverId# 
[1:919:2785], sessionId# [0:0:0] 2026-01-08T15:35:22.038555Z node 1 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:69: Planned transaction txId 281474976715657 at step 2000 at tablet 72075186224037888 { Transactions { TxId: 281474976715657 AckTo { RawX1: 0 RawX2: 0 } } Step: 2000 MediatorID: 72057594046382081 TabletID: 72075186224037888 } 2026-01-08T15:35:22.038661Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2026-01-08T15:35:22.039000Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2026-01-08T15:35:22.039050Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 1 2026-01-08T15:35:22.039117Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:333: Found ready operation [2000:281474976715657] in PlanQueue unit at 72075186224037888 2026-01-08T15:35:22.039483Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:634: LoadTxDetails at 72075186224037888 loaded tx from db 2000:281474976715657 keys extracted: 0 2026-01-08T15:35:22.039693Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 (dry run) active 1 active planned 1 immediate 0 planned 1 2026-01-08T15:35:22.040553Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2026-01-08T15:35:22.040649Z node 1 :TX_DATASHARD INFO: create_table_unit.cpp:69: Trying to CREATE TABLE at 72075186224037888 tableId# [OwnerId: 72057594046644480, LocalPathId: 38] schema version# 1 2026-01-08T15:35:22.046051Z node 1 :TX_DATASHARD INFO: datashard.cpp:476: Send registration request to time cast Ready tabletId 72075186224037888 mediators count is 1 coordinators count is 1 buckets per mediator 2 2026-01-08T15:35:22.046677Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 (dry run) active 0 active planned 0 immediate 0 planned 0 2026-01-08T15:35:22.049194Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3772: Got TEvMediatorTimecast::TEvRegisterTabletResult at 72075186224037888 time 0 2026-01-08T15:35:22.049264Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2026-01-08T15:35:22.050464Z node 1 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:98: Sending '{TEvPlanStepAccepted TabletId# 72075186224037888 step# 2000} 2026-01-08T15:35:22.050554Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2026-01-08T15:35:22.051991Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2026-01-08T15:35:22.052048Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1264: Trying to activate change sender: at tablet: 72075186224037888 2026-01-08T15:35:22.052103Z node 1 :TX_DATASHARD INFO: datashard.cpp:1302: Change sender activated: at tablet: 72075186224037888 2026-01-08T15:35:22.052180Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:810: Complete [2000 : 281474976715657] from 72075186224037888 at tablet 72075186224037888 send result to client [1:400:2399], exec latency: 0 ms, propose latency: 0 ms 2026-01-08T15:35:22.052240Z node 1 :TX_DATASHARD INFO: datashard.cpp:1599: 72075186224037888 Sending notify to schemeshard 72057594046644480 txId 281474976715657 state Ready TxInFly 0 2026-01-08T15:35:22.052406Z node 1 :TX_DATASHARD DEBUG: 
cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2026-01-08T15:35:22.059709Z node 1 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:180: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2026-01-08T15:35:22.062036Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:2965: Handle TEvSchemaChangedResult 281474976715657 datashard 72075186224037888 state Ready 2026-01-08T15:35:22.062139Z node 1 :TX_DATASHARD DEBUG: datashard__schema_changed.cpp:22: 72075186224037888 Got TEvSchemaChangedResult from SS at 72075186224037888 2026-01-08T15:35:22.063242Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3790: Got TEvMediatorTimecast::TEvSubscribeReadStepResult at 72075186224037888 coordinator 72057594046316545 last step 0 next step 2000 2026-01-08T15:35:22.077358Z node 1 :TX_DATASHARD DEBU ... Heartbeat] Emit heartbeats: at tablet# 72075186224037888 2026-01-08T15:35:41.526288Z node 4 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:180: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2026-01-08T15:35:41.528302Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:3790: Got TEvMediatorTimecast::TEvSubscribeReadStepResult at 72075186224037888 coordinator 72057594046316545 last step 0 next step 2000 2026-01-08T15:35:41.528503Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:2965: Handle TEvSchemaChangedResult 281474976715657 datashard 72075186224037888 state Ready 2026-01-08T15:35:41.528578Z node 4 :TX_DATASHARD DEBUG: datashard__schema_changed.cpp:22: 72075186224037888 Got TEvSchemaChangedResult from SS at 72075186224037888 2026-01-08T15:35:41.541907Z node 4 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037888 2026-01-08T15:35:41.542102Z node 4 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:133: Propose scheme transaction at tablet 72075186224037888 txId 281474976715658 ssId 72057594046644480 seqNo 2:2 2026-01-08T15:35:41.542162Z node 4 :TX_DATASHARD INFO: check_scheme_tx_unit.cpp:235: Check scheme tx, proposed scheme version# 2 current version# 1 expected version# 2 at tablet# 72075186224037888 txId# 281474976715658 2026-01-08T15:35:41.542201Z node 4 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:221: Prepared scheme transaction txId 281474976715658 at tablet 72075186224037888 2026-01-08T15:35:41.543425Z node 4 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:180: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2026-01-08T15:35:41.562614Z node 4 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 72075186224037888 2026-01-08T15:35:41.768640Z node 4 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:69: Planned transaction txId 281474976715658 at step 2500 at tablet 72075186224037888 { Transactions { TxId: 281474976715658 AckTo { RawX1: 0 RawX2: 0 } } Step: 2500 MediatorID: 72057594046382081 TabletID: 72075186224037888 } 2026-01-08T15:35:41.768725Z node 4 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2026-01-08T15:35:41.769031Z node 4 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2026-01-08T15:35:41.769085Z node 4 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 1 
2026-01-08T15:35:41.769138Z node 4 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:333: Found ready operation [2500:281474976715658] in PlanQueue unit at 72075186224037888 2026-01-08T15:35:41.769381Z node 4 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:634: LoadTxDetails at 72075186224037888 loaded tx from db 2500:281474976715658 keys extracted: 0 2026-01-08T15:35:41.769522Z node 4 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 (dry run) active 1 active planned 1 immediate 0 planned 1 2026-01-08T15:35:41.769965Z node 4 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2026-01-08T15:35:41.770952Z node 4 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 (dry run) active 0 active planned 0 immediate 0 planned 0 2026-01-08T15:35:41.817449Z node 4 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:98: Sending '{TEvPlanStepAccepted TabletId# 72075186224037888 step# 2500} 2026-01-08T15:35:41.817578Z node 4 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2026-01-08T15:35:41.817625Z node 4 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2026-01-08T15:35:41.817678Z node 4 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2026-01-08T15:35:41.817770Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:810: Complete [2500 : 281474976715658] from 72075186224037888 at tablet 72075186224037888 send result to client [4:400:2399], exec latency: 0 ms, propose latency: 0 ms 2026-01-08T15:35:41.817843Z node 4 :TX_DATASHARD INFO: datashard.cpp:1599: 72075186224037888 Sending notify to schemeshard 72057594046644480 txId 281474976715658 state Ready TxInFly 0 2026-01-08T15:35:41.817950Z node 4 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2026-01-08T15:35:41.820406Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:2965: Handle TEvSchemaChangedResult 281474976715658 datashard 72075186224037888 state Ready 2026-01-08T15:35:41.820514Z node 4 :TX_DATASHARD DEBUG: datashard__schema_changed.cpp:22: 72075186224037888 Got TEvSchemaChangedResult from SS at 72075186224037888 2026-01-08T15:35:41.829780Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:1064:2893], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:35:41.829922Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:1074:2898], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:35:41.829999Z node 4 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:263: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:35:41.831092Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:1079:2902], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:35:41.831263Z node 4 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:263: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:35:41.836547Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715659:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2026-01-08T15:35:41.843783Z node 4 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:180: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2026-01-08T15:35:42.038300Z node 4 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:180: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2026-01-08T15:35:42.044429Z node 4 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [4:1078:2901], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715659 completed, doublechecking } 2026-01-08T15:35:42.073208Z node 4 :TX_PROXY ERROR: schemereq.cpp:624: Actor# [4:1137:2941] txid# 281474976715660, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 44], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2026-01-08T15:35:42.171469Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:3740: Server connected at leader tablet# 72075186224037888, clientId# [4:1167:2958], serverId# [4:1168:2959], sessionId# [0:0:0] 2026-01-08T15:35:42.171988Z node 4 :TX_DATASHARD DEBUG: execute_write_unit.cpp:262: Executing write operation for [0:3] at 72075186224037888 2026-01-08T15:35:42.172288Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:883: PersistChangeRecord: record: { Order: 1 Group: 1767886542172172 Step: 3000 TxId: 18446744073709551615 PathId: [OwnerId: 72057594046644480, LocalPathId: 39] Kind: CdcDataChange Source: Unspecified Body: 34b TableId: [OwnerId: 72057594046644480, LocalPathId: 38] SchemaVersion: 2 LockId: 0 LockOffset: 0 }, at tablet: 72075186224037888 2026-01-08T15:35:42.172502Z node 4 :TX_DATASHARD DEBUG: execute_write_unit.cpp:465: Executed write operation for [0:3] at 72075186224037888, row count=1 2026-01-08T15:35:42.184291Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:1179: EnqueueChangeRecords: at tablet: 72075186224037888, records: { Order: 1 PathId: [OwnerId: 72057594046644480, LocalPathId: 39] BodySize: 34 TableId: [OwnerId: 72057594046644480, LocalPathId: 38] SchemaVersion: 2 } 2026-01-08T15:35:42.184402Z node 4 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2026-01-08T15:35:42.278634Z node 4 :TX_DATASHARD DEBUG: execute_write_unit.cpp:262: Executing write operation for [0:4] at 72075186224037888 2026-01-08T15:35:42.278938Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:883: PersistChangeRecord: record: { Order: 2 Group: 1767886542278844 Step: 3000 TxId: 18446744073709551615 PathId: [OwnerId: 72057594046644480, LocalPathId: 39] Kind: CdcDataChange Source: Unspecified Body: 50b TableId: [OwnerId: 72057594046644480, LocalPathId: 38] SchemaVersion: 2 LockId: 0 LockOffset: 0 }, at tablet: 72075186224037888 2026-01-08T15:35:42.279073Z node 4 :TX_DATASHARD DEBUG: execute_write_unit.cpp:465: Executed write operation for [0:4] at 72075186224037888, row count=1 2026-01-08T15:35:42.290919Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:1179: EnqueueChangeRecords: at tablet: 72075186224037888, records: { Order: 2 PathId: [OwnerId: 72057594046644480, LocalPathId: 39] BodySize: 50 TableId: [OwnerId: 72057594046644480, LocalPathId: 38] SchemaVersion: 2 } 2026-01-08T15:35:42.290983Z node 4 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2026-01-08T15:35:42.422251Z node 4 :TX_DATASHARD DEBUG: execute_write_unit.cpp:262: Executing write operation for [0:5] at 72075186224037888 2026-01-08T15:35:42.422607Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:883: PersistChangeRecord: record: { Order: 3 Group: 1767886542422483 Step: 3000 TxId: 18446744073709551615 PathId: [OwnerId: 72057594046644480, LocalPathId: 39] Kind: CdcDataChange Source: Unspecified Body: 34b TableId: [OwnerId: 72057594046644480, LocalPathId: 38] SchemaVersion: 2 LockId: 0 LockOffset: 0 }, at tablet: 72075186224037888 2026-01-08T15:35:42.422754Z node 4 :TX_DATASHARD DEBUG: execute_write_unit.cpp:465: 
Executed write operation for [0:5] at 72075186224037888, row count=1 2026-01-08T15:35:42.434248Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:1179: EnqueueChangeRecords: at tablet: 72075186224037888, records: { Order: 3 PathId: [OwnerId: 72057594046644480, LocalPathId: 39] BodySize: 34 TableId: [OwnerId: 72057594046644480, LocalPathId: 38] SchemaVersion: 2 } 2026-01-08T15:35:42.434363Z node 4 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2026-01-08T15:35:42.436707Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:3740: Server connected at leader tablet# 72075186224037888, clientId# [4:1212:2988], serverId# [4:1213:2989], sessionId# [0:0:0] 2026-01-08T15:35:42.444646Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:3740: Server connected at leader tablet# 72075186224037888, clientId# [4:1214:2990], serverId# [4:1215:2991], sessionId# [0:0:0] >> TPQTest::TestTimeRetention [GOOD] >> TPQTest::TestStorageRetention >> TxUsage::Sinks_Oltp_WriteToTopics_3_Table >> CdcStreamChangeCollector::DeleteSingleRow [GOOD] |87.8%| [TM] {BAZEL_UPLOAD} ydb/core/tx/datashard/ut_change_collector/unittest >> IncrementalRestoreScan::Empty >> BsControllerConfig::PDiskCreate >> KikimrIcGateway::TestLoadServiceAccountSecretValueFromExternalDataSourceMetadata+UseSchemaSecrets+UseAuthToken [GOOD] |87.8%| [TA] {RESULT} $(B)/ydb/core/tx/schemeshard/ut_resource_pool/test-results/unittest/{meta.json ... results_accumulator.log} |87.8%| [LD] {RESULT} $(B)/ydb/core/ydb_convert/ut/ydb-core-ydb_convert-ut ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/provider/ut/unittest >> KikimrIcGateway::TestSecretsExistingValidation+UseSchemaSecrets [GOOD] Test command err: Trying to start YDB, gRPC: 20434, MsgBus: 9828 2026-01-08T15:35:01.177093Z node 1 :METADATA_PROVIDER WARN: log.cpp:841: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7593014705101331975:2132];send_to=[0:7307199536658146131:7762515]; 2026-01-08T15:35:01.177129Z node 1 :METADATA_PROVIDER ERROR: log.cpp:841: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2026-01-08T15:35:01.710481Z node 1 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: LookupError, path: Root/.metadata/script_executions 2026-01-08T15:35:01.772865Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2026-01-08T15:35:01.772951Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2026-01-08T15:35:01.930495Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1374: Notification cookie mismatch for subscription [1:7593014705101331880:2081] 1767886501157433 != 1767886501157436 2026-01-08T15:35:01.931351Z node 1 :IMPORT WARN: schemeshard_import.cpp:440: Table profiles were not loaded 2026-01-08T15:35:01.967349Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2026-01-08T15:35:01.978412Z node 1 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: PathErrorUnknown, path: Root/.metadata/script_executions 2026-01-08T15:35:02.254549Z node 1 :TX_CONVEYOR ERROR: 
log.cpp:841: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2026-01-08T15:35:02.348544Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2026-01-08T15:35:02.348568Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2026-01-08T15:35:02.348575Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2026-01-08T15:35:02.348657Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2026-01-08T15:35:03.133997Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2026-01-08T15:35:03.144971Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:123: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2026-01-08T15:35:03.197718Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:123: NotifyTxCompletion, unknown transaction, txId: 281474976710658, at schemeshard: 72057594046644480 2026-01-08T15:35:05.917027Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7593014722281202005:2329], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:35:05.917137Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:263: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:35:05.918049Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7593014722281202014:2330], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:35:05.918107Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:263: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:35:06.178418Z node 1 :METADATA_PROVIDER ERROR: log.cpp:841: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7593014705101331975:2132];send_to=[0:7307199536658146131:7762515]; 2026-01-08T15:35:06.178493Z node 1 :METADATA_PROVIDER ERROR: log.cpp:841: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2026-01-08T15:35:06.237613Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:35:06.375612Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:35:06.421083Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:35:06.450667Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:35:06.508341Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7593014726576169619:2361], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:35:06.508467Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:263: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:35:06.508831Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7593014726576169622:2363], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:35:06.508907Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:263: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:35:06.509120Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7593014726576169626:2365], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:35:06.513949Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710664:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2026-01-08T15:35:06.531778Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7593014726576169628:2366], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710664 completed, doublechecking } 2026-01-08T15:35:06.667749Z node 1 :TX_PROXY ERROR: schemereq.cpp:624: Actor# [1:7593014726576169679:2771] txid# 281474976710665, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 47], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } Trying to start YDB, gRPC: 28789, MsgBus: 28821 2026-01-08T15:35:08.234265Z node 2 :METADATA_PROVIDER WARN: log.cpp:841: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7593014738698739235:2086];send_to=[0:7307199536658146131:7762515]; 2026-01-08T15:35:08.277843Z node 2 :METADATA_PROVIDER ERROR: log.cpp:841: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2026-01-08T15:35:08.337871Z node 2 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: LookupError, path: Root/.metadata/script_executions 2026-01-08T15:35:08.432707Z node 2 :IMPORT WARN: schemeshard_import.cpp:440: Table profiles were not loaded 2026-01-08T15:35:08.443394Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1374: Notification cookie mismatch for subscription [2:7593014738698739173:2081] 1767886508204840 != 1767886508204843 2026-01-08T15:35:08.497673Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2026-01-08T15:35:08.497739Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2026-01-08T15:35:08.512541Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2026-01-08T15:35:08.594931Z node 2 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: PathErrorUnknown, path: Root/.metadata/script_executions 2026-01-08T15:35:08.665109Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2026-01-08T15:35:08.665143Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2026-01-08T15:35:08.665150Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2026-01-08T15:35:08.665233Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2026-01-08T15:35:09.171657Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2026-01-08T15:35:09.178669Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:123: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2026-01-08T15:35:09.192922Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part ... 
34.115598Z node 4 :METADATA_PROVIDER WARN: log.cpp:841: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[4:7593014847132739059:2160];send_to=[0:7307199536658146131:7762515]; 2026-01-08T15:35:34.115685Z node 4 :METADATA_PROVIDER ERROR: log.cpp:841: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2026-01-08T15:35:34.144263Z node 4 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:125: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, got undelivered to scheme cache: ActorUnknown 2026-01-08T15:35:34.334506Z node 4 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: LookupError, path: Root/.metadata/script_executions 2026-01-08T15:35:34.347358Z node 4 :IMPORT WARN: schemeshard_import.cpp:440: Table profiles were not loaded 2026-01-08T15:35:34.350744Z node 4 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1374: Notification cookie mismatch for subscription [4:7593014847132738937:2081] 1767886534111374 != 1767886534111377 2026-01-08T15:35:34.363874Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2026-01-08T15:35:34.363982Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2026-01-08T15:35:34.414370Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Connecting -> Connected 2026-01-08T15:35:34.590183Z node 4 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: PathErrorUnknown, path: Root/.metadata/script_executions 2026-01-08T15:35:34.643098Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2026-01-08T15:35:34.643125Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2026-01-08T15:35:34.643135Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2026-01-08T15:35:34.643240Z node 4 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2026-01-08T15:35:35.133486Z node 4 :TX_CONVEYOR ERROR: log.cpp:841: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2026-01-08T15:35:35.419787Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2026-01-08T15:35:35.430604Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:123: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2026-01-08T15:35:35.453436Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at 
ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:35:35.549014Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:35:35.779858Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:35:35.906745Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:35:38.986536Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7593014864312609990:2400], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:35:38.986675Z node 4 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:263: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:35:38.994536Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7593014864312610000:2401], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:35:38.994667Z node 4 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:263: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:35:39.118446Z node 4 :METADATA_PROVIDER ERROR: log.cpp:841: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[4:7593014847132739059:2160];send_to=[0:7307199536658146131:7762515]; 2026-01-08T15:35:39.118586Z node 4 :METADATA_PROVIDER ERROR: log.cpp:841: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2026-01-08T15:35:39.154215Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:35:39.212395Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:35:39.280000Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:35:39.369154Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:35:39.459733Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:35:39.551805Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:35:39.637614Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:35:39.722898Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710669:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:35:39.884318Z node 4 :KQP_WORKLOAD_SERVICE WARN: 
scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7593014868607578165:2484], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:35:39.884481Z node 4 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:263: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:35:39.885147Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7593014868607578170:2487], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:35:39.885206Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7593014868607578171:2488], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:35:39.885248Z node 4 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:263: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:35:39.889621Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710670:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2026-01-08T15:35:39.901355Z node 4 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [4:7593014868607578174:2489], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710670 completed, doublechecking } 2026-01-08T15:35:39.972935Z node 4 :TX_PROXY ERROR: schemereq.cpp:624: Actor# [4:7593014868607578227:3765] txid# 281474976710671, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 53], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/datashard/ut_change_collector/unittest >> AsyncIndexChangeCollector::CoverIndexedColumn [GOOD] Test command err: ... waiting for SysViewsRoster update finished 2026-01-08T15:35:22.922774Z node 1 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:125: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, got undelivered to scheme cache: ActorUnknown 2026-01-08T15:35:23.057386Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:114:2161], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2026-01-08T15:35:23.057763Z node 1 :METADATA_PROVIDER ERROR: log.cpp:841: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2026-01-08T15:35:23.057878Z node 1 :METADATA_PROVIDER ERROR: log.cpp:841: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2026-01-08T15:35:23.346049Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2026-01-08T15:35:23.346163Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2026-01-08T15:35:23.431624Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1374: Notification cookie mismatch for subscription [1:34:2081] 1767886519870279 != 1767886519870283 2026-01-08T15:35:23.446775Z node 1 :IMPORT WARN: schemeshard_import.cpp:440: Table profiles were not loaded 2026-01-08T15:35:23.502725Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2026-01-08T15:35:23.628303Z node 1 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: PathErrorUnknown, path: Root/.metadata/script_executions ... waiting for SysViewsRoster update finished (done) 2026-01-08T15:35:24.057279Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2026-01-08T15:35:24.071768Z node 1 :TX_CONVEYOR ERROR: log.cpp:841: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2026-01-08T15:35:24.196091Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:35:24.245588Z node 1 :TX_DATASHARD INFO: datashard.cpp:374: TDataShard::OnActivateExecutor: tablet 72075186224037888 actor [1:897:2773] 2026-01-08T15:35:24.245891Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:661: TxInitSchema.Execute 2026-01-08T15:35:24.297712Z node 1 :TX_DATASHARD INFO: datashard.cpp:374: TDataShard::OnActivateExecutor: tablet 72075186224037889 actor [1:899:2775] 2026-01-08T15:35:24.297966Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:661: TxInitSchema.Execute 2026-01-08T15:35:24.307518Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:727: TxInitSchema.Complete 2026-01-08T15:35:24.307666Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:50: TDataShard::TTxInit::Execute 2026-01-08T15:35:24.309546Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1324: LoadChangeRecords: QueueSize: 0, at tablet: 72075186224037888 2026-01-08T15:35:24.309652Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1380: LoadLockChangeRecords at tablet: 72075186224037888 2026-01-08T15:35:24.309708Z node 1 
:TX_DATASHARD DEBUG: datashard.cpp:1429: LoadChangeRecordCommits at tablet: 72075186224037888 2026-01-08T15:35:24.310118Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:94: TDataShard::TTxInit::Complete 2026-01-08T15:35:24.310409Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:102: TDataShard::TTxInitRestored::Execute 2026-01-08T15:35:24.310511Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:142: DataShard 72075186224037888 persisting started state actor id [1:928:2773] in generation 1 2026-01-08T15:35:24.310981Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:727: TxInitSchema.Complete 2026-01-08T15:35:24.311083Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:50: TDataShard::TTxInit::Execute 2026-01-08T15:35:24.312615Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1324: LoadChangeRecords: QueueSize: 0, at tablet: 72075186224037889 2026-01-08T15:35:24.312706Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1380: LoadLockChangeRecords at tablet: 72075186224037889 2026-01-08T15:35:24.312762Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1429: LoadChangeRecordCommits at tablet: 72075186224037889 2026-01-08T15:35:24.313083Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:94: TDataShard::TTxInit::Complete 2026-01-08T15:35:24.313202Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:102: TDataShard::TTxInitRestored::Execute 2026-01-08T15:35:24.313270Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:142: DataShard 72075186224037889 persisting started state actor id [1:929:2775] in generation 1 2026-01-08T15:35:24.327044Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:151: TDataShard::TTxInitRestored::Complete 2026-01-08T15:35:24.385646Z node 1 :TX_DATASHARD INFO: datashard.cpp:418: Switched to work state WaitScheme tabletId 72075186224037888 2026-01-08T15:35:24.385925Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:458: 72075186224037888 not sending time cast registration request in state WaitScheme: missing processing params 2026-01-08T15:35:24.386072Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1259: Change sender created: at tablet: 72075186224037888, actorId: [1:932:2794] 2026-01-08T15:35:24.386117Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1264: Trying to activate change sender: at tablet: 72075186224037888 2026-01-08T15:35:24.386160Z node 1 :TX_DATASHARD INFO: datashard.cpp:1281: Cannot activate change sender: at tablet: 72075186224037888, state: WaitScheme 2026-01-08T15:35:24.386208Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2026-01-08T15:35:24.386920Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:151: TDataShard::TTxInitRestored::Complete 2026-01-08T15:35:24.386981Z node 1 :TX_DATASHARD INFO: datashard.cpp:418: Switched to work state WaitScheme tabletId 72075186224037889 2026-01-08T15:35:24.387058Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:458: 72075186224037889 not sending time cast registration request in state WaitScheme: missing processing params 2026-01-08T15:35:24.387138Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1259: Change sender created: at tablet: 72075186224037889, actorId: [1:933:2795] 2026-01-08T15:35:24.387169Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1264: Trying to activate change sender: at tablet: 72075186224037889 2026-01-08T15:35:24.387208Z node 1 :TX_DATASHARD INFO: datashard.cpp:1281: Cannot activate change sender: at tablet: 72075186224037889, state: WaitScheme 2026-01-08T15:35:24.387243Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 
72075186224037889 2026-01-08T15:35:24.387427Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 72075186224037888 2026-01-08T15:35:24.387538Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 72075186224037888 2026-01-08T15:35:24.387762Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2026-01-08T15:35:24.387862Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 0 2026-01-08T15:35:24.387925Z node 1 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186224037888 TxInFly 0 2026-01-08T15:35:24.387976Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2026-01-08T15:35:24.388127Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3740: Server connected at leader tablet# 72075186224037888, clientId# [1:890:2769], serverId# [1:901:2776], sessionId# [0:0:0] 2026-01-08T15:35:24.388173Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 72075186224037889 2026-01-08T15:35:24.388231Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 72075186224037889 2026-01-08T15:35:24.388761Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037888 2026-01-08T15:35:24.389067Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:133: Propose scheme transaction at tablet 72075186224037888 txId 281474976715657 ssId 72057594046644480 seqNo 2:1 2026-01-08T15:35:24.389196Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:221: Prepared scheme transaction txId 281474976715657 at tablet 72075186224037888 2026-01-08T15:35:24.389701Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037889 2026-01-08T15:35:24.389743Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037889 active 0 active planned 0 immediate 0 planned 0 2026-01-08T15:35:24.389778Z node 1 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186224037889 TxInFly 0 2026-01-08T15:35:24.389817Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037889 2026-01-08T15:35:24.389987Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3740: Server connected at leader tablet# 72075186224037889, clientId# [1:891:2770], serverId# [1:903:2777], sessionId# [0:0:0] 2026-01-08T15:35:24.390419Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037889 2026-01-08T15:35:24.390648Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:133: Propose scheme transaction at tablet 72075186224037889 txId 281474976715657 ssId 72057594046644480 seqNo 2:2 2026-01-08T15:35:24.390723Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:221: Prepared scheme transaction txId 281474976715657 at tablet 72075186224037889 2026-01-08T15:35:24.392701Z node 1 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:180: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2026-01-08T15:35:24.393035Z node 1 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:180: Discovered subdomain [OwnerId: 
72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037889 2026-01-08T15:35:24.404061Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 72075186224037888 2026-01-08T15:35:24.404196Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:468: 72075186224037888 not sending time cast registration request in state WaitScheme 2026-01-08T15:35:24.404296Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 72075186224037889 2026-01-08T15:35:24.404333Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:468: 72075186224037889 not sending time cast registration request in state WaitScheme 2026-01-08T15:35:24.530500Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3740: Server connected at leader tablet# 72075186224037889, clientId# [1:950:2806], serverId# [1:953:2809], sessionId# [0:0:0] 2026- ... tashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2026-01-08T15:35:43.758692Z node 4 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037889 2026-01-08T15:35:43.758817Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:1264: Trying to activate change sender: at tablet: 72075186224037889 2026-01-08T15:35:43.758875Z node 4 :TX_DATASHARD INFO: datashard.cpp:1302: Change sender activated: at tablet: 72075186224037889 2026-01-08T15:35:43.758960Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:810: Complete [2000 : 281474976715657] from 72075186224037889 at tablet 72075186224037889 send result to client [4:400:2399], exec latency: 0 ms, propose latency: 0 ms 2026-01-08T15:35:43.759033Z node 4 :TX_DATASHARD INFO: datashard.cpp:1599: 72075186224037889 Sending notify to schemeshard 72057594046644480 txId 281474976715657 state Ready TxInFly 0 2026-01-08T15:35:43.759129Z node 4 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037889 2026-01-08T15:35:43.759818Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:3772: Got TEvMediatorTimecast::TEvRegisterTabletResult at 72075186224037890 time 0 2026-01-08T15:35:43.759870Z node 4 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037890 2026-01-08T15:35:43.762527Z node 4 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2026-01-08T15:35:43.762590Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:1264: Trying to activate change sender: at tablet: 72075186224037888 2026-01-08T15:35:43.762646Z node 4 :TX_DATASHARD INFO: datashard.cpp:1302: Change sender activated: at tablet: 72075186224037888 2026-01-08T15:35:43.762712Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:810: Complete [2000 : 281474976715657] from 72075186224037888 at tablet 72075186224037888 send result to client [4:400:2399], exec latency: 0 ms, propose latency: 0 ms 2026-01-08T15:35:43.762769Z node 4 :TX_DATASHARD INFO: datashard.cpp:1599: 72075186224037888 Sending notify to schemeshard 72057594046644480 txId 281474976715657 state Ready TxInFly 0 2026-01-08T15:35:43.762838Z node 4 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2026-01-08T15:35:43.763043Z node 4 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:98: Sending '{TEvPlanStepAccepted TabletId# 72075186224037890 step# 2000} 2026-01-08T15:35:43.763102Z node 4 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 
72075186224037890 2026-01-08T15:35:43.765643Z node 4 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:180: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2026-01-08T15:35:43.765774Z node 4 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:180: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037889 2026-01-08T15:35:43.765836Z node 4 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:180: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037890 2026-01-08T15:35:43.766247Z node 4 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037890 2026-01-08T15:35:43.766300Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:1264: Trying to activate change sender: at tablet: 72075186224037890 2026-01-08T15:35:43.766384Z node 4 :TX_DATASHARD INFO: datashard.cpp:1302: Change sender activated: at tablet: 72075186224037890 2026-01-08T15:35:43.766455Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:810: Complete [2000 : 281474976715657] from 72075186224037890 at tablet 72075186224037890 send result to client [4:400:2399], exec latency: 0 ms, propose latency: 0 ms 2026-01-08T15:35:43.766511Z node 4 :TX_DATASHARD INFO: datashard.cpp:1599: 72075186224037890 Sending notify to schemeshard 72057594046644480 txId 281474976715657 state Ready TxInFly 0 2026-01-08T15:35:43.766584Z node 4 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037890 2026-01-08T15:35:43.774655Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:3790: Got TEvMediatorTimecast::TEvSubscribeReadStepResult at 72075186224037889 coordinator 72057594046316545 last step 0 next step 2000 2026-01-08T15:35:43.775621Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:2965: Handle TEvSchemaChangedResult 281474976715657 datashard 72075186224037889 state Ready 2026-01-08T15:35:43.775730Z node 4 :TX_DATASHARD DEBUG: datashard__schema_changed.cpp:22: 72075186224037889 Got TEvSchemaChangedResult from SS at 72075186224037889 2026-01-08T15:35:43.776285Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:3790: Got TEvMediatorTimecast::TEvSubscribeReadStepResult at 72075186224037888 coordinator 72057594046316545 last step 0 next step 2000 2026-01-08T15:35:43.776661Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:2965: Handle TEvSchemaChangedResult 281474976715657 datashard 72075186224037888 state Ready 2026-01-08T15:35:43.776721Z node 4 :TX_DATASHARD DEBUG: datashard__schema_changed.cpp:22: 72075186224037888 Got TEvSchemaChangedResult from SS at 72075186224037888 2026-01-08T15:35:43.776835Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:3790: Got TEvMediatorTimecast::TEvSubscribeReadStepResult at 72075186224037890 coordinator 72057594046316545 last step 0 next step 2000 2026-01-08T15:35:43.777419Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:2965: Handle TEvSchemaChangedResult 281474976715657 datashard 72075186224037890 state Ready 2026-01-08T15:35:43.777470Z node 4 :TX_DATASHARD DEBUG: datashard__schema_changed.cpp:22: 72075186224037890 Got TEvSchemaChangedResult from SS at 72075186224037890 2026-01-08T15:35:43.793677Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:1046:2883], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:35:43.793779Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:1056:2888], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:35:43.793836Z node 4 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:263: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:35:43.794770Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:1062:2893], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:35:43.794874Z node 4 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:263: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:35:43.798822Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2026-01-08T15:35:43.804454Z node 4 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:180: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2026-01-08T15:35:43.804563Z node 4 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:180: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037889 2026-01-08T15:35:43.804607Z node 4 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:180: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037890 2026-01-08T15:35:43.984845Z node 4 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:180: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2026-01-08T15:35:43.984982Z node 4 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:180: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037889 2026-01-08T15:35:43.985043Z node 4 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:180: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037890 2026-01-08T15:35:43.988569Z node 4 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [4:1060:2891], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715658 completed, doublechecking } 2026-01-08T15:35:44.026272Z node 4 :TX_PROXY ERROR: schemereq.cpp:624: Actor# [4:1120:2932] txid# 281474976715659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 46], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2026-01-08T15:35:44.142910Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:3740: Server connected at leader tablet# 72075186224037889, clientId# [4:1224:2975], serverId# [4:1225:2976], sessionId# [0:0:0] 2026-01-08T15:35:44.143390Z node 4 :TX_DATASHARD DEBUG: execute_write_unit.cpp:262: Executing write operation for [0:2] at 72075186224037889 2026-01-08T15:35:44.143741Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:883: PersistChangeRecord: record: { Order: 1 Group: 1767886544143604 Step: 2500 TxId: 18446744073709551615 PathId: [OwnerId: 72057594046644480, LocalPathId: 39] Kind: AsyncIndex Source: Unspecified Body: 38b TableId: [OwnerId: 72057594046644480, LocalPathId: 38] SchemaVersion: 1 LockId: 0 LockOffset: 0 }, at tablet: 72075186224037889 2026-01-08T15:35:44.143962Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:883: PersistChangeRecord: record: { Order: 2 Group: 1767886544143604 Step: 2500 TxId: 18446744073709551615 PathId: [OwnerId: 72057594046644480, LocalPathId: 41] Kind: AsyncIndex Source: Unspecified Body: 42b TableId: [OwnerId: 72057594046644480, LocalPathId: 38] SchemaVersion: 1 LockId: 0 LockOffset: 0 }, at tablet: 72075186224037889 2026-01-08T15:35:44.144117Z node 4 :TX_DATASHARD DEBUG: execute_write_unit.cpp:465: Executed write operation for [0:2] at 72075186224037889, row count=1 2026-01-08T15:35:44.157180Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:1179: EnqueueChangeRecords: at tablet: 72075186224037889, records: { Order: 1 PathId: [OwnerId: 72057594046644480, LocalPathId: 39] BodySize: 38 TableId: [OwnerId: 72057594046644480, LocalPathId: 38] SchemaVersion: 1 }, { Order: 2 PathId: [OwnerId: 72057594046644480, LocalPathId: 41] BodySize: 42 TableId: [OwnerId: 72057594046644480, LocalPathId: 38] SchemaVersion: 1 } 2026-01-08T15:35:44.157306Z node 4 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037889 2026-01-08T15:35:44.170688Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:3740: Server connected at leader tablet# 72075186224037889, clientId# [4:1231:2981], serverId# [4:1232:2982], sessionId# [0:0:0] 2026-01-08T15:35:44.183207Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:3740: Server connected at leader tablet# 72075186224037889, clientId# [4:1233:2983], serverId# [4:1234:2984], sessionId# [0:0:0] |87.8%| [TM] {BAZEL_UPLOAD} ydb/core/kqp/provider/ut/unittest ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/datashard/ut_change_collector/unittest >> AsyncIndexChangeCollector::ImplicitlyUpdateCoveredColumn [GOOD] Test command err: ... waiting for SysViewsRoster update finished 2026-01-08T15:35:23.507097Z node 1 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:125: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, got undelivered to scheme cache: ActorUnknown 2026-01-08T15:35:23.653366Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:114:2161], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2026-01-08T15:35:23.653751Z node 1 :METADATA_PROVIDER ERROR: log.cpp:841: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2026-01-08T15:35:23.653897Z node 1 :METADATA_PROVIDER ERROR: log.cpp:841: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2026-01-08T15:35:23.975603Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2026-01-08T15:35:23.975767Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2026-01-08T15:35:24.067016Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1374: Notification cookie mismatch for subscription [1:34:2081] 1767886519774776 != 1767886519774780 2026-01-08T15:35:24.078943Z node 1 :IMPORT WARN: schemeshard_import.cpp:440: Table profiles were not loaded 2026-01-08T15:35:24.129236Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2026-01-08T15:35:24.236978Z node 1 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: PathErrorUnknown, path: Root/.metadata/script_executions ... waiting for SysViewsRoster update finished (done) 2026-01-08T15:35:24.559338Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2026-01-08T15:35:24.576904Z node 1 :TX_CONVEYOR ERROR: log.cpp:841: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2026-01-08T15:35:24.710727Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:35:24.796962Z node 1 :TX_DATASHARD INFO: datashard.cpp:374: TDataShard::OnActivateExecutor: tablet 72075186224037888 actor [1:897:2773] 2026-01-08T15:35:24.797257Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:661: TxInitSchema.Execute 2026-01-08T15:35:24.866897Z node 1 :TX_DATASHARD INFO: datashard.cpp:374: TDataShard::OnActivateExecutor: tablet 72075186224037889 actor [1:899:2775] 2026-01-08T15:35:24.867174Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:661: TxInitSchema.Execute 2026-01-08T15:35:24.884269Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:727: TxInitSchema.Complete 2026-01-08T15:35:24.884477Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:50: TDataShard::TTxInit::Execute 2026-01-08T15:35:24.886497Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1324: LoadChangeRecords: QueueSize: 0, at tablet: 72075186224037888 2026-01-08T15:35:24.886625Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1380: LoadLockChangeRecords at tablet: 72075186224037888 2026-01-08T15:35:24.886689Z node 1 
:TX_DATASHARD DEBUG: datashard.cpp:1429: LoadChangeRecordCommits at tablet: 72075186224037888 2026-01-08T15:35:24.887117Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:94: TDataShard::TTxInit::Complete 2026-01-08T15:35:24.887421Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:102: TDataShard::TTxInitRestored::Execute 2026-01-08T15:35:24.887546Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:142: DataShard 72075186224037888 persisting started state actor id [1:928:2773] in generation 1 2026-01-08T15:35:24.888098Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:727: TxInitSchema.Complete 2026-01-08T15:35:24.888229Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:50: TDataShard::TTxInit::Execute 2026-01-08T15:35:24.889783Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1324: LoadChangeRecords: QueueSize: 0, at tablet: 72075186224037889 2026-01-08T15:35:24.889869Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1380: LoadLockChangeRecords at tablet: 72075186224037889 2026-01-08T15:35:24.889927Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1429: LoadChangeRecordCommits at tablet: 72075186224037889 2026-01-08T15:35:24.890311Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:94: TDataShard::TTxInit::Complete 2026-01-08T15:35:24.890602Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:102: TDataShard::TTxInitRestored::Execute 2026-01-08T15:35:24.890687Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:142: DataShard 72075186224037889 persisting started state actor id [1:929:2775] in generation 1 2026-01-08T15:35:24.903082Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:151: TDataShard::TTxInitRestored::Complete 2026-01-08T15:35:24.947490Z node 1 :TX_DATASHARD INFO: datashard.cpp:418: Switched to work state WaitScheme tabletId 72075186224037888 2026-01-08T15:35:24.947765Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:458: 72075186224037888 not sending time cast registration request in state WaitScheme: missing processing params 2026-01-08T15:35:24.947901Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1259: Change sender created: at tablet: 72075186224037888, actorId: [1:932:2794] 2026-01-08T15:35:24.947951Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1264: Trying to activate change sender: at tablet: 72075186224037888 2026-01-08T15:35:24.947993Z node 1 :TX_DATASHARD INFO: datashard.cpp:1281: Cannot activate change sender: at tablet: 72075186224037888, state: WaitScheme 2026-01-08T15:35:24.948038Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2026-01-08T15:35:24.948595Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:151: TDataShard::TTxInitRestored::Complete 2026-01-08T15:35:24.948644Z node 1 :TX_DATASHARD INFO: datashard.cpp:418: Switched to work state WaitScheme tabletId 72075186224037889 2026-01-08T15:35:24.948714Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:458: 72075186224037889 not sending time cast registration request in state WaitScheme: missing processing params 2026-01-08T15:35:24.948787Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1259: Change sender created: at tablet: 72075186224037889, actorId: [1:933:2795] 2026-01-08T15:35:24.948816Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1264: Trying to activate change sender: at tablet: 72075186224037889 2026-01-08T15:35:24.948853Z node 1 :TX_DATASHARD INFO: datashard.cpp:1281: Cannot activate change sender: at tablet: 72075186224037889, state: WaitScheme 2026-01-08T15:35:24.948882Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 
72075186224037889 2026-01-08T15:35:24.949046Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 72075186224037888 2026-01-08T15:35:24.949145Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 72075186224037888 2026-01-08T15:35:24.949347Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2026-01-08T15:35:24.949394Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 0 2026-01-08T15:35:24.949436Z node 1 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186224037888 TxInFly 0 2026-01-08T15:35:24.949478Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2026-01-08T15:35:24.949626Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3740: Server connected at leader tablet# 72075186224037888, clientId# [1:890:2769], serverId# [1:901:2776], sessionId# [0:0:0] 2026-01-08T15:35:24.949671Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 72075186224037889 2026-01-08T15:35:24.949726Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 72075186224037889 2026-01-08T15:35:24.950253Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037888 2026-01-08T15:35:24.950570Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:133: Propose scheme transaction at tablet 72075186224037888 txId 281474976715657 ssId 72057594046644480 seqNo 2:1 2026-01-08T15:35:24.950750Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:221: Prepared scheme transaction txId 281474976715657 at tablet 72075186224037888 2026-01-08T15:35:24.951285Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037889 2026-01-08T15:35:24.951325Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037889 active 0 active planned 0 immediate 0 planned 0 2026-01-08T15:35:24.951358Z node 1 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186224037889 TxInFly 0 2026-01-08T15:35:24.951402Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037889 2026-01-08T15:35:24.951548Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3740: Server connected at leader tablet# 72075186224037889, clientId# [1:891:2770], serverId# [1:903:2777], sessionId# [0:0:0] 2026-01-08T15:35:24.951904Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037889 2026-01-08T15:35:24.952113Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:133: Propose scheme transaction at tablet 72075186224037889 txId 281474976715657 ssId 72057594046644480 seqNo 2:2 2026-01-08T15:35:24.952182Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:221: Prepared scheme transaction txId 281474976715657 at tablet 72075186224037889 2026-01-08T15:35:24.954054Z node 1 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:180: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2026-01-08T15:35:24.954159Z node 1 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:180: Discovered subdomain [OwnerId: 
72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037889 2026-01-08T15:35:24.968449Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 72075186224037888 2026-01-08T15:35:24.968592Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:468: 72075186224037888 not sending time cast registration request in state WaitScheme 2026-01-08T15:35:24.968727Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 72075186224037889 2026-01-08T15:35:24.968775Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:468: 72075186224037889 not sending time cast registration request in state WaitScheme 2026-01-08T15:35:25.101069Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3740: Server connected at leader tablet# 72075186224037889, clientId# [1:950:2806], serverId# [1:953:2809], sessionId# [0:0:0] 2026- ... E TABLE at 72075186224037889 tableId# [OwnerId: 72057594046644480, LocalPathId: 38] schema version# 1 2026-01-08T15:35:43.810186Z node 4 :TX_DATASHARD INFO: datashard.cpp:476: Send registration request to time cast Ready tabletId 72075186224037889 mediators count is 1 coordinators count is 1 buckets per mediator 2 2026-01-08T15:35:43.810704Z node 4 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037889 (dry run) active 0 active planned 0 immediate 0 planned 0 2026-01-08T15:35:43.812213Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:3772: Got TEvMediatorTimecast::TEvRegisterTabletResult at 72075186224037888 time 0 2026-01-08T15:35:43.812281Z node 4 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2026-01-08T15:35:43.819593Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:3772: Got TEvMediatorTimecast::TEvRegisterTabletResult at 72075186224037889 time 0 2026-01-08T15:35:43.819678Z node 4 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037889 2026-01-08T15:35:43.820587Z node 4 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:98: Sending '{TEvPlanStepAccepted TabletId# 72075186224037888 step# 2000} 2026-01-08T15:35:43.820689Z node 4 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2026-01-08T15:35:43.820822Z node 4 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:98: Sending '{TEvPlanStepAccepted TabletId# 72075186224037889 step# 2000} 2026-01-08T15:35:43.820865Z node 4 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037889 2026-01-08T15:35:43.822135Z node 4 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2026-01-08T15:35:43.822198Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:1264: Trying to activate change sender: at tablet: 72075186224037888 2026-01-08T15:35:43.822271Z node 4 :TX_DATASHARD INFO: datashard.cpp:1302: Change sender activated: at tablet: 72075186224037888 2026-01-08T15:35:43.826470Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:810: Complete [2000 : 281474976715657] from 72075186224037888 at tablet 72075186224037888 send result to client [4:400:2399], exec latency: 0 ms, propose latency: 0 ms 2026-01-08T15:35:43.826585Z node 4 :TX_DATASHARD INFO: datashard.cpp:1599: 72075186224037888 Sending notify to schemeshard 72057594046644480 txId 281474976715657 state Ready TxInFly 0 2026-01-08T15:35:43.826729Z node 4 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit 
heartbeats: at tablet# 72075186224037888 2026-01-08T15:35:43.829518Z node 4 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:180: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2026-01-08T15:35:43.834626Z node 4 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:180: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037889 2026-01-08T15:35:43.835011Z node 4 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037889 2026-01-08T15:35:43.835087Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:1264: Trying to activate change sender: at tablet: 72075186224037889 2026-01-08T15:35:43.835133Z node 4 :TX_DATASHARD INFO: datashard.cpp:1302: Change sender activated: at tablet: 72075186224037889 2026-01-08T15:35:43.835207Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:810: Complete [2000 : 281474976715657] from 72075186224037889 at tablet 72075186224037889 send result to client [4:400:2399], exec latency: 0 ms, propose latency: 0 ms 2026-01-08T15:35:43.835266Z node 4 :TX_DATASHARD INFO: datashard.cpp:1599: 72075186224037889 Sending notify to schemeshard 72057594046644480 txId 281474976715657 state Ready TxInFly 0 2026-01-08T15:35:43.835347Z node 4 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037889 2026-01-08T15:35:43.839606Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:3790: Got TEvMediatorTimecast::TEvSubscribeReadStepResult at 72075186224037888 coordinator 72057594046316545 last step 0 next step 2000 2026-01-08T15:35:43.840009Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:2965: Handle TEvSchemaChangedResult 281474976715657 datashard 72075186224037888 state Ready 2026-01-08T15:35:43.840095Z node 4 :TX_DATASHARD DEBUG: datashard__schema_changed.cpp:22: 72075186224037888 Got TEvSchemaChangedResult from SS at 72075186224037888 2026-01-08T15:35:43.840259Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:3790: Got TEvMediatorTimecast::TEvSubscribeReadStepResult at 72075186224037889 coordinator 72057594046316545 last step 0 next step 2000 2026-01-08T15:35:43.841042Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:2965: Handle TEvSchemaChangedResult 281474976715657 datashard 72075186224037889 state Ready 2026-01-08T15:35:43.841101Z node 4 :TX_DATASHARD DEBUG: datashard__schema_changed.cpp:22: 72075186224037889 Got TEvSchemaChangedResult from SS at 72075186224037889 2026-01-08T15:35:43.858724Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:998:2846], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:35:43.858852Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:1008:2851], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:35:43.859315Z node 4 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:263: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:35:43.860041Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:1014:2856], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:35:43.860463Z node 4 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:263: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:35:43.864762Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2026-01-08T15:35:43.872196Z node 4 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:180: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2026-01-08T15:35:43.872343Z node 4 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:180: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037889 2026-01-08T15:35:43.999943Z node 4 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:180: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2026-01-08T15:35:44.000082Z node 4 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:180: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037889 2026-01-08T15:35:44.003464Z node 4 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [4:1012:2854], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715658 completed, doublechecking } 2026-01-08T15:35:44.040970Z node 4 :TX_PROXY ERROR: schemereq.cpp:624: Actor# [4:1071:2894] txid# 281474976715659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 44], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2026-01-08T15:35:44.136679Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:3740: Server connected at leader tablet# 72075186224037889, clientId# [4:1139:2925], serverId# [4:1140:2926], sessionId# [0:0:0] 2026-01-08T15:35:44.137203Z node 4 :TX_DATASHARD DEBUG: execute_write_unit.cpp:262: Executing write operation for [0:2] at 72075186224037889 2026-01-08T15:35:44.137547Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:883: PersistChangeRecord: record: { Order: 1 Group: 1767886544137424 Step: 2500 TxId: 18446744073709551615 PathId: [OwnerId: 72057594046644480, LocalPathId: 39] Kind: AsyncIndex Source: Unspecified Body: 42b TableId: [OwnerId: 72057594046644480, LocalPathId: 38] SchemaVersion: 1 LockId: 0 LockOffset: 0 }, at tablet: 72075186224037889 2026-01-08T15:35:44.137716Z node 4 :TX_DATASHARD DEBUG: execute_write_unit.cpp:465: Executed write operation for [0:2] at 72075186224037889, row count=1 2026-01-08T15:35:44.148963Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:1179: EnqueueChangeRecords: at tablet: 72075186224037889, records: { Order: 1 PathId: [OwnerId: 72057594046644480, LocalPathId: 39] BodySize: 42 TableId: [OwnerId: 72057594046644480, LocalPathId: 38] SchemaVersion: 1 } 2026-01-08T15:35:44.149062Z node 4 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037889 2026-01-08T15:35:44.234991Z node 4 :TX_DATASHARD DEBUG: execute_write_unit.cpp:262: Executing write operation for [0:3] at 72075186224037889 2026-01-08T15:35:44.235404Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:883: PersistChangeRecord: record: { Order: 2 Group: 1767886544235273 Step: 2500 TxId: 18446744073709551615 PathId: [OwnerId: 72057594046644480, LocalPathId: 39] Kind: AsyncIndex Source: Unspecified Body: 28b TableId: [OwnerId: 72057594046644480, LocalPathId: 38] SchemaVersion: 1 LockId: 0 LockOffset: 0 }, at tablet: 72075186224037889 2026-01-08T15:35:44.235634Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:883: PersistChangeRecord: record: { Order: 3 Group: 1767886544235273 Step: 2500 TxId: 18446744073709551615 PathId: [OwnerId: 72057594046644480, LocalPathId: 39] Kind: AsyncIndex Source: Unspecified Body: 42b TableId: [OwnerId: 72057594046644480, LocalPathId: 38] SchemaVersion: 1 LockId: 0 LockOffset: 0 }, at tablet: 72075186224037889 2026-01-08T15:35:44.235778Z node 4 :TX_DATASHARD DEBUG: execute_write_unit.cpp:465: Executed write operation for [0:3] at 72075186224037889, row count=1 2026-01-08T15:35:44.247194Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:1179: EnqueueChangeRecords: at tablet: 72075186224037889, records: { Order: 2 PathId: [OwnerId: 72057594046644480, LocalPathId: 39] BodySize: 28 TableId: [OwnerId: 72057594046644480, LocalPathId: 38] SchemaVersion: 1 }, { Order: 3 PathId: [OwnerId: 72057594046644480, LocalPathId: 39] BodySize: 42 TableId: [OwnerId: 72057594046644480, LocalPathId: 38] SchemaVersion: 1 } 2026-01-08T15:35:44.247289Z node 4 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037889 2026-01-08T15:35:44.252826Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:3740: 
Server connected at leader tablet# 72075186224037889, clientId# [4:1179:2957], serverId# [4:1180:2958], sessionId# [0:0:0] 2026-01-08T15:35:44.259945Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:3740: Server connected at leader tablet# 72075186224037889, clientId# [4:1181:2959], serverId# [4:1182:2960], sessionId# [0:0:0] |87.8%| [TM] {BAZEL_UPLOAD} ydb/core/tx/datashard/ut_change_collector/unittest ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/datashard/ut_change_collector/unittest >> CdcStreamChangeCollector::NewImage [GOOD] Test command err: ... waiting for SysViewsRoster update finished 2026-01-08T15:35:20.583206Z node 1 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:125: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, got undelivered to scheme cache: ActorUnknown 2026-01-08T15:35:20.712070Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:114:2161], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2026-01-08T15:35:20.712472Z node 1 :METADATA_PROVIDER ERROR: log.cpp:841: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2026-01-08T15:35:20.712631Z node 1 :METADATA_PROVIDER ERROR: log.cpp:841: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2026-01-08T15:35:21.018146Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2026-01-08T15:35:21.018347Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2026-01-08T15:35:21.120284Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1374: Notification cookie mismatch for subscription [1:34:2081] 1767886516855595 != 1767886516855599 2026-01-08T15:35:21.142922Z node 1 :IMPORT WARN: schemeshard_import.cpp:440: Table profiles were not loaded 2026-01-08T15:35:21.199078Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2026-01-08T15:35:21.336672Z node 1 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: PathErrorUnknown, path: Root/.metadata/script_executions ... waiting for SysViewsRoster update finished (done) 2026-01-08T15:35:21.674179Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2026-01-08T15:35:21.691028Z node 1 :TX_CONVEYOR ERROR: log.cpp:841: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2026-01-08T15:35:21.817561Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:35:21.859393Z node 1 :TX_DATASHARD INFO: datashard.cpp:374: TDataShard::OnActivateExecutor: tablet 72075186224037888 actor [1:886:2765] 2026-01-08T15:35:21.859735Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:661: TxInitSchema.Execute 2026-01-08T15:35:21.910726Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:727: TxInitSchema.Complete 2026-01-08T15:35:21.910861Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:50: TDataShard::TTxInit::Execute 2026-01-08T15:35:21.912777Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1324: LoadChangeRecords: QueueSize: 0, at tablet: 72075186224037888 2026-01-08T15:35:21.912905Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1380: LoadLockChangeRecords at tablet: 72075186224037888 2026-01-08T15:35:21.912986Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1429: LoadChangeRecordCommits at tablet: 72075186224037888 2026-01-08T15:35:21.913484Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:94: TDataShard::TTxInit::Complete 2026-01-08T15:35:21.913696Z node 1 :TX_DATASHARD 
DEBUG: datashard__init.cpp:102: TDataShard::TTxInitRestored::Execute 2026-01-08T15:35:21.913796Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:142: DataShard 72075186224037888 persisting started state actor id [1:901:2765] in generation 1 2026-01-08T15:35:21.924588Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:151: TDataShard::TTxInitRestored::Complete 2026-01-08T15:35:21.960633Z node 1 :TX_DATASHARD INFO: datashard.cpp:418: Switched to work state WaitScheme tabletId 72075186224037888 2026-01-08T15:35:21.960942Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:458: 72075186224037888 not sending time cast registration request in state WaitScheme: missing processing params 2026-01-08T15:35:21.961121Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1259: Change sender created: at tablet: 72075186224037888, actorId: [1:903:2775] 2026-01-08T15:35:21.961161Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1264: Trying to activate change sender: at tablet: 72075186224037888 2026-01-08T15:35:21.961218Z node 1 :TX_DATASHARD INFO: datashard.cpp:1281: Cannot activate change sender: at tablet: 72075186224037888, state: WaitScheme 2026-01-08T15:35:21.961260Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2026-01-08T15:35:21.961856Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 72075186224037888 2026-01-08T15:35:21.961989Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 72075186224037888 2026-01-08T15:35:21.962085Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2026-01-08T15:35:21.962127Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 0 2026-01-08T15:35:21.962178Z node 1 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186224037888 TxInFly 0 2026-01-08T15:35:21.962225Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2026-01-08T15:35:21.962658Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3740: Server connected at leader tablet# 72075186224037888, clientId# [1:882:2762], serverId# [1:887:2766], sessionId# [0:0:0] 2026-01-08T15:35:21.962884Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037888 2026-01-08T15:35:21.963190Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:133: Propose scheme transaction at tablet 72075186224037888 txId 281474976715657 ssId 72057594046644480 seqNo 2:1 2026-01-08T15:35:21.963312Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:221: Prepared scheme transaction txId 281474976715657 at tablet 72075186224037888 2026-01-08T15:35:21.965224Z node 1 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:180: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2026-01-08T15:35:21.977666Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 72075186224037888 2026-01-08T15:35:21.977810Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:468: 72075186224037888 not sending time cast registration request in state WaitScheme 2026-01-08T15:35:22.101179Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3740: Server connected at leader tablet# 72075186224037888, clientId# [1:917:2783], serverId# 
[1:919:2785], sessionId# [0:0:0] 2026-01-08T15:35:22.110919Z node 1 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:69: Planned transaction txId 281474976715657 at step 2000 at tablet 72075186224037888 { Transactions { TxId: 281474976715657 AckTo { RawX1: 0 RawX2: 0 } } Step: 2000 MediatorID: 72057594046382081 TabletID: 72075186224037888 } 2026-01-08T15:35:22.111043Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2026-01-08T15:35:22.111347Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2026-01-08T15:35:22.111406Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 1 2026-01-08T15:35:22.111493Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:333: Found ready operation [2000:281474976715657] in PlanQueue unit at 72075186224037888 2026-01-08T15:35:22.111930Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:634: LoadTxDetails at 72075186224037888 loaded tx from db 2000:281474976715657 keys extracted: 0 2026-01-08T15:35:22.112103Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 (dry run) active 1 active planned 1 immediate 0 planned 1 2026-01-08T15:35:22.113033Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2026-01-08T15:35:22.113138Z node 1 :TX_DATASHARD INFO: create_table_unit.cpp:69: Trying to CREATE TABLE at 72075186224037888 tableId# [OwnerId: 72057594046644480, LocalPathId: 38] schema version# 1 2026-01-08T15:35:22.115489Z node 1 :TX_DATASHARD INFO: datashard.cpp:476: Send registration request to time cast Ready tabletId 72075186224037888 mediators count is 1 coordinators count is 1 buckets per mediator 2 2026-01-08T15:35:22.116051Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 (dry run) active 0 active planned 0 immediate 0 planned 0 2026-01-08T15:35:22.118852Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3772: Got TEvMediatorTimecast::TEvRegisterTabletResult at 72075186224037888 time 0 2026-01-08T15:35:22.118923Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2026-01-08T15:35:22.119795Z node 1 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:98: Sending '{TEvPlanStepAccepted TabletId# 72075186224037888 step# 2000} 2026-01-08T15:35:22.119876Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2026-01-08T15:35:22.121383Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2026-01-08T15:35:22.121438Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1264: Trying to activate change sender: at tablet: 72075186224037888 2026-01-08T15:35:22.121494Z node 1 :TX_DATASHARD INFO: datashard.cpp:1302: Change sender activated: at tablet: 72075186224037888 2026-01-08T15:35:22.121564Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:810: Complete [2000 : 281474976715657] from 72075186224037888 at tablet 72075186224037888 send result to client [1:400:2399], exec latency: 0 ms, propose latency: 0 ms 2026-01-08T15:35:22.121624Z node 1 :TX_DATASHARD INFO: datashard.cpp:1599: 72075186224037888 Sending notify to schemeshard 72057594046644480 txId 281474976715657 state Ready TxInFly 0 2026-01-08T15:35:22.121719Z node 1 :TX_DATASHARD DEBUG: 
cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2026-01-08T15:35:22.127863Z node 1 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:180: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2026-01-08T15:35:22.129854Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:2965: Handle TEvSchemaChangedResult 281474976715657 datashard 72075186224037888 state Ready 2026-01-08T15:35:22.129948Z node 1 :TX_DATASHARD DEBUG: datashard__schema_changed.cpp:22: 72075186224037888 Got TEvSchemaChangedResult from SS at 72075186224037888 2026-01-08T15:35:22.130953Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3790: Got TEvMediatorTimecast::TEvSubscribeReadStepResult at 72075186224037888 coordinator 72057594046316545 last step 0 next step 2000 2026-01-08T15:35:22.145257Z node 1 :TX_DATASHARD DEBU ... :98: Sending '{TEvPlanStepAccepted TabletId# 72075186224037888 step# 2000} 2026-01-08T15:35:41.701478Z node 4 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2026-01-08T15:35:41.702520Z node 4 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2026-01-08T15:35:41.702574Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:1264: Trying to activate change sender: at tablet: 72075186224037888 2026-01-08T15:35:41.702661Z node 4 :TX_DATASHARD INFO: datashard.cpp:1302: Change sender activated: at tablet: 72075186224037888 2026-01-08T15:35:41.702736Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:810: Complete [2000 : 281474976715657] from 72075186224037888 at tablet 72075186224037888 send result to client [4:400:2399], exec latency: 0 ms, propose latency: 0 ms 2026-01-08T15:35:41.702803Z node 4 :TX_DATASHARD INFO: datashard.cpp:1599: 72075186224037888 Sending notify to schemeshard 72057594046644480 txId 281474976715657 state Ready TxInFly 0 2026-01-08T15:35:41.702892Z node 4 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2026-01-08T15:35:41.704311Z node 4 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:180: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2026-01-08T15:35:41.706263Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:3790: Got TEvMediatorTimecast::TEvSubscribeReadStepResult at 72075186224037888 coordinator 72057594046316545 last step 0 next step 2000 2026-01-08T15:35:41.706484Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:2965: Handle TEvSchemaChangedResult 281474976715657 datashard 72075186224037888 state Ready 2026-01-08T15:35:41.706558Z node 4 :TX_DATASHARD DEBUG: datashard__schema_changed.cpp:22: 72075186224037888 Got TEvSchemaChangedResult from SS at 72075186224037888 2026-01-08T15:35:41.719821Z node 4 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037888 2026-01-08T15:35:41.720004Z node 4 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:133: Propose scheme transaction at tablet 72075186224037888 txId 281474976715658 ssId 72057594046644480 seqNo 2:2 2026-01-08T15:35:41.720067Z node 4 :TX_DATASHARD INFO: check_scheme_tx_unit.cpp:235: Check scheme tx, proposed scheme version# 2 current version# 1 expected version# 2 at tablet# 72075186224037888 txId# 281474976715658 2026-01-08T15:35:41.720105Z node 4 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:221: Prepared scheme transaction txId 281474976715658 at tablet 
72075186224037888 2026-01-08T15:35:41.721179Z node 4 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:180: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2026-01-08T15:35:41.739199Z node 4 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 72075186224037888 2026-01-08T15:35:41.906585Z node 4 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:69: Planned transaction txId 281474976715658 at step 2500 at tablet 72075186224037888 { Transactions { TxId: 281474976715658 AckTo { RawX1: 0 RawX2: 0 } } Step: 2500 MediatorID: 72057594046382081 TabletID: 72075186224037888 } 2026-01-08T15:35:41.906683Z node 4 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2026-01-08T15:35:41.907014Z node 4 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2026-01-08T15:35:41.907078Z node 4 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 1 2026-01-08T15:35:41.907138Z node 4 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:333: Found ready operation [2500:281474976715658] in PlanQueue unit at 72075186224037888 2026-01-08T15:35:41.907393Z node 4 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:634: LoadTxDetails at 72075186224037888 loaded tx from db 2500:281474976715658 keys extracted: 0 2026-01-08T15:35:41.907559Z node 4 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 (dry run) active 1 active planned 1 immediate 0 planned 1 2026-01-08T15:35:41.908099Z node 4 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2026-01-08T15:35:41.909123Z node 4 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 (dry run) active 0 active planned 0 immediate 0 planned 0 2026-01-08T15:35:41.963802Z node 4 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:98: Sending '{TEvPlanStepAccepted TabletId# 72075186224037888 step# 2500} 2026-01-08T15:35:41.963931Z node 4 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2026-01-08T15:35:41.963987Z node 4 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2026-01-08T15:35:41.964049Z node 4 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2026-01-08T15:35:41.964146Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:810: Complete [2500 : 281474976715658] from 72075186224037888 at tablet 72075186224037888 send result to client [4:400:2399], exec latency: 0 ms, propose latency: 0 ms 2026-01-08T15:35:41.964227Z node 4 :TX_DATASHARD INFO: datashard.cpp:1599: 72075186224037888 Sending notify to schemeshard 72057594046644480 txId 281474976715658 state Ready TxInFly 0 2026-01-08T15:35:41.964339Z node 4 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2026-01-08T15:35:41.967050Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:2965: Handle TEvSchemaChangedResult 281474976715658 datashard 72075186224037888 state Ready 2026-01-08T15:35:41.967145Z node 4 :TX_DATASHARD DEBUG: datashard__schema_changed.cpp:22: 72075186224037888 Got TEvSchemaChangedResult from SS at 72075186224037888 2026-01-08T15:35:41.979163Z node 4 :KQP_WORKLOAD_SERVICE WARN: 
scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:1064:2893], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:35:41.979341Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:1074:2898], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:35:41.979438Z node 4 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:263: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:35:41.980594Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:1079:2902], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:35:41.980758Z node 4 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:263: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:35:41.986052Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715659:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2026-01-08T15:35:41.993800Z node 4 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:180: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2026-01-08T15:35:42.208321Z node 4 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:180: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2026-01-08T15:35:42.223742Z node 4 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [4:1078:2901], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715659 completed, doublechecking } 2026-01-08T15:35:42.251465Z node 4 :TX_PROXY ERROR: schemereq.cpp:624: Actor# [4:1137:2941] txid# 281474976715660, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 44], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2026-01-08T15:35:42.336100Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:3740: Server connected at leader tablet# 72075186224037888, clientId# [4:1167:2958], serverId# [4:1168:2959], sessionId# [0:0:0] 2026-01-08T15:35:42.336491Z node 4 :TX_DATASHARD DEBUG: execute_write_unit.cpp:262: Executing write operation for [0:3] at 72075186224037888 2026-01-08T15:35:42.336748Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:883: PersistChangeRecord: record: { Order: 1 Group: 1767886542336657 Step: 3000 TxId: 18446744073709551615 PathId: [OwnerId: 72057594046644480, LocalPathId: 39] Kind: CdcDataChange Source: Unspecified Body: 40b TableId: [OwnerId: 72057594046644480, LocalPathId: 38] SchemaVersion: 2 LockId: 0 LockOffset: 0 }, at tablet: 72075186224037888 2026-01-08T15:35:42.336912Z node 4 :TX_DATASHARD DEBUG: execute_write_unit.cpp:465: Executed write operation for [0:3] at 72075186224037888, row count=1 2026-01-08T15:35:42.353317Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:1179: EnqueueChangeRecords: at tablet: 72075186224037888, records: { Order: 1 PathId: [OwnerId: 72057594046644480, LocalPathId: 39] BodySize: 40 TableId: [OwnerId: 72057594046644480, LocalPathId: 38] SchemaVersion: 2 } 2026-01-08T15:35:42.353427Z node 4 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2026-01-08T15:35:42.476062Z node 4 :TX_DATASHARD DEBUG: execute_write_unit.cpp:262: Executing write operation for [0:4] at 72075186224037888 2026-01-08T15:35:42.476355Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:883: PersistChangeRecord: record: { Order: 2 Group: 1767886542476250 Step: 3000 TxId: 18446744073709551615 PathId: [OwnerId: 72057594046644480, LocalPathId: 39] Kind: CdcDataChange Source: Unspecified Body: 18b TableId: [OwnerId: 72057594046644480, LocalPathId: 38] SchemaVersion: 2 LockId: 0 LockOffset: 0 }, at tablet: 72075186224037888 2026-01-08T15:35:42.476511Z node 4 :TX_DATASHARD DEBUG: execute_write_unit.cpp:465: Executed write operation for [0:4] at 72075186224037888, row count=1 2026-01-08T15:35:42.487655Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:1179: EnqueueChangeRecords: at tablet: 72075186224037888, records: { Order: 2 PathId: [OwnerId: 72057594046644480, LocalPathId: 39] BodySize: 18 TableId: [OwnerId: 72057594046644480, LocalPathId: 38] SchemaVersion: 2 } 2026-01-08T15:35:42.487755Z node 4 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2026-01-08T15:35:42.490154Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:3740: Server connected at leader tablet# 72075186224037888, clientId# [4:1194:2977], serverId# [4:1195:2978], sessionId# [0:0:0] 2026-01-08T15:35:42.497003Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:3740: Server connected at leader tablet# 72075186224037888, clientId# [4:1196:2979], serverId# [4:1197:2980], sessionId# [0:0:0] >> TxUsage::Sinks_Olap_WriteToTopicAndTable_3_Table [GOOD] |87.8%| [TM] {BAZEL_UPLOAD} ydb/core/tx/datashard/ut_change_collector/unittest |87.8%| [TM] {BAZEL_UPLOAD} ydb/core/tx/datashard/ut_change_collector/unittest |87.8%| [LD] 
{default-linux-x86_64, release, asan} $(B)/ydb/core/tx/schemeshard/ut_split_merge_reboots/ydb-core-tx-schemeshard-ut_split_merge_reboots |87.8%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/schemeshard/ut_split_merge_reboots/ydb-core-tx-schemeshard-ut_split_merge_reboots |87.8%| [LD] {RESULT} $(B)/ydb/core/tx/schemeshard/ut_split_merge_reboots/ydb-core-tx-schemeshard-ut_split_merge_reboots >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-LocalUser-DropUser-67 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-LocalUser-DropUser-68 |87.8%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/schemeshard/ut_external_data_source_reboots/schemeshard-ut_external_data_source_reboots |87.8%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/schemeshard/ut_backup_collection_reboots/tx-schemeshard-ut_backup_collection_reboots |87.8%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/schemeshard/ut_external_data_source_reboots/schemeshard-ut_external_data_source_reboots |87.8%| [LD] {RESULT} $(B)/ydb/core/tx/schemeshard/ut_external_data_source_reboots/schemeshard-ut_external_data_source_reboots |87.8%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/schemeshard/ut_backup_collection_reboots/tx-schemeshard-ut_backup_collection_reboots |87.8%| [LD] {RESULT} $(B)/ydb/core/tx/schemeshard/ut_backup_collection_reboots/tx-schemeshard-ut_backup_collection_reboots ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/datashard/ut_change_collector/unittest >> CdcStreamChangeCollector::DeleteSingleRow [GOOD] Test command err: ... waiting for SysViewsRoster update finished 2026-01-08T15:35:23.717211Z node 1 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:125: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, got undelivered to scheme cache: ActorUnknown 2026-01-08T15:35:23.862105Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:114:2161], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2026-01-08T15:35:23.862444Z node 1 :METADATA_PROVIDER ERROR: log.cpp:841: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2026-01-08T15:35:23.862569Z node 1 :METADATA_PROVIDER ERROR: log.cpp:841: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2026-01-08T15:35:24.148943Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2026-01-08T15:35:24.149118Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2026-01-08T15:35:24.227228Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1374: Notification cookie mismatch for subscription [1:34:2081] 1767886519557799 != 1767886519557803 2026-01-08T15:35:24.238240Z node 1 :IMPORT WARN: schemeshard_import.cpp:440: Table profiles were not loaded 2026-01-08T15:35:24.289679Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2026-01-08T15:35:24.393371Z node 1 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: PathErrorUnknown, path: Root/.metadata/script_executions ... waiting for SysViewsRoster update finished (done) 2026-01-08T15:35:24.777995Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2026-01-08T15:35:24.794550Z node 1 :TX_CONVEYOR ERROR: log.cpp:841: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2026-01-08T15:35:24.925350Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:35:24.974181Z node 1 :TX_DATASHARD INFO: datashard.cpp:374: TDataShard::OnActivateExecutor: tablet 72075186224037888 actor [1:897:2773] 2026-01-08T15:35:24.974535Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:661: TxInitSchema.Execute 2026-01-08T15:35:25.024904Z node 1 :TX_DATASHARD INFO: datashard.cpp:374: TDataShard::OnActivateExecutor: tablet 72075186224037889 actor [1:899:2775] 2026-01-08T15:35:25.025168Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:661: TxInitSchema.Execute 2026-01-08T15:35:25.035206Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:727: TxInitSchema.Complete 2026-01-08T15:35:25.035376Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:50: TDataShard::TTxInit::Execute 2026-01-08T15:35:25.037446Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1324: LoadChangeRecords: QueueSize: 0, at tablet: 72075186224037888 2026-01-08T15:35:25.037553Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1380: LoadLockChangeRecords at tablet: 72075186224037888 2026-01-08T15:35:25.037616Z node 1 
:TX_DATASHARD DEBUG: datashard.cpp:1429: LoadChangeRecordCommits at tablet: 72075186224037888 2026-01-08T15:35:25.038071Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:94: TDataShard::TTxInit::Complete 2026-01-08T15:35:25.038383Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:102: TDataShard::TTxInitRestored::Execute 2026-01-08T15:35:25.038481Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:142: DataShard 72075186224037888 persisting started state actor id [1:928:2773] in generation 1 2026-01-08T15:35:25.038959Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:727: TxInitSchema.Complete 2026-01-08T15:35:25.039075Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:50: TDataShard::TTxInit::Execute 2026-01-08T15:35:25.040625Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1324: LoadChangeRecords: QueueSize: 0, at tablet: 72075186224037889 2026-01-08T15:35:25.040695Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1380: LoadLockChangeRecords at tablet: 72075186224037889 2026-01-08T15:35:25.040748Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1429: LoadChangeRecordCommits at tablet: 72075186224037889 2026-01-08T15:35:25.041056Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:94: TDataShard::TTxInit::Complete 2026-01-08T15:35:25.041171Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:102: TDataShard::TTxInitRestored::Execute 2026-01-08T15:35:25.041243Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:142: DataShard 72075186224037889 persisting started state actor id [1:929:2775] in generation 1 2026-01-08T15:35:25.052835Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:151: TDataShard::TTxInitRestored::Complete 2026-01-08T15:35:25.089735Z node 1 :TX_DATASHARD INFO: datashard.cpp:418: Switched to work state WaitScheme tabletId 72075186224037888 2026-01-08T15:35:25.089984Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:458: 72075186224037888 not sending time cast registration request in state WaitScheme: missing processing params 2026-01-08T15:35:25.090113Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1259: Change sender created: at tablet: 72075186224037888, actorId: [1:932:2794] 2026-01-08T15:35:25.090160Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1264: Trying to activate change sender: at tablet: 72075186224037888 2026-01-08T15:35:25.090204Z node 1 :TX_DATASHARD INFO: datashard.cpp:1281: Cannot activate change sender: at tablet: 72075186224037888, state: WaitScheme 2026-01-08T15:35:25.090244Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2026-01-08T15:35:25.090946Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:151: TDataShard::TTxInitRestored::Complete 2026-01-08T15:35:25.091011Z node 1 :TX_DATASHARD INFO: datashard.cpp:418: Switched to work state WaitScheme tabletId 72075186224037889 2026-01-08T15:35:25.091095Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:458: 72075186224037889 not sending time cast registration request in state WaitScheme: missing processing params 2026-01-08T15:35:25.091161Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1259: Change sender created: at tablet: 72075186224037889, actorId: [1:933:2795] 2026-01-08T15:35:25.091188Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1264: Trying to activate change sender: at tablet: 72075186224037889 2026-01-08T15:35:25.091234Z node 1 :TX_DATASHARD INFO: datashard.cpp:1281: Cannot activate change sender: at tablet: 72075186224037889, state: WaitScheme 2026-01-08T15:35:25.091267Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 
72075186224037889 2026-01-08T15:35:25.091475Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 72075186224037888 2026-01-08T15:35:25.091585Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 72075186224037888 2026-01-08T15:35:25.091869Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2026-01-08T15:35:25.091921Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 0 2026-01-08T15:35:25.091967Z node 1 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186224037888 TxInFly 0 2026-01-08T15:35:25.092046Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2026-01-08T15:35:25.092220Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3740: Server connected at leader tablet# 72075186224037888, clientId# [1:890:2769], serverId# [1:901:2776], sessionId# [0:0:0] 2026-01-08T15:35:25.092271Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 72075186224037889 2026-01-08T15:35:25.092342Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 72075186224037889 2026-01-08T15:35:25.093031Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037888 2026-01-08T15:35:25.093345Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:133: Propose scheme transaction at tablet 72075186224037888 txId 281474976715657 ssId 72057594046644480 seqNo 2:1 2026-01-08T15:35:25.093507Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:221: Prepared scheme transaction txId 281474976715657 at tablet 72075186224037888 2026-01-08T15:35:25.094031Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037889 2026-01-08T15:35:25.094076Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037889 active 0 active planned 0 immediate 0 planned 0 2026-01-08T15:35:25.094109Z node 1 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186224037889 TxInFly 0 2026-01-08T15:35:25.094171Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037889 2026-01-08T15:35:25.094374Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3740: Server connected at leader tablet# 72075186224037889, clientId# [1:891:2770], serverId# [1:903:2777], sessionId# [0:0:0] 2026-01-08T15:35:25.094704Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037889 2026-01-08T15:35:25.094913Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:133: Propose scheme transaction at tablet 72075186224037889 txId 281474976715657 ssId 72057594046644480 seqNo 2:2 2026-01-08T15:35:25.094989Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:221: Prepared scheme transaction txId 281474976715657 at tablet 72075186224037889 2026-01-08T15:35:25.096990Z node 1 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:180: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2026-01-08T15:35:25.097087Z node 1 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:180: Discovered subdomain [OwnerId: 
72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037889 2026-01-08T15:35:25.111892Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 72075186224037888 2026-01-08T15:35:25.112012Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:468: 72075186224037888 not sending time cast registration request in state WaitScheme 2026-01-08T15:35:25.112119Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 72075186224037889 2026-01-08T15:35:25.112161Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:468: 72075186224037889 not sending time cast registration request in state WaitScheme 2026-01-08T15:35:25.239950Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3740: Server connected at leader tablet# 72075186224037889, clientId# [1:950:2806], serverId# [1:953:2809], sessionId# [0:0:0] 2026- ... :98: Sending '{TEvPlanStepAccepted TabletId# 72075186224037888 step# 2000} 2026-01-08T15:35:44.827414Z node 4 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2026-01-08T15:35:44.828411Z node 4 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2026-01-08T15:35:44.828466Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:1264: Trying to activate change sender: at tablet: 72075186224037888 2026-01-08T15:35:44.828516Z node 4 :TX_DATASHARD INFO: datashard.cpp:1302: Change sender activated: at tablet: 72075186224037888 2026-01-08T15:35:44.828590Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:810: Complete [2000 : 281474976715657] from 72075186224037888 at tablet 72075186224037888 send result to client [4:400:2399], exec latency: 0 ms, propose latency: 0 ms 2026-01-08T15:35:44.828654Z node 4 :TX_DATASHARD INFO: datashard.cpp:1599: 72075186224037888 Sending notify to schemeshard 72057594046644480 txId 281474976715657 state Ready TxInFly 0 2026-01-08T15:35:44.828762Z node 4 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2026-01-08T15:35:44.835898Z node 4 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:180: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2026-01-08T15:35:44.838968Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:3790: Got TEvMediatorTimecast::TEvSubscribeReadStepResult at 72075186224037888 coordinator 72057594046316545 last step 0 next step 2000 2026-01-08T15:35:44.839249Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:2965: Handle TEvSchemaChangedResult 281474976715657 datashard 72075186224037888 state Ready 2026-01-08T15:35:44.839348Z node 4 :TX_DATASHARD DEBUG: datashard__schema_changed.cpp:22: 72075186224037888 Got TEvSchemaChangedResult from SS at 72075186224037888 2026-01-08T15:35:44.865404Z node 4 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037888 2026-01-08T15:35:44.865614Z node 4 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:133: Propose scheme transaction at tablet 72075186224037888 txId 281474976715658 ssId 72057594046644480 seqNo 2:2 2026-01-08T15:35:44.865678Z node 4 :TX_DATASHARD INFO: check_scheme_tx_unit.cpp:235: Check scheme tx, proposed scheme version# 2 current version# 1 expected version# 2 at tablet# 72075186224037888 txId# 281474976715658 2026-01-08T15:35:44.865718Z node 4 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:221: Prepared scheme transaction txId 281474976715658 at 
tablet 72075186224037888 2026-01-08T15:35:44.870774Z node 4 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:180: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2026-01-08T15:35:44.886874Z node 4 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 72075186224037888 2026-01-08T15:35:45.090386Z node 4 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:69: Planned transaction txId 281474976715658 at step 2500 at tablet 72075186224037888 { Transactions { TxId: 281474976715658 AckTo { RawX1: 0 RawX2: 0 } } Step: 2500 MediatorID: 72057594046382081 TabletID: 72075186224037888 } 2026-01-08T15:35:45.090474Z node 4 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2026-01-08T15:35:45.090819Z node 4 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2026-01-08T15:35:45.090878Z node 4 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 1 2026-01-08T15:35:45.090927Z node 4 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:333: Found ready operation [2500:281474976715658] in PlanQueue unit at 72075186224037888 2026-01-08T15:35:45.091143Z node 4 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:634: LoadTxDetails at 72075186224037888 loaded tx from db 2500:281474976715658 keys extracted: 0 2026-01-08T15:35:45.091279Z node 4 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 (dry run) active 1 active planned 1 immediate 0 planned 1 2026-01-08T15:35:45.091743Z node 4 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2026-01-08T15:35:45.092805Z node 4 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 (dry run) active 0 active planned 0 immediate 0 planned 0 2026-01-08T15:35:45.155093Z node 4 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:98: Sending '{TEvPlanStepAccepted TabletId# 72075186224037888 step# 2500} 2026-01-08T15:35:45.155251Z node 4 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2026-01-08T15:35:45.155305Z node 4 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2026-01-08T15:35:45.155360Z node 4 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2026-01-08T15:35:45.155446Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:810: Complete [2500 : 281474976715658] from 72075186224037888 at tablet 72075186224037888 send result to client [4:400:2399], exec latency: 0 ms, propose latency: 0 ms 2026-01-08T15:35:45.155516Z node 4 :TX_DATASHARD INFO: datashard.cpp:1599: 72075186224037888 Sending notify to schemeshard 72057594046644480 txId 281474976715658 state Ready TxInFly 0 2026-01-08T15:35:45.155636Z node 4 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2026-01-08T15:35:45.158199Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:2965: Handle TEvSchemaChangedResult 281474976715658 datashard 72075186224037888 state Ready 2026-01-08T15:35:45.158306Z node 4 :TX_DATASHARD DEBUG: datashard__schema_changed.cpp:22: 72075186224037888 Got TEvSchemaChangedResult from SS at 72075186224037888 2026-01-08T15:35:45.179646Z node 4 :KQP_WORKLOAD_SERVICE WARN: 
scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:1064:2893], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:35:45.179829Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:1074:2898], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:35:45.179935Z node 4 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:263: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:35:45.181083Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:1079:2902], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:35:45.181254Z node 4 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:263: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:35:45.189171Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715659:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2026-01-08T15:35:45.197270Z node 4 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:180: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2026-01-08T15:35:45.402310Z node 4 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:180: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2026-01-08T15:35:45.405512Z node 4 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [4:1078:2901], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715659 completed, doublechecking } 2026-01-08T15:35:45.433072Z node 4 :TX_PROXY ERROR: schemereq.cpp:624: Actor# [4:1137:2941] txid# 281474976715660, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 44], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2026-01-08T15:35:45.522962Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:3740: Server connected at leader tablet# 72075186224037888, clientId# [4:1167:2958], serverId# [4:1168:2959], sessionId# [0:0:0] 2026-01-08T15:35:45.523426Z node 4 :TX_DATASHARD DEBUG: execute_write_unit.cpp:262: Executing write operation for [0:3] at 72075186224037888 2026-01-08T15:35:45.523738Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:883: PersistChangeRecord: record: { Order: 1 Group: 1767886545523611 Step: 3000 TxId: 18446744073709551615 PathId: [OwnerId: 72057594046644480, LocalPathId: 39] Kind: CdcDataChange Source: Unspecified Body: 34b TableId: [OwnerId: 72057594046644480, LocalPathId: 38] SchemaVersion: 2 LockId: 0 LockOffset: 0 }, at tablet: 72075186224037888 2026-01-08T15:35:45.523966Z node 4 :TX_DATASHARD DEBUG: execute_write_unit.cpp:465: Executed write operation for [0:3] at 72075186224037888, row count=1 2026-01-08T15:35:45.535114Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:1179: EnqueueChangeRecords: at tablet: 72075186224037888, records: { Order: 1 PathId: [OwnerId: 72057594046644480, LocalPathId: 39] BodySize: 34 TableId: [OwnerId: 72057594046644480, LocalPathId: 38] SchemaVersion: 2 } 2026-01-08T15:35:45.535209Z node 4 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2026-01-08T15:35:45.631228Z node 4 :TX_DATASHARD DEBUG: execute_write_unit.cpp:262: Executing write operation for [0:4] at 72075186224037888 2026-01-08T15:35:45.631546Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:883: PersistChangeRecord: record: { Order: 2 Group: 1767886545631430 Step: 3000 TxId: 18446744073709551615 PathId: [OwnerId: 72057594046644480, LocalPathId: 39] Kind: CdcDataChange Source: Unspecified Body: 34b TableId: [OwnerId: 72057594046644480, LocalPathId: 38] SchemaVersion: 2 LockId: 0 LockOffset: 0 }, at tablet: 72075186224037888 2026-01-08T15:35:45.631737Z node 4 :TX_DATASHARD DEBUG: execute_write_unit.cpp:465: Executed write operation for [0:4] at 72075186224037888, row count=1 2026-01-08T15:35:45.643054Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:1179: EnqueueChangeRecords: at tablet: 72075186224037888, records: { Order: 2 PathId: [OwnerId: 72057594046644480, LocalPathId: 39] BodySize: 34 TableId: [OwnerId: 72057594046644480, LocalPathId: 38] SchemaVersion: 2 } 2026-01-08T15:35:45.643130Z node 4 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2026-01-08T15:35:45.645219Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:3740: Server connected at leader tablet# 72075186224037888, clientId# [4:1194:2977], serverId# [4:1195:2978], sessionId# [0:0:0] 2026-01-08T15:35:45.666982Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:3740: Server connected at leader tablet# 72075186224037888, clientId# [4:1196:2979], serverId# [4:1197:2980], sessionId# [0:0:0] |87.9%| [TM] {BAZEL_UPLOAD} ydb/core/tx/datashard/ut_change_collector/unittest >> TTxDataShardUploadRows::ShouldRejectOnChangeQueueOverflowAndRetry [GOOD] >> 
TTxDataShardUploadRows::ShouldRejectOnChangeQueueOverflowAndRetryOnRetryableError >> BsControllerConfig::SelectAllGroups >> TPQTest::TestStorageRetention [GOOD] >> TPQTest::TestStatusWithMultipleConsumers >> TDataShardMinStepTest::TestDropTablePlanComesNotTooEarlyRW-VolatileTxs [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/provider/ut/unittest >> KikimrIcGateway::TestLoadServiceAccountSecretValueFromExternalDataSourceMetadata+UseSchemaSecrets+UseAuthToken [GOOD] >> TxUsage::Sinks_Olap_WriteToTopicAndTable_3_Query Test command err: Trying to start YDB, gRPC: 18485, MsgBus: 23034 2026-01-08T15:34:57.567219Z node 1 :METADATA_PROVIDER WARN: log.cpp:841: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7593014688765416373:2256];send_to=[0:7307199536658146131:7762515]; 2026-01-08T15:34:57.567417Z node 1 :METADATA_PROVIDER ERROR: log.cpp:841: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2026-01-08T15:34:57.598836Z node 1 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:125: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, got undelivered to scheme cache: ActorUnknown 2026-01-08T15:34:58.082449Z node 1 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: LookupError, path: Root/.metadata/script_executions 2026-01-08T15:34:58.121513Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2026-01-08T15:34:58.121607Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2026-01-08T15:34:58.322768Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2026-01-08T15:34:58.455841Z node 1 :IMPORT WARN: schemeshard_import.cpp:440: Table profiles were not loaded 2026-01-08T15:34:58.466997Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1374: Notification cookie mismatch for subscription [1:7593014688765416153:2081] 1767886497508834 != 1767886497508837 2026-01-08T15:34:58.533148Z node 1 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: PathErrorUnknown, path: Root/.metadata/script_executions 2026-01-08T15:34:58.562579Z node 1 :TX_CONVEYOR ERROR: log.cpp:841: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2026-01-08T15:34:58.770832Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2026-01-08T15:34:58.770859Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2026-01-08T15:34:58.770867Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2026-01-08T15:34:58.770955Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2026-01-08T15:34:59.329439Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB 
first called at ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2026-01-08T15:34:59.351768Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:123: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2026-01-08T15:34:59.436339Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:34:59.622215Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:34:59.847477Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:34:59.923089Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:35:02.169220Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7593014710240254526:2400], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:35:02.169352Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:263: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:35:02.169897Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7593014710240254536:2401], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:35:02.169948Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:263: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:35:02.535706Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:35:02.558438Z node 1 :METADATA_PROVIDER ERROR: log.cpp:841: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7593014688765416373:2256];send_to=[0:7307199536658146131:7762515]; 2026-01-08T15:35:02.560802Z node 1 :METADATA_PROVIDER ERROR: log.cpp:841: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2026-01-08T15:35:02.606131Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:35:02.655925Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:35:02.697900Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:35:02.735767Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:35:02.781137Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:35:02.842914Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:35:02.919408Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710669:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:35:03.033013Z node 1 :KQP_WORKLOAD_SERVICE WARN: 
scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7593014714535222716:2484], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:35:03.033091Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:263: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:35:03.033608Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7593014714535222721:2487], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:35:03.033666Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7593014714535222722:2488], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:35:03.033800Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:263: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:35:03.039369Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710670:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2026-01-08T15:35:03.062654Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7593014714535222725:2489], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710670 completed, doublechecking } 2026-01-08T15:35:03.167362Z node 1 :TX_PROXY ERROR: schemereq.cpp:624: Actor# [1:7593014714535222778:3780] txid# 281474976710671, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 53], type: E ... e status: LookupError, path: Root/.metadata/script_executions 2026-01-08T15:35:37.803045Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2026-01-08T15:35:37.803158Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2026-01-08T15:35:37.813739Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Connecting -> Connected 2026-01-08T15:35:37.899142Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2026-01-08T15:35:37.899165Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2026-01-08T15:35:37.899174Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2026-01-08T15:35:37.899265Z node 4 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2026-01-08T15:35:37.946343Z node 4 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: PathErrorUnknown, path: Root/.metadata/script_executions 2026-01-08T15:35:38.591615Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2026-01-08T15:35:38.598981Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:123: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2026-01-08T15:35:38.609430Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:35:38.636553Z node 4 :TX_CONVEYOR ERROR: log.cpp:841: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2026-01-08T15:35:38.688685Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:35:38.895621Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 
2026-01-08T15:35:39.018086Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:35:42.371761Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7593014882938108657:2400], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:35:42.371874Z node 4 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:263: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:35:42.372377Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7593014882938108667:2401], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:35:42.372437Z node 4 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:263: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:35:42.489457Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:35:42.565437Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:35:42.610844Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:35:42.629792Z node 4 :METADATA_PROVIDER ERROR: log.cpp:841: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[4:7593014861463270404:2140];send_to=[0:7307199536658146131:7762515]; 2026-01-08T15:35:42.629896Z node 4 :METADATA_PROVIDER ERROR: log.cpp:841: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2026-01-08T15:35:42.659600Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:35:42.716021Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:35:42.810887Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:35:42.884334Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:35:43.000283Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715669:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:35:43.116908Z node 4 :KQP_WORKLOAD_SERVICE WARN: 
scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7593014887233076838:2484], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:35:43.117017Z node 4 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:263: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:35:43.117629Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7593014887233076843:2487], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:35:43.117733Z node 4 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:263: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:35:43.117812Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7593014887233076844:2488], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:35:43.123601Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715670:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2026-01-08T15:35:43.144952Z node 4 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [4:7593014887233076847:2489], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715670 completed, doublechecking } 2026-01-08T15:35:43.221866Z node 4 :TX_PROXY ERROR: schemereq.cpp:624: Actor# [4:7593014887233076898:3771] txid# 281474976715671, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 53], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2026-01-08T15:35:45.661086Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateSecret, opId: 281474976715673:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_secret.cpp:267) 2026-01-08T15:35:45.731705Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateExternalDataSource, opId: 281474976715674:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_external_data_source.cpp:268) 2026-01-08T15:35:45.741558Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateExternalTable, opId: 281474976715675:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_external_table.cpp:352) >> TTxDataShardUploadRows::UploadRowsToReplicatedTable [GOOD] |87.9%| [TM] {BAZEL_UPLOAD} ydb/core/kqp/provider/ut/unittest >> TPQTest::TestStatusWithMultipleConsumers [GOOD] >> TPQTest::TestTabletRestoreEventsOrder >> IncrementalRestoreScan::ChangeSenderEmpty [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-LocalUser-CreateUser-20 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-LocalUser-CreateUser-21 >> TTxDataShardUploadRows::TestUploadRowsLocks [GOOD] >> TTxDataShardUploadRows::TestUploadShadowRows >> TSchemeShardConsistentCopyTablesTest::ConsistentCopyWithOmitIndexesTrueSkipsIndexes |87.9%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_consistent_copy_tables/unittest |87.9%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_consistent_copy_tables/unittest |87.9%| [TM] {BAZEL_UPLOAD} ydb/core/tx/schemeshard/ut_consistent_copy_tables/unittest |87.9%| [TM] {BAZEL_UPLOAD} ydb/core/tx/schemeshard/ut_consistent_copy_tables/unittest |87.9%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/replication/ydb_proxy/ut/ydb-core-tx-replication-ydb_proxy-ut >> KikimrIcGateway::TestLoadDataSourceProperties-UseSchemaSecrets [GOOD] >> TSchemeShardConsistentCopyTablesTest::ConsistentCopyTableWithMultipleIndexes |87.9%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/replication/ydb_proxy/ut/ydb-core-tx-replication-ydb_proxy-ut |87.9%| [LD] {RESULT} $(B)/ydb/core/tx/replication/ydb_proxy/ut/ydb-core-tx-replication-ydb_proxy-ut >> TTxDataShardUploadRows::ShouldRejectOnChangeQueueOverflow [GOOD] >> TTxDataShardUploadRows::BulkUpsertDuringAddIndexRaceCorruption >> TPQTest::TestTabletRestoreEventsOrder [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/datashard/ut_minstep/unittest >> TDataShardMinStepTest::TestDropTablePlanComesNotTooEarlyRW-VolatileTxs [GOOD] Test command err: ... 
waiting for SysViewsRoster update finished 2026-01-08T15:35:32.395825Z node 1 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:125: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, got undelivered to scheme cache: ActorUnknown 2026-01-08T15:35:32.565929Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:114:2161], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2026-01-08T15:35:32.567788Z node 1 :METADATA_PROVIDER ERROR: log.cpp:841: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2026-01-08T15:35:32.568087Z node 1 :METADATA_PROVIDER ERROR: log.cpp:841: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2026-01-08T15:35:32.896297Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2026-01-08T15:35:32.896466Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2026-01-08T15:35:33.065891Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1374: Notification cookie mismatch for subscription [1:34:2081] 1767886528990608 != 1767886528990612 2026-01-08T15:35:33.078227Z node 1 :IMPORT WARN: schemeshard_import.cpp:440: Table profiles were not loaded 2026-01-08T15:35:33.127696Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2026-01-08T15:35:33.241390Z node 1 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: PathErrorUnknown, path: Root/.metadata/script_executions ... waiting for SysViewsRoster update finished (done) 2026-01-08T15:35:33.623453Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //Root, opId: 1:0, at schemeshard: 72057594046644480 2026-01-08T15:35:33.623850Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:127: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046644480 2026-01-08T15:35:33.625760Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2026-01-08T15:35:33.629403Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //Root 2026-01-08T15:35:33.631269Z node 1 :TX_COORDINATOR DEBUG: coordinator_impl.cpp:183: tablet# 72057594046316545 txid# 1 HANDLE EvProposeTransaction marker# C0 2026-01-08T15:35:33.631328Z node 1 :TX_COORDINATOR DEBUG: coordinator_impl.cpp:29: tablet# 72057594046316545 txid# 1 step# 1500 Status# 16 SEND to# [1:400:2399] Proxy marker# C1 2026-01-08T15:35:33.643515Z node 1 :TX_CONVEYOR ERROR: log.cpp:841: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2026-01-08T15:35:33.670731Z node 1 :HIVE DEBUG: hive_impl.cpp:2519: HIVE#72057594037968897 ProcessTabletBalancer [(72057594046644480:1,0)] MaxUsage=0.000000000 on #1 MinUsage=0.000000000 on #1 Scatter=0.000000000 2026-01-08T15:35:33.670899Z node 1 :HIVE DEBUG: hive_impl.cpp:406: HIVE#72057594037968897 Handle BalancerOut 2026-01-08T15:35:33.753128Z node 1 :TX_COORDINATOR DEBUG: coordinator__plan_step.cpp:184: Transaction 1 has been planned 2026-01-08T15:35:33.753244Z node 
1 :TX_COORDINATOR DEBUG: coordinator__plan_step.cpp:197: Planned transaction 1 for mediator 72057594046382081 tablet 72057594046644480 2026-01-08T15:35:33.753509Z node 1 :TX_COORDINATOR TRACE: coordinator_impl.cpp:268: Coordinator# 72057594046316545 scheduling step 2000 in 0.500000s at 1.950000s 2026-01-08T15:35:33.753980Z node 1 :TX_COORDINATOR DEBUG: coordinator_impl.cpp:580: Send from# 72057594046316545 to mediator# 72057594046382081, step# 1500, txid# 1 marker# C2 2026-01-08T15:35:33.754047Z node 1 :TX_COORDINATOR DEBUG: coordinator_impl.cpp:424: tablet# 72057594046316545 txid# 1 stepId# 1500 Status# 17 SEND EvProposeTransactionStatus to# [1:400:2399] Proxy 2026-01-08T15:35:33.754586Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:685: TTxOperationPlanStep Execute, stepId: 1500, transactions count in step: 1, at schemeshard: 72057594046644480 2026-01-08T15:35:33.756476Z node 1 :TX_COORDINATOR DEBUG: coordinator_impl.cpp:397: tablet# 72057594046316545 HANDLE EvMediatorQueueConfirmations MediatorId# 72057594046382081 2026-01-08T15:35:33.756591Z node 1 :TX_COORDINATOR DEBUG: coordinator__mediators_confirmations.cpp:84: at tablet# 72057594046316545 [2:10] persistent tx 1 for mediator 72057594046382081 tablet 72057594046644480 removed=1 2026-01-08T15:35:33.756660Z node 1 :TX_COORDINATOR DEBUG: coordinator__mediators_confirmations.cpp:91: at tablet# 72057594046316545 [2:10] persistent tx 1 for mediator 72057594046382081 acknowledged 2026-01-08T15:35:33.756732Z node 1 :TX_COORDINATOR DEBUG: coordinator__mediators_confirmations.cpp:99: at tablet# 72057594046316545 [2:10] persistent tx 1 acknowledged 2026-01-08T15:35:33.757061Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:995: Operation and all the parts is done, operation id: 1:0 2026-01-08T15:35:33.757143Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:1004: Publication still in progress, tx: 1, publications: 1, subscribers: 1 2026-01-08T15:35:33.758112Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046644480, txId: 1, subscribers: 1 2026-01-08T15:35:33.770151Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_create_table.cpp:445: TCreateTable Propose, path: /Root/table-1, opId: 281474976715657:0, at schemeshard: 72057594046644480 2026-01-08T15:35:33.772371Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:127: IgniteOperation, opId: 281474976715657:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046644480 2026-01-08T15:35:33.772482Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:35:33.773619Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 281474976715657, database: /Root, subject: , status: StatusAccepted, operation: CREATE TABLE, path: /Root/table-1 2026-01-08T15:35:33.778078Z node 1 :HIVE DEBUG: hive_impl.cpp:55: HIVE#72057594037968897 Handle TEvHive::TEvCreateTablet(DataShard(72057594046644480,1)) 2026-01-08T15:35:33.790911Z node 1 :HIVE DEBUG: tx__create_tablet.cpp:200: HIVE#72057594037968897 THive::TTxCreateTablet::Execute Owner: 72057594046644480 OwnerIdx: 1 TabletType: DataShard 
ObjectDomain { SchemeShard: 72057594046644480 PathId: 1 } ObjectId: 38 BindedChannels { StoragePoolName: "/Root:test" StoragePoolKind: "test" } BindedChannels { StoragePoolName: "/Root:test" StoragePoolKind: "test" } AllowedDomains { SchemeShard: 72057594046644480 PathId: 1 } 2026-01-08T15:35:33.791038Z node 1 :HIVE DEBUG: tx__create_tablet.cpp:354: HIVE#72057594037968897 Hive 72057594037968897 allocated TabletId 72075186224037888 from TabletIdIndex 65536 2026-01-08T15:35:33.791304Z node 1 :HIVE DEBUG: tx__create_tablet.cpp:446: HIVE#72057594037968897 THive::TTxCreateTablet::Execute; Default resources after merge for type DataShard: {} 2026-01-08T15:35:33.791374Z node 1 :HIVE DEBUG: tx__create_tablet.cpp:449: HIVE#72057594037968897 THive::TTxCreateTablet::Execute; Default resources after merge for object (72057594046644480,38): {} 2026-01-08T15:35:33.791444Z node 1 :HIVE DEBUG: tx__create_tablet.cpp:453: HIVE#72057594037968897 THive::TTxCreateTablet::Execute; Default resources after merge for profile 'default': {Memory: 1048576} 2026-01-08T15:35:33.791620Z node 1 :HIVE DEBUG: hive_impl.cpp:2904: HIVE#72057594037968897 CreateTabletFollowers Tablet DataShard.72075186224037888.Leader.0 2026-01-08T15:35:33.792285Z node 1 :HIVE DEBUG: tx__create_tablet.cpp:173: HIVE#72057594037968897 THive::TTxCreateTablet::Execute TabletId: 72075186224037888 Status: OK 2026-01-08T15:35:33.792439Z node 1 :HIVE DEBUG: hive_impl.cpp:1108: HIVE#72057594037968897 THive::AssignTabletGroups TEvControllerSelectGroups tablet 72075186224037888 GroupParameters { StoragePoolSpecifier { Name: "/Root:test" } } ReturnAllMatchingGroups: true 2026-01-08T15:35:33.793085Z node 1 :HIVE DEBUG: hive_impl.cpp:93: HIVE#72057594037968897 Connected to tablet 72057594037932033 from tablet 72057594037968897 2026-01-08T15:35:33.793468Z node 1 :HIVE DEBUG: hive_impl.cpp:461: HIVE#72057594037968897 THive::Handle TEvControllerSelectGroupsResult: success Status: OK NewStyleQuerySupported: true MatchingGroups { Groups { ErasureSpecies: 0 GroupID: 2181038080 StoragePoolName: "/Root:test" AssuredResources { } CurrentResources { } PhysicalGroup: true Decommitted: false GroupSizeInUnits: 0 } } 2026-01-08T15:35:33.793586Z node 1 :HIVE DEBUG: tx__update_tablet_groups.cpp:63: HIVE#72057594037968897 THive::TTxUpdateTabletGroups::Execute{136266345467808}(72075186224037888,HIVE_REASSIGN_REASON_NO,[]) 2026-01-08T15:35:33.793659Z node 1 :HIVE DEBUG: tx__update_tablet_groups.cpp:151: HIVE#72057594037968897 THive::TTxUpdateTabletGroups::Execute{136266345467808}: tablet 72075186224037888 channel 0 assigned to group 2181038080 2026-01-08T15:35:33.793831Z node 1 :HIVE DEBUG: tx__update_tablet_groups.cpp:151: HIVE#72057594037968897 THive::TTxUpdateTabletGroups::Execute{136266345467808}: tablet 72075186224037888 channel 1 assigned to group 2181038080 2026-01-08T15:35:33.793938Z node 1 :HIVE DEBUG: tablet_info.cpp:125: HIVE#72057594037968897 Tablet(DataShard.72075186224037888.Leader.0) VolatileState: Unknown -> Stopped 2026-01-08T15:35:33.794015Z node 1 :HIVE DEBUG: tablet_info.cpp:125: HIVE#72057594037968897 Tablet(DataShard.72075186224037888.Leader.0) VolatileState: Stopped -> Booting 2026-01-08T15:35:33.794067Z node 1 :HIVE DEBUG: hive_impl.cpp:370: HIVE#72057594037968897 ProcessBootQueue (1) 2026-01-08T15:35:33.794264Z node 1 :HIVE DEBUG: tx__process_boot_queue.cpp:18: HIVE#72057594037968897 THive::TTxProcessBootQueue()::Execute 2026-01-08T15:35:33.794310Z node 1 :HIVE DEBUG: hive_impl.cpp:250: HIVE#72057594037968897 Handle ProcessBootQueue (size: 1) 
2026-01-08T15:35:33.794491Z node 1 :HIVE DEBUG: hive_impl.cpp:1254: HIVE#72057594037968897 [FBN] Finding best node for tablet DataShard.72075186224037888.Leader.0 2026-01-08T15:35:33.794618Z node 1 :HIVE DEBUG: tablet_info.cpp:123: HIVE#72057594037968897 Tablet(DataShard.72075186224037888.Leader.0) VolatileState: Booting -> Starting (Node 1) 2026-01-08T15:35:33.794784Z node 1 :HIVE DEBUG: hive_impl.cpp:330: HIVE#72057594037968897 ProcessBootQueue - BootQueue empty (WaitQueue: 0) 2026-01-08T15:35:33.794951Z node 1 :HIVE DEBUG: tx__start_tablet.cpp:31: HIVE#72057594037968897 THive::TTxStartTablet::Execute Tablet (72075186224037888,0) 2026-01-08T15:35:33.795192Z node 1 :HIVE DEBUG: tx__start_tablet.cpp:73: HIVE#72057594037968897 THive::TTxStartTablet::Execute, Sending TEvBootTablet(DataShard.72075186224037888.Leader.1) to node 1 storage {Version# 1 TabletID# 72075186224037888 T ... :TX_DATASHARD INFO: datashard.cpp:196: OnTabletStop: 72075186224037888 reason = ReasonStop 2026-01-08T15:35:47.812817Z node 2 :TX_DATASHARD INFO: datashard.cpp:256: OnTabletDead: 72075186224037888 2026-01-08T15:35:47.812988Z node 2 :TX_DATASHARD INFO: datashard.cpp:1310: Change sender killed: at tablet: 72075186224037888 2026-01-08T15:35:47.816542Z node 2 :HIVE DEBUG: tx__block_storage_result.cpp:23: HIVE#72057594037968897 THive::TTxBlockStorageResult::Execute(72075186224037888 OK) 2026-01-08T15:35:47.816637Z node 2 :HIVE DEBUG: tx__block_storage_result.cpp:64: HIVE#72057594037968897 THive::TTxBlockStorageResult::Complete(72075186224037888 OK) 2026-01-08T15:35:47.817006Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:160: TTxCleanDroppedPaths Complete, done PersistRemovePath for 1 paths, skipped 0, left 0 candidates, at schemeshard: 72057594046644480 2026-01-08T15:35:47.817137Z node 2 :HIVE DEBUG: hive_impl.cpp:925: HIVE#72057594037968897 THive::Handle::TEvInitiateDeleteStorage TabletId=72075186224037888 2026-01-08T15:35:47.817443Z node 2 :HIVE DEBUG: hive_impl.cpp:508: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus, TabletId: 72075186224037888 2026-01-08T15:35:47.817510Z node 2 :HIVE WARN: hive_impl.cpp:519: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 2, TabletId: 72075186224037888 not found 2026-01-08T15:35:47.817832Z node 2 :HIVE DEBUG: tx__delete_tablet_result.cpp:26: HIVE#72057594037968897 THive::TTxDeleteTabletResult::Execute(72075186224037888 OK) 2026-01-08T15:35:47.828936Z node 2 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 72075186224037889 2026-01-08T15:35:47.839775Z node 2 :HIVE DEBUG: tx__delete_tablet_result.cpp:72: HIVE#72057594037968897 THive::TTxDeleteTabletResult(72075186224037888)::Complete SideEffects {} 2026-01-08T15:35:47.861110Z node 2 :GLOBAL WARN: log.cpp:841: fline=events.h:105;event=ev_write_error;status=STATUS_ABORTED;details=DataShard 72075186224037889 is blocked by a schema operation;tx_id=281474976715662; 2026-01-08T15:35:47.861298Z node 2 :TX_DATASHARD INFO: datashard_pipeline.cpp:1318: Outdated Tx 281474976715662 is cleaned at tablet 72075186224037889 and outdatedStep# 34500 2026-01-08T15:35:47.861409Z node 2 :TX_DATASHARD INFO: datashard__cleanup_tx.cpp:38: Cleaned up old txs at 72075186224037889 TxInFly 0 2026-01-08T15:35:47.861645Z node 2 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037889 2026-01-08T15:35:47.861715Z node 2 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:133: Propose scheme transaction at tablet 72075186224037889 
txId 281474976715666 ssId 72057594046644480 seqNo 2:4 2026-01-08T15:35:47.861771Z node 2 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:221: Prepared scheme transaction txId 281474976715666 at tablet 72075186224037889 2026-01-08T15:35:47.861969Z node 2 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037889 (dry run) active 0 active planned 0 immediate 0 planned 1 2026-01-08T15:35:47.862173Z node 2 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037889 (dry run) active 0 active planned 0 immediate 0 planned 1 2026-01-08T15:35:47.862258Z node 2 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037889 2026-01-08T15:35:47.862295Z node 2 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037889 active 0 active planned 0 immediate 0 planned 1 2026-01-08T15:35:47.862356Z node 2 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186224037889 TxInFly 1 2026-01-08T15:35:47.873239Z node 2 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037889 2026-01-08T15:35:47.873376Z node 2 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037889 2026-01-08T15:35:47.874952Z node 2 :TX_COORDINATOR DEBUG: coordinator_impl.cpp:183: tablet# 72057594046316545 txid# 281474976715666 HANDLE EvProposeTransaction marker# C0 2026-01-08T15:35:47.875028Z node 2 :TX_COORDINATOR DEBUG: coordinator_impl.cpp:29: tablet# 72057594046316545 txid# 281474976715666 step# 35000 Status# 16 SEND to# [2:399:2398] Proxy marker# C1 2026-01-08T15:35:47.937307Z node 2 :TX_COORDINATOR DEBUG: coordinator__plan_step.cpp:184: Transaction 281474976715666 has been planned 2026-01-08T15:35:47.937436Z node 2 :TX_COORDINATOR DEBUG: coordinator__plan_step.cpp:197: Planned transaction 281474976715666 for mediator 72057594046382081 tablet 72057594046644480 2026-01-08T15:35:47.937491Z node 2 :TX_COORDINATOR DEBUG: coordinator__plan_step.cpp:197: Planned transaction 281474976715666 for mediator 72057594046382081 tablet 72075186224037889 2026-01-08T15:35:47.937751Z node 2 :TX_COORDINATOR TRACE: coordinator_impl.cpp:268: Coordinator# 72057594046316545 scheduling step 35500 in 0.500000s at 35.450000s 2026-01-08T15:35:47.938209Z node 2 :TX_COORDINATOR DEBUG: coordinator_impl.cpp:580: Send from# 72057594046316545 to mediator# 72057594046382081, step# 35000, txid# 281474976715666 marker# C2 2026-01-08T15:35:47.938300Z node 2 :TX_COORDINATOR DEBUG: coordinator_impl.cpp:424: tablet# 72057594046316545 txid# 281474976715666 stepId# 35000 Status# 17 SEND EvProposeTransactionStatus to# [2:399:2398] Proxy 2026-01-08T15:35:47.938814Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:685: TTxOperationPlanStep Execute, stepId: 35000, transactions count in step: 1, at schemeshard: 72057594046644480 2026-01-08T15:35:47.939607Z node 2 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:69: Planned transaction txId 281474976715666 at step 35000 at tablet 72075186224037889 { Transactions { TxId: 281474976715666 AckTo { RawX1: 0 RawX2: 0 } } Step: 35000 MediatorID: 72057594046382081 TabletID: 72075186224037889 } 2026-01-08T15:35:47.939690Z node 2 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037889 2026-01-08T15:35:47.939916Z node 2 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037889 
2026-01-08T15:35:47.939970Z node 2 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037889 active 0 active planned 0 immediate 0 planned 1 2026-01-08T15:35:47.940021Z node 2 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:333: Found ready operation [35000:281474976715666] in PlanQueue unit at 72075186224037889 2026-01-08T15:35:47.940221Z node 2 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:634: LoadTxDetails at 72075186224037889 loaded tx from db 35000:281474976715666 keys extracted: 0 2026-01-08T15:35:47.940358Z node 2 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037889 (dry run) active 1 active planned 1 immediate 0 planned 1 2026-01-08T15:35:47.940912Z node 2 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037889 2026-01-08T15:35:47.941007Z node 2 :TX_DATASHARD INFO: drop_table_unit.cpp:72: Trying to DROP TABLE at 72075186224037889 2026-01-08T15:35:47.941477Z node 2 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037889 (dry run) active 0 active planned 0 immediate 0 planned 0 2026-01-08T15:35:47.943764Z node 2 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:98: Sending '{TEvPlanStepAccepted TabletId# 72075186224037889 step# 35000} 2026-01-08T15:35:47.943851Z node 2 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037889 2026-01-08T15:35:47.944115Z node 2 :TX_COORDINATOR DEBUG: coordinator_impl.cpp:397: tablet# 72057594046316545 HANDLE EvMediatorQueueConfirmations MediatorId# 72057594046382081 2026-01-08T15:35:47.944221Z node 2 :TX_COORDINATOR DEBUG: coordinator__mediators_confirmations.cpp:84: at tablet# 72057594046316545 [2:54] persistent tx 281474976715666 for mediator 72057594046382081 tablet 72057594046644480 removed=1 2026-01-08T15:35:47.944279Z node 2 :TX_COORDINATOR DEBUG: coordinator__mediators_confirmations.cpp:84: at tablet# 72057594046316545 [2:54] persistent tx 281474976715666 for mediator 72057594046382081 tablet 72075186224037889 removed=1 2026-01-08T15:35:47.944317Z node 2 :TX_COORDINATOR DEBUG: coordinator__mediators_confirmations.cpp:91: at tablet# 72057594046316545 [2:54] persistent tx 281474976715666 for mediator 72057594046382081 acknowledged 2026-01-08T15:35:47.944365Z node 2 :TX_COORDINATOR DEBUG: coordinator__mediators_confirmations.cpp:99: at tablet# 72057594046316545 [2:54] persistent tx 281474976715666 acknowledged 2026-01-08T15:35:47.944866Z node 2 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037889 2026-01-08T15:35:47.944942Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:810: Complete [35000 : 281474976715666] from 72075186224037889 at tablet 72075186224037889 send result to client [2:399:2398], exec latency: 0 ms, propose latency: 0 ms 2026-01-08T15:35:47.945009Z node 2 :TX_DATASHARD INFO: datashard.cpp:1599: 72075186224037889 Sending notify to schemeshard 72057594046644480 txId 281474976715666 state PreOffline TxInFly 0 2026-01-08T15:35:47.945101Z node 2 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037889 2026-01-08T15:35:47.945282Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:1125: All parts have reached barrier, tx: 281474976715666, done: 0, blocked: 1 2026-01-08T15:35:47.951459Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:2965: Handle TEvSchemaChangedResult 281474976715666 datashard 72075186224037889 state PreOffline 
2026-01-08T15:35:47.951562Z node 2 :TX_DATASHARD DEBUG: datashard__schema_changed.cpp:22: 72075186224037889 Got TEvSchemaChangedResult from SS at 72075186224037889 2026-01-08T15:35:47.952612Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:995: Operation and all the parts is done, operation id: 281474976715666:0 2026-01-08T15:35:47.953175Z node 2 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:180: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037889 2026-01-08T15:35:47.973275Z node 2 :TX_DATASHARD DEBUG: datashard_loans.cpp:220: 72075186224037889 in PreOffline state HasSharedBobs: 0 SchemaOperations: [ ] OutReadSets count: 0 ChangesQueue size: 0 ChangeExchangeSplit: 1 siblings to be activated: wait to activation from: 2026-01-08T15:35:47.973545Z node 2 :TX_DATASHARD INFO: datashard_loans.cpp:177: 72075186224037889 Initiating switch from PreOffline to Offline state 2026-01-08T15:35:47.975408Z node 2 :TX_DATASHARD INFO: datashard_impl.h:3339: 72075186224037889 Reporting state Offline to schemeshard 72057594046644480 2026-01-08T15:35:47.976312Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:2974: Handle TEvStateChangedResult datashard 72075186224037889 state Offline 2026-01-08T15:35:47.976739Z node 2 :HIVE DEBUG: tx__delete_tablet.cpp:74: HIVE#72057594037968897 THive::TTxDeleteTablet::Execute() ShardOwnerId: 72057594046644480 ShardLocalIdx: 2 TxId_Deprecated: 0 TabletID: 72075186224037889 2026-01-08T15:35:47.976801Z node 2 :HIVE DEBUG: tx__delete_tablet.cpp:19: HIVE#72057594037968897 THive::TTxDeleteTablet::Execute Tablet 72075186224037889 2026-01-08T15:35:47.976912Z node 2 :HIVE DEBUG: tablet_info.cpp:123: HIVE#72057594037968897 Tablet(DataShard.72075186224037889.Leader.1) VolatileState: Running -> Stopped (Node 2) 2026-01-08T15:35:47.977047Z node 2 :HIVE DEBUG: tablet_info.cpp:518: HIVE#72057594037968897 Sending TEvStopTablet(DataShard.72075186224037889.Leader.1 gen 1) to node 2 2026-01-08T15:35:47.977162Z node 2 :HIVE DEBUG: tx__delete_tablet.cpp:67: HIVE#72057594037968897 THive::TTxDeleteTablet::Execute() result Status: OK Origin: 72057594037968897 TxId_Deprecated: 0 ShardOwnerId: 72057594046644480 ShardLocalIdx: 2
|87.9%| [TM] {BAZEL_UPLOAD} ydb/core/tx/datashard/ut_minstep/unittest
|87.9%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_consistent_copy_tables/unittest
|87.9%| [TM] {BAZEL_UPLOAD} ydb/core/tx/schemeshard/ut_consistent_copy_tables/unittest
------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/datashard/ut_upload_rows/unittest >> TTxDataShardUploadRows::UploadRowsToReplicatedTable [GOOD]
Test command err: ... waiting for SysViewsRoster update finished 2026-01-08T15:35:39.735797Z node 1 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:125: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, got undelivered to scheme cache: ActorUnknown 2026-01-08T15:35:39.884890Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:114:2161], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2026-01-08T15:35:39.885112Z node 1 :METADATA_PROVIDER ERROR: log.cpp:841: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2026-01-08T15:35:39.885210Z node 1 :METADATA_PROVIDER ERROR: log.cpp:841: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2026-01-08T15:35:40.168619Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2026-01-08T15:35:40.168777Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2026-01-08T15:35:40.268248Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1374: Notification cookie mismatch for subscription [1:34:2081] 1767886536651442 != 1767886536651446 2026-01-08T15:35:40.279819Z node 1 :IMPORT WARN: schemeshard_import.cpp:440: Table profiles were not loaded 2026-01-08T15:35:40.328669Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2026-01-08T15:35:40.437086Z node 1 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: PathErrorUnknown, path: Root/.metadata/script_executions ... waiting for SysViewsRoster update finished (done) 2026-01-08T15:35:40.751196Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2026-01-08T15:35:40.765774Z node 1 :TX_CONVEYOR ERROR: log.cpp:841: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2026-01-08T15:35:40.889881Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:35:40.978095Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3119: StateInit, received event# 268828672, Sender [1:877:2759], Recipient [1:886:2765]: NKikimr::TEvTablet::TEvBoot 2026-01-08T15:35:40.979292Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3119: StateInit, received event# 268828673, Sender [1:877:2759], Recipient [1:886:2765]: NKikimr::TEvTablet::TEvRestored 2026-01-08T15:35:40.979645Z node 1 :TX_DATASHARD INFO: datashard.cpp:374: TDataShard::OnActivateExecutor: tablet 72075186224037888 actor [1:886:2765] 2026-01-08T15:35:40.979966Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:661: TxInitSchema.Execute 2026-01-08T15:35:41.028370Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateInactive, received event# 268828684, Sender [1:877:2759], Recipient [1:886:2765]: NKikimr::TEvTablet::TEvFollowerSyncComplete 2026-01-08T15:35:41.029175Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:727: TxInitSchema.Complete 2026-01-08T15:35:41.029285Z node 1 :TX_DATASHARD DEBUG: 
datashard__init.cpp:50: TDataShard::TTxInit::Execute 2026-01-08T15:35:41.032587Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1324: LoadChangeRecords: QueueSize: 0, at tablet: 72075186224037888 2026-01-08T15:35:41.032687Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1380: LoadLockChangeRecords at tablet: 72075186224037888 2026-01-08T15:35:41.032756Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1429: LoadChangeRecordCommits at tablet: 72075186224037888 2026-01-08T15:35:41.033150Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:94: TDataShard::TTxInit::Complete 2026-01-08T15:35:41.033332Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:102: TDataShard::TTxInitRestored::Execute 2026-01-08T15:35:41.033435Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:142: DataShard 72075186224037888 persisting started state actor id [1:901:2765] in generation 1 2026-01-08T15:35:41.046633Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:151: TDataShard::TTxInitRestored::Complete 2026-01-08T15:35:41.115226Z node 1 :TX_DATASHARD INFO: datashard.cpp:418: Switched to work state WaitScheme tabletId 72075186224037888 2026-01-08T15:35:41.115501Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:458: 72075186224037888 not sending time cast registration request in state WaitScheme: missing processing params 2026-01-08T15:35:41.115643Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1259: Change sender created: at tablet: 72075186224037888, actorId: [1:903:2775] 2026-01-08T15:35:41.115722Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1264: Trying to activate change sender: at tablet: 72075186224037888 2026-01-08T15:35:41.115786Z node 1 :TX_DATASHARD INFO: datashard.cpp:1281: Cannot activate change sender: at tablet: 72075186224037888, state: WaitScheme 2026-01-08T15:35:41.115831Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2026-01-08T15:35:41.116117Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3153: StateWork, received event# 2146435072, Sender [1:886:2765], Recipient [1:886:2765]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvProgressTransaction 2026-01-08T15:35:41.116172Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3178: StateWork, processing event TEvPrivate::TEvProgressTransaction 2026-01-08T15:35:41.116554Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 72075186224037888 2026-01-08T15:35:41.116669Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 72075186224037888 2026-01-08T15:35:41.116771Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2026-01-08T15:35:41.116813Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 0 2026-01-08T15:35:41.116867Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:385: Check unit PlanQueue at 72075186224037888 2026-01-08T15:35:41.116928Z node 1 :TX_DATASHARD TRACE: plan_queue_unit.cpp:59: TPlanQueueUnit at 72075186224037888 has no attached operations 2026-01-08T15:35:41.116990Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:403: Unit PlanQueue has no ready operations at 72075186224037888 2026-01-08T15:35:41.117030Z node 1 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186224037888 TxInFly 0 2026-01-08T15:35:41.117077Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 
72075186224037888 2026-01-08T15:35:41.117549Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3153: StateWork, received event# 269877761, Sender [1:887:2766], Recipient [1:886:2765]: NKikimr::TEvTabletPipe::TEvServerConnected 2026-01-08T15:35:41.117591Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3189: StateWork, processing event TEvTabletPipe::TEvServerConnected 2026-01-08T15:35:41.117634Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3740: Server connected at leader tablet# 72075186224037888, clientId# [1:882:2762], serverId# [1:887:2766], sessionId# [0:0:0] 2026-01-08T15:35:41.117748Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3153: StateWork, received event# 269549568, Sender [1:400:2399], Recipient [1:887:2766] 2026-01-08T15:35:41.117788Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3159: StateWork, processing event TEvDataShard::TEvProposeTransaction 2026-01-08T15:35:41.117914Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037888 2026-01-08T15:35:41.118207Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1848: Trying to execute [0:281474976715657] at 72075186224037888 on unit CheckSchemeTx 2026-01-08T15:35:41.118282Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:133: Propose scheme transaction at tablet 72075186224037888 txId 281474976715657 ssId 72057594046644480 seqNo 2:1 2026-01-08T15:35:41.118412Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:221: Prepared scheme transaction txId 281474976715657 at tablet 72075186224037888 2026-01-08T15:35:41.118469Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1883: Execution status for [0:281474976715657] at 72075186224037888 is ExecutedNoMoreRestarts 2026-01-08T15:35:41.118528Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1931: Advance execution plan for [0:281474976715657] at 72075186224037888 executing on unit CheckSchemeTx 2026-01-08T15:35:41.118576Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1937: Add [0:281474976715657] at 72075186224037888 to execution unit StoreSchemeTx 2026-01-08T15:35:41.118616Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1848: Trying to execute [0:281474976715657] at 72075186224037888 on unit StoreSchemeTx 2026-01-08T15:35:41.118937Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1883: Execution status for [0:281474976715657] at 72075186224037888 is DelayCompleteNoMoreRestarts 2026-01-08T15:35:41.118985Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1931: Advance execution plan for [0:281474976715657] at 72075186224037888 executing on unit StoreSchemeTx 2026-01-08T15:35:41.119029Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1937: Add [0:281474976715657] at 72075186224037888 to execution unit FinishPropose 2026-01-08T15:35:41.119068Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1848: Trying to execute [0:281474976715657] at 72075186224037888 on unit FinishPropose 2026-01-08T15:35:41.119147Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1883: Execution status for [0:281474976715657] at 72075186224037888 is DelayComplete 2026-01-08T15:35:41.119195Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1931: Advance execution plan for [0:281474976715657] at 72075186224037888 executing on unit FinishPropose 2026-01-08T15:35:41.119250Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1937: Add [0:281474976715657] at 72075186224037888 to execution unit WaitForPlan 2026-01-08T15:35:41.119288Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1848: Trying to execute [0:281474976715657] at 
72075186224037888 on unit WaitForPlan 2026-01-08T15:35:41.119317Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1853: Operation [0:281474976715657] at 72075186224037888 is not ready to execute on unit WaitForPlan 2026-01-08T15:35:41.121091Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3153: StateWork, received event# 269746185, Sender [1:904:2776], Recipient [1:886:2765]: NKikimr::TEvTxProxySchemeCache::TEvWatchNotifyUpdated 2026-01-08T15:35:41.121166Z node 1 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:180: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2026-01-08T15:35:41.135034Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 72075186224037888 2026-01-08T15:35:41.135122Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1954: Complete execution for [0:281474 ... 73: [WorkloadService] [TCleanupTablesActor] ActorId: [2:300:2344], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2026-01-08T15:35:47.389175Z node 2 :METADATA_PROVIDER ERROR: log.cpp:841: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2026-01-08T15:35:47.389318Z node 2 :METADATA_PROVIDER ERROR: log.cpp:841: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2026-01-08T15:35:47.639628Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2026-01-08T15:35:47.639786Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2026-01-08T15:35:47.667614Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1374: Notification cookie mismatch for subscription [2:34:2081] 1767886544214336 != 1767886544214339 2026-01-08T15:35:47.675357Z node 2 :IMPORT WARN: schemeshard_import.cpp:440: Table profiles were not loaded 2026-01-08T15:35:47.722059Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2026-01-08T15:35:47.811430Z node 2 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: PathErrorUnknown, path: Root/.metadata/script_executions ... waiting for SysViewsRoster update finished (done) 2026-01-08T15:35:48.161464Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2026-01-08T15:35:48.176397Z node 2 :TX_CONVEYOR ERROR: log.cpp:841: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2026-01-08T15:35:48.300737Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:35:48.324195Z node 2 :TX_DATASHARD INFO: datashard.cpp:374: TDataShard::OnActivateExecutor: tablet 72075186224037888 actor [2:886:2765] 2026-01-08T15:35:48.324461Z node 2 :TX_DATASHARD DEBUG: datashard__init.cpp:661: TxInitSchema.Execute 2026-01-08T15:35:48.379949Z node 2 :TX_DATASHARD DEBUG: datashard__init.cpp:727: TxInitSchema.Complete 2026-01-08T15:35:48.380117Z node 2 :TX_DATASHARD DEBUG: datashard__init.cpp:50: TDataShard::TTxInit::Execute 2026-01-08T15:35:48.382235Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:1324: LoadChangeRecords: QueueSize: 0, at tablet: 72075186224037888 2026-01-08T15:35:48.382360Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:1380: LoadLockChangeRecords at tablet: 72075186224037888 2026-01-08T15:35:48.382433Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:1429: LoadChangeRecordCommits at tablet: 72075186224037888 2026-01-08T15:35:48.382967Z node 2 :TX_DATASHARD DEBUG: datashard__init.cpp:94: TDataShard::TTxInit::Complete 2026-01-08T15:35:48.383141Z node 2 :TX_DATASHARD 
DEBUG: datashard__init.cpp:102: TDataShard::TTxInitRestored::Execute 2026-01-08T15:35:48.383249Z node 2 :TX_DATASHARD DEBUG: datashard__init.cpp:142: DataShard 72075186224037888 persisting started state actor id [2:901:2765] in generation 1 2026-01-08T15:35:48.394210Z node 2 :TX_DATASHARD DEBUG: datashard__init.cpp:151: TDataShard::TTxInitRestored::Complete 2026-01-08T15:35:48.394342Z node 2 :TX_DATASHARD INFO: datashard.cpp:418: Switched to work state WaitScheme tabletId 72075186224037888 2026-01-08T15:35:48.394483Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:458: 72075186224037888 not sending time cast registration request in state WaitScheme: missing processing params 2026-01-08T15:35:48.394607Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:1259: Change sender created: at tablet: 72075186224037888, actorId: [2:903:2775] 2026-01-08T15:35:48.394655Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:1264: Trying to activate change sender: at tablet: 72075186224037888 2026-01-08T15:35:48.394700Z node 2 :TX_DATASHARD INFO: datashard.cpp:1281: Cannot activate change sender: at tablet: 72075186224037888, state: WaitScheme 2026-01-08T15:35:48.394743Z node 2 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2026-01-08T15:35:48.395208Z node 2 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 72075186224037888 2026-01-08T15:35:48.395337Z node 2 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 72075186224037888 2026-01-08T15:35:48.395454Z node 2 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2026-01-08T15:35:48.395514Z node 2 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 0 2026-01-08T15:35:48.395571Z node 2 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186224037888 TxInFly 0 2026-01-08T15:35:48.395624Z node 2 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2026-01-08T15:35:48.395768Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:3740: Server connected at leader tablet# 72075186224037888, clientId# [2:883:2763], serverId# [2:888:2766], sessionId# [0:0:0] 2026-01-08T15:35:48.398463Z node 2 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037888 2026-01-08T15:35:48.398782Z node 2 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:133: Propose scheme transaction at tablet 72075186224037888 txId 281474976715657 ssId 72057594046644480 seqNo 2:1 2026-01-08T15:35:48.398878Z node 2 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:221: Prepared scheme transaction txId 281474976715657 at tablet 72075186224037888 2026-01-08T15:35:48.400994Z node 2 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:180: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2026-01-08T15:35:48.416021Z node 2 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 72075186224037888 2026-01-08T15:35:48.416229Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:468: 72075186224037888 not sending time cast registration request in state WaitScheme 2026-01-08T15:35:48.544365Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:3740: Server connected at leader tablet# 72075186224037888, clientId# [2:917:2783], serverId# 
[2:919:2785], sessionId# [0:0:0] 2026-01-08T15:35:48.545071Z node 2 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:69: Planned transaction txId 281474976715657 at step 2000 at tablet 72075186224037888 { Transactions { TxId: 281474976715657 AckTo { RawX1: 0 RawX2: 0 } } Step: 2000 MediatorID: 72057594046382081 TabletID: 72075186224037888 } 2026-01-08T15:35:48.545140Z node 2 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2026-01-08T15:35:48.545767Z node 2 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2026-01-08T15:35:48.545830Z node 2 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 1 2026-01-08T15:35:48.545879Z node 2 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:333: Found ready operation [2000:281474976715657] in PlanQueue unit at 72075186224037888 2026-01-08T15:35:48.546201Z node 2 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:634: LoadTxDetails at 72075186224037888 loaded tx from db 2000:281474976715657 keys extracted: 0 2026-01-08T15:35:48.550909Z node 2 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 (dry run) active 1 active planned 1 immediate 0 planned 1 2026-01-08T15:35:48.552083Z node 2 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2026-01-08T15:35:48.552177Z node 2 :TX_DATASHARD INFO: create_table_unit.cpp:69: Trying to CREATE TABLE at 72075186224037888 tableId# [OwnerId: 72057594046644480, LocalPathId: 38] schema version# 1 2026-01-08T15:35:48.552710Z node 2 :TX_DATASHARD INFO: datashard.cpp:476: Send registration request to time cast Ready tabletId 72075186224037888 mediators count is 1 coordinators count is 1 buckets per mediator 2 2026-01-08T15:35:48.553185Z node 2 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 (dry run) active 0 active planned 0 immediate 0 planned 0 2026-01-08T15:35:48.559317Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:3772: Got TEvMediatorTimecast::TEvRegisterTabletResult at 72075186224037888 time 0 2026-01-08T15:35:48.559412Z node 2 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2026-01-08T15:35:48.560106Z node 2 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:98: Sending '{TEvPlanStepAccepted TabletId# 72075186224037888 step# 2000} 2026-01-08T15:35:48.560194Z node 2 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2026-01-08T15:35:48.561765Z node 2 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2026-01-08T15:35:48.561819Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:1264: Trying to activate change sender: at tablet: 72075186224037888 2026-01-08T15:35:48.561869Z node 2 :TX_DATASHARD INFO: datashard.cpp:1302: Change sender activated: at tablet: 72075186224037888 2026-01-08T15:35:48.561950Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:810: Complete [2000 : 281474976715657] from 72075186224037888 at tablet 72075186224037888 send result to client [2:399:2398], exec latency: 0 ms, propose latency: 0 ms 2026-01-08T15:35:48.562011Z node 2 :TX_DATASHARD INFO: datashard.cpp:1599: 72075186224037888 Sending notify to schemeshard 72057594046644480 txId 281474976715657 state Ready TxInFly 0 2026-01-08T15:35:48.562102Z node 2 :TX_DATASHARD DEBUG: 
cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2026-01-08T15:35:48.563647Z node 2 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:180: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2026-01-08T15:35:48.565813Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:3790: Got TEvMediatorTimecast::TEvSubscribeReadStepResult at 72075186224037888 coordinator 72057594046316545 last step 0 next step 2000 2026-01-08T15:35:48.566176Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:2965: Handle TEvSchemaChangedResult 281474976715657 datashard 72075186224037888 state Ready 2026-01-08T15:35:48.566246Z node 2 :TX_DATASHARD DEBUG: datashard__schema_changed.cpp:22: 72075186224037888 Got TEvSchemaChangedResult from SS at 72075186224037888 2026-01-08T15:35:48.583514Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:3740: Server connected at leader tablet# 72075186224037888, clientId# [2:953:2811], serverId# [2:954:2812], sessionId# [0:0:0] 2026-01-08T15:35:48.583720Z node 2 :TX_DATASHARD NOTICE: datashard__op_rows.cpp:209: Rejecting bulk upsert request on datashard: tablet# 72075186224037888, error# Can't execute bulk upsert at replicated table
|87.9%| [TM] {BAZEL_UPLOAD} ydb/core/tx/datashard/ut_upload_rows/unittest
>> BsControllerConfig::SelectAllGroups [GOOD]
>> BsControllerConfig::SoleCommandErrorWhenCombined
>> BsControllerConfig::PDiskCreate [GOOD]
>> BsControllerConfig::ReassignGroupDisk
|87.9%| [TA] $(B)/ydb/core/tx/datashard/ut_minstep/test-results/unittest/{meta.json ... results_accumulator.log}
|87.9%| [TA] {BAZEL_UPLOAD} $(B)/ydb/core/tx/datashard/ut_minstep/test-results/unittest/{meta.json ... results_accumulator.log}
>> TTxDataShardUploadRows::TestUploadShadowRowsShadowDataPublishThenSplit [GOOD]
>> TTxDataShardUploadRows::TestUploadShadowRowsShadowDataAlterSplitThenPublish
------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/datashard/ut_incremental_restore_scan/unittest >> IncrementalRestoreScan::ChangeSenderEmpty [GOOD]
Test command err: ... waiting for SysViewsRoster update finished 2026-01-08T15:35:47.276270Z node 1 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:125: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, got undelivered to scheme cache: ActorUnknown 2026-01-08T15:35:47.412499Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:114:2161], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2026-01-08T15:35:47.412890Z node 1 :METADATA_PROVIDER ERROR: log.cpp:841: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2026-01-08T15:35:47.413043Z node 1 :METADATA_PROVIDER ERROR: log.cpp:841: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2026-01-08T15:35:47.704235Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2026-01-08T15:35:47.704358Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2026-01-08T15:35:47.793140Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1374: Notification cookie mismatch for subscription [1:34:2081] 1767886544302444 != 1767886544302448 2026-01-08T15:35:47.804413Z node 1 :IMPORT WARN: schemeshard_import.cpp:440: Table profiles were not loaded 2026-01-08T15:35:47.852746Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2026-01-08T15:35:47.962040Z node 1 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: PathErrorUnknown, path: Root/.metadata/script_executions ... waiting for SysViewsRoster update finished (done) 2026-01-08T15:35:48.379776Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:377: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "Root" StoragePools { Name: "/Root:test" Kind: "test" } } } TxId: 1 TabletId: 72057594046644480 , at schemeshard: 72057594046644480 2026-01-08T15:35:48.380057Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //Root, opId: 1:0, at schemeshard: 72057594046644480 2026-01-08T15:35:48.380324Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:600: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046644480, LocalPathId: 1] was 1 2026-01-08T15:35:48.380377Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5553: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046644480, LocalPathId: 1] source path: 2026-01-08T15:35:48.380655Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:127: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046644480 2026-01-08T15:35:48.380774Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2026-01-08T15:35:48.381734Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:460: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046644480 PathId: 1, at schemeshard: 72057594046644480 2026-01-08T15:35:48.381912Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, 
operation: ALTER DATABASE, path: //Root 2026-01-08T15:35:48.382170Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:494: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046644480 2026-01-08T15:35:48.382247Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046644480 2026-01-08T15:35:48.382309Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2026-01-08T15:35:48.382387Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2700: Change state for txid 1:0 2 -> 3 2026-01-08T15:35:48.383175Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:494: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046644480 2026-01-08T15:35:48.383231Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046644480 2026-01-08T15:35:48.383277Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2700: Change state for txid 1:0 3 -> 128 2026-01-08T15:35:48.383827Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:494: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046644480 2026-01-08T15:35:48.383892Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046644480 2026-01-08T15:35:48.383969Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046644480 2026-01-08T15:35:48.384020Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1723: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2026-01-08T15:35:48.388306Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1792: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046644480 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2026-01-08T15:35:48.389091Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:665: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046644480 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2026-01-08T15:35:48.389264Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1824: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 2026-01-08T15:35:48.390008Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__notify.cpp:31: NotifyTxCompletion operation in-flight, txId: 1, at schemeshard: 72057594046644480 2026-01-08T15:35:48.390062Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1679: TOperation IsReadyToNotify, TxId: 1, ready parts: 0/1, is published: true 2026-01-08T15:35:48.390125Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__notify.cpp:132: NotifyTxCompletion transaction is registered, txId: 1, at schemeshard: 72057594046644480 2026-01-08T15:35:48.401118Z node 1 :TX_CONVEYOR ERROR: log.cpp:841: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2026-01-08T15:35:48.516005Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:685: 
TTxOperationPlanStep Execute, stepId: 1500, transactions count in step: 1, at schemeshard: 72057594046644480 2026-01-08T15:35:48.516220Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:689: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 AckTo { RawX1: 0 RawX2: 0 } } Step: 1500 MediatorID: 72057594046382081 TabletID: 72057594046644480, at schemeshard: 72057594046644480 2026-01-08T15:35:48.516283Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046644480 2026-01-08T15:35:48.516589Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2700: Change state for txid 1:0 128 -> 240 2026-01-08T15:35:48.516671Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046644480 2026-01-08T15:35:48.516871Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:600: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046644480, LocalPathId: 1] was 2 2026-01-08T15:35:48.516947Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:412: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046644480, LocalPathId: 1], at schemeshard: 72057594046644480 2026-01-08T15:35:48.517927Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046644480 2026-01-08T15:35:48.517997Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046644480, txId: 1, path id: [OwnerId: 72057594046644480, LocalPathId: 1] 2026-01-08T15:35:48.518229Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046644480 2026-01-08T15:35:48.518283Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:559:2493], at schemeshard: 72057594046644480, txId: 1, path id: 1 2026-01-08T15:35:48.522359Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:494: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046644480 2026-01-08T15:35:48.522433Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:497: [72057594046644480] TDone opId# 1:0 ProgressState 2026-01-08T15:35:48.522580Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:928: Part operation is done id#1:0 progress is 1/1 2026-01-08T15:35:48.522626Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1714: TOperation IsReadyToDone TxId: 1 ready parts: 1/1 2026-01-08T15:35:48.522680Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:928: Part operation is done id#1:0 progress is 1/1 2026-01-08T15:35:48.522742Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1714: TOperation IsReadyToDone TxId: 1 ready parts: 1/1 2026-01-08T15:35:48.522795Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1679: TOperation IsReadyToNotify, TxId: 1, ready parts: 1/1, is published: false 2026-01-08T15:35:48.522845Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1714: TOperation IsReadyToDone TxId: 1 ready parts: 1/1 2026-01-08T15:35:48.522888Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: 
schemeshard__operation_side_effects.cpp:995: Operation and all the parts is done, operation id: 1:0 2026-01-08T15:35:48.522940Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5577: RemoveTx for txid 1:0 2026-01-08T15:35:48.523027Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:611: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046644480, LocalPathId: 1] was 3 2026-01-08T15:35:48.523072Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:1004: Publication still in progress, tx: 1, publications: 1, subscribers: 1 2026-01-08T15:35:48.523132Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:1011: Publication details: tx: 1, [OwnerId: 72057594046644480, LocalPathId: 1], 6 2026-01-08T15:35:48.525913Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6274: Handle TEvUpdateAck, at schemeshar ... ountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 536870912 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen2" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 8388608 UpliftPartSize: 8388608 } Generation { GenerationId: 2 SizeToCompact: 419430400 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 17179869184 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen3" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 83886080 UpliftPartSize: 83886080 } BackupBrokerQueue: 1 DefaultTaskPriority: 5 BackgroundSnapshotPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } InMemResourceBrokerTask: "compaction_gen0" SnapshotResourceBrokerTask: "compaction_gen0" BackupResourceBrokerTask: "scan" LogOverheadSizeToSnapshot: 16777216 LogOverheadCountToSnapshot: 500 DroppedRowsPercentToCompact: 50 MinBTreeIndexNodeSize: 7168 MinBTreeIndexNodeKeys: 6 } PartitioningPolicy { MinPartitionsCount: 1 } ColumnFamilies { Id: 0 Name: "default" StorageConfig { SysLog { PreferredPoolKind: "test" } Log { PreferredPoolKind: "test" } Data { PreferredPoolKind: "test" } } } } TableSchemaVersion: 1 IsBackup: false IsRestore: false } TableStats { DataSize: 0 RowCount: 0 IndexSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 0 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 1 RangeReadRows: 0 StoragePools { } ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 0 Memory: 0 Network: 0 Storage: 0 ReadThroughput: 0 WriteThroughput: 0 ReadIops: 0 WriteIops: 0 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 500 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } DomainKey { SchemeShard: 72057594046644480 PathId: 1 } PathsInside: 38 PathsLimit: 10000 ShardsInside: 2 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046644480 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 
IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 38 PathOwnerId: 72057594046644480 2026-01-08T15:35:49.220980Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:402: actor# [1:68:2115] Handle TEvNavigate describe path /Root/IncrBackupTable 2026-01-08T15:35:49.221108Z node 1 :TX_PROXY DEBUG: describe.cpp:270: Actor# [1:1024:2865] HANDLE EvNavigateScheme /Root/IncrBackupTable 2026-01-08T15:35:49.221645Z node 1 :TX_PROXY DEBUG: describe.cpp:354: Actor# [1:1024:2865] HANDLE EvNavigateKeySetResult TDescribeReq marker# P5 ErrorCount# 0 2026-01-08T15:35:49.221733Z node 1 :TX_PROXY DEBUG: describe.cpp:433: Actor# [1:1024:2865] SEND to# 72057594046644480 shardToRequest NKikimrSchemeOp.TDescribePath Path: "/Root/IncrBackupTable" 2026-01-08T15:35:49.223589Z node 1 :TX_PROXY DEBUG: describe.cpp:446: Actor# [1:1024:2865] Handle TEvDescribeSchemeResult Forward to# [1:829:2724] Cookie: 0 TEvDescribeSchemeResult: NKikimrScheme.TEvDescribeSchemeResult PreSerializedData size# 20 Record# Status: StatusSuccess Path: "/Root/IncrBackupTable" PathDescription { Self { Name: "IncrBackupTable" PathId: 39 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976715658 CreateStep: 2500 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "IncrBackupTable" Columns { Name: "key" Type: "Uint32" TypeId: 2 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "value" Type: "Uint32" TypeId: 2 Id: 2 NotNull: false IsBuildInProgress: false } Columns { Name: "__ydb_incrBackupImpl_changeMetadata" Type: "String" TypeId: 4097 Id: 3 NotNull: false IsBuildInProgress: false } KeyColumnNames: "key" KeyColumnIds: 1 PartitionConfig { CompactionPolicy { InMemSizeToSnapshot: 4194304 InMemStepsToSnapshot: 300 InMemForceStepsToSnapshot: 500 InMemForceSizeToSnapshot: 16777216 InMemCompactionBrokerQueue: 0 ReadAheadHiThreshold: 67108864 ReadAheadLoThreshold: 16777216 MinDataPageSize: 7168 SnapBrokerQueue: 0 Generation { GenerationId: 0 SizeToCompact: 0 CountToCompact: 8 ForceCountToCompact: 8 ForceSizeToCompact: 134217728 CompactionBrokerQueue: 4294967295 KeepInCache: true BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen1" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 0 UpliftPartSize: 0 } Generation { GenerationId: 1 SizeToCompact: 41943040 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 536870912 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: 
"compaction_gen2" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 8388608 UpliftPartSize: 8388608 } Generation { GenerationId: 2 SizeToCompact: 419430400 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 17179869184 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen3" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 83886080 UpliftPartSize: 83886080 } BackupBrokerQueue: 1 DefaultTaskPriority: 5 BackgroundSnapshotPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } InMemResourceBrokerTask: "compaction_gen0" SnapshotResourceBrokerTask: "compaction_gen0" BackupResourceBrokerTask: "scan" LogOverheadSizeToSnapshot: 16777216 LogOverheadCountToSnapshot: 500 DroppedRowsPercentToCompact: 50 MinBTreeIndexNodeSize: 7168 MinBTreeIndexNodeKeys: 6 } PartitioningPolicy { MinPartitionsCount: 1 } ColumnFamilies { Id: 0 Name: "default" StorageConfig { SysLog { PreferredPoolKind: "test" } Log { PreferredPoolKind: "test" } Data { PreferredPoolKind: "test" } } } } TableSchemaVersion: 1 IsBackup: false IsRestore: false } TableStats { DataSize: 0 RowCount: 0 IndexSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 0 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 1 RangeReadRows: 0 StoragePools { } ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 0 Memory: 0 Network: 0 Storage: 0 ReadThroughput: 0 WriteThroughput: 0 ReadIops: 0 WriteIops: 0 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 500 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } DomainKey { SchemeShard: 72057594046644480 PathId: 1 } PathsInside: 38 PathsLimit: 10000 ShardsInside: 2 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046644480 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 39 PathOwnerId: 72057594046644480 2026-01-08T15:35:49.224581Z node 1 :CHANGE_EXCHANGE DEBUG: change_sender_table_base.h:65: [IncrRestoreChangeSenderMain][[OwnerId: 72057594046644480, LocalPathId: 38]][1:1026:2867] HandleUserTable TEvTxProxySchemeCache::TEvNavigateKeySetResult: result# { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: Root/IncrBackupTable TableId: [72057594046644480:39:1] RequestType: ByTableId Operation: OpTable RedirectRequired: true ShowPrivatePath: true SyncVersion: false Status: Ok Kind: KindTable DomainInfo { DomainKey: 
[OwnerId: 72057594046644480, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] Params { Version: 1 PlanResolution: 500 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } }] } 2026-01-08T15:35:49.224896Z node 1 :CHANGE_EXCHANGE DEBUG: change_sender_table_base.h:131: [IncrRestoreChangeSenderMain][[OwnerId: 72057594046644480, LocalPathId: 38]][1:1026:2867] HandleTargetTable TEvTxProxySchemeCache::TEvNavigateKeySetResult: result# { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: Root/Table TableId: [72057594046644480:38:1] RequestType: ByTableId Operation: OpTable RedirectRequired: true ShowPrivatePath: true SyncVersion: false Status: Ok Kind: KindTable DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] Params { Version: 1 PlanResolution: 500 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } }] } 2026-01-08T15:35:49.225289Z node 1 :CHANGE_EXCHANGE DEBUG: change_sender_table_base.h:227: [IncrRestoreChangeSenderMain][[OwnerId: 72057594046644480, LocalPathId: 38]][1:1026:2867] HandleKeys TEvTxProxySchemeCache::TEvResolveKeySetResult: result# { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 ResultSet [{ TableId: [OwnerId: 72057594046644480, LocalPathId: 38] Access: 0 SyncVersion: false Status: OkData Kind: KindRegularTable PartitionsCount: 1 DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] Params { Version: 1 PlanResolution: 500 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } From: (Uint32 : NULL) IncFrom: 1 To: () IncTo: 0 }] } 2026-01-08T15:35:49.225537Z node 1 :CHANGE_EXCHANGE DEBUG: change_sender_incr_restore.cpp:176: [IncrRestoreChangeSenderMain][[OwnerId: 72057594046644480, LocalPathId: 38]][1:1026:2867] Handle NKikimr::NDataShard::TEvIncrementalRestoreScan::TEvNoMoreData |87.9%| [TM] {BAZEL_UPLOAD} ydb/core/tx/datashard/ut_incremental_restore_scan/unittest >> BsControllerConfig::MergeIntersectingBoxes [GOOD] >> BsControllerConfig::MoveGroups >> BsControllerConfig::SoleCommandErrorWhenCombined [GOOD] >> IncrementalRestoreScan::Empty [GOOD] >> IncrementalRestoreScan::ChangeSenderSimple [GOOD] |87.9%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_consistent_copy_tables/unittest |87.9%| [TM] {BAZEL_UPLOAD} ydb/core/tx/schemeshard/ut_consistent_copy_tables/unittest ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/provider/ut/unittest >> KikimrIcGateway::TestLoadDataSourceProperties-UseSchemaSecrets [GOOD] Test command err: Trying to start YDB, gRPC: 20745, MsgBus: 23933 2026-01-08T15:34:56.246030Z node 1 :METADATA_PROVIDER WARN: log.cpp:841: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7593014683547676048:2166];send_to=[0:7307199536658146131:7762515]; 2026-01-08T15:34:56.246129Z node 1 :METADATA_PROVIDER ERROR: log.cpp:841: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2026-01-08T15:34:56.291184Z node 1 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:125: 
[ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, got undelivered to scheme cache: ActorUnknown 2026-01-08T15:34:56.685327Z node 1 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: LookupError, path: Root/.metadata/script_executions 2026-01-08T15:34:56.708283Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2026-01-08T15:34:56.708382Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2026-01-08T15:34:56.712703Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2026-01-08T15:34:56.898160Z node 1 :IMPORT WARN: schemeshard_import.cpp:440: Table profiles were not loaded 2026-01-08T15:34:56.901086Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1374: Notification cookie mismatch for subscription [1:7593014683547675919:2081] 1767886496235184 != 1767886496235187 2026-01-08T15:34:56.927222Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2026-01-08T15:34:56.927250Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2026-01-08T15:34:56.927276Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2026-01-08T15:34:56.927359Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2026-01-08T15:34:56.952985Z node 1 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: PathErrorUnknown, path: Root/.metadata/script_executions 2026-01-08T15:34:57.260744Z node 1 :TX_CONVEYOR ERROR: log.cpp:841: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2026-01-08T15:34:57.425575Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2026-01-08T15:34:57.438636Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:123: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2026-01-08T15:34:57.549489Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:34:57.817238Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:34:58.124887Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but 
propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:34:58.270829Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:35:00.843323Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7593014700727546973:2400], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:35:00.843513Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:263: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:35:00.844017Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7593014700727546983:2401], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:35:00.844079Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:263: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:35:01.176682Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:35:01.242876Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:35:01.253399Z node 1 :METADATA_PROVIDER ERROR: log.cpp:841: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7593014683547676048:2166];send_to=[0:7307199536658146131:7762515]; 2026-01-08T15:35:01.253637Z node 1 :METADATA_PROVIDER ERROR: log.cpp:841: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2026-01-08T15:35:01.322846Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:35:01.410903Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:35:01.463336Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:35:01.548619Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:35:01.619007Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:35:01.691175Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715669:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:35:01.807794Z node 1 :KQP_WORKLOAD_SERVICE WARN: 
scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7593014705022515159:2484], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:35:01.807877Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:263: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:35:01.808643Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7593014705022515164:2487], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:35:01.808709Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7593014705022515165:2488], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:35:01.808795Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:263: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:35:01.813600Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715670:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2026-01-08T15:35:01.838190Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7593014705022515168:2489], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715670 completed, doublechecking } 2026-01-08T15:35:01.911493Z node 1 :TX_PROXY ERROR: schemereq.cpp:624: Actor# [1:7593014705022515221:3769] txid# 281474976715671, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 53], type: E ... _operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:35:34.106422Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:35:34.227526Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:35:37.180829Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7593014862239575333:2400], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:35:37.180954Z node 4 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:263: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:35:37.181217Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7593014862239575343:2401], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:35:37.181273Z node 4 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:263: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:35:37.287237Z node 4 :METADATA_PROVIDER ERROR: log.cpp:841: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[4:7593014840764737021:2076];send_to=[0:7307199536658146131:7762515]; 2026-01-08T15:35:37.287324Z node 4 :METADATA_PROVIDER ERROR: log.cpp:841: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2026-01-08T15:35:37.323260Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:35:37.369515Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:35:37.413144Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:35:37.457464Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:35:37.497306Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:35:37.564124Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:35:37.610275Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:35:37.669675Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710669:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:35:37.777154Z node 4 :KQP_WORKLOAD_SERVICE WARN: 
scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7593014862239576223:2484], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:35:37.777234Z node 4 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:263: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:35:37.777483Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7593014862239576228:2487], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:35:37.777512Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7593014862239576229:2488], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:35:37.777534Z node 4 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:263: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:35:37.782085Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710670:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2026-01-08T15:35:37.808972Z node 4 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [4:7593014862239576232:2489], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710670 completed, doublechecking } 2026-01-08T15:35:37.883470Z node 4 :TX_PROXY ERROR: schemereq.cpp:624: Actor# [4:7593014862239576283:3777] txid# 281474976710671, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 53], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2026-01-08T15:35:40.439495Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710673:1, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:35:41.040485Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976710678:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) 2026-01-08T15:35:41.899557Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710683:1, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:35:42.628616Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710688:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:35:43.212786Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976710695:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) 2026-01-08T15:35:43.907336Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976710700:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) 2026-01-08T15:35:44.393587Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpInitiateBuildIndexImplTable, opId: 281474976715758:2, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:35:44.452815Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpFinalizeBuildIndexMainTable, opId: 281474976715759:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_finalize_build_index.cpp:383) 2026-01-08T15:35:47.474411Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7747: 
Cannot get console configs 2026-01-08T15:35:47.474446Z node 4 :IMPORT WARN: schemeshard_import.cpp:440: Table profiles were not loaded 2026-01-08T15:35:48.745142Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateExternalDataSource, opId: 281474976710734:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_external_data_source.cpp:268) |87.9%| [TM] {BAZEL_UPLOAD} ydb/core/kqp/provider/ut/unittest >> CdcStreamChangeCollector::SchemaChanges [GOOD] >> TxUsage::WriteToTopic_Demo_21_RestartAfterCommit_Query [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/mind/bscontroller/ut_bscontroller/unittest >> BsControllerConfig::SoleCommandErrorWhenCombined [GOOD] Test command err: 2026-01-08T15:35:48.431996Z node 1 :BS_CONTROLLER DEBUG: {BSC05@impl.h:2080} StateInit event Type# 268828672 Event# NKikimr::TEvTablet::TEvBoot 2026-01-08T15:35:48.433346Z node 1 :BS_CONTROLLER DEBUG: {BSC05@impl.h:2080} StateInit event Type# 268828673 Event# NKikimr::TEvTablet::TEvRestored 2026-01-08T15:35:48.433787Z node 1 :BS_CONTROLLER DEBUG: {BSC22@console_interaction.cpp:14} Console interaction started 2026-01-08T15:35:48.436001Z node 1 :BS_CONTROLLER DEBUG: {BSC05@impl.h:2080} StateInit event Type# 268828684 Event# NKikimr::TEvTablet::TEvFollowerSyncComplete 2026-01-08T15:35:48.436303Z node 1 :BS_CONTROLLER DEBUG: {BSC05@impl.h:2080} StateInit event Type# 268639244 Event# NKikimr::TEvNodeWardenStorageConfig 2026-01-08T15:35:48.436436Z node 1 :BS_CONTROLLER DEBUG: {BSC05@impl.h:2080} StateInit event Type# 131082 Event# NActors::TEvInterconnect::TEvNodesInfo 2026-01-08T15:35:48.436457Z node 1 :BS_CONTROLLER DEBUG: {BSC01@bsc.cpp:829} Handle TEvInterconnect::TEvNodesInfo 2026-01-08T15:35:48.436645Z node 1 :BS_CONTROLLER DEBUG: {BSCTXIS01@init_scheme.cpp:17} TTxInitScheme Execute 2026-01-08T15:35:48.445327Z node 1 :BS_CONTROLLER DEBUG: {BSCTXIS03@init_scheme.cpp:44} TTxInitScheme Complete 2026-01-08T15:35:48.445424Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM01@migrate.cpp:190} Execute tx 2026-01-08T15:35:48.445568Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM02@migrate.cpp:251} Complete tx IncompatibleData# false 2026-01-08T15:35:48.445643Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxTrimUnusedSlots 2026-01-08T15:35:48.445723Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxTrimUnusedSlots 2026-01-08T15:35:48.445780Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateSchemaVersion 2026-01-08T15:35:48.656781Z node 1 :BS_CONTROLLER ERROR: {BSC07@impl.h:2204} ProcessControllerEvent event processing took too much time Type# 268637706 Duration# 0.148314s 2026-01-08T15:35:48.656954Z node 1 :BS_CONTROLLER ERROR: {BSC00@bsc.cpp:1009} StateWork event processing took too much time Type# 2146435078 Duration# 0.148519s Leader for TabletID 72057594037932033 is [0:0:0] sender: [11:26:2057] recipient: [11:24:2068] IGNORE Leader for TabletID 72057594037932033 is [0:0:0] sender: [11:26:2057] recipient: [11:24:2068] Leader for TabletID 72057594037932033 is [11:28:2070] sender: [11:29:2057] recipient: [11:24:2068] 2026-01-08T15:35:50.719427Z node 
11 :BS_CONTROLLER DEBUG: {BSC05@impl.h:2080} StateInit event Type# 268828672 Event# NKikimr::TEvTablet::TEvBoot 2026-01-08T15:35:50.720486Z node 11 :BS_CONTROLLER DEBUG: {BSC05@impl.h:2080} StateInit event Type# 268828673 Event# NKikimr::TEvTablet::TEvRestored 2026-01-08T15:35:50.720734Z node 11 :BS_CONTROLLER DEBUG: {BSC22@console_interaction.cpp:14} Console interaction started 2026-01-08T15:35:50.722024Z node 11 :BS_CONTROLLER DEBUG: {BSC05@impl.h:2080} StateInit event Type# 268828684 Event# NKikimr::TEvTablet::TEvFollowerSyncComplete 2026-01-08T15:35:50.722591Z node 11 :BS_CONTROLLER DEBUG: {BSC05@impl.h:2080} StateInit event Type# 268639244 Event# NKikimr::TEvNodeWardenStorageConfig 2026-01-08T15:35:50.722881Z node 11 :BS_CONTROLLER DEBUG: {BSC05@impl.h:2080} StateInit event Type# 131082 Event# NActors::TEvInterconnect::TEvNodesInfo 2026-01-08T15:35:50.722910Z node 11 :BS_CONTROLLER DEBUG: {BSC01@bsc.cpp:829} Handle TEvInterconnect::TEvNodesInfo 2026-01-08T15:35:50.723146Z node 11 :BS_CONTROLLER DEBUG: {BSCTXIS01@init_scheme.cpp:17} TTxInitScheme Execute 2026-01-08T15:35:50.732737Z node 11 :BS_CONTROLLER DEBUG: {BSCTXIS03@init_scheme.cpp:44} TTxInitScheme Complete 2026-01-08T15:35:50.732934Z node 11 :BS_CONTROLLER DEBUG: {BSCTXM01@migrate.cpp:190} Execute tx 2026-01-08T15:35:50.733048Z node 11 :BS_CONTROLLER DEBUG: {BSCTXM02@migrate.cpp:251} Complete tx IncompatibleData# false 2026-01-08T15:35:50.733188Z node 11 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxTrimUnusedSlots 2026-01-08T15:35:50.733296Z node 11 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxTrimUnusedSlots 2026-01-08T15:35:50.733386Z node 11 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateSchemaVersion Leader for TabletID 72057594037932033 is [11:28:2070] sender: [11:49:2057] recipient: [11:11:2058] 2026-01-08T15:35:50.744783Z node 11 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateSchemaVersion 2026-01-08T15:35:50.745127Z node 11 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxGenerateInstanceId 2026-01-08T15:35:50.766848Z node 11 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxGenerateInstanceId 2026-01-08T15:35:50.766986Z node 11 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateStaticPDiskInfo 2026-01-08T15:35:50.767154Z node 11 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateStaticPDiskInfo 2026-01-08T15:35:50.767237Z node 11 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxFillInNonNullConfigForPDisk 2026-01-08T15:35:50.767340Z node 11 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxFillInNonNullConfigForPDisk 2026-01-08T15:35:50.767384Z node 11 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} 
Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxDropDriveStatus 2026-01-08T15:35:50.767416Z node 11 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxDropDriveStatus 2026-01-08T15:35:50.767465Z node 11 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateCompatibilityInfo 2026-01-08T15:35:50.778191Z node 11 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateCompatibilityInfo 2026-01-08T15:35:50.778428Z node 11 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateEnableConfigV2 2026-01-08T15:35:50.790730Z node 11 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateEnableConfigV2 2026-01-08T15:35:50.790861Z node 11 :BS_CONTROLLER DEBUG: {BSCTXLE01@load_everything.cpp:21} TTxLoadEverything Execute 2026-01-08T15:35:50.791959Z node 11 :BS_CONTROLLER DEBUG: {BSCTXLE03@load_everything.cpp:705} TTxLoadEverything Complete 2026-01-08T15:35:50.792006Z node 11 :BS_CONTROLLER DEBUG: {BSC09@impl.h:2211} LoadFinished 2026-01-08T15:35:50.792143Z node 11 :BS_CONTROLLER DEBUG: {BSC18@console_interaction.cpp:31} Console connection service started 2026-01-08T15:35:50.792185Z node 11 :BS_CONTROLLER DEBUG: {BSCTXLE04@load_everything.cpp:710} TTxLoadEverything InitQueue processed 2026-01-08T15:35:50.792774Z node 11 :BS_CONTROLLER DEBUG: {BSCTXCC01@config_cmd.cpp:433} Execute TEvControllerConfigRequest Request# {Command { DefineHostConfig { } } Command { EnableSelfHeal { } } Command { QueryBaseConfig { } } } Status { Success: true } Status { ErrorDescription: "command must be sole" } ErrorDescription: "command must be sole" Leader for TabletID 72057594037932033 is [0:0:0] sender: [12:26:2057] recipient: [12:24:2068] IGNORE Leader for TabletID 72057594037932033 is [0:0:0] sender: [12:26:2057] recipient: [12:24:2068] Leader for TabletID 72057594037932033 is [12:28:2070] sender: [12:29:2057] recipient: [12:24:2068] 2026-01-08T15:35:51.073681Z node 12 :BS_CONTROLLER DEBUG: {BSC05@impl.h:2080} StateInit event Type# 268828672 Event# NKikimr::TEvTablet::TEvBoot 2026-01-08T15:35:51.074716Z node 12 :BS_CONTROLLER DEBUG: {BSC05@impl.h:2080} StateInit event Type# 268828673 Event# NKikimr::TEvTablet::TEvRestored 2026-01-08T15:35:51.074969Z node 12 :BS_CONTROLLER DEBUG: {BSC22@console_interaction.cpp:14} Console interaction started 2026-01-08T15:35:51.076400Z node 12 :BS_CONTROLLER DEBUG: {BSC05@impl.h:2080} StateInit event Type# 268828684 Event# NKikimr::TEvTablet::TEvFollowerSyncComplete 2026-01-08T15:35:51.076939Z node 12 :BS_CONTROLLER DEBUG: {BSC05@impl.h:2080} StateInit event Type# 268639244 Event# NKikimr::TEvNodeWardenStorageConfig 2026-01-08T15:35:51.077126Z node 12 :BS_CONTROLLER DEBUG: {BSC05@impl.h:2080} StateInit event Type# 131082 Event# NActors::TEvInterconnect::TEvNodesInfo 2026-01-08T15:35:51.077239Z node 12 :BS_CONTROLLER DEBUG: {BSC01@bsc.cpp:829} Handle TEvInterconnect::TEvNodesInfo 2026-01-08T15:35:51.077416Z node 12 :BS_CONTROLLER DEBUG: {BSCTXIS01@init_scheme.cpp:17} TTxInitScheme Execute 2026-01-08T15:35:51.091919Z node 12 :BS_CONTROLLER DEBUG: {BSCTXIS03@init_scheme.cpp:44} TTxInitScheme Complete 
2026-01-08T15:35:51.092066Z node 12 :BS_CONTROLLER DEBUG: {BSCTXM01@migrate.cpp:190} Execute tx 2026-01-08T15:35:51.092202Z node 12 :BS_CONTROLLER DEBUG: {BSCTXM02@migrate.cpp:251} Complete tx IncompatibleData# false 2026-01-08T15:35:51.092307Z node 12 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxTrimUnusedSlots 2026-01-08T15:35:51.092397Z node 12 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxTrimUnusedSlots 2026-01-08T15:35:51.092469Z node 12 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateSchemaVersion Leader for TabletID 72057594037932033 is [12:28:2070] sender: [12:49:2057] recipient: [12:11:2058] 2026-01-08T15:35:51.106941Z node 12 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateSchemaVersion 2026-01-08T15:35:51.107084Z node 12 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxGenerateInstanceId 2026-01-08T15:35:51.131131Z node 12 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxGenerateInstanceId 2026-01-08T15:35:51.131286Z node 12 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateStaticPDiskInfo 2026-01-08T15:35:51.131384Z node 12 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateStaticPDiskInfo 2026-01-08T15:35:51.131470Z node 12 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxFillInNonNullConfigForPDisk 2026-01-08T15:35:51.131573Z node 12 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxFillInNonNullConfigForPDisk 2026-01-08T15:35:51.131633Z node 12 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxDropDriveStatus 2026-01-08T15:35:51.131691Z node 12 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxDropDriveStatus 2026-01-08T15:35:51.131779Z node 12 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateCompatibilityInfo 2026-01-08T15:35:51.142822Z node 12 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateCompatibilityInfo 2026-01-08T15:35:51.142959Z node 12 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateEnableConfigV2 2026-01-08T15:35:51.154844Z node 12 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateEnableConfigV2 2026-01-08T15:35:51.154971Z node 12 :BS_CONTROLLER DEBUG: {BSCTXLE01@load_everything.cpp:21} 
TTxLoadEverything Execute 2026-01-08T15:35:51.156292Z node 12 :BS_CONTROLLER DEBUG: {BSCTXLE03@load_everything.cpp:705} TTxLoadEverything Complete 2026-01-08T15:35:51.156349Z node 12 :BS_CONTROLLER DEBUG: {BSC09@impl.h:2211} LoadFinished 2026-01-08T15:35:51.156565Z node 12 :BS_CONTROLLER DEBUG: {BSC18@console_interaction.cpp:31} Console connection service started 2026-01-08T15:35:51.156619Z node 12 :BS_CONTROLLER DEBUG: {BSCTXLE04@load_everything.cpp:710} TTxLoadEverything InitQueue processed 2026-01-08T15:35:51.157322Z node 12 :BS_CONTROLLER DEBUG: {BSCTXCC01@config_cmd.cpp:433} Execute TEvControllerConfigRequest Request# {Command { DefineHostConfig { } } Command { EnableSelfHeal { } } Command { QueryBaseConfig { } } } Status { Success: true } Status { ErrorDescription: "command must be sole" } ErrorDescription: "command must be sole" |87.9%| [TM] {BAZEL_UPLOAD} ydb/core/mind/bscontroller/ut_bscontroller/unittest >> TSchemeShardConsistentCopyTablesTest::IncrementalBackupIndexesContinuesToWork >> KqpPg::ValuesInsert-useSink [GOOD] >> PgCatalog::PgType >> KikimrIcGateway::TestLoadDataSourceProperties+UseSchemaSecrets [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/datashard/ut_incremental_restore_scan/unittest >> IncrementalRestoreScan::Empty [GOOD] Test command err: ... waiting for SysViewsRoster update finished 2026-01-08T15:35:49.881629Z node 1 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:125: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, got undelivered to scheme cache: ActorUnknown 2026-01-08T15:35:50.008713Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:114:2161], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2026-01-08T15:35:50.009171Z node 1 :METADATA_PROVIDER ERROR: log.cpp:841: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2026-01-08T15:35:50.009341Z node 1 :METADATA_PROVIDER ERROR: log.cpp:841: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2026-01-08T15:35:50.297467Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2026-01-08T15:35:50.297603Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2026-01-08T15:35:50.385758Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1374: Notification cookie mismatch for subscription [1:34:2081] 1767886546566540 != 1767886546566544 2026-01-08T15:35:50.397163Z node 1 :IMPORT WARN: schemeshard_import.cpp:440: Table profiles were not loaded 2026-01-08T15:35:50.454774Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2026-01-08T15:35:50.584631Z node 1 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: PathErrorUnknown, path: Root/.metadata/script_executions ... waiting for SysViewsRoster update finished (done) 2026-01-08T15:35:50.912448Z node 1 :CHANGE_EXCHANGE DEBUG: incr_restore_scan.cpp:182: [TIncrementalRestoreScan][1337][OwnerId: 1, LocalPathId: 2][OwnerId: 3, LocalPathId: 4][1:833:2727] Exhausted 2026-01-08T15:35:50.912594Z node 1 :CHANGE_EXCHANGE DEBUG: incr_restore_scan.cpp:131: [TIncrementalRestoreScan][1337][OwnerId: 1, LocalPathId: 2][OwnerId: 3, LocalPathId: 4][1:833:2727] Handle TEvIncrementalRestoreScan::TEvFinished NKikimr::NDataShard::TEvIncrementalRestoreScan::TEvFinished 2026-01-08T15:35:50.912651Z node 1 :CHANGE_EXCHANGE DEBUG: incr_restore_scan.cpp:195: [TIncrementalRestoreScan][1337][OwnerId: 1, LocalPathId: 2][OwnerId: 3, LocalPathId: 4][1:833:2727] Finish Done |87.9%| [TM] {BAZEL_UPLOAD} ydb/core/tx/datashard/ut_incremental_restore_scan/unittest |87.9%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_consistent_copy_tables/unittest >> ConvertYdbValueToMiniKQLValueTest::SimpleInt32 [GOOD] >> ConvertYdbValueToMiniKQLValueTest::SimpleTzDate [GOOD] >> ConvertYdbPermissionNameToACLAttrs::TestEqualGranularAndDeprecatedAcl [GOOD] >> ConvertYdbValueToMiniKQLValueTest::OptionalEmpty [GOOD] >> ConvertYdbValueToMiniKQLValueTest::OptionalOptionalEmpty [GOOD] >> ConvertYdbValueToMiniKQLValueTest::OptionalOptionalEmpty2 [GOOD] >> ConvertYdbValueToMiniKQLValueTest::List [GOOD] >> ConvertYdbValueToMiniKQLValueTest::Dict [GOOD] >> ConvertYdbValueToMiniKQLValueTest::SimpleTzDateTime [GOOD] >> ConvertYdbValueToMiniKQLValueTest::SimpleTzTimeStamp [GOOD] >> ConvertYdbValueToMiniKQLValueTest::SimpleInt32TypeMissmatch [GOOD] >> ConvertYdbValueToMiniKQLValueTest::SimpleUuid [GOOD] |87.9%| [TM] {BAZEL_UPLOAD} ydb/core/tx/schemeshard/ut_consistent_copy_tables/unittest >> ConvertMiniKQLTypeToYdbTypeTest::SimpleType [GOOD] >> ConvertMiniKQLTypeToYdbTypeTest::TTzDate [GOOD] >> ConvertMiniKQLTypeToYdbTypeTest::Optional [GOOD] >> ConvertMiniKQLTypeToYdbTypeTest::List [GOOD] >> 
ConvertMiniKQLTypeToYdbTypeTest::Struct [GOOD] >> ConvertMiniKQLTypeToYdbTypeTest::Dict [GOOD] >> ConvertMiniKQLTypeToYdbTypeTest::PgType [GOOD] >> TSchemeShardConsistentCopyTablesTest::ConsistentCopyTableWithMultipleIndexes [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/datashard/ut_incremental_restore_scan/unittest >> IncrementalRestoreScan::ChangeSenderSimple [GOOD] Test command err: ... waiting for SysViewsRoster update finished 2026-01-08T15:35:49.025297Z node 1 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:125: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, got undelivered to scheme cache: ActorUnknown 2026-01-08T15:35:49.171066Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:114:2161], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2026-01-08T15:35:49.171568Z node 1 :METADATA_PROVIDER ERROR: log.cpp:841: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2026-01-08T15:35:49.171800Z node 1 :METADATA_PROVIDER ERROR: log.cpp:841: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2026-01-08T15:35:49.517202Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2026-01-08T15:35:49.517340Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2026-01-08T15:35:49.631872Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1374: Notification cookie mismatch for subscription [1:34:2081] 1767886545697593 != 1767886545697597 2026-01-08T15:35:49.648527Z node 1 :IMPORT WARN: schemeshard_import.cpp:440: Table profiles were not loaded 2026-01-08T15:35:49.698468Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2026-01-08T15:35:49.789298Z node 1 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: PathErrorUnknown, path: Root/.metadata/script_executions ... waiting for SysViewsRoster update finished (done) 2026-01-08T15:35:50.251905Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:377: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "Root" StoragePools { Name: "/Root:test" Kind: "test" } } } TxId: 1 TabletId: 72057594046644480 , at schemeshard: 72057594046644480 2026-01-08T15:35:50.252152Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //Root, opId: 1:0, at schemeshard: 72057594046644480 2026-01-08T15:35:50.252379Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:600: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046644480, LocalPathId: 1] was 1 2026-01-08T15:35:50.252446Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5553: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046644480, LocalPathId: 1] source path: 2026-01-08T15:35:50.252801Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:127: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046644480 2026-01-08T15:35:50.252900Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2026-01-08T15:35:50.253723Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:460: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046644480 PathId: 1, at schemeshard: 72057594046644480 2026-01-08T15:35:50.253888Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, 
operation: ALTER DATABASE, path: //Root 2026-01-08T15:35:50.254108Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:494: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046644480 2026-01-08T15:35:50.254184Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046644480 2026-01-08T15:35:50.254279Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2026-01-08T15:35:50.254335Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2700: Change state for txid 1:0 2 -> 3 2026-01-08T15:35:50.255064Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:494: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046644480 2026-01-08T15:35:50.255114Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046644480 2026-01-08T15:35:50.255162Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2700: Change state for txid 1:0 3 -> 128 2026-01-08T15:35:50.255653Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:494: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046644480 2026-01-08T15:35:50.255727Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046644480 2026-01-08T15:35:50.255776Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046644480 2026-01-08T15:35:50.255842Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1723: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2026-01-08T15:35:50.259817Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1792: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046644480 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2026-01-08T15:35:50.260484Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:665: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046644480 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2026-01-08T15:35:50.260652Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1824: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 2026-01-08T15:35:50.261408Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__notify.cpp:31: NotifyTxCompletion operation in-flight, txId: 1, at schemeshard: 72057594046644480 2026-01-08T15:35:50.261461Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1679: TOperation IsReadyToNotify, TxId: 1, ready parts: 0/1, is published: true 2026-01-08T15:35:50.261536Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__notify.cpp:132: NotifyTxCompletion transaction is registered, txId: 1, at schemeshard: 72057594046644480 2026-01-08T15:35:50.272944Z node 1 :TX_CONVEYOR ERROR: log.cpp:841: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2026-01-08T15:35:50.376868Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:685: 
TTxOperationPlanStep Execute, stepId: 1500, transactions count in step: 1, at schemeshard: 72057594046644480 2026-01-08T15:35:50.377052Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:689: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 AckTo { RawX1: 0 RawX2: 0 } } Step: 1500 MediatorID: 72057594046382081 TabletID: 72057594046644480, at schemeshard: 72057594046644480 2026-01-08T15:35:50.377108Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046644480 2026-01-08T15:35:50.377394Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2700: Change state for txid 1:0 128 -> 240 2026-01-08T15:35:50.377461Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046644480 2026-01-08T15:35:50.377642Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:600: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046644480, LocalPathId: 1] was 2 2026-01-08T15:35:50.377779Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:412: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046644480, LocalPathId: 1], at schemeshard: 72057594046644480 2026-01-08T15:35:50.378652Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046644480 2026-01-08T15:35:50.378716Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046644480, txId: 1, path id: [OwnerId: 72057594046644480, LocalPathId: 1] 2026-01-08T15:35:50.378955Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046644480 2026-01-08T15:35:50.379005Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:559:2493], at schemeshard: 72057594046644480, txId: 1, path id: 1 2026-01-08T15:35:50.379624Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:494: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046644480 2026-01-08T15:35:50.379696Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:497: [72057594046644480] TDone opId# 1:0 ProgressState 2026-01-08T15:35:50.379811Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:928: Part operation is done id#1:0 progress is 1/1 2026-01-08T15:35:50.379862Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1714: TOperation IsReadyToDone TxId: 1 ready parts: 1/1 2026-01-08T15:35:50.379908Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:928: Part operation is done id#1:0 progress is 1/1 2026-01-08T15:35:50.379935Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1714: TOperation IsReadyToDone TxId: 1 ready parts: 1/1 2026-01-08T15:35:50.379968Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1679: TOperation IsReadyToNotify, TxId: 1, ready parts: 1/1, is published: false 2026-01-08T15:35:50.380002Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1714: TOperation IsReadyToDone TxId: 1 ready parts: 1/1 2026-01-08T15:35:50.380033Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: 
schemeshard__operation_side_effects.cpp:995: Operation and all the parts is done, operation id: 1:0 2026-01-08T15:35:50.380064Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5577: RemoveTx for txid 1:0 2026-01-08T15:35:50.380135Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:611: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046644480, LocalPathId: 1] was 3 2026-01-08T15:35:50.380175Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:1004: Publication still in progress, tx: 1, publications: 1, subscribers: 1 2026-01-08T15:35:50.380213Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:1011: Publication details: tx: 1, [OwnerId: 72057594046644480, LocalPathId: 1], 6 2026-01-08T15:35:50.382199Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6274: Handle TEvUpdateAck, at schemeshar ... ration id: 281474976715658:0 2026-01-08T15:35:51.034664Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5577: RemoveTx for txid 281474976715658:0 2026-01-08T15:35:51.034817Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:611: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046644480, LocalPathId: 39] was 3 2026-01-08T15:35:51.035920Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:402: actor# [1:68:2115] Handle TEvNavigate describe path /Root/IncrBackupTable 2026-01-08T15:35:51.036051Z node 1 :TX_PROXY DEBUG: describe.cpp:270: Actor# [1:1012:2859] HANDLE EvNavigateScheme /Root/IncrBackupTable 2026-01-08T15:35:51.038069Z node 1 :TX_PROXY DEBUG: describe.cpp:354: Actor# [1:1012:2859] HANDLE EvNavigateKeySetResult TDescribeReq marker# P5 ErrorCount# 0 2026-01-08T15:35:51.038223Z node 1 :TX_PROXY DEBUG: describe.cpp:433: Actor# [1:1012:2859] SEND to# 72057594046644480 shardToRequest NKikimrSchemeOp.TDescribePath Path: "/Root/IncrBackupTable" Options { ShowPrivateTable: true } 2026-01-08T15:35:51.039546Z node 1 :TX_PROXY DEBUG: describe.cpp:446: Actor# [1:1012:2859] Handle TEvDescribeSchemeResult Forward to# [1:829:2724] Cookie: 0 TEvDescribeSchemeResult: NKikimrScheme.TEvDescribeSchemeResult PreSerializedData size# 20 Record# Status: StatusSuccess Path: "/Root/IncrBackupTable" PathDescription { Self { Name: "IncrBackupTable" PathId: 39 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976715658 CreateStep: 2500 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "IncrBackupTable" Columns { Name: "key" Type: "Uint32" TypeId: 2 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "value" Type: "Uint32" TypeId: 2 Id: 2 NotNull: false IsBuildInProgress: false } Columns { Name: "__ydb_incrBackupImpl_changeMetadata" Type: "String" TypeId: 4097 Id: 3 NotNull: false IsBuildInProgress: false } KeyColumnNames: "key" KeyColumnIds: 1 PartitionConfig { CompactionPolicy { InMemSizeToSnapshot: 4194304 InMemStepsToSnapshot: 300 InMemForceStepsToSnapshot: 500 InMemForceSizeToSnapshot: 16777216 InMemCompactionBrokerQueue: 0 ReadAheadHiThreshold: 67108864 ReadAheadLoThreshold: 16777216 MinDataPageSize: 7168 SnapBrokerQueue: 0 Generation { GenerationId: 0 SizeToCompact: 0 CountToCompact: 8 ForceCountToCompact: 8 ForceSizeToCompact: 
134217728 CompactionBrokerQueue: 4294967295 KeepInCache: true BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen1" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 0 UpliftPartSize: 0 } Generation { GenerationId: 1 SizeToCompact: 41943040 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 536870912 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen2" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 8388608 UpliftPartSize: 8388608 } Generation { GenerationId: 2 SizeToCompact: 419430400 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 17179869184 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen3" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 83886080 UpliftPartSize: 83886080 } BackupBrokerQueue: 1 DefaultTaskPriority: 5 BackgroundSnapshotPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } InMemResourceBrokerTask: "compaction_gen0" SnapshotResourceBrokerTask: "compaction_gen0" BackupResourceBrokerTask: "scan" LogOverheadSizeToSnapshot: 16777216 LogOverheadCountToSnapshot: 500 DroppedRowsPercentToCompact: 50 MinBTreeIndexNodeSize: 7168 MinBTreeIndexNodeKeys: 6 } PartitioningPolicy { MinPartitionsCount: 1 } ColumnFamilies { Id: 0 Name: "default" StorageConfig { SysLog { PreferredPoolKind: "test" } Log { PreferredPoolKind: "test" } Data { PreferredPoolKind: "test" } } } } TableSchemaVersion: 1 IsBackup: false IsRestore: false } TableStats { DataSize: 0 RowCount: 0 IndexSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 0 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 1 RangeReadRows: 0 StoragePools { } ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 0 Memory: 0 Network: 0 Storage: 0 ReadThroughput: 0 WriteThroughput: 0 ReadIops: 0 WriteIops: 0 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 500 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } DomainKey { SchemeShard: 72057594046644480 PathId: 1 } PathsInside: 38 PathsLimit: 10000 ShardsInside: 2 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046644480 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 
1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 39 PathOwnerId: 72057594046644480 2026-01-08T15:35:51.040985Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3740: Server connected at leader tablet# 72075186224037889, clientId# [1:1022:2863], serverId# [1:1023:2864], sessionId# [0:0:0] 2026-01-08T15:35:51.041848Z node 1 :CHANGE_EXCHANGE DEBUG: change_sender_table_base.h:65: [IncrRestoreChangeSenderMain][[OwnerId: 72057594046644480, LocalPathId: 38]][1:1024:2865] HandleUserTable TEvTxProxySchemeCache::TEvNavigateKeySetResult: result# { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: Root/IncrBackupTable TableId: [72057594046644480:39:1] RequestType: ByTableId Operation: OpTable RedirectRequired: true ShowPrivatePath: true SyncVersion: false Status: Ok Kind: KindTable DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] Params { Version: 1 PlanResolution: 500 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } }] } 2026-01-08T15:35:51.042118Z node 1 :CHANGE_EXCHANGE DEBUG: change_sender_table_base.h:131: [IncrRestoreChangeSenderMain][[OwnerId: 72057594046644480, LocalPathId: 38]][1:1024:2865] HandleTargetTable TEvTxProxySchemeCache::TEvNavigateKeySetResult: result# { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: Root/Table TableId: [72057594046644480:38:1] RequestType: ByTableId Operation: OpTable RedirectRequired: true ShowPrivatePath: true SyncVersion: false Status: Ok Kind: KindTable DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] Params { Version: 1 PlanResolution: 500 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } }] } 2026-01-08T15:35:51.042434Z node 1 :CHANGE_EXCHANGE DEBUG: change_sender_table_base.h:227: [IncrRestoreChangeSenderMain][[OwnerId: 72057594046644480, LocalPathId: 38]][1:1024:2865] HandleKeys TEvTxProxySchemeCache::TEvResolveKeySetResult: result# { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 ResultSet [{ TableId: [OwnerId: 72057594046644480, LocalPathId: 38] Access: 0 SyncVersion: false Status: OkData Kind: KindRegularTable PartitionsCount: 1 DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] Params { Version: 1 PlanResolution: 500 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } From: (Uint32 : NULL) IncFrom: 1 To: () IncTo: 0 }] } 2026-01-08T15:35:51.042698Z node 1 :CHANGE_EXCHANGE DEBUG: change_sender_incr_restore.cpp:139: [IncrRestoreChangeSenderMain][[OwnerId: 72057594046644480, LocalPathId: 38]][1:1024:2865] Handle NKikimr::NChangeExchange::TEvChangeExchange::TEvEnqueueRecords { Records [{ Order: 0 PathId: [OwnerId: 72057594046644480, LocalPathId: 38] BodySize: 18 }] } 2026-01-08T15:35:51.042845Z node 1 :CHANGE_EXCHANGE DEBUG: change_sender_incr_restore.cpp:144: [IncrRestoreChangeSenderMain][[OwnerId: 72057594046644480, LocalPathId: 38]][1:1024:2865] Handle NKikimr::NChangeExchange::TEvChangeExchange::TEvRecords { Records [{ Order: 0 Group: 0 Step: 0 TxId: 0 PathId: [OwnerId: 
72057594046644480, LocalPathId: 38] Kind: IncrementalRestore Source: InitialScan Body: 18b TableId: [OwnerId: 72057594046644480, LocalPathId: 39] SchemaVersion: 0 LockId: 0 LockOffset: 0 }] } 2026-01-08T15:35:51.043037Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:391: actor# [1:68:2115] Handle TEvGetProxyServicesRequest 2026-01-08T15:35:51.043129Z node 1 :CHANGE_EXCHANGE DEBUG: change_sender_table_base.cpp:40: [TableChangeSenderShard][0:0][72075186224037888][1:1028:2865] Handle NKikimr::TEvTxUserProxy::TEvGetProxyServicesResponse 2026-01-08T15:35:51.043534Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3740: Server connected at leader tablet# 72075186224037888, clientId# [1:1029:2869], serverId# [1:1030:2870], sessionId# [0:0:0] 2026-01-08T15:35:51.091868Z node 1 :CHANGE_EXCHANGE DEBUG: change_sender_table_base.cpp:78: [TableChangeSenderShard][0:0][72075186224037888][1:1028:2865] Handshake NKikimrChangeExchange.TEvStatus Status: STATUS_OK LastRecordOrder: 0 2026-01-08T15:35:51.092029Z node 1 :CHANGE_EXCHANGE DEBUG: change_sender_incr_restore.cpp:154: [IncrRestoreChangeSenderMain][[OwnerId: 72057594046644480, LocalPathId: 38]][1:1024:2865] Handle NKikimr::NChangeExchange::TEvChangeExchangePrivate::TEvReady { PartitionId: 72075186224037888 } 2026-01-08T15:35:51.092201Z node 1 :CHANGE_EXCHANGE DEBUG: change_sender_table_base.cpp:123: [TableChangeSenderShard][0:0][72075186224037888][1:1028:2865] Handle NKikimr::NChangeExchange::TEvChangeExchange::TEvRecords { Records [{ Order: 0 Group: 0 Step: 0 TxId: 0 PathId: [OwnerId: 72057594046644480, LocalPathId: 38] Kind: IncrementalRestore Source: InitialScan Body: 18b TableId: [OwnerId: 72057594046644480, LocalPathId: 39] SchemaVersion: 0 LockId: 0 LockOffset: 0 }] } 2026-01-08T15:35:51.092295Z node 1 :CHANGE_EXCHANGE DEBUG: change_sender_incr_restore.cpp:154: [IncrRestoreChangeSenderMain][[OwnerId: 72057594046644480, LocalPathId: 38]][1:1024:2865] Handle NKikimr::NChangeExchange::TEvChangeExchangePrivate::TEvReady { PartitionId: 72075186224037888 } 2026-01-08T15:35:51.092450Z node 1 :CHANGE_EXCHANGE DEBUG: change_sender_incr_restore.cpp:176: [IncrRestoreChangeSenderMain][[OwnerId: 72057594046644480, LocalPathId: 38]][1:1024:2865] Handle NKikimr::NDataShard::TEvIncrementalRestoreScan::TEvNoMoreData |87.9%| [TM] {BAZEL_UPLOAD} ydb/core/tx/datashard/ut_incremental_restore_scan/unittest |87.9%| [TM] {asan, default-linux-x86_64, release} ydb/core/ydb_convert/ut/unittest >> ConvertYdbValueToMiniKQLValueTest::SimpleUuid [GOOD] |87.9%| [TM] {BAZEL_UPLOAD} ydb/core/ydb_convert/ut/unittest |87.9%| [TM] {asan, default-linux-x86_64, release} ydb/core/ydb_convert/ut/unittest >> ConvertYdbValueToMiniKQLValueTest::Dict [GOOD] |87.9%| [TM] {BAZEL_UPLOAD} ydb/core/ydb_convert/ut/unittest >> TTxDataShardUploadRows::ShouldRejectOnChangeQueueOverflowAndRetryOnRetryableError [GOOD] >> ConvertYdbValueToMiniKQLValueTest::SimpleBool [GOOD] >> ConvertYdbValueToMiniKQLValueTest::SimpleBoolTypeMissmatch [GOOD] >> ConvertYdbValueToMiniKQLValueTest::SimpleDecimal [GOOD] >> ConvertYdbValueToMiniKQLValueTest::SimpleDecimalTypeMissmatch [GOOD] >> ConvertYdbValueToMiniKQLValueTest::OptionalString [GOOD] >> ConvertYdbValueToMiniKQLValueTest::PgValue [GOOD] |87.9%| [TM] {asan, default-linux-x86_64, release} ydb/core/ydb_convert/ut/unittest >> ConvertMiniKQLTypeToYdbTypeTest::PgType [GOOD] |87.9%| [TM] {BAZEL_UPLOAD} ydb/core/ydb_convert/ut/unittest |87.9%| [TA] $(B)/ydb/core/tx/datashard/ut_incremental_restore_scan/test-results/unittest/{meta.json ... 
results_accumulator.log} |87.9%| [TA] {BAZEL_UPLOAD} $(B)/ydb/core/tx/datashard/ut_incremental_restore_scan/test-results/unittest/{meta.json ... results_accumulator.log} >> BsControllerConfig::DeleteStoragePool [GOOD] >> BsControllerConfig::CommandRollbackWhenCombined ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/datashard/ut_change_collector/unittest >> CdcStreamChangeCollector::SchemaChanges [GOOD] Test command err: ... waiting for SysViewsRoster update finished 2026-01-08T15:35:20.717243Z node 1 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:125: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, got undelivered to scheme cache: ActorUnknown 2026-01-08T15:35:20.842593Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:114:2161], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2026-01-08T15:35:20.842999Z node 1 :METADATA_PROVIDER ERROR: log.cpp:841: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2026-01-08T15:35:20.843148Z node 1 :METADATA_PROVIDER ERROR: log.cpp:841: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2026-01-08T15:35:21.153581Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2026-01-08T15:35:21.153734Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2026-01-08T15:35:21.238361Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1374: Notification cookie mismatch for subscription [1:34:2081] 1767886517105303 != 1767886517105307 2026-01-08T15:35:21.249641Z node 1 :IMPORT WARN: schemeshard_import.cpp:440: Table profiles were not loaded 2026-01-08T15:35:21.298726Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2026-01-08T15:35:21.424461Z node 1 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: PathErrorUnknown, path: Root/.metadata/script_executions ... waiting for SysViewsRoster update finished (done) 2026-01-08T15:35:21.787315Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2026-01-08T15:35:21.804765Z node 1 :TX_CONVEYOR ERROR: log.cpp:841: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2026-01-08T15:35:21.923248Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:35:21.970284Z node 1 :TX_DATASHARD INFO: datashard.cpp:374: TDataShard::OnActivateExecutor: tablet 72075186224037888 actor [1:886:2765] 2026-01-08T15:35:21.970627Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:661: TxInitSchema.Execute 2026-01-08T15:35:22.021280Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:727: TxInitSchema.Complete 2026-01-08T15:35:22.021422Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:50: TDataShard::TTxInit::Execute 2026-01-08T15:35:22.023197Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1324: LoadChangeRecords: QueueSize: 0, at tablet: 72075186224037888 2026-01-08T15:35:22.023322Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1380: LoadLockChangeRecords at tablet: 72075186224037888 2026-01-08T15:35:22.023408Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1429: LoadChangeRecordCommits at tablet: 72075186224037888 2026-01-08T15:35:22.023952Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:94: TDataShard::TTxInit::Complete 2026-01-08T15:35:22.024162Z node 1 :TX_DATASHARD 
DEBUG: datashard__init.cpp:102: TDataShard::TTxInitRestored::Execute 2026-01-08T15:35:22.024253Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:142: DataShard 72075186224037888 persisting started state actor id [1:901:2765] in generation 1 2026-01-08T15:35:22.039062Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:151: TDataShard::TTxInitRestored::Complete 2026-01-08T15:35:22.065044Z node 1 :TX_DATASHARD INFO: datashard.cpp:418: Switched to work state WaitScheme tabletId 72075186224037888 2026-01-08T15:35:22.065275Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:458: 72075186224037888 not sending time cast registration request in state WaitScheme: missing processing params 2026-01-08T15:35:22.065395Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1259: Change sender created: at tablet: 72075186224037888, actorId: [1:903:2775] 2026-01-08T15:35:22.065427Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1264: Trying to activate change sender: at tablet: 72075186224037888 2026-01-08T15:35:22.065464Z node 1 :TX_DATASHARD INFO: datashard.cpp:1281: Cannot activate change sender: at tablet: 72075186224037888, state: WaitScheme 2026-01-08T15:35:22.065512Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2026-01-08T15:35:22.065992Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 72075186224037888 2026-01-08T15:35:22.066123Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 72075186224037888 2026-01-08T15:35:22.066217Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2026-01-08T15:35:22.066253Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 0 2026-01-08T15:35:22.066294Z node 1 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186224037888 TxInFly 0 2026-01-08T15:35:22.066354Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2026-01-08T15:35:22.066848Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3740: Server connected at leader tablet# 72075186224037888, clientId# [1:882:2762], serverId# [1:887:2766], sessionId# [0:0:0] 2026-01-08T15:35:22.067080Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037888 2026-01-08T15:35:22.067420Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:133: Propose scheme transaction at tablet 72075186224037888 txId 281474976715657 ssId 72057594046644480 seqNo 2:1 2026-01-08T15:35:22.067551Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:221: Prepared scheme transaction txId 281474976715657 at tablet 72075186224037888 2026-01-08T15:35:22.069436Z node 1 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:180: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2026-01-08T15:35:22.083213Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 72075186224037888 2026-01-08T15:35:22.083365Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:468: 72075186224037888 not sending time cast registration request in state WaitScheme 2026-01-08T15:35:22.219476Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3740: Server connected at leader tablet# 72075186224037888, clientId# [1:917:2783], serverId# 
[1:919:2785], sessionId# [0:0:0] 2026-01-08T15:35:22.227020Z node 1 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:69: Planned transaction txId 281474976715657 at step 2000 at tablet 72075186224037888 { Transactions { TxId: 281474976715657 AckTo { RawX1: 0 RawX2: 0 } } Step: 2000 MediatorID: 72057594046382081 TabletID: 72075186224037888 } 2026-01-08T15:35:22.227136Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2026-01-08T15:35:22.227464Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2026-01-08T15:35:22.227516Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 1 2026-01-08T15:35:22.227587Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:333: Found ready operation [2000:281474976715657] in PlanQueue unit at 72075186224037888 2026-01-08T15:35:22.227940Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:634: LoadTxDetails at 72075186224037888 loaded tx from db 2000:281474976715657 keys extracted: 0 2026-01-08T15:35:22.228103Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 (dry run) active 1 active planned 1 immediate 0 planned 1 2026-01-08T15:35:22.228930Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2026-01-08T15:35:22.229027Z node 1 :TX_DATASHARD INFO: create_table_unit.cpp:69: Trying to CREATE TABLE at 72075186224037888 tableId# [OwnerId: 72057594046644480, LocalPathId: 38] schema version# 1 2026-01-08T15:35:22.231451Z node 1 :TX_DATASHARD INFO: datashard.cpp:476: Send registration request to time cast Ready tabletId 72075186224037888 mediators count is 1 coordinators count is 1 buckets per mediator 2 2026-01-08T15:35:22.231980Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 (dry run) active 0 active planned 0 immediate 0 planned 0 2026-01-08T15:35:22.234271Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3772: Got TEvMediatorTimecast::TEvRegisterTabletResult at 72075186224037888 time 0 2026-01-08T15:35:22.234439Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2026-01-08T15:35:22.235191Z node 1 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:98: Sending '{TEvPlanStepAccepted TabletId# 72075186224037888 step# 2000} 2026-01-08T15:35:22.235264Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2026-01-08T15:35:22.236545Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2026-01-08T15:35:22.236600Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1264: Trying to activate change sender: at tablet: 72075186224037888 2026-01-08T15:35:22.236651Z node 1 :TX_DATASHARD INFO: datashard.cpp:1302: Change sender activated: at tablet: 72075186224037888 2026-01-08T15:35:22.236724Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:810: Complete [2000 : 281474976715657] from 72075186224037888 at tablet 72075186224037888 send result to client [1:400:2399], exec latency: 0 ms, propose latency: 0 ms 2026-01-08T15:35:22.236786Z node 1 :TX_DATASHARD INFO: datashard.cpp:1599: 72075186224037888 Sending notify to schemeshard 72057594046644480 txId 281474976715657 state Ready TxInFly 0 2026-01-08T15:35:22.236872Z node 1 :TX_DATASHARD DEBUG: 
cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2026-01-08T15:35:22.247647Z node 1 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:180: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2026-01-08T15:35:22.249857Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:2965: Handle TEvSchemaChangedResult 281474976715657 datashard 72075186224037888 state Ready 2026-01-08T15:35:22.249955Z node 1 :TX_DATASHARD DEBUG: datashard__schema_changed.cpp:22: 72075186224037888 Got TEvSchemaChangedResult from SS at 72075186224037888 2026-01-08T15:35:22.250898Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3790: Got TEvMediatorTimecast::TEvSubscribeReadStepResult at 72075186224037888 coordinator 72057594046316545 last step 0 next step 2000 2026-01-08T15:35:22.266695Z node 1 :TX_DATASHARD DEBU ... 38], version# 2, step# 2500, txId# 281474976715658, at tablet# 72075186224037888 2026-01-08T15:35:50.805211Z node 4 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 (dry run) active 0 active planned 0 immediate 0 planned 0 2026-01-08T15:35:50.857415Z node 4 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:98: Sending '{TEvPlanStepAccepted TabletId# 72075186224037888 step# 2500} 2026-01-08T15:35:50.857558Z node 4 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2026-01-08T15:35:50.857613Z node 4 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2026-01-08T15:35:50.857689Z node 4 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2026-01-08T15:35:50.857786Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:810: Complete [2500 : 281474976715658] from 72075186224037888 at tablet 72075186224037888 send result to client [4:400:2399], exec latency: 0 ms, propose latency: 0 ms 2026-01-08T15:35:50.857859Z node 4 :TX_DATASHARD INFO: datashard.cpp:1599: 72075186224037888 Sending notify to schemeshard 72057594046644480 txId 281474976715658 state Ready TxInFly 0 2026-01-08T15:35:50.857986Z node 4 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2026-01-08T15:35:50.860638Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:2965: Handle TEvSchemaChangedResult 281474976715658 datashard 72075186224037888 state Ready 2026-01-08T15:35:50.860748Z node 4 :TX_DATASHARD DEBUG: datashard__schema_changed.cpp:22: 72075186224037888 Got TEvSchemaChangedResult from SS at 72075186224037888 2026-01-08T15:35:50.870618Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:1064:2893], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:35:50.870759Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:1074:2898], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:35:50.870850Z node 4 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:263: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:35:50.871995Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:1079:2902], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:35:50.872166Z node 4 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:263: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:35:50.879382Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715659:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2026-01-08T15:35:50.889233Z node 4 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:180: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2026-01-08T15:35:51.099503Z node 4 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:180: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2026-01-08T15:35:51.103784Z node 4 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [4:1078:2901], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715659 completed, doublechecking } 2026-01-08T15:35:51.136159Z node 4 :TX_PROXY ERROR: schemereq.cpp:624: Actor# [4:1137:2941] txid# 281474976715660, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 44], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2026-01-08T15:35:51.225110Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:3740: Server connected at leader tablet# 72075186224037888, clientId# [4:1167:2958], serverId# [4:1168:2959], sessionId# [0:0:0] 2026-01-08T15:35:51.225618Z node 4 :TX_DATASHARD DEBUG: execute_write_unit.cpp:262: Executing write operation for [0:3] at 72075186224037888 2026-01-08T15:35:51.225880Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:883: PersistChangeRecord: record: { Order: 1 Group: 1767886551225761 Step: 3000 TxId: 18446744073709551615 PathId: [OwnerId: 72057594046644480, LocalPathId: 39] Kind: CdcDataChange Source: Unspecified Body: 32b TableId: [OwnerId: 72057594046644480, LocalPathId: 38] SchemaVersion: 2 LockId: 0 LockOffset: 0 }, at tablet: 72075186224037888 2026-01-08T15:35:51.226138Z node 4 :TX_DATASHARD DEBUG: execute_write_unit.cpp:465: Executed write operation for [0:3] at 72075186224037888, row count=1 2026-01-08T15:35:51.239112Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:1179: EnqueueChangeRecords: at tablet: 72075186224037888, records: { Order: 1 PathId: [OwnerId: 72057594046644480, LocalPathId: 39] BodySize: 32 TableId: [OwnerId: 72057594046644480, LocalPathId: 38] SchemaVersion: 2 } 2026-01-08T15:35:51.239216Z node 4 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2026-01-08T15:35:51.288445Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_alter_table.cpp:187) 2026-01-08T15:35:51.292111Z node 4 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037888 2026-01-08T15:35:51.292427Z node 4 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:133: Propose scheme transaction at tablet 72075186224037888 txId 281474976715662 ssId 72057594046644480 seqNo 2:3 2026-01-08T15:35:51.292520Z node 4 :TX_DATASHARD INFO: check_scheme_tx_unit.cpp:235: Check scheme tx, proposed scheme version# 3 current version# 2 expected version# 3 at tablet# 72075186224037888 txId# 281474976715662 2026-01-08T15:35:51.292573Z node 4 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:221: Prepared scheme transaction txId 281474976715662 at tablet 72075186224037888 2026-01-08T15:35:51.305180Z node 4 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 72075186224037888 2026-01-08T15:35:51.428553Z node 4 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:69: Planned transaction txId 281474976715662 at step 3500 at tablet 72075186224037888 { Transactions { TxId: 281474976715662 AckTo { RawX1: 0 RawX2: 0 } } Step: 3500 MediatorID: 72057594046382081 TabletID: 72075186224037888 } 2026-01-08T15:35:51.428652Z node 4 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2026-01-08T15:35:51.429076Z node 4 :TX_DATASHARD DEBUG: 
datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2026-01-08T15:35:51.429166Z node 4 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 1 2026-01-08T15:35:51.429239Z node 4 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:333: Found ready operation [3500:281474976715662] in PlanQueue unit at 72075186224037888 2026-01-08T15:35:51.429644Z node 4 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:634: LoadTxDetails at 72075186224037888 loaded tx from db 3500:281474976715662 keys extracted: 0 2026-01-08T15:35:51.429833Z node 4 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 (dry run) active 1 active planned 1 immediate 0 planned 1 2026-01-08T15:35:51.430115Z node 4 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2026-01-08T15:35:51.430226Z node 4 :TX_DATASHARD INFO: alter_table_unit.cpp:145: Trying to ALTER TABLE at 72075186224037888 version 3 2026-01-08T15:35:51.432479Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:1849: Add schema snapshot: pathId# [OwnerId: 72057594046644480, LocalPathId: 38], version# 3, step# 3500, txId# 281474976715662, at tablet# 72075186224037888 2026-01-08T15:35:51.432673Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:883: PersistChangeRecord: record: { Order: 2 Group: 0 Step: 3500 TxId: 281474976715662 PathId: [OwnerId: 72057594046644480, LocalPathId: 39] Kind: CdcSchemaChange Source: Unspecified Body: 0b TableId: [OwnerId: 72057594046644480, LocalPathId: 38] SchemaVersion: 3 LockId: 0 LockOffset: 0 }, at tablet: 72075186224037888 2026-01-08T15:35:51.433206Z node 4 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 (dry run) active 0 active planned 0 immediate 0 planned 0 2026-01-08T15:35:51.435788Z node 4 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:98: Sending '{TEvPlanStepAccepted TabletId# 72075186224037888 step# 3500} 2026-01-08T15:35:51.435914Z node 4 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2026-01-08T15:35:51.438277Z node 4 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2026-01-08T15:35:51.438410Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:1179: EnqueueChangeRecords: at tablet: 72075186224037888, records: { Order: 2 PathId: [OwnerId: 72057594046644480, LocalPathId: 39] BodySize: 0 TableId: [OwnerId: 72057594046644480, LocalPathId: 38] SchemaVersion: 3 } 2026-01-08T15:35:51.438637Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:810: Complete [3500 : 281474976715662] from 72075186224037888 at tablet 72075186224037888 send result to client [4:400:2399], exec latency: 0 ms, propose latency: 0 ms 2026-01-08T15:35:51.438714Z node 4 :TX_DATASHARD INFO: datashard.cpp:1599: 72075186224037888 Sending notify to schemeshard 72057594046644480 txId 281474976715662 state Ready TxInFly 0 2026-01-08T15:35:51.438857Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:1179: EnqueueChangeRecords: at tablet: 72075186224037888, records: { Order: 2 PathId: [OwnerId: 72057594046644480, LocalPathId: 39] BodySize: 0 TableId: [OwnerId: 72057594046644480, LocalPathId: 38] SchemaVersion: 3 } 2026-01-08T15:35:51.438920Z node 4 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2026-01-08T15:35:51.442665Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:2965: Handle TEvSchemaChangedResult 
281474976715662 datashard 72075186224037888 state Ready 2026-01-08T15:35:51.442788Z node 4 :TX_DATASHARD DEBUG: datashard__schema_changed.cpp:22: 72075186224037888 Got TEvSchemaChangedResult from SS at 72075186224037888 2026-01-08T15:35:51.448921Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:3740: Server connected at leader tablet# 72075186224037888, clientId# [4:1210:2996], serverId# [4:1211:2997], sessionId# [0:0:0] 2026-01-08T15:35:51.468561Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:3740: Server connected at leader tablet# 72075186224037888, clientId# [4:1213:2999], serverId# [4:1214:3000], sessionId# [0:0:0] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/persqueue/ut/unittest >> TPQTest::TestTabletRestoreEventsOrder [GOOD] Test command err: 2026-01-08T15:30:42.867501Z node 1 :TABLET_RESOLVER INFO: tablet_resolver.cpp:696: No candidates for SelectForward, node 1 selfDC leaderDC 1:2:0 local 0 localDc 0 other 0 disallowed 0 Leader for TabletID 72057594037927937 is [0:0:0] sender: [1:108:2057] recipient: [1:106:2138] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [1:108:2057] recipient: [1:106:2138] Leader for TabletID 72057594037927937 is [1:112:2142] sender: [1:113:2057] recipient: [1:106:2138] 2026-01-08T15:30:42.983607Z node 1 :PQ_TX INFO: pq_impl.cpp:564: [PQ: 72057594037927937] doesn't have tx info 2026-01-08T15:30:42.983697Z node 1 :PQ_TX INFO: pq_impl.cpp:576: [PQ: 72057594037927937] PlanStep 0, PlanTxId 0, ExecStep 0, ExecTxId 0 2026-01-08T15:30:42.983826Z node 1 :PERSQUEUE NOTICE: pq_impl.cpp:927: [PQ: 72057594037927937] disable metering: reason# billing is not enabled in BillingMeteringConfig 2026-01-08T15:30:42.983888Z node 1 :PERSQUEUE INFO: pq_impl.cpp:613: [PQ: 72057594037927937] doesn't have tx writes info Leader for TabletID 72057594037927938 is [0:0:0] sender: [1:154:2057] recipient: [1:152:2172] IGNORE Leader for TabletID 72057594037927938 is [0:0:0] sender: [1:154:2057] recipient: [1:152:2172] Leader for TabletID 72057594037927938 is [1:158:2176] sender: [1:159:2057] recipient: [1:152:2172] Leader for TabletID 72057594037927937 is [1:112:2142] sender: [1:184:2057] recipient: [1:14:2061] 2026-01-08T15:30:43.009723Z node 1 :PERSQUEUE NOTICE: pq_impl.cpp:927: [PQ: 72057594037927937] disable metering: reason# billing is not enabled in BillingMeteringConfig 2026-01-08T15:30:43.029227Z node 1 :PERSQUEUE INFO: pq_impl.cpp:1316: [PQ: 72057594037927937] Config applied version 1 actor [1:182:2194] txId 12345 config: CacheSize: 10485760 PartitionConfig { MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } PartitionIds: 0 PartitionIds: 1 TopicName: "rt3.dc1--asdfgs--topic" Version: 1 LocalDC: true Topic: "topic" TopicPath: "/Root/PQ/rt3.dc1--asdfgs--topic" Partitions { PartitionId: 0 } Partitions { PartitionId: 1 } MeteringMode: METERING_MODE_RESERVED_CAPACITY AllPartitions { PartitionId: 0 } AllPartitions { PartitionId: 1 } Consumers { Name: "user" ReadFromTimestampsMs: 0 Generation: 1 } 2026-01-08T15:30:43.030366Z node 1 :PERSQUEUE INFO: partition_init.cpp:1155: [72057594037927937][Partition][0][StateInit] bootstrapping 0 [1:190:2142] 2026-01-08T15:30:43.032969Z node 1 :PERSQUEUE INFO: partition.cpp:712: [72057594037927937][Partition][0][StateInit] init complete for topic 'rt3.dc1--asdfgs--topic' partition 0 generation 2 [1:190:2142] 2026-01-08T15:30:43.035304Z node 1 :PERSQUEUE INFO: partition_init.cpp:1155: 
[72057594037927937][Partition][1][StateInit] bootstrapping 1 [1:191:2142] 2026-01-08T15:30:43.037241Z node 1 :PERSQUEUE INFO: partition.cpp:712: [72057594037927937][Partition][1][StateInit] init complete for topic 'rt3.dc1--asdfgs--topic' partition 1 generation 2 [1:191:2142] 2026-01-08T15:30:43.045581Z node 1 :PERSQUEUE ERROR: pq_impl.cpp:203: Answer error topic: 'rt3.dc1--asdfgs--topic' partition: 0 messageNo: 0 requestId: error: new GetOwnership request needed for owner 2026-01-08T15:30:43.046121Z node 1 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie default|d444dfec-c2e96175-90c53210-f09efff2_0 generated for partition 0 topic 'rt3.dc1--asdfgs--topic' owner default Got start offset = 100 2026-01-08T15:30:43.064321Z node 1 :PERSQUEUE ERROR: pq_impl.cpp:203: Answer error topic: 'rt3.dc1--asdfgs--topic' partition: 0 messageNo: 1 requestId: error: new GetOwnership request needed for owner 2026-01-08T15:30:43.064918Z node 1 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie default|a8b8a67f-bd11d506-ec1062d6-f4df7a33_1 generated for partition 0 topic 'rt3.dc1--asdfgs--topic' owner default 2026-01-08T15:30:43.092779Z node 1 :PERSQUEUE ERROR: pq_impl.cpp:203: Answer error topic: 'rt3.dc1--asdfgs--topic' partition: 0 messageNo: 1 requestId: error: new GetOwnership request needed for owner 2026-01-08T15:30:43.093237Z node 1 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie default|b5372d3f-861d28c7-9ceb155e-e209b694_2 generated for partition 0 topic 'rt3.dc1--asdfgs--topic' owner default Got start offset = 2000 2026-01-08T15:30:43.126199Z node 1 :PERSQUEUE ERROR: pq_impl.cpp:203: Answer error topic: 'rt3.dc1--asdfgs--topic' partition: 0 messageNo: 1 requestId: error: new GetOwnership request needed for owner 2026-01-08T15:30:43.126661Z node 1 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie default|f5767e4a-e3124784-804fc2d7-b15df479_3 generated for partition 0 topic 'rt3.dc1--asdfgs--topic' owner default Got start offset = 2000 2026-01-08T15:30:43.128348Z node 1 :PERSQUEUE ERROR: pq_impl.cpp:203: Answer error topic: 'rt3.dc1--asdfgs--topic' partition: 0 messageNo: 1 requestId: error: new GetOwnership request needed for owner 2026-01-08T15:30:43.135188Z node 1 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie default|d96a8f42-4069774-47ee6b32-7dc78142_4 generated for partition 0 topic 'rt3.dc1--asdfgs--topic' owner default 2026-01-08T15:30:43.192945Z node 1 :PERSQUEUE ERROR: pq_impl.cpp:203: Answer error topic: 'rt3.dc1--asdfgs--topic' partition: 0 messageNo: 1 requestId: error: new GetOwnership request needed for owner 2026-01-08T15:30:43.193404Z node 1 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie default|5585b3bf-3d25a0ed-6365b231-7412df79_5 generated for partition 0 topic 'rt3.dc1--asdfgs--topic' owner default Got start offset = 2000 2026-01-08T15:30:43.547698Z node 2 :TABLET_RESOLVER INFO: tablet_resolver.cpp:696: No candidates for SelectForward, node 2 selfDC leaderDC 1:2:0 local 0 localDc 0 other 0 disallowed 0 Leader for TabletID 72057594037927937 is [0:0:0] sender: [2:107:2057] recipient: [2:105:2138] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [2:107:2057] recipient: [2:105:2138] Leader for TabletID 72057594037927937 is [2:111:2142] sender: [2:112:2057] recipient: [2:105:2138] 2026-01-08T15:30:43.675057Z node 2 :PQ_TX INFO: pq_impl.cpp:564: [PQ: 72057594037927937] doesn't have tx info 2026-01-08T15:30:43.675145Z node 2 :PQ_TX INFO: pq_impl.cpp:576: [PQ: 72057594037927937] PlanStep 0, PlanTxId 0, ExecStep 0, ExecTxId 0 2026-01-08T15:30:43.675234Z node 2 :PERSQUEUE NOTICE: 
pq_impl.cpp:927: [PQ: 72057594037927937] disable metering: reason# billing is not enabled in BillingMeteringConfig 2026-01-08T15:30:43.675306Z node 2 :PERSQUEUE INFO: pq_impl.cpp:613: [PQ: 72057594037927937] doesn't have tx writes info Leader for TabletID 72057594037927938 is [0:0:0] sender: [2:153:2057] recipient: [2:151:2172] IGNORE Leader for TabletID 72057594037927938 is [0:0:0] sender: [2:153:2057] recipient: [2:151:2172] Leader for TabletID 72057594037927938 is [2:157:2176] sender: [2:158:2057] recipient: [2:151:2172] Leader for TabletID 72057594037927937 is [2:111:2142] sender: [2:183:2057] recipient: [2:14:2061] !Reboot 72057594037927937 (actor [2:111:2142]) on event NKikimr::TEvPersQueue::TEvUpdateConfigBuilder ! Leader for TabletID 72057594037927937 is [2:111:2142] sender: [2:185:2057] recipient: [2:103:2137] Leader for TabletID 72057594037927937 is [2:111:2142] sender: [2:188:2057] recipient: [2:187:2197] Leader for TabletID 72057594037927937 is [2:189:2198] sender: [2:190:2057] recipient: [2:187:2197] 2026-01-08T15:30:43.775247Z node 2 :PQ_TX INFO: pq_impl.cpp:564: [PQ: 72057594037927937] doesn't have tx info 2026-01-08T15:30:43.775323Z node 2 :PQ_TX INFO: pq_impl.cpp:576: [PQ: 72057594037927937] PlanStep 0, PlanTxId 0, ExecStep 0, ExecTxId 0 2026-01-08T15:30:43.775384Z node 2 :PERSQUEUE NOTICE: pq_impl.cpp:927: [PQ: 72057594037927937] disable metering: reason# billing is not enabled in BillingMeteringConfig 2026-01-08T15:30:43.775446Z node 2 :PERSQUEUE INFO: pq_impl.cpp:613: [PQ: 72057594037927937] doesn't have tx writes info !Reboot 72057594037927937 (actor [2:111:2142]) rebooted! !Reboot 72057594037927937 (actor [2:111:2142]) tablet resolver refreshed! new actor is[2:189:2198] 2026-01-08T15:30:43.797165Z node 2 :TABLET_RESOLVER INFO: tablet_resolver.cpp:696: No candidates for SelectForward, node 2 selfDC leaderDC 1:2:0 local 0 localDc 0 other 0 disallowed 0 2026-01-08T15:30:43.854662Z node 2 :TABLET_RESOLVER INFO: tablet_resolver.cpp:696: No candidates for SelectForward, node 2 selfDC leaderDC 1:2:0 local 0 localDc 0 other 0 disallowed 0 2026-01-08T15:30:43.889115Z node 2 :TABLET_RESOLVER INFO: tablet_resolver.cpp:696: No candidates for SelectForward, node 2 selfDC leaderDC 1:2:0 local 0 localDc 0 other 0 disallowed 0 2026-01-08T15:30:43.900285Z node 2 :TABLET_RESOLVER INFO: tablet_resolver.cpp:696: No candidates for SelectForward, node 2 selfDC leaderDC 1:2:0 local 0 localDc 0 other 0 disallowed 0 2026-01-08T15:30:43.938640Z node 2 :TABLET_RESOLVER INFO: tablet_resolver.cpp:696: No candidates for SelectForward, node 2 selfDC leaderDC 1:2:0 local 0 localDc 0 other 0 disallowed 0 2026-01-08T15:30:44.008204Z node 2 :TABLET_RESOLVER INFO: tablet_resolver.cpp:696: No candidates for SelectForward, node 2 selfDC leaderDC 1:2:0 local 0 localDc 0 other 0 disallowed 0 2026-01-08T15:30:44.052422Z node 2 :TABLET_RESOLVER INFO: tablet_resolver.cpp:696: No candidates for SelectForward, node 2 selfDC leaderDC 1:2:0 local 0 localDc 0 other 0 disallowed 0 2026-01-08T15:30:44.182868Z node 2 :TABLET_RESOLVER INFO: tablet_resolver.cpp:696: No candidates for SelectForward, node 2 selfDC leaderDC 1:2:0 local 0 localDc 0 other 0 disallowed 0 2026-01-08T15:30:44.216599Z node 2 :TABLET_RESOLVER INFO: tablet_resolver.cpp:696: No candidates for SelectForward, node 2 selfDC leaderDC 1:2:0 local 0 localDc 0 other 0 disallowed 0 2026-01-08T15:30:44.531502Z node 2 :TABLET_RESOLVER INFO: tablet_resolver.cpp:696: No candidates for SelectForward, node 2 selfDC leaderDC 1:2:0 local 0 localDc 0 other 0 
disallowed 0 2026-01-08T15:30:44.582296Z node 2 :TABLET_RESOLVER INFO: tablet_resolver.cpp:696: No candidates for SelectForward, node 2 selfDC leaderDC 1:2:0 local 0 localDc 0 other 0 disallowed 0 2026-01-08T15:30:44.930655Z node 2 :TABLET_RESOLVER INFO: tablet_resolver.cpp:696: No candidates for SelectForward, node 2 selfDC leaderDC 1:2:0 local 0 localDc 0 other 0 disallowed 0 2026-01-08T15:30:45.209323Z node 2 :TABLET_RESOLVER INFO: tablet_resolver.cpp:696: No candidates for SelectForward, node 2 selfDC leaderDC 1:2:0 local 0 localDc 0 other 0 disallowed 0 2026-01-08T15:30:45.302689Z node 2 :TABLET_RESOLVER INFO: tablet_resolver.cpp:696: No candidates for SelectForward, node 2 selfDC leaderDC 1:2:0 local 0 localDc 0 other 0 disallowed 0 Leader for TabletID 72057594037927937 is [2:189:2198] sender: [2:270:2057] recipient: [2:14:2061] 2026-01-08T15:30:45.512619Z node 2 :PERSQUEUE NOTICE: pq_impl.cpp:927: [PQ: 72057594037927937] disable metering: reason# billing is not enabled in BillingMeteringConfig 2026-01-08T15:30:45.513826Z node 2 :PERSQUEUE INFO: pq_impl.cpp:1316: [PQ: 72057594037927937] Config applied version 2 actor [2:181:2194] txId 12345 config: CacheSize: 10485760 PartitionConfig { MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } PartitionIds: 0 PartitionIds: 1 TopicName: "rt3.dc1--asdfgs--topic" Version: 2 LocalDC: true Topic: "topic" TopicPath: "/Root/PQ/rt3.dc1--asdfgs--topic" Partitions { PartitionId: 0 } Partitions { PartitionId: 1 } MeteringMode: METERING_MODE_RESERVED_CAPACITY AllPartitions { PartitionId: 0 } AllPartitions { PartitionId: 1 } Consumers { Name: "user" ReadFromTimestampsMs: 0 Generation: 2 } 2026-01-08T15:30:45.524827Z node 2 :PERSQUEUE INFO: partition_in ... 
2026-01-08T15:35:49.071211Z node 156 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1657: [72057594037927938][rt3.dc1--topic] pipe [156:588:2576] connected; active server actors: 1 2026-01-08T15:35:49.073887Z node 156 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1657: [72057594037927938][rt3.dc1--topic] pipe [156:593:2581] connected; active server actors: 1 2026-01-08T15:35:49.076570Z node 156 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1657: [72057594037927938][rt3.dc1--topic] pipe [156:598:2586] connected; active server actors: 1 2026-01-08T15:35:49.079417Z node 156 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1657: [72057594037927938][rt3.dc1--topic] pipe [156:603:2591] connected; active server actors: 1 2026-01-08T15:35:49.082344Z node 156 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1657: [72057594037927938][rt3.dc1--topic] pipe [156:608:2596] connected; active server actors: 1 2026-01-08T15:35:49.086786Z node 156 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1657: [72057594037927938][rt3.dc1--topic] pipe [156:613:2601] connected; active server actors: 1 2026-01-08T15:35:49.089687Z node 156 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1657: [72057594037927938][rt3.dc1--topic] pipe [156:618:2606] connected; active server actors: 1 2026-01-08T15:35:49.093926Z node 156 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1657: [72057594037927938][rt3.dc1--topic] pipe [156:623:2611] connected; active server actors: 1 2026-01-08T15:35:49.097466Z node 156 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1657: [72057594037927938][rt3.dc1--topic] pipe [156:628:2616] connected; active server actors: 1 2026-01-08T15:35:49.101365Z node 156 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1657: [72057594037927938][rt3.dc1--topic] pipe [156:633:2621] connected; active server actors: 1 2026-01-08T15:35:49.105429Z node 156 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1657: [72057594037927938][rt3.dc1--topic] pipe [156:638:2626] connected; active server actors: 1 2026-01-08T15:35:49.111605Z node 156 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1657: [72057594037927938][rt3.dc1--topic] pipe [156:643:2631] connected; active server actors: 1 2026-01-08T15:35:49.115161Z node 156 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1657: [72057594037927938][rt3.dc1--topic] pipe [156:648:2636] connected; active server actors: 1 2026-01-08T15:35:49.118499Z node 156 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1657: [72057594037927938][rt3.dc1--topic] pipe [156:653:2641] connected; active server actors: 1 2026-01-08T15:35:49.124442Z node 156 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1657: [72057594037927938][rt3.dc1--topic] pipe [156:658:2646] connected; active server actors: 1 2026-01-08T15:35:49.127538Z node 156 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1657: [72057594037927938][rt3.dc1--topic] pipe [156:663:2651] connected; active server actors: 1 2026-01-08T15:35:49.130771Z node 156 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1657: [72057594037927938][rt3.dc1--topic] pipe [156:668:2656] connected; active server actors: 1 2026-01-08T15:35:49.134781Z node 156 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1657: [72057594037927938][rt3.dc1--topic] pipe [156:673:2661] connected; active server actors: 1 2026-01-08T15:35:49.137691Z node 156 :PERSQUEUE_READ_BALANCER 
INFO: read_balancer__balancing.cpp:1657: [72057594037927938][rt3.dc1--topic] pipe [156:678:2666] connected; active server actors: 1 2026-01-08T15:35:49.140658Z node 156 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1657: [72057594037927938][rt3.dc1--topic] pipe [156:683:2671] connected; active server actors: 1 2026-01-08T15:35:49.145718Z node 156 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1657: [72057594037927938][rt3.dc1--topic] pipe [156:688:2676] connected; active server actors: 1 2026-01-08T15:35:49.150763Z node 156 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1657: [72057594037927938][rt3.dc1--topic] pipe [156:693:2681] connected; active server actors: 1 2026-01-08T15:35:49.153806Z node 156 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1657: [72057594037927938][rt3.dc1--topic] pipe [156:698:2686] connected; active server actors: 1 2026-01-08T15:35:49.156725Z node 156 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1657: [72057594037927938][rt3.dc1--topic] pipe [156:703:2691] connected; active server actors: 1 2026-01-08T15:35:49.159506Z node 156 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1657: [72057594037927938][rt3.dc1--topic] pipe [156:708:2696] connected; active server actors: 1 2026-01-08T15:35:49.162279Z node 156 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1657: [72057594037927938][rt3.dc1--topic] pipe [156:713:2701] connected; active server actors: 1 2026-01-08T15:35:49.164983Z node 156 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1657: [72057594037927938][rt3.dc1--topic] pipe [156:718:2706] connected; active server actors: 1 2026-01-08T15:35:49.167834Z node 156 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1657: [72057594037927938][rt3.dc1--topic] pipe [156:723:2711] connected; active server actors: 1 2026-01-08T15:35:49.170481Z node 156 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1657: [72057594037927938][rt3.dc1--topic] pipe [156:728:2716] connected; active server actors: 1 2026-01-08T15:35:49.173120Z node 156 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1657: [72057594037927938][rt3.dc1--topic] pipe [156:733:2721] connected; active server actors: 1 2026-01-08T15:35:49.175873Z node 156 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1657: [72057594037927938][rt3.dc1--topic] pipe [156:738:2726] connected; active server actors: 1 2026-01-08T15:35:49.178989Z node 156 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1657: [72057594037927938][rt3.dc1--topic] pipe [156:743:2731] connected; active server actors: 1 2026-01-08T15:35:49.183464Z node 156 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1657: [72057594037927938][rt3.dc1--topic] pipe [156:748:2736] connected; active server actors: 1 2026-01-08T15:35:49.186351Z node 156 :PERSQUEUE DEBUG: pq_impl.cpp:2784: [PQ: 72057594037927937] server connected, pipe [156:753:2741], now have 1 active actors on pipe 2026-01-08T15:35:49.204351Z node 156 :PERSQUEUE DEBUG: pq_impl.cpp:2784: [PQ: 72057594037927937] server connected, pipe [156:756:2744], now have 1 active actors on pipe 2026-01-08T15:35:49.205677Z node 156 :PERSQUEUE DEBUG: pq_impl.cpp:2784: [PQ: 72057594037927937] server connected, pipe [156:759:2747], now have 1 active actors on pipe 2026-01-08T15:35:49.206967Z node 156 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1657: [72057594037927938][rt3.dc1--topic] pipe [156:762:2750] connected; active server actors: 1 
2026-01-08T15:35:49.741664Z node 157 :TABLET_RESOLVER INFO: tablet_resolver.cpp:696: No candidates for SelectForward, node 157 selfDC leaderDC 1:2:0 local 0 localDc 0 other 0 disallowed 0 2026-01-08T15:35:49.810882Z node 157 :PQ_TX INFO: pq_impl.cpp:564: [PQ: 72057594037927937] doesn't have tx info 2026-01-08T15:35:49.810984Z node 157 :PQ_TX INFO: pq_impl.cpp:576: [PQ: 72057594037927937] PlanStep 0, PlanTxId 0, ExecStep 0, ExecTxId 0 2026-01-08T15:35:49.811066Z node 157 :PERSQUEUE NOTICE: pq_impl.cpp:927: [PQ: 72057594037927937] disable metering: reason# billing is not enabled in BillingMeteringConfig 2026-01-08T15:35:49.811157Z node 157 :PERSQUEUE INFO: pq_impl.cpp:613: [PQ: 72057594037927937] doesn't have tx writes info 2026-01-08T15:35:49.864233Z node 157 :PQ_TX INFO: pq_impl.cpp:564: [PQ: 72057594037927937] doesn't have tx info 2026-01-08T15:35:49.864321Z node 157 :PQ_TX INFO: pq_impl.cpp:576: [PQ: 72057594037927937] PlanStep 0, PlanTxId 0, ExecStep 0, ExecTxId 0 2026-01-08T15:35:49.864401Z node 157 :PERSQUEUE NOTICE: pq_impl.cpp:927: [PQ: 72057594037927937] disable metering: reason# billing is not enabled in BillingMeteringConfig 2026-01-08T15:35:49.864483Z node 157 :PERSQUEUE INFO: pq_impl.cpp:613: [PQ: 72057594037927937] doesn't have tx writes info 2026-01-08T15:35:49.864894Z node 157 :TABLET_RESOLVER INFO: tablet_resolver.cpp:696: No candidates for SelectForward, node 157 selfDC leaderDC 1:2:0 local 0 localDc 0 other 0 disallowed 0 2026-01-08T15:35:49.868437Z node 157 :PERSQUEUE NOTICE: pq_impl.cpp:927: [PQ: 72057594037927937] disable metering: reason# billing is not enabled in BillingMeteringConfig 2026-01-08T15:35:49.869883Z node 157 :PERSQUEUE INFO: pq_impl.cpp:1316: [PQ: 72057594037927937] Config applied version 163 actor [157:181:2194] txId 12345 config: CacheSize: 10485760 PartitionConfig { MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } PartitionIds: 0 PartitionIds: 1 TopicName: "topic" Version: 163 LocalDC: true Topic: "topic" TopicPath: "/topic" YcCloudId: "somecloud" YcFolderId: "somefolder" YdbDatabaseId: "PQ" YdbDatabasePath: "/Root/PQ" Partitions { PartitionId: 0 } Partitions { PartitionId: 1 } FederationAccount: "federationAccount" MeteringMode: METERING_MODE_RESERVED_CAPACITY AllPartitions { PartitionId: 0 } AllPartitions { PartitionId: 1 } Consumers { Name: "user" ReadFromTimestampsMs: 0 Generation: 163 } Consumers { Name: "aaa" Generation: 163 Important: true } 2026-01-08T15:35:49.871027Z node 157 :PERSQUEUE INFO: partition_init.cpp:1155: [72057594037927937][Partition][0][StateInit] bootstrapping 0 [157:248:2196] 2026-01-08T15:35:49.872433Z node 157 :PERSQUEUE INFO: partition.cpp:712: [72057594037927937][Partition][0][StateInit] init complete for topic 'topic' partition 0 generation 3 [157:248:2196] 2026-01-08T15:35:49.874033Z node 157 :PERSQUEUE INFO: partition_init.cpp:1155: [72057594037927937][Partition][1][StateInit] bootstrapping 1 [157:250:2196] 2026-01-08T15:35:49.875013Z node 157 :PERSQUEUE INFO: partition.cpp:712: [72057594037927937][Partition][1][StateInit] init complete for topic 'topic' partition 1 generation 3 [157:250:2196] 2026-01-08T15:35:49.924460Z node 157 :PQ_TX INFO: pq_impl.cpp:564: [PQ: 72057594037927937] doesn't have tx info 2026-01-08T15:35:49.924554Z node 157 :PQ_TX INFO: pq_impl.cpp:576: [PQ: 72057594037927937] PlanStep 0, PlanTxId 0, ExecStep 0, ExecTxId 0 2026-01-08T15:35:49.926243Z node 157 :PERSQUEUE NOTICE: 
pq_impl.cpp:927: [PQ: 72057594037927937] disable metering: reason# billing is not enabled in BillingMeteringConfig 2026-01-08T15:35:49.926356Z node 157 :PERSQUEUE INFO: pq_impl.cpp:613: [PQ: 72057594037927937] doesn't have tx writes info 2026-01-08T15:35:49.927441Z node 157 :PERSQUEUE INFO: partition_init.cpp:1155: [72057594037927937][Partition][0][StateInit] bootstrapping 0 [157:326:2254] 2026-01-08T15:35:49.929053Z node 157 :PERSQUEUE INFO: partition_init.cpp:1155: [72057594037927937][Partition][1][StateInit] bootstrapping 1 [157:328:2254] 2026-01-08T15:35:49.934534Z node 157 :PERSQUEUE INFO: partition_init.cpp:1016: [topic:0:TInitEndWriteTimestampStep] Initializing EndWriteTimestamp skipped because already initialized. 2026-01-08T15:35:49.935797Z node 157 :PERSQUEUE INFO: partition.cpp:712: [72057594037927937][Partition][0][StateInit] init complete for topic 'topic' partition 0 generation 4 [157:326:2254] 2026-01-08T15:35:49.936107Z node 157 :PERSQUEUE INFO: partition_init.cpp:1016: [topic:1:TInitEndWriteTimestampStep] Initializing EndWriteTimestamp skipped because already initialized. 2026-01-08T15:35:49.936725Z node 157 :PERSQUEUE INFO: partition.cpp:712: [72057594037927937][Partition][1][StateInit] init complete for topic 'topic' partition 1 generation 4 [157:328:2254] |87.9%| [TM] {BAZEL_UPLOAD} ydb/core/tx/datashard/ut_change_collector/unittest |87.9%| [TM] {BAZEL_UPLOAD} ydb/core/persqueue/ut/unittest >> DataShardVolatile::CompactedVolatileChangesAbort [GOOD] >> ConvertMiniKQLValueToYdbValueTest::SimpleInt32 >> ConvertMiniKQLValueToYdbValueTest::SimpleInt32 [GOOD] >> ConvertMiniKQLValueToYdbValueTest::SimpleInt64 [GOOD] >> ConvertMiniKQLValueToYdbValueTest::SimpleTzDate [GOOD] >> ConvertMiniKQLValueToYdbValueTest::SimpleTzDateTime [GOOD] >> ConvertMiniKQLValueToYdbValueTest::SimpleTzTimeStamp [GOOD] >> ConvertMiniKQLValueToYdbValueTest::SimpleDecimal [GOOD] >> ConvertMiniKQLValueToYdbValueTest::SimpleUuid [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_consistent_copy_tables/unittest >> TSchemeShardConsistentCopyTablesTest::ConsistentCopyTableWithMultipleIndexes [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2143] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2143] Leader for TabletID 72057594046678944 is [1:129:2154] sender: [1:130:2058] recipient: [1:112:2143] 2026-01-08T15:35:50.256900Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7938: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2026-01-08T15:35:50.257039Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7966: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2026-01-08T15:35:50.257089Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7852: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2026-01-08T15:35:50.257150Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7868: OperationsProcessing config: using default configuration 2026-01-08T15:35:50.257196Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7874: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2026-01-08T15:35:50.257227Z node 1 :FLAT_TX_SCHEMESHARD 
NOTICE: schemeshard_impl.cpp:7874: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2026-01-08T15:35:50.257283Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7998: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2026-01-08T15:35:50.257355Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_shred_manager.cpp:39: [RootShredManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, ShredInterval# 604800.000000s, ShredBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2026-01-08T15:35:50.258365Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:8069: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2026-01-08T15:35:50.258718Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2026-01-08T15:35:50.357789Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7755: Cannot subscribe to console configs 2026-01-08T15:35:50.357873Z node 1 :IMPORT WARN: schemeshard_import.cpp:440: Table profiles were not loaded 2026-01-08T15:35:50.372072Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2026-01-08T15:35:50.372517Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2026-01-08T15:35:50.372725Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2026-01-08T15:35:50.387774Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2026-01-08T15:35:50.388255Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:464: Clear TempDirsState with owners number: 0 2026-01-08T15:35:50.389075Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1383: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2026-01-08T15:35:50.389997Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2026-01-08T15:35:50.393766Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:160: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2026-01-08T15:35:50.393987Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_shred_manager.cpp:84: [RootShredManager] Stop 2026-01-08T15:35:50.395348Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2026-01-08T15:35:50.395418Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2026-01-08T15:35:50.395544Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2026-01-08T15:35:50.395600Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2026-01-08T15:35:50.395644Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:213: TTxServerlessStorageBilling.Complete 2026-01-08T15:35:50.395878Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:7111: Handle: TEvAllocateResult: Cookie# 0, at 
schemeshard: 72057594046678944 2026-01-08T15:35:50.554932Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot" OperationType: ESchemeOpMkDir MkDir { Name: ".sys" } Internal: true FailOnExist: false } TxId: 281474976710657 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:35:50.556128Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "auth_permissions" Type: EAuthPermissions } } TxId: 281474976710658 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:35:50.556273Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "auth_effective_permissions" Type: EAuthEffectivePermissions } } TxId: 281474976710659 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:35:50.556370Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "auth_users" Type: EAuthUsers } } TxId: 281474976710660 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:35:50.556487Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "pg_tables" Type: EPgTables } } TxId: 281474976710661 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:35:50.556586Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "ds_pdisks" Type: EPDisks } } TxId: 281474976710662 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:35:50.556669Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "auth_group_members" Type: EAuthGroupMembers } } TxId: 281474976710663 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:35:50.556810Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView 
Internal: true FailOnExist: false CreateSysView { Name: "top_partitions_one_minute" Type: ETopPartitionsByCpuOneMinute } } TxId: 281474976710664 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:35:50.556919Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "top_queries_by_request_units_one_hour" Type: ETopQueriesByRequestUnitsOneHour } } TxId: 281474976710665 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:35:50.557017Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "ds_vslots" Type: EVSlots } } TxId: 281474976710666 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:35:50.557129Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "top_partitions_by_tli_one_hour" Type: ETopPartitionsByTliOneHour } } TxId: 281474976710667 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:35:50.557237Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "nodes" Type: ENodes } } TxId: 281474976710668 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:35:50.557379Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "top_queries_by_cpu_time_one_hour" Type: ETopQueriesByCpuTimeOneHour } } TxId: 281474976710669 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:35:50.557473Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "top_queries_by_read_bytes_one_hour" Type: ETopQueriesByReadBytesOneHour } } TxId: 281474976710670 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:35:50.557543Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "top_queries_by_read_bytes_one_minute" Type: ETopQueriesByR ... 
hildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "indexImplTable" Columns { Name: "value1" Type: "Utf8" TypeId: 4608 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "key" Type: "Uint32" TypeId: 2 Id: 2 NotNull: false IsBuildInProgress: false } KeyColumnNames: "value1" KeyColumnNames: "key" KeyColumnIds: 1 KeyColumnIds: 2 TableSchemaVersion: 1 IsBackup: false IsRestore: false } TableStats { DataSize: 0 RowCount: 0 IndexSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 0 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 1 RangeReadRows: 0 StoragePools { } ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 0 Memory: 0 Network: 0 Storage: 0 ReadThroughput: 0 WriteThroughput: 0 ReadIops: 0 WriteIops: 0 } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 46 PathsLimit: 10000 ShardsInside: 6 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 45 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 2026-01-08T15:35:52.575881Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5248: StateWork, received event# 271122945, Sender [1:1219:3130], Recipient [1:129:2154]: NKikimrSchemeOp.TDescribePath Path: "/MyRoot/TableCopy/ValueIndex2" Options { ReturnPartitioningInfo: true ReturnPartitionConfig: true BackupInfo: false ReturnBoundaries: true ShowPrivateTable: true } 2026-01-08T15:35:52.575948Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5260: StateWork, processing event TEvSchemeShard::TEvDescribeScheme 2026-01-08T15:35:52.576057Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/TableCopy/ValueIndex2" Options { ReturnPartitioningInfo: true ReturnPartitionConfig: true BackupInfo: false ReturnBoundaries: true ShowPrivateTable: true }, at schemeshard: 72057594046678944 2026-01-08T15:35:52.576318Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/TableCopy/ValueIndex2" took 259us result status StatusSuccess 2026-01-08T15:35:52.577172Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/TableCopy/ValueIndex2" PathDescription { Self { Name: "ValueIndex2" PathId: 46 SchemeshardId: 72057594046678944 PathType: EPathTypeTableIndex CreateFinished: true CreateTxId: 102 CreateStep: 5000039 
ParentPathId: 43 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 4 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 4 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 2 TableIndexVersion: 1 } ChildrenExist: true } Children { Name: "indexImplTable" PathId: 47 SchemeshardId: 72057594046678944 PathType: EPathTypeTable CreateFinished: true CreateTxId: 102 CreateStep: 5000039 ParentPathId: 46 PathState: EPathStateCreate Owner: "root@builtin" ACL: "" PathSubType: EPathSubTypeSyncIndexImplTable Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 1 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 46 PathsLimit: 10000 ShardsInside: 6 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } TableIndex { Name: "ValueIndex2" LocalPathId: 46 Type: EIndexTypeGlobal State: EIndexStateReady KeyColumnNames: "value2" SchemaVersion: 1 PathOwnerId: 72057594046678944 DataSize: 0 IndexImplTableDescriptions { Columns { Name: "value2" Type: "Utf8" TypeId: 4608 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "key" Type: "Uint32" TypeId: 2 Id: 2 NotNull: false IsBuildInProgress: false } KeyColumnNames: "value2" KeyColumnNames: "key" KeyColumnIds: 1 KeyColumnIds: 2 PartitionConfig { CompactionPolicy { InMemSizeToSnapshot: 4194304 InMemStepsToSnapshot: 300 InMemForceStepsToSnapshot: 500 InMemForceSizeToSnapshot: 16777216 InMemCompactionBrokerQueue: 0 ReadAheadHiThreshold: 67108864 ReadAheadLoThreshold: 16777216 MinDataPageSize: 7168 SnapBrokerQueue: 0 Generation { GenerationId: 0 SizeToCompact: 0 CountToCompact: 8 ForceCountToCompact: 8 ForceSizeToCompact: 134217728 CompactionBrokerQueue: 4294967295 KeepInCache: true BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen1" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 0 UpliftPartSize: 0 } Generation { GenerationId: 1 SizeToCompact: 41943040 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 536870912 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen2" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 8388608 UpliftPartSize: 8388608 } Generation { GenerationId: 2 SizeToCompact: 
419430400 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 17179869184 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen3" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 83886080 UpliftPartSize: 83886080 } BackupBrokerQueue: 1 DefaultTaskPriority: 5 BackgroundSnapshotPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } InMemResourceBrokerTask: "compaction_gen0" SnapshotResourceBrokerTask: "compaction_gen0" BackupResourceBrokerTask: "scan" LogOverheadSizeToSnapshot: 16777216 LogOverheadCountToSnapshot: 500 DroppedRowsPercentToCompact: 50 MinBTreeIndexNodeSize: 7168 MinBTreeIndexNodeKeys: 6 } PartitioningPolicy { SizeToSplit: 2147483648 MinPartitionsCount: 1 } } } } } PathId: 46 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 2026-01-08T15:35:52.577788Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5248: StateWork, received event# 271122945, Sender [1:1220:3131], Recipient [1:129:2154]: NKikimrSchemeOp.TDescribePath Path: "/MyRoot/TableCopy/ValueIndex2/indexImplTable" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: true } 2026-01-08T15:35:52.577839Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5260: StateWork, processing event TEvSchemeShard::TEvDescribeScheme 2026-01-08T15:35:52.577932Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/TableCopy/ValueIndex2/indexImplTable" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: true }, at schemeshard: 72057594046678944 2026-01-08T15:35:52.578183Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/TableCopy/ValueIndex2/indexImplTable" took 197us result status StatusSuccess 2026-01-08T15:35:52.578632Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/TableCopy/ValueIndex2/indexImplTable" PathDescription { Self { Name: "indexImplTable" PathId: 47 SchemeshardId: 72057594046678944 PathType: EPathTypeTable CreateFinished: true CreateTxId: 102 CreateStep: 5000039 ParentPathId: 46 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeSyncIndexImplTable Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "indexImplTable" Columns { Name: "value2" Type: "Utf8" TypeId: 4608 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "key" Type: "Uint32" TypeId: 2 Id: 2 NotNull: false IsBuildInProgress: false } KeyColumnNames: "value2" KeyColumnNames: "key" KeyColumnIds: 1 KeyColumnIds: 2 TableSchemaVersion: 1 IsBackup: false IsRestore: false } TableStats { DataSize: 0 RowCount: 0 IndexSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 0 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 1 RangeReadRows: 0 
StoragePools { } ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 0 Memory: 0 Network: 0 Storage: 0 ReadThroughput: 0 WriteThroughput: 0 ReadIops: 0 WriteIops: 0 } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 46 PathsLimit: 10000 ShardsInside: 6 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 47 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 |87.9%| [TM] {BAZEL_UPLOAD} ydb/core/tx/schemeshard/ut_consistent_copy_tables/unittest >> CellsFromTupleTest::CellsFromTupleSuccess [GOOD] >> CellsFromTupleTest::CellsFromTupleSuccessPg >> CellsFromTupleTest::CellsFromTupleSuccessPg [GOOD] >> CellsFromTupleTest::CellsFromTupleFails [GOOD] >> CellsFromTupleTest::CellsFromTupleFailsPg [GOOD] >> CompressionTests::Zstd [GOOD] >> CompressionTests::Unsupported [GOOD] >> ConvertMiniKQLTypeToYdbTypeTest::DecimalType [GOOD] |87.9%| [TM] {asan, default-linux-x86_64, release} ydb/core/ydb_convert/ut/unittest >> ConvertYdbValueToMiniKQLValueTest::PgValue [GOOD] |87.9%| [TM] {BAZEL_UPLOAD} ydb/core/ydb_convert/ut/unittest >> BsControllerConfig::CommandRollbackWhenCombined [GOOD] >> TxUsage::Sinks_Oltp_WriteToTopicAndTable_6_Query [GOOD] >> ConvertMiniKQLValueToYdbValueTest::SimpleBool [GOOD] >> ConvertMiniKQLValueToYdbValueTest::OptionalString [GOOD] >> ConvertMiniKQLValueToYdbValueTest::OptionalEmpty [GOOD] >> ConvertMiniKQLValueToYdbValueTest::OptionalOptionalEmpty [GOOD] >> ConvertMiniKQLValueToYdbValueTest::OptionalOptionalEmpty2 [GOOD] >> ConvertMiniKQLValueToYdbValueTest::List [GOOD] >> ConvertMiniKQLValueToYdbValueTest::Dict [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/provider/ut/unittest >> KikimrIcGateway::TestLoadDataSourceProperties+UseSchemaSecrets [GOOD] Test command err: Trying to start YDB, gRPC: 24818, MsgBus: 4180 2026-01-08T15:35:00.423996Z node 1 :METADATA_PROVIDER WARN: log.cpp:841: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7593014702376967932:2142];send_to=[0:7307199536658146131:7762515]; 2026-01-08T15:35:00.424092Z node 1 :METADATA_PROVIDER ERROR: log.cpp:841: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2026-01-08T15:35:00.474021Z node 1 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:125: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, got undelivered to scheme cache: ActorUnknown 2026-01-08T15:35:00.833194Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) 
VolatileState: Unknown -> Disconnected 2026-01-08T15:35:00.833314Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2026-01-08T15:35:00.835815Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2026-01-08T15:35:00.933601Z node 1 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: LookupError, path: Root/.metadata/script_executions 2026-01-08T15:35:00.935052Z node 1 :IMPORT WARN: schemeshard_import.cpp:440: Table profiles were not loaded 2026-01-08T15:35:00.938558Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1374: Notification cookie mismatch for subscription [1:7593014702376967820:2081] 1767886500418144 != 1767886500418147 2026-01-08T15:35:01.062511Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2026-01-08T15:35:01.062541Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2026-01-08T15:35:01.062552Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2026-01-08T15:35:01.062708Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2026-01-08T15:35:01.196969Z node 1 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: PathErrorUnknown, path: Root/.metadata/script_executions 2026-01-08T15:35:01.438776Z node 1 :TX_CONVEYOR ERROR: log.cpp:841: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2026-01-08T15:35:01.835754Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2026-01-08T15:35:01.948068Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:35:02.174285Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:35:02.550640Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:35:02.659271Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: 
ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:35:05.128194Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7593014723851806184:2400], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:35:05.128293Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:263: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:35:05.128700Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7593014723851806194:2401], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:35:05.128760Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:263: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:35:05.426426Z node 1 :METADATA_PROVIDER ERROR: log.cpp:841: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7593014702376967932:2142];send_to=[0:7307199536658146131:7762515]; 2026-01-08T15:35:05.426496Z node 1 :METADATA_PROVIDER ERROR: log.cpp:841: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2026-01-08T15:35:05.560387Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:35:05.611911Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:35:05.660259Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:35:05.723554Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:35:05.794901Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:35:05.857503Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:35:05.935506Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:35:06.001211Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710669:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:35:06.127242Z node 1 :KQP_WORKLOAD_SERVICE WARN: 
scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7593014728146774365:2485], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:35:06.127298Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:263: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:35:06.127462Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7593014728146774370:2488], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:35:06.127484Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7593014728146774371:2489], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:35:06.127552Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:263: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:35:06.131628Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710670:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2026-01-08T15:35:06.149711Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:123: NotifyTxCompletion, unknown transaction, txId: 281474976710670, at schemeshard: 72057594046644480 2026-01-08T15:35:06.151107Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7593014728146774374:2490], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710670 completed, doublechecking } 2026-01-08T15:35:06.312045Z node 1 :TX_PROXY ERROR: schemereq.cpp:624: Actor# [1:7593014728146774425:3772] txid# 281474976710671, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 53], type: EP ... _service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: LookupError, path: Root/.metadata/script_executions 2026-01-08T15:35:43.446938Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Connecting -> Connected 2026-01-08T15:35:43.682587Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2026-01-08T15:35:43.682632Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2026-01-08T15:35:43.682651Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2026-01-08T15:35:43.682773Z node 4 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2026-01-08T15:35:43.869030Z node 4 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: PathErrorUnknown, path: Root/.metadata/script_executions 2026-01-08T15:35:44.168111Z node 4 :TX_CONVEYOR ERROR: log.cpp:841: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2026-01-08T15:35:44.363848Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2026-01-08T15:35:44.369690Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:123: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2026-01-08T15:35:44.382342Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:35:44.492843Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:35:44.704889Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:35:44.843482Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: 
ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:35:48.051988Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7593014906895241761:2400], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:35:48.052107Z node 4 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:263: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:35:48.052438Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7593014906895241770:2401], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:35:48.052512Z node 4 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:263: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:35:48.149817Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:35:48.163315Z node 4 :METADATA_PROVIDER ERROR: log.cpp:841: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[4:7593014885420403433:2068];send_to=[0:7307199536658146131:7762515]; 2026-01-08T15:35:48.163390Z node 4 :METADATA_PROVIDER ERROR: log.cpp:841: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2026-01-08T15:35:48.201694Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:35:48.253055Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:35:48.303457Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:35:48.361305Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:35:48.426420Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:35:48.476115Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:35:48.544699Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710669:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:35:48.666802Z node 4 :KQP_WORKLOAD_SERVICE WARN: 
scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7593014906895242646:2484], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:35:48.666941Z node 4 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:263: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:35:48.667607Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7593014906895242651:2487], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:35:48.667679Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7593014906895242652:2488], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:35:48.667828Z node 4 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:263: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:35:48.676031Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710670:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2026-01-08T15:35:48.700068Z node 4 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [4:7593014906895242655:2489], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710670 completed, doublechecking } 2026-01-08T15:35:48.796515Z node 4 :TX_PROXY ERROR: schemereq.cpp:624: Actor# [4:7593014906895242706:3778] txid# 281474976710671, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 53], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2026-01-08T15:35:51.243741Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateSecret, opId: 281474976710673:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_secret.cpp:267) 2026-01-08T15:35:51.298282Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateSecret, opId: 281474976710674:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_secret.cpp:267) 2026-01-08T15:35:51.306423Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:123: NotifyTxCompletion, unknown transaction, txId: 281474976710674, at schemeshard: 72057594046644480 2026-01-08T15:35:51.370490Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateExternalDataSource, opId: 281474976710675:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_external_data_source.cpp:268) |87.9%| [TM] {BAZEL_UPLOAD} ydb/core/kqp/provider/ut/unittest |87.9%| [TM] {asan, default-linux-x86_64, release} ydb/core/ydb_convert/ut/unittest >> ConvertMiniKQLValueToYdbValueTest::SimpleUuid [GOOD] |87.9%| [TM] {BAZEL_UPLOAD} ydb/core/ydb_convert/ut/unittest |87.9%| [TM] {asan, default-linux-x86_64, release} ydb/core/ydb_convert/ut/unittest >> ConvertMiniKQLTypeToYdbTypeTest::DecimalType [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/datashard/ut_upload_rows/unittest >> TTxDataShardUploadRows::ShouldRejectOnChangeQueueOverflowAndRetryOnRetryableError [GOOD] Test command err: ... waiting for SysViewsRoster update finished 2026-01-08T15:35:40.074797Z node 1 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:125: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, got undelivered to scheme cache: ActorUnknown 2026-01-08T15:35:40.186579Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:114:2161], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2026-01-08T15:35:40.186898Z node 1 :METADATA_PROVIDER ERROR: log.cpp:841: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2026-01-08T15:35:40.187083Z node 1 :METADATA_PROVIDER ERROR: log.cpp:841: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2026-01-08T15:35:40.469033Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2026-01-08T15:35:40.469167Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2026-01-08T15:35:40.557421Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1374: Notification cookie mismatch for subscription [1:34:2081] 1767886536839544 != 1767886536839548 2026-01-08T15:35:40.572756Z node 1 :IMPORT WARN: schemeshard_import.cpp:440: Table profiles were not loaded 2026-01-08T15:35:40.620876Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2026-01-08T15:35:40.737231Z node 1 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: PathErrorUnknown, path: Root/.metadata/script_executions ... waiting for SysViewsRoster update finished (done) 2026-01-08T15:35:41.078284Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2026-01-08T15:35:41.092448Z node 1 :TX_CONVEYOR ERROR: log.cpp:841: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2026-01-08T15:35:41.219446Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:35:41.314544Z node 1 :TX_DATASHARD INFO: datashard.cpp:374: TDataShard::OnActivateExecutor: tablet 72075186224037888 actor [1:904:2774] 2026-01-08T15:35:41.314948Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:661: TxInitSchema.Execute 2026-01-08T15:35:41.372728Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:727: TxInitSchema.Complete 2026-01-08T15:35:41.373002Z node 1 :TX_DATASHARD INFO: datashard.cpp:374: TDataShard::OnActivateExecutor: tablet 72075186224037889 actor [1:906:2776] 2026-01-08T15:35:41.373144Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:661: TxInitSchema.Execute 2026-01-08T15:35:41.381217Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:50: TDataShard::TTxInit::Execute 2026-01-08T15:35:41.386257Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1324: LoadChangeRecords: QueueSize: 0, at tablet: 72075186224037888 2026-01-08T15:35:41.386430Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1380: LoadLockChangeRecords at tablet: 72075186224037888 2026-01-08T15:35:41.386509Z node 1 
:TX_DATASHARD DEBUG: datashard.cpp:1429: LoadChangeRecordCommits at tablet: 72075186224037888 2026-01-08T15:35:41.386976Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:94: TDataShard::TTxInit::Complete 2026-01-08T15:35:41.388063Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:102: TDataShard::TTxInitRestored::Execute 2026-01-08T15:35:41.388153Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:142: DataShard 72075186224037888 persisting started state actor id [1:947:2774] in generation 1 2026-01-08T15:35:41.390511Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:727: TxInitSchema.Complete 2026-01-08T15:35:41.390790Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:50: TDataShard::TTxInit::Execute 2026-01-08T15:35:41.391961Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1324: LoadChangeRecords: QueueSize: 0, at tablet: 72075186224037889 2026-01-08T15:35:41.392022Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1380: LoadLockChangeRecords at tablet: 72075186224037889 2026-01-08T15:35:41.392064Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1429: LoadChangeRecordCommits at tablet: 72075186224037889 2026-01-08T15:35:41.392288Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:94: TDataShard::TTxInit::Complete 2026-01-08T15:35:41.392534Z node 1 :TX_DATASHARD INFO: datashard.cpp:374: TDataShard::OnActivateExecutor: tablet 72075186224037891 actor [1:911:2780] 2026-01-08T15:35:41.392684Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:661: TxInitSchema.Execute 2026-01-08T15:35:41.400196Z node 1 :TX_DATASHARD INFO: datashard.cpp:374: TDataShard::OnActivateExecutor: tablet 72075186224037890 actor [1:915:2783] 2026-01-08T15:35:41.400440Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:661: TxInitSchema.Execute 2026-01-08T15:35:41.409344Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:102: TDataShard::TTxInitRestored::Execute 2026-01-08T15:35:41.409458Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:142: DataShard 72075186224037889 persisting started state actor id [1:958:2776] in generation 1 2026-01-08T15:35:41.411340Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:727: TxInitSchema.Complete 2026-01-08T15:35:41.411475Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:50: TDataShard::TTxInit::Execute 2026-01-08T15:35:41.413007Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1324: LoadChangeRecords: QueueSize: 0, at tablet: 72075186224037891 2026-01-08T15:35:41.413094Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1380: LoadLockChangeRecords at tablet: 72075186224037891 2026-01-08T15:35:41.413194Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1429: LoadChangeRecordCommits at tablet: 72075186224037891 2026-01-08T15:35:41.413547Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:94: TDataShard::TTxInit::Complete 2026-01-08T15:35:41.413748Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:102: TDataShard::TTxInitRestored::Execute 2026-01-08T15:35:41.413821Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:142: DataShard 72075186224037891 persisting started state actor id [1:968:2780] in generation 1 2026-01-08T15:35:41.414166Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:727: TxInitSchema.Complete 2026-01-08T15:35:41.414251Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:50: TDataShard::TTxInit::Execute 2026-01-08T15:35:41.415783Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1324: LoadChangeRecords: QueueSize: 0, at tablet: 72075186224037890 2026-01-08T15:35:41.415855Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1380: LoadLockChangeRecords at tablet: 72075186224037890 2026-01-08T15:35:41.415911Z node 1 :TX_DATASHARD DEBUG: 
datashard.cpp:1429: LoadChangeRecordCommits at tablet: 72075186224037890 2026-01-08T15:35:41.416217Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:94: TDataShard::TTxInit::Complete 2026-01-08T15:35:41.416321Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:102: TDataShard::TTxInitRestored::Execute 2026-01-08T15:35:41.416379Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:142: DataShard 72075186224037890 persisting started state actor id [1:969:2783] in generation 1 2026-01-08T15:35:41.428130Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:151: TDataShard::TTxInitRestored::Complete 2026-01-08T15:35:41.484256Z node 1 :TX_DATASHARD INFO: datashard.cpp:418: Switched to work state WaitScheme tabletId 72075186224037888 2026-01-08T15:35:41.484501Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:458: 72075186224037888 not sending time cast registration request in state WaitScheme: missing processing params 2026-01-08T15:35:41.484646Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1259: Change sender created: at tablet: 72075186224037888, actorId: [1:974:2816] 2026-01-08T15:35:41.484695Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1264: Trying to activate change sender: at tablet: 72075186224037888 2026-01-08T15:35:41.484733Z node 1 :TX_DATASHARD INFO: datashard.cpp:1281: Cannot activate change sender: at tablet: 72075186224037888, state: WaitScheme 2026-01-08T15:35:41.484770Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2026-01-08T15:35:41.485527Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:151: TDataShard::TTxInitRestored::Complete 2026-01-08T15:35:41.485603Z node 1 :TX_DATASHARD INFO: datashard.cpp:418: Switched to work state WaitScheme tabletId 72075186224037889 2026-01-08T15:35:41.485698Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:458: 72075186224037889 not sending time cast registration request in state WaitScheme: missing processing params 2026-01-08T15:35:41.485776Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1259: Change sender created: at tablet: 72075186224037889, actorId: [1:975:2817] 2026-01-08T15:35:41.485805Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1264: Trying to activate change sender: at tablet: 72075186224037889 2026-01-08T15:35:41.485831Z node 1 :TX_DATASHARD INFO: datashard.cpp:1281: Cannot activate change sender: at tablet: 72075186224037889, state: WaitScheme 2026-01-08T15:35:41.485859Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037889 2026-01-08T15:35:41.485945Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:151: TDataShard::TTxInitRestored::Complete 2026-01-08T15:35:41.485978Z node 1 :TX_DATASHARD INFO: datashard.cpp:418: Switched to work state WaitScheme tabletId 72075186224037891 2026-01-08T15:35:41.486028Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:458: 72075186224037891 not sending time cast registration request in state WaitScheme: missing processing params 2026-01-08T15:35:41.486099Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1259: Change sender created: at tablet: 72075186224037891, actorId: [1:976:2818] 2026-01-08T15:35:41.486124Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1264: Trying to activate change sender: at tablet: 72075186224037891 2026-01-08T15:35:41.486147Z node 1 :TX_DATASHARD INFO: datashard.cpp:1281: Cannot activate change sender: at tablet: 72075186224037891, state: WaitScheme 2026-01-08T15:35:41.486185Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 
72075186224037891 2026-01-08T15:35:41.486463Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 72075186224037888 2026-01-08T15:35:41.486591Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 72075186224037888 2026-01-08T15:35:41.486764Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:151: TDataShard::TTxInitRestored::Complete 2026-01-08T15:35:41.486801Z node 1 :TX_DATASHARD INFO: datashard.cpp:418: Switched to work state WaitScheme tabletId 72075186224037890 2026-01-08T15:35:41.486858Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:458: 72075186224037890 not sending time cast registration request in state WaitScheme: missing processing params 2026-01-08T15:35:41.486945Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1259: Change sender created: at tablet: 72075186224037890, a ... 92 PathId: [OwnerId: 72057594046644480, LocalPathId: 39] BodySize: 28 TableId: [OwnerId: 72057594046644480, LocalPathId: 38] SchemaVersion: 1 }, { Order: 93 PathId: [OwnerId: 72057594046644480, LocalPathId: 39] BodySize: 28 TableId: [OwnerId: 72057594046644480, LocalPathId: 38] SchemaVersion: 1 }, { Order: 94 PathId: [OwnerId: 72057594046644480, LocalPathId: 39] BodySize: 28 TableId: [OwnerId: 72057594046644480, LocalPathId: 38] SchemaVersion: 1 }, { Order: 95 PathId: [OwnerId: 72057594046644480, LocalPathId: 39] BodySize: 28 TableId: [OwnerId: 72057594046644480, LocalPathId: 38] SchemaVersion: 1 }, { Order: 96 PathId: [OwnerId: 72057594046644480, LocalPathId: 39] BodySize: 28 TableId: [OwnerId: 72057594046644480, LocalPathId: 38] SchemaVersion: 1 } 2026-01-08T15:35:52.979597Z node 3 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037889 2026-01-08T15:35:52.982525Z node 3 :CHANGE_EXCHANGE DEBUG: change_sender.cpp:71: [ChangeSender][72075186224037889:1][3:933:2795] Handle NKikimr::NChangeExchange::TEvChangeExchange::TEvEnqueueRecords { Records [{ Order: 33 PathId: [OwnerId: 72057594046644480, LocalPathId: 39] BodySize: 28 },{ Order: 34 PathId: [OwnerId: 72057594046644480, LocalPathId: 39] BodySize: 28 },{ Order: 35 PathId: [OwnerId: 72057594046644480, LocalPathId: 39] BodySize: 28 },{ Order: 36 PathId: [OwnerId: 72057594046644480, LocalPathId: 39] BodySize: 28 },{ Order: 37 PathId: [OwnerId: 72057594046644480, LocalPathId: 39] BodySize: 28 },{ Order: 38 PathId: [OwnerId: 72057594046644480, LocalPathId: 39] BodySize: 28 },{ Order: 39 PathId: [OwnerId: 72057594046644480, LocalPathId: 39] BodySize: 28 },{ Order: 40 PathId: [OwnerId: 72057594046644480, LocalPathId: 39] BodySize: 28 },{ Order: 41 PathId: [OwnerId: 72057594046644480, LocalPathId: 39] BodySize: 28 },{ Order: 42 PathId: [OwnerId: 72057594046644480, LocalPathId: 39] BodySize: 28 },{ Order: 43 PathId: [OwnerId: 72057594046644480, LocalPathId: 39] BodySize: 28 },{ Order: 44 PathId: [OwnerId: 72057594046644480, LocalPathId: 39] BodySize: 28 },{ Order: 45 PathId: [OwnerId: 72057594046644480, LocalPathId: 39] BodySize: 28 },{ Order: 46 PathId: [OwnerId: 72057594046644480, LocalPathId: 39] BodySize: 28 },{ Order: 47 PathId: [OwnerId: 72057594046644480, LocalPathId: 39] BodySize: 28 },{ Order: 48 PathId: [OwnerId: 72057594046644480, LocalPathId: 39] BodySize: 28 },{ Order: 49 PathId: [OwnerId: 72057594046644480, LocalPathId: 39] BodySize: 28 },{ Order: 50 PathId: [OwnerId: 72057594046644480, LocalPathId: 39] BodySize: 28 },{ Order: 51 PathId: [OwnerId: 72057594046644480, LocalPathId: 39] BodySize: 28 
},{ Order: 52 PathId: [OwnerId: 72057594046644480, LocalPathId: 39] BodySize: 28 },{ Order: 53 PathId: [OwnerId: 72057594046644480, LocalPathId: 39] BodySize: 28 },{ Order: 54 PathId: [OwnerId: 72057594046644480, LocalPathId: 39] BodySize: 28 },{ Order: 55 PathId: [OwnerId: 72057594046644480, LocalPathId: 39] BodySize: 28 },{ Order: 56 PathId: [OwnerId: 72057594046644480, LocalPathId: 39] BodySize: 28 },{ Order: 57 PathId: [OwnerId: 72057594046644480, LocalPathId: 39] BodySize: 28 },{ Order: 58 PathId: [OwnerId: 72057594046644480, LocalPathId: 39] BodySize: 28 },{ Order: 59 PathId: [OwnerId: 72057594046644480, LocalPathId: 39] BodySize: 28 },{ Order: 60 PathId: [OwnerId: 72057594046644480, LocalPathId: 39] BodySize: 28 },{ Order: 61 PathId: [OwnerId: 72057594046644480, LocalPathId: 39] BodySize: 28 },{ Order: 62 PathId: [OwnerId: 72057594046644480, LocalPathId: 39] BodySize: 28 },{ Order: 63 PathId: [OwnerId: 72057594046644480, LocalPathId: 39] BodySize: 28 },{ Order: 64 PathId: [OwnerId: 72057594046644480, LocalPathId: 39] BodySize: 28 },{ Order: 65 PathId: [OwnerId: 72057594046644480, LocalPathId: 39] BodySize: 28 },{ Order: 66 PathId: [OwnerId: 72057594046644480, LocalPathId: 39] BodySize: 28 },{ Order: 67 PathId: [OwnerId: 72057594046644480, LocalPathId: 39] BodySize: 28 },{ Order: 68 PathId: [OwnerId: 72057594046644480, LocalPathId: 39] BodySize: 28 },{ Order: 69 PathId: [OwnerId: 72057594046644480, LocalPathId: 39] BodySize: 28 },{ Order: 70 PathId: [OwnerId: 72057594046644480, LocalPathId: 39] BodySize: 28 },{ Order: 71 PathId: [OwnerId: 72057594046644480, LocalPathId: 39] BodySize: 28 },{ Order: 72 PathId: [OwnerId: 72057594046644480, LocalPathId: 39] BodySize: 28 },{ Order: 73 PathId: [OwnerId: 72057594046644480, LocalPathId: 39] BodySize: 28 },{ Order: 74 PathId: [OwnerId: 72057594046644480, LocalPathId: 39] BodySize: 28 },{ Order: 75 PathId: [OwnerId: 72057594046644480, LocalPathId: 39] BodySize: 28 },{ Order: 76 PathId: [OwnerId: 72057594046644480, LocalPathId: 39] BodySize: 28 },{ Order: 77 PathId: [OwnerId: 72057594046644480, LocalPathId: 39] BodySize: 28 },{ Order: 78 PathId: [OwnerId: 72057594046644480, LocalPathId: 39] BodySize: 28 },{ Order: 79 PathId: [OwnerId: 72057594046644480, LocalPathId: 39] BodySize: 28 },{ Order: 80 PathId: [OwnerId: 72057594046644480, LocalPathId: 39] BodySize: 28 },{ Order: 81 PathId: [OwnerId: 72057594046644480, LocalPathId: 39] BodySize: 28 },{ Order: 82 PathId: [OwnerId: 72057594046644480, LocalPathId: 39] BodySize: 28 },{ Order: 83 PathId: [OwnerId: 72057594046644480, LocalPathId: 39] BodySize: 28 },{ Order: 84 PathId: [OwnerId: 72057594046644480, LocalPathId: 39] BodySize: 28 },{ Order: 85 PathId: [OwnerId: 72057594046644480, LocalPathId: 39] BodySize: 28 },{ Order: 86 PathId: [OwnerId: 72057594046644480, LocalPathId: 39] BodySize: 28 },{ Order: 87 PathId: [OwnerId: 72057594046644480, LocalPathId: 39] BodySize: 28 },{ Order: 88 PathId: [OwnerId: 72057594046644480, LocalPathId: 39] BodySize: 28 },{ Order: 89 PathId: [OwnerId: 72057594046644480, LocalPathId: 39] BodySize: 28 },{ Order: 90 PathId: [OwnerId: 72057594046644480, LocalPathId: 39] BodySize: 28 },{ Order: 91 PathId: [OwnerId: 72057594046644480, LocalPathId: 39] BodySize: 28 },{ Order: 92 PathId: [OwnerId: 72057594046644480, LocalPathId: 39] BodySize: 28 },{ Order: 93 PathId: [OwnerId: 72057594046644480, LocalPathId: 39] BodySize: 28 },{ Order: 94 PathId: [OwnerId: 72057594046644480, LocalPathId: 39] BodySize: 28 },{ Order: 95 PathId: [OwnerId: 72057594046644480, LocalPathId: 39] 
BodySize: 28 },{ Order: 96 PathId: [OwnerId: 72057594046644480, LocalPathId: 39] BodySize: 28 }] } 2026-01-08T15:35:52.983052Z node 3 :CHANGE_EXCHANGE DEBUG: change_sender_async_index.cpp:224: [AsyncIndexChangeSenderMain][72075186224037889:1][3:972:2826] Handle NKikimr::NChangeExchange::TEvChangeExchange::TEvEnqueueRecords { Records [{ Order: 33 PathId: [OwnerId: 72057594046644480, LocalPathId: 39] BodySize: 28 },{ Order: 34 PathId: [OwnerId: 72057594046644480, LocalPathId: 39] BodySize: 28 },{ Order: 35 PathId: [OwnerId: 72057594046644480, LocalPathId: 39] BodySize: 28 },{ Order: 36 PathId: [OwnerId: 72057594046644480, LocalPathId: 39] BodySize: 28 },{ Order: 37 PathId: [OwnerId: 72057594046644480, LocalPathId: 39] BodySize: 28 },{ Order: 38 PathId: [OwnerId: 72057594046644480, LocalPathId: 39] BodySize: 28 },{ Order: 39 PathId: [OwnerId: 72057594046644480, LocalPathId: 39] BodySize: 28 },{ Order: 40 PathId: [OwnerId: 72057594046644480, LocalPathId: 39] BodySize: 28 },{ Order: 41 PathId: [OwnerId: 72057594046644480, LocalPathId: 39] BodySize: 28 },{ Order: 42 PathId: [OwnerId: 72057594046644480, LocalPathId: 39] BodySize: 28 },{ Order: 43 PathId: [OwnerId: 72057594046644480, LocalPathId: 39] BodySize: 28 },{ Order: 44 PathId: [OwnerId: 72057594046644480, LocalPathId: 39] BodySize: 28 },{ Order: 45 PathId: [OwnerId: 72057594046644480, LocalPathId: 39] BodySize: 28 },{ Order: 46 PathId: [OwnerId: 72057594046644480, LocalPathId: 39] BodySize: 28 },{ Order: 47 PathId: [OwnerId: 72057594046644480, LocalPathId: 39] BodySize: 28 },{ Order: 48 PathId: [OwnerId: 72057594046644480, LocalPathId: 39] BodySize: 28 },{ Order: 49 PathId: [OwnerId: 72057594046644480, LocalPathId: 39] BodySize: 28 },{ Order: 50 PathId: [OwnerId: 72057594046644480, LocalPathId: 39] BodySize: 28 },{ Order: 51 PathId: [OwnerId: 72057594046644480, LocalPathId: 39] BodySize: 28 },{ Order: 52 PathId: [OwnerId: 72057594046644480, LocalPathId: 39] BodySize: 28 },{ Order: 53 PathId: [OwnerId: 72057594046644480, LocalPathId: 39] BodySize: 28 },{ Order: 54 PathId: [OwnerId: 72057594046644480, LocalPathId: 39] BodySize: 28 },{ Order: 55 PathId: [OwnerId: 72057594046644480, LocalPathId: 39] BodySize: 28 },{ Order: 56 PathId: [OwnerId: 72057594046644480, LocalPathId: 39] BodySize: 28 },{ Order: 57 PathId: [OwnerId: 72057594046644480, LocalPathId: 39] BodySize: 28 },{ Order: 58 PathId: [OwnerId: 72057594046644480, LocalPathId: 39] BodySize: 28 },{ Order: 59 PathId: [OwnerId: 72057594046644480, LocalPathId: 39] BodySize: 28 },{ Order: 60 PathId: [OwnerId: 72057594046644480, LocalPathId: 39] BodySize: 28 },{ Order: 61 PathId: [OwnerId: 72057594046644480, LocalPathId: 39] BodySize: 28 },{ Order: 62 PathId: [OwnerId: 72057594046644480, LocalPathId: 39] BodySize: 28 },{ Order: 63 PathId: [OwnerId: 72057594046644480, LocalPathId: 39] BodySize: 28 },{ Order: 64 PathId: [OwnerId: 72057594046644480, LocalPathId: 39] BodySize: 28 },{ Order: 65 PathId: [OwnerId: 72057594046644480, LocalPathId: 39] BodySize: 28 },{ Order: 66 PathId: [OwnerId: 72057594046644480, LocalPathId: 39] BodySize: 28 },{ Order: 67 PathId: [OwnerId: 72057594046644480, LocalPathId: 39] BodySize: 28 },{ Order: 68 PathId: [OwnerId: 72057594046644480, LocalPathId: 39] BodySize: 28 },{ Order: 69 PathId: [OwnerId: 72057594046644480, LocalPathId: 39] BodySize: 28 },{ Order: 70 PathId: [OwnerId: 72057594046644480, LocalPathId: 39] BodySize: 28 },{ Order: 71 PathId: [OwnerId: 72057594046644480, LocalPathId: 39] BodySize: 28 },{ Order: 72 PathId: [OwnerId: 72057594046644480, 
LocalPathId: 39] BodySize: 28 },{ Order: 73 PathId: [OwnerId: 72057594046644480, LocalPathId: 39] BodySize: 28 },{ Order: 74 PathId: [OwnerId: 72057594046644480, LocalPathId: 39] BodySize: 28 },{ Order: 75 PathId: [OwnerId: 72057594046644480, LocalPathId: 39] BodySize: 28 },{ Order: 76 PathId: [OwnerId: 72057594046644480, LocalPathId: 39] BodySize: 28 },{ Order: 77 PathId: [OwnerId: 72057594046644480, LocalPathId: 39] BodySize: 28 },{ Order: 78 PathId: [OwnerId: 72057594046644480, LocalPathId: 39] BodySize: 28 },{ Order: 79 PathId: [OwnerId: 72057594046644480, LocalPathId: 39] BodySize: 28 },{ Order: 80 PathId: [OwnerId: 72057594046644480, LocalPathId: 39] BodySize: 28 },{ Order: 81 PathId: [OwnerId: 72057594046644480, LocalPathId: 39] BodySize: 28 },{ Order: 82 PathId: [OwnerId: 72057594046644480, LocalPathId: 39] BodySize: 28 },{ Order: 83 PathId: [OwnerId: 72057594046644480, LocalPathId: 39] BodySize: 28 },{ Order: 84 PathId: [OwnerId: 72057594046644480, LocalPathId: 39] BodySize: 28 },{ Order: 85 PathId: [OwnerId: 72057594046644480, LocalPathId: 39] BodySize: 28 },{ Order: 86 PathId: [OwnerId: 72057594046644480, LocalPathId: 39] BodySize: 28 },{ Order: 87 PathId: [OwnerId: 72057594046644480, LocalPathId: 39] BodySize: 28 },{ Order: 88 PathId: [OwnerId: 72057594046644480, LocalPathId: 39] BodySize: 28 },{ Order: 89 PathId: [OwnerId: 72057594046644480, LocalPathId: 39] BodySize: 28 },{ Order: 90 PathId: [OwnerId: 72057594046644480, LocalPathId: 39] BodySize: 28 },{ Order: 91 PathId: [OwnerId: 72057594046644480, LocalPathId: 39] BodySize: 28 },{ Order: 92 PathId: [OwnerId: 72057594046644480, LocalPathId: 39] BodySize: 28 },{ Order: 93 PathId: [OwnerId: 72057594046644480, LocalPathId: 39] BodySize: 28 },{ Order: 94 PathId: [OwnerId: 72057594046644480, LocalPathId: 39] BodySize: 28 },{ Order: 95 PathId: [OwnerId: 72057594046644480, LocalPathId: 39] BodySize: 28 },{ Order: 96 PathId: [OwnerId: 72057594046644480, LocalPathId: 39] BodySize: 28 }] } 2026-01-08T15:35:52.983283Z node 3 :RPC_REQUEST DEBUG: upload_rows_common_impl.h:1282: Upload rows: got OK from shard 72075186224037889 description: 2026-01-08T15:35:52.983436Z node 3 :RPC_REQUEST DEBUG: upload_rows_common_impl.h:1412: completed with status SUCCESS |87.9%| [TM] {BAZEL_UPLOAD} ydb/core/ydb_convert/ut/unittest |87.9%| [TM] {BAZEL_UPLOAD} ydb/core/tx/datashard/ut_upload_rows/unittest >> ConvertYdbValueToMiniKQLValueTest::Void [GOOD] >> ConvertYdbValueToMiniKQLValueTest::SimpleUuidTypeMissmatch [GOOD] >> ConvertYdbValueToMiniKQLValueTest::Struct [GOOD] >> ConvertYdbValueToMiniKQLValueTest::Tuple [GOOD] >> ConvertYdbValueToMiniKQLValueTest::Variant [GOOD] >> ConvertYdbValueToMiniKQLValueTest::VariantIndexUnderflow [GOOD] >> ConvertMiniKQLTypeToYdbTypeTest::TTzDateTime [GOOD] >> ConvertMiniKQLTypeToYdbTypeTest::TTzTimeStamp [GOOD] >> ConvertMiniKQLTypeToYdbTypeTest::UuidType [GOOD] >> ConvertMiniKQLTypeToYdbTypeTest::VariantTuple [GOOD] >> ConvertMiniKQLTypeToYdbTypeTest::VariantStruct [GOOD] >> ConvertMiniKQLTypeToYdbTypeTest::Void [GOOD] >> ConvertMiniKQLTypeToYdbTypeTest::Tuple [GOOD] |87.9%| [TM] {asan, default-linux-x86_64, release} ydb/core/ydb_convert/ut/unittest >> ConvertMiniKQLValueToYdbValueTest::Dict [GOOD] |87.9%| [TA] $(B)/ydb/core/tx/datashard/ut_change_collector/test-results/unittest/{meta.json ... 
results_accumulator.log} |87.9%| [TM] {BAZEL_UPLOAD} ydb/core/ydb_convert/ut/unittest >> DataShardVolatile::NotCachingAbortingDeletes-UseSink [GOOD] >> DataShardVolatile::GracefulShardRestartNoEarlyReadSetAck >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-LocalUser-DropUser-68 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-LocalUser-DropUser-69 >> TSchemeShardConsistentCopyTablesTest::IncrementalBackupIndexesContinuesToWork [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/mind/bscontroller/ut_bscontroller/unittest >> BsControllerConfig::CommandRollbackWhenCombined [GOOD] Test command err: Leader for TabletID 72057594037932033 is [0:0:0] sender: [1:3064:2106] recipient: [1:2963:2117] IGNORE Leader for TabletID 72057594037932033 is [0:0:0] sender: [1:3064:2106] recipient: [1:2963:2117] Leader for TabletID 72057594037932033 is [1:3066:2119] sender: [1:3067:2106] recipient: [1:2963:2117] 2026-01-08T15:35:36.821108Z node 1 :BS_CONTROLLER DEBUG: {BSC05@impl.h:2080} StateInit event Type# 268828672 Event# NKikimr::TEvTablet::TEvBoot 2026-01-08T15:35:36.822499Z node 1 :BS_CONTROLLER DEBUG: {BSC05@impl.h:2080} StateInit event Type# 268828673 Event# NKikimr::TEvTablet::TEvRestored 2026-01-08T15:35:36.822973Z node 1 :BS_CONTROLLER DEBUG: {BSC22@console_interaction.cpp:14} Console interaction started 2026-01-08T15:35:36.825197Z node 1 :BS_CONTROLLER DEBUG: {BSC05@impl.h:2080} StateInit event Type# 268828684 Event# NKikimr::TEvTablet::TEvFollowerSyncComplete 2026-01-08T15:35:36.826084Z node 1 :BS_CONTROLLER DEBUG: {BSC05@impl.h:2080} StateInit event Type# 268639244 Event# NKikimr::TEvNodeWardenStorageConfig 2026-01-08T15:35:36.826469Z node 1 :BS_CONTROLLER DEBUG: {BSC05@impl.h:2080} StateInit event Type# 131082 Event# NActors::TEvInterconnect::TEvNodesInfo 2026-01-08T15:35:36.826506Z node 1 :BS_CONTROLLER DEBUG: {BSC01@bsc.cpp:829} Handle TEvInterconnect::TEvNodesInfo 2026-01-08T15:35:36.826858Z node 1 :BS_CONTROLLER DEBUG: {BSCTXIS01@init_scheme.cpp:17} TTxInitScheme Execute 2026-01-08T15:35:36.843492Z node 1 :BS_CONTROLLER DEBUG: {BSCTXIS03@init_scheme.cpp:44} TTxInitScheme Complete 2026-01-08T15:35:36.843636Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM01@migrate.cpp:190} Execute tx 2026-01-08T15:35:36.843849Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM02@migrate.cpp:251} Complete tx IncompatibleData# false 2026-01-08T15:35:36.844018Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxTrimUnusedSlots 2026-01-08T15:35:36.844119Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxTrimUnusedSlots 2026-01-08T15:35:36.844189Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateSchemaVersion Leader for TabletID 72057594037932033 is [1:3066:2119] sender: [1:3088:2106] recipient: [1:60:2107] 2026-01-08T15:35:36.856523Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateSchemaVersion 2026-01-08T15:35:36.856714Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxGenerateInstanceId 2026-01-08T15:35:36.907270Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# 
NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxGenerateInstanceId 2026-01-08T15:35:36.907451Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateStaticPDiskInfo 2026-01-08T15:35:36.907549Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateStaticPDiskInfo 2026-01-08T15:35:36.907642Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxFillInNonNullConfigForPDisk 2026-01-08T15:35:36.907789Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxFillInNonNullConfigForPDisk 2026-01-08T15:35:36.907868Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxDropDriveStatus 2026-01-08T15:35:36.907907Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxDropDriveStatus 2026-01-08T15:35:36.907952Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateCompatibilityInfo 2026-01-08T15:35:36.919041Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateCompatibilityInfo 2026-01-08T15:35:36.919215Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateEnableConfigV2 2026-01-08T15:35:36.930269Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateEnableConfigV2 2026-01-08T15:35:36.930466Z node 1 :BS_CONTROLLER DEBUG: {BSCTXLE01@load_everything.cpp:21} TTxLoadEverything Execute 2026-01-08T15:35:36.932034Z node 1 :BS_CONTROLLER DEBUG: {BSCTXLE03@load_everything.cpp:705} TTxLoadEverything Complete 2026-01-08T15:35:36.932097Z node 1 :BS_CONTROLLER DEBUG: {BSC09@impl.h:2211} LoadFinished 2026-01-08T15:35:36.932314Z node 1 :BS_CONTROLLER DEBUG: {BSC18@console_interaction.cpp:31} Console connection service started 2026-01-08T15:35:36.932374Z node 1 :BS_CONTROLLER DEBUG: {BSCTXLE04@load_everything.cpp:710} TTxLoadEverything InitQueue processed 2026-01-08T15:35:36.966632Z node 1 :BS_CONTROLLER DEBUG: {BSCTXCC01@config_cmd.cpp:433} Execute TEvControllerConfigRequest Request# {Command { DefineHostConfig { HostConfigId: 1 Drive { Path: "/dev/disk1" } Drive { Path: "/dev/disk2" SharedWithOs: true } Drive { Path: "/dev/disk3" Type: SSD } } } Command { DefineBox { BoxId: 1 Name: "box" Host { Key { Fqdn: "::1" IcPort: 12001 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12002 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12003 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12004 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12005 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12006 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12007 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12008 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12009 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12010 } 
HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12011 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12012 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12013 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12014 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12015 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12016 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12017 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12018 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12019 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12020 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12021 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12022 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12023 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12024 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12025 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12026 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12027 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12028 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12029 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12030 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12031 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12032 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12033 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12034 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12035 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12036 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12037 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12038 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12039 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12040 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12041 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12042 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12043 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12044 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12045 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12046 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12047 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12048 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12049 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12050 } HostConfigId: 1 } } } } 2026-01-08T15:35:36.968359Z node 1 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:356} Create new pdisk PDiskId# 1:1000 Path# /dev/disk1 2026-01-08T15:35:36.968470Z node 1 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:356} Create new pdisk PDiskId# 1:1001 Path# /dev/disk2 2026-01-08T15:35:36.968498Z node 1 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:356} Create new pdisk PDiskId# 1:1002 Path# /dev/disk3 2026-01-08T15:35:36.968524Z node 1 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:356} Create new pdisk PDiskId# 2:1000 Path# /dev/disk1 2026-01-08T15:35:36.968548Z node 1 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:356} Create new pdisk PDiskId# 2:1001 Path# /dev/disk2 2026-01-08T15:35:36.968579Z node 1 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:356} Create new pdisk PDiskId# 2:1002 Path# /dev/disk3 2026-01-08T15:35:36.968606Z node 1 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:356} Create new pdisk PDiskId# 3:1000 Path# /dev/disk1 2026-01-08T15:35:36.968647Z node 1 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:356} Create new pdisk PDiskId# 3:1001 Path# /dev/disk2 
2026-01-08T15:35:36.968675Z node 1 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:356} Create new pdisk PDiskId# 3:1002 Path# /dev/disk3 2026-01-08T15:35:36.968697Z node 1 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:356} Create new pdisk PDiskId# 4:1000 Path# /dev/disk1 2026-01-08T15:35:36.968720Z node 1 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:356} Create new pdisk PDiskId# 4:1001 Path# /dev/disk2 2026-01-08T15:35:36.968753Z node 1 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:356} Create new pdisk PDiskId# 4:1002 Path# /dev/disk3 2026-01-08T15:35:36.968794Z node 1 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:356} Create new pdisk PDiskId# 5:1000 Path# /dev/disk1 2026-01-08T15:35:36.968816Z node 1 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:356} Create new pdisk PDiskId# 5:1001 Path# /dev/disk2 2026-01-08T15:35:36.968843Z node 1 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:356} Create new pdisk PDiskId# 5:1002 Path# /dev/disk3 2026-01-08T15:35:36.968882Z node 1 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:356} Create new pdisk PDiskId# 6:1000 Path# /dev/disk1 2026-01-08T15:35:36.968911Z node 1 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:356} Create new pdisk PDiskId# 6:1001 Path# /dev/disk2 2026-01-08T15:35:36.968936Z node 1 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:356} Create new pdisk PDiskId# 6:1002 Path# /dev/disk3 2026-01-08T15:35:36.968959Z node 1 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:356} Create new pdisk PDiskId# 7:1000 Path# /dev/disk1 2026-01-08T15:35:36.968983Z node 1 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:356} Create new pdisk PDiskId# 7:1001 Path# /dev/disk2 2026-01-08T15:35:36.969007Z node 1 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:356} Create new pdisk PDiskId# 7:1002 Path# /dev/disk3 2026-01-08T15:35:36.969029Z node 1 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:356} Create new pdisk PDiskId# 8:1000 Path# /dev/disk ... 
UG: {BSC05@impl.h:2080} StateInit event Type# 268828684 Event# NKikimr::TEvTablet::TEvFollowerSyncComplete 2026-01-08T15:35:53.981726Z node 101 :BS_CONTROLLER DEBUG: {BSC05@impl.h:2080} StateInit event Type# 268639244 Event# NKikimr::TEvNodeWardenStorageConfig 2026-01-08T15:35:53.982107Z node 101 :BS_CONTROLLER DEBUG: {BSC05@impl.h:2080} StateInit event Type# 131082 Event# NActors::TEvInterconnect::TEvNodesInfo 2026-01-08T15:35:53.982149Z node 101 :BS_CONTROLLER DEBUG: {BSC01@bsc.cpp:829} Handle TEvInterconnect::TEvNodesInfo 2026-01-08T15:35:53.984610Z node 101 :BS_CONTROLLER DEBUG: {BSCTXIS01@init_scheme.cpp:17} TTxInitScheme Execute 2026-01-08T15:35:53.996491Z node 101 :BS_CONTROLLER DEBUG: {BSCTXIS03@init_scheme.cpp:44} TTxInitScheme Complete 2026-01-08T15:35:53.996640Z node 101 :BS_CONTROLLER DEBUG: {BSCTXM01@migrate.cpp:190} Execute tx 2026-01-08T15:35:53.996773Z node 101 :BS_CONTROLLER DEBUG: {BSCTXM02@migrate.cpp:251} Complete tx IncompatibleData# false 2026-01-08T15:35:53.996901Z node 101 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxTrimUnusedSlots 2026-01-08T15:35:53.996994Z node 101 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxTrimUnusedSlots 2026-01-08T15:35:53.997055Z node 101 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateSchemaVersion Leader for TabletID 72057594037932033 is [101:28:2070] sender: [101:49:2057] recipient: [101:11:2058] 2026-01-08T15:35:54.009260Z node 101 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateSchemaVersion 2026-01-08T15:35:54.009426Z node 101 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxGenerateInstanceId 2026-01-08T15:35:54.030906Z node 101 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxGenerateInstanceId 2026-01-08T15:35:54.031056Z node 101 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateStaticPDiskInfo 2026-01-08T15:35:54.031139Z node 101 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateStaticPDiskInfo 2026-01-08T15:35:54.031251Z node 101 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxFillInNonNullConfigForPDisk 2026-01-08T15:35:54.031393Z node 101 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxFillInNonNullConfigForPDisk 2026-01-08T15:35:54.031453Z node 101 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxDropDriveStatus 2026-01-08T15:35:54.031504Z node 101 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxDropDriveStatus 2026-01-08T15:35:54.031571Z node 101 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# 
NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateCompatibilityInfo 2026-01-08T15:35:54.046723Z node 101 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateCompatibilityInfo 2026-01-08T15:35:54.046912Z node 101 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateEnableConfigV2 2026-01-08T15:35:54.058994Z node 101 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateEnableConfigV2 2026-01-08T15:35:54.059144Z node 101 :BS_CONTROLLER DEBUG: {BSCTXLE01@load_everything.cpp:21} TTxLoadEverything Execute 2026-01-08T15:35:54.060531Z node 101 :BS_CONTROLLER DEBUG: {BSCTXLE03@load_everything.cpp:705} TTxLoadEverything Complete 2026-01-08T15:35:54.060603Z node 101 :BS_CONTROLLER DEBUG: {BSC09@impl.h:2211} LoadFinished 2026-01-08T15:35:54.060824Z node 101 :BS_CONTROLLER DEBUG: {BSC18@console_interaction.cpp:31} Console connection service started 2026-01-08T15:35:54.060868Z node 101 :BS_CONTROLLER DEBUG: {BSCTXLE04@load_everything.cpp:710} TTxLoadEverything InitQueue processed 2026-01-08T15:35:54.061768Z node 101 :BS_CONTROLLER DEBUG: {BSCTXCC01@config_cmd.cpp:433} Execute TEvControllerConfigRequest Request# {Command { ReadHostConfig { } } Command { DefineHostConfig { HostConfigId: 1 Name: "TestCommandRollbackWhenCombined" } } Command { ReadHostConfig { } } Rollback: true } Status { Success: true } Status { Success: true } Status { Success: true HostConfig { HostConfigId: 1 Name: "TestCommandRollbackWhenCombined" ItemConfigGeneration: 1 } } ErrorDescription: "transaction rollback" RollbackSuccess: true 2026-01-08T15:35:54.062886Z node 101 :BS_CONTROLLER DEBUG: {BSCTXCC01@config_cmd.cpp:433} Execute TEvControllerConfigRequest Request# {Command { ReadHostConfig { } } } Status { Success: true } Success: true ConfigTxSeqNo: 1 Leader for TabletID 72057594037932033 is [0:0:0] sender: [102:26:2057] recipient: [102:24:2068] IGNORE Leader for TabletID 72057594037932033 is [0:0:0] sender: [102:26:2057] recipient: [102:24:2068] Leader for TabletID 72057594037932033 is [102:28:2070] sender: [102:29:2057] recipient: [102:24:2068] 2026-01-08T15:35:54.291922Z node 102 :BS_CONTROLLER DEBUG: {BSC05@impl.h:2080} StateInit event Type# 268828672 Event# NKikimr::TEvTablet::TEvBoot 2026-01-08T15:35:54.293747Z node 102 :BS_CONTROLLER DEBUG: {BSC05@impl.h:2080} StateInit event Type# 268828673 Event# NKikimr::TEvTablet::TEvRestored 2026-01-08T15:35:54.294045Z node 102 :BS_CONTROLLER DEBUG: {BSC22@console_interaction.cpp:14} Console interaction started 2026-01-08T15:35:54.295707Z node 102 :BS_CONTROLLER DEBUG: {BSC05@impl.h:2080} StateInit event Type# 268828684 Event# NKikimr::TEvTablet::TEvFollowerSyncComplete 2026-01-08T15:35:54.296046Z node 102 :BS_CONTROLLER DEBUG: {BSC05@impl.h:2080} StateInit event Type# 268639244 Event# NKikimr::TEvNodeWardenStorageConfig 2026-01-08T15:35:54.296313Z node 102 :BS_CONTROLLER DEBUG: {BSC05@impl.h:2080} StateInit event Type# 131082 Event# NActors::TEvInterconnect::TEvNodesInfo 2026-01-08T15:35:54.296346Z node 102 :BS_CONTROLLER DEBUG: {BSC01@bsc.cpp:829} Handle TEvInterconnect::TEvNodesInfo 2026-01-08T15:35:54.296547Z node 102 :BS_CONTROLLER DEBUG: {BSCTXIS01@init_scheme.cpp:17} TTxInitScheme Execute 2026-01-08T15:35:54.308212Z node 102 :BS_CONTROLLER DEBUG: {BSCTXIS03@init_scheme.cpp:44} 
TTxInitScheme Complete 2026-01-08T15:35:54.308368Z node 102 :BS_CONTROLLER DEBUG: {BSCTXM01@migrate.cpp:190} Execute tx 2026-01-08T15:35:54.308497Z node 102 :BS_CONTROLLER DEBUG: {BSCTXM02@migrate.cpp:251} Complete tx IncompatibleData# false 2026-01-08T15:35:54.308637Z node 102 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxTrimUnusedSlots 2026-01-08T15:35:54.308773Z node 102 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxTrimUnusedSlots 2026-01-08T15:35:54.308964Z node 102 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateSchemaVersion Leader for TabletID 72057594037932033 is [102:28:2070] sender: [102:49:2057] recipient: [102:11:2058] 2026-01-08T15:35:54.320827Z node 102 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateSchemaVersion 2026-01-08T15:35:54.321036Z node 102 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxGenerateInstanceId 2026-01-08T15:35:54.342977Z node 102 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxGenerateInstanceId 2026-01-08T15:35:54.343134Z node 102 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateStaticPDiskInfo 2026-01-08T15:35:54.343210Z node 102 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateStaticPDiskInfo 2026-01-08T15:35:54.343365Z node 102 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxFillInNonNullConfigForPDisk 2026-01-08T15:35:54.343506Z node 102 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxFillInNonNullConfigForPDisk 2026-01-08T15:35:54.343573Z node 102 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxDropDriveStatus 2026-01-08T15:35:54.343607Z node 102 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxDropDriveStatus 2026-01-08T15:35:54.343687Z node 102 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateCompatibilityInfo 2026-01-08T15:35:54.355864Z node 102 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateCompatibilityInfo 2026-01-08T15:35:54.356028Z node 102 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateEnableConfigV2 2026-01-08T15:35:54.366995Z node 102 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateEnableConfigV2 2026-01-08T15:35:54.367141Z node 102 :BS_CONTROLLER 
DEBUG: {BSCTXLE01@load_everything.cpp:21} TTxLoadEverything Execute 2026-01-08T15:35:54.368660Z node 102 :BS_CONTROLLER DEBUG: {BSCTXLE03@load_everything.cpp:705} TTxLoadEverything Complete 2026-01-08T15:35:54.368716Z node 102 :BS_CONTROLLER DEBUG: {BSC09@impl.h:2211} LoadFinished 2026-01-08T15:35:54.368938Z node 102 :BS_CONTROLLER DEBUG: {BSC18@console_interaction.cpp:31} Console connection service started 2026-01-08T15:35:54.368987Z node 102 :BS_CONTROLLER DEBUG: {BSCTXLE04@load_everything.cpp:710} TTxLoadEverything InitQueue processed 2026-01-08T15:35:54.370603Z node 102 :BS_CONTROLLER DEBUG: {BSCTXCC01@config_cmd.cpp:433} Execute TEvControllerConfigRequest Request# {Command { ReadHostConfig { } } Command { DefineHostConfig { HostConfigId: 1 Name: "TestCommandRollbackWhenCombined" } } Command { ReadHostConfig { } } Rollback: true } Status { Success: true } Status { Success: true } Status { Success: true HostConfig { HostConfigId: 1 Name: "TestCommandRollbackWhenCombined" ItemConfigGeneration: 1 } } ErrorDescription: "transaction rollback" RollbackSuccess: true 2026-01-08T15:35:54.371715Z node 102 :BS_CONTROLLER DEBUG: {BSCTXCC01@config_cmd.cpp:433} Execute TEvControllerConfigRequest Request# {Command { ReadHostConfig { } } } Status { Success: true } Success: true ConfigTxSeqNo: 1 |87.9%| [TA] $(B)/ydb/core/kqp/provider/ut/test-results/unittest/{meta.json ... results_accumulator.log} |87.9%| [TA] {BAZEL_UPLOAD} $(B)/ydb/core/tx/datashard/ut_change_collector/test-results/unittest/{meta.json ... results_accumulator.log} |87.9%| [TA] {BAZEL_UPLOAD} $(B)/ydb/core/kqp/provider/ut/test-results/unittest/{meta.json ... results_accumulator.log} >> YdbProxy::ListDirectory |87.9%| [TM] {BAZEL_UPLOAD} ydb/core/mind/bscontroller/ut_bscontroller/unittest >> TTxDataShardUploadRows::TestUploadShadowRows [GOOD] |87.9%| [TM] {asan, default-linux-x86_64, release} ydb/core/ydb_convert/ut/unittest >> ConvertYdbValueToMiniKQLValueTest::VariantIndexUnderflow [GOOD] |87.9%| [TM] {BAZEL_UPLOAD} ydb/core/ydb_convert/ut/unittest >> BsControllerConfig::ReassignGroupDisk [GOOD] |87.9%| [TM] {asan, default-linux-x86_64, release} ydb/core/ydb_convert/ut/unittest >> ConvertMiniKQLTypeToYdbTypeTest::Tuple [GOOD] |87.9%| [TM] {BAZEL_UPLOAD} ydb/core/ydb_convert/ut/unittest >> YdbProxy::RemoveDirectory >> PartitionEndWatcher::EmptyPartition [GOOD] >> PartitionEndWatcher::AfterCommit [GOOD] >> YdbProxy::AlterTable >> YdbProxy::ReadTopic ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_consistent_copy_tables/unittest >> TSchemeShardConsistentCopyTablesTest::IncrementalBackupIndexesContinuesToWork [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2143] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2143] Leader for TabletID 72057594046678944 is [1:129:2154] sender: [1:130:2058] recipient: [1:112:2143] 2026-01-08T15:35:52.735207Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7938: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2026-01-08T15:35:52.735299Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7966: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2026-01-08T15:35:52.735333Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: 
schemeshard_impl.cpp:7852: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2026-01-08T15:35:52.735381Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7868: OperationsProcessing config: using default configuration 2026-01-08T15:35:52.735438Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7874: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2026-01-08T15:35:52.735465Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7874: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2026-01-08T15:35:52.735518Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7998: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2026-01-08T15:35:52.735572Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_shred_manager.cpp:39: [RootShredManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, ShredInterval# 604800.000000s, ShredBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2026-01-08T15:35:52.737250Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:8069: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2026-01-08T15:35:52.737575Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2026-01-08T15:35:52.846360Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7755: Cannot subscribe to console configs 2026-01-08T15:35:52.846440Z node 1 :IMPORT WARN: schemeshard_import.cpp:440: Table profiles were not loaded 2026-01-08T15:35:52.857946Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2026-01-08T15:35:52.858279Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2026-01-08T15:35:52.858543Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2026-01-08T15:35:52.883911Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2026-01-08T15:35:52.884435Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:464: Clear TempDirsState with owners number: 0 2026-01-08T15:35:52.885303Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1383: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2026-01-08T15:35:52.886309Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2026-01-08T15:35:52.890003Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:160: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2026-01-08T15:35:52.890203Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_shred_manager.cpp:84: [RootShredManager] Stop 2026-01-08T15:35:52.891574Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2026-01-08T15:35:52.891645Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2026-01-08T15:35:52.891798Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2026-01-08T15:35:52.891845Z node 1 :FLAT_TX_SCHEMESHARD INFO: 
schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2026-01-08T15:35:52.891890Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:213: TTxServerlessStorageBilling.Complete 2026-01-08T15:35:52.892073Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:7111: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2026-01-08T15:35:53.035700Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot" OperationType: ESchemeOpMkDir MkDir { Name: ".sys" } Internal: true FailOnExist: false } TxId: 281474976710657 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:35:53.036851Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "auth_permissions" Type: EAuthPermissions } } TxId: 281474976710658 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:35:53.037008Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "auth_effective_permissions" Type: EAuthEffectivePermissions } } TxId: 281474976710659 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:35:53.037083Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "auth_users" Type: EAuthUsers } } TxId: 281474976710660 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:35:53.037181Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "pg_tables" Type: EPgTables } } TxId: 281474976710661 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:35:53.037262Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "ds_pdisks" Type: EPDisks } } TxId: 281474976710662 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:35:53.037333Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: 
ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "auth_group_members" Type: EAuthGroupMembers } } TxId: 281474976710663 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:35:53.037449Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "top_partitions_one_minute" Type: ETopPartitionsByCpuOneMinute } } TxId: 281474976710664 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:35:53.037530Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "top_queries_by_request_units_one_hour" Type: ETopQueriesByRequestUnitsOneHour } } TxId: 281474976710665 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:35:53.037634Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "ds_vslots" Type: EVSlots } } TxId: 281474976710666 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:35:53.037728Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "top_partitions_by_tli_one_hour" Type: ETopPartitionsByTliOneHour } } TxId: 281474976710667 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:35:53.037829Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "nodes" Type: ENodes } } TxId: 281474976710668 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:35:53.037951Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "top_queries_by_cpu_time_one_hour" Type: ETopQueriesByCpuTimeOneHour } } TxId: 281474976710669 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:35:53.038032Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "top_queries_by_read_bytes_one_hour" Type: ETopQueriesByReadBytesOneHour } } TxId: 
281474976710670 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:35:53.038106Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "top_queries_by_read_bytes_one_minute" Type: ETopQueriesByR ... BE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/TableWithIndex/ValueIndex" Options { ReturnPartitioningInfo: true ReturnPartitionConfig: true BackupInfo: false ReturnBoundaries: true ShowPrivateTable: true }, at schemeshard: 72057594046678944 2026-01-08T15:35:55.100319Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/TableWithIndex/ValueIndex" took 253us result status StatusSuccess 2026-01-08T15:35:55.101110Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/TableWithIndex/ValueIndex" PathDescription { Self { Name: "ValueIndex" PathId: 42 SchemeshardId: 72057594046678944 PathType: EPathTypeTableIndex CreateFinished: true CreateTxId: 104 CreateStep: 5000041 ParentPathId: 41 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 4 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 4 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 TableIndexVersion: 2 } ChildrenExist: true } Children { Name: "indexImplTable" PathId: 43 SchemeshardId: 72057594046678944 PathType: EPathTypeTable CreateFinished: true CreateTxId: 104 CreateStep: 5000041 ParentPathId: 42 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" PathSubType: EPathSubTypeSyncIndexImplTable Version { GeneralVersion: 4 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 2 TablePartitionVersion: 1 } ChildrenExist: true } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 53 PathsLimit: 10000 ShardsInside: 8 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 2 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } TableIndex { Name: "ValueIndex" LocalPathId: 42 Type: EIndexTypeGlobal State: EIndexStateReady KeyColumnNames: "value" SchemaVersion: 2 PathOwnerId: 72057594046678944 DataSize: 0 IndexImplTableDescriptions { Columns { Name: "value" Type: "Utf8" TypeId: 4608 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "key" Type: "Uint32" TypeId: 2 Id: 2 NotNull: false IsBuildInProgress: false } 
KeyColumnNames: "value" KeyColumnNames: "key" KeyColumnIds: 1 KeyColumnIds: 2 PartitionConfig { CompactionPolicy { InMemSizeToSnapshot: 4194304 InMemStepsToSnapshot: 300 InMemForceStepsToSnapshot: 500 InMemForceSizeToSnapshot: 16777216 InMemCompactionBrokerQueue: 0 ReadAheadHiThreshold: 67108864 ReadAheadLoThreshold: 16777216 MinDataPageSize: 7168 SnapBrokerQueue: 0 Generation { GenerationId: 0 SizeToCompact: 0 CountToCompact: 8 ForceCountToCompact: 8 ForceSizeToCompact: 134217728 CompactionBrokerQueue: 4294967295 KeepInCache: true BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen1" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 0 UpliftPartSize: 0 } Generation { GenerationId: 1 SizeToCompact: 41943040 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 536870912 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen2" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 8388608 UpliftPartSize: 8388608 } Generation { GenerationId: 2 SizeToCompact: 419430400 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 17179869184 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen3" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 83886080 UpliftPartSize: 83886080 } BackupBrokerQueue: 1 DefaultTaskPriority: 5 BackgroundSnapshotPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } InMemResourceBrokerTask: "compaction_gen0" SnapshotResourceBrokerTask: "compaction_gen0" BackupResourceBrokerTask: "scan" LogOverheadSizeToSnapshot: 16777216 LogOverheadCountToSnapshot: 500 DroppedRowsPercentToCompact: 50 MinBTreeIndexNodeSize: 7168 MinBTreeIndexNodeKeys: 6 } PartitioningPolicy { SizeToSplit: 2147483648 MinPartitionsCount: 1 } } } } } PathId: 42 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 2026-01-08T15:35:55.101725Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5248: StateWork, received event# 271122945, Sender [1:1493:3317], Recipient [1:129:2154]: NKikimrSchemeOp.TDescribePath Path: "/MyRoot/TableWithIndex/ValueIndex/indexImplTable" Options { ReturnPartitioningInfo: true ReturnPartitionConfig: true BackupInfo: false ReturnBoundaries: true ShowPrivateTable: true } 2026-01-08T15:35:55.101785Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5260: StateWork, processing event TEvSchemeShard::TEvDescribeScheme 2026-01-08T15:35:55.101890Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/TableWithIndex/ValueIndex/indexImplTable" Options { ReturnPartitioningInfo: true ReturnPartitionConfig: true BackupInfo: false ReturnBoundaries: true ShowPrivateTable: true }, at schemeshard: 72057594046678944 2026-01-08T15:35:55.102191Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/TableWithIndex/ValueIndex/indexImplTable" took 274us result status StatusSuccess 
2026-01-08T15:35:55.103016Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/TableWithIndex/ValueIndex/indexImplTable" PathDescription { Self { Name: "indexImplTable" PathId: 43 SchemeshardId: 72057594046678944 PathType: EPathTypeTable CreateFinished: true CreateTxId: 104 CreateStep: 5000041 ParentPathId: 42 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 4 PathSubType: EPathSubTypeSyncIndexImplTable Version { GeneralVersion: 4 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 2 TablePartitionVersion: 1 } ChildrenExist: true } Table { Name: "indexImplTable" Columns { Name: "value" Type: "Utf8" TypeId: 4608 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "key" Type: "Uint32" TypeId: 2 Id: 2 NotNull: false IsBuildInProgress: false } KeyColumnNames: "value" KeyColumnNames: "key" KeyColumnIds: 1 KeyColumnIds: 2 PartitionConfig { CompactionPolicy { InMemSizeToSnapshot: 4194304 InMemStepsToSnapshot: 300 InMemForceStepsToSnapshot: 500 InMemForceSizeToSnapshot: 16777216 InMemCompactionBrokerQueue: 0 ReadAheadHiThreshold: 67108864 ReadAheadLoThreshold: 16777216 MinDataPageSize: 7168 SnapBrokerQueue: 0 Generation { GenerationId: 0 SizeToCompact: 0 CountToCompact: 8 ForceCountToCompact: 8 ForceSizeToCompact: 134217728 CompactionBrokerQueue: 4294967295 KeepInCache: true BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen1" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 0 UpliftPartSize: 0 } Generation { GenerationId: 1 SizeToCompact: 41943040 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 536870912 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen2" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 8388608 UpliftPartSize: 8388608 } Generation { GenerationId: 2 SizeToCompact: 419430400 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 17179869184 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen3" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 83886080 UpliftPartSize: 83886080 } BackupBrokerQueue: 1 DefaultTaskPriority: 5 BackgroundSnapshotPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } InMemResourceBrokerTask: "compaction_gen0" SnapshotResourceBrokerTask: "compaction_gen0" BackupResourceBrokerTask: "scan" LogOverheadSizeToSnapshot: 16777216 LogOverheadCountToSnapshot: 500 DroppedRowsPercentToCompact: 50 MinBTreeIndexNodeSize: 7168 MinBTreeIndexNodeKeys: 6 } PartitioningPolicy { SizeToSplit: 2147483648 MinPartitionsCount: 1 } } TableSchemaVersion: 2 IsBackup: false CdcStreams { Name: "19700101000000Z_continuousBackupImpl" Mode: ECdcStreamModeUpdate PathId { OwnerId: 72057594046678944 LocalId: 49 } State: ECdcStreamStateReady SchemaVersion: 1 Format: ECdcStreamFormatProto VirtualTimestamps: false AwsRegion: 
"" ResolvedTimestampsIntervalMs: 0 SchemaChanges: false } IsRestore: false } TablePartitions { EndOfRangeKeyPrefix: "" IsPoint: false IsInclusive: false DatashardId: 72075186233409546 } TableStats { DataSize: 0 RowCount: 0 IndexSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 0 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 1 RangeReadRows: 0 StoragePools { } ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 0 Memory: 0 Network: 0 Storage: 0 ReadThroughput: 0 WriteThroughput: 0 ReadIops: 0 WriteIops: 0 } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 53 PathsLimit: 10000 ShardsInside: 8 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 2 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 43 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 >> YdbProxy::DescribePath |87.9%| [TM] {BAZEL_UPLOAD} ydb/core/tx/schemeshard/ut_consistent_copy_tables/unittest >> YdbProxy::CopyTable ------- [TM] {asan, default-linux-x86_64, release} ydb/core/mind/bscontroller/ut_bscontroller/unittest >> BsControllerConfig::ReassignGroupDisk [GOOD] Test command err: Leader for TabletID 72057594037932033 is [0:0:0] sender: [1:224:2066] recipient: [1:205:2077] IGNORE Leader for TabletID 72057594037932033 is [0:0:0] sender: [1:224:2066] recipient: [1:205:2077] Leader for TabletID 72057594037932033 is [1:226:2079] sender: [1:227:2066] recipient: [1:205:2077] 2026-01-08T15:35:47.069573Z node 1 :BS_CONTROLLER DEBUG: {BSC05@impl.h:2080} StateInit event Type# 268828672 Event# NKikimr::TEvTablet::TEvBoot 2026-01-08T15:35:47.070854Z node 1 :BS_CONTROLLER DEBUG: {BSC05@impl.h:2080} StateInit event Type# 268828673 Event# NKikimr::TEvTablet::TEvRestored 2026-01-08T15:35:47.071243Z node 1 :BS_CONTROLLER DEBUG: {BSC22@console_interaction.cpp:14} Console interaction started 2026-01-08T15:35:47.073427Z node 1 :BS_CONTROLLER DEBUG: {BSC05@impl.h:2080} StateInit event Type# 268828684 Event# NKikimr::TEvTablet::TEvFollowerSyncComplete 2026-01-08T15:35:47.073760Z node 1 :BS_CONTROLLER DEBUG: {BSC05@impl.h:2080} StateInit event Type# 268639244 Event# NKikimr::TEvNodeWardenStorageConfig 2026-01-08T15:35:47.073922Z node 1 :BS_CONTROLLER DEBUG: {BSC05@impl.h:2080} StateInit event Type# 131082 Event# NActors::TEvInterconnect::TEvNodesInfo 2026-01-08T15:35:47.073956Z node 1 :BS_CONTROLLER DEBUG: {BSC01@bsc.cpp:829} Handle TEvInterconnect::TEvNodesInfo 2026-01-08T15:35:47.074180Z node 1 :BS_CONTROLLER DEBUG: {BSCTXIS01@init_scheme.cpp:17} TTxInitScheme Execute 
2026-01-08T15:35:47.088836Z node 1 :BS_CONTROLLER DEBUG: {BSCTXIS03@init_scheme.cpp:44} TTxInitScheme Complete 2026-01-08T15:35:47.088921Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM01@migrate.cpp:190} Execute tx 2026-01-08T15:35:47.089053Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM02@migrate.cpp:251} Complete tx IncompatibleData# false 2026-01-08T15:35:47.089140Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxTrimUnusedSlots 2026-01-08T15:35:47.089223Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxTrimUnusedSlots 2026-01-08T15:35:47.089274Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateSchemaVersion Leader for TabletID 72057594037932033 is [1:226:2079] sender: [1:248:2066] recipient: [1:20:2067] 2026-01-08T15:35:47.102895Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateSchemaVersion 2026-01-08T15:35:47.103033Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxGenerateInstanceId 2026-01-08T15:35:47.128566Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxGenerateInstanceId 2026-01-08T15:35:47.128704Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateStaticPDiskInfo 2026-01-08T15:35:47.128794Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateStaticPDiskInfo 2026-01-08T15:35:47.128905Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxFillInNonNullConfigForPDisk 2026-01-08T15:35:47.129018Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxFillInNonNullConfigForPDisk 2026-01-08T15:35:47.129078Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxDropDriveStatus 2026-01-08T15:35:47.129104Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxDropDriveStatus 2026-01-08T15:35:47.129131Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateCompatibilityInfo 2026-01-08T15:35:47.139885Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateCompatibilityInfo 2026-01-08T15:35:47.140012Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateEnableConfigV2 2026-01-08T15:35:47.150800Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateEnableConfigV2 
2026-01-08T15:35:47.150939Z node 1 :BS_CONTROLLER DEBUG: {BSCTXLE01@load_everything.cpp:21} TTxLoadEverything Execute 2026-01-08T15:35:47.152379Z node 1 :BS_CONTROLLER DEBUG: {BSCTXLE03@load_everything.cpp:705} TTxLoadEverything Complete 2026-01-08T15:35:47.152443Z node 1 :BS_CONTROLLER DEBUG: {BSC09@impl.h:2211} LoadFinished 2026-01-08T15:35:47.152719Z node 1 :BS_CONTROLLER DEBUG: {BSC18@console_interaction.cpp:31} Console connection service started 2026-01-08T15:35:47.152771Z node 1 :BS_CONTROLLER DEBUG: {BSCTXLE04@load_everything.cpp:710} TTxLoadEverything InitQueue processed 2026-01-08T15:35:47.170177Z node 1 :BS_CONTROLLER DEBUG: {BSCTXCC01@config_cmd.cpp:433} Execute TEvControllerConfigRequest Request# {Command { DefineHostConfig { HostConfigId: 1 Drive { Path: "/dev/disk1" } Drive { Path: "/dev/disk2" SharedWithOs: true } Drive { Path: "/dev/disk3" Type: SSD } } } Command { DefineBox { BoxId: 1 Name: "test box" Host { Key { Fqdn: "::1" IcPort: 12001 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12002 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12003 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12004 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12005 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12006 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12007 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12008 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12009 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12010 } HostConfigId: 1 } } } Command { QueryBaseConfig { } } } 2026-01-08T15:35:47.170789Z node 1 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:356} Create new pdisk PDiskId# 1:1000 Path# /dev/disk1 2026-01-08T15:35:47.170850Z node 1 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:356} Create new pdisk PDiskId# 1:1001 Path# /dev/disk2 2026-01-08T15:35:47.170877Z node 1 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:356} Create new pdisk PDiskId# 1:1002 Path# /dev/disk3 2026-01-08T15:35:47.170904Z node 1 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:356} Create new pdisk PDiskId# 2:1000 Path# /dev/disk1 2026-01-08T15:35:47.170944Z node 1 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:356} Create new pdisk PDiskId# 2:1001 Path# /dev/disk2 2026-01-08T15:35:47.170972Z node 1 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:356} Create new pdisk PDiskId# 2:1002 Path# /dev/disk3 2026-01-08T15:35:47.171024Z node 1 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:356} Create new pdisk PDiskId# 3:1000 Path# /dev/disk1 2026-01-08T15:35:47.171051Z node 1 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:356} Create new pdisk PDiskId# 3:1001 Path# /dev/disk2 2026-01-08T15:35:47.171079Z node 1 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:356} Create new pdisk PDiskId# 3:1002 Path# /dev/disk3 2026-01-08T15:35:47.171102Z node 1 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:356} Create new pdisk PDiskId# 4:1000 Path# /dev/disk1 2026-01-08T15:35:47.171126Z node 1 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:356} Create new pdisk PDiskId# 4:1001 Path# /dev/disk2 2026-01-08T15:35:47.171151Z node 1 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:356} Create new pdisk PDiskId# 4:1002 Path# /dev/disk3 2026-01-08T15:35:47.171175Z node 1 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:356} Create new pdisk PDiskId# 5:1000 Path# /dev/disk1 2026-01-08T15:35:47.171200Z node 1 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:356} Create new pdisk PDiskId# 
5:1001 Path# /dev/disk2 2026-01-08T15:35:47.171245Z node 1 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:356} Create new pdisk PDiskId# 5:1002 Path# /dev/disk3 2026-01-08T15:35:47.171282Z node 1 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:356} Create new pdisk PDiskId# 6:1000 Path# /dev/disk1 2026-01-08T15:35:47.171320Z node 1 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:356} Create new pdisk PDiskId# 6:1001 Path# /dev/disk2 2026-01-08T15:35:47.171361Z node 1 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:356} Create new pdisk PDiskId# 6:1002 Path# /dev/disk3 2026-01-08T15:35:47.171385Z node 1 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:356} Create new pdisk PDiskId# 7:1000 Path# /dev/disk1 2026-01-08T15:35:47.171409Z node 1 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:356} Create new pdisk PDiskId# 7:1001 Path# /dev/disk2 2026-01-08T15:35:47.171432Z node 1 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:356} Create new pdisk PDiskId# 7:1002 Path# /dev/disk3 2026-01-08T15:35:47.171454Z node 1 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:356} Create new pdisk PDiskId# 8:1000 Path# /dev/disk1 2026-01-08T15:35:47.171500Z node 1 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:356} Create new pdisk PDiskId# 8:1001 Path# /dev/disk2 2026-01-08T15:35:47.171526Z node 1 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:356} Create new pdisk PDiskId# 8:1002 Path# /dev/disk3 2026-01-08T15:35:47.171547Z node 1 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:356} Create new pdisk PDiskId# 9:1000 Path# /dev/disk1 2026-01-08T15:35:47.171580Z node 1 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:356} Create new pdisk PDiskId# 9:1001 Path# /dev/disk2 2026-01-08T15:35:47.171617Z node 1 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:356} Create new pdisk PDiskId# 9:1002 Path# /dev/disk3 2026-01-08T15:35:47.171653Z node 1 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:356} Create new pdisk PDiskId# 10:1000 Path# /dev/disk1 2026-01-08T15:35:47.171702Z node 1 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:356} Create new pdisk PDiskId# 10:1001 Path# /dev/disk2 2026-01-08T15:35:47.171725Z node 1 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:356} Create new pdisk PDiskId# 10:1002 Path# /dev/disk3 Leader for TabletID 72057594037932033 is [0:0:0] sender: [11:224:2066] recipient: [11:208:2077] IGNORE Leader for TabletID 72057594037932033 is [0:0:0] sender: [11:224:2066] recipient: [11:208:2077] Leader for TabletID 72057594037932033 is [11:226:2079] sender: [11:227:2066] recipient: [11:208:2077] 2026-01-08T15:35:49.180368Z node 11 :BS_CONTROLLER DEBUG: {BSC05@impl.h:2080} StateInit event Type# 268828672 Event# NKikimr::TEvTablet::TEvBoot 2026-01-08T15:35:49.181443Z node 11 :BS_CONTROLLER DEBUG: {BSC05@impl.h:2080} StateInit event Type# 268828673 Event# NKikimr::TEvTablet::TEvRestored 2026-01-08T15:35:49.181738Z node 11 :BS_CONTROLLER DEBUG: {BSC22@console_interaction.cpp:14} Console interaction started 2026-01-08T15:35:49.183256Z node 11 :BS_CONTROLLER DEBUG: {BSC05@impl.h:2080} StateInit event Type# 268828684 Event# NKikimr::TEvTablet::TEvFollowerSyncComplete 2026-01-08T15:35:49.183739Z node 11 :BS_CONTROLLER DEBUG: {BSC05@impl.h:2080} StateInit event Type# 268639244 Event# NKikimr::TEvNodeWardenStorageConfig 2026-01-08T15:35:49.184124Z node 11 :BS_CONTROLLER DEBUG: {BSC05@impl.h:2080} StateInit event Type# 131082 Event# NActors::TEvInterconnect::TEvNo ... 
Port: 12005 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12006 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12007 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12008 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12009 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12010 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12011 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12012 } HostConfigId: 1 } } } Command { DefineStoragePool { BoxId: 1 StoragePoolId: 1 Name: "storage pool" ErasureSpecies: "block-4-2" VDiskKind: "Default" NumGroups: 8 PDiskFilter { Property { Type: ROT } } } } } 2026-01-08T15:35:51.280235Z node 21 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:356} Create new pdisk PDiskId# 21:1000 Path# /dev/disk 2026-01-08T15:35:51.280354Z node 21 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:356} Create new pdisk PDiskId# 22:1000 Path# /dev/disk 2026-01-08T15:35:51.280397Z node 21 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:356} Create new pdisk PDiskId# 23:1000 Path# /dev/disk 2026-01-08T15:35:51.280431Z node 21 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:356} Create new pdisk PDiskId# 24:1000 Path# /dev/disk 2026-01-08T15:35:51.280490Z node 21 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:356} Create new pdisk PDiskId# 25:1000 Path# /dev/disk 2026-01-08T15:35:51.280544Z node 21 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:356} Create new pdisk PDiskId# 26:1000 Path# /dev/disk 2026-01-08T15:35:51.280594Z node 21 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:356} Create new pdisk PDiskId# 27:1000 Path# /dev/disk 2026-01-08T15:35:51.280633Z node 21 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:356} Create new pdisk PDiskId# 28:1000 Path# /dev/disk 2026-01-08T15:35:51.280675Z node 21 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:356} Create new pdisk PDiskId# 29:1000 Path# /dev/disk 2026-01-08T15:35:51.280708Z node 21 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:356} Create new pdisk PDiskId# 30:1000 Path# /dev/disk 2026-01-08T15:35:51.280751Z node 21 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:356} Create new pdisk PDiskId# 31:1000 Path# /dev/disk 2026-01-08T15:35:51.280777Z node 21 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:356} Create new pdisk PDiskId# 32:1000 Path# /dev/disk Response# Status { Success: true } Status { Success: true } Status { Success: true } Success: true ConfigTxSeqNo: 1 2026-01-08T15:35:51.310043Z node 21 :BS_CONTROLLER DEBUG: {BSCTXCC01@config_cmd.cpp:433} Execute TEvControllerConfigRequest Request# {Command { UpdateDriveStatus { HostKey { NodeId: 1 } Path: "/dev/disk" Status: INACTIVE } } } Response# Status { ErrorDescription: "Host not found NodeId# 1 HostKey# NodeId: 1\n incorrect" FailReason: kHostNotFound FailParam { NodeId: 1 } } ErrorDescription: "Host not found NodeId# 1 HostKey# NodeId: 1\n incorrect" ConfigTxSeqNo: 1 Leader for TabletID 72057594037932033 is [0:0:0] sender: [33:290:2068] recipient: [33:267:2079] IGNORE Leader for TabletID 72057594037932033 is [0:0:0] sender: [33:290:2068] recipient: [33:267:2079] Leader for TabletID 72057594037932033 is [33:292:2081] sender: [33:293:2068] recipient: [33:267:2079] 2026-01-08T15:35:53.887858Z node 33 :BS_CONTROLLER DEBUG: {BSC05@impl.h:2080} StateInit event Type# 268828672 Event# NKikimr::TEvTablet::TEvBoot 2026-01-08T15:35:53.888901Z node 33 :BS_CONTROLLER DEBUG: {BSC05@impl.h:2080} StateInit event Type# 268828673 Event# NKikimr::TEvTablet::TEvRestored 
2026-01-08T15:35:53.889187Z node 33 :BS_CONTROLLER DEBUG: {BSC22@console_interaction.cpp:14} Console interaction started 2026-01-08T15:35:53.889746Z node 33 :BS_CONTROLLER DEBUG: {BSC05@impl.h:2080} StateInit event Type# 268828684 Event# NKikimr::TEvTablet::TEvFollowerSyncComplete 2026-01-08T15:35:53.891084Z node 33 :BS_CONTROLLER DEBUG: {BSC05@impl.h:2080} StateInit event Type# 268639244 Event# NKikimr::TEvNodeWardenStorageConfig 2026-01-08T15:35:53.891346Z node 33 :BS_CONTROLLER DEBUG: {BSC05@impl.h:2080} StateInit event Type# 131082 Event# NActors::TEvInterconnect::TEvNodesInfo 2026-01-08T15:35:53.891373Z node 33 :BS_CONTROLLER DEBUG: {BSC01@bsc.cpp:829} Handle TEvInterconnect::TEvNodesInfo 2026-01-08T15:35:53.891599Z node 33 :BS_CONTROLLER DEBUG: {BSCTXIS01@init_scheme.cpp:17} TTxInitScheme Execute 2026-01-08T15:35:53.901598Z node 33 :BS_CONTROLLER DEBUG: {BSCTXIS03@init_scheme.cpp:44} TTxInitScheme Complete 2026-01-08T15:35:53.901796Z node 33 :BS_CONTROLLER DEBUG: {BSCTXM01@migrate.cpp:190} Execute tx 2026-01-08T15:35:53.901915Z node 33 :BS_CONTROLLER DEBUG: {BSCTXM02@migrate.cpp:251} Complete tx IncompatibleData# false 2026-01-08T15:35:53.902066Z node 33 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxTrimUnusedSlots 2026-01-08T15:35:53.902172Z node 33 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxTrimUnusedSlots 2026-01-08T15:35:53.902243Z node 33 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateSchemaVersion Leader for TabletID 72057594037932033 is [33:292:2081] sender: [33:314:2068] recipient: [33:22:2069] 2026-01-08T15:35:53.916097Z node 33 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateSchemaVersion 2026-01-08T15:35:53.916272Z node 33 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxGenerateInstanceId 2026-01-08T15:35:53.948774Z node 33 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxGenerateInstanceId 2026-01-08T15:35:53.948924Z node 33 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateStaticPDiskInfo 2026-01-08T15:35:53.949016Z node 33 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateStaticPDiskInfo 2026-01-08T15:35:53.949097Z node 33 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxFillInNonNullConfigForPDisk 2026-01-08T15:35:53.949203Z node 33 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxFillInNonNullConfigForPDisk 2026-01-08T15:35:53.949268Z node 33 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxDropDriveStatus 2026-01-08T15:35:53.949317Z node 33 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# 
NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxDropDriveStatus 2026-01-08T15:35:53.949393Z node 33 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateCompatibilityInfo 2026-01-08T15:35:53.960237Z node 33 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateCompatibilityInfo 2026-01-08T15:35:53.960387Z node 33 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateEnableConfigV2 2026-01-08T15:35:53.971276Z node 33 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateEnableConfigV2 2026-01-08T15:35:53.971444Z node 33 :BS_CONTROLLER DEBUG: {BSCTXLE01@load_everything.cpp:21} TTxLoadEverything Execute 2026-01-08T15:35:53.972910Z node 33 :BS_CONTROLLER DEBUG: {BSCTXLE03@load_everything.cpp:705} TTxLoadEverything Complete 2026-01-08T15:35:53.972977Z node 33 :BS_CONTROLLER DEBUG: {BSC09@impl.h:2211} LoadFinished 2026-01-08T15:35:53.973207Z node 33 :BS_CONTROLLER DEBUG: {BSC18@console_interaction.cpp:31} Console connection service started 2026-01-08T15:35:53.973259Z node 33 :BS_CONTROLLER DEBUG: {BSCTXLE04@load_everything.cpp:710} TTxLoadEverything InitQueue processed 2026-01-08T15:35:53.974196Z node 33 :BS_CONTROLLER DEBUG: {BSCTXCC01@config_cmd.cpp:433} Execute TEvControllerConfigRequest Request# {Command { DefineHostConfig { HostConfigId: 2 Drive { Path: "/dev/disk" } } } Command { DefineBox { BoxId: 1 Name: "box" Host { Key { Fqdn: "::1" IcPort: 12001 } HostConfigId: 2 } Host { Key { Fqdn: "::1" IcPort: 12002 } HostConfigId: 2 } Host { Key { Fqdn: "::1" IcPort: 12003 } HostConfigId: 2 } Host { Key { Fqdn: "::1" IcPort: 12004 } HostConfigId: 2 } Host { Key { Fqdn: "::1" IcPort: 12005 } HostConfigId: 2 } Host { Key { Fqdn: "::1" IcPort: 12006 } HostConfigId: 2 } Host { Key { Fqdn: "::1" IcPort: 12007 } HostConfigId: 2 } Host { Key { Fqdn: "::1" IcPort: 12008 } HostConfigId: 2 } Host { Key { Fqdn: "::1" IcPort: 12009 } HostConfigId: 2 } Host { Key { Fqdn: "::1" IcPort: 12010 } HostConfigId: 2 } Host { Key { Fqdn: "::1" IcPort: 12011 } HostConfigId: 2 } Host { Key { Fqdn: "::1" IcPort: 12012 } HostConfigId: 2 } } } Command { DefineStoragePool { BoxId: 1 StoragePoolId: 1 Name: "storage pool" ErasureSpecies: "block-4-2" VDiskKind: "Default" NumGroups: 8 PDiskFilter { Property { Type: ROT } } } } } 2026-01-08T15:35:53.975200Z node 33 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:356} Create new pdisk PDiskId# 33:1000 Path# /dev/disk 2026-01-08T15:35:53.975253Z node 33 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:356} Create new pdisk PDiskId# 34:1000 Path# /dev/disk 2026-01-08T15:35:53.975295Z node 33 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:356} Create new pdisk PDiskId# 35:1000 Path# /dev/disk 2026-01-08T15:35:53.975334Z node 33 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:356} Create new pdisk PDiskId# 36:1000 Path# /dev/disk 2026-01-08T15:35:53.975359Z node 33 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:356} Create new pdisk PDiskId# 37:1000 Path# /dev/disk 2026-01-08T15:35:53.975398Z node 33 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:356} Create new pdisk PDiskId# 38:1000 Path# /dev/disk 2026-01-08T15:35:53.975478Z node 33 :BS_CONTROLLER NOTICE: 
{BSCFP02@config_fit_pdisks.cpp:356} Create new pdisk PDiskId# 39:1000 Path# /dev/disk 2026-01-08T15:35:53.975513Z node 33 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:356} Create new pdisk PDiskId# 40:1000 Path# /dev/disk 2026-01-08T15:35:53.975537Z node 33 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:356} Create new pdisk PDiskId# 41:1000 Path# /dev/disk 2026-01-08T15:35:53.975561Z node 33 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:356} Create new pdisk PDiskId# 42:1000 Path# /dev/disk 2026-01-08T15:35:53.975585Z node 33 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:356} Create new pdisk PDiskId# 43:1000 Path# /dev/disk 2026-01-08T15:35:53.975615Z node 33 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:356} Create new pdisk PDiskId# 44:1000 Path# /dev/disk Response# Status { Success: true } Status { Success: true } Status { Success: true } Success: true ConfigTxSeqNo: 1 2026-01-08T15:35:54.001955Z node 33 :BS_CONTROLLER DEBUG: {BSCTXCC01@config_cmd.cpp:433} Execute TEvControllerConfigRequest Request# {Command { UpdateDriveStatus { HostKey { NodeId: 1 } Path: "/dev/disk" Status: INACTIVE } } } Response# Status { ErrorDescription: "Host not found NodeId# 1 HostKey# NodeId: 1\n incorrect" FailReason: kHostNotFound FailParam { NodeId: 1 } } ErrorDescription: "Host not found NodeId# 1 HostKey# NodeId: 1\n incorrect" ConfigTxSeqNo: 1 |87.9%| [TM] {BAZEL_UPLOAD} ydb/core/mind/bscontroller/ut_bscontroller/unittest >> YdbProxy::DropTable >> BsControllerConfig::ExtendByCreatingSeparateBox [GOOD] >> BsControllerConfig::ExtendBoxAndStoragePool >> YdbProxy::CreateTopic >> YdbProxy::CreateTable ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/datashard/ut_upload_rows/unittest >> TTxDataShardUploadRows::TestUploadShadowRows [GOOD] Test command err: ... waiting for SysViewsRoster update finished 2026-01-08T15:35:40.908723Z node 1 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:125: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, got undelivered to scheme cache: ActorUnknown 2026-01-08T15:35:41.049435Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:114:2161], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2026-01-08T15:35:41.049827Z node 1 :METADATA_PROVIDER ERROR: log.cpp:841: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2026-01-08T15:35:41.049997Z node 1 :METADATA_PROVIDER ERROR: log.cpp:841: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2026-01-08T15:35:41.503766Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2026-01-08T15:35:41.503913Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2026-01-08T15:35:41.591440Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1374: Notification cookie mismatch for subscription [1:34:2081] 1767886537595615 != 1767886537595619 2026-01-08T15:35:41.602384Z node 1 :IMPORT WARN: schemeshard_import.cpp:440: Table profiles were not loaded 2026-01-08T15:35:41.657120Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2026-01-08T15:35:41.772937Z node 1 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: PathErrorUnknown, path: Root/.metadata/script_executions ... waiting for SysViewsRoster update finished (done) 2026-01-08T15:35:42.086299Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2026-01-08T15:35:42.101898Z node 1 :TX_CONVEYOR ERROR: log.cpp:841: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2026-01-08T15:35:42.220750Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:35:42.269144Z node 1 :TX_DATASHARD INFO: datashard.cpp:374: TDataShard::OnActivateExecutor: tablet 72075186224037888 actor [1:904:2774] 2026-01-08T15:35:42.269425Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:661: TxInitSchema.Execute 2026-01-08T15:35:42.320710Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:727: TxInitSchema.Complete 2026-01-08T15:35:42.321022Z node 1 :TX_DATASHARD INFO: datashard.cpp:374: TDataShard::OnActivateExecutor: tablet 72075186224037889 actor [1:906:2776] 2026-01-08T15:35:42.321232Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:661: TxInitSchema.Execute 2026-01-08T15:35:42.329333Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:50: TDataShard::TTxInit::Execute 2026-01-08T15:35:42.331203Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1324: LoadChangeRecords: QueueSize: 0, at tablet: 72075186224037888 2026-01-08T15:35:42.331323Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1380: LoadLockChangeRecords at tablet: 72075186224037888 2026-01-08T15:35:42.331404Z node 1 
:TX_DATASHARD DEBUG: datashard.cpp:1429: LoadChangeRecordCommits at tablet: 72075186224037888 2026-01-08T15:35:42.331804Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:94: TDataShard::TTxInit::Complete 2026-01-08T15:35:42.332691Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:102: TDataShard::TTxInitRestored::Execute 2026-01-08T15:35:42.332781Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:142: DataShard 72075186224037888 persisting started state actor id [1:947:2774] in generation 1 2026-01-08T15:35:42.334854Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:727: TxInitSchema.Complete 2026-01-08T15:35:42.335088Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:50: TDataShard::TTxInit::Execute 2026-01-08T15:35:42.336559Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1324: LoadChangeRecords: QueueSize: 0, at tablet: 72075186224037889 2026-01-08T15:35:42.336631Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1380: LoadLockChangeRecords at tablet: 72075186224037889 2026-01-08T15:35:42.336678Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1429: LoadChangeRecordCommits at tablet: 72075186224037889 2026-01-08T15:35:42.336975Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:94: TDataShard::TTxInit::Complete 2026-01-08T15:35:42.337254Z node 1 :TX_DATASHARD INFO: datashard.cpp:374: TDataShard::OnActivateExecutor: tablet 72075186224037891 actor [1:911:2780] 2026-01-08T15:35:42.337459Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:661: TxInitSchema.Execute 2026-01-08T15:35:42.345525Z node 1 :TX_DATASHARD INFO: datashard.cpp:374: TDataShard::OnActivateExecutor: tablet 72075186224037890 actor [1:915:2783] 2026-01-08T15:35:42.345729Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:661: TxInitSchema.Execute 2026-01-08T15:35:42.354709Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:102: TDataShard::TTxInitRestored::Execute 2026-01-08T15:35:42.354795Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:142: DataShard 72075186224037889 persisting started state actor id [1:958:2776] in generation 1 2026-01-08T15:35:42.356563Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:727: TxInitSchema.Complete 2026-01-08T15:35:42.356663Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:50: TDataShard::TTxInit::Execute 2026-01-08T15:35:42.358209Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1324: LoadChangeRecords: QueueSize: 0, at tablet: 72075186224037891 2026-01-08T15:35:42.358282Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1380: LoadLockChangeRecords at tablet: 72075186224037891 2026-01-08T15:35:42.358348Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1429: LoadChangeRecordCommits at tablet: 72075186224037891 2026-01-08T15:35:42.358675Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:94: TDataShard::TTxInit::Complete 2026-01-08T15:35:42.358810Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:102: TDataShard::TTxInitRestored::Execute 2026-01-08T15:35:42.358879Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:142: DataShard 72075186224037891 persisting started state actor id [1:968:2780] in generation 1 2026-01-08T15:35:42.359197Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:727: TxInitSchema.Complete 2026-01-08T15:35:42.359294Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:50: TDataShard::TTxInit::Execute 2026-01-08T15:35:42.360634Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1324: LoadChangeRecords: QueueSize: 0, at tablet: 72075186224037890 2026-01-08T15:35:42.360692Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1380: LoadLockChangeRecords at tablet: 72075186224037890 2026-01-08T15:35:42.360741Z node 1 :TX_DATASHARD DEBUG: 
datashard.cpp:1429: LoadChangeRecordCommits at tablet: 72075186224037890 2026-01-08T15:35:42.361011Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:94: TDataShard::TTxInit::Complete 2026-01-08T15:35:42.361102Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:102: TDataShard::TTxInitRestored::Execute 2026-01-08T15:35:42.361157Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:142: DataShard 72075186224037890 persisting started state actor id [1:969:2783] in generation 1 2026-01-08T15:35:42.372229Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:151: TDataShard::TTxInitRestored::Complete 2026-01-08T15:35:42.409236Z node 1 :TX_DATASHARD INFO: datashard.cpp:418: Switched to work state WaitScheme tabletId 72075186224037888 2026-01-08T15:35:42.409467Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:458: 72075186224037888 not sending time cast registration request in state WaitScheme: missing processing params 2026-01-08T15:35:42.409586Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1259: Change sender created: at tablet: 72075186224037888, actorId: [1:974:2816] 2026-01-08T15:35:42.409643Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1264: Trying to activate change sender: at tablet: 72075186224037888 2026-01-08T15:35:42.409687Z node 1 :TX_DATASHARD INFO: datashard.cpp:1281: Cannot activate change sender: at tablet: 72075186224037888, state: WaitScheme 2026-01-08T15:35:42.409739Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2026-01-08T15:35:42.410434Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:151: TDataShard::TTxInitRestored::Complete 2026-01-08T15:35:42.410497Z node 1 :TX_DATASHARD INFO: datashard.cpp:418: Switched to work state WaitScheme tabletId 72075186224037889 2026-01-08T15:35:42.410565Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:458: 72075186224037889 not sending time cast registration request in state WaitScheme: missing processing params 2026-01-08T15:35:42.410636Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1259: Change sender created: at tablet: 72075186224037889, actorId: [1:975:2817] 2026-01-08T15:35:42.410667Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1264: Trying to activate change sender: at tablet: 72075186224037889 2026-01-08T15:35:42.410692Z node 1 :TX_DATASHARD INFO: datashard.cpp:1281: Cannot activate change sender: at tablet: 72075186224037889, state: WaitScheme 2026-01-08T15:35:42.410720Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037889 2026-01-08T15:35:42.410799Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:151: TDataShard::TTxInitRestored::Complete 2026-01-08T15:35:42.410830Z node 1 :TX_DATASHARD INFO: datashard.cpp:418: Switched to work state WaitScheme tabletId 72075186224037891 2026-01-08T15:35:42.410889Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:458: 72075186224037891 not sending time cast registration request in state WaitScheme: missing processing params 2026-01-08T15:35:42.410951Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1259: Change sender created: at tablet: 72075186224037891, actorId: [1:976:2818] 2026-01-08T15:35:42.410980Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1264: Trying to activate change sender: at tablet: 72075186224037891 2026-01-08T15:35:42.411010Z node 1 :TX_DATASHARD INFO: datashard.cpp:1281: Cannot activate change sender: at tablet: 72075186224037891, state: WaitScheme 2026-01-08T15:35:42.411061Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 
72075186224037891 2026-01-08T15:35:42.411291Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 72075186224037888 2026-01-08T15:35:42.411387Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 72075186224037888 2026-01-08T15:35:42.411511Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:151: TDataShard::TTxInitRestored::Complete 2026-01-08T15:35:42.411540Z node 1 :TX_DATASHARD INFO: datashard.cpp:418: Switched to work state WaitScheme tabletId 72075186224037890 2026-01-08T15:35:42.411589Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:458: 72075186224037890 not sending time cast registration request in state WaitScheme: missing processing params 2026-01-08T15:35:42.411640Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1259: Change sender created: at tablet: 72075186224037890, a ... 5:35:54.595590Z node 3 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2026-01-08T15:35:54.596010Z node 3 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2026-01-08T15:35:54.596071Z node 3 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 1 2026-01-08T15:35:54.596123Z node 3 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:333: Found ready operation [2000:281474976715657] in PlanQueue unit at 72075186224037888 2026-01-08T15:35:54.596435Z node 3 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:634: LoadTxDetails at 72075186224037888 loaded tx from db 2000:281474976715657 keys extracted: 0 2026-01-08T15:35:54.596583Z node 3 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 (dry run) active 1 active planned 1 immediate 0 planned 1 2026-01-08T15:35:54.596785Z node 3 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2026-01-08T15:35:54.596856Z node 3 :TX_DATASHARD INFO: create_table_unit.cpp:69: Trying to CREATE TABLE at 72075186224037888 tableId# [OwnerId: 72057594046644480, LocalPathId: 38] schema version# 1 2026-01-08T15:35:54.597372Z node 3 :TX_DATASHARD INFO: datashard.cpp:476: Send registration request to time cast Ready tabletId 72075186224037888 mediators count is 1 coordinators count is 1 buckets per mediator 2 2026-01-08T15:35:54.598213Z node 3 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 (dry run) active 0 active planned 0 immediate 0 planned 0 2026-01-08T15:35:54.600710Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:3772: Got TEvMediatorTimecast::TEvRegisterTabletResult at 72075186224037888 time 0 2026-01-08T15:35:54.600777Z node 3 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2026-01-08T15:35:54.602509Z node 3 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:98: Sending '{TEvPlanStepAccepted TabletId# 72075186224037888 step# 2000} 2026-01-08T15:35:54.602630Z node 3 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2026-01-08T15:35:54.604055Z node 3 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2026-01-08T15:35:54.604120Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:1264: Trying to activate change sender: at tablet: 72075186224037888 2026-01-08T15:35:54.604188Z node 3 :TX_DATASHARD INFO: 
datashard.cpp:1302: Change sender activated: at tablet: 72075186224037888 2026-01-08T15:35:54.604269Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:810: Complete [2000 : 281474976715657] from 72075186224037888 at tablet 72075186224037888 send result to client [3:399:2398], exec latency: 0 ms, propose latency: 0 ms 2026-01-08T15:35:54.604336Z node 3 :TX_DATASHARD INFO: datashard.cpp:1599: 72075186224037888 Sending notify to schemeshard 72057594046644480 txId 281474976715657 state Ready TxInFly 0 2026-01-08T15:35:54.604426Z node 3 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2026-01-08T15:35:54.606382Z node 3 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:180: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2026-01-08T15:35:54.608294Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:3790: Got TEvMediatorTimecast::TEvSubscribeReadStepResult at 72075186224037888 coordinator 72057594046316545 last step 0 next step 2000 2026-01-08T15:35:54.608519Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:2965: Handle TEvSchemaChangedResult 281474976715657 datashard 72075186224037888 state Ready 2026-01-08T15:35:54.608593Z node 3 :TX_DATASHARD DEBUG: datashard__schema_changed.cpp:22: 72075186224037888 Got TEvSchemaChangedResult from SS at 72075186224037888 2026-01-08T15:35:54.624484Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:951:2809], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:35:54.624604Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:962:2814], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:35:54.625018Z node 3 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:263: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:35:54.626064Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:967:2819], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:35:54.626344Z node 3 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:263: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:35:54.637911Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2026-01-08T15:35:54.646802Z node 3 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:180: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2026-01-08T15:35:54.794193Z node 3 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:180: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2026-01-08T15:35:54.797646Z node 3 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [3:965:2817], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715658 completed, doublechecking } 2026-01-08T15:35:54.836521Z node 3 :TX_PROXY ERROR: schemereq.cpp:624: Actor# [3:1023:2856] txid# 281474976715659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 42], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2026-01-08T15:35:54.933957Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:3740: Server connected at leader tablet# 72075186224037888, clientId# [3:1053:2873], serverId# [3:1054:2874], sessionId# [0:0:0] 2026-01-08T15:35:54.935122Z node 3 :TX_DATASHARD DEBUG: execute_write_unit.cpp:262: Executing write operation for [0:2] at 72075186224037888 2026-01-08T15:35:54.935335Z node 3 :TX_DATASHARD DEBUG: execute_write_unit.cpp:465: Executed write operation for [0:2] at 72075186224037888, row count=3 2026-01-08T15:35:54.947692Z node 3 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2026-01-08T15:35:55.041898Z node 3 :TX_DATASHARD DEBUG: execute_write_unit.cpp:262: Executing write operation for [0:3] at 72075186224037888 2026-01-08T15:35:55.042078Z node 3 :TX_DATASHARD DEBUG: execute_write_unit.cpp:465: Executed write operation for [0:3] at 72075186224037888, row count=3 2026-01-08T15:35:55.054120Z node 3 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2026-01-08T15:35:55.138105Z node 3 :TX_DATASHARD DEBUG: execute_write_unit.cpp:262: Executing write operation for [0:4] at 72075186224037888 2026-01-08T15:35:55.138294Z node 3 :TX_DATASHARD DEBUG: execute_write_unit.cpp:465: Executed write operation for [0:4] at 72075186224037888, row count=3 2026-01-08T15:35:55.151143Z node 3 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2026-01-08T15:35:55.155153Z node 3 :TX_DATASHARD INFO: datashard__op_rows.cpp:27: TTxDirectBase(36) Execute: at tablet# 72075186224037888 2026-01-08T15:35:55.166589Z node 3 :TX_DATASHARD INFO: datashard__op_rows.cpp:81: TTxDirectBase(36) Complete: at tablet# 72075186224037888 2026-01-08T15:35:55.166689Z node 3 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2026-01-08T15:35:55.168520Z node 3 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037888 2026-01-08T15:35:55.168978Z node 3 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 72075186224037888 2026-01-08T15:35:55.169250Z node 3 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2026-01-08T15:35:55.169301Z node 3 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 1 active planned 0 immediate 1 planned 0 2026-01-08T15:35:55.169357Z node 3 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:307: Found ready candidate operation [0:281474976715663] at 72075186224037888 for WaitForStreamClearance 2026-01-08T15:35:55.169622Z node 3 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 (dry run) active 1 active planned 0 immediate 1 planned 0 2026-01-08T15:35:55.169695Z node 3 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2026-01-08T15:35:55.171208Z node 3 
:TX_DATASHARD DEBUG: read_table_scan.cpp:514: Got quota for read table scan ShardId: 72075186224037888, TxId: 281474976715663, MessageQuota: 1 2026-01-08T15:35:55.171564Z node 3 :TX_DATASHARD DEBUG: read_table_scan.cpp:662: Send response data ShardId: 72075186224037888, TxId: 281474976715663, Size: 36, Rows: 0, PendingAcks: 1, MessageQuota: 0 2026-01-08T15:35:55.171756Z node 3 :TX_DATASHARD DEBUG: read_table_scan.cpp:483: Got stream data ack ShardId: 72075186224037888, TxId: 281474976715663, PendingAcks: 0 2026-01-08T15:35:55.171819Z node 3 :TX_DATASHARD DEBUG: read_table_scan.cpp:718: Finish scan ShardId: 72075186224037888, TxId: 281474976715663, MessageQuota: 0 2026-01-08T15:35:55.221508Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:4486: FullScan complete at 72075186224037888 2026-01-08T15:35:55.221586Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:4492: Found op: cookie: 281474976715663, at: 72075186224037888 2026-01-08T15:35:55.221792Z node 3 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2026-01-08T15:35:55.221833Z node 3 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 1 active planned 0 immediate 1 planned 0 2026-01-08T15:35:55.221872Z node 3 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:307: Found ready candidate operation [0:281474976715663] at 72075186224037888 for ReadTableScan 2026-01-08T15:35:55.222013Z node 3 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 (dry run) active 0 active planned 0 immediate 0 planned 0 2026-01-08T15:35:55.222080Z node 3 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2026-01-08T15:35:55.222124Z node 3 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 |87.9%| [TM] {BAZEL_UPLOAD} ydb/core/tx/datashard/ut_upload_rows/unittest >> YdbProxy::MakeDirectory >> ConvertMiniKQLValueToYdbValueTest::Void [GOOD] >> ConvertMiniKQLValueToYdbValueTest::Struct [GOOD] >> ConvertMiniKQLValueToYdbValueTest::Tuple [GOOD] >> ConvertMiniKQLValueToYdbValueTest::Variant [GOOD] >> ConvertTableDescription::StorageSettings [GOOD] >> ConvertTableDescription::ColumnFamilies >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-LocalUser-CreateUser-21 [FAIL] >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-LocalUser-CreateUser-22 >> ConvertTableDescription::ColumnFamilies [GOOD] >> ConvertYdbPermissionNameToACLAttrs::SimpleConvertGood [GOOD] |87.9%| [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/close_with_load/unittest |87.9%| [TM] {BAZEL_UPLOAD} ydb/core/kqp/ut/close_with_load/unittest |87.9%| [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/close_with_load/unittest |87.9%| [TM] {BAZEL_UPLOAD} ydb/core/kqp/ut/close_with_load/unittest ------- [TM] {asan, default-linux-x86_64, release} ydb/public/sdk/cpp/src/client/topic/ut/with_direct_read_ut/unittest >> TxUsage::WriteToTopic_Demo_21_RestartAfterCommit_Query [GOOD] Test command err: 2026-01-08T15:31:56.431998Z node 1 :METADATA_PROVIDER WARN: log.cpp:841: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7593013913304772512:2138];send_to=[0:7307199536658146131:7762515]; 2026-01-08T15:31:56.432064Z node 1 :METADATA_PROVIDER ERROR: log.cpp:841: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 
2026-01-08T15:31:56.555056Z node 1 :PQ_READ_PROXY DEBUG: caching_service.cpp:44: Direct read cache: : Created 2026-01-08T15:31:57.074452Z node 1 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: LookupError, path: Root/.metadata/script_executions 2026-01-08T15:31:57.156278Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2026-01-08T15:31:57.156370Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2026-01-08T15:31:57.290788Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1374: Notification cookie mismatch for subscription [1:7593013913304772412:2081] 1767886316403871 != 1767886316403874 2026-01-08T15:31:57.374026Z node 1 :IMPORT WARN: schemeshard_import.cpp:440: Table profiles were not loaded 2026-01-08T15:31:57.391599Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2026-01-08T15:31:57.397091Z node 1 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: PathErrorUnknown, path: Root/.metadata/script_executions 2026-01-08T15:31:57.461388Z node 1 :TX_CONVEYOR ERROR: log.cpp:841: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2026-01-08T15:31:57.723597Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: /home/runner/.ya/build/build_root/663n/0021bd/r3tmp/yandexT7GJNR.tmp 2026-01-08T15:31:57.723625Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: /home/runner/.ya/build/build_root/663n/0021bd/r3tmp/yandexT7GJNR.tmp 2026-01-08T15:31:57.723782Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:202: successfully initialized from file: /home/runner/.ya/build/build_root/663n/0021bd/r3tmp/yandexT7GJNR.tmp 2026-01-08T15:31:57.723881Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2026-01-08T15:31:57.776827Z INFO: TTestServer started on Port 18710 GrpcPort 29665 PQClient connected to localhost:29665 2026-01-08T15:31:58.164155Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2026-01-08T15:31:58.171686Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:123: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2026-01-08T15:31:58.196390Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:123: NotifyTxCompletion, unknown transaction, txId: 281474976710658, at schemeshard: 72057594046644480 2026-01-08T15:31:58.327618Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:31:58.555285Z node 1 :FLAT_TX_SCHEMESHARD 
WARN: schemeshard__notify.cpp:123: NotifyTxCompletion, unknown transaction, txId: 281474976710661, at schemeshard: 72057594046644480 2026-01-08T15:32:01.316964Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7593013934779609936:2333], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:32:01.317114Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:263: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:32:01.320323Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7593013934779609948:2336], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:32:01.320408Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7593013934779609949:2337], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:32:01.320871Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:263: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:32:01.330524Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710662:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2026-01-08T15:32:01.353429Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7593013934779609952:2338], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710662 completed, doublechecking } 2026-01-08T15:32:01.436570Z node 1 :METADATA_PROVIDER ERROR: log.cpp:841: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7593013913304772512:2138];send_to=[0:7307199536658146131:7762515]; 2026-01-08T15:32:01.436643Z node 1 :METADATA_PROVIDER ERROR: log.cpp:841: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2026-01-08T15:32:01.613298Z node 1 :TX_PROXY ERROR: schemereq.cpp:624: Actor# [1:7593013934779610016:2648] txid# 281474976710663, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 45], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2026-01-08T15:32:01.651859Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:32:01.718711Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:32:01.830013Z node 1 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:625: Compilation failed, self: [1:7593013934779610027:2345], status: SCHEME_ERROR, issues:
: Error: Type annotation, code: 1030
:5:17: Error: At function: KiReadTable!
:5:17: Error: Cannot find table 'db.[/Root/PQ/Config/V2/Versions]' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions., code: 2003 2026-01-08T15:32:01.832831Z node 1 :KQP_SESSION WARN: {KQPSA@kqp_session_actor.cpp:2679} SessionId: ydb://session/3?node_id=1&id=NGU0NmRiNWMtNTM2MDA5ZC0zOTBhY2RhOS01NDJhYzUxMQ==, ActorId: [1:7593013934779609933:2331], ActorState: ExecuteState, LegacyTraceId: 01kef3rxmha1b0j2zd75da9864, ReplyQueryCompileError, remove tx status# SCHEME_ERROR issues# { message: "Type annotation" issue_code: 1030 severity: 1 issues { position { row: 5 column: 17 } message: "At function: KiReadTable!" end_position { row: 5 column: 17 } severity: 1 issues { position { row: 5 column: 17 } message: "Cannot find table \'db.[/Root/PQ/Config/V2/Versions]\' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions." end_position { row: 5 column: 17 } issue_code: 2003 severity: 1 } } } tx_id# trace_id# 2026-01-08T15:32:01.841193Z node 1 :PERSQUEUE_CLUSTER_TRACKER ERROR: cluster_tracker.cpp:167: failed to list clusters: { Response { QueryIssues { message: "Type annotation" issue_code: 1030 severity: 1 issues { position { row: 5 column: 17 } message: "At function: KiReadTable!" end_position { row: 5 column: 17 } severity: 1 issues { position { row: 5 column: 17 } message: "Cannot find table \'db.[/Root/PQ/Config/V2/Versions]\' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions." end_position { row: 5 column: 17 } issue_code: 2003 severity: 1 } } } TxMeta { } } YdbStatus: SCHEME_ERROR ConsumedRu: 1 } 2026-01-08T15:32:01.866903Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) === Init DC: UPSERT INTO `/Root/PQ/Config/V2/Cluster` (name, balancer, local, enabled, weight) VALUES ("dc1", "localhost", true, true, 1000), ("dc2", "dc2.logbroker.yandex.net", false, true, 1000); === CheckClustersList. Subcribe to ClusterTracker from [1:7593013939074577611:2831] === CheckClustersList. Ok 2026-01-08T15:32:07.598476Z :WriteToTopic_Demo_19_RestartNo_Table INFO: TTopicSdkTestSetup started 2026-01-08T15:32:07.642668Z node 1 :PQ_READ_PROXY DEBUG: grpc_pq_schema.cpp:133: new create topic request 2026-01-08T15:32:07.682779Z node 1 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1657: [72075186224037893][] pipe [1:7593013960549414292:2929] connected; active server actors: 1 2026-01-08T15:32:07.684472Z node 1 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer__balancing.cpp:1521: [72075186224037893][test-topic] updating configuration. Deleted partitions []. 
Added partitions [0] 2026-01-08T15:32:07.693700Z node 1 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer.cpp:756: [72075186224037893][test-topic] Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at RB 72075186224037893 2026-01-08T15:32:07.693823Z node 1 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer.cpp:142: [72075186224037893][test-topic] BALANCER INIT DONE for test-topic: (0, 72075186224037892) 2026-01-08T15:32:07.694573Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:3023: [PQ: 72075186224037892] Handle TEvInterconnect::TEvNodeInfo 2026-01-08T15:32:07.697920Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:3055: [PQ: 72075186224037892] Transactions request. From tx_00000000000000000000, To tx_18446744073709551615 2026-01-08T15:32:07.698178 ... Z :INFO: [/Root] [/Root] [d8f558d8-359d3ca9-55a299f8-e02f2436] Closing read session. Close timeout: 0.000000s 2026-01-08T15:35:52.070952Z :INFO: [/Root] Read/commit by partition streams (cluster:topic:partition:stream-id:read-offset:committed-offset): -:topic_A:0:1:11:12 2026-01-08T15:35:52.071012Z :INFO: [/Root] [/Root] [d8f558d8-359d3ca9-55a299f8-e02f2436] Counters: { Errors: 0 CurrentSessionLifetimeMs: 2015 BytesRead: 15000000 MessagesRead: 12 BytesReadCompressed: 15000000 BytesInflightUncompressed: 0 BytesInflightCompressed: 0 BytesInflightTotal: 0 MessagesInflight: 0 } 2026-01-08T15:35:52.071129Z :NOTICE: [/Root] [/Root] [d8f558d8-359d3ca9-55a299f8-e02f2436] Aborting read session. Description: SessionClosed { Status: ABORTED Issues: "
: Error: Close with zero timeout " } 2026-01-08T15:35:52.071187Z :DEBUG: [/Root] [/Root] [d8f558d8-359d3ca9-55a299f8-e02f2436] [] Abort session to cluster 2026-01-08T15:35:52.071768Z :DEBUG: [/Root] 0x00007DA30190E590 TDirectReadSessionManager ServerSessionId=test-consumer_13_1_13841887336272609584_v1 Close 2026-01-08T15:35:52.072169Z :DEBUG: [/Root] 0x00007DA30190E590 TDirectReadSessionManager ServerSessionId=test-consumer_13_1_13841887336272609584_v1 Close 2026-01-08T15:35:52.072275Z :NOTICE: [/Root] [/Root] [d8f558d8-359d3ca9-55a299f8-e02f2436] Aborting read session. Description: SessionClosed { Status: ABORTED Issues: "
: Error: Aborted " } 2026-01-08T15:35:52.074621Z :INFO: [/Root] TraceId [] SessionId [test-message_group_id|fb3dfa02-6a9ff3df-fa3a9f09-d77ebeac_0] PartitionId [0] Generation [2] Write session: close. Timeout 0.000000s 2026-01-08T15:35:52.074680Z :INFO: [/Root] TraceId [] SessionId [test-message_group_id|fb3dfa02-6a9ff3df-fa3a9f09-d77ebeac_0] PartitionId [0] Generation [2] Write session will now close 2026-01-08T15:35:52.074727Z :DEBUG: [/Root] TraceId [] SessionId [test-message_group_id|fb3dfa02-6a9ff3df-fa3a9f09-d77ebeac_0] PartitionId [0] Generation [2] Write session: aborting 2026-01-08T15:35:52.075219Z :INFO: [/Root] TraceId [] SessionId [test-message_group_id|fb3dfa02-6a9ff3df-fa3a9f09-d77ebeac_0] PartitionId [0] Generation [2] Write session: gracefully shut down, all writes complete 2026-01-08T15:35:52.075265Z :DEBUG: [/Root] TraceId [] SessionId [test-message_group_id|fb3dfa02-6a9ff3df-fa3a9f09-d77ebeac_0] PartitionId [0] Generation [2] Write session: destroy 2026-01-08T15:35:52.082496Z node 13 :PQ_READ_PROXY DEBUG: read_session_actor.cpp:122: session cookie 1 consumer test-consumer session test-consumer_13_1_13841887336272609584_v1 grpc read done: success# 0, data# { } 2026-01-08T15:35:52.082508Z node 13 :PQ_READ_PROXY DEBUG: direct_read_actor.cpp:84: Direct read proxy [13:7593014915859536024:2524]: session cookie 2 consumer test-consumer session test-consumer_13_1_13841887336272609584_v1 grpc read done: success# 0, data# { } 2026-01-08T15:35:52.082534Z node 13 :PQ_READ_PROXY INFO: direct_read_actor.cpp:87: Direct read proxy [13:7593014915859536024:2524]: session cookie 2 consumer test-consumer session test-consumer_13_1_13841887336272609584_v1grpc read failed 2026-01-08T15:35:52.082547Z node 13 :PQ_READ_PROXY INFO: read_session_actor.cpp:125: session cookie 1 consumer test-consumer session test-consumer_13_1_13841887336272609584_v1 grpc read failed 2026-01-08T15:35:52.082567Z node 13 :PQ_READ_PROXY INFO: direct_read_actor.cpp:66: Direct read proxy [13:7593014915859536024:2524]: session cookie 2 consumer test-consumer session test-consumer_13_1_13841887336272609584_v1 grpc closed 2026-01-08T15:35:52.082586Z node 13 :PQ_READ_PROXY INFO: read_session_actor.cpp:92: session cookie 1 consumer test-consumer session test-consumer_13_1_13841887336272609584_v1 grpc closed 2026-01-08T15:35:52.082589Z node 13 :PQ_READ_PROXY INFO: direct_read_actor.cpp:167: Direct read proxy [13:7593014915859536024:2524]: session cookie 2 consumer test-consumer session test-consumer_13_1_13841887336272609584_v1 proxy is DEAD 2026-01-08T15:35:52.082632Z node 13 :PQ_READ_PROXY INFO: read_session_actor.cpp:383: session cookie 1 consumer test-consumer session test-consumer_13_1_13841887336272609584_v1 is DEAD 2026-01-08T15:35:52.083221Z node 13 :PQ_WRITE_PROXY DEBUG: write_session_actor.cpp:254: session v1 cookie: 5 sessionId: test-message_group_id|fb3dfa02-6a9ff3df-fa3a9f09-d77ebeac_0 grpc read done: success: 0 data: 2026-01-08T15:35:52.083237Z node 13 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:256: session v1 cookie: 5 sessionId: test-message_group_id|fb3dfa02-6a9ff3df-fa3a9f09-d77ebeac_0 grpc read failed 2026-01-08T15:35:52.083264Z node 13 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:232: session v1 cookie: 5 sessionId: test-message_group_id|fb3dfa02-6a9ff3df-fa3a9f09-d77ebeac_0 grpc closed 2026-01-08T15:35:52.083281Z node 13 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:304: session v1 cookie: 5 sessionId: test-message_group_id|fb3dfa02-6a9ff3df-fa3a9f09-d77ebeac_0 is DEAD 2026-01-08T15:35:52.084289Z node 
13 :PQ_WRITE_PROXY DEBUG: writer.cpp:562: TPartitionWriter 72075186224037894 (partition=0) Received event: NActors::TEvents::TEvPoison 2026-01-08T15:35:52.085264Z node 13 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer__balancing.cpp:1661: [72075186224037895][topic_A] pipe [13:7593014915859536016:2519] disconnected. 2026-01-08T15:35:52.085327Z node 13 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1672: [72075186224037895][topic_A] pipe [13:7593014915859536016:2519] disconnected; active server actors: 1 2026-01-08T15:35:52.085385Z node 13 :PERSQUEUE_READ_BALANCER NOTICE: read_balancer__balancing.cpp:1681: [72075186224037895][topic_A] pipe [13:7593014915859536016:2519] client test-consumer disconnected session test-consumer_13_1_13841887336272609584_v1 2026-01-08T15:35:52.085535Z node 13 :PERSQUEUE DEBUG: pq_impl.cpp:2321: [PQ: 72075186224037894] Destroy direct read session test-consumer_13_1_13841887336272609584_v1 2026-01-08T15:35:52.085588Z node 13 :PERSQUEUE DEBUG: pq_impl.cpp:2809: [PQ: 72075186224037894] server disconnected, pipe [13:7593014915859536019:2522] destroyed 2026-01-08T15:35:52.085643Z node 13 :PERSQUEUE DEBUG: pq_impl.cpp:2809: [PQ: 72075186224037894] server disconnected, pipe [13:7593014907269601362:2498] destroyed 2026-01-08T15:35:52.085707Z node 13 :PERSQUEUE DEBUG: partition_write.cpp:140: [72075186224037894][Partition][0][StateIdle] TPartition::DropOwner. 2026-01-08T15:35:52.085762Z node 13 :PERSQUEUE DEBUG: partition.cpp:2329: [72075186224037894][Partition][0][StateIdle] Process user action and tx events 2026-01-08T15:35:52.085783Z node 13 :PERSQUEUE DEBUG: partition.cpp:2388: [72075186224037894][Partition][0][StateIdle] Events: 0, PendingCommits: 0, PendingWrites: 0 2026-01-08T15:35:52.085798Z node 13 :PERSQUEUE DEBUG: partition.cpp:2337: [72075186224037894][Partition][0][StateIdle] Process user action and tx pending commits 2026-01-08T15:35:52.085819Z node 13 :PERSQUEUE DEBUG: partition.cpp:2388: [72075186224037894][Partition][0][StateIdle] Events: 0, PendingCommits: 0, PendingWrites: 0 2026-01-08T15:35:52.085837Z node 13 :PERSQUEUE DEBUG: partition.cpp:2347: [72075186224037894][Partition][0][StateIdle] Try persist 2026-01-08T15:35:52.085901Z node 13 :PQ_READ_PROXY DEBUG: caching_service.cpp:139: Direct read cache: server session deregistered: test-consumer_13_1_13841887336272609584_v1 2026-01-08T15:35:52.139956Z node 13 :PERSQUEUE DEBUG: partition.cpp:2329: [72075186224037892][Partition][0][StateIdle] Process user action and tx events 2026-01-08T15:35:52.140015Z node 13 :PERSQUEUE DEBUG: partition.cpp:2388: [72075186224037892][Partition][0][StateIdle] Events: 0, PendingCommits: 0, PendingWrites: 0 2026-01-08T15:35:52.140049Z node 13 :PERSQUEUE DEBUG: partition.cpp:2337: [72075186224037892][Partition][0][StateIdle] Process user action and tx pending commits 2026-01-08T15:35:52.140077Z node 13 :PERSQUEUE DEBUG: partition.cpp:2388: [72075186224037892][Partition][0][StateIdle] Events: 0, PendingCommits: 0, PendingWrites: 0 2026-01-08T15:35:52.140093Z node 13 :PERSQUEUE DEBUG: partition.cpp:2347: [72075186224037892][Partition][0][StateIdle] Try persist 2026-01-08T15:35:52.158433Z node 13 :PERSQUEUE DEBUG: partition.cpp:2329: [72075186224037894][Partition][0][StateIdle] Process user action and tx events 2026-01-08T15:35:52.158478Z node 13 :PERSQUEUE DEBUG: partition.cpp:2388: [72075186224037894][Partition][0][StateIdle] Events: 0, PendingCommits: 0, PendingWrites: 0 2026-01-08T15:35:52.158494Z node 13 :PERSQUEUE DEBUG: partition.cpp:2337: 
[72075186224037894][Partition][0][StateIdle] Process user action and tx pending commits 2026-01-08T15:35:52.158512Z node 13 :PERSQUEUE DEBUG: partition.cpp:2388: [72075186224037894][Partition][0][StateIdle] Events: 0, PendingCommits: 0, PendingWrites: 0 2026-01-08T15:35:52.158527Z node 13 :PERSQUEUE DEBUG: partition.cpp:2347: [72075186224037894][Partition][0][StateIdle] Try persist 2026-01-08T15:35:52.239932Z node 13 :PERSQUEUE DEBUG: partition.cpp:2329: [72075186224037892][Partition][0][StateIdle] Process user action and tx events 2026-01-08T15:35:52.239969Z node 13 :PERSQUEUE DEBUG: partition.cpp:2388: [72075186224037892][Partition][0][StateIdle] Events: 0, PendingCommits: 0, PendingWrites: 0 2026-01-08T15:35:52.239987Z node 13 :PERSQUEUE DEBUG: partition.cpp:2337: [72075186224037892][Partition][0][StateIdle] Process user action and tx pending commits 2026-01-08T15:35:52.240008Z node 13 :PERSQUEUE DEBUG: partition.cpp:2388: [72075186224037892][Partition][0][StateIdle] Events: 0, PendingCommits: 0, PendingWrites: 0 2026-01-08T15:35:52.240027Z node 13 :PERSQUEUE DEBUG: partition.cpp:2347: [72075186224037892][Partition][0][StateIdle] Try persist 2026-01-08T15:35:52.258720Z node 13 :PERSQUEUE DEBUG: partition.cpp:2329: [72075186224037894][Partition][0][StateIdle] Process user action and tx events 2026-01-08T15:35:52.258789Z node 13 :PERSQUEUE DEBUG: partition.cpp:2388: [72075186224037894][Partition][0][StateIdle] Events: 0, PendingCommits: 0, PendingWrites: 0 2026-01-08T15:35:52.258826Z node 13 :PERSQUEUE DEBUG: partition.cpp:2337: [72075186224037894][Partition][0][StateIdle] Process user action and tx pending commits 2026-01-08T15:35:52.258871Z node 13 :PERSQUEUE DEBUG: partition.cpp:2388: [72075186224037894][Partition][0][StateIdle] Events: 0, PendingCommits: 0, PendingWrites: 0 2026-01-08T15:35:52.258906Z node 13 :PERSQUEUE DEBUG: partition.cpp:2347: [72075186224037894][Partition][0][StateIdle] Try persist 2026-01-08T15:35:52.342460Z node 13 :PERSQUEUE DEBUG: partition.cpp:2329: [72075186224037892][Partition][0][StateIdle] Process user action and tx events 2026-01-08T15:35:52.342503Z node 13 :PERSQUEUE DEBUG: partition.cpp:2388: [72075186224037892][Partition][0][StateIdle] Events: 0, PendingCommits: 0, PendingWrites: 0 2026-01-08T15:35:52.342521Z node 13 :PERSQUEUE DEBUG: partition.cpp:2337: [72075186224037892][Partition][0][StateIdle] Process user action and tx pending commits 2026-01-08T15:35:52.342540Z node 13 :PERSQUEUE DEBUG: partition.cpp:2388: [72075186224037892][Partition][0][StateIdle] Events: 0, PendingCommits: 0, PendingWrites: 0 2026-01-08T15:35:52.342555Z node 13 :PERSQUEUE DEBUG: partition.cpp:2347: [72075186224037892][Partition][0][StateIdle] Try persist 2026-01-08T15:35:52.359007Z node 13 :PERSQUEUE DEBUG: partition.cpp:2329: [72075186224037894][Partition][0][StateIdle] Process user action and tx events 2026-01-08T15:35:52.359049Z node 13 :PERSQUEUE DEBUG: partition.cpp:2388: [72075186224037894][Partition][0][StateIdle] Events: 0, PendingCommits: 0, PendingWrites: 0 2026-01-08T15:35:52.359067Z node 13 :PERSQUEUE DEBUG: partition.cpp:2337: [72075186224037894][Partition][0][StateIdle] Process user action and tx pending commits 2026-01-08T15:35:52.359085Z node 13 :PERSQUEUE DEBUG: partition.cpp:2388: [72075186224037894][Partition][0][StateIdle] Events: 0, PendingCommits: 0, PendingWrites: 0 2026-01-08T15:35:52.359100Z node 13 :PERSQUEUE DEBUG: partition.cpp:2347: [72075186224037894][Partition][0][StateIdle] Try persist |87.9%| [TM] {BAZEL_UPLOAD} 
ydb/public/sdk/cpp/src/client/topic/ut/with_direct_read_ut/unittest |87.9%| [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/close_with_load/unittest >> YdbTableSplit::SplitByLoadWithNonEmptyRangeReads [GOOD] |87.9%| [TM] {BAZEL_UPLOAD} ydb/core/kqp/ut/close_with_load/unittest ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/datashard/ut_volatile/unittest >> DataShardVolatile::CompactedVolatileChangesAbort [GOOD] Test command err: ... waiting for SysViewsRoster update finished 2026-01-08T15:30:55.362532Z node 1 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:125: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, got undelivered to scheme cache: ActorUnknown 2026-01-08T15:30:55.506672Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:114:2161], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2026-01-08T15:30:55.507049Z node 1 :METADATA_PROVIDER ERROR: log.cpp:841: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2026-01-08T15:30:55.507198Z node 1 :METADATA_PROVIDER ERROR: log.cpp:841: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2026-01-08T15:30:55.884663Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2026-01-08T15:30:55.884809Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2026-01-08T15:30:56.154809Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1374: Notification cookie mismatch for subscription [1:34:2081] 1767886250928187 != 1767886250928191 2026-01-08T15:30:56.207568Z node 1 :IMPORT WARN: schemeshard_import.cpp:440: Table profiles were not loaded 2026-01-08T15:30:56.278747Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2026-01-08T15:30:56.432512Z node 1 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: PathErrorUnknown, path: Root/.metadata/script_executions 2026-01-08T15:30:56.686021Z node 1 :TX_CONVEYOR ERROR: log.cpp:841: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; ... waiting for SysViewsRoster update finished (done) 2026-01-08T15:30:57.148395Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2026-01-08T15:30:57.401092Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:314: actor# [1:68:2115] Handle TEvProposeTransaction 2026-01-08T15:30:57.401161Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:237: actor# [1:68:2115] TxId# 281474976715657 ProcessProposeTransaction 2026-01-08T15:30:57.401262Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:256: actor# [1:68:2115] Cookie# 0 userReqId# "" txid# 281474976715657 SEND to# [1:866:2749] 2026-01-08T15:30:57.667776Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1822: Actor# [1:866:2749] txid# 281474976715657 Bootstrap EvSchemeRequest record: Transaction { ModifyScheme { WorkingDir: "/Root" OperationType: ESchemeOpCreateTable CreateTable { Name: "table-1" Columns { Name: "key" Type: "Uint32" FamilyName: "" NotNull: false } Columns { Name: "value" Type: "Uint32" FamilyName: "" NotNull: false } KeyColumnNames: "key" UniformPartitionsCount: 1 } } } ExecTimeoutPeriod: 18446744073709551615 2026-01-08T15:30:57.667892Z node 1 :TX_PROXY DEBUG: schemereq.cpp:647: Actor# [1:866:2749] txid# 281474976715657 Bootstrap, UserSID: CheckAdministrator: 0 CheckDatabaseAdministrator: 0 2026-01-08T15:30:57.668540Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1887: Actor# [1:866:2749] txid# 281474976715657 HANDLE EvNavigateKeySetResult TFlatSchemeReq marker# P6 2026-01-08T15:30:57.668679Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1877: Actor# [1:866:2749] txid# 281474976715657 
TEvNavigateKeySet requested from SchemeCache 2026-01-08T15:30:57.669086Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1631: Actor# [1:866:2749] txid# 281474976715657 HANDLE EvNavigateKeySetResult TFlatSchemeReq marker# P5 ErrorCount# 0 2026-01-08T15:30:57.669218Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1666: Actor# [1:866:2749] HANDLE EvNavigateKeySetResult, txid# 281474976715657 shardToRequest# 72057594046644480 DomainKey# [OwnerId: 72057594046644480, LocalPathId: 1] DomainInfo.Params# Version: 1 PlanResolution: 1000 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 RedirectRequired# true 2026-01-08T15:30:57.669371Z node 1 :TX_PROXY DEBUG: schemereq.cpp:117: Actor# [1:866:2749] txid# 281474976715657 SEND to# 72057594046644480 shardToRequest {TEvModifySchemeTransaction txid# 281474976715657 TabletId# 72057594046644480} 2026-01-08T15:30:57.671556Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:30:57.672107Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1533: Actor# [1:866:2749] txid# 281474976715657 HANDLE EvClientConnected 2026-01-08T15:30:57.672913Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1555: Actor# [1:866:2749] txid# 281474976715657 Status StatusAccepted HANDLE {TEvModifySchemeTransactionResult Status# StatusAccepted txid# 281474976715657} 2026-01-08T15:30:57.672984Z node 1 :TX_PROXY DEBUG: schemereq.cpp:627: Actor# [1:866:2749] txid# 281474976715657 SEND to# [1:847:2730] Source {TEvProposeTransactionStatus txid# 281474976715657 Status# 53} 2026-01-08T15:30:57.722809Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3119: StateInit, received event# 268828672, Sender [1:882:2764], Recipient [1:890:2770]: NKikimr::TEvTablet::TEvBoot 2026-01-08T15:30:57.723957Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3119: StateInit, received event# 268828673, Sender [1:882:2764], Recipient [1:890:2770]: NKikimr::TEvTablet::TEvRestored 2026-01-08T15:30:57.724297Z node 1 :TX_DATASHARD INFO: datashard.cpp:374: TDataShard::OnActivateExecutor: tablet 72075186224037888 actor [1:890:2770] 2026-01-08T15:30:57.724613Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:661: TxInitSchema.Execute 2026-01-08T15:30:57.801762Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateInactive, received event# 268828684, Sender [1:882:2764], Recipient [1:890:2770]: NKikimr::TEvTablet::TEvFollowerSyncComplete 2026-01-08T15:30:57.802729Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:727: TxInitSchema.Complete 2026-01-08T15:30:57.802876Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:50: TDataShard::TTxInit::Execute 2026-01-08T15:30:57.804550Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1324: LoadChangeRecords: QueueSize: 0, at tablet: 72075186224037888 2026-01-08T15:30:57.804661Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1380: LoadLockChangeRecords at tablet: 72075186224037888 2026-01-08T15:30:57.804724Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1429: LoadChangeRecordCommits at tablet: 72075186224037888 2026-01-08T15:30:57.805088Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:94: TDataShard::TTxInit::Complete 2026-01-08T15:30:57.805235Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:102: TDataShard::TTxInitRestored::Execute 2026-01-08T15:30:57.805325Z node 1 :TX_DATASHARD DEBUG: 
datashard__init.cpp:142: DataShard 72075186224037888 persisting started state actor id [1:906:2770] in generation 1 2026-01-08T15:30:57.819205Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:151: TDataShard::TTxInitRestored::Complete 2026-01-08T15:30:57.904114Z node 1 :TX_DATASHARD INFO: datashard.cpp:418: Switched to work state WaitScheme tabletId 72075186224037888 2026-01-08T15:30:57.904332Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:458: 72075186224037888 not sending time cast registration request in state WaitScheme: missing processing params 2026-01-08T15:30:57.904450Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1259: Change sender created: at tablet: 72075186224037888, actorId: [1:908:2780] 2026-01-08T15:30:57.904488Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1264: Trying to activate change sender: at tablet: 72075186224037888 2026-01-08T15:30:57.904524Z node 1 :TX_DATASHARD INFO: datashard.cpp:1281: Cannot activate change sender: at tablet: 72075186224037888, state: WaitScheme 2026-01-08T15:30:57.904582Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2026-01-08T15:30:57.904740Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3153: StateWork, received event# 2146435072, Sender [1:890:2770], Recipient [1:890:2770]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvProgressTransaction 2026-01-08T15:30:57.904787Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3178: StateWork, processing event TEvPrivate::TEvProgressTransaction 2026-01-08T15:30:57.905137Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 72075186224037888 2026-01-08T15:30:57.905255Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 72075186224037888 2026-01-08T15:30:57.905371Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2026-01-08T15:30:57.905429Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 0 2026-01-08T15:30:57.905474Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:385: Check unit PlanQueue at 72075186224037888 2026-01-08T15:30:57.905507Z node 1 :TX_DATASHARD TRACE: plan_queue_unit.cpp:59: TPlanQueueUnit at 72075186224037888 has no attached operations 2026-01-08T15:30:57.905539Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:403: Unit PlanQueue has no ready operations at 72075186224037888 2026-01-08T15:30:57.905573Z node 1 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186224037888 TxInFly 0 2026-01-08T15:30:57.905637Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2026-01-08T15:30:57.905741Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3153: StateWork, received event# 269877761, Sender [1:893:2771], Recipient [1:890:2770]: NKikimr::TEvTabletPipe::TEvServerConnected 2026-01-08T15:30:57.905773Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3189: StateWork, processing event TEvTabletPipe::TEvServerConnected 2026-01-08T15:30:57.905827Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3740: Server connected at leader tablet# 72075186224037888, clientId# [1:889:2769], serverId# [1:893:2771], sessionId# [0:0:0] 2026-01-08T15:30:57.906227Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3153: StateWork, received event# 269549568, Sender [1:400:2399], Recipient [1:893:2771] 
2026-01-08T15:30:57.906272Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3159: StateWork, processing event TEvDataShard::TEvProposeTransaction 2026-01-08T15:30:57.914532Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037888 2026-01-08T15:30:57.914838Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1848: Trying to execute [0:281474976715657] at 72075186224037888 on unit CheckSchemeTx 2026-01-08T15:30:57.914906Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:133: Propose scheme transaction at tablet 72075186224037888 txId 281474976715657 ssId 72057594046644480 seqNo 2:1 2026-01-08T15:30:57.915016Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:221: Prepared scheme transaction txId 281474976715657 at tablet 72075186224037888 2026-01-08T15:30:57.915063Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1883: Execution status ... ected with status OK role: Leader [27:1173:2971] 2026-01-08T15:35:52.265328Z node 27 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:323: TClient[72075186224037888] send queued [27:1173:2971] 2026-01-08T15:35:52.265433Z node 27 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:645: TClient[72075186224037888] push event to server [27:1173:2971] 2026-01-08T15:35:52.265574Z node 27 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:331: TClient[72075186224037888] shutdown pipe due to pending shutdown request [27:1173:2971] 2026-01-08T15:35:52.265681Z node 27 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:514: TClient[72075186224037888] notify reset [27:1173:2971] 2026-01-08T15:35:52.266032Z node 27 :TX_DATASHARD TRACE: datashard_impl.h:3153: StateWork, received event# 269553210, Sender [27:1172:2970], Recipient [27:918:2782]: NKikimrTxDataShard.TEvCompactTable PathId { OwnerId: 72057594046644480 LocalId: 38 } CompactBorrowed: false 2026-01-08T15:35:52.266205Z node 27 :TABLET_EXECUTOR DEBUG: Leader{72075186224037888:1:16} Tx{28, NKikimr::NDataShard::TDataShard::TTxCompactTable} queued, type NKikimr::NDataShard::TDataShard::TTxCompactTable 2026-01-08T15:35:52.266382Z node 27 :TABLET_EXECUTOR DEBUG: Leader{72075186224037888:1:16} Tx{28, NKikimr::NDataShard::TDataShard::TTxCompactTable} took 4194304b of static mem, Memory{4194304 dyn 0} 2026-01-08T15:35:52.266553Z node 27 :TABLET_EXECUTOR DEBUG: TCompactionLogic PrepareForceCompaction for 72075186224037888 table 1001, mode Full, forced state None, forced mode Full 2026-01-08T15:35:52.266762Z node 27 :TX_DATASHARD INFO: datashard__compaction.cpp:141: Started background compaction# 1 of 72075186224037888 tableId# 38 localTid# 1001, requested from [27:1172:2970], partsCount# 0, memtableSize# 656, memtableWaste# 3952, memtableRows# 2 2026-01-08T15:35:52.266958Z node 27 :TABLET_EXECUTOR DEBUG: Leader{72075186224037888:1:16} Tx{28, NKikimr::NDataShard::TDataShard::TTxCompactTable} hope 1 -> done Change{16, redo 0b alter 0b annex 0, ~{ } -{ }, 0 gb} 2026-01-08T15:35:52.267111Z node 27 :TABLET_EXECUTOR DEBUG: Leader{72075186224037888:1:16} Tx{28, NKikimr::NDataShard::TDataShard::TTxCompactTable} release 4194304b of static, Memory{0 dyn 0} 2026-01-08T15:35:52.267550Z node 27 :TABLET_EXECUTOR DEBUG: TGenCompactionStrategy PrepareCompaction for 72075186224037888: task 1, edge 9223372036854775807/0, generation 0 2026-01-08T15:35:52.267695Z node 27 :TABLET_EXECUTOR INFO: Leader{72075186224037888:1:16} starting compaction 2026-01-08T15:35:52.268269Z node 27 :TABLET_EXECUTOR INFO: Leader{72075186224037888:1:17} starting Scan{1 on 1001, Compact{72075186224037888.1.16, eph 1}} 
2026-01-08T15:35:52.268489Z node 27 :TABLET_EXECUTOR INFO: Leader{72075186224037888:1:17} started compaction 1 2026-01-08T15:35:52.268604Z node 27 :TABLET_EXECUTOR DEBUG: TGenCompactionStrategy PrepareCompaction for 72075186224037888 started compaction 1 generation 0 ... blocking NKikimr::TEvBlobStorage::TEvPut from TABLET_REQ_WRITE_LOG to BS_PROXY_ACTOR cookie 137740556368054015 2026-01-08T15:35:52.272935Z node 27 :TABLET_EXECUTOR INFO: Leader{72075186224037888:1:17} Compact 1 on TGenCompactionParams{1001: gen 0 epoch +inf, 0 parts} step 16, product {tx status + 1 parts epoch 2} done 2026-01-08T15:35:52.273425Z node 27 :TABLET_EXECUTOR DEBUG: TGenCompactionStrategy CompactionFinished for 72075186224037888: compaction 1, generation 0 2026-01-08T15:35:52.273613Z node 27 :TABLET_EXECUTOR DEBUG: TGenCompactionStrategy CheckGeneration for 72075186224037888 generation 1, state Free, final id 0, final level 0 2026-01-08T15:35:52.273719Z node 27 :TABLET_EXECUTOR DEBUG: TGenCompactionStrategy CheckGeneration for 72075186224037888 generation 3, state Free, final id 0, final level 0 2026-01-08T15:35:52.274378Z node 27 :TX_DATASHARD DEBUG: datashard__compaction.cpp:203: CompactionComplete of tablet# 72075186224037888, table# 1001, finished edge# 1, ts 1970-01-01T00:00:02.513196Z 2026-01-08T15:35:52.274611Z node 27 :TABLET_EXECUTOR DEBUG: Leader{72075186224037888:1:18} Tx{29, NKikimr::NDataShard::TDataShard::TTxPersistFullCompactionTs} queued, type NKikimr::NDataShard::TDataShard::TTxPersistFullCompactionTs 2026-01-08T15:35:52.274765Z node 27 :TABLET_EXECUTOR DEBUG: Leader{72075186224037888:1:18} Tx{29, NKikimr::NDataShard::TDataShard::TTxPersistFullCompactionTs} took 4194304b of static mem, Memory{4194304 dyn 0} 2026-01-08T15:35:52.274921Z node 27 :TX_DATASHARD DEBUG: datashard__compaction.cpp:240: ReplyCompactionWaiters of tablet# 72075186224037888, table# 1001, finished edge# 1, front# 1 2026-01-08T15:35:52.275079Z node 27 :TX_DATASHARD DEBUG: datashard__compaction.cpp:260: ReplyCompactionWaiters of tablet# 72075186224037888, table# 1001 sending TEvCompactTableResult to# [27:1172:2970]pathId# [OwnerId: 72057594046644480, LocalPathId: 38] 2026-01-08T15:35:52.275971Z node 27 :TABLET_EXECUTOR DEBUG: Leader{72075186224037888:1:18} Tx{29, NKikimr::NDataShard::TDataShard::TTxPersistFullCompactionTs} hope 1 -> done Change{17, redo 83b alter 0b annex 0, ~{ 27 } -{ }, 0 gb} 2026-01-08T15:35:52.276160Z node 27 :TABLET_EXECUTOR DEBUG: Leader{72075186224037888:1:18} Tx{29, NKikimr::NDataShard::TDataShard::TTxPersistFullCompactionTs} release 4194304b of static, Memory{0 dyn 0} ... blocking NKikimr::TEvBlobStorage::TEvPut from TABLET_REQ_WRITE_LOG to BS_PROXY_ACTOR cookie 6386138104442571774 ... 
blocking NKikimr::TEvBlobStorage::TEvPut from TABLET_REQ_WRITE_LOG to BS_PROXY_ACTOR cookie 3578626551256560946 ========= Starting an immediate read ========= 2026-01-08T15:35:52.558650Z node 27 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:154: TClient[72075186224037888] send [27:1107:2916] 2026-01-08T15:35:52.558788Z node 27 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:645: TClient[72075186224037888] push event to server [27:1107:2916] 2026-01-08T15:35:52.559250Z node 27 :TX_DATASHARD TRACE: datashard_impl.h:3153: StateWork, received event# 269553215, Sender [27:1197:2978], Recipient [27:918:2782]: NKikimrTxDataShard.TEvRead ReadId: 0 TableId { OwnerId: 72057594046644480 TableId: 38 SchemaVersion: 1 } Columns: 1 Columns: 2 ResultFormat: FORMAT_CELLVEC MaxRows: 32767 MaxBytes: 5242880 Reverse: false KeysSize: 1 2026-01-08T15:35:52.559522Z node 27 :TABLET_EXECUTOR DEBUG: Leader{72075186224037888:1:19} Tx{30, NKikimr::NDataShard::TDataShard::TTxReadViaPipeline} queued, type NKikimr::NDataShard::TDataShard::TTxReadViaPipeline 2026-01-08T15:35:52.559703Z node 27 :TABLET_EXECUTOR DEBUG: Leader{72075186224037888:1:19} Tx{30, NKikimr::NDataShard::TDataShard::TTxReadViaPipeline} took 4194304b of static mem, Memory{4194304 dyn 0} 2026-01-08T15:35:52.559895Z node 27 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2697: TTxReadViaPipeline execute: at tablet# 72075186224037888, FollowerId 0 2026-01-08T15:35:52.560028Z node 27 :TX_DATASHARD TRACE: datashard.cpp:2377: GetMvccTxVersion at 72075186224037888 CompleteEdge# v2502/281474976715662 IncompleteEdge# v{min} UnprotectedReadEdge# v{min} ImmediateWriteEdge# v{min} ImmediateWriteEdgeReplied# v{min} 2026-01-08T15:35:52.560144Z node 27 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2800: 72075186224037888 changed HEAD read to non-repeatable v2502/18446744073709551615 2026-01-08T15:35:52.560307Z node 27 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1848: Trying to execute [0:5] at 72075186224037888 on unit CheckRead 2026-01-08T15:35:52.560500Z node 27 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1883: Execution status for [0:5] at 72075186224037888 is Executed 2026-01-08T15:35:52.560626Z node 27 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1931: Advance execution plan for [0:5] at 72075186224037888 executing on unit CheckRead 2026-01-08T15:35:52.560722Z node 27 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1937: Add [0:5] at 72075186224037888 to execution unit BuildAndWaitDependencies 2026-01-08T15:35:52.560808Z node 27 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1848: Trying to execute [0:5] at 72075186224037888 on unit BuildAndWaitDependencies 2026-01-08T15:35:52.560897Z node 27 :TX_DATASHARD TRACE: datashard_pipeline.cpp:483: Activated operation [0:5] at 72075186224037888 2026-01-08T15:35:52.560981Z node 27 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1883: Execution status for [0:5] at 72075186224037888 is Executed 2026-01-08T15:35:52.561024Z node 27 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1931: Advance execution plan for [0:5] at 72075186224037888 executing on unit BuildAndWaitDependencies 2026-01-08T15:35:52.561054Z node 27 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1937: Add [0:5] at 72075186224037888 to execution unit ExecuteRead 2026-01-08T15:35:52.561089Z node 27 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1848: Trying to execute [0:5] at 72075186224037888 on unit ExecuteRead 2026-01-08T15:35:52.561307Z node 27 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:1748: 72075186224037888 Execute read# 1, request: { ReadId: 0 TableId { OwnerId: 
72057594046644480 TableId: 38 SchemaVersion: 1 } Columns: 1 Columns: 2 ResultFormat: FORMAT_CELLVEC MaxRows: 32767 MaxBytes: 5242880 Reverse: false } 2026-01-08T15:35:52.561627Z node 27 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1883: Execution status for [0:5] at 72075186224037888 is DelayComplete 2026-01-08T15:35:52.561693Z node 27 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1931: Advance execution plan for [0:5] at 72075186224037888 executing on unit ExecuteRead 2026-01-08T15:35:52.561788Z node 27 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1937: Add [0:5] at 72075186224037888 to execution unit CompletedOperations 2026-01-08T15:35:52.561876Z node 27 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1848: Trying to execute [0:5] at 72075186224037888 on unit CompletedOperations 2026-01-08T15:35:52.561947Z node 27 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1883: Execution status for [0:5] at 72075186224037888 is Executed 2026-01-08T15:35:52.561981Z node 27 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1931: Advance execution plan for [0:5] at 72075186224037888 executing on unit CompletedOperations 2026-01-08T15:35:52.562031Z node 27 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1943: Execution plan for [0:5] at 72075186224037888 has finished 2026-01-08T15:35:52.562127Z node 27 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2932: TTxReadViaPipeline(69) Execute with status# Executed at tablet# 72075186224037888 2026-01-08T15:35:52.562309Z node 27 :TABLET_EXECUTOR DEBUG: Leader{72075186224037888:1:19} Tx{30, NKikimr::NDataShard::TDataShard::TTxReadViaPipeline} hope 1 -> done Change{18, redo 0b alter 0b annex 0, ~{ } -{ }, 0 gb} 2026-01-08T15:35:52.562509Z node 27 :TABLET_EXECUTOR DEBUG: Leader{72075186224037888:1:19} Tx{30, NKikimr::NDataShard::TDataShard::TTxReadViaPipeline} release 4194304b of static, Memory{0 dyn 0} 2026-01-08T15:35:52.658344Z node 27 :TABLET_EXECUTOR DEBUG: Leader{72057594046316545:2:17} Tx{24, NKikimr::NFlatTxCoordinator::TTxCoordinator::TTxPlanStep} queued, type NKikimr::NFlatTxCoordinator::TTxCoordinator::TTxPlanStep 2026-01-08T15:35:52.658570Z node 27 :TABLET_EXECUTOR DEBUG: Leader{72057594046316545:2:17} Tx{24, NKikimr::NFlatTxCoordinator::TTxCoordinator::TTxPlanStep} took 4194304b of static mem, Memory{4194304 dyn 0} 2026-01-08T15:35:52.658971Z node 27 :TABLET_EXECUTOR DEBUG: Leader{72057594046316545:2:17} Tx{24, NKikimr::NFlatTxCoordinator::TTxCoordinator::TTxPlanStep} hope 1 -> done Change{16, redo 134b alter 0b annex 0, ~{ 2 } -{ }, 0 gb} 2026-01-08T15:35:52.659142Z node 27 :TABLET_EXECUTOR DEBUG: Leader{72057594046316545:2:17} Tx{24, NKikimr::NFlatTxCoordinator::TTxCoordinator::TTxPlanStep} release 4194304b of static, Memory{0 dyn 0} 2026-01-08T15:35:52.660187Z node 27 :TABLET_EXECUTOR DEBUG: Leader{72057594046316545:2:18} commited cookie 1 for step 17 2026-01-08T15:35:52.660613Z node 27 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:154: TClient[72057594046382081] send [27:526:2472] 2026-01-08T15:35:52.660728Z node 27 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:645: TClient[72057594046382081] push event to server [27:526:2472] |87.9%| [TM] {asan, default-linux-x86_64, release} ydb/core/ydb_convert/ut/unittest >> ConvertYdbPermissionNameToACLAttrs::SimpleConvertGood [GOOD] |87.9%| [TM] {BAZEL_UPLOAD} ydb/core/tx/datashard/ut_volatile/unittest |87.9%| [TM] {BAZEL_UPLOAD} ydb/core/ydb_convert/ut/unittest >> TTxDataShardUploadRows::TestUploadShadowRowsShadowDataAlterSplitThenPublish [GOOD] |87.9%| [TM] {asan, default-linux-x86_64, release} 
ydb/core/kqp/ut/close_with_load/unittest |87.9%| [TM] {BAZEL_UPLOAD} ydb/core/kqp/ut/close_with_load/unittest |87.9%| [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/close_with_load/unittest |87.9%| [TM] {BAZEL_UPLOAD} ydb/core/kqp/ut/close_with_load/unittest >> TContinuousBackupWithRebootsTests::TakeSeveralIncrementalBackups [GOOD] |87.9%| [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/close_with_load/unittest |87.9%| [TM] {BAZEL_UPLOAD} ydb/core/kqp/ut/close_with_load/unittest |87.9%| [TA] $(B)/ydb/core/ydb_convert/ut/test-results/unittest/{meta.json ... results_accumulator.log} |87.9%| [TA] {BAZEL_UPLOAD} $(B)/ydb/core/ydb_convert/ut/test-results/unittest/{meta.json ... results_accumulator.log} >> TSchemeShardConsistentCopyTablesTest::ConsistentCopyWithOmitIndexesTrueSkipsIndexes [GOOD] >> KqpQueryService::CloseSessionsWithLoad |87.9%| [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/close_with_load/unittest ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/datashard/ut_upload_rows/unittest >> TTxDataShardUploadRows::TestUploadShadowRowsShadowDataAlterSplitThenPublish [GOOD] Test command err: ... waiting for SysViewsRoster update finished 2026-01-08T15:35:41.310111Z node 1 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:125: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, got undelivered to scheme cache: ActorUnknown 2026-01-08T15:35:41.475092Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:114:2161], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2026-01-08T15:35:41.475614Z node 1 :METADATA_PROVIDER ERROR: log.cpp:841: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2026-01-08T15:35:41.475859Z node 1 :METADATA_PROVIDER ERROR: log.cpp:841: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2026-01-08T15:35:41.788867Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2026-01-08T15:35:41.789024Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2026-01-08T15:35:41.880747Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1374: Notification cookie mismatch for subscription [1:34:2081] 1767886538227888 != 1767886538227892 2026-01-08T15:35:41.892745Z node 1 :IMPORT WARN: schemeshard_import.cpp:440: Table profiles were not loaded 2026-01-08T15:35:41.943763Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2026-01-08T15:35:42.044315Z node 1 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: PathErrorUnknown, path: Root/.metadata/script_executions ... waiting for SysViewsRoster update finished (done) 2026-01-08T15:35:42.371785Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2026-01-08T15:35:42.387042Z node 1 :TX_CONVEYOR ERROR: log.cpp:841: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2026-01-08T15:35:42.499878Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:35:42.569189Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3119: StateInit, received event# 268828672, Sender [1:877:2759], Recipient [1:886:2765]: NKikimr::TEvTablet::TEvBoot 2026-01-08T15:35:42.570423Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3119: StateInit, received event# 268828673, Sender [1:877:2759], Recipient [1:886:2765]: NKikimr::TEvTablet::TEvRestored 2026-01-08T15:35:42.570808Z node 1 :TX_DATASHARD INFO: datashard.cpp:374: TDataShard::OnActivateExecutor: tablet 72075186224037888 actor [1:886:2765] 2026-01-08T15:35:42.571173Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:661: TxInitSchema.Execute 2026-01-08T15:35:42.623142Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateInactive, received event# 268828684, Sender [1:877:2759], Recipient [1:886:2765]: NKikimr::TEvTablet::TEvFollowerSyncComplete 2026-01-08T15:35:42.624063Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:727: TxInitSchema.Complete 2026-01-08T15:35:42.624206Z node 1 :TX_DATASHARD DEBUG: 
datashard__init.cpp:50: TDataShard::TTxInit::Execute 2026-01-08T15:35:42.626253Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1324: LoadChangeRecords: QueueSize: 0, at tablet: 72075186224037888 2026-01-08T15:35:42.626383Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1380: LoadLockChangeRecords at tablet: 72075186224037888 2026-01-08T15:35:42.626458Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1429: LoadChangeRecordCommits at tablet: 72075186224037888 2026-01-08T15:35:42.627031Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:94: TDataShard::TTxInit::Complete 2026-01-08T15:35:42.627218Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:102: TDataShard::TTxInitRestored::Execute 2026-01-08T15:35:42.627333Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:142: DataShard 72075186224037888 persisting started state actor id [1:901:2765] in generation 1 2026-01-08T15:35:42.638568Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:151: TDataShard::TTxInitRestored::Complete 2026-01-08T15:35:42.668975Z node 1 :TX_DATASHARD INFO: datashard.cpp:418: Switched to work state WaitScheme tabletId 72075186224037888 2026-01-08T15:35:42.669260Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:458: 72075186224037888 not sending time cast registration request in state WaitScheme: missing processing params 2026-01-08T15:35:42.669411Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1259: Change sender created: at tablet: 72075186224037888, actorId: [1:903:2775] 2026-01-08T15:35:42.669465Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1264: Trying to activate change sender: at tablet: 72075186224037888 2026-01-08T15:35:42.669503Z node 1 :TX_DATASHARD INFO: datashard.cpp:1281: Cannot activate change sender: at tablet: 72075186224037888, state: WaitScheme 2026-01-08T15:35:42.669584Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2026-01-08T15:35:42.669872Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3153: StateWork, received event# 2146435072, Sender [1:886:2765], Recipient [1:886:2765]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvProgressTransaction 2026-01-08T15:35:42.669923Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3178: StateWork, processing event TEvPrivate::TEvProgressTransaction 2026-01-08T15:35:42.670474Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 72075186224037888 2026-01-08T15:35:42.670586Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 72075186224037888 2026-01-08T15:35:42.670684Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2026-01-08T15:35:42.670732Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 0 2026-01-08T15:35:42.670810Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:385: Check unit PlanQueue at 72075186224037888 2026-01-08T15:35:42.670857Z node 1 :TX_DATASHARD TRACE: plan_queue_unit.cpp:59: TPlanQueueUnit at 72075186224037888 has no attached operations 2026-01-08T15:35:42.670907Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:403: Unit PlanQueue has no ready operations at 72075186224037888 2026-01-08T15:35:42.670941Z node 1 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186224037888 TxInFly 0 2026-01-08T15:35:42.670986Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 
72075186224037888 2026-01-08T15:35:42.671436Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3153: StateWork, received event# 269877761, Sender [1:887:2766], Recipient [1:886:2765]: NKikimr::TEvTabletPipe::TEvServerConnected 2026-01-08T15:35:42.671481Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3189: StateWork, processing event TEvTabletPipe::TEvServerConnected 2026-01-08T15:35:42.671530Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3740: Server connected at leader tablet# 72075186224037888, clientId# [1:882:2762], serverId# [1:887:2766], sessionId# [0:0:0] 2026-01-08T15:35:42.671617Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3153: StateWork, received event# 269549568, Sender [1:400:2399], Recipient [1:887:2766] 2026-01-08T15:35:42.671686Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3159: StateWork, processing event TEvDataShard::TEvProposeTransaction 2026-01-08T15:35:42.671788Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037888 2026-01-08T15:35:42.672073Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1848: Trying to execute [0:281474976715657] at 72075186224037888 on unit CheckSchemeTx 2026-01-08T15:35:42.672159Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:133: Propose scheme transaction at tablet 72075186224037888 txId 281474976715657 ssId 72057594046644480 seqNo 2:1 2026-01-08T15:35:42.672270Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:221: Prepared scheme transaction txId 281474976715657 at tablet 72075186224037888 2026-01-08T15:35:42.672326Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1883: Execution status for [0:281474976715657] at 72075186224037888 is ExecutedNoMoreRestarts 2026-01-08T15:35:42.672372Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1931: Advance execution plan for [0:281474976715657] at 72075186224037888 executing on unit CheckSchemeTx 2026-01-08T15:35:42.672410Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1937: Add [0:281474976715657] at 72075186224037888 to execution unit StoreSchemeTx 2026-01-08T15:35:42.672450Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1848: Trying to execute [0:281474976715657] at 72075186224037888 on unit StoreSchemeTx 2026-01-08T15:35:42.672830Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1883: Execution status for [0:281474976715657] at 72075186224037888 is DelayCompleteNoMoreRestarts 2026-01-08T15:35:42.672879Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1931: Advance execution plan for [0:281474976715657] at 72075186224037888 executing on unit StoreSchemeTx 2026-01-08T15:35:42.672918Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1937: Add [0:281474976715657] at 72075186224037888 to execution unit FinishPropose 2026-01-08T15:35:42.672969Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1848: Trying to execute [0:281474976715657] at 72075186224037888 on unit FinishPropose 2026-01-08T15:35:42.673032Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1883: Execution status for [0:281474976715657] at 72075186224037888 is DelayComplete 2026-01-08T15:35:42.673064Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1931: Advance execution plan for [0:281474976715657] at 72075186224037888 executing on unit FinishPropose 2026-01-08T15:35:42.673113Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1937: Add [0:281474976715657] at 72075186224037888 to execution unit WaitForPlan 2026-01-08T15:35:42.673148Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1848: Trying to execute [0:281474976715657] at 
72075186224037888 on unit WaitForPlan 2026-01-08T15:35:42.673177Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1853: Operation [0:281474976715657] at 72075186224037888 is not ready to execute on unit WaitForPlan 2026-01-08T15:35:42.674802Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3153: StateWork, received event# 269746185, Sender [1:904:2776], Recipient [1:886:2765]: NKikimr::TEvTxProxySchemeCache::TEvWatchNotifyUpdated 2026-01-08T15:35:42.674861Z node 1 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:180: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2026-01-08T15:35:42.685729Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 72075186224037888 2026-01-08T15:35:42.685814Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1954: Complete execution for [0:281474 ... pp:1883: Execution status for [4500:281474976715668] at 72075186224037889 is DelayComplete 2026-01-08T15:35:57.729427Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1931: Advance execution plan for [4500:281474976715668] at 72075186224037889 executing on unit CompleteOperation 2026-01-08T15:35:57.729486Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1937: Add [4500:281474976715668] at 72075186224037889 to execution unit CompletedOperations 2026-01-08T15:35:57.729532Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1848: Trying to execute [4500:281474976715668] at 72075186224037889 on unit CompletedOperations 2026-01-08T15:35:57.729570Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1883: Execution status for [4500:281474976715668] at 72075186224037889 is Executed 2026-01-08T15:35:57.729597Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1931: Advance execution plan for [4500:281474976715668] at 72075186224037889 executing on unit CompletedOperations 2026-01-08T15:35:57.729630Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1943: Execution plan for [4500:281474976715668] at 72075186224037889 has finished 2026-01-08T15:35:57.729674Z node 3 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037889 (dry run) active 0 active planned 0 immediate 0 planned 0 2026-01-08T15:35:57.729715Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:327: Check candidate unit PlanQueue at 72075186224037889 2026-01-08T15:35:57.729748Z node 3 :TX_DATASHARD TRACE: plan_queue_unit.cpp:59: TPlanQueueUnit at 72075186224037889 has no attached operations 2026-01-08T15:35:57.729794Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:341: Unit PlanQueue has no ready operations at 72075186224037889 2026-01-08T15:35:57.743149Z node 3 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037889 2026-01-08T15:35:57.743250Z node 3 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037889 2026-01-08T15:35:57.743294Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1954: Complete execution for [4500:281474976715668] at 72075186224037889 on unit CompleteOperation 2026-01-08T15:35:57.743387Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:810: Complete [4500 : 281474976715668] from 72075186224037889 at tablet 72075186224037889 send result to client [3:1341:3112], exec latency: 0 ms, propose latency: 0 ms 2026-01-08T15:35:57.743466Z node 3 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037889 2026-01-08T15:35:57.743943Z node 3 
:TX_DATASHARD TRACE: datashard_impl.h:3153: StateWork, received event# 269287940, Sender [3:1341:3112], Recipient [3:1168:2963]: NKikimrTx.TEvStreamClearanceResponse TxId: 281474976715668 Cleared: true 2026-01-08T15:35:57.743990Z node 3 :TX_DATASHARD TRACE: datashard_impl.h:3174: StateWork, processing event TEvTxProcessing::TEvStreamClearanceResponse 2026-01-08T15:35:57.744079Z node 3 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:98: Sending '{TEvPlanStepAccepted TabletId# 72075186224037890 step# 4500} 2026-01-08T15:35:57.744136Z node 3 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037890 2026-01-08T15:35:57.744170Z node 3 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037890 2026-01-08T15:35:57.744388Z node 3 :TX_DATASHARD TRACE: datashard_impl.h:3153: StateWork, received event# 2146435072, Sender [3:1168:2963], Recipient [3:1168:2963]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvProgressTransaction 2026-01-08T15:35:57.744431Z node 3 :TX_DATASHARD TRACE: datashard_impl.h:3178: StateWork, processing event TEvPrivate::TEvProgressTransaction 2026-01-08T15:35:57.744489Z node 3 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037890 2026-01-08T15:35:57.744528Z node 3 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037890 active 1 active planned 1 immediate 0 planned 1 2026-01-08T15:35:57.744573Z node 3 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:307: Found ready candidate operation [4500:281474976715668] at 72075186224037890 for WaitForStreamClearance 2026-01-08T15:35:57.744610Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1848: Trying to execute [4500:281474976715668] at 72075186224037890 on unit WaitForStreamClearance 2026-01-08T15:35:57.744647Z node 3 :TX_DATASHARD TRACE: wait_for_stream_clearance_unit.cpp:156: Got stream clearance for [4500:281474976715668] at 72075186224037890 2026-01-08T15:35:57.744688Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1883: Execution status for [4500:281474976715668] at 72075186224037890 is Executed 2026-01-08T15:35:57.744725Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1931: Advance execution plan for [4500:281474976715668] at 72075186224037890 executing on unit WaitForStreamClearance 2026-01-08T15:35:57.744758Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1937: Add [4500:281474976715668] at 72075186224037890 to execution unit ReadTableScan 2026-01-08T15:35:57.744792Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1848: Trying to execute [4500:281474976715668] at 72075186224037890 on unit ReadTableScan 2026-01-08T15:35:57.745037Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1883: Execution status for [4500:281474976715668] at 72075186224037890 is Continue 2026-01-08T15:35:57.745076Z node 3 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037890 (dry run) active 1 active planned 1 immediate 0 planned 1 2026-01-08T15:35:57.745109Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:385: Check unit PlanQueue at 72075186224037890 2026-01-08T15:35:57.745151Z node 3 :TX_DATASHARD TRACE: plan_queue_unit.cpp:52: TPlanQueueUnit at 72075186224037890 out-of-order limits exceeded 2026-01-08T15:35:57.745187Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:403: Unit PlanQueue has no ready operations at 72075186224037890 2026-01-08T15:35:57.746010Z node 3 :TX_DATASHARD TRACE: datashard_impl.h:3153: StateWork, received 
event# 2146435082, Sender [3:1360:3129], Recipient [3:1168:2963]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvRegisterScanActor 2026-01-08T15:35:57.746052Z node 3 :TX_DATASHARD TRACE: datashard_impl.h:3183: StateWork, processing event TEvPrivate::TEvRegisterScanActor 2026-01-08T15:35:57.746382Z node 3 :TX_DATASHARD DEBUG: read_table_scan.cpp:514: Got quota for read table scan ShardId: 72075186224037890, TxId: 281474976715668, MessageQuota: 1 2026-01-08T15:35:57.747090Z node 3 :TX_DATASHARD DEBUG: read_table_scan.cpp:662: Send response data ShardId: 72075186224037890, TxId: 281474976715668, Size: 54, Rows: 0, PendingAcks: 1, MessageQuota: 0 2026-01-08T15:35:57.748683Z node 3 :TX_DATASHARD DEBUG: read_table_scan.cpp:483: Got stream data ack ShardId: 72075186224037890, TxId: 281474976715668, PendingAcks: 0 2026-01-08T15:35:57.748744Z node 3 :TX_DATASHARD DEBUG: read_table_scan.cpp:718: Finish scan ShardId: 72075186224037890, TxId: 281474976715668, MessageQuota: 0 2026-01-08T15:35:57.829606Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:4486: FullScan complete at 72075186224037890 2026-01-08T15:35:57.829693Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:4492: Found op: cookie: 281474976715668, at: 72075186224037890 2026-01-08T15:35:57.829900Z node 3 :TX_DATASHARD TRACE: datashard_impl.h:3153: StateWork, received event# 2146435072, Sender [3:1168:2963], Recipient [3:1168:2963]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvProgressTransaction 2026-01-08T15:35:57.829947Z node 3 :TX_DATASHARD TRACE: datashard_impl.h:3178: StateWork, processing event TEvPrivate::TEvProgressTransaction 2026-01-08T15:35:57.830016Z node 3 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037890 2026-01-08T15:35:57.830060Z node 3 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037890 active 1 active planned 1 immediate 0 planned 1 2026-01-08T15:35:57.830107Z node 3 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:307: Found ready candidate operation [4500:281474976715668] at 72075186224037890 for ReadTableScan 2026-01-08T15:35:57.830144Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1848: Trying to execute [4500:281474976715668] at 72075186224037890 on unit ReadTableScan 2026-01-08T15:35:57.830183Z node 3 :TX_DATASHARD TRACE: read_table_scan_unit.cpp:158: ReadTable scan complete for [4500:281474976715668] at 72075186224037890 error: , IsFatalError: 0 2026-01-08T15:35:57.830228Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1883: Execution status for [4500:281474976715668] at 72075186224037890 is Executed 2026-01-08T15:35:57.830266Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1931: Advance execution plan for [4500:281474976715668] at 72075186224037890 executing on unit ReadTableScan 2026-01-08T15:35:57.830297Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1937: Add [4500:281474976715668] at 72075186224037890 to execution unit CompleteOperation 2026-01-08T15:35:57.830350Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1848: Trying to execute [4500:281474976715668] at 72075186224037890 on unit CompleteOperation 2026-01-08T15:35:57.830590Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1883: Execution status for [4500:281474976715668] at 72075186224037890 is DelayComplete 2026-01-08T15:35:57.830634Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1931: Advance execution plan for [4500:281474976715668] at 72075186224037890 executing on unit CompleteOperation 2026-01-08T15:35:57.830668Z node 3 :TX_DATASHARD TRACE: 
datashard_pipeline.cpp:1937: Add [4500:281474976715668] at 72075186224037890 to execution unit CompletedOperations 2026-01-08T15:35:57.830706Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1848: Trying to execute [4500:281474976715668] at 72075186224037890 on unit CompletedOperations 2026-01-08T15:35:57.830743Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1883: Execution status for [4500:281474976715668] at 72075186224037890 is Executed 2026-01-08T15:35:57.830770Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1931: Advance execution plan for [4500:281474976715668] at 72075186224037890 executing on unit CompletedOperations 2026-01-08T15:35:57.830799Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1943: Execution plan for [4500:281474976715668] at 72075186224037890 has finished 2026-01-08T15:35:57.830834Z node 3 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037890 (dry run) active 0 active planned 0 immediate 0 planned 0 2026-01-08T15:35:57.830867Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:327: Check candidate unit PlanQueue at 72075186224037890 2026-01-08T15:35:57.830901Z node 3 :TX_DATASHARD TRACE: plan_queue_unit.cpp:59: TPlanQueueUnit at 72075186224037890 has no attached operations 2026-01-08T15:35:57.830933Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:341: Unit PlanQueue has no ready operations at 72075186224037890 2026-01-08T15:35:57.842652Z node 3 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037890 2026-01-08T15:35:57.842735Z node 3 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037890 2026-01-08T15:35:57.842775Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1954: Complete execution for [4500:281474976715668] at 72075186224037890 on unit CompleteOperation 2026-01-08T15:35:57.842837Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:810: Complete [4500 : 281474976715668] from 72075186224037890 at tablet 72075186224037890 send result to client [3:1341:3112], exec latency: 0 ms, propose latency: 1 ms 2026-01-08T15:35:57.842921Z node 3 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037890 |87.9%| [TM] {BAZEL_UPLOAD} ydb/core/kqp/ut/close_with_load/unittest |87.9%| [TM] {BAZEL_UPLOAD} ydb/core/tx/datashard/ut_upload_rows/unittest >> YdbProxy::RemoveDirectory [GOOD] >> YdbProxy::StaticCreds ------- [TM] {asan, default-linux-x86_64, release} ydb/public/sdk/cpp/src/client/topic/ut/with_direct_read_ut/unittest >> TxUsage::Sinks_Oltp_WriteToTopicAndTable_6_Query [GOOD] Test command err: 2026-01-08T15:31:58.944560Z node 1 :METADATA_PROVIDER WARN: log.cpp:841: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7593013920898000044:2071];send_to=[0:7307199536658146131:7762515]; 2026-01-08T15:31:58.949606Z node 1 :METADATA_PROVIDER ERROR: log.cpp:841: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2026-01-08T15:31:59.092905Z node 1 :PQ_READ_PROXY DEBUG: caching_service.cpp:44: Direct read cache: : Created 2026-01-08T15:31:59.552021Z node 1 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: LookupError, path: Root/.metadata/script_executions 2026-01-08T15:31:59.585522Z node 1 :HIVE WARN: node_info.cpp:25: 
HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2026-01-08T15:31:59.596398Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2026-01-08T15:31:59.700078Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2026-01-08T15:31:59.760599Z node 1 :IMPORT WARN: schemeshard_import.cpp:440: Table profiles were not loaded 2026-01-08T15:31:59.838669Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1374: Notification cookie mismatch for subscription [1:7593013920898000012:2081] 1767886318916653 != 1767886318916656 2026-01-08T15:31:59.850812Z node 1 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: PathErrorUnknown, path: Root/.metadata/script_executions 2026-01-08T15:32:00.013826Z node 1 :TX_CONVEYOR ERROR: log.cpp:841: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2026-01-08T15:32:00.071252Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: /home/runner/.ya/build/build_root/663n/0021bc/r3tmp/yandexdleL4x.tmp 2026-01-08T15:32:00.071284Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: /home/runner/.ya/build/build_root/663n/0021bc/r3tmp/yandexdleL4x.tmp 2026-01-08T15:32:00.071502Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:202: successfully initialized from file: /home/runner/.ya/build/build_root/663n/0021bc/r3tmp/yandexdleL4x.tmp 2026-01-08T15:32:00.071634Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2026-01-08T15:32:00.100836Z INFO: TTestServer started on Port 24393 GrpcPort 1121 PQClient connected to localhost:1121 2026-01-08T15:32:00.497744Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2026-01-08T15:32:00.596591Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:32:00.765621Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:123: NotifyTxCompletion, unknown transaction, txId: 281474976715660, at schemeshard: 72057594046644480 2026-01-08T15:32:03.805333Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7593013942372837538:2335], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:32:03.806041Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7593013942372837525:2332], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:32:03.806143Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:263: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:32:03.816053Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7593013942372837549:2338], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:32:03.816147Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:263: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:32:03.820360Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715662:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2026-01-08T15:32:03.844314Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7593013942372837548:2337], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715662 completed, doublechecking } 2026-01-08T15:32:03.929292Z node 1 :METADATA_PROVIDER ERROR: log.cpp:841: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7593013920898000044:2071];send_to=[0:7307199536658146131:7762515]; 2026-01-08T15:32:03.929631Z node 1 :METADATA_PROVIDER ERROR: log.cpp:841: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2026-01-08T15:32:04.115379Z node 1 :TX_PROXY ERROR: schemereq.cpp:624: Actor# [1:7593013942372837614:2647] txid# 281474976715663, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 45], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2026-01-08T15:32:04.186737Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:32:04.288380Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:32:04.515347Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) === Init DC: UPSERT INTO `/Root/PQ/Config/V2/Cluster` (name, balancer, local, enabled, weight) VALUES ("dc1", "localhost", true, true, 1000), ("dc2", "dc2.logbroker.yandex.net", false, true, 1000); === CheckClustersList. Subcribe to ClusterTracker from [1:7593013950962772516:2833] === CheckClustersList. Ok 2026-01-08T15:32:10.786511Z :Sinks_Oltp_WriteToTopic_1_Table INFO: TTopicSdkTestSetup started 2026-01-08T15:32:10.837574Z node 1 :PQ_READ_PROXY DEBUG: grpc_pq_schema.cpp:133: new create topic request 2026-01-08T15:32:10.911647Z node 1 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1657: [72075186224037893][] pipe [1:7593013972437609208:2934] connected; active server actors: 1 2026-01-08T15:32:10.911906Z node 1 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer__balancing.cpp:1521: [72075186224037893][test-topic] updating configuration. Deleted partitions []. 
Added partitions [0] 2026-01-08T15:32:10.913647Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:3023: [PQ: 72075186224037892] Handle TEvInterconnect::TEvNodeInfo 2026-01-08T15:32:10.914780Z node 1 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer.cpp:756: [72075186224037893][test-topic] Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at RB 72075186224037893 2026-01-08T15:32:10.914889Z node 1 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer.cpp:142: [72075186224037893][test-topic] BALANCER INIT DONE for test-topic: (0, 72075186224037892) 2026-01-08T15:32:10.919488Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:3055: [PQ: 72075186224037892] Transactions request. From tx_00000000000000000000, To tx_18446744073709551615 2026-01-08T15:32:10.919758Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:788: [PQ: 72075186224037892] no config, start with empty partitions and default config 2026-01-08T15:32:10.919799Z node 1 :PERSQUEUE NOTICE: pq_impl.cpp:927: [PQ: 72075186224037892] disable metering: reason# billing is not enabled in BillingMeteringConfig 2026-01-08T15:32:10.919848Z node 1 :PERSQUEUE INFO: pq_impl.cpp:613: [PQ: 72075186224037892] doesn't have tx writes info 2026-01-08T15:32:10.921030Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:2784: [PQ: 72075186224037892] server connected, pipe [1:7593013972437609207:2933], now have 1 active actors on pipe 2026-01-08T15:32:10.921056Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:2784: [PQ: 72075186224037892] server connected, pipe [1:7593013972437609247:2456], now have 1 active actors on pipe 2026-01-08T15:32:10.927157Z node 1 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer.cpp:394: [72075186224037893][test-topic] TEvClientConnected TabletId 72075186224037892, NodeId 1, Generation 1 2026-01-08T15:32:10.949423Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:3626: [PQ: 72075186224037892] Send TEvKeyValue::TEvRequest (WRITE_TX_COOKIE) 2026-01-08T15:32:10.950788Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:1059: [PQ: 72075186224037892] Handle TEvKeyValue::TEvResponse (WRITE_TX_COOKIE) 2026-01-08T15:32:10.953476Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:2784: [PQ: 72075186224037892] server connected, pipe [1:7593013972437609258:2962], now have 1 active actors on pipe 2026-01-08T15:32:10.954381Z node 1 :PERSQUEUE DEBUG: partition_init.cpp:86: [test-topic:0:Initializer] Start initializing step TInitConfigStep 2026-01-08T15:32:10.954469Z node 1 :PERSQUEUE DEBUG: partition.cpp:1324: [72075186224037892][Partition][0][StateInit] HandleOnInit TEvPQ::TEvProposePartitionConfig 2026-01-08T15:32:10.954793Z node 1 :PERSQUEUE DEBUG: partition_init.cpp:86: [test-topic:0:Initializer] Start initializing step TInitInternalFieldsStep 2026-01-08T15:32:10.955102Z node 1 :PERSQUEUE INFO: partition_init.cpp:1155: [72075186224037892][Partition][0][StateInit] bootstrapping 0 [1:7593013972437609261:2457] 2026-01-08T15:32:10.956116Z node 1 :PERSQUEUE DEBUG: partition_init.cpp:86: [test-topic:0:Initializer] Start initializing step TInitFieldsStep 2026-01-08T15:32:10.956148Z node 1 :PERSQUEUE DEBUG: partition_init.cpp:66: [test-topic:0:Initializer] Initializing completed. 2026-01-08T15:32:10.956178Z node 1 :PERSQUEUE INFO: partition.cpp:712: [ ... 
iled 2026-01-08T15:35:54.699887Z node 14 :PQ_READ_PROXY INFO: direct_read_actor.cpp:66: Direct read proxy [14:7593014913259725742:2560]: session cookie 2 consumer test-consumer session test-consumer_14_1_16808562226932957702_v1 grpc closed 2026-01-08T15:35:54.699911Z node 14 :PQ_READ_PROXY INFO: direct_read_actor.cpp:167: Direct read proxy [14:7593014913259725742:2560]: session cookie 2 consumer test-consumer session test-consumer_14_1_16808562226932957702_v1 proxy is DEAD 2026-01-08T15:35:54.700558Z node 14 :PQ_WRITE_PROXY DEBUG: write_session_actor.cpp:254: session v1 cookie: 7 sessionId: test-message_group_id|568553e-7a131106-70c65d3f-53309d04_0 grpc read done: success: 0 data: 2026-01-08T15:35:54.700573Z node 14 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:256: session v1 cookie: 7 sessionId: test-message_group_id|568553e-7a131106-70c65d3f-53309d04_0 grpc read failed 2026-01-08T15:35:54.700600Z node 14 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:232: session v1 cookie: 7 sessionId: test-message_group_id|568553e-7a131106-70c65d3f-53309d04_0 grpc closed 2026-01-08T15:35:54.700619Z node 14 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:304: session v1 cookie: 7 sessionId: test-message_group_id|568553e-7a131106-70c65d3f-53309d04_0 is DEAD 2026-01-08T15:35:54.701373Z node 14 :PQ_WRITE_PROXY DEBUG: writer.cpp:562: TPartitionWriter 72075186224037896 (partition=0) Received event: NActors::TEvents::TEvPoison 2026-01-08T15:35:54.701426Z node 14 :PQ_WRITE_PROXY DEBUG: writer.cpp:562: TPartitionWriter 72075186224037896 (partition=0) Received event: NActors::TEvents::TEvPoison 2026-01-08T15:35:54.701892Z node 14 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer__balancing.cpp:1661: [72075186224037895][topic_A] pipe [14:7593014913259725731:2554] disconnected. 2026-01-08T15:35:54.701932Z node 14 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1672: [72075186224037895][topic_A] pipe [14:7593014913259725731:2554] disconnected; active server actors: 1 2026-01-08T15:35:54.701956Z node 14 :PERSQUEUE_READ_BALANCER NOTICE: read_balancer__balancing.cpp:1681: [72075186224037895][topic_A] pipe [14:7593014913259725731:2554] client test-consumer disconnected session test-consumer_14_1_16808562226932957702_v1 2026-01-08T15:35:54.702050Z node 14 :PERSQUEUE DEBUG: pq_impl.cpp:2321: [PQ: 72075186224037894] Destroy direct read session test-consumer_14_1_16808562226932957702_v1 2026-01-08T15:35:54.702079Z node 14 :PERSQUEUE DEBUG: pq_impl.cpp:2809: [PQ: 72075186224037894] server disconnected, pipe [14:7593014913259725734:2557] destroyed 2026-01-08T15:35:54.702122Z node 14 :PERSQUEUE DEBUG: pq_impl.cpp:2809: [PQ: 72075186224037896] server disconnected, pipe [14:7593014913259725613:2528] destroyed 2026-01-08T15:35:54.702152Z node 14 :PERSQUEUE DEBUG: pq_impl.cpp:2809: [PQ: 72075186224037896] server disconnected, pipe [14:7593014913259725616:2528] destroyed 2026-01-08T15:35:54.702186Z node 14 :PERSQUEUE DEBUG: partition_write.cpp:140: [72075186224037896][Partition][0][StateIdle] TPartition::DropOwner. 
2026-01-08T15:35:54.702216Z node 14 :PERSQUEUE DEBUG: partition.cpp:2329: [72075186224037896][Partition][0][StateIdle] Process user action and tx events 2026-01-08T15:35:54.702236Z node 14 :PERSQUEUE DEBUG: partition.cpp:2388: [72075186224037896][Partition][0][StateIdle] Events: 0, PendingCommits: 0, PendingWrites: 0 2026-01-08T15:35:54.702251Z node 14 :PERSQUEUE DEBUG: partition.cpp:2337: [72075186224037896][Partition][0][StateIdle] Process user action and tx pending commits 2026-01-08T15:35:54.702270Z node 14 :PERSQUEUE DEBUG: partition.cpp:2388: [72075186224037896][Partition][0][StateIdle] Events: 0, PendingCommits: 0, PendingWrites: 0 2026-01-08T15:35:54.702284Z node 14 :PERSQUEUE DEBUG: partition.cpp:2347: [72075186224037896][Partition][0][StateIdle] Try persist 2026-01-08T15:35:54.702361Z node 14 :PQ_READ_PROXY DEBUG: caching_service.cpp:139: Direct read cache: server session deregistered: test-consumer_14_1_16808562226932957702_v1 2026-01-08T15:35:54.706668Z :INFO: [/Root] TraceId [] SessionId [test-message_group_id|8aeaee74-790bc4d-ac2dbc58-8d37245d_0] PartitionId [0] Generation [1] Write session: close. Timeout 0.000000s 2026-01-08T15:35:54.706721Z :INFO: [/Root] TraceId [] SessionId [test-message_group_id|8aeaee74-790bc4d-ac2dbc58-8d37245d_0] PartitionId [0] Generation [1] Write session will now close 2026-01-08T15:35:54.706763Z :DEBUG: [/Root] TraceId [] SessionId [test-message_group_id|8aeaee74-790bc4d-ac2dbc58-8d37245d_0] PartitionId [0] Generation [1] Write session: aborting 2026-01-08T15:35:54.707327Z :INFO: [/Root] TraceId [] SessionId [test-message_group_id|8aeaee74-790bc4d-ac2dbc58-8d37245d_0] PartitionId [0] Generation [1] Write session: gracefully shut down, all writes complete 2026-01-08T15:35:54.707366Z :DEBUG: [/Root] TraceId [] SessionId [test-message_group_id|8aeaee74-790bc4d-ac2dbc58-8d37245d_0] PartitionId [0] Generation [1] Write session: destroy 2026-01-08T15:35:54.708799Z node 14 :PERSQUEUE DEBUG: partition.cpp:2329: [72075186224037894][Partition][0][StateIdle] Process user action and tx events 2026-01-08T15:35:54.708840Z node 14 :PERSQUEUE DEBUG: partition.cpp:2388: [72075186224037894][Partition][0][StateIdle] Events: 0, PendingCommits: 0, PendingWrites: 0 2026-01-08T15:35:54.708859Z node 14 :PERSQUEUE DEBUG: partition.cpp:2337: [72075186224037894][Partition][0][StateIdle] Process user action and tx pending commits 2026-01-08T15:35:54.708883Z node 14 :PERSQUEUE DEBUG: partition.cpp:2388: [72075186224037894][Partition][0][StateIdle] Events: 0, PendingCommits: 0, PendingWrites: 0 2026-01-08T15:35:54.708901Z node 14 :PERSQUEUE DEBUG: partition.cpp:2347: [72075186224037894][Partition][0][StateIdle] Try persist 2026-01-08T15:35:54.708963Z node 14 :PERSQUEUE DEBUG: partition.cpp:2329: [72075186224037892][Partition][0][StateIdle] Process user action and tx events 2026-01-08T15:35:54.708977Z node 14 :PERSQUEUE DEBUG: partition.cpp:2388: [72075186224037892][Partition][0][StateIdle] Events: 0, PendingCommits: 0, PendingWrites: 0 2026-01-08T15:35:54.708988Z node 14 :PERSQUEUE DEBUG: partition.cpp:2337: [72075186224037892][Partition][0][StateIdle] Process user action and tx pending commits 2026-01-08T15:35:54.709005Z node 14 :PERSQUEUE DEBUG: partition.cpp:2388: [72075186224037892][Partition][0][StateIdle] Events: 0, PendingCommits: 0, PendingWrites: 0 2026-01-08T15:35:54.709031Z node 14 :PERSQUEUE DEBUG: partition.cpp:2347: [72075186224037892][Partition][0][StateIdle] Try persist 2026-01-08T15:35:54.709064Z node 14 :PERSQUEUE DEBUG: partition.cpp:2329: 
[72075186224037896][Partition][0][StateIdle] Process user action and tx events 2026-01-08T15:35:54.709077Z node 14 :PERSQUEUE DEBUG: partition.cpp:2388: [72075186224037896][Partition][0][StateIdle] Events: 0, PendingCommits: 0, PendingWrites: 0 2026-01-08T15:35:54.709088Z node 14 :PERSQUEUE DEBUG: partition.cpp:2337: [72075186224037896][Partition][0][StateIdle] Process user action and tx pending commits 2026-01-08T15:35:54.709106Z node 14 :PERSQUEUE DEBUG: partition.cpp:2388: [72075186224037896][Partition][0][StateIdle] Events: 0, PendingCommits: 0, PendingWrites: 0 2026-01-08T15:35:54.709119Z node 14 :PERSQUEUE DEBUG: partition.cpp:2347: [72075186224037896][Partition][0][StateIdle] Try persist 2026-01-08T15:35:54.718790Z node 14 :PQ_WRITE_PROXY DEBUG: write_session_actor.cpp:254: session v1 cookie: 5 sessionId: test-message_group_id|8aeaee74-790bc4d-ac2dbc58-8d37245d_0 grpc read done: success: 0 data: 2026-01-08T15:35:54.718824Z node 14 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:256: session v1 cookie: 5 sessionId: test-message_group_id|8aeaee74-790bc4d-ac2dbc58-8d37245d_0 grpc read failed 2026-01-08T15:35:54.718862Z node 14 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:232: session v1 cookie: 5 sessionId: test-message_group_id|8aeaee74-790bc4d-ac2dbc58-8d37245d_0 grpc closed 2026-01-08T15:35:54.718883Z node 14 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:304: session v1 cookie: 5 sessionId: test-message_group_id|8aeaee74-790bc4d-ac2dbc58-8d37245d_0 is DEAD 2026-01-08T15:35:54.719837Z node 14 :PQ_WRITE_PROXY DEBUG: writer.cpp:562: TPartitionWriter 72075186224037894 (partition=0) Received event: NActors::TEvents::TEvPoison 2026-01-08T15:35:54.719878Z node 14 :PQ_WRITE_PROXY DEBUG: writer.cpp:562: TPartitionWriter 72075186224037894 (partition=0) Received event: NActors::TEvents::TEvPoison 2026-01-08T15:35:54.721513Z node 14 :PERSQUEUE DEBUG: pq_impl.cpp:2809: [PQ: 72075186224037894] server disconnected, pipe [14:7593014913259725568:2520] destroyed 2026-01-08T15:35:54.721561Z node 14 :PERSQUEUE DEBUG: pq_impl.cpp:2809: [PQ: 72075186224037894] server disconnected, pipe [14:7593014913259725573:2520] destroyed 2026-01-08T15:35:54.721596Z node 14 :PERSQUEUE DEBUG: partition_write.cpp:140: [72075186224037894][Partition][0][StateIdle] TPartition::DropOwner. 
2026-01-08T15:35:54.721633Z node 14 :PERSQUEUE DEBUG: partition.cpp:2329: [72075186224037894][Partition][0][StateIdle] Process user action and tx events 2026-01-08T15:35:54.721655Z node 14 :PERSQUEUE DEBUG: partition.cpp:2388: [72075186224037894][Partition][0][StateIdle] Events: 0, PendingCommits: 0, PendingWrites: 0 2026-01-08T15:35:54.721669Z node 14 :PERSQUEUE DEBUG: partition.cpp:2337: [72075186224037894][Partition][0][StateIdle] Process user action and tx pending commits 2026-01-08T15:35:54.721691Z node 14 :PERSQUEUE DEBUG: partition.cpp:2388: [72075186224037894][Partition][0][StateIdle] Events: 0, PendingCommits: 0, PendingWrites: 0 2026-01-08T15:35:54.721704Z node 14 :PERSQUEUE DEBUG: partition.cpp:2347: [72075186224037894][Partition][0][StateIdle] Try persist 2026-01-08T15:35:54.811125Z node 14 :PERSQUEUE DEBUG: partition.cpp:2329: [72075186224037894][Partition][0][StateIdle] Process user action and tx events 2026-01-08T15:35:54.811164Z node 14 :PERSQUEUE DEBUG: partition.cpp:2388: [72075186224037894][Partition][0][StateIdle] Events: 0, PendingCommits: 0, PendingWrites: 0 2026-01-08T15:35:54.811182Z node 14 :PERSQUEUE DEBUG: partition.cpp:2337: [72075186224037894][Partition][0][StateIdle] Process user action and tx pending commits 2026-01-08T15:35:54.811204Z node 14 :PERSQUEUE DEBUG: partition.cpp:2388: [72075186224037894][Partition][0][StateIdle] Events: 0, PendingCommits: 0, PendingWrites: 0 2026-01-08T15:35:54.811222Z node 14 :PERSQUEUE DEBUG: partition.cpp:2347: [72075186224037894][Partition][0][StateIdle] Try persist 2026-01-08T15:35:54.811289Z node 14 :PERSQUEUE DEBUG: partition.cpp:2329: [72075186224037892][Partition][0][StateIdle] Process user action and tx events 2026-01-08T15:35:54.811302Z node 14 :PERSQUEUE DEBUG: partition.cpp:2388: [72075186224037892][Partition][0][StateIdle] Events: 0, PendingCommits: 0, PendingWrites: 0 2026-01-08T15:35:54.811315Z node 14 :PERSQUEUE DEBUG: partition.cpp:2337: [72075186224037892][Partition][0][StateIdle] Process user action and tx pending commits 2026-01-08T15:35:54.811331Z node 14 :PERSQUEUE DEBUG: partition.cpp:2388: [72075186224037892][Partition][0][StateIdle] Events: 0, PendingCommits: 0, PendingWrites: 0 2026-01-08T15:35:54.811343Z node 14 :PERSQUEUE DEBUG: partition.cpp:2347: [72075186224037892][Partition][0][StateIdle] Try persist 2026-01-08T15:35:54.811374Z node 14 :PERSQUEUE DEBUG: partition.cpp:2329: [72075186224037896][Partition][0][StateIdle] Process user action and tx events 2026-01-08T15:35:54.811386Z node 14 :PERSQUEUE DEBUG: partition.cpp:2388: [72075186224037896][Partition][0][StateIdle] Events: 0, PendingCommits: 0, PendingWrites: 0 2026-01-08T15:35:54.811398Z node 14 :PERSQUEUE DEBUG: partition.cpp:2337: [72075186224037896][Partition][0][StateIdle] Process user action and tx pending commits 2026-01-08T15:35:54.811411Z node 14 :PERSQUEUE DEBUG: partition.cpp:2388: [72075186224037896][Partition][0][StateIdle] Events: 0, PendingCommits: 0, PendingWrites: 0 2026-01-08T15:35:54.811423Z node 14 :PERSQUEUE DEBUG: partition.cpp:2347: [72075186224037896][Partition][0][StateIdle] Try persist |87.9%| [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/close_with_load/unittest ------- [TM] {asan, default-linux-x86_64, release} ydb/services/ydb/table_split_ut/unittest >> YdbTableSplit::SplitByLoadWithNonEmptyRangeReads [GOOD] Test command err: 2026-01-08T15:34:27.005207Z node 1 :METADATA_PROVIDER WARN: log.cpp:841: 
fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7593014562209134624:2230];send_to=[0:7307199536658146131:7762515]; 2026-01-08T15:34:27.005397Z node 1 :METADATA_PROVIDER ERROR: log.cpp:841: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2026-01-08T15:34:27.077021Z node 1 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:125: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, got undelivered to scheme cache: ActorUnknown 2026-01-08T15:34:27.150656Z node 1 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:125: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, got undelivered to scheme cache: ActorUnknown 2026-01-08T15:34:27.720461Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2026-01-08T15:34:27.720562Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2026-01-08T15:34:27.751074Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2026-01-08T15:34:27.854353Z node 1 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: LookupError, path: Root/.metadata/script_executions 2026-01-08T15:34:27.858242Z node 1 :IMPORT WARN: schemeshard_import.cpp:440: Table profiles were not loaded 2026-01-08T15:34:28.102431Z node 1 :TX_CONVEYOR ERROR: log.cpp:841: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2026-01-08T15:34:28.127668Z node 3 :BS_NODE WARN: {NWDC01@distconf.cpp:423} StateFunc too long Type# 268639239 Duration# 0.013172s 2026-01-08T15:34:28.074629Z node 2 :BS_NODE WARN: {NWDC01@distconf.cpp:423} StateFunc too long Type# 268639239 Duration# 0.012224s 2026-01-08T15:34:28.416752Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2026-01-08T15:34:28.416774Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2026-01-08T15:34:28.416782Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2026-01-08T15:34:28.416905Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2026-01-08T15:34:28.432579Z node 1 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: PathErrorUnknown, path: Root/.metadata/script_executions 2026-01-08T15:34:28.751565Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2026-01-08T15:34:31.625193Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7593014579389004975:2331], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:34:31.625317Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:263: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:34:31.625672Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7593014579389004985:2332], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:34:31.625752Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:263: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:34:31.912967Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:34:32.006431Z node 1 :METADATA_PROVIDER ERROR: log.cpp:841: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7593014562209134624:2230];send_to=[0:7307199536658146131:7762515]; 2026-01-08T15:34:32.006497Z node 1 :METADATA_PROVIDER ERROR: log.cpp:841: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2026-01-08T15:34:32.198697Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7593014583683972449:2345], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:34:32.198863Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:263: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:34:32.199305Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7593014583683972454:2348], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:34:32.199445Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7593014583683972455:2349], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:34:32.199517Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:263: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:34:32.204048Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710659:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2026-01-08T15:34:32.251572Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7593014583683972458:2350], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710659 completed, doublechecking } 2026-01-08T15:34:32.360811Z node 1 :TX_PROXY ERROR: schemereq.cpp:624: Actor# [1:7593014583683972528:3096] txid# 281474976710660, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 42], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2026-01-08T15:34:42.698899Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7747: Cannot get console configs 2026-01-08T15:34:42.698928Z node 1 :IMPORT WARN: schemeshard_import.cpp:440: Table profiles were not loaded 2026-01-08T15:35:29.632538Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterTable, opId: 281474976712661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_alter_table.cpp:187) Table has 1 shards 2026-01-08T15:35:34.392932Z node 1 :TX_DATASHARD ERROR: datashard__stats.cpp:704: CPU usage 1.7158 is higher than threshold of 1 in-flight Tx: 0 immediate Tx: 0 readIterators: 0 at datashard: 72075186224037888 table: [/Root/Foo] 2026-01-08T15:35:39.400669Z node 1 :TX_DATASHARD ERROR: datashard__stats.cpp:704: CPU usage 1.7158 is higher than threshold of 1 in-flight Tx: 0 immediate Tx: 0 readIterators: 2 at datashard: 72075186224037888 table: [/Root/Foo] 2026-01-08T15:35:44.403653Z node 1 :TX_DATASHARD ERROR: datashard__stats.cpp:704: CPU usage 1.7158 is higher than threshold of 1 in-flight Tx: 0 immediate Tx: 0 readIterators: 0 at datashard: 72075186224037888 table: [/Root/Foo] 2026-01-08T15:35:49.405718Z node 1 :TX_DATASHARD ERROR: datashard__stats.cpp:704: CPU usage 9.6607 is higher than threshold of 1 in-flight Tx: 0 immediate Tx: 0 readIterators: 2 at datashard: 72075186224037888 table: [/Root/Foo] 2026-01-08T15:35:49.508079Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__table_stats.cpp:591: Want to split tablet 72075186224037888 by load: split by load (rowCount: 2000, minRowCount: 0, shardSize: 141098, minShardSize: 0, shardCount: 1, expectedShardCount: 1, maxShardCount: 50, cpuUsage: 96607, cpuUsageThreshold: 50000) 2026-01-08T15:35:49.508125Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__table_stats.cpp:694: Requesting full tablet stats 72075186224037888 to split it 2026-01-08T15:35:49.508314Z node 1 :TX_DATASHARD NOTICE: datashard.cpp:2279: Started key access sampling at datashard: 72075186224037888 2026-01-08T15:35:49.508526Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__table_stats_histogram.cpp:302: Got partition histogram at tablet 72057594046644480 from datashard 72075186224037888 state Ready data size 141098 row count 2000 buckets 9 ready 1 2026-01-08T15:35:49.508575Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__table_stats_histogram.cpp:382: TTxPartitionHistogram Process detailed partition statistics at tablet 72057594046644480 from datashard 72075186224037888 from follower ID 0 for pathId [OwnerId: 72057594046644480, LocalPathId: 38], state Ready, data size buckets 9, key access buckets 0 2026-01-08T15:35:54.407020Z node 1 :TX_DATASHARD ERROR: datashard__stats.cpp:704: CPU usage 9.6607 is higher than threshold of 1 in-flight Tx: 0 immediate Tx: 0 readIterators: 2 at datashard: 72075186224037888 table: [/Root/Foo] 2026-01-08T15:35:54.508766Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__table_stats.cpp:591: Want to 
split tablet 72075186224037888 by load: split by load (rowCount: 2000, minRowCount: 0, shardSize: 141098, minShardSize: 0, shardCount: 1, expectedShardCount: 1, maxShardCount: 50, cpuUsage: 96607, cpuUsageThreshold: 50000) 2026-01-08T15:35:54.508793Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__table_stats.cpp:694: Requesting full tablet stats 72075186224037888 to split it 2026-01-08T15:35:54.511899Z node 1 :TX_DATASHARD NOTICE: datashard.cpp:2281: Extended key access sampling at datashard: 72075186224037888 2026-01-08T15:35:54.512070Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__table_stats_histogram.cpp:302: Got partition histogram at tablet 72057594046644480 from datashard 72075186224037888 state Ready data size 141098 row count 2000 buckets 9 ready 1 2026-01-08T15:35:54.512126Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__table_stats_histogram.cpp:382: TTxPartitionHistogram ... 44480 2026-01-08T15:35:54.513099Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_split_merge.cpp:804: TSplitMerge Propose, tableStr: /Root/Foo, tableId: , opId: 281474976715693:0, at schemeshard: 72057594046644480, request: TablePath: "/Root/Foo" SourceTabletId: 72075186224037888 SplitBoundary { SerializedKeyPrefix: "\002\000\004\000\000\000\252\335\332\204\000\000\000\200" } SchemeshardId: 72057594046644480 2026-01-08T15:35:54.513710Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_split_merge.cpp:1083: TSplitMerge Propose accepted, tableStr: /Root/Foo, tableId: , opId: 281474976715693:0, at schemeshard: 72057594046644480, op: SourceRanges { KeyRangeBegin: "\002\000\000\000\000\200\000\000\000\200" KeyRangeEnd: "" TabletID: 72075186224037888 ShardIdx: 1 } DestinationRanges { KeyRangeBegin: "\002\000\000\000\000\200\000\000\000\200" KeyRangeEnd: "\002\000\004\000\000\000\252\335\332\204\000\000\000\200" ShardIdx: 2 } DestinationRanges { KeyRangeBegin: "\002\000\004\000\000\000\252\335\332\204\000\000\000\200" KeyRangeEnd: "" ShardIdx: 3 }, request: TablePath: "/Root/Foo" SourceTabletId: 72075186224037888 SplitBoundary { SerializedKeyPrefix: "\002\000\004\000\000\000\252\335\332\204\000\000\000\200" } SchemeshardId: 72057594046644480 2026-01-08T15:35:54.513746Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:127: IgniteOperation, opId: 281474976715693:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046644480 2026-01-08T15:35:54.519845Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 281474976715693:0 ProgressState, operation type: TxSplitTablePartition, at tablet# 72057594046644480 2026-01-08T15:35:54.535898Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:177: TCreateParts opId# 281474976715693:0 HandleReply TEvCreateTabletReply, at tabletId: 72057594046644480 2026-01-08T15:35:54.536234Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:177: TCreateParts opId# 281474976715693:0 HandleReply TEvCreateTabletReply, at tabletId: 72057594046644480 2026-01-08T15:35:54.536293Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2700: Change state for txid 281474976715693:0 2 -> 3 2026-01-08T15:35:54.539606Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_split_merge.cpp:84: TSplitMerge TConfigureDestination ProgressState, operationId: 281474976715693:0, at schemeshard: 72057594046644480 2026-01-08T15:35:54.556125Z node 1 :TX_DATASHARD INFO: datashard.cpp:374: TDataShard::OnActivateExecutor: tablet 72075186224037890 actor [1:7593014935871382169:8648] 
2026-01-08T15:35:54.556981Z node 1 :TX_DATASHARD INFO: datashard.cpp:374: TDataShard::OnActivateExecutor: tablet 72075186224037889 actor [1:7593014935871382140:8634] 2026-01-08T15:35:54.577637Z node 1 :TX_DATASHARD INFO: datashard.cpp:418: Switched to work state WaitScheme tabletId 72075186224037889 2026-01-08T15:35:54.577773Z node 1 :TX_DATASHARD INFO: datashard.cpp:1281: Cannot activate change sender: at tablet: 72075186224037889, state: WaitScheme 2026-01-08T15:35:54.577990Z node 1 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186224037889 TxInFly 0 2026-01-08T15:35:54.579157Z node 1 :TX_DATASHARD INFO: datashard.cpp:418: Switched to work state WaitScheme tabletId 72075186224037890 2026-01-08T15:35:54.579240Z node 1 :TX_DATASHARD INFO: datashard.cpp:1281: Cannot activate change sender: at tablet: 72075186224037890, state: WaitScheme 2026-01-08T15:35:54.579395Z node 1 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186224037890 TxInFly 0 2026-01-08T15:35:54.591889Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_split_merge.cpp:38: TSplitMerge TConfigureDestination operationId# 281474976715693:0 HandleReply TEvInitSplitMergeDestinationAck, operationId: 281474976715693:0, at schemeshard: 72057594046644480 message# OperationCookie: 281474976715693 TabletId: 72075186224037889 2026-01-08T15:35:54.592310Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_split_merge.cpp:38: TSplitMerge TConfigureDestination operationId# 281474976715693:0 HandleReply TEvInitSplitMergeDestinationAck, operationId: 281474976715693:0, at schemeshard: 72057594046644480 message# OperationCookie: 281474976715693 TabletId: 72075186224037890 2026-01-08T15:35:54.592336Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2700: Change state for txid 281474976715693:0 3 -> 131 2026-01-08T15:35:54.594995Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_split_merge.cpp:334: TSplitMerge TTransferData operationId# 281474976715693:0 ProgressState, at schemeshard: 72057594046644480 2026-01-08T15:35:54.639324Z node 1 :TX_DATASHARD INFO: datashard.cpp:418: Switched to work state Ready tabletId 72075186224037889 2026-01-08T15:35:54.639498Z node 1 :TX_DATASHARD INFO: datashard.cpp:476: Send registration request to time cast Ready tabletId 72075186224037889 mediators count is 1 coordinators count is 1 buckets per mediator 2 2026-01-08T15:35:54.639523Z node 1 :TX_DATASHARD INFO: datashard.cpp:418: Switched to work state Ready tabletId 72075186224037890 2026-01-08T15:35:54.639586Z node 1 :TX_DATASHARD INFO: datashard.cpp:1310: Change sender killed: at tablet: 72075186224037889 2026-01-08T15:35:54.639622Z node 1 :TX_DATASHARD INFO: datashard.cpp:1302: Change sender activated: at tablet: 72075186224037889 2026-01-08T15:35:54.639686Z node 1 :TX_DATASHARD INFO: datashard.cpp:476: Send registration request to time cast Ready tabletId 72075186224037890 mediators count is 1 coordinators count is 1 buckets per mediator 2 2026-01-08T15:35:54.639743Z node 1 :TX_DATASHARD INFO: datashard.cpp:1310: Change sender killed: at tablet: 72075186224037890 2026-01-08T15:35:54.639774Z node 1 :TX_DATASHARD INFO: datashard.cpp:1302: Change sender activated: at tablet: 72075186224037890 2026-01-08T15:35:54.640072Z node 1 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186224037889 TxInFly 0 2026-01-08T15:35:54.640186Z node 1 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186224037890 TxInFly 0 
2026-01-08T15:35:54.648266Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_split_merge.cpp:207: TSplitMerge TTransferData operationId# 281474976715693:0 HandleReply TEvSplitAck, at schemeshard: 72057594046644480, message: OperationCookie: 281474976715693 TabletId: 72075186224037888 2026-01-08T15:35:54.648609Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2700: Change state for txid 281474976715693:0 131 -> 132 2026-01-08T15:35:54.650934Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046644480 2026-01-08T15:35:54.651246Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046644480 2026-01-08T15:35:54.651318Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_split_merge.cpp:437: TSplitMerge TNotifySrc, operationId: 281474976715693:0 ProgressState, at schemeshard: 72057594046644480 2026-01-08T15:35:54.655197Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6274: Handle TEvUpdateAck, at schemeshard: 72057594046644480, msg: Owner: 72057594046644480 Generation: 2 LocalPathId: 38 Version: 5 PathOwnerId: 72057594046644480, cookie: 281474976715693 2026-01-08T15:35:54.655265Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:108: Operation in-flight, at schemeshard: 72057594046644480, txId: 281474976715693 2026-01-08T15:35:54.655288Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046644480, txId: 281474976715693, pathId: [OwnerId: 72057594046644480, LocalPathId: 38], version: 5 2026-01-08T15:35:54.661636Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_split_merge.cpp:392: TSplitMerge TNotifySrc, operationId: 281474976715693:0 HandleReply TEvSplitPartitioningChangedAck, from datashard: 72075186224037888, at schemeshard: 72057594046644480 2026-01-08T15:35:54.661731Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:928: Part operation is done id#281474976715693:0 progress is 1/1 2026-01-08T15:35:54.661761Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:928: Part operation is done id#281474976715693:0 progress is 1/1 2026-01-08T15:35:54.661809Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:995: Operation and all the parts is done, operation id: 281474976715693:0 2026-01-08T15:35:54.664216Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:270: Unable to activate 281474976715693:0 2026-01-08T15:35:54.746392Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__borrowed_compaction.cpp:28: RunBorrowedCompaction for pathId# [OwnerId: 72057594046644480, LocalPathId: 38], datashard# 72075186224037889, next wakeup# 0.000000s, rate# 0, in queue# 1 shards, running# 0 shards at schemeshard 72057594046644480 2026-01-08T15:35:54.746640Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__borrowed_compaction.cpp:28: RunBorrowedCompaction for pathId# [OwnerId: 72057594046644480, LocalPathId: 38], datashard# 72075186224037890, next wakeup# 14.995939s, rate# 0, in queue# 1 shards, running# 1 shards at schemeshard 72057594046644480 2026-01-08T15:35:54.746866Z node 1 :TX_DATASHARD INFO: datashard__compact_borrowed.cpp:22: TEvCompactBorrowed request from [1:7593014562209134850:2259] for table [OwnerId: 72057594046644480, LocalPathId: 38] at tablet 72075186224037889 2026-01-08T15:35:54.747056Z node 1 :TX_DATASHARD 
INFO: datashard__compact_borrowed.cpp:22: TEvCompactBorrowed request from [1:7593014562209134850:2259] for table [OwnerId: 72057594046644480, LocalPathId: 38] at tablet 72075186224037890 2026-01-08T15:35:54.765671Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__borrowed_compaction.cpp:152: Finished borrowed compaction for pathId# [OwnerId: 72057594046644480, LocalPathId: 38], datashard# 72075186224037889, shardIdx# 72057594046644480:2 in# 23, next wakeup# 14.976913s, rate# 0, in queue# 0 shards, running# 1 shards at schemeshard 72057594046644480 2026-01-08T15:35:54.765951Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__borrowed_compaction.cpp:152: Finished borrowed compaction for pathId# [OwnerId: 72057594046644480, LocalPathId: 38], datashard# 72075186224037890, shardIdx# 72057594046644480:3 in# 19, next wakeup# 14.976625s, rate# 0, in queue# 0 shards, running# 0 shards at schemeshard 72057594046644480 2026-01-08T15:35:54.771424Z node 1 :TX_DATASHARD INFO: datashard_loans.cpp:53: 72075186224037889 CompletedLoansChanged 2026-01-08T15:35:54.771481Z node 1 :TX_DATASHARD INFO: datashard_loans.cpp:53: 72075186224037890 CompletedLoansChanged 2026-01-08T15:35:54.774994Z node 1 :TX_DATASHARD INFO: datashard_loans.cpp:177: 72075186224037888 Initiating switch from PreOffline to Offline state 2026-01-08T15:35:54.784277Z node 1 :TX_DATASHARD INFO: datashard_impl.h:3339: 72075186224037888 Reporting state Offline to schemeshard 72057594046644480 2026-01-08T15:35:54.785810Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__state_changed_reply.cpp:78: TTxShardStateChanged DoExecute, datashard informs about state changing, datashardId: 72075186224037888, state: Offline, at schemeshard: 72057594046644480 2026-01-08T15:35:54.792778Z node 1 :TX_DATASHARD INFO: datashard.cpp:196: OnTabletStop: 72075186224037888 reason = ReasonStop 2026-01-08T15:35:54.793538Z node 1 :HIVE WARN: hive_impl.cpp:519: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037888 not found 2026-01-08T15:35:54.793844Z node 1 :TX_DATASHARD INFO: datashard.cpp:256: OnTabletDead: 72075186224037888 2026-01-08T15:35:54.793946Z node 1 :TX_DATASHARD INFO: datashard.cpp:1310: Change sender killed: at tablet: 72075186224037888 Table has 2 shards |87.9%| [TM] {BAZEL_UPLOAD} ydb/core/kqp/ut/close_with_load/unittest ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_consistent_copy_tables/unittest >> TSchemeShardConsistentCopyTablesTest::ConsistentCopyWithOmitIndexesTrueSkipsIndexes [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2143] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2143] Leader for TabletID 72057594046678944 is [1:129:2154] sender: [1:130:2058] recipient: [1:112:2143] 2026-01-08T15:35:50.161968Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7938: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2026-01-08T15:35:50.162105Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7966: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2026-01-08T15:35:50.162155Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7852: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, 
StatsMaxExecuteTime# 0.010000s 2026-01-08T15:35:50.162195Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7868: OperationsProcessing config: using default configuration 2026-01-08T15:35:50.162259Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7874: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2026-01-08T15:35:50.162293Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7874: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2026-01-08T15:35:50.162393Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7998: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2026-01-08T15:35:50.162460Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_shred_manager.cpp:39: [RootShredManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, ShredInterval# 604800.000000s, ShredBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2026-01-08T15:35:50.163469Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:8069: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2026-01-08T15:35:50.163833Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2026-01-08T15:35:50.258991Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7755: Cannot subscribe to console configs 2026-01-08T15:35:50.259065Z node 1 :IMPORT WARN: schemeshard_import.cpp:440: Table profiles were not loaded 2026-01-08T15:35:50.280598Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2026-01-08T15:35:50.281078Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2026-01-08T15:35:50.281279Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2026-01-08T15:35:50.303197Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2026-01-08T15:35:50.303620Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:464: Clear TempDirsState with owners number: 0 2026-01-08T15:35:50.304389Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1383: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2026-01-08T15:35:50.305203Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2026-01-08T15:35:50.308545Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:160: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2026-01-08T15:35:50.308757Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_shred_manager.cpp:84: [RootShredManager] Stop 2026-01-08T15:35:50.310023Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2026-01-08T15:35:50.310089Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2026-01-08T15:35:50.310202Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2026-01-08T15:35:50.310250Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not 
a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2026-01-08T15:35:50.310288Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:213: TTxServerlessStorageBilling.Complete 2026-01-08T15:35:50.310485Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:7111: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2026-01-08T15:35:50.470696Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot" OperationType: ESchemeOpMkDir MkDir { Name: ".sys" } Internal: true FailOnExist: false } TxId: 281474976710657 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:35:50.471880Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "auth_permissions" Type: EAuthPermissions } } TxId: 281474976710658 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:35:50.472068Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "auth_effective_permissions" Type: EAuthEffectivePermissions } } TxId: 281474976710659 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:35:50.472157Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "auth_users" Type: EAuthUsers } } TxId: 281474976710660 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:35:50.472260Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "pg_tables" Type: EPgTables } } TxId: 281474976710661 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:35:50.472363Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "ds_pdisks" Type: EPDisks } } TxId: 281474976710662 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:35:50.472442Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "auth_group_members" Type: 
EAuthGroupMembers } } TxId: 281474976710663 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:35:50.472535Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "top_partitions_one_minute" Type: ETopPartitionsByCpuOneMinute } } TxId: 281474976710664 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:35:50.472617Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "top_queries_by_request_units_one_hour" Type: ETopQueriesByRequestUnitsOneHour } } TxId: 281474976710665 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:35:50.472729Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "ds_vslots" Type: EVSlots } } TxId: 281474976710666 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:35:50.472820Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "top_partitions_by_tli_one_hour" Type: ETopPartitionsByTliOneHour } } TxId: 281474976710667 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:35:50.472891Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "nodes" Type: ENodes } } TxId: 281474976710668 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:35:50.472984Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "top_queries_by_cpu_time_one_hour" Type: ETopQueriesByCpuTimeOneHour } } TxId: 281474976710669 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:35:50.473103Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "top_queries_by_read_bytes_one_hour" Type: ETopQueriesByReadBytesOneHour } } TxId: 281474976710670 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:35:50.473183Z 
node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "top_queries_by_read_bytes_one_minute" Type: ETopQueriesByR ... t TxId: 102 at schemeshard: 72057594046678944 2026-01-08T15:35:59.133059Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:643: Send to actor: [1:884:2840] msg type: 269552132 msg: NKikimrTxDataShard.TEvSchemaChangedResult TxId: 102 at schemeshard: 72057594046678944 2026-01-08T15:35:59.133467Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5248: StateWork, received event# 2146435072, Sender [1:129:2154], Recipient [1:129:2154]: NKikimr::NSchemeShard::TEvPrivate::TEvProgressOperation 2026-01-08T15:35:59.133535Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5265: StateWork, processing event TEvPrivate::TEvProgressOperation 2026-01-08T15:35:59.133610Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:494: TTxOperationProgress Execute, operationId: 102:0, at schemeshard: 72057594046678944 2026-01-08T15:35:59.133669Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_states.h:93: TCopyTable::TWaitCopyTableBarrier operationId: 102:0ProgressState, operation type TxCopyTable 2026-01-08T15:35:59.133727Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:173: TSideEffects ApplyOnExecute at tablet# 72057594046678944 2026-01-08T15:35:59.133766Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:1081: Set barrier, OperationId: 102:0, name: CopyTableBarrier, done: 0, blocked: 1, parts count: 1 2026-01-08T15:35:59.133801Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:1125: All parts have reached barrier, tx: 102, done: 0, blocked: 1 2026-01-08T15:35:59.133872Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_states.h:76: TCopyTable::TWaitCopyTableBarrier operationId: 102:0 HandleReply TEvPrivate::TEvCompleteBarrier, msg: NKikimr::NSchemeShard::TEvPrivate::TEvCompleteBarrier { TxId: 102 Name: CopyTableBarrier }, at tablet# 72057594046678944 2026-01-08T15:35:59.133910Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2700: Change state for txid 102:0 240 -> 240 2026-01-08T15:35:59.140945Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:216: TSideEffects ApplyOnComplete at tablet# 72057594046678944 2026-01-08T15:35:59.141017Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:285: Activate send for 102:0 2026-01-08T15:35:59.141183Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5248: StateWork, received event# 2146435072, Sender [1:129:2154], Recipient [1:129:2154]: NKikimr::NSchemeShard::TEvPrivate::TEvProgressOperation 2026-01-08T15:35:59.141219Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5265: StateWork, processing event TEvPrivate::TEvProgressOperation 2026-01-08T15:35:59.198542Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:494: TTxOperationProgress Execute, operationId: 102:0, at schemeshard: 72057594046678944 2026-01-08T15:35:59.198653Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:497: [72057594046678944] TDone opId# 102:0 ProgressState 2026-01-08T15:35:59.198775Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:173: TSideEffects ApplyOnExecute at 
tablet# 72057594046678944 2026-01-08T15:35:59.198823Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:928: Part operation is done id#102:0 progress is 1/1 2026-01-08T15:35:59.198893Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1714: TOperation IsReadyToDone TxId: 102 ready parts: 1/1 2026-01-08T15:35:59.198934Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:928: Part operation is done id#102:0 progress is 1/1 2026-01-08T15:35:59.198965Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1714: TOperation IsReadyToDone TxId: 102 ready parts: 1/1 2026-01-08T15:35:59.199001Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1679: TOperation IsReadyToNotify, TxId: 102, ready parts: 1/1, is published: true 2026-01-08T15:35:59.199077Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1702: TOperation DoNotify send TEvNotifyTxCompletionResult to actorId: [1:779:2746] message: TxId: 102 2026-01-08T15:35:59.199135Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1714: TOperation IsReadyToDone TxId: 102 ready parts: 1/1 2026-01-08T15:35:59.199179Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:995: Operation and all the parts is done, operation id: 102:0 2026-01-08T15:35:59.199211Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5577: RemoveTx for txid 102:0 2026-01-08T15:35:59.199397Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:611: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 41] was 3 2026-01-08T15:35:59.199432Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:611: DecrementPathDbRefCount reason remove txstate source path for pathId [OwnerId: 72057594046678944, LocalPathId: 38] was 4 2026-01-08T15:35:59.207286Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:216: TSideEffects ApplyOnComplete at tablet# 72057594046678944 2026-01-08T15:35:59.207442Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:643: Send to actor: [1:779:2746] msg type: 271124998 msg: NKikimrScheme.TEvNotifyTxCompletionResult TxId: 102 at schemeshard: 72057594046678944 2026-01-08T15:35:59.207645Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:230: tests -- TTxNotificationSubscriber for txId 102: got EvNotifyTxCompletionResult 2026-01-08T15:35:59.207721Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:239: tests -- TTxNotificationSubscriber for txId 102: satisfy waiter [1:912:2860] 2026-01-08T15:35:59.207970Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5248: StateWork, received event# 269877764, Sender [1:914:2862], Recipient [1:129:2154]: NKikimr::TEvTabletPipe::TEvServerDisconnected 2026-01-08T15:35:59.208021Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5346: StateWork, processing event TEvTabletPipe::TEvServerDisconnected 2026-01-08T15:35:59.208059Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:6215: Server pipe is reset, at schemeshard: 72057594046678944 TestWaitNotification: OK eventTxId 102 2026-01-08T15:35:59.208658Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5248: StateWork, received event# 271122945, Sender [1:983:2929], Recipient [1:129:2154]: NKikimrSchemeOp.TDescribePath Path: "/MyRoot/TableCopy" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: true } 2026-01-08T15:35:59.208717Z node 1 
:FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5260: StateWork, processing event TEvSchemeShard::TEvDescribeScheme 2026-01-08T15:35:59.208853Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/TableCopy" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: true }, at schemeshard: 72057594046678944 2026-01-08T15:35:59.209126Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/TableCopy" took 227us result status StatusSuccess 2026-01-08T15:35:59.209694Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/TableCopy" PathDescription { Self { Name: "TableCopy" PathId: 41 SchemeshardId: 72057594046678944 PathType: EPathTypeTable CreateFinished: true CreateTxId: 102 CreateStep: 5000039 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "TableCopy" Columns { Name: "key" Type: "Uint32" TypeId: 2 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "value" Type: "Utf8" TypeId: 4608 Id: 2 NotNull: false IsBuildInProgress: false } KeyColumnNames: "key" KeyColumnIds: 1 TableSchemaVersion: 1 IsBackup: false IsRestore: false } TableStats { DataSize: 0 RowCount: 0 IndexSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 0 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 1 RangeReadRows: 0 StoragePools { } ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 0 Memory: 0 Network: 0 Storage: 0 ReadThroughput: 0 WriteThroughput: 0 ReadIops: 0 WriteIops: 0 } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 40 PathsLimit: 10000 ShardsInside: 3 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 41 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 2026-01-08T15:35:59.210273Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5248: StateWork, received event# 271122945, Sender [1:984:2930], Recipient [1:129:2154]: NKikimrSchemeOp.TDescribePath Path: "/MyRoot/TableCopy/ValueIndex" Options { ReturnPartitioningInfo: 
false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: true } 2026-01-08T15:35:59.213152Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5260: StateWork, processing event TEvSchemeShard::TEvDescribeScheme 2026-01-08T15:35:59.302581Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/TableCopy/ValueIndex" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: true }, at schemeshard: 72057594046678944 2026-01-08T15:35:59.302897Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/TableCopy/ValueIndex" took 345us result status StatusPathDoesNotExist 2026-01-08T15:35:59.303139Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusPathDoesNotExist Reason: "Check failed: path: \'/MyRoot/TableCopy/ValueIndex\', error: path hasn\'t been resolved, nearest resolved path: \'/MyRoot/TableCopy\' (id: [OwnerId: 72057594046678944, LocalPathId: 41])" Path: "/MyRoot/TableCopy/ValueIndex" PathId: 18446744073709551615 LastExistedPrefixPath: "/MyRoot/TableCopy" LastExistedPrefixPathId: 41 LastExistedPrefixDescription { Self { Name: "TableCopy" PathId: 41 SchemeshardId: 72057594046678944 PathType: EPathTypeTable CreateFinished: true CreateTxId: 102 CreateStep: 5000039 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathSubType: EPathSubTypeEmpty ChildrenExist: false } } PathOwnerId: 18446744073709551615, at schemeshard: 72057594046678944 |87.9%| [TM] {BAZEL_UPLOAD} ydb/public/sdk/cpp/src/client/topic/ut/with_direct_read_ut/unittest |87.9%| [TM] {BAZEL_UPLOAD} ydb/services/ydb/table_split_ut/unittest |87.9%| [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/close_with_load/unittest |87.9%| [TM] {BAZEL_UPLOAD} ydb/core/tx/schemeshard/ut_consistent_copy_tables/unittest |87.9%| [TM] {BAZEL_UPLOAD} ydb/core/kqp/ut/close_with_load/unittest >> PgCatalog::PgType [GOOD] >> PgCatalog::InformationSchema >> TTxDataShardUploadRows::BulkUpsertDuringAddIndexRaceCorruption [GOOD] |87.9%| [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/close_with_load/unittest |87.9%| [TM] {BAZEL_UPLOAD} ydb/core/kqp/ut/close_with_load/unittest >> YdbProxy::ListDirectory [GOOD] >> YdbProxy::DropTopic >> YdbProxy::DescribePath [GOOD] >> YdbProxy::DescribeTable |87.9%| [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/close_with_load/unittest |87.9%| [TM] {BAZEL_UPLOAD} ydb/core/kqp/ut/close_with_load/unittest >> YdbProxy::AlterTable [GOOD] >> TxUsage::Write_And_Read_Small_Messages_2 [GOOD] |87.9%| [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/close_with_load/unittest |87.9%| [TM] {BAZEL_UPLOAD} ydb/core/kqp/ut/close_with_load/unittest |87.9%| [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/close_with_load/unittest |87.9%| [TM] {BAZEL_UPLOAD} ydb/core/kqp/ut/close_with_load/unittest |87.9%| [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/close_with_load/unittest >> YdbProxy::CopyTable [GOOD] >> YdbProxy::CopyTables |87.9%| [TM] {BAZEL_UPLOAD} ydb/core/kqp/ut/close_with_load/unittest ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_continuous_backup_reboots/unittest >> TContinuousBackupWithRebootsTests::TakeSeveralIncrementalBackups [GOOD] Test command err: ==== 
RunWithTabletReboots =========== RUN: Trace =========== Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:120:2058] recipient: [1:114:2144] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:120:2058] recipient: [1:114:2144] Leader for TabletID 72057594046678944 is [1:131:2155] sender: [1:132:2058] recipient: [1:114:2144] 2026-01-08T15:34:27.604478Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7938: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2026-01-08T15:34:27.604636Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7966: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2026-01-08T15:34:27.604693Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7852: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2026-01-08T15:34:27.604768Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7868: OperationsProcessing config: using default configuration 2026-01-08T15:34:27.604824Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7874: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2026-01-08T15:34:27.604856Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7874: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2026-01-08T15:34:27.604943Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7998: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2026-01-08T15:34:27.605033Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_shred_manager.cpp:39: [RootShredManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, ShredInterval# 604800.000000s, ShredBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2026-01-08T15:34:27.606169Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:8069: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2026-01-08T15:34:27.606587Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2026-01-08T15:34:27.837571Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:8101: Got new config: QueryServiceConfig { AllExternalDataSourcesAreAvailable: true } 2026-01-08T15:34:27.837697Z node 1 :IMPORT WARN: schemeshard_import.cpp:440: Table profiles were not loaded 2026-01-08T15:34:27.838853Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:8069: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2026-01-08T15:34:27.864388Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2026-01-08T15:34:27.864731Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2026-01-08T15:34:27.864936Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2026-01-08T15:34:27.882467Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2026-01-08T15:34:27.882957Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:464: Clear TempDirsState with owners number: 0 2026-01-08T15:34:27.883792Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1383: TTxInit, 
SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2026-01-08T15:34:27.884899Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2026-01-08T15:34:27.888596Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:160: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2026-01-08T15:34:27.888813Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_shred_manager.cpp:84: [RootShredManager] Stop 2026-01-08T15:34:27.890200Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2026-01-08T15:34:27.890273Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2026-01-08T15:34:27.890446Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2026-01-08T15:34:27.890653Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2026-01-08T15:34:27.890714Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:213: TTxServerlessStorageBilling.Complete 2026-01-08T15:34:27.890912Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:7111: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2026-01-08T15:34:28.225411Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:217:2217] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot" OperationType: ESchemeOpMkDir MkDir { Name: ".sys" } Internal: true FailOnExist: false } TxId: 281474976710657 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:34:28.226805Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:217:2217] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "auth_permissions" Type: EAuthPermissions } } TxId: 281474976710658 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:34:28.226987Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:217:2217] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "auth_effective_permissions" Type: EAuthEffectivePermissions } } TxId: 281474976710659 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:34:28.227064Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:217:2217] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "auth_users" Type: EAuthUsers } } TxId: 281474976710660 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:34:28.227139Z node 1 
:FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:217:2217] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "pg_tables" Type: EPgTables } } TxId: 281474976710661 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:34:28.227236Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:217:2217] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "ds_pdisks" Type: EPDisks } } TxId: 281474976710662 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:34:28.227333Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:217:2217] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "auth_group_members" Type: EAuthGroupMembers } } TxId: 281474976710663 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:34:28.227448Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:217:2217] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "top_partitions_one_minute" Type: ETopPartitionsByCpuOneMinute } } TxId: 281474976710664 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:34:28.227536Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:217:2217] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "top_queries_by_request_units_one_hour" Type: ETopQueriesByRequestUnitsOneHour } } TxId: 281474976710665 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:34:28.227625Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:217:2217] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "ds_vslots" Type: EVSlots } } TxId: 281474976710666 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:34:28.227696Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:217:2217] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "top_partitions_by_tli_one_hour" Type: ETopPartitionsByTliOneHour } } TxId: 281474976710667 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:34:28.227826Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:217:2217] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: 
"/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "nodes" Type: ENodes } } TxId: 281474976710668 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:34:28.227918Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:217:2217] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "top_queries_by_cpu_time_one_hour" Type: ETopQueriesByCpuTimeOneHour } } TxId: 281474976710669 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:34:28.227985Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:217:2217] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "top_queries_by_read_bytes_one_hour" Type: ETopQueriesByReadBytesOneHour } } TxId: 281474976710670 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:34:28.228055Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# ... 83647 LifetimeSeconds: 86400 WriteSpeedInBytesPerSecond: 1048576 BurstSize: 1048576 } TopicName: "3_continuousBackupImpl" TopicPath: "/MyRoot/Table/3_continuousBackupImpl/streamImpl" YdbDatabasePath: "/MyRoot" PartitionKeySchema { Name: "key" TypeId: 4 } MeteringMode: METERING_MODE_REQUEST_UNITS } Partitions { PartitionId: 0 TabletId: 72075186233409555 Status: Active } AlterVersion: 1 BalancerTabletID: 72075186233409556 NextPartitionId: 1 } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 43 PathsLimit: 10000 ShardsInside: 6 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 1 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 50 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 2026-01-08T15:35:58.704356Z node 16 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/IncrBackupImpl1" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: true }, at schemeshard: 72057594046678944 2026-01-08T15:35:58.704673Z node 16 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/IncrBackupImpl1" took 341us result status StatusSuccess 2026-01-08T15:35:58.705274Z node 16 :SCHEMESHARD_DESCRIBE DEBUG: 
schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/IncrBackupImpl1" PathDescription { Self { Name: "IncrBackupImpl1" PathId: 43 SchemeshardId: 72057594046678944 PathType: EPathTypeTable CreateFinished: true CreateTxId: 103 CreateStep: 5000041 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "IncrBackupImpl1" Columns { Name: "key" Type: "Uint64" TypeId: 4 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "value" Type: "Uint64" TypeId: 4 Id: 2 NotNull: false IsBuildInProgress: false } Columns { Name: "__ydb_incrBackupImpl_changeMetadata" Type: "String" TypeId: 4097 Id: 3 NotNull: false IsBuildInProgress: false } KeyColumnNames: "key" KeyColumnIds: 1 TableSchemaVersion: 1 IsBackup: false IncrementalBackupConfig { Mode: RESTORE_MODE_INCREMENTAL_BACKUP Consistency: CONSISTENCY_WEAK } IsRestore: false } TableStats { DataSize: 0 RowCount: 0 IndexSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 0 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 1 RangeReadRows: 0 StoragePools { } ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 0 Memory: 0 Network: 0 Storage: 0 ReadThroughput: 0 WriteThroughput: 0 ReadIops: 0 WriteIops: 0 } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 43 PathsLimit: 10000 ShardsInside: 6 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 1 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } UserAttributes { Key: "__incremental_backup" Value: "{}" } } PathId: 43 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 2026-01-08T15:35:58.706796Z node 16 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/IncrBackupImpl2" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: true }, at schemeshard: 72057594046678944 2026-01-08T15:35:58.707130Z node 16 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/IncrBackupImpl2" took 335us result status StatusSuccess 2026-01-08T15:35:58.707765Z node 16 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, 
result: Status: StatusSuccess Path: "/MyRoot/IncrBackupImpl2" PathDescription { Self { Name: "IncrBackupImpl2" PathId: 46 SchemeshardId: 72057594046678944 PathType: EPathTypeTable CreateFinished: true CreateTxId: 104 CreateStep: 5000042 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "IncrBackupImpl2" Columns { Name: "key" Type: "Uint64" TypeId: 4 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "value" Type: "Uint64" TypeId: 4 Id: 2 NotNull: false IsBuildInProgress: false } Columns { Name: "__ydb_incrBackupImpl_changeMetadata" Type: "String" TypeId: 4097 Id: 3 NotNull: false IsBuildInProgress: false } KeyColumnNames: "key" KeyColumnIds: 1 TableSchemaVersion: 1 IsBackup: false IncrementalBackupConfig { Mode: RESTORE_MODE_INCREMENTAL_BACKUP Consistency: CONSISTENCY_WEAK } IsRestore: false } TableStats { DataSize: 0 RowCount: 0 IndexSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 0 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 1 RangeReadRows: 0 StoragePools { } ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 0 Memory: 0 Network: 0 Storage: 0 ReadThroughput: 0 WriteThroughput: 0 ReadIops: 0 WriteIops: 0 } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 43 PathsLimit: 10000 ShardsInside: 6 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 1 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } UserAttributes { Key: "__incremental_backup" Value: "{}" } } PathId: 46 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 2026-01-08T15:35:58.709365Z node 16 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/IncrBackupImpl3" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: true }, at schemeshard: 72057594046678944 2026-01-08T15:35:58.709684Z node 16 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/IncrBackupImpl3" took 314us result status StatusSuccess 2026-01-08T15:35:58.710360Z node 16 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/IncrBackupImpl3" 
PathDescription { Self { Name: "IncrBackupImpl3" PathId: 49 SchemeshardId: 72057594046678944 PathType: EPathTypeTable CreateFinished: true CreateTxId: 105 CreateStep: 5000043 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "IncrBackupImpl3" Columns { Name: "key" Type: "Uint64" TypeId: 4 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "value" Type: "Uint64" TypeId: 4 Id: 2 NotNull: false IsBuildInProgress: false } Columns { Name: "__ydb_incrBackupImpl_changeMetadata" Type: "String" TypeId: 4097 Id: 3 NotNull: false IsBuildInProgress: false } KeyColumnNames: "key" KeyColumnIds: 1 TableSchemaVersion: 1 IsBackup: false IncrementalBackupConfig { Mode: RESTORE_MODE_INCREMENTAL_BACKUP Consistency: CONSISTENCY_WEAK } IsRestore: false } TableStats { DataSize: 0 RowCount: 0 IndexSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 0 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 1 RangeReadRows: 0 StoragePools { } ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 0 Memory: 0 Network: 0 Storage: 0 ReadThroughput: 0 WriteThroughput: 0 ReadIops: 0 WriteIops: 0 } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 43 PathsLimit: 10000 ShardsInside: 6 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 1 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } UserAttributes { Key: "__incremental_backup" Value: "{}" } } PathId: 49 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 >> YdbProxy::CreateTopic [GOOD] >> YdbProxy::DescribeConsumer >> YdbProxy::MakeDirectory [GOOD] >> YdbProxy::OAuthToken |87.9%| [TM] {BAZEL_UPLOAD} ydb/core/tx/schemeshard/ut_continuous_backup_reboots/unittest >> YdbProxy::DropTable [GOOD] >> YdbProxy::DescribeTopic |87.9%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/cms/console/ut_console/ydb-core-cms-console-ut_console |87.9%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/cms/console/ut_console/ydb-core-cms-console-ut_console |87.9%| [TA] {RESULT} $(B)/ydb/core/tx/datashard/ut_minstep/test-results/unittest/{meta.json ... results_accumulator.log} |87.9%| [TA] {RESULT} $(B)/ydb/core/tx/datashard/ut_incremental_restore_scan/test-results/unittest/{meta.json ... 
results_accumulator.log} |87.9%| [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/close_with_load/unittest >> KqpService::CloseSessionsWithLoad |87.9%| [TM] {BAZEL_UPLOAD} ydb/core/kqp/ut/close_with_load/unittest |87.9%| [TA] {RESULT} $(B)/ydb/core/tx/datashard/ut_change_collector/test-results/unittest/{meta.json ... results_accumulator.log} |87.9%| [TA] {RESULT} $(B)/ydb/core/kqp/provider/ut/test-results/unittest/{meta.json ... results_accumulator.log} |87.9%| [TA] $(B)/ydb/core/tx/schemeshard/ut_consistent_copy_tables/test-results/unittest/{meta.json ... results_accumulator.log} |88.0%| [TA] {RESULT} $(B)/ydb/core/ydb_convert/ut/test-results/unittest/{meta.json ... results_accumulator.log} |88.0%| [LD] {RESULT} $(B)/ydb/core/cms/console/ut_console/ydb-core-cms-console-ut_console >> YdbProxy::CreateTable [GOOD] >> YdbProxy::CreateCdcStream ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/replication/ydb_proxy/ut/unittest >> YdbProxy::AlterTable [GOOD] Test command err: 2026-01-08T15:35:56.465711Z node 1 :METADATA_PROVIDER WARN: log.cpp:841: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7593014944949077922:2074];send_to=[0:7307199536658146131:7762515]; 2026-01-08T15:35:56.468237Z node 1 :METADATA_PROVIDER ERROR: log.cpp:841: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2026-01-08T15:35:56.772600Z node 1 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: LookupError, path: Root/.metadata/script_executions 2026-01-08T15:35:56.783060Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2026-01-08T15:35:56.783192Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2026-01-08T15:35:56.883077Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2026-01-08T15:35:56.886517Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1374: Notification cookie mismatch for subscription [1:7593014944949077886:2081] 1767886556449392 != 1767886556449395 2026-01-08T15:35:56.891299Z node 1 :IMPORT WARN: schemeshard_import.cpp:440: Table profiles were not loaded 2026-01-08T15:35:57.032617Z node 1 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: PathErrorUnknown, path: Root/.metadata/script_executions 2026-01-08T15:35:57.374995Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2026-01-08T15:35:57.375018Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2026-01-08T15:35:57.375026Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2026-01-08T15:35:57.375111Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2026-01-08T15:35:57.478267Z node 1 :TX_CONVEYOR ERROR: log.cpp:841: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2026-01-08T15:35:57.745324Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but 
propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2026-01-08T15:35:57.754869Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:123: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2026-01-08T15:36:00.459754Z node 1 :TX_PROXY ERROR: schemereq.cpp:624: Actor# [1:7593014962128947960:2501] txid# 281474976710658, issues: { message: "Path does not exist" issue_code: 200200 severity: 1 } 2026-01-08T15:36:00.495653Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:36:00.667443Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_alter_table.cpp:187) 2026-01-08T15:36:00.705046Z node 1 :TX_PROXY ERROR: schemereq.cpp:624: Actor# [1:7593014962128948077:2582] txid# 281474976710661, issues: { message: "Can\'t drop unknown column: \'extra\'" severity: 1 } |87.9%| [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/close_with_load/unittest >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-LocalUser-DropUser-69 [FAIL] >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-LocalUser-DropUser-70 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/datashard/ut_upload_rows/unittest >> TTxDataShardUploadRows::BulkUpsertDuringAddIndexRaceCorruption [GOOD] Test command err: ... waiting for SysViewsRoster update finished 2026-01-08T15:35:40.620760Z node 1 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:125: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, got undelivered to scheme cache: ActorUnknown 2026-01-08T15:35:40.755804Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:114:2161], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2026-01-08T15:35:40.756184Z node 1 :METADATA_PROVIDER ERROR: log.cpp:841: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2026-01-08T15:35:40.756383Z node 1 :METADATA_PROVIDER ERROR: log.cpp:841: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2026-01-08T15:35:41.111146Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2026-01-08T15:35:41.111275Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2026-01-08T15:35:41.209827Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1374: Notification cookie mismatch for subscription [1:34:2081] 1767886537456316 != 1767886537456320 2026-01-08T15:35:41.220632Z node 1 :IMPORT WARN: schemeshard_import.cpp:440: Table profiles were not loaded 2026-01-08T15:35:41.283637Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2026-01-08T15:35:41.390907Z node 1 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: PathErrorUnknown, path: Root/.metadata/script_executions ... waiting for SysViewsRoster update finished (done) 2026-01-08T15:35:41.751546Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2026-01-08T15:35:41.765963Z node 1 :TX_CONVEYOR ERROR: log.cpp:841: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2026-01-08T15:35:41.881259Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:35:41.931164Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3119: StateInit, received event# 268828672, Sender [1:877:2759], Recipient [1:892:2768]: NKikimr::TEvTablet::TEvBoot 2026-01-08T15:35:41.932444Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3119: StateInit, received event# 268828673, Sender [1:877:2759], Recipient [1:892:2768]: NKikimr::TEvTablet::TEvRestored 2026-01-08T15:35:41.932739Z node 1 :TX_DATASHARD INFO: datashard.cpp:374: TDataShard::OnActivateExecutor: tablet 72075186224037888 actor [1:892:2768] 2026-01-08T15:35:41.933017Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:661: TxInitSchema.Execute 2026-01-08T15:35:41.989826Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateInactive, received event# 268828684, Sender [1:877:2759], Recipient [1:892:2768]: NKikimr::TEvTablet::TEvFollowerSyncComplete 2026-01-08T15:35:41.990285Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3119: StateInit, received event# 268828672, Sender [1:878:2760], Recipient [1:894:2770]: 
NKikimr::TEvTablet::TEvBoot 2026-01-08T15:35:41.992593Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:727: TxInitSchema.Complete 2026-01-08T15:35:41.992734Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:50: TDataShard::TTxInit::Execute 2026-01-08T15:35:41.994434Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1324: LoadChangeRecords: QueueSize: 0, at tablet: 72075186224037888 2026-01-08T15:35:41.994525Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1380: LoadLockChangeRecords at tablet: 72075186224037888 2026-01-08T15:35:41.994580Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1429: LoadChangeRecordCommits at tablet: 72075186224037888 2026-01-08T15:35:41.994965Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:94: TDataShard::TTxInit::Complete 2026-01-08T15:35:41.995248Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:102: TDataShard::TTxInitRestored::Execute 2026-01-08T15:35:41.995313Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:142: DataShard 72075186224037888 persisting started state actor id [1:918:2768] in generation 1 2026-01-08T15:35:41.995553Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3119: StateInit, received event# 268828673, Sender [1:878:2760], Recipient [1:894:2770]: NKikimr::TEvTablet::TEvRestored 2026-01-08T15:35:41.995850Z node 1 :TX_DATASHARD INFO: datashard.cpp:374: TDataShard::OnActivateExecutor: tablet 72075186224037889 actor [1:894:2770] 2026-01-08T15:35:41.996061Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:661: TxInitSchema.Execute 2026-01-08T15:35:42.004339Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateInactive, received event# 268828684, Sender [1:878:2760], Recipient [1:894:2770]: NKikimr::TEvTablet::TEvFollowerSyncComplete 2026-01-08T15:35:42.005098Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:727: TxInitSchema.Complete 2026-01-08T15:35:42.005203Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:50: TDataShard::TTxInit::Execute 2026-01-08T15:35:42.006719Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1324: LoadChangeRecords: QueueSize: 0, at tablet: 72075186224037889 2026-01-08T15:35:42.006790Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1380: LoadLockChangeRecords at tablet: 72075186224037889 2026-01-08T15:35:42.006844Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1429: LoadChangeRecordCommits at tablet: 72075186224037889 2026-01-08T15:35:42.007154Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:94: TDataShard::TTxInit::Complete 2026-01-08T15:35:42.007277Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:102: TDataShard::TTxInitRestored::Execute 2026-01-08T15:35:42.007349Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:142: DataShard 72075186224037889 persisting started state actor id [1:924:2770] in generation 1 2026-01-08T15:35:42.018306Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:151: TDataShard::TTxInitRestored::Complete 2026-01-08T15:35:42.058158Z node 1 :TX_DATASHARD INFO: datashard.cpp:418: Switched to work state WaitScheme tabletId 72075186224037888 2026-01-08T15:35:42.058391Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:458: 72075186224037888 not sending time cast registration request in state WaitScheme: missing processing params 2026-01-08T15:35:42.058502Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1259: Change sender created: at tablet: 72075186224037888, actorId: [1:927:2789] 2026-01-08T15:35:42.058548Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1264: Trying to activate change sender: at tablet: 72075186224037888 2026-01-08T15:35:42.058584Z node 1 :TX_DATASHARD INFO: datashard.cpp:1281: Cannot activate change sender: at tablet: 
72075186224037888, state: WaitScheme 2026-01-08T15:35:42.058622Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2026-01-08T15:35:42.058960Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3153: StateWork, received event# 2146435072, Sender [1:892:2768], Recipient [1:892:2768]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvProgressTransaction 2026-01-08T15:35:42.059009Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3178: StateWork, processing event TEvPrivate::TEvProgressTransaction 2026-01-08T15:35:42.059125Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:151: TDataShard::TTxInitRestored::Complete 2026-01-08T15:35:42.059174Z node 1 :TX_DATASHARD INFO: datashard.cpp:418: Switched to work state WaitScheme tabletId 72075186224037889 2026-01-08T15:35:42.059249Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:458: 72075186224037889 not sending time cast registration request in state WaitScheme: missing processing params 2026-01-08T15:35:42.059307Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1259: Change sender created: at tablet: 72075186224037889, actorId: [1:928:2790] 2026-01-08T15:35:42.059334Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1264: Trying to activate change sender: at tablet: 72075186224037889 2026-01-08T15:35:42.059357Z node 1 :TX_DATASHARD INFO: datashard.cpp:1281: Cannot activate change sender: at tablet: 72075186224037889, state: WaitScheme 2026-01-08T15:35:42.059380Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037889 2026-01-08T15:35:42.059605Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3153: StateWork, received event# 2146435072, Sender [1:894:2770], Recipient [1:894:2770]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvProgressTransaction 2026-01-08T15:35:42.059653Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3178: StateWork, processing event TEvPrivate::TEvProgressTransaction 2026-01-08T15:35:42.059935Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 72075186224037888 2026-01-08T15:35:42.060060Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 72075186224037888 2026-01-08T15:35:42.060277Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2026-01-08T15:35:42.060323Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 0 2026-01-08T15:35:42.060360Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:385: Check unit PlanQueue at 72075186224037888 2026-01-08T15:35:42.060436Z node 1 :TX_DATASHARD TRACE: plan_queue_unit.cpp:59: TPlanQueueUnit at 72075186224037888 has no attached operations 2026-01-08T15:35:42.060471Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:403: Unit PlanQueue has no ready operations at 72075186224037888 2026-01-08T15:35:42.060501Z node 1 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186224037888 TxInFly 0 2026-01-08T15:35:42.060544Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2026-01-08T15:35:42.060599Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 72075186224037889 2026-01-08T15:35:42.060654Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to 
remove in 72075186224037889 2026-01-08T15:35:42.061205Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3153: StateWork, received event# 269877761, Sender [1:897:2771], Recipient [1:892:2768]: NKikimr::TEvTabletPipe::TEvServerConnected 2026-01-08T15:35:42.061255Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3189: StateWork, processing event TEvTabletPipe::TEvServerConnected 2026-01-08T15:35:42.061300Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3740: Server connected at leader tablet# 72075186224037888, clientId# [1:885:2764], serverId# [1:897:2771], sessionId# [0:0:0] 2026-01-08T15:35:42.061375Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037889 2026-01-08T15:35:42.061417Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037889 active 0 active planned 0 immediate 0 planned 0 2026-01-08T15:35:42.061446 ... on plan for [0:10] at 72075186224037888 executing on unit CompletedOperations 2026-01-08T15:35:59.616124Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1943: Execution plan for [0:10] at 72075186224037888 has finished 2026-01-08T15:35:59.629746Z node 3 :TX_DATASHARD INFO: datashard__op_rows.cpp:81: TTxDirectBase(36) Complete: at tablet# 72075186224037888 2026-01-08T15:35:59.629819Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1954: Complete execution for [0:10] at 72075186224037888 on unit DirectOp 2026-01-08T15:35:59.629906Z node 3 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 ... bulk upsert finished with status GENERIC_ERROR 2026-01-08T15:36:00.279934Z node 3 :TX_PROXY DEBUG: proxy_impl.cpp:352: actor# [3:68:2115] Handle TEvExecuteKqpTransaction 2026-01-08T15:36:00.280023Z node 3 :TX_PROXY DEBUG: proxy_impl.cpp:341: actor# [3:68:2115] TxId# 281474976715662 ProcessProposeKqpTransaction 2026-01-08T15:36:00.304452Z node 3 :TX_DATASHARD TRACE: datashard_impl.h:3153: StateWork, received event# 269553215, Sender [3:1401:3185], Recipient [3:889:2769]: NKikimrTxDataShard.TEvRead ReadId: 0 TableId { OwnerId: 72057594046644480 TableId: 38 SchemaVersion: 3 } Columns: 1 Columns: 2 ResultFormat: FORMAT_CELLVEC MaxRows: 1001 MaxBytes: 5242880 Reverse: false TotalRowsLimit: 1001 RangesSize: 1 2026-01-08T15:36:00.304656Z node 3 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2697: TTxReadViaPipeline execute: at tablet# 72075186224037888, FollowerId 0 2026-01-08T15:36:00.304724Z node 3 :TX_DATASHARD TRACE: datashard.cpp:2377: GetMvccTxVersion at 72075186224037888 CompleteEdge# v23000/0 IncompleteEdge# v{min} UnprotectedReadEdge# v{min} ImmediateWriteEdge# v23000/18446744073709551615 ImmediateWriteEdgeReplied# v23000/18446744073709551615 2026-01-08T15:36:00.304779Z node 3 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2800: 72075186224037888 changed HEAD read to non-repeatable v23000/18446744073709551615 2026-01-08T15:36:00.304882Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1848: Trying to execute [0:11] at 72075186224037888 on unit CheckRead 2026-01-08T15:36:00.305001Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1883: Execution status for [0:11] at 72075186224037888 is Executed 2026-01-08T15:36:00.305055Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1931: Advance execution plan for [0:11] at 72075186224037888 executing on unit CheckRead 2026-01-08T15:36:00.305100Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1937: Add [0:11] at 72075186224037888 to execution unit BuildAndWaitDependencies 
2026-01-08T15:36:00.305138Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1848: Trying to execute [0:11] at 72075186224037888 on unit BuildAndWaitDependencies 2026-01-08T15:36:00.305196Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:483: Activated operation [0:11] at 72075186224037888 2026-01-08T15:36:00.305240Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1883: Execution status for [0:11] at 72075186224037888 is Executed 2026-01-08T15:36:00.305268Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1931: Advance execution plan for [0:11] at 72075186224037888 executing on unit BuildAndWaitDependencies 2026-01-08T15:36:00.305291Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1937: Add [0:11] at 72075186224037888 to execution unit ExecuteRead 2026-01-08T15:36:00.305317Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1848: Trying to execute [0:11] at 72075186224037888 on unit ExecuteRead 2026-01-08T15:36:00.305444Z node 3 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:1748: 72075186224037888 Execute read# 1, request: { ReadId: 0 TableId { OwnerId: 72057594046644480 TableId: 38 SchemaVersion: 3 } Columns: 1 Columns: 2 ResultFormat: FORMAT_CELLVEC MaxRows: 1001 MaxBytes: 5242880 Reverse: false TotalRowsLimit: 1001 } 2026-01-08T15:36:00.305743Z node 3 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2396: 72075186224037888 Complete read# {[3:1401:3185], 0} after executionsCount# 1 2026-01-08T15:36:00.305823Z node 3 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2370: 72075186224037888 read iterator# {[3:1401:3185], 0} sends rowCount# 2, bytes# 64, quota rows left# 999, quota bytes left# 5242816, hasUnreadQueries# 0, total queries# 1, firstUnprocessed# 0 2026-01-08T15:36:00.305954Z node 3 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2421: 72075186224037888 read iterator# {[3:1401:3185], 0} finished in read 2026-01-08T15:36:00.306030Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1883: Execution status for [0:11] at 72075186224037888 is Executed 2026-01-08T15:36:00.306059Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1931: Advance execution plan for [0:11] at 72075186224037888 executing on unit ExecuteRead 2026-01-08T15:36:00.306090Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1937: Add [0:11] at 72075186224037888 to execution unit CompletedOperations 2026-01-08T15:36:00.306118Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1848: Trying to execute [0:11] at 72075186224037888 on unit CompletedOperations 2026-01-08T15:36:00.306166Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1883: Execution status for [0:11] at 72075186224037888 is Executed 2026-01-08T15:36:00.306191Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1931: Advance execution plan for [0:11] at 72075186224037888 executing on unit CompletedOperations 2026-01-08T15:36:00.306228Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1943: Execution plan for [0:11] at 72075186224037888 has finished 2026-01-08T15:36:00.306275Z node 3 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2932: TTxReadViaPipeline(69) Execute with status# Executed at tablet# 72075186224037888 2026-01-08T15:36:00.318565Z node 3 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2981: TTxReadViaPipeline(69) Complete: at tablet# 72075186224037888 2026-01-08T15:36:00.320595Z node 3 :TX_DATASHARD TRACE: datashard_impl.h:3153: StateWork, received event# 269553219, Sender [3:1401:3185], Recipient [3:889:2769]: NKikimrTxDataShard.TEvReadCancel ReadId: 0 2026-01-08T15:36:00.320691Z node 3 :TX_DATASHARD TRACE: 
datashard__read_iterator.cpp:3668: 72075186224037888 ReadCancel: { ReadId: 0 } { items { uint32_value: 1 } items { uint32_value: 2 } }, { items { uint32_value: 3 } items { uint32_value: 4 } } 2026-01-08T15:36:00.654568Z node 3 :TX_PROXY DEBUG: proxy_impl.cpp:352: actor# [3:68:2115] Handle TEvExecuteKqpTransaction 2026-01-08T15:36:00.654662Z node 3 :TX_PROXY DEBUG: proxy_impl.cpp:341: actor# [3:68:2115] TxId# 281474976715663 ProcessProposeKqpTransaction 2026-01-08T15:36:00.661601Z node 3 :TX_DATASHARD TRACE: datashard_impl.h:3153: StateWork, received event# 269553215, Sender [3:1429:3209], Recipient [3:1114:2930]: NKikimrTxDataShard.TEvRead ReadId: 0 TableId { OwnerId: 72057594046644480 TableId: 44 SchemaVersion: 2 } Columns: 2 Columns: 1 ResultFormat: FORMAT_CELLVEC MaxRows: 32767 MaxBytes: 5242880 Reverse: false RangesSize: 1 2026-01-08T15:36:00.661840Z node 3 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2697: TTxReadViaPipeline execute: at tablet# 72075186224037889, FollowerId 0 2026-01-08T15:36:00.661925Z node 3 :TX_DATASHARD TRACE: datashard.cpp:2377: GetMvccTxVersion at 72075186224037889 CompleteEdge# v8000/281474976710759 IncompleteEdge# v{min} UnprotectedReadEdge# v{min} ImmediateWriteEdge# v7000/18446744073709551615 ImmediateWriteEdgeReplied# v7000/18446744073709551615 2026-01-08T15:36:00.661998Z node 3 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2800: 72075186224037889 changed HEAD read to non-repeatable v23000/18446744073709551615 2026-01-08T15:36:00.662121Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1848: Trying to execute [0:4] at 72075186224037889 on unit CheckRead 2026-01-08T15:36:00.662261Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1883: Execution status for [0:4] at 72075186224037889 is Executed 2026-01-08T15:36:00.662336Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1931: Advance execution plan for [0:4] at 72075186224037889 executing on unit CheckRead 2026-01-08T15:36:00.662417Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1937: Add [0:4] at 72075186224037889 to execution unit BuildAndWaitDependencies 2026-01-08T15:36:00.662466Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1848: Trying to execute [0:4] at 72075186224037889 on unit BuildAndWaitDependencies 2026-01-08T15:36:00.662527Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:483: Activated operation [0:4] at 72075186224037889 2026-01-08T15:36:00.662585Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1883: Execution status for [0:4] at 72075186224037889 is Executed 2026-01-08T15:36:00.662615Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1931: Advance execution plan for [0:4] at 72075186224037889 executing on unit BuildAndWaitDependencies 2026-01-08T15:36:00.662638Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1937: Add [0:4] at 72075186224037889 to execution unit ExecuteRead 2026-01-08T15:36:00.662664Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1848: Trying to execute [0:4] at 72075186224037889 on unit ExecuteRead 2026-01-08T15:36:00.662807Z node 3 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:1748: 72075186224037889 Execute read# 1, request: { ReadId: 0 TableId { OwnerId: 72057594046644480 TableId: 44 SchemaVersion: 2 } Columns: 2 Columns: 1 ResultFormat: FORMAT_CELLVEC MaxRows: 32767 MaxBytes: 5242880 Reverse: false } 2026-01-08T15:36:00.663208Z node 3 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2396: 72075186224037889 Complete read# {[3:1429:3209], 0} after executionsCount# 1 2026-01-08T15:36:00.663312Z node 3 :TX_DATASHARD TRACE: 
datashard__read_iterator.cpp:2370: 72075186224037889 read iterator# {[3:1429:3209], 0} sends rowCount# 2, bytes# 64, quota rows left# 32765, quota bytes left# 5242816, hasUnreadQueries# 0, total queries# 1, firstUnprocessed# 0 2026-01-08T15:36:00.663484Z node 3 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2421: 72075186224037889 read iterator# {[3:1429:3209], 0} finished in read 2026-01-08T15:36:00.663572Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1883: Execution status for [0:4] at 72075186224037889 is Executed 2026-01-08T15:36:00.663607Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1931: Advance execution plan for [0:4] at 72075186224037889 executing on unit ExecuteRead 2026-01-08T15:36:00.663637Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1937: Add [0:4] at 72075186224037889 to execution unit CompletedOperations 2026-01-08T15:36:00.663685Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1848: Trying to execute [0:4] at 72075186224037889 on unit CompletedOperations 2026-01-08T15:36:00.663742Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1883: Execution status for [0:4] at 72075186224037889 is Executed 2026-01-08T15:36:00.663765Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1931: Advance execution plan for [0:4] at 72075186224037889 executing on unit CompletedOperations 2026-01-08T15:36:00.663795Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1943: Execution plan for [0:4] at 72075186224037889 has finished 2026-01-08T15:36:00.663852Z node 3 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2932: TTxReadViaPipeline(69) Execute with status# Executed at tablet# 72075186224037889 2026-01-08T15:36:00.663993Z node 3 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2981: TTxReadViaPipeline(69) Complete: at tablet# 72075186224037889 2026-01-08T15:36:00.665577Z node 3 :TX_DATASHARD TRACE: datashard_impl.h:3153: StateWork, received event# 269553219, Sender [3:1429:3209], Recipient [3:1114:2930]: NKikimrTxDataShard.TEvReadCancel ReadId: 0 2026-01-08T15:36:00.665651Z node 3 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:3668: 72075186224037889 ReadCancel: { ReadId: 0 } { items { uint32_value: 1 } items { uint32_value: 2 } }, { items { uint32_value: 3 } items { uint32_value: 4 } } |87.9%| [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/close_with_load/unittest |87.9%| [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/close_with_load/unittest |87.9%| [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/close_with_load/unittest |87.9%| [TM] {BAZEL_UPLOAD} ydb/core/kqp/ut/close_with_load/unittest |87.9%| [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/close_with_load/unittest |87.9%| [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/close_with_load/unittest |87.9%| [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/close_with_load/unittest |87.9%| [TM] {BAZEL_UPLOAD} ydb/core/tx/replication/ydb_proxy/ut/unittest |87.9%| [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/close_with_load/unittest |87.9%| [TA] {RESULT} $(B)/ydb/core/tx/schemeshard/ut_consistent_copy_tables/test-results/unittest/{meta.json ... results_accumulator.log} |87.9%| [TA] {BAZEL_UPLOAD} $(B)/ydb/core/tx/schemeshard/ut_consistent_copy_tables/test-results/unittest/{meta.json ... 
results_accumulator.log} |87.9%| [TM] {BAZEL_UPLOAD} ydb/core/kqp/ut/close_with_load/unittest |87.9%| [TM] {BAZEL_UPLOAD} ydb/core/tx/datashard/ut_upload_rows/unittest ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/datashard/build_index/ut/unittest >> TTxDataShardValidateUniqueIndexScan::RunScan 2026-01-08 15:35:50,846 WARNING devtools.ya.test.programs.test_tool.run_test.run_test: Wrapper execution timed out 2026-01-08 15:35:51,046 WARNING devtools.ya.test.programs.test_tool.run_test.run_test: Wrapper has overrun 600 secs timeout. Process tree before termination: pid rss ref pdirt 53079 58.8M 58.2M 32.8M test_tool run_ut @/home/runner/.ya/build/build_root/663n/003a27/ydb/core/tx/datashard/build_index/ut/test-results/unittest/testing_out_stuff/test_tool.args 53205 1.8G 1.8G 1.8G └─ ydb-core-tx-datashard-build_index-ut --trace-path-append /home/runner/.ya/build/build_root/663n/003a27/ydb/core/tx/datashard/build_index/ut/test-results/unittest/ytest.re Test command err: 2026-01-08T15:26:00.896537Z node 1 :METADATA_PROVIDER WARN: log.cpp:841: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7593012383289092362:2142];send_to=[0:7307199536658146131:7762515]; 2026-01-08T15:26:00.897599Z node 1 :METADATA_PROVIDER ERROR: log.cpp:841: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2026-01-08T15:26:01.156497Z node 1 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:125: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, got undelivered to scheme cache: ActorUnknown 2026-01-08T15:26:01.626272Z node 1 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: LookupError, path: Root/.metadata/script_executions 2026-01-08T15:26:01.633549Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2026-01-08T15:26:01.633676Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2026-01-08T15:26:01.690919Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1374: Notification cookie mismatch for subscription [1:7593012383289092257:2081] 1767885960835971 != 1767885960835974 2026-01-08T15:26:01.725564Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2026-01-08T15:26:01.729347Z node 1 :IMPORT WARN: schemeshard_import.cpp:440: Table profiles were not loaded 2026-01-08T15:26:01.801249Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2026-01-08T15:26:01.825686Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:26:01.847297Z node 1 :TX_DATASHARD INFO: datashard.cpp:374: TDataShard::OnActivateExecutor: tablet 
72075186224037888 actor [1:7593012387584060334:2283] 2026-01-08T15:26:01.847564Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:661: TxInitSchema.Execute 2026-01-08T15:26:01.861835Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:727: TxInitSchema.Complete 2026-01-08T15:26:01.861934Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:50: TDataShard::TTxInit::Execute 2026-01-08T15:26:01.863897Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1324: LoadChangeRecords: QueueSize: 0, at tablet: 72075186224037888 2026-01-08T15:26:01.863942Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1380: LoadLockChangeRecords at tablet: 72075186224037888 2026-01-08T15:26:01.863983Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1429: LoadChangeRecordCommits at tablet: 72075186224037888 2026-01-08T15:26:01.865048Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:94: TDataShard::TTxInit::Complete 2026-01-08T15:26:01.865172Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:102: TDataShard::TTxInitRestored::Execute 2026-01-08T15:26:01.865227Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:142: DataShard 72075186224037888 persisting started state actor id [1:7593012387584060349:2283] in generation 1 2026-01-08T15:26:01.866889Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:151: TDataShard::TTxInitRestored::Complete 2026-01-08T15:26:01.905116Z node 1 :TX_DATASHARD INFO: datashard.cpp:418: Switched to work state WaitScheme tabletId 72075186224037888 2026-01-08T15:26:01.905241Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:458: 72075186224037888 not sending time cast registration request in state WaitScheme: missing processing params 2026-01-08T15:26:01.905276Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1259: Change sender created: at tablet: 72075186224037888, actorId: [1:7593012387584060351:2284] 2026-01-08T15:26:01.905288Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1264: Trying to activate change sender: at tablet: 72075186224037888 2026-01-08T15:26:01.905295Z node 1 :TX_DATASHARD INFO: datashard.cpp:1281: Cannot activate change sender: at tablet: 72075186224037888, state: WaitScheme 2026-01-08T15:26:01.905302Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2026-01-08T15:26:01.905791Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 72075186224037888 2026-01-08T15:26:01.905868Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 72075186224037888 2026-01-08T15:26:01.905907Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2026-01-08T15:26:01.905935Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 0 2026-01-08T15:26:01.905953Z node 1 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186224037888 TxInFly 0 2026-01-08T15:26:01.905983Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2026-01-08T15:26:01.906019Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3740: Server connected at leader tablet# 72075186224037888, clientId# [1:7593012387584060332:2494], serverId# [1:7593012387584060335:2495], sessionId# [0:0:0] 2026-01-08T15:26:01.906087Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037888 2026-01-08T15:26:01.906954Z node 1 :TX_DATASHARD DEBUG: 
check_scheme_tx_unit.cpp:133: Propose scheme transaction at tablet 72075186224037888 txId 281474976710657 ssId 72057594046644480 seqNo 2:1 2026-01-08T15:26:01.907029Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:221: Prepared scheme transaction txId 281474976710657 at tablet 72075186224037888 2026-01-08T15:26:01.907546Z node 1 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: PathErrorUnknown, path: Root/.metadata/script_executions 2026-01-08T15:26:01.908477Z node 1 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:180: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2026-01-08T15:26:01.909957Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 72075186224037888 2026-01-08T15:26:01.910034Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:468: 72075186224037888 not sending time cast registration request in state WaitScheme 2026-01-08T15:26:01.911736Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3740: Server connected at leader tablet# 72075186224037888, clientId# [1:7593012387584060396:2517], serverId# [1:7593012387584060397:2518], sessionId# [0:0:0] 2026-01-08T15:26:01.916301Z node 1 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:69: Planned transaction txId 281474976710657 at step 1767885961961 at tablet 72075186224037888 { Transactions { TxId: 281474976710657 AckTo { RawX1: 0 RawX2: 0 } } Step: 1767885961961 MediatorID: 72057594046382081 TabletID: 72075186224037888 } 2026-01-08T15:26:01.916321Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2026-01-08T15:26:01.916431Z node 1 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:180: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2026-01-08T15:26:01.916489Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2026-01-08T15:26:01.916503Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 1 2026-01-08T15:26:01.916520Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:333: Found ready operation [1767885961961:281474976710657] in PlanQueue unit at 72075186224037888 2026-01-08T15:26:01.916699Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:634: LoadTxDetails at 72075186224037888 loaded tx from db 1767885961961:281474976710657 keys extracted: 0 2026-01-08T15:26:01.916799Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 (dry run) active 1 active planned 1 immediate 0 planned 1 2026-01-08T15:26:01.916884Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2026-01-08T15:26:01.916907Z node 1 :TX_DATASHARD INFO: create_table_unit.cpp:69: Trying to CREATE TABLE at 72075186224037888 tableId# [OwnerId: 72057594046644480, LocalPathId: 38] schema version# 1 2026-01-08T15:26:01.919884Z node 1 :TX_DATASHARD INFO: datashard.cpp:476: Send registration request to time cast Ready tabletId 72075186224037888 mediators count is 1 coordinators count is 1 buckets per mediator 2 2026-01-08T15:26:01.920365Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 (dry run) active 0 active planned 0 
immediate 0 planned 0 2026-01-08T15:26:01.922562Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3772: Got TEvMediatorTimecast::TEvRegisterTabletResult at 72075186224037888 time 1767885961960 2026-01-08T15:26:01.922606Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2026-01-08T15:26:01.922646Z node 1 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:98: Sending '{TEvPlanStepAccepted TabletId# 72075186224037888 step# 1767885961961} 2026-01-08T15:26:01.922683Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2026-01-08T15:26:01.922706Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2026-01-08T15:26:01.922720Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1264: Trying to activate change sender: at tablet: 72075186224037888 2026-01-08T15:26:01.922748Z node 1 :TX_DATASHARD INFO: datashard.cpp:1302: Change sender activated: at tablet: 72075186224037888 2026-01-08T15:26:01.922787Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:810: Complete [1767885961961 : 281474976710657] from 72075186224037888 at tablet 72075186224037888 send result to client [1:7593012387584059907:2149], exec latency: 3 ms, propose latency: 5 ms 2026-01-08T15:26:01.922831Z node 1 :TX_DATASHARD INFO: datashard.cpp:1599: 72075186224037888 Sending notify to schemeshard 72057594046644480 txId 281474976710657 state Ready TxInFly 0 2026-01-08T15:26:01.922861Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2026-01-08T15:26:01.922958Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3790: Got TEvMediatorTimecast::TEvSubscribeReadStepResult at 72075186224037888 coordinator 72057594046316545 last step 0 next step 1767885961968 2026-01-08T15:26:01.928328Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:2965: Handle TEvSchemaChangedResult 281474976710657 datashard 72075186224037888 state Ready 2026-01-08T15:26:01.928386Z node 1 :TX_DATASHARD DEBUG: datashard__schema_changed.cpp:22: 72075186224037888 Got TEvSchemaChangedResult from SS at 72075186224037888 2026-01-08T15:26:01.966296Z node 1 :TX_CONVEYOR ERROR: log.cpp:841: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvent ... 
.cpp:119: TTxProgressTransaction::Complete at 72075186224037889 2026-01-08T15:35:50.139122Z node 50 :TX_DATASHARD DEBUG: datashard.cpp:3740: Server connected at leader tablet# 72075186224037889, clientId# [50:1117:2927], serverId# [50:1123:2930], sessionId# [0:0:0] 2026-01-08T15:35:50.139436Z node 50 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037889 2026-01-08T15:35:50.139966Z node 50 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:133: Propose scheme transaction at tablet 72075186224037889 txId 281474976715662 ssId 72057594046644480 seqNo 2:2 2026-01-08T15:35:50.140110Z node 50 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:221: Prepared scheme transaction txId 281474976715662 at tablet 72075186224037889 2026-01-08T15:35:50.141144Z node 50 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:180: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037889 2026-01-08T15:35:50.155119Z node 50 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 72075186224037889 2026-01-08T15:35:50.155316Z node 50 :TX_DATASHARD DEBUG: datashard.cpp:468: 72075186224037889 not sending time cast registration request in state WaitScheme 2026-01-08T15:35:50.294056Z node 50 :TX_DATASHARD DEBUG: datashard.cpp:3740: Server connected at leader tablet# 72075186224037889, clientId# [50:1144:2945], serverId# [50:1146:2947], sessionId# [0:0:0] 2026-01-08T15:35:50.296777Z node 50 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:69: Planned transaction txId 281474976715662 at step 3000 at tablet 72075186224037889 { Transactions { TxId: 281474976715662 AckTo { RawX1: 0 RawX2: 0 } } Step: 3000 MediatorID: 72057594046382081 TabletID: 72075186224037889 } 2026-01-08T15:35:50.296906Z node 50 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037889 2026-01-08T15:35:50.297237Z node 50 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037889 2026-01-08T15:35:50.297353Z node 50 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037889 active 0 active planned 0 immediate 0 planned 1 2026-01-08T15:35:50.297452Z node 50 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:333: Found ready operation [3000:281474976715662] in PlanQueue unit at 72075186224037889 2026-01-08T15:35:50.298074Z node 50 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:634: LoadTxDetails at 72075186224037889 loaded tx from db 3000:281474976715662 keys extracted: 0 2026-01-08T15:35:50.308888Z node 50 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037889 (dry run) active 1 active planned 1 immediate 0 planned 1 2026-01-08T15:35:50.310490Z node 50 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037889 2026-01-08T15:35:50.310637Z node 50 :TX_DATASHARD INFO: create_table_unit.cpp:69: Trying to CREATE TABLE at 72075186224037889 tableId# [OwnerId: 72057594046644480, LocalPathId: 43] schema version# 1 2026-01-08T15:35:50.311766Z node 50 :TX_DATASHARD INFO: datashard.cpp:476: Send registration request to time cast Ready tabletId 72075186224037889 mediators count is 1 coordinators count is 1 buckets per mediator 2 2026-01-08T15:35:50.312591Z node 50 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037889 (dry run) active 0 active planned 0 immediate 0 planned 0 
2026-01-08T15:35:50.315241Z node 50 :TX_DATASHARD DEBUG: datashard.cpp:3772: Got TEvMediatorTimecast::TEvRegisterTabletResult at 72075186224037889 time 0 2026-01-08T15:35:50.315359Z node 50 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037889 2026-01-08T15:35:50.316729Z node 50 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:98: Sending '{TEvPlanStepAccepted TabletId# 72075186224037889 step# 3000} 2026-01-08T15:35:50.316884Z node 50 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037889 2026-01-08T15:35:50.319458Z node 50 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037889 2026-01-08T15:35:50.319541Z node 50 :TX_DATASHARD DEBUG: datashard.cpp:1264: Trying to activate change sender: at tablet: 72075186224037889 2026-01-08T15:35:50.319684Z node 50 :TX_DATASHARD INFO: datashard.cpp:1302: Change sender activated: at tablet: 72075186224037889 2026-01-08T15:35:50.319840Z node 50 :TX_DATASHARD DEBUG: datashard.cpp:810: Complete [3000 : 281474976715662] from 72075186224037889 at tablet 72075186224037889 send result to client [50:399:2398], exec latency: 0 ms, propose latency: 0 ms 2026-01-08T15:35:50.319953Z node 50 :TX_DATASHARD INFO: datashard.cpp:1599: 72075186224037889 Sending notify to schemeshard 72057594046644480 txId 281474976715662 state Ready TxInFly 0 2026-01-08T15:35:50.320161Z node 50 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037889 2026-01-08T15:35:50.323234Z node 50 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:180: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2026-01-08T15:35:50.323551Z node 50 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:180: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037889 2026-01-08T15:35:50.325358Z node 50 :TX_DATASHARD DEBUG: datashard.cpp:3790: Got TEvMediatorTimecast::TEvSubscribeReadStepResult at 72075186224037889 coordinator 72057594046316545 last step 0 next step 3000 2026-01-08T15:35:50.325911Z node 50 :TX_DATASHARD DEBUG: datashard.cpp:2965: Handle TEvSchemaChangedResult 281474976715662 datashard 72075186224037889 state Ready 2026-01-08T15:35:50.326030Z node 50 :TX_DATASHARD DEBUG: datashard__schema_changed.cpp:22: 72075186224037889 Got TEvSchemaChangedResult from SS at 72075186224037889 2026-01-08T15:35:50.340139Z node 50 :TX_DATASHARD DEBUG: datashard.cpp:3740: Server connected at leader tablet# 72075186224037889, clientId# [50:1172:2967], serverId# [50:1173:2968], sessionId# [0:0:0] 2026-01-08T15:35:50.340616Z node 50 :BUILD_INDEX NOTICE: unique_index.cpp:246: Starting TValidateUniqueIndexScan TabletId: 72075186224037889 Id: 1 TabletId: 72075186224037889 OwnerId: 72057594046644480 PathId: 43 IndexColumns: "key_part1" IndexColumns: "key_part2" SeqNoGeneration: 42 SeqNoRound: 45 2026-01-08T15:35:50.340792Z node 50 :BUILD_INDEX INFO: unique_index.cpp:51: Create TValidateUniqueIndexScan Id: 1 Status: INVALID Issues: 2026-01-08T15:35:50.341756Z node 50 :BUILD_INDEX INFO: unique_index.cpp:58: Prepare TValidateUniqueIndexScan Id: 1 Status: INVALID Issues: 2026-01-08T15:35:50.341905Z node 50 :BUILD_INDEX TRACE: unique_index.cpp:63: Seek 0 TValidateUniqueIndexScan Id: 1 Status: INVALID Issues: 2026-01-08T15:35:50.342077Z node 50 :BUILD_INDEX TRACE: unique_index.cpp:134: 
2026-01-08T15:35:50.342077Z node 50 :BUILD_INDEX TRACE: unique_index.cpp:134: Exhausted TValidateUniqueIndexScan Id: 1 Status: INVALID Issues:
2026-01-08T15:35:50.342357Z node 50 :BUILD_INDEX NOTICE: unique_index.cpp:124: Done TValidateUniqueIndexScan Id: 1 Status: DONE Issues: Id: 1 TabletId: 72075186224037889 Status: DONE RequestSeqNoGeneration: 42 RequestSeqNoRound: 45 MeteringStats { ReadRows: 0 ReadBytes: 0 }
2026-01-08T15:35:50.343553Z node 50 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037889
2026-01-08T15:35:50.343656Z node 50 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037889 active 0 active planned 0 immediate 0 planned 0
2026-01-08T15:35:50.343760Z node 50 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186224037889 TxInFly 0
2026-01-08T15:35:50.343864Z node 50 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037889
2026-01-08T15:35:50.545245Z node 50 :TX_DATASHARD DEBUG: datashard.cpp:3740: Server connected at leader tablet# 72075186224037889, clientId# [50:1193:2982], serverId# [50:1194:2983], sessionId# [0:0:0]
2026-01-08T15:35:50.545920Z node 50 :TX_DATASHARD DEBUG: execute_write_unit.cpp:262: Executing write operation for [0:2] at 72075186224037889
2026-01-08T15:35:50.546139Z node 50 :TX_DATASHARD DEBUG: execute_write_unit.cpp:465: Executed write operation for [0:2] at 72075186224037889, row count=2
2026-01-08T15:35:50.557860Z node 50 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037889
2026-01-08T15:35:50.565977Z node 50 :TX_DATASHARD DEBUG: datashard.cpp:3740: Server connected at leader tablet# 72075186224037889, clientId# [50:1200:2988], serverId# [50:1201:2989], sessionId# [0:0:0]
2026-01-08T15:35:50.566477Z node 50 :BUILD_INDEX NOTICE: unique_index.cpp:246: Starting TValidateUniqueIndexScan TabletId: 72075186224037889 Id: 1 TabletId: 72075186224037889 OwnerId: 72057594046644480 PathId: 43 IndexColumns: "key_part1" IndexColumns: "key_part2" SeqNoGeneration: 42 SeqNoRound: 46
2026-01-08T15:35:50.566648Z node 50 :BUILD_INDEX INFO: unique_index.cpp:51: Create TValidateUniqueIndexScan Id: 1 Status: INVALID Issues:
2026-01-08T15:35:50.567460Z node 50 :BUILD_INDEX INFO: unique_index.cpp:58: Prepare TValidateUniqueIndexScan Id: 1 Status: INVALID Issues:
2026-01-08T15:35:50.567577Z node 50 :BUILD_INDEX TRACE: unique_index.cpp:63: Seek 0 TValidateUniqueIndexScan Id: 1 Status: INVALID Issues:
2026-01-08T15:35:50.579024Z node 50 :BUILD_INDEX ERROR: unique_index.cpp:126: Failed TValidateUniqueIndexScan Id: 1 Status: BUILD_ERROR Issues: {
: Error: Duplicate key found: (key_part1=1, key_part2=1) } Id: 1 TabletId: 72075186224037889 Status: BUILD_ERROR Issues { message: "Duplicate key found: (key_part1=1, key_part2=1)" severity: 1 } RequestSeqNoGeneration: 42 RequestSeqNoRound: 46 MeteringStats { ReadRows: 2 ReadBytes: 42 }
2026-01-08T15:35:50.580146Z node 50 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037889
2026-01-08T15:35:50.580263Z node 50 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037889 active 0 active planned 0 immediate 0 planned 0
2026-01-08T15:35:50.580369Z node 50 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186224037889 TxInFly 0
2026-01-08T15:35:50.580498Z node 50 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037889
Traceback (most recent call last):
  File "library/python/testing/yatest_common/yatest/common/process.py", line 384, in wait
    wait_for(
  File "library/python/testing/yatest_common/yatest/common/process.py", line 765, in wait_for
    raise TimeoutError(truncate(message, MAX_MESSAGE_LEN))
yatest.common.process.TimeoutError: 600 second(s) wait timeout has expired: Command '['/home/runner/.ya/tools/v4/10669990147/test_tool', 'run_ut', '@/home/runner/.ya/build/build_root/663n/003a27/ydb/core/tx/datashard/build_index/ut/test-results/unittest/testing_out_stuff/test_tool.args']' stopped by 600 seconds timeout

During handling of the above exception, another exception occurred:

Traceback (most recent call last):
  File "devtools/ya/test/programs/test_tool/run_test/run_test.py", line 1772, in main
    res.wait(check_exit_code=False, timeout=current_run_test_timeout, on_timeout=timeout_callback)
  File "library/python/testing/yatest_common/yatest/common/process.py", line 398, in wait
    raise ExecutionTimeoutError(self, str(e))
yatest.common.process.ExecutionTimeoutError: (("600 second(s) wait timeout has expired: Command '['/home/runner/.ya/tools/v4/10669990147/test_tool', 'run_ut', '@/home/runner/.ya/build/build_root/663n/003a27/ydb/core/tx/datashard/build_index/ut/test-results/unittest/testing_out_stuff/test_tool.args']' stopped by 600 seconds timeout",), {})
|88.0%| [TM] {BAZEL_UPLOAD} ydb/core/kqp/ut/close_with_load/unittest
|88.0%| [TM] {BAZEL_UPLOAD} ydb/core/kqp/ut/close_with_load/unittest
|88.0%| [TM] {BAZEL_UPLOAD} ydb/core/kqp/ut/close_with_load/unittest
|88.0%| [TM] {BAZEL_UPLOAD} ydb/core/kqp/ut/close_with_load/unittest
|88.0%| [TM] {BAZEL_UPLOAD} ydb/core/kqp/ut/close_with_load/unittest
|88.0%| [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/close_with_load/unittest
|88.0%| [TM] {BAZEL_UPLOAD} ydb/core/kqp/ut/close_with_load/unittest
|88.0%| [TM] {BAZEL_UPLOAD} ydb/core/kqp/ut/close_with_load/unittest
|88.0%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/services/ydb/backup_ut/ydb-services-ydb-backup_ut
|88.0%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/library/ycloud/impl/ut/ydb-library-ycloud-impl-ut
|88.0%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/services/ydb/backup_ut/ydb-services-ydb-backup_ut
|87.9%| [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/close_with_load/unittest
|88.0%| [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/close_with_load/unittest
|88.0%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/library/ycloud/impl/ut/ydb-library-ycloud-impl-ut
|88.0%| [LD] {RESULT} $(B)/ydb/library/ycloud/impl/ut/ydb-library-ycloud-impl-ut
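[Editor's note] The traceback above is the usual shape of a yatest run timing out: an inner wait_for poll raises TimeoutError when the deadline passes, and the outer wait() re-raises it as ExecutionTimeoutError. A rough sketch of that two-layer pattern follows; the signatures are simplified assumptions, not the real yatest.common.process API:

```python
# Minimal sketch (assumed, simplified) of the timeout layering in the traceback above.
import subprocess
import time

class ExecutionTimeoutError(RuntimeError):
    """Stand-in for yatest.common.process.ExecutionTimeoutError."""

def wait_for(check, timeout, message):
    # Inner layer: poll until check() succeeds or the deadline passes.
    deadline = time.monotonic() + timeout
    while time.monotonic() < deadline:
        if check():
            return
        time.sleep(0.1)
    raise TimeoutError(message)

def wait(proc, timeout):
    # Outer layer: translate the generic timeout into a test-level execution timeout.
    try:
        wait_for(lambda: proc.poll() is not None, timeout,
                 f"{timeout} second(s) wait timeout has expired: {proc.args!r}")
    except TimeoutError as e:
        raise ExecutionTimeoutError(str(e)) from e

if __name__ == "__main__":
    p = subprocess.Popen(["sleep", "5"])
    try:
        wait(p, timeout=1)  # expires, like the 600 s test timeout in the log
    except ExecutionTimeoutError as e:
        print("timed out:", e)
    finally:
        p.kill()
```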
|88.0%| [TM] {BAZEL_UPLOAD} ydb/core/tx/datashard/build_index/ut/unittest
|88.0%| [LD] {RESULT} $(B)/ydb/services/ydb/backup_ut/ydb-services-ydb-backup_ut
|88.0%| [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/close_with_load/unittest
|88.0%| [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/close_with_load/unittest
>> YdbProxy::StaticCreds [GOOD]
|88.0%| [TM] {BAZEL_UPLOAD} ydb/core/kqp/ut/close_with_load/unittest
|88.0%| [TM] {BAZEL_UPLOAD} ydb/core/kqp/ut/close_with_load/unittest
|88.0%| [TM] {BAZEL_UPLOAD} ydb/core/kqp/ut/close_with_load/unittest
>> YdbProxy::DescribeTable [GOOD]
>> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-LocalUser-CreateUser-22 [FAIL]
>> BasicUsage::WriteSessionSwitchDatabases [GOOD]
>> YdbProxy::OAuthToken [GOOD]
>> YdbProxy::DropTopic [GOOD]
>> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-LocalUser-CreateUser-23
>> YdbProxy::DescribeConsumer [GOOD]
|87.9%| [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/close_with_load/unittest
|88.0%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/statistics/service/ut/ydb-core-statistics-service-ut
|88.0%| [TA] $(B)/ydb/core/tx/datashard/ut_upload_rows/test-results/unittest/{meta.json ... results_accumulator.log}
|88.0%| [LD] {RESULT} $(B)/ydb/core/statistics/service/ut/ydb-core-statistics-service-ut
|88.0%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/statistics/service/ut/ydb-core-statistics-service-ut
|88.0%| [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/close_with_load/unittest
|88.0%| [TM] {BAZEL_UPLOAD} ydb/core/kqp/ut/close_with_load/unittest
|88.0%| [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/close_with_load/unittest
|88.0%| [TM] {BAZEL_UPLOAD} ydb/core/kqp/ut/close_with_load/unittest
|88.0%| [TM] {BAZEL_UPLOAD} ydb/core/kqp/ut/close_with_load/unittest
|88.0%| [TM] {BAZEL_UPLOAD} ydb/core/kqp/ut/close_with_load/unittest
|88.0%| [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/close_with_load/unittest
|88.0%| [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/close_with_load/unittest
|88.0%| [TA] {BAZEL_UPLOAD} $(B)/ydb/core/tx/datashard/ut_upload_rows/test-results/unittest/{meta.json ...
results_accumulator.log} ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/replication/ydb_proxy/ut/unittest >> YdbProxy::StaticCreds [GOOD] Test command err: 2026-01-08T15:35:56.173455Z node 1 :METADATA_PROVIDER WARN: log.cpp:841: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7593014941553259095:2065];send_to=[0:7307199536658146131:7762515]; 2026-01-08T15:35:56.173561Z node 1 :METADATA_PROVIDER ERROR: log.cpp:841: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2026-01-08T15:35:56.237976Z node 1 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:125: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, got undelivered to scheme cache: ActorUnknown 2026-01-08T15:35:56.558391Z node 1 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: LookupError, path: Root/.metadata/script_executions 2026-01-08T15:35:56.573963Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2026-01-08T15:35:56.574137Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2026-01-08T15:35:56.625610Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2026-01-08T15:35:56.697329Z node 1 :IMPORT WARN: schemeshard_import.cpp:440: Table profiles were not loaded 2026-01-08T15:35:56.703174Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1374: Notification cookie mismatch for subscription [1:7593014941553259070:2081] 1767886556172352 != 1767886556172355 2026-01-08T15:35:56.776474Z node 1 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: PathErrorUnknown, path: Root/.metadata/script_executions 2026-01-08T15:35:57.115245Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2026-01-08T15:35:57.115277Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2026-01-08T15:35:57.115286Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2026-01-08T15:35:57.115409Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2026-01-08T15:35:57.190545Z node 1 :TX_CONVEYOR ERROR: log.cpp:841: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2026-01-08T15:35:57.489751Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2026-01-08T15:35:57.497496Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:123: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2026-01-08T15:35:57.562545Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, 
suboperation type: ESchemeOpRmDir, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_rmdir.cpp:66) 2026-01-08T15:35:57.628783Z node 1 :TX_PROXY ERROR: schemereq.cpp:624: Actor# [1:7593014945848227255:2527] txid# 281474976710660, issues: { message: "Path does not exist" issue_code: 200200 severity: 1 } 2026-01-08T15:36:00.791692Z node 2 :METADATA_PROVIDER WARN: log.cpp:841: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7593014958304618467:2150];send_to=[0:7307199536658146131:7762515]; 2026-01-08T15:36:00.794303Z node 2 :METADATA_PROVIDER ERROR: log.cpp:841: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2026-01-08T15:36:00.866840Z node 2 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: LookupError, path: Root/.metadata/script_executions 2026-01-08T15:36:00.997184Z node 2 :IMPORT WARN: schemeshard_import.cpp:440: Table profiles were not loaded 2026-01-08T15:36:00.999256Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1374: Notification cookie mismatch for subscription [2:7593014958304618343:2081] 1767886560739089 != 1767886560739092 2026-01-08T15:36:01.007438Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2026-01-08T15:36:01.007518Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2026-01-08T15:36:01.033023Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2026-01-08T15:36:01.102774Z node 2 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: PathErrorUnknown, path: Root/.metadata/script_executions 2026-01-08T15:36:01.459875Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2026-01-08T15:36:01.459903Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2026-01-08T15:36:01.459910Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2026-01-08T15:36:01.460001Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2026-01-08T15:36:01.778427Z node 2 :TX_CONVEYOR ERROR: log.cpp:841: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2026-01-08T15:36:01.779199Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2026-01-08T15:36:01.795402Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:123: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2026-01-08T15:36:01.855346Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, 
suboperation type: ESchemeOpModifyACL, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) 2026-01-08T15:36:02.244590Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:123: NotifyTxCompletion, unknown transaction, txId: 281474976710660, at schemeshard: 72057594046644480 |88.0%| [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/close_with_load/unittest |88.0%| [TM] {BAZEL_UPLOAD} ydb/core/kqp/ut/close_with_load/unittest |88.0%| [TM] {BAZEL_UPLOAD} ydb/core/kqp/ut/close_with_load/unittest |88.0%| [TM] {BAZEL_UPLOAD} ydb/core/tx/replication/ydb_proxy/ut/unittest |88.0%| [TM] {BAZEL_UPLOAD} ydb/core/kqp/ut/close_with_load/unittest >> YdbProxy::DescribeTopic [GOOD] |88.0%| [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/close_with_load/unittest |88.0%| [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/close_with_load/unittest |88.0%| [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/close_with_load/unittest |88.0%| [TM] {BAZEL_UPLOAD} ydb/core/kqp/ut/close_with_load/unittest >> YdbProxy::CopyTables [GOOD] >> YdbProxy::AlterTopic >> TModificationsValidatorTests::TestIsValidationRequired_NONE [GOOD] >> TConsoleTests::TestRestartConsoleAndPools >> TxUsage::Sinks_Olap_WriteToTopicAndTable_3_Query [GOOD] |88.0%| [TA] {RESULT} $(B)/ydb/core/tx/datashard/ut_upload_rows/test-results/unittest/{meta.json ... results_accumulator.log} >> TConsoleConfigHelpersTests::TestConfigCourier >> TConsoleConfigTests::TestModifyConfigItem |88.0%| [TM] {BAZEL_UPLOAD} ydb/core/kqp/ut/close_with_load/unittest |88.0%| [TM] {BAZEL_UPLOAD} ydb/core/kqp/ut/close_with_load/unittest >> TModificationsValidatorTests::TestIsValidationRequired_DOMAIN [GOOD] >> TModificationsValidatorTests::TestIsValidationRequired_TENANTS [GOOD] >> TModificationsValidatorTests::TestIsValidationRequired_TENANTS_AND_NODE_TYPES [GOOD] >> TModificationsValidatorTests::TestIndexAndModificationsShrink_RemoveItems_NONE [GOOD] >> TModificationsValidatorTests::TestIndexAndModificationsShrink_RemoveItems_DOMAIN [GOOD] >> TModificationsValidatorTests::TestIndexAndModificationsShrink_RemoveItems_TENANTS [GOOD] >> TModificationsValidatorTests::TestIndexAndModificationsShrink_RemoveItems_TENANTS_AND_NODE_TYPES [GOOD] >> TModificationsValidatorTests::TestIndexAndModificationsShrink_ModifyItemsSameScope_NONE [GOOD] >> TModificationsValidatorTests::TestIndexAndModificationsShrink_ModifyItemsSameScope_DOMAIN >> TModificationsValidatorTests::TestIndexAndModificationsShrink_AddItems_NONE [GOOD] >> TJaegerTracingConfiguratorTests::RequestTypeThrottler >> TConsoleConfigSubscriptionTests::TestRemoveConfigSubscription >> TModificationsValidatorTests::TestIndexAndModificationsShrink_ModifyItemsSameScope_DOMAIN [GOOD] >> TConsoleTests::TestSetDefaultStorageUnitsQuota >> TConsoleConfigHelpersTests::TestConfigCourier [GOOD] >> PgCatalog::InformationSchema [GOOD] >> TModificationsValidatorTests::TestIndexAndModificationsShrink_ModifyItemsSameScope_TENANTS [GOOD] >> TModificationsValidatorTests::TestIndexAndModificationsShrink_AddItems_DOMAIN [GOOD] >> TModificationsValidatorTests::TestIndexAndModificationsShrink_ModifyItemsSameScope_TENANTS_AND_NODE_TYPES [GOOD] >> TModificationsValidatorTests::TestIndexAndModificationsShrink_AddItems_TENANTS [GOOD] >> TModificationsValidatorTests::TestIndexAndModificationsShrink_ModifyItemsNarrowScope_TENANTS [GOOD] >> 
TModificationsValidatorTests::TestIndexAndModificationsShrink_ModifyItemsNarrowScope_TENANTS_AND_NODE_TYPES [GOOD] >> TNetClassifierUpdaterTest::TestGetUpdatesFromHttpServer >> TModificationsValidatorTests::TestIndexAndModificationsShrink_AddItems_TENANTS_AND_NODE_TYPES [GOOD] >> TModificationsValidatorTests::TestIndexAndModificationsShrink_ModifyItemsExpandScope_NONE [GOOD] >> TModificationsValidatorTests::TestIndexAndModificationsShrink_ModifyItemsExpandScope_DOMAIN [GOOD] >> TModificationsValidatorTests::TestIndexAndModificationsShrink_ModifyItemsExpandScope_TENANTS [GOOD] >> TModificationsValidatorTests::TestIndexAndModificationsShrink_ModifyItemsExpandScope_TENANTS_AND_NODE_TYPES [GOOD] >> TModificationsValidatorTests::TestIndexAndModificationsShrink_ModifyItemsNarrowScope_NONE [GOOD] >> TModificationsValidatorTests::TestIndexAndModificationsShrink_ModifyItemsNarrowScope_DOMAIN [GOOD] >> TModificationsValidatorTests::TestComputeAffectedConfigs_DomainAffected_DOMAIN [GOOD] >> TModificationsValidatorTests::TestComputeAffectedConfigs_DomainAffected_TENANTS [GOOD] >> TModificationsValidatorTests::TestComputeAffectedConfigs_DomainAffected_TENANTS_AND_NODE_TYPES [GOOD] >> TModificationsValidatorTests::TestComputeAffectedConfigs_DomainUnaffected_TENANTS [GOOD] >> TModificationsValidatorTests::TestComputeAffectedConfigs_DomainUnaffected_TENANTS_AND_NODE_TYPES [GOOD] >> TConsoleConfigHelpersTests::TestConfigSubscriber >> PgCatalog::CheckSetConfig >> TModificationsValidatorTests::TestComputeAffectedConfigs_All_DomainAffected_TENANTS_AND_NODE_TYPES [GOOD] >> TModificationsValidatorTests::TestComputeAffectedConfigs_All_DomainUnaffected_TENANTS [GOOD] >> TModificationsValidatorTests::TestComputeAffectedConfigs_All_DomainUnaffected_TENANTS_AND_NODE_TYPES [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/public/sdk/cpp/src/client/topic/ut/with_direct_read_ut/unittest >> TxUsage::Write_And_Read_Small_Messages_2 [GOOD] Test command err: 2026-01-08T15:31:40.595364Z node 1 :METADATA_PROVIDER WARN: log.cpp:841: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7593013843451960814:2151];send_to=[0:7307199536658146131:7762515]; 2026-01-08T15:31:40.595978Z node 1 :METADATA_PROVIDER ERROR: log.cpp:841: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2026-01-08T15:31:40.647416Z node 1 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:125: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, got undelivered to scheme cache: ActorUnknown 2026-01-08T15:31:40.649786Z node 1 :PQ_READ_PROXY DEBUG: caching_service.cpp:44: Direct read cache: : Created 2026-01-08T15:31:41.334471Z node 1 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: LookupError, path: Root/.metadata/script_executions 2026-01-08T15:31:41.482245Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2026-01-08T15:31:41.490679Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2026-01-08T15:31:41.504008Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2026-01-08T15:31:41.614921Z node 1 :TX_CONVEYOR ERROR: log.cpp:841: 
fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2026-01-08T15:31:41.637811Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1374: Notification cookie mismatch for subscription [1:7593013843451960684:2081] 1767886300487507 != 1767886300487510 2026-01-08T15:31:41.645319Z node 1 :IMPORT WARN: schemeshard_import.cpp:440: Table profiles were not loaded 2026-01-08T15:31:41.659938Z node 1 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: PathErrorUnknown, path: Root/.metadata/script_executions 2026-01-08T15:31:41.902949Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: /home/runner/.ya/build/build_root/663n/0022f4/r3tmp/yandex4ZXGdw.tmp 2026-01-08T15:31:41.902982Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: /home/runner/.ya/build/build_root/663n/0022f4/r3tmp/yandex4ZXGdw.tmp 2026-01-08T15:31:41.903142Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:202: successfully initialized from file: /home/runner/.ya/build/build_root/663n/0022f4/r3tmp/yandex4ZXGdw.tmp 2026-01-08T15:31:41.903214Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2026-01-08T15:31:41.982859Z INFO: TTestServer started on Port 11248 GrpcPort 10374 PQClient connected to localhost:10374 2026-01-08T15:31:42.647198Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2026-01-08T15:31:42.690484Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:123: NotifyTxCompletion, unknown transaction, txId: 281474976710658, at schemeshard: 72057594046644480 2026-01-08T15:31:42.881999Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:31:43.239594Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:123: NotifyTxCompletion, unknown transaction, txId: 281474976710660, at schemeshard: 72057594046644480 2026-01-08T15:31:43.260465Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:123: NotifyTxCompletion, unknown transaction, txId: 281474976710661, at schemeshard: 72057594046644480 2026-01-08T15:31:45.575934Z node 1 :METADATA_PROVIDER ERROR: log.cpp:841: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7593013843451960814:2151];send_to=[0:7307199536658146131:7762515]; 2026-01-08T15:31:45.576003Z node 1 :METADATA_PROVIDER ERROR: log.cpp:841: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2026-01-08T15:31:46.340604Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7593013869221765489:2331], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:31:46.340743Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:263: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:31:46.341301Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7593013869221765526:2336], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:31:46.341348Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7593013869221765527:2337], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:31:46.341610Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:263: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:31:46.351084Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710662:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2026-01-08T15:31:46.380202Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7593013869221765530:2338], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710662 completed, doublechecking } 2026-01-08T15:31:46.493194Z node 1 :TX_PROXY ERROR: schemereq.cpp:624: Actor# [1:7593013869221765583:2656] txid# 281474976710663, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 45], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2026-01-08T15:31:47.690622Z node 1 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:625: Compilation failed, self: [1:7593013869221765593:2343], status: SCHEME_ERROR, issues:
: Error: Type annotation, code: 1030
:3:16: Error: At function: KiReadTable!
:3:16: Error: Cannot find table 'db.[/Root/PQ/Config/V2/Cluster]' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions., code: 2003 2026-01-08T15:31:47.693164Z node 1 :KQP_SESSION WARN: {KQPSA@kqp_session_actor.cpp:2679} SessionId: ydb://session/3?node_id=1&id=ZGJiNDk0ZWMtZDlhZjNkYTUtNTU5YWVjYzEtNGUwM2E3OGQ=, ActorId: [1:7593013869221765487:2330], ActorState: ExecuteState, LegacyTraceId: 01kef3rf104s16rgfckks14gqz, ReplyQueryCompileError, remove tx status# SCHEME_ERROR issues# { message: "Type annotation" issue_code: 1030 severity: 1 issues { position { row: 3 column: 16 } message: "At function: KiReadTable!" end_position { row: 3 column: 16 } severity: 1 issues { position { row: 3 column: 16 } message: "Cannot find table \'db.[/Root/PQ/Config/V2/Cluster]\' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions." end_position { row: 3 column: 16 } issue_code: 2003 severity: 1 } } } tx_id# trace_id# 2026-01-08T15:31:47.695918Z node 1 :PERSQUEUE_CLUSTER_TRACKER ERROR: cluster_tracker.cpp:167: failed to list clusters: { Response { QueryIssues { message: "Type annotation" issue_code: 1030 severity: 1 issues { position { row: 3 column: 16 } message: "At function: KiReadTable!" end_position { row: 3 column: 16 } severity: 1 issues { position { row: 3 column: 16 } message: "Cannot find table \'db.[/Root/PQ/Config/V2/Cluster]\' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions." end_position { row: 3 column: 16 } issue_code: 2003 severity: 1 } } } TxMeta { } } YdbStatus: SCHEME_ERROR ConsumedRu: 1 } 2026-01-08T15:31:47.697517Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:31:47.775920Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:31:47.932326Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) === Init DC: UPSERT INTO `/Root/PQ/Config/V2/Cluster` (name, balancer, local, enabled, weight) VALUES ("dc1", "localhost", true, true, 1000), ("dc2", "dc2.logbroker.yandex.net", false, true, 1000); === CheckClustersList. Subscribe to ClusterTracker from [1:7593013877811700478:2840] === CheckClustersList. 
Ok 2026-01-08T15:31:53.469099Z :WriteToTopic_Demo_41_Table INFO: TTopicSdkTestSetup started 2026-01-08T15:31:53.507197Z node 1 :PQ_READ_PROXY DEBUG: grpc_pq_schema.cpp:133: new create topic request 2026-01-08T15:31:53.540823Z node 1 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1657: [72075186224037893][] pipe [1:7593013899286537148:2934] connected; active server actors: 1 2026-01-08T15:31:53.541073Z node 1 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer__balancing.cpp:1521: [72075186224037893][test-topic] updating configuration. Deleted partitions []. Added partitions [0] 2026-01-08T15:31:53.542037Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:3023: [PQ: 72075186224037892] Handle TEvInterconnect::TEvNodeInfo 2026-01-08T15:31:53.543027Z node 1 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer.cpp:756: [72075186224037893][test-topic] Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at RB 72075186224037893 2026-01-08T15:31:53.543126Z node 1 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer.cpp:142: [72075186224037893][test-topic] BALANCER INIT DONE for test-topic: (0, ... 0][StateIdle] Process user action and tx events 2026-01-08T15:36:01.435051Z node 13 :PERSQUEUE DEBUG: partition.cpp:2388: [72075186224037894][Partition][0][StateIdle] Events: 0, PendingCommits: 0, PendingWrites: 0 2026-01-08T15:36:01.435067Z node 13 :PERSQUEUE DEBUG: partition.cpp:2337: [72075186224037894][Partition][0][StateIdle] Process user action and tx pending commits 2026-01-08T15:36:01.435087Z node 13 :PERSQUEUE DEBUG: partition.cpp:2388: [72075186224037894][Partition][0][StateIdle] Events: 0, PendingCommits: 0, PendingWrites: 0 2026-01-08T15:36:01.435103Z node 13 :PERSQUEUE DEBUG: partition.cpp:2347: [72075186224037894][Partition][0][StateIdle] Try persist 2026-01-08T15:36:01.435174Z node 13 :PERSQUEUE DEBUG: partition.cpp:2329: [72075186224037892][Partition][0][StateIdle] Process user action and tx events 2026-01-08T15:36:01.435189Z node 13 :PERSQUEUE DEBUG: partition.cpp:2388: [72075186224037892][Partition][0][StateIdle] Events: 0, PendingCommits: 0, PendingWrites: 0 2026-01-08T15:36:01.435202Z node 13 :PERSQUEUE DEBUG: partition.cpp:2337: [72075186224037892][Partition][0][StateIdle] Process user action and tx pending commits 2026-01-08T15:36:01.435215Z node 13 :PERSQUEUE DEBUG: partition.cpp:2388: [72075186224037892][Partition][0][StateIdle] Events: 0, PendingCommits: 0, PendingWrites: 0 2026-01-08T15:36:01.435226Z node 13 :PERSQUEUE DEBUG: partition.cpp:2347: [72075186224037892][Partition][0][StateIdle] Try persist 2026-01-08T15:36:01.538493Z node 13 :PERSQUEUE DEBUG: partition.cpp:2329: [72075186224037892][Partition][0][StateIdle] Process user action and tx events 2026-01-08T15:36:01.538524Z node 13 :PERSQUEUE DEBUG: partition.cpp:2388: [72075186224037892][Partition][0][StateIdle] Events: 0, PendingCommits: 0, PendingWrites: 0 2026-01-08T15:36:01.538541Z node 13 :PERSQUEUE DEBUG: partition.cpp:2337: [72075186224037892][Partition][0][StateIdle] Process user action and tx pending commits 2026-01-08T15:36:01.538561Z node 13 :PERSQUEUE DEBUG: partition.cpp:2388: [72075186224037892][Partition][0][StateIdle] Events: 0, PendingCommits: 0, PendingWrites: 0 2026-01-08T15:36:01.538577Z node 13 :PERSQUEUE DEBUG: partition.cpp:2347: [72075186224037892][Partition][0][StateIdle] Try persist 2026-01-08T15:36:01.538649Z node 13 :PERSQUEUE DEBUG: partition.cpp:2329: [72075186224037894][Partition][0][StateIdle] Process user action and tx events 2026-01-08T15:36:01.538661Z node 13 :PERSQUEUE 
DEBUG: partition.cpp:2388: [72075186224037894][Partition][0][StateIdle] Events: 0, PendingCommits: 0, PendingWrites: 0 2026-01-08T15:36:01.538673Z node 13 :PERSQUEUE DEBUG: partition.cpp:2337: [72075186224037894][Partition][0][StateIdle] Process user action and tx pending commits 2026-01-08T15:36:01.538688Z node 13 :PERSQUEUE DEBUG: partition.cpp:2388: [72075186224037894][Partition][0][StateIdle] Events: 0, PendingCommits: 0, PendingWrites: 0 2026-01-08T15:36:01.538698Z node 13 :PERSQUEUE DEBUG: partition.cpp:2347: [72075186224037894][Partition][0][StateIdle] Try persist 2026-01-08T15:36:01.594170Z :INFO: [/Root] [/Root] [60d48a56-d2bb9c74-2c0d381e-aaf53bd5] Closing read session. Close timeout: 0.000000s 2026-01-08T15:36:01.594239Z :INFO: [/Root] Read/commit by partition streams (cluster:topic:partition:stream-id:read-offset:committed-offset): -:topic_A:0:1:319:320 2026-01-08T15:36:01.594307Z :INFO: [/Root] [/Root] [60d48a56-d2bb9c74-2c0d381e-aaf53bd5] Counters: { Errors: 0 CurrentSessionLifetimeMs: 2039 BytesRead: 20480000 MessagesRead: 320 BytesReadCompressed: 20480000 BytesInflightUncompressed: 0 BytesInflightCompressed: 0 BytesInflightTotal: 0 MessagesInflight: 0 } 2026-01-08T15:36:01.594435Z :NOTICE: [/Root] [/Root] [60d48a56-d2bb9c74-2c0d381e-aaf53bd5] Aborting read session. Description: SessionClosed { Status: ABORTED Issues: "
: Error: Close with zero timeout " } 2026-01-08T15:36:01.594485Z :DEBUG: [/Root] [/Root] [60d48a56-d2bb9c74-2c0d381e-aaf53bd5] [] Abort session to cluster 2026-01-08T15:36:01.595048Z :DEBUG: [/Root] 0x00007D7D2F521190 TDirectReadSessionManager ServerSessionId=test-consumer_13_1_9594928556693150086_v1 Close 2026-01-08T15:36:01.595427Z :DEBUG: [/Root] 0x00007D7D2F521190 TDirectReadSessionManager ServerSessionId=test-consumer_13_1_9594928556693150086_v1 Close 2026-01-08T15:36:01.595559Z :NOTICE: [/Root] [/Root] [60d48a56-d2bb9c74-2c0d381e-aaf53bd5] Aborting read session. Description: SessionClosed { Status: ABORTED Issues: "
: Error: Aborted " } 2026-01-08T15:36:01.597047Z node 13 :PQ_READ_PROXY DEBUG: read_session_actor.cpp:122: session cookie 1 consumer test-consumer session test-consumer_13_1_9594928556693150086_v1 grpc read done: success# 0, data# { } 2026-01-08T15:36:01.597087Z node 13 :PQ_READ_PROXY INFO: read_session_actor.cpp:125: session cookie 1 consumer test-consumer session test-consumer_13_1_9594928556693150086_v1 grpc read failed 2026-01-08T15:36:01.597121Z node 13 :PQ_READ_PROXY INFO: read_session_actor.cpp:92: session cookie 1 consumer test-consumer session test-consumer_13_1_9594928556693150086_v1 grpc closed 2026-01-08T15:36:01.597161Z node 13 :PQ_READ_PROXY INFO: read_session_actor.cpp:383: session cookie 1 consumer test-consumer session test-consumer_13_1_9594928556693150086_v1 is DEAD 2026-01-08T15:36:01.598046Z node 13 :PQ_READ_PROXY DEBUG: direct_read_actor.cpp:84: Direct read proxy [13:7593014954558036731:2529]: session cookie 2 consumer test-consumer session test-consumer_13_1_9594928556693150086_v1 grpc read done: success# 0, data# { } 2026-01-08T15:36:01.598069Z node 13 :PQ_READ_PROXY INFO: direct_read_actor.cpp:87: Direct read proxy [13:7593014954558036731:2529]: session cookie 2 consumer test-consumer session test-consumer_13_1_9594928556693150086_v1 grpc read failed 2026-01-08T15:36:01.598095Z node 13 :PQ_READ_PROXY INFO: direct_read_actor.cpp:66: Direct read proxy [13:7593014954558036731:2529]: session cookie 2 consumer test-consumer session test-consumer_13_1_9594928556693150086_v1 grpc closed 2026-01-08T15:36:01.598115Z node 13 :PQ_READ_PROXY INFO: direct_read_actor.cpp:167: Direct read proxy [13:7593014954558036731:2529]: session cookie 2 consumer test-consumer session test-consumer_13_1_9594928556693150086_v1 proxy is DEAD 2026-01-08T15:36:01.598845Z node 13 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer__balancing.cpp:1661: [72075186224037895][topic_A] pipe [13:7593014954558036723:2524] disconnected. 
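[Editor's note] The shutdown above ("Closing read session. Close timeout: 0.000000s" followed by an ABORTED SessionClosed event) shows the close-with-zero-timeout semantics: nothing still in flight is waited for. A toy Python model of that behavior — not the YDB SDK API, class and field names are invented — is:

```python
import time
from collections import deque

class ReadSession:
    """Toy model of the close semantics in the log: a zero timeout aborts."""
    def __init__(self, pending):
        self.pending = deque(pending)  # messages not yet delivered to the consumer

    def close(self, timeout: float) -> str:
        deadline = time.monotonic() + timeout
        # Drain pending messages only while the deadline allows it.
        while self.pending and time.monotonic() < deadline:
            self.pending.popleft()  # pretend the consumer handled one message
        if self.pending:
            return 'SessionClosed { Status: ABORTED Issues: "Close with zero timeout" }'
        return "SessionClosed { Status: SUCCESS }"

# With timeout 0 the deadline is already in the past, so anything still
# in flight is dropped and the session reports ABORTED, as in the log.
print(ReadSession(pending=["msg"]).close(timeout=0))
```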
2026-01-08T15:36:01.598871Z node 13 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1672: [72075186224037895][topic_A] pipe [13:7593014954558036723:2524] disconnected; active server actors: 1 2026-01-08T15:36:01.598894Z node 13 :PERSQUEUE_READ_BALANCER NOTICE: read_balancer__balancing.cpp:1681: [72075186224037895][topic_A] pipe [13:7593014954558036723:2524] client test-consumer disconnected session test-consumer_13_1_9594928556693150086_v1 2026-01-08T15:36:01.602856Z node 13 :PERSQUEUE DEBUG: pq_impl.cpp:2321: [PQ: 72075186224037894] Destroy direct read session test-consumer_13_1_9594928556693150086_v1 2026-01-08T15:36:01.602899Z node 13 :PERSQUEUE DEBUG: pq_impl.cpp:2809: [PQ: 72075186224037894] server disconnected, pipe [13:7593014954558036726:2527] destroyed 2026-01-08T15:36:01.602970Z node 13 :PQ_READ_PROXY DEBUG: caching_service.cpp:139: Direct read cache: server session deregistered: test-consumer_13_1_9594928556693150086_v1 2026-01-08T15:36:01.642532Z node 13 :PERSQUEUE DEBUG: partition.cpp:2329: [72075186224037892][Partition][0][StateIdle] Process user action and tx events 2026-01-08T15:36:01.642567Z node 13 :PERSQUEUE DEBUG: partition.cpp:2388: [72075186224037892][Partition][0][StateIdle] Events: 0, PendingCommits: 0, PendingWrites: 0 2026-01-08T15:36:01.642585Z node 13 :PERSQUEUE DEBUG: partition.cpp:2337: [72075186224037892][Partition][0][StateIdle] Process user action and tx pending commits 2026-01-08T15:36:01.642604Z node 13 :PERSQUEUE DEBUG: partition.cpp:2388: [72075186224037892][Partition][0][StateIdle] Events: 0, PendingCommits: 0, PendingWrites: 0 2026-01-08T15:36:01.642620Z node 13 :PERSQUEUE DEBUG: partition.cpp:2347: [72075186224037892][Partition][0][StateIdle] Try persist 2026-01-08T15:36:01.642679Z node 13 :PERSQUEUE DEBUG: partition.cpp:2329: [72075186224037894][Partition][0][StateIdle] Process user action and tx events 2026-01-08T15:36:01.642691Z node 13 :PERSQUEUE DEBUG: partition.cpp:2388: [72075186224037894][Partition][0][StateIdle] Events: 0, PendingCommits: 0, PendingWrites: 0 2026-01-08T15:36:01.642701Z node 13 :PERSQUEUE DEBUG: partition.cpp:2337: [72075186224037894][Partition][0][StateIdle] Process user action and tx pending commits 2026-01-08T15:36:01.642717Z node 13 :PERSQUEUE DEBUG: partition.cpp:2388: [72075186224037894][Partition][0][StateIdle] Events: 0, PendingCommits: 0, PendingWrites: 0 2026-01-08T15:36:01.642726Z node 13 :PERSQUEUE DEBUG: partition.cpp:2347: [72075186224037894][Partition][0][StateIdle] Try persist 2026-01-08T15:36:01.746552Z node 13 :PERSQUEUE DEBUG: partition.cpp:2329: [72075186224037892][Partition][0][StateIdle] Process user action and tx events 2026-01-08T15:36:01.746589Z node 13 :PERSQUEUE DEBUG: partition.cpp:2388: [72075186224037892][Partition][0][StateIdle] Events: 0, PendingCommits: 0, PendingWrites: 0 2026-01-08T15:36:01.746607Z node 13 :PERSQUEUE DEBUG: partition.cpp:2337: [72075186224037892][Partition][0][StateIdle] Process user action and tx pending commits 2026-01-08T15:36:01.746629Z node 13 :PERSQUEUE DEBUG: partition.cpp:2388: [72075186224037892][Partition][0][StateIdle] Events: 0, PendingCommits: 0, PendingWrites: 0 2026-01-08T15:36:01.746646Z node 13 :PERSQUEUE DEBUG: partition.cpp:2347: [72075186224037892][Partition][0][StateIdle] Try persist 2026-01-08T15:36:01.746705Z node 13 :PERSQUEUE DEBUG: partition.cpp:2329: [72075186224037894][Partition][0][StateIdle] Process user action and tx events 2026-01-08T15:36:01.746733Z node 13 :PERSQUEUE DEBUG: partition.cpp:2388: 
[72075186224037894][Partition][0][StateIdle] Events: 0, PendingCommits: 0, PendingWrites: 0 2026-01-08T15:36:01.746745Z node 13 :PERSQUEUE DEBUG: partition.cpp:2337: [72075186224037894][Partition][0][StateIdle] Process user action and tx pending commits 2026-01-08T15:36:01.746760Z node 13 :PERSQUEUE DEBUG: partition.cpp:2388: [72075186224037894][Partition][0][StateIdle] Events: 0, PendingCommits: 0, PendingWrites: 0 2026-01-08T15:36:01.746770Z node 13 :PERSQUEUE DEBUG: partition.cpp:2347: [72075186224037894][Partition][0][StateIdle] Try persist 2026-01-08T15:36:01.846669Z node 13 :PERSQUEUE DEBUG: partition.cpp:2329: [72075186224037892][Partition][0][StateIdle] Process user action and tx events 2026-01-08T15:36:01.846701Z node 13 :PERSQUEUE DEBUG: partition.cpp:2388: [72075186224037892][Partition][0][StateIdle] Events: 0, PendingCommits: 0, PendingWrites: 0 2026-01-08T15:36:01.846717Z node 13 :PERSQUEUE DEBUG: partition.cpp:2337: [72075186224037892][Partition][0][StateIdle] Process user action and tx pending commits 2026-01-08T15:36:01.846736Z node 13 :PERSQUEUE DEBUG: partition.cpp:2388: [72075186224037892][Partition][0][StateIdle] Events: 0, PendingCommits: 0, PendingWrites: 0 2026-01-08T15:36:01.846754Z node 13 :PERSQUEUE DEBUG: partition.cpp:2347: [72075186224037892][Partition][0][StateIdle] Try persist 2026-01-08T15:36:01.846808Z node 13 :PERSQUEUE DEBUG: partition.cpp:2329: [72075186224037894][Partition][0][StateIdle] Process user action and tx events 2026-01-08T15:36:01.846819Z node 13 :PERSQUEUE DEBUG: partition.cpp:2388: [72075186224037894][Partition][0][StateIdle] Events: 0, PendingCommits: 0, PendingWrites: 0 2026-01-08T15:36:01.846828Z node 13 :PERSQUEUE DEBUG: partition.cpp:2337: [72075186224037894][Partition][0][StateIdle] Process user action and tx pending commits 2026-01-08T15:36:01.846840Z node 13 :PERSQUEUE DEBUG: partition.cpp:2388: [72075186224037894][Partition][0][StateIdle] Events: 0, PendingCommits: 0, PendingWrites: 0 2026-01-08T15:36:01.846850Z node 13 :PERSQUEUE DEBUG: partition.cpp:2347: [72075186224037894][Partition][0][StateIdle] Try persist |88.0%| [TM] {RESULT} ydb/core/tx/datashard/build_index/ut/unittest ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/replication/ydb_proxy/ut/unittest >> YdbProxy::DropTopic [GOOD] Test command err: 2026-01-08T15:35:55.809867Z node 1 :METADATA_PROVIDER WARN: log.cpp:841: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7593014938310849019:2142];send_to=[0:7307199536658146131:7762515]; 2026-01-08T15:35:55.810133Z node 1 :METADATA_PROVIDER ERROR: log.cpp:841: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2026-01-08T15:35:55.879529Z node 1 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:125: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, got undelivered to scheme cache: ActorUnknown 2026-01-08T15:35:56.144251Z node 1 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: LookupError, path: Root/.metadata/script_executions 2026-01-08T15:35:56.150038Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2026-01-08T15:35:56.150169Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 
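[Editor's note] The HIVE node_info.cpp:25 records that recur throughout this log walk each node through the VolatileState transitions Unknown -> Disconnected -> Connecting -> Connected. A small sketch encoding only the transitions visible in the log (assumed incomplete; Hive's real state machine is larger) is:

```python
# Transition table built solely from the VolatileState records in this log.
ALLOWED = {
    ("Unknown", "Disconnected"),
    ("Disconnected", "Connecting"),
    ("Connecting", "Connected"),
}

def transition(state: str, new_state: str) -> str:
    if (state, new_state) not in ALLOWED:
        raise ValueError(f"unexpected VolatileState: {state} -> {new_state}")
    return new_state

state = "Unknown"
for nxt in ("Disconnected", "Connecting", "Connected"):
    state = transition(state, nxt)
print(state)  # Connected
```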
2026-01-08T15:35:56.211935Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2026-01-08T15:35:56.240599Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1374: Notification cookie mismatch for subscription [1:7593014938310848911:2081] 1767886555782380 != 1767886555782383 2026-01-08T15:35:56.272018Z node 1 :IMPORT WARN: schemeshard_import.cpp:440: Table profiles were not loaded 2026-01-08T15:35:56.364453Z node 1 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: PathErrorUnknown, path: Root/.metadata/script_executions 2026-01-08T15:35:56.621950Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2026-01-08T15:35:56.621986Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2026-01-08T15:35:56.622006Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2026-01-08T15:35:56.622118Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2026-01-08T15:35:56.808618Z node 1 :TX_CONVEYOR ERROR: log.cpp:841: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2026-01-08T15:35:56.918528Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2026-01-08T15:35:56.931905Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:123: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2026-01-08T15:36:01.686496Z node 2 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: LookupError, path: Root/.metadata/script_executions 2026-01-08T15:36:01.686612Z node 2 :METADATA_PROVIDER ERROR: log.cpp:841: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2026-01-08T15:36:01.912481Z node 2 :IMPORT WARN: schemeshard_import.cpp:440: Table profiles were not loaded 2026-01-08T15:36:01.915828Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1374: Notification cookie mismatch for subscription [2:7593014963085560805:2081] 1767886561520074 != 1767886561520077 2026-01-08T15:36:01.949867Z node 2 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: LookupError, path: Root/.metadata/script_executions 2026-01-08T15:36:01.969627Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2026-01-08T15:36:01.969712Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2026-01-08T15:36:01.997260Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2026-01-08T15:36:02.399552Z node 2 :KQP_PROXY WARN: 
kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: PathErrorUnknown, path: Root/.metadata/script_executions 2026-01-08T15:36:02.550422Z node 2 :TX_CONVEYOR ERROR: log.cpp:841: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2026-01-08T15:36:02.762947Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2026-01-08T15:36:02.762970Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2026-01-08T15:36:02.762977Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2026-01-08T15:36:02.763060Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2026-01-08T15:36:03.296222Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2026-01-08T15:36:03.637346Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpDropPersQueueGroup, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_drop_pq.cpp:422) 2026-01-08T15:36:03.736390Z node 2 :HIVE WARN: hive_impl.cpp:519: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 2, TabletId: 72075186224037889 not found 2026-01-08T15:36:03.736424Z node 2 :HIVE WARN: hive_impl.cpp:519: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 2, TabletId: 72075186224037888 not found 2026-01-08T15:36:03.758042Z node 2 :TX_PROXY ERROR: schemereq.cpp:624: Actor# [2:7593014971675496398:2599] txid# 281474976710660, issues: { message: "Path does not exist" issue_code: 200200 severity: 1 } ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/replication/ydb_proxy/ut/unittest >> YdbProxy::OAuthToken [GOOD] Test command err: 2026-01-08T15:35:57.771630Z node 1 :METADATA_PROVIDER WARN: log.cpp:841: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7593014946888686205:2173];send_to=[0:7307199536658146131:7762515]; 2026-01-08T15:35:57.771713Z node 1 :METADATA_PROVIDER ERROR: log.cpp:841: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2026-01-08T15:35:58.412996Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2026-01-08T15:35:58.413090Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2026-01-08T15:35:58.414124Z node 1 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: LookupError, path: Root/.metadata/script_executions 2026-01-08T15:35:58.425121Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2026-01-08T15:35:58.692284Z node 1 :KQP_PROXY 
WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: LookupError, path: Root/.metadata/script_executions 2026-01-08T15:35:58.697821Z node 1 :IMPORT WARN: schemeshard_import.cpp:440: Table profiles were not loaded 2026-01-08T15:35:58.774992Z node 1 :TX_CONVEYOR ERROR: log.cpp:841: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2026-01-08T15:35:58.993315Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2026-01-08T15:35:58.993336Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2026-01-08T15:35:58.993343Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2026-01-08T15:35:58.993453Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2026-01-08T15:35:59.272993Z node 1 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: PathErrorUnknown, path: Root/.metadata/script_executions 2026-01-08T15:35:59.312147Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2026-01-08T15:36:02.339297Z node 2 :METADATA_PROVIDER WARN: log.cpp:841: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7593014969142633237:2074];send_to=[0:7307199536658146131:7762515]; 2026-01-08T15:36:02.339904Z node 2 :METADATA_PROVIDER ERROR: log.cpp:841: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2026-01-08T15:36:02.608410Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2026-01-08T15:36:02.608516Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2026-01-08T15:36:02.609240Z node 2 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: LookupError, path: Root/.metadata/script_executions 2026-01-08T15:36:02.615960Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2026-01-08T15:36:02.635397Z node 2 :IMPORT WARN: schemeshard_import.cpp:440: Table profiles were not loaded 2026-01-08T15:36:02.877735Z node 2 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: PathErrorUnknown, path: Root/.metadata/script_executions 2026-01-08T15:36:03.074981Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2026-01-08T15:36:03.075004Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2026-01-08T15:36:03.075011Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to 
initialize from file: (empty maybe) 2026-01-08T15:36:03.075072Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2026-01-08T15:36:03.402508Z node 2 :TX_CONVEYOR ERROR: log.cpp:841: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2026-01-08T15:36:03.515924Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2026-01-08T15:36:03.531203Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:123: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2026-01-08T15:36:03.601019Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:123: NotifyTxCompletion, unknown transaction, txId: 281474976710658, at schemeshard: 72057594046644480 >> YdbProxy::CreateCdcStream [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/replication/ydb_proxy/ut/unittest >> YdbProxy::DescribeConsumer [GOOD] Test command err: 2026-01-08T15:35:57.331037Z node 1 :METADATA_PROVIDER WARN: log.cpp:841: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7593014949355621954:2074];send_to=[0:7307199536658146131:7762515]; 2026-01-08T15:35:57.331097Z node 1 :METADATA_PROVIDER ERROR: log.cpp:841: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2026-01-08T15:35:57.697701Z node 1 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: LookupError, path: Root/.metadata/script_executions 2026-01-08T15:35:57.721292Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2026-01-08T15:35:57.721438Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2026-01-08T15:35:57.728248Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1374: Notification cookie mismatch for subscription [1:7593014949355621911:2081] 1767886557330032 != 1767886557330035 2026-01-08T15:35:57.749612Z node 1 :IMPORT WARN: schemeshard_import.cpp:440: Table profiles were not loaded 2026-01-08T15:35:57.761682Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2026-01-08T15:35:57.873819Z node 1 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: PathErrorUnknown, path: Root/.metadata/script_executions 2026-01-08T15:35:58.119909Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2026-01-08T15:35:58.119932Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2026-01-08T15:35:58.119940Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2026-01-08T15:35:58.120044Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2026-01-08T15:35:58.362754Z node 1 
:TX_CONVEYOR ERROR: log.cpp:841: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2026-01-08T15:35:58.661901Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2026-01-08T15:35:58.674753Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:123: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2026-01-08T15:35:58.775900Z node 1 :TX_PROXY ERROR: schemereq.cpp:624: Actor# [1:7593014953650590049:2493] txid# 281474976710658, issues: { message: "Invalid retention period: specified: 31536000s, min: 1s, max: 2678400s" severity: 1 } 2026-01-08T15:36:02.253897Z node 2 :METADATA_PROVIDER WARN: log.cpp:841: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7593014970856964754:2152];send_to=[0:7307199536658146131:7762515]; 2026-01-08T15:36:02.253974Z node 2 :METADATA_PROVIDER ERROR: log.cpp:841: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2026-01-08T15:36:02.325137Z node 2 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: LookupError, path: Root/.metadata/script_executions 2026-01-08T15:36:02.431687Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2026-01-08T15:36:02.431791Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2026-01-08T15:36:02.435768Z node 2 :IMPORT WARN: schemeshard_import.cpp:440: Table profiles were not loaded 2026-01-08T15:36:02.495536Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2026-01-08T15:36:02.618188Z node 2 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: PathErrorUnknown, path: Root/.metadata/script_executions 2026-01-08T15:36:02.975116Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2026-01-08T15:36:02.975139Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2026-01-08T15:36:02.975147Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2026-01-08T15:36:02.975239Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2026-01-08T15:36:03.281156Z node 2 :TX_CONVEYOR ERROR: log.cpp:841: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2026-01-08T15:36:03.312102Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) |88.1%| [TM] 
{BAZEL_UPLOAD} ydb/core/tx/replication/ydb_proxy/ut/unittest |88.1%| [TM] {asan, default-linux-x86_64, release} ydb/core/cms/console/ut_console/unittest >> TModificationsValidatorTests::TestComputeAffectedConfigs_All_DomainUnaffected_TENANTS_AND_NODE_TYPES [GOOD] |88.1%| [TM] {BAZEL_UPLOAD} ydb/core/tx/replication/ydb_proxy/ut/unittest ------- [TM] {asan, default-linux-x86_64, release} ydb/public/sdk/cpp/src/client/federated_topic/ut/unittest >> BasicUsage::WriteSessionSwitchDatabases [GOOD] Test command err: 2026-01-08T15:33:32.330403Z :WriteSessionNoAvailableDatabase INFO: Random seed for debugging is 1767886412330368 2026-01-08T15:33:32.827951Z node 1 :METADATA_PROVIDER WARN: log.cpp:841: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7593014324504646002:2201];send_to=[0:7307199536658146131:7762515]; 2026-01-08T15:33:32.828000Z node 1 :METADATA_PROVIDER ERROR: log.cpp:841: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2026-01-08T15:33:32.894437Z node 1 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:125: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, got undelivered to scheme cache: ActorUnknown 2026-01-08T15:33:32.924039Z node 1 :PQ_READ_PROXY DEBUG: caching_service.cpp:44: Direct read cache: : Created 2026-01-08T15:33:32.927974Z node 2 :METADATA_PROVIDER WARN: log.cpp:841: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7593014326423985484:2078];send_to=[0:7307199536658146131:7762515]; 2026-01-08T15:33:32.928020Z node 2 :METADATA_PROVIDER ERROR: log.cpp:841: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2026-01-08T15:33:32.991286Z node 2 :PQ_READ_PROXY DEBUG: caching_service.cpp:44: Direct read cache: : Created 2026-01-08T15:33:33.304409Z node 2 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: LookupError, path: Root/.metadata/script_executions 2026-01-08T15:33:33.334744Z node 1 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: LookupError, path: Root/.metadata/script_executions 2026-01-08T15:33:33.525974Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2026-01-08T15:33:33.526068Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2026-01-08T15:33:33.528506Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2026-01-08T15:33:33.528584Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2026-01-08T15:33:33.586864Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2026-01-08T15:33:33.675160Z node 1 :HIVE WARN: hive_impl.cpp:814: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 2 Cookie 2 2026-01-08T15:33:33.683094Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2026-01-08T15:33:33.721720Z 
node 2 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: PathErrorUnknown, path: Root/.metadata/script_executions 2026-01-08T15:33:33.796069Z node 1 :IMPORT WARN: schemeshard_import.cpp:440: Table profiles were not loaded 2026-01-08T15:33:33.816959Z node 1 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: PathErrorUnknown, path: Root/.metadata/script_executions 2026-01-08T15:33:33.852326Z node 2 :BS_NODE WARN: {NWDC01@distconf.cpp:423} StateFunc too long Type# 268639238 Duration# 0.008721s 2026-01-08T15:33:33.888164Z node 1 :TX_CONVEYOR ERROR: log.cpp:841: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2026-01-08T15:33:33.986779Z node 2 :TX_CONVEYOR ERROR: log.cpp:841: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2026-01-08T15:33:34.081105Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: /home/runner/.ya/build/build_root/663n/0012f6/r3tmp/yandexsrTbWh.tmp 2026-01-08T15:33:34.081141Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: /home/runner/.ya/build/build_root/663n/0012f6/r3tmp/yandexsrTbWh.tmp 2026-01-08T15:33:34.081275Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:202: successfully initialized from file: /home/runner/.ya/build/build_root/663n/0012f6/r3tmp/yandexsrTbWh.tmp 2026-01-08T15:33:34.081354Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2026-01-08T15:33:34.187367Z INFO: TTestServer started on Port 28725 GrpcPort 2065 PQClient connected to localhost:2065 2026-01-08T15:33:34.602665Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2026-01-08T15:33:37.830290Z node 1 :METADATA_PROVIDER ERROR: log.cpp:841: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7593014324504646002:2201];send_to=[0:7307199536658146131:7762515]; 2026-01-08T15:33:37.830378Z node 1 :METADATA_PROVIDER ERROR: log.cpp:841: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2026-01-08T15:33:37.930725Z node 2 :METADATA_PROVIDER ERROR: log.cpp:841: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[2:7593014326423985484:2078];send_to=[0:7307199536658146131:7762515]; 2026-01-08T15:33:37.930796Z node 2 :METADATA_PROVIDER ERROR: log.cpp:841: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2026-01-08T15:33:38.181084Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7593014350274450955:2331], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:33:38.181244Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7593014350274450977:2336], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:33:38.181324Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:263: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:33:38.186414Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715661:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2026-01-08T15:33:38.186422Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7593014350274450980:2338], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:33:38.186515Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:263: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:33:38.289250Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7593014350274450979:2337], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715661 completed, doublechecking } 2026-01-08T15:33:38.604718Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:33:38.605647Z node 1 :TX_PROXY ERROR: schemereq.cpp:624: Actor# [1:7593014350274451070:2978] txid# 281474976715662, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 44], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2026-01-08T15:33:38.615814Z node 2 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:625: Compilation failed, self: [2:7593014352193789602:2309], status: SCHEME_ERROR, issues:
: Error: Type annotation, code: 1030
:3:16: Error: At function: KiReadTable!
:3:16: Error: Cannot find table 'db.[/Root/PQ/Config/V2/Cluster]' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions., code: 2003 2026-01-08T15:33:38.618763Z node 2 :KQP_SESSION WARN: {KQPSA@kqp_session_actor.cpp:2679} SessionId: ydb://session/3?node_id=2&id=MTJmNzg0LWY1N2JhY2UtYmJlYmFiZWEtYTYwNzUwMGU=, ActorId: [2:7593014352193789562:2303], ActorState: ExecuteState, LegacyTraceId: 01kef3vw99etmge8mmh7e1w4b1, ReplyQueryCompileError, remove tx status# SCHEME_ERROR issues# { message: "Type annotation" issue_code: 1030 severity: 1 issues { position { row: 3 column: 16 } message: "At function: KiReadTable!" end_position { row: 3 column: 16 } severity: 1 issues { position { row: 3 column: 16 } message: "Cannot find table \'db.[/Root/PQ/Config/V2/Cluster]\' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions." end_position { row: 3 column: 16 } issue_code: 2003 severity: 1 } } } tx_id# trace_id# 2026-01-08T15:33:38.621239Z node 2 :PERSQUEUE_CLUSTER_TRACKER ERROR: cluster_tracker.cpp:167: failed to list clusters: { Response { QueryIssues { message: "Type annotation" issue_code: 1030 severity: 1 issues { position { row: 3 column: 16 } message: "At function: KiReadTable!" end_position { row: 3 column: 16 } severity: 1 issues { position { row: 3 column: 16 } message: "Cannot find table \'db.[/Root/PQ/Config/V2/Cluster]\' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions." end_position { row: 3 column: 16 } issue_code: 2003 severity: 1 } } } TxMeta { } } YdbStatus: SCHEME_ERROR ConsumedRu: 1 } 2026-01-08T15:33:38.630438Z node 1 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:625: Compilation failed, self: [1:7593014350274451122:2348], status: SCHEME_ERROR, issues:
: Error: Type annotation, code: 1030
:3:16: Error: At function: KiReadTable!
:3:16: Error: Cannot find table 'db.[/Root/PQ/Config/V2/Cluster]' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions., code: 2003 2026-01-08T15:33:38.632501Z node 1 :KQP_SESSION WARN: {KQPSA@kqp_session_actor.cpp:2679} SessionId: ydb://session/3?node_id=1&id=YTBiOTk1NDgtZDg0NzIwNzMtNTkxMDQyY2UtYTM5NDYxNjc=, ActorId: [1:7593014350274450953:2330], ActorState: ExecuteState, LegacyTraceId: 01kef3vw7q6h0j0kscg4x1bb4a, ReplyQueryCompileError, remove tx status# SCHEME_ER ... ad.h:275: [72075186224037892][PQCacheProxy]CacheProxy. Passthrough write request to KV 2026-01-08T15:36:02.953578Z node 4 :PERSQUEUE DEBUG: read.h:313: [72075186224037892][PQCacheProxy]CacheProxy. Passthrough blob. Partition 0 offset 2 partNo 0 count 1 size 160 2026-01-08T15:36:02.963122Z node 4 :PERSQUEUE DEBUG: cache_eviction.h:311: Caching head blob in L1. Partition 0 offset 2 count 1 size 160 actorID [4:7593014450544735469:2386] 2026-01-08T15:36:02.963197Z node 4 :PERSQUEUE DEBUG: partition.cpp:2153: [72075186224037892][Partition][0][StateIdle] Received TEvKeyValue::TEvResponse 2026-01-08T15:36:02.963269Z node 4 :PERSQUEUE DEBUG: partition_write.cpp:572: [72075186224037892][Partition][0][StateIdle] TPartition::HandleWriteResponse writeNewSize# 105 WriteNewSizeFromSupportivePartitions# 0 2026-01-08T15:36:02.963308Z node 4 :PERSQUEUE DEBUG: partition_write.cpp:60: [72075186224037892][Partition][0][StateIdle] TPartition::ReplyWrite. Partition: 0 2026-01-08T15:36:02.963345Z node 4 :PERSQUEUE DEBUG: partition_write.cpp:365: [72075186224037892][Partition][0][StateIdle] Answering for message sourceid: '\0src_id', Topic: 'rt3.dc1--test-topic', Partition: 0, SeqNo: 3, partNo: 0, Offset: 2 is stored on disk 2026-01-08T15:36:02.963520Z node 4 :PERSQUEUE DEBUG: partition.cpp:2329: [72075186224037892][Partition][0][StateIdle] Process user action and tx events 2026-01-08T15:36:02.963539Z node 4 :PERSQUEUE DEBUG: partition.cpp:2388: [72075186224037892][Partition][0][StateIdle] Events: 0, PendingCommits: 0, PendingWrites: 0 2026-01-08T15:36:02.963553Z node 4 :PERSQUEUE DEBUG: partition.cpp:2337: [72075186224037892][Partition][0][StateIdle] Process user action and tx pending commits 2026-01-08T15:36:02.963570Z node 4 :PERSQUEUE DEBUG: partition.cpp:2388: [72075186224037892][Partition][0][StateIdle] Events: 0, PendingCommits: 0, PendingWrites: 0 2026-01-08T15:36:02.963584Z node 4 :PERSQUEUE DEBUG: partition.cpp:2347: [72075186224037892][Partition][0][StateIdle] Try persist 2026-01-08T15:36:02.963616Z node 4 :PERSQUEUE DEBUG: partition_compaction.cpp:193: [72075186224037892][Partition][0][StateIdle] No data for blobs compaction 2026-01-08T15:36:02.963675Z node 4 :PERSQUEUE DEBUG: pq_impl.cpp:1199: [PQ: 72075186224037892] Topic 'rt3.dc1--test-topic' counters. CacheSize 480 CachedBlobs 3 2026-01-08T15:36:02.963708Z node 4 :PERSQUEUE DEBUG: pq_impl.cpp:182: Answer ok topic: 'rt3.dc1--test-topic' partition: 0 messageNo: 1 requestId: cookie: 1 2026-01-08T15:36:02.964186Z node 4 :PERSQUEUE DEBUG: pq_l2_cache.cpp:129: PQ Cache (L2). Adding blob. 
Tablet '72075186224037892' partition 0 offset 2 partno 0 count 1 parts 0 suffix '63' size 160 2026-01-08T15:36:02.965323Z node 3 :PQ_WRITE_PROXY DEBUG: writer.cpp:562: TPartitionWriter 72075186224037892 (partition=0) Received event: NActors::IEventHandle 2026-01-08T15:36:02.966702Z :DEBUG: [/Root] TraceId [] SessionId [src_id|6686b94f-6c01467e-662854b0-239277e6_0] MessageGroupId [src_id] Write session: OnReadDone gRpcStatusCode: 0 2026-01-08T15:36:02.966874Z :DEBUG: [/Root] TraceId [] SessionId [src_id|6686b94f-6c01467e-662854b0-239277e6_0] MessageGroupId [src_id] Write session got write response: acks { seq_no: 3 written { offset: 2 } } write_statistics { persisting_time { nanos: 15000000 } min_queue_wait_time { } max_queue_wait_time { } partition_quota_wait_time { } topic_quota_wait_time { } } 2026-01-08T15:36:02.966906Z :DEBUG: [/Root] TraceId [] SessionId [src_id|6686b94f-6c01467e-662854b0-239277e6_0] MessageGroupId [src_id] OnAck: seqNo=1, txId=? 2026-01-08T15:36:02.966936Z :DEBUG: [/Root] TraceId [] SessionId [src_id|6686b94f-6c01467e-662854b0-239277e6_0] MessageGroupId [src_id] Write session: acknoledged message 1 2026-01-08T15:36:02.969282Z node 4 :PERSQUEUE DEBUG: partition.cpp:2329: [72075186224037892][Partition][0][StateIdle] Process user action and tx events 2026-01-08T15:36:02.969319Z node 4 :PERSQUEUE DEBUG: partition.cpp:2388: [72075186224037892][Partition][0][StateIdle] Events: 0, PendingCommits: 0, PendingWrites: 0 2026-01-08T15:36:02.969332Z node 4 :PERSQUEUE DEBUG: partition.cpp:2337: [72075186224037892][Partition][0][StateIdle] Process user action and tx pending commits 2026-01-08T15:36:02.969359Z node 4 :PERSQUEUE DEBUG: partition.cpp:2388: [72075186224037892][Partition][0][StateIdle] Events: 0, PendingCommits: 0, PendingWrites: 0 2026-01-08T15:36:02.969379Z node 4 :PERSQUEUE DEBUG: partition.cpp:2347: [72075186224037892][Partition][0][StateIdle] Try persist 2026-01-08T15:36:02.971221Z node 3 :PQ_WRITE_PROXY DEBUG: write_session_actor.cpp:254: session v1 cookie: 4 sessionId: src_id|6686b94f-6c01467e-662854b0-239277e6_0 grpc read done: success: 0 data: 2026-01-08T15:36:02.971251Z node 3 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:256: session v1 cookie: 4 sessionId: src_id|6686b94f-6c01467e-662854b0-239277e6_0 grpc read failed 2026-01-08T15:36:02.971275Z node 3 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:232: session v1 cookie: 4 sessionId: src_id|6686b94f-6c01467e-662854b0-239277e6_0 grpc closed 2026-01-08T15:36:02.971291Z node 3 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:304: session v1 cookie: 4 sessionId: src_id|6686b94f-6c01467e-662854b0-239277e6_0 is DEAD 2026-01-08T15:36:02.971763Z node 3 :PQ_WRITE_PROXY DEBUG: writer.cpp:562: TPartitionWriter 72075186224037892 (partition=0) Received event: NActors::TEvents::TEvPoison 2026-01-08T15:36:02.974966Z :DEBUG: [/Root] TraceId [] SessionId [src_id|6686b94f-6c01467e-662854b0-239277e6_0] MessageGroupId [src_id] Write session: OnReadDone gRpcStatusCode: 1, Msg: Cancelled on the server side, Details: , InternalError: 0 2026-01-08T15:36:02.975072Z :ERROR: [/Root] TraceId [] SessionId [src_id|6686b94f-6c01467e-662854b0-239277e6_0] MessageGroupId [src_id] Got error. Status: CLIENT_CANCELLED, Description:
: Error: GRpc error: (1): Cancelled on the server side 2026-01-08T15:36:02.975118Z :ERROR: [/Root] TraceId [] SessionId [src_id|6686b94f-6c01467e-662854b0-239277e6_0] MessageGroupId [src_id] Write session will not restart after a fatal error 2026-01-08T15:36:02.975157Z :INFO: [/Root] TraceId [] SessionId [src_id|6686b94f-6c01467e-662854b0-239277e6_0] MessageGroupId [src_id] Write session will now close 2026-01-08T15:36:02.975249Z :DEBUG: [/Root] TraceId [] SessionId [src_id|6686b94f-6c01467e-662854b0-239277e6_0] MessageGroupId [src_id] Write session: aborting 2026-01-08T15:36:02.975042Z node 4 :PERSQUEUE DEBUG: pq_impl.cpp:2809: [PQ: 72075186224037892] server disconnected, pipe [3:7593014882234620648:3165] destroyed 2026-01-08T15:36:02.975094Z node 4 :PERSQUEUE DEBUG: partition_write.cpp:140: [72075186224037892][Partition][0][StateIdle] TPartition::DropOwner. 2026-01-08T15:36:02.975125Z node 4 :PERSQUEUE DEBUG: partition.cpp:2329: [72075186224037892][Partition][0][StateIdle] Process user action and tx events 2026-01-08T15:36:02.975142Z node 4 :PERSQUEUE DEBUG: partition.cpp:2388: [72075186224037892][Partition][0][StateIdle] Events: 0, PendingCommits: 0, PendingWrites: 0 2026-01-08T15:36:02.975154Z node 4 :PERSQUEUE DEBUG: partition.cpp:2337: [72075186224037892][Partition][0][StateIdle] Process user action and tx pending commits 2026-01-08T15:36:02.975172Z node 4 :PERSQUEUE DEBUG: partition.cpp:2388: [72075186224037892][Partition][0][StateIdle] Events: 0, PendingCommits: 0, PendingWrites: 0 2026-01-08T15:36:02.975185Z node 4 :PERSQUEUE DEBUG: partition.cpp:2347: [72075186224037892][Partition][0][StateIdle] Try persist 2026-01-08T15:36:03.017708Z :DEBUG: [/Root] TraceId [] SessionId [src_id|6686b94f-6c01467e-662854b0-239277e6_0] MessageGroupId [src_id] Write session: destroy 2026-01-08T15:36:03.070626Z node 4 :PERSQUEUE DEBUG: partition.cpp:2329: [72075186224037892][Partition][0][StateIdle] Process user action and tx events 2026-01-08T15:36:03.070665Z node 4 :PERSQUEUE DEBUG: partition.cpp:2388: [72075186224037892][Partition][0][StateIdle] Events: 0, PendingCommits: 0, PendingWrites: 0 2026-01-08T15:36:03.070679Z node 4 :PERSQUEUE DEBUG: partition.cpp:2337: [72075186224037892][Partition][0][StateIdle] Process user action and tx pending commits 2026-01-08T15:36:03.070702Z node 4 :PERSQUEUE DEBUG: partition.cpp:2388: [72075186224037892][Partition][0][StateIdle] Events: 0, PendingCommits: 0, PendingWrites: 0 2026-01-08T15:36:03.070717Z node 4 :PERSQUEUE DEBUG: partition.cpp:2347: [72075186224037892][Partition][0][StateIdle] Try persist 2026-01-08T15:36:03.174459Z node 4 :PERSQUEUE DEBUG: partition.cpp:2329: [72075186224037892][Partition][0][StateIdle] Process user action and tx events 2026-01-08T15:36:03.174502Z node 4 :PERSQUEUE DEBUG: partition.cpp:2388: [72075186224037892][Partition][0][StateIdle] Events: 0, PendingCommits: 0, PendingWrites: 0 2026-01-08T15:36:03.174514Z node 4 :PERSQUEUE DEBUG: partition.cpp:2337: [72075186224037892][Partition][0][StateIdle] Process user action and tx pending commits 2026-01-08T15:36:03.174540Z node 4 :PERSQUEUE DEBUG: partition.cpp:2388: [72075186224037892][Partition][0][StateIdle] Events: 0, PendingCommits: 0, PendingWrites: 0 2026-01-08T15:36:03.174556Z node 4 :PERSQUEUE DEBUG: partition.cpp:2347: [72075186224037892][Partition][0][StateIdle] Try persist 2026-01-08T15:36:03.274737Z node 4 :PERSQUEUE DEBUG: partition.cpp:2329: [72075186224037892][Partition][0][StateIdle] Process user action and tx events 2026-01-08T15:36:03.274771Z node 4 :PERSQUEUE DEBUG: 
partition.cpp:2388: [72075186224037892][Partition][0][StateIdle] Events: 0, PendingCommits: 0, PendingWrites: 0 2026-01-08T15:36:03.274783Z node 4 :PERSQUEUE DEBUG: partition.cpp:2337: [72075186224037892][Partition][0][StateIdle] Process user action and tx pending commits 2026-01-08T15:36:03.274802Z node 4 :PERSQUEUE DEBUG: partition.cpp:2388: [72075186224037892][Partition][0][StateIdle] Events: 0, PendingCommits: 0, PendingWrites: 0 2026-01-08T15:36:03.274815Z node 4 :PERSQUEUE DEBUG: partition.cpp:2347: [72075186224037892][Partition][0][StateIdle] Try persist 2026-01-08T15:36:03.854153Z node 3 :KQP_EXECUTER ERROR: {KQPEX@kqp_executer_impl.h:1259} ActorId: [3:7593014972428934685:3305] TxId: 281474976710762. Ctx: { TraceId: 01kef409vy19mfn0r21p2zp8sm, Database: /Root, SessionId: ydb://session/3?node_id=3&id=NmVkNDMyOS1lN2M1ZjA5Ny1lNDhmZTM1MC04Nzg3ZWQxYQ==, PoolId: default, IsStreamingQuery: 0}. UNAVAILABLE: Failed to send EvStartKqpTasksRequest because node is unavailable: 4 trace_id# 2026-01-08T15:36:03.854360Z node 3 :KQP_COMPUTE ERROR: dq_compute_actor_impl.h:1264: SelfId: [3:7593014972428934689:3305], TxId: 281474976710762, task: 3. Ctx: { CheckpointId : . TraceId : 01kef409vy19mfn0r21p2zp8sm. CustomerSuppliedId : . RunScriptActorId : [0:0:0]. PoolId : default. SessionId : ydb://session/3?node_id=3&id=NmVkNDMyOS1lN2M1ZjA5Ny1lNDhmZTM1MC04Nzg3ZWQxYQ==. CurrentExecutionId : . Database : /Root. DatabaseId : /Root. }. Handle abort execution event from: [3:7593014972428934685:3305], status: UNAVAILABLE, reason: {
: Error: Terminate execution } 2026-01-08T15:36:04.859117Z node 3 :KQP_SESSION WARN: {KQPSA@kqp_session_actor.cpp:3171} SessionId: ydb://session/3?node_id=3&id=NmVkNDMyOS1lN2M1ZjA5Ny1lNDhmZTM1MC04Nzg3ZWQxYQ==, ActorId: [3:7593014972428934653:3305], ActorState: ExecuteState, LegacyTraceId: 01kef409vy19mfn0r21p2zp8sm, Create QueryResponse for error on request, msg: status# UNAVAILABLE issues# { message: "Kikimr cluster or one of its subsystems was unavailable." issue_code: 2005 severity: 1 issues { message: "Failed to send EvStartKqpTasksRequest because node is unavailable: 4" severity: 1 } } trace_id# 2026-01-08T15:36:04.860741Z node 3 :PERSQUEUE_CLUSTER_TRACKER ERROR: cluster_tracker.cpp:167: failed to list clusters: { Response { QueryIssues { message: "Kikimr cluster or one of its subsystems was unavailable." issue_code: 2005 severity: 1 issues { message: "Failed to send EvStartKqpTasksRequest because node is unavailable: 4" severity: 1 } } TxMeta { id: "01kef40ag2fxdqybaqbqc9kzzk" } } YdbStatus: UNAVAILABLE ConsumedRu: 424 } |88.1%| [TM] {BAZEL_UPLOAD} ydb/core/tx/replication/ydb_proxy/ut/unittest |88.1%| [TM] {BAZEL_UPLOAD} ydb/public/sdk/cpp/src/client/topic/ut/with_direct_read_ut/unittest ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/replication/ydb_proxy/ut/unittest >> YdbProxy::DescribeTopic [GOOD] Test command err: 2026-01-08T15:35:56.954396Z node 1 :METADATA_PROVIDER WARN: log.cpp:841: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7593014944422017019:2075];send_to=[0:7307199536658146131:7762515]; 2026-01-08T15:35:56.954487Z node 1 :METADATA_PROVIDER ERROR: log.cpp:841: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2026-01-08T15:35:57.022407Z node 1 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:125: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, got undelivered to scheme cache: ActorUnknown 2026-01-08T15:35:57.426419Z node 1 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: LookupError, path: Root/.metadata/script_executions 2026-01-08T15:35:57.436084Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2026-01-08T15:35:57.436185Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2026-01-08T15:35:57.476508Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2026-01-08T15:35:57.578277Z node 1 :IMPORT WARN: schemeshard_import.cpp:440: Table profiles were not loaded 2026-01-08T15:35:57.657301Z node 1 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: PathErrorUnknown, path: Root/.metadata/script_executions 2026-01-08T15:35:57.845097Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2026-01-08T15:35:57.845130Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2026-01-08T15:35:57.845139Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2026-01-08T15:35:57.845255Z 
node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2026-01-08T15:35:57.986963Z node 1 :TX_CONVEYOR ERROR: log.cpp:841: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2026-01-08T15:35:58.287459Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2026-01-08T15:35:58.307242Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:123: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2026-01-08T15:36:01.109467Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:36:01.431295Z node 1 :HIVE WARN: hive_impl.cpp:519: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037888 not found 2026-01-08T15:36:01.441739Z node 1 :TX_PROXY ERROR: schemereq.cpp:624: Actor# [1:7593014965896854481:2600] txid# 281474976710660, issues: { message: "Path does not exist" issue_code: 200200 severity: 1 } 2026-01-08T15:36:02.462438Z node 2 :METADATA_PROVIDER WARN: log.cpp:841: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7593014967299536722:2074];send_to=[0:7307199536658146131:7762515]; 2026-01-08T15:36:02.462498Z node 2 :METADATA_PROVIDER ERROR: log.cpp:841: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2026-01-08T15:36:02.514005Z node 2 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: LookupError, path: Root/.metadata/script_executions 2026-01-08T15:36:02.717104Z node 2 :IMPORT WARN: schemeshard_import.cpp:440: Table profiles were not loaded 2026-01-08T15:36:02.732036Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2026-01-08T15:36:02.732142Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2026-01-08T15:36:02.743346Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2026-01-08T15:36:02.818438Z node 2 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: PathErrorUnknown, path: Root/.metadata/script_executions 2026-01-08T15:36:03.350803Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2026-01-08T15:36:03.350829Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2026-01-08T15:36:03.350836Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2026-01-08T15:36:03.350934Z node 2 
:NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2026-01-08T15:36:03.474835Z node 2 :TX_CONVEYOR ERROR: log.cpp:841: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2026-01-08T15:36:03.783296Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2026-01-08T15:36:03.789539Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:123: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/replication/ydb_proxy/ut/unittest >> YdbProxy::DescribeTable [GOOD] Test command err: 2026-01-08T15:35:56.815433Z node 1 :METADATA_PROVIDER WARN: log.cpp:841: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7593014942730619822:2087];send_to=[0:7307199536658146131:7762515]; 2026-01-08T15:35:56.815484Z node 1 :METADATA_PROVIDER ERROR: log.cpp:841: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2026-01-08T15:35:56.890876Z node 1 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:125: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, got undelivered to scheme cache: ActorUnknown 2026-01-08T15:35:57.439126Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2026-01-08T15:35:57.439257Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2026-01-08T15:35:57.444613Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2026-01-08T15:35:57.490344Z node 1 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: LookupError, path: Root/.metadata/script_executions 2026-01-08T15:35:57.498909Z node 1 :IMPORT WARN: schemeshard_import.cpp:440: Table profiles were not loaded 2026-01-08T15:35:57.722951Z node 1 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: PathErrorUnknown, path: Root/.metadata/script_executions 2026-01-08T15:35:57.725706Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2026-01-08T15:35:57.725717Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2026-01-08T15:35:57.725738Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2026-01-08T15:35:57.725878Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2026-01-08T15:35:57.830764Z node 1 :TX_CONVEYOR ERROR: log.cpp:841: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2026-01-08T15:35:58.082821Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose 
itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2026-01-08T15:35:58.200900Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:123: NotifyTxCompletion, unknown transaction, txId: 281474976710658, at schemeshard: 72057594046644480 2026-01-08T15:36:01.575254Z node 2 :METADATA_PROVIDER WARN: log.cpp:841: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7593014965882135380:2073];send_to=[0:7307199536658146131:7762515]; 2026-01-08T15:36:01.596484Z node 2 :METADATA_PROVIDER ERROR: log.cpp:841: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2026-01-08T15:36:01.682452Z node 2 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: LookupError, path: Root/.metadata/script_executions 2026-01-08T15:36:01.792800Z node 2 :IMPORT WARN: schemeshard_import.cpp:440: Table profiles were not loaded 2026-01-08T15:36:01.794469Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1374: Notification cookie mismatch for subscription [2:7593014965882135346:2081] 1767886561553993 != 1767886561553996 2026-01-08T15:36:01.902920Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2026-01-08T15:36:01.902999Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2026-01-08T15:36:01.911077Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2026-01-08T15:36:01.940619Z node 2 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: PathErrorUnknown, path: Root/.metadata/script_executions 2026-01-08T15:36:02.370593Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2026-01-08T15:36:02.370616Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2026-01-08T15:36:02.370626Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2026-01-08T15:36:02.370703Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2026-01-08T15:36:02.598534Z node 2 :TX_CONVEYOR ERROR: log.cpp:841: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2026-01-08T15:36:02.896285Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2026-01-08T15:36:02.903478Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:123: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2026-01-08T15:36:05.673246Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but 
propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) |88.1%| [TM] {BAZEL_UPLOAD} ydb/core/cms/console/ut_console/unittest |88.1%| [TM] {BAZEL_UPLOAD} ydb/core/tx/replication/ydb_proxy/ut/unittest |88.1%| [TM] {BAZEL_UPLOAD} ydb/core/tx/replication/ydb_proxy/ut/unittest |88.2%| [TM] {BAZEL_UPLOAD} ydb/public/sdk/cpp/src/client/federated_topic/ut/unittest |88.2%| [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/close_with_load/unittest |88.2%| [TM] {BAZEL_UPLOAD} ydb/core/kqp/ut/close_with_load/unittest >> TConsoleConfigTests::TestModifyConfigItem [GOOD] >> TConsoleConfigTests::TestRemoveConfigItem >> TJaegerTracingConfiguratorTests::RequestTypeThrottler [GOOD] >> TJaegerTracingConfiguratorTests::RequestTypeSampler >> TConsoleTests::TestCreateSharedTenant >> TConsoleConfigSubscriptionTests::TestRemoveConfigSubscription [GOOD] >> TConsoleConfigSubscriptionTests::TestRemoveConfigSubscriptions |88.2%| [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/close_with_load/unittest |88.2%| [TM] {BAZEL_UPLOAD} ydb/core/kqp/ut/close_with_load/unittest |88.2%| [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/close_with_load/unittest >> DataShardVolatile::GracefulShardRestartNoEarlyReadSetAck [GOOD] |88.2%| [TM] {BAZEL_UPLOAD} ydb/core/kqp/ut/close_with_load/unittest >> TConsoleTests::TestCreateTenant >> TxUsage::Sinks_Olap_WriteToTopicAndTable_4_Table |88.2%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/schemeshard/ut_view/ydb-core-tx-schemeshard-ut_view |88.2%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/schemeshard/ut_view/ydb-core-tx-schemeshard-ut_view |88.2%| [LD] {RESULT} $(B)/ydb/core/tx/schemeshard/ut_view/ydb-core-tx-schemeshard-ut_view ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/replication/ydb_proxy/ut/unittest >> YdbProxy::CreateCdcStream [GOOD] Test command err: 2026-01-08T15:35:57.438304Z node 1 :METADATA_PROVIDER WARN: log.cpp:841: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7593014948234209188:2262];send_to=[0:7307199536658146131:7762515]; 2026-01-08T15:35:57.441785Z node 1 :METADATA_PROVIDER ERROR: log.cpp:841: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2026-01-08T15:35:57.473375Z node 1 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:125: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, got undelivered to scheme cache: ActorUnknown 2026-01-08T15:35:57.818435Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2026-01-08T15:35:57.818594Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2026-01-08T15:35:57.909240Z node 1 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: LookupError, path: Root/.metadata/script_executions 2026-01-08T15:35:57.921964Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2026-01-08T15:35:58.008974Z node 1 :IMPORT WARN: schemeshard_import.cpp:440: Table profiles were not loaded 
2026-01-08T15:35:58.097676Z node 1 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: PathErrorUnknown, path: Root/.metadata/script_executions 2026-01-08T15:35:58.344291Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2026-01-08T15:35:58.344326Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2026-01-08T15:35:58.344338Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2026-01-08T15:35:58.344419Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2026-01-08T15:35:58.402983Z node 1 :TX_CONVEYOR ERROR: log.cpp:841: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2026-01-08T15:35:58.957930Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2026-01-08T15:35:58.973691Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:123: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2026-01-08T15:36:01.934532Z node 1 :TX_PROXY ERROR: schemereq.cpp:624: Actor# [1:7593014965414079031:2505] txid# 281474976715658, issues: { message: "Column key has wrong key type Float" severity: 1 } 2026-01-08T15:36:01.976195Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:36:02.249469Z node 1 :TX_PROXY ERROR: schemereq.cpp:624: Actor# [1:7593014969709046413:2565] txid# 281474976715660, issues: { message: "Check failed: path: \'/Root/table\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 38], type: EPathTypeTable, state: EPathStateNoChanges)" severity: 1 } 2026-01-08T15:36:03.482943Z node 2 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:125: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, got undelivered to scheme cache: ActorUnknown 2026-01-08T15:36:03.483975Z node 2 :METADATA_PROVIDER WARN: log.cpp:841: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7593014972889004203:2265];send_to=[0:7307199536658146131:7762515]; 2026-01-08T15:36:03.484161Z node 2 :METADATA_PROVIDER ERROR: log.cpp:841: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2026-01-08T15:36:03.649777Z node 2 :BS_NODE WARN: {NWDC01@distconf.cpp:423} StateFunc too long Type# 268639258 Duration# 0.007233s 2026-01-08T15:36:03.664325Z node 2 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: LookupError, path: Root/.metadata/script_executions 2026-01-08T15:36:03.821953Z node 2 
:HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2026-01-08T15:36:03.822043Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2026-01-08T15:36:03.822209Z node 2 :IMPORT WARN: schemeshard_import.cpp:440: Table profiles were not loaded 2026-01-08T15:36:03.834472Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1374: Notification cookie mismatch for subscription [2:7593014972889003954:2081] 1767886563420252 != 1767886563420255 2026-01-08T15:36:03.861339Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2026-01-08T15:36:03.878434Z node 2 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: PathErrorUnknown, path: Root/.metadata/script_executions 2026-01-08T15:36:04.263072Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2026-01-08T15:36:04.263102Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2026-01-08T15:36:04.263110Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2026-01-08T15:36:04.263229Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2026-01-08T15:36:04.466587Z node 2 :TX_CONVEYOR ERROR: log.cpp:841: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2026-01-08T15:36:04.754229Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2026-01-08T15:36:04.761273Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:123: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2026-01-08T15:36:07.700591Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:36:07.902029Z node 2 :CHANGE_EXCHANGE WARN: change_sender_cdc_stream.cpp:398: [CdcChangeSenderMain][72075186224037888:1][2:7593014990068874211:2334] Failed entry at 'ResolveTopic': entry# { Path: TableId: [72057594046644480:40:0] RequestType: ByTableId Operation: OpTopic RedirectRequired: true ShowPrivatePath: true SyncVersion: false Status: PathErrorUnknown Kind: KindUnknown DomainInfo } 2026-01-08T15:36:07.938854Z node 2 :TX_PROXY ERROR: schemereq.cpp:624: Actor# [2:7593014990068874261:2644] txid# 281474976715660, issues: { message: "Check failed: path: \'/Root/table/updates\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 39], type: EPathTypeCdcStream, state: EPathStateNoChanges)" severity: 1 } |88.3%| [TM] {BAZEL_UPLOAD} ydb/core/tx/replication/ydb_proxy/ut/unittest >> KqpBatchUpdate::ManyPartitions_1 [GOOD] >> TConsoleConfigTests::TestRemoveConfigItem [GOOD] >> 
TConsoleConfigTests::TestRemoveConfigItems |88.2%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/public/sdk/cpp/src/client/topic/ut/ydb-public-sdk-cpp-src-client-topic-ut |88.2%| [LD] {RESULT} $(B)/ydb/public/sdk/cpp/src/client/topic/ut/ydb-public-sdk-cpp-src-client-topic-ut |88.3%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/public/sdk/cpp/src/client/topic/ut/ydb-public-sdk-cpp-src-client-topic-ut |88.3%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/kqp/ut/opt/ydb-core-kqp-ut-opt >> TJaegerTracingConfiguratorTests::RequestTypeSampler [GOOD] >> TJaegerTracingConfiguratorTests::SamplingSameScope |88.3%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/kqp/ut/opt/ydb-core-kqp-ut-opt |88.3%| [LD] {RESULT} $(B)/ydb/core/kqp/ut/opt/ydb-core-kqp-ut-opt |88.3%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/mon/audit/ut/ydb-core-mon-audit-ut |88.3%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/mon/audit/ut/ydb-core-mon-audit-ut |88.3%| [LD] {RESULT} $(B)/ydb/core/mon/audit/ut/ydb-core-mon-audit-ut >> TConsoleConfigSubscriptionTests::TestRemoveConfigSubscriptions [GOOD] >> TConsoleConfigSubscriptionTests::TestReplaceConfigSubscriptions >> TConsoleConfigHelpersTests::TestConfigSubscriber [GOOD] >> TConsoleConfigHelpersTests::TestConfigSubscriberAutoTenantTenant >> HttpRequest::ProbeBaseStatsServerless >> BasicStatistics::NotFullStatisticsColumnshard |88.3%| [TM] {asan, default-linux-x86_64, release} ydb/core/statistics/service/ut/unittest >> HttpRequest::AnalyzeServerless >> BsControllerConfig::MoveGroups [GOOD] |88.3%| [TM] {asan, default-linux-x86_64, release} ydb/core/statistics/service/ut/unittest |88.3%| [TM] {asan, default-linux-x86_64, release} ydb/core/statistics/service/ut/unittest |88.3%| [TM] {asan, default-linux-x86_64, release} ydb/core/statistics/service/ut/unittest |88.3%| [TM] {BAZEL_UPLOAD} ydb/core/statistics/service/ut/unittest |88.3%| [TM] {BAZEL_UPLOAD} ydb/core/statistics/service/ut/unittest |88.3%| [TM] {BAZEL_UPLOAD} ydb/core/statistics/service/ut/unittest |88.3%| [TM] {BAZEL_UPLOAD} ydb/core/statistics/service/ut/unittest |88.3%| [TM] {asan, default-linux-x86_64, release} ydb/core/statistics/service/ut/unittest |88.3%| [TM] {BAZEL_UPLOAD} ydb/core/statistics/service/ut/unittest >> ColumnStatistics::SimpleColumnStatistics >> TConsoleConfigTests::TestRemoveConfigItems [GOOD] >> TConsoleConfigTests::TestValidation |88.3%| [TA] $(B)/ydb/public/sdk/cpp/src/client/federated_topic/ut/test-results/unittest/{meta.json ... results_accumulator.log} |88.3%| [TA] {BAZEL_UPLOAD} $(B)/ydb/public/sdk/cpp/src/client/federated_topic/ut/test-results/unittest/{meta.json ... 
results_accumulator.log} >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-LocalUser-DropUser-70 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-LocalUser-DropUser-71 >> YdbProxy::AlterTopic [GOOD] >> TJaegerTracingConfiguratorTests::SamplingSameScope [GOOD] >> HttpRequest::Probe >> TConsoleConfigSubscriptionTests::TestReplaceConfigSubscriptions [GOOD] >> TJaegerTracingConfiguratorTests::ThrottlingByDb >> TConsoleConfigSubscriptionTests::TestNotificationForTimeoutedNotificationResponse >> TConsoleConfigHelpersTests::TestConfigSubscriberAutoTenantTenant [GOOD] >> TConsoleConfigHelpersTests::TestConfigSubscriberAutoTenantMultipleTenants |88.3%| [TM] {asan, default-linux-x86_64, release} ydb/core/statistics/service/ut/unittest |88.3%| [TM] {asan, default-linux-x86_64, release} ydb/core/statistics/service/ut/unittest >> TConsoleConfigTests::TestValidation [GOOD] >> TConsoleInMemoryConfigSubscriptionTests::TestSubscriptionCreate |88.3%| [TM] {BAZEL_UPLOAD} ydb/core/statistics/service/ut/unittest >> TConsoleTests::TestRestartConsoleAndPools [GOOD] >> TJaegerTracingConfiguratorTests::ThrottlingByDb [GOOD] >> TJaegerTracingConfiguratorTests::SamplingByDb >> BasicStatistics::TwoServerlessTwoSharedDbs >> TConsoleTests::TestRestartConsoleAndPoolsExtSubdomain |88.3%| [TM] {BAZEL_UPLOAD} ydb/core/statistics/service/ut/unittest |88.3%| [TA] {RESULT} $(B)/ydb/public/sdk/cpp/src/client/federated_topic/ut/test-results/unittest/{meta.json ... results_accumulator.log} |88.3%| [TM] {asan, default-linux-x86_64, release} ydb/core/statistics/service/ut/unittest |88.3%| [TM] {asan, default-linux-x86_64, release} ydb/core/statistics/service/ut/unittest |88.3%| [TM] {BAZEL_UPLOAD} ydb/core/statistics/service/ut/unittest >> BasicStatistics::TwoNodes |88.3%| [TM] {BAZEL_UPLOAD} ydb/core/statistics/service/ut/unittest ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/replication/ydb_proxy/ut/unittest >> YdbProxy::AlterTopic [GOOD] Test command err: 2026-01-08T15:35:56.778972Z node 1 :METADATA_PROVIDER WARN: log.cpp:841: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7593014943926393335:2076];send_to=[0:7307199536658146131:7762515]; 2026-01-08T15:35:56.787123Z node 1 :METADATA_PROVIDER ERROR: log.cpp:841: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2026-01-08T15:35:57.150573Z node 1 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: LookupError, path: Root/.metadata/script_executions 2026-01-08T15:35:57.293001Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2026-01-08T15:35:57.293121Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2026-01-08T15:35:57.311274Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1374: Notification cookie mismatch for subscription [1:7593014943926393295:2081] 1767886556763992 != 1767886556763995 2026-01-08T15:35:57.328895Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2026-01-08T15:35:57.330238Z node 1 :IMPORT WARN: schemeshard_import.cpp:440: Table profiles were not loaded 2026-01-08T15:35:57.376793Z node 1 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] 
[TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: PathErrorUnknown, path: Root/.metadata/script_executions 2026-01-08T15:35:57.646031Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2026-01-08T15:35:57.646053Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2026-01-08T15:35:57.646059Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2026-01-08T15:35:57.646155Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2026-01-08T15:35:57.807980Z node 1 :TX_CONVEYOR ERROR: log.cpp:841: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2026-01-08T15:35:58.105730Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2026-01-08T15:35:58.118757Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:123: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2026-01-08T15:36:00.925866Z node 1 :TX_PROXY ERROR: schemereq.cpp:624: Actor# [1:7593014961106263372:2504] txid# 281474976710658, issues: { message: "Path does not exist" issue_code: 200200 severity: 1 } 2026-01-08T15:36:00.958201Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:36:02.380958Z node 2 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: LookupError, path: Root/.metadata/script_executions 2026-01-08T15:36:02.381125Z node 2 :METADATA_PROVIDER ERROR: log.cpp:841: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2026-01-08T15:36:02.390501Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1374: Notification cookie mismatch for subscription [2:7593014969857206688:2081] 1767886562175844 != 1767886562175847 2026-01-08T15:36:02.416866Z node 2 :IMPORT WARN: schemeshard_import.cpp:440: Table profiles were not loaded 2026-01-08T15:36:02.420714Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2026-01-08T15:36:02.420779Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2026-01-08T15:36:02.426196Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2026-01-08T15:36:02.623015Z node 2 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: PathErrorUnknown, path: Root/.metadata/script_executions 2026-01-08T15:36:02.903169Z node 2 :NET_CLASSIFIER 
WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2026-01-08T15:36:02.903191Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2026-01-08T15:36:02.903198Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2026-01-08T15:36:02.903278Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2026-01-08T15:36:03.190352Z node 2 :TX_CONVEYOR ERROR: log.cpp:841: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2026-01-08T15:36:03.362856Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2026-01-08T15:36:03.371311Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:123: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2026-01-08T15:36:06.435673Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:36:06.488997Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:36:07.503631Z node 3 :METADATA_PROVIDER WARN: log.cpp:841: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[3:7593014991992517171:2065];send_to=[0:7307199536658146131:7762515]; 2026-01-08T15:36:07.504243Z node 3 :METADATA_PROVIDER ERROR: log.cpp:841: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2026-01-08T15:36:07.594570Z node 3 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: LookupError, path: Root/.metadata/script_executions 2026-01-08T15:36:07.697753Z node 3 :IMPORT WARN: schemeshard_import.cpp:440: Table profiles were not loaded 2026-01-08T15:36:07.699472Z node 3 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1374: Notification cookie mismatch for subscription [3:7593014991992517147:2081] 1767886567500907 != 1767886567500910 2026-01-08T15:36:07.727053Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2026-01-08T15:36:07.727154Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2026-01-08T15:36:07.741803Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Connecting -> Connected 2026-01-08T15:36:07.818381Z node 3 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] 
[TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: PathErrorUnknown, path: Root/.metadata/script_executions 2026-01-08T15:36:08.082928Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2026-01-08T15:36:08.082951Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2026-01-08T15:36:08.082958Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2026-01-08T15:36:08.083036Z node 3 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2026-01-08T15:36:08.393523Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2026-01-08T15:36:08.533488Z node 3 :TX_CONVEYOR ERROR: log.cpp:841: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2026-01-08T15:36:08.538759Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterPersQueueGroup, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_alter_pq.cpp:311) 2026-01-08T15:36:08.582887Z node 3 :TX_PROXY ERROR: schemereq.cpp:624: Actor# [3:7593014996287485427:2583] txid# 281474976710660, issues: { message: "Invalid retention period: specified: 31536000s, min: 1s, max: 2678400s" severity: 1 } ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/batch_operations/unittest >> KqpBatchUpdate::ManyPartitions_1 [GOOD] Test command err: Trying to start YDB, gRPC: 19848, MsgBus: 13007 2026-01-08T15:30:20.528997Z node 1 :METADATA_PROVIDER WARN: log.cpp:841: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7593013499828858206:2152];send_to=[0:7307199536658146131:7762515]; 2026-01-08T15:30:20.529048Z node 1 :METADATA_PROVIDER ERROR: log.cpp:841: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2026-01-08T15:30:20.577242Z node 1 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:125: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, got undelivered to scheme cache: ActorUnknown 2026-01-08T15:30:21.182414Z node 1 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: LookupError, path: Root/.metadata/script_executions 2026-01-08T15:30:21.280993Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2026-01-08T15:30:21.281142Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2026-01-08T15:30:21.453401Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1374: Notification cookie mismatch for subscription [1:7593013499828858091:2081] 1767886220495726 != 1767886220495729 2026-01-08T15:30:21.463637Z node 1 :IMPORT WARN: 
schemeshard_import.cpp:440: Table profiles were not loaded 2026-01-08T15:30:21.482724Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2026-01-08T15:30:21.490395Z node 1 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: PathErrorUnknown, path: Root/.metadata/script_executions 2026-01-08T15:30:21.597449Z node 1 :TX_CONVEYOR ERROR: log.cpp:841: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2026-01-08T15:30:21.958995Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2026-01-08T15:30:21.959018Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2026-01-08T15:30:21.959024Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2026-01-08T15:30:21.959117Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2026-01-08T15:30:23.310900Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2026-01-08T15:30:23.475420Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:30:23.774464Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:30:24.166555Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:30:24.288692Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:30:25.534736Z node 1 :METADATA_PROVIDER ERROR: log.cpp:841: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7593013499828858206:2152];send_to=[0:7307199536658146131:7762515]; 2026-01-08T15:30:25.534807Z node 1 :METADATA_PROVIDER ERROR: log.cpp:841: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2026-01-08T15:30:27.924666Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: 
[WorkloadService] [TPoolFetcherActor] ActorId: [1:7593013529893631058:2405], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:30:27.924780Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:263: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:30:27.929309Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7593013529893631067:2406], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:30:27.929397Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:263: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:30:28.293426Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:30:28.360096Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:30:28.418007Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:30:28.457189Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:30:28.495266Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:30:28.546728Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:30:28.609396Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:30:28.687856Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710669:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:30:28.859845Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7593013534188599239:2491], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:30:28.860352Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:263: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:30:28.861341Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7593013534188599244:2494], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:30:28.861403Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7593013534188599245:2495], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:30:28.861571Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:263: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:30:28.868099Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710670:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2026-01-08T15:30:28.889673Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7593013534188599248:2496], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710670 completed, doublechecking } 2026-01-08T15:30:28.998036Z node 1 :TX_PROXY ERROR: schemereq.cpp:624: Actor# [1:7593013534188599299:3787] txid# 281474976710671, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 53], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2026-01-08T15:30:30.905650Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, bu ... de 20 :METADATA_PROVIDER ERROR: log.cpp:841: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2026-01-08T15:35:50.790433Z node 20 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: LookupError, path: Root/.metadata/script_executions 2026-01-08T15:35:50.862923Z node 20 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1374: Notification cookie mismatch for subscription [20:7593014918165147367:2081] 1767886550632429 != 1767886550632432 2026-01-08T15:35:50.910725Z node 20 :IMPORT WARN: schemeshard_import.cpp:440: Table profiles were not loaded 2026-01-08T15:35:50.921227Z node 20 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(20, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2026-01-08T15:35:50.921402Z node 20 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(20, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2026-01-08T15:35:50.926478Z node 20 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(20, (0,0,0,0)) VolatileState: Connecting -> Connected 2026-01-08T15:35:51.064523Z node 20 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: PathErrorUnknown, path: Root/.metadata/script_executions 2026-01-08T15:35:51.135375Z node 20 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2026-01-08T15:35:51.135414Z node 20 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2026-01-08T15:35:51.135430Z node 20 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2026-01-08T15:35:51.135585Z node 20 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2026-01-08T15:35:51.690959Z node 20 :TX_CONVEYOR ERROR: log.cpp:841: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2026-01-08T15:35:52.631947Z node 20 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2026-01-08T15:35:52.653319Z node 20 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:35:52.787988Z node 
20 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:35:53.190687Z node 20 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:35:53.361609Z node 20 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:35:55.634220Z node 20 :METADATA_PROVIDER ERROR: log.cpp:841: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[20:7593014918165147402:2067];send_to=[0:7307199536658146131:7762515]; 2026-01-08T15:35:55.634379Z node 20 :METADATA_PROVIDER ERROR: log.cpp:841: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2026-01-08T15:35:59.550620Z node 20 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [20:7593014956819854962:2407], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:35:59.550861Z node 20 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:263: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:35:59.551728Z node 20 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [20:7593014956819854972:2408], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:35:59.551833Z node 20 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:263: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:35:59.811524Z node 20 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:35:59.944750Z node 20 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:36:00.085179Z node 20 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:36:00.195159Z node 20 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:36:00.355014Z node 20 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:36:00.540039Z node 20 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:36:00.713352Z node 20 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:36:00.831169Z node 20 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710669:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:36:01.070967Z node 20 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [20:7593014965409790453:2495], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:36:01.071136Z node 20 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:263: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:36:01.071525Z node 20 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [20:7593014965409790458:2498], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:36:01.071604Z node 20 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [20:7593014965409790459:2499], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:36:01.071890Z node 20 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:263: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:36:01.080946Z node 20 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710670:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2026-01-08T15:36:01.111492Z node 20 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [20:7593014965409790462:2500], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710670 completed, doublechecking } 2026-01-08T15:36:01.192237Z node 20 :TX_PROXY ERROR: schemereq.cpp:624: Actor# [20:7593014965409790513:3800] txid# 281474976710671, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 53], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2026-01-08T15:36:04.968157Z node 20 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710673:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:36:05.908153Z node 20 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7747: Cannot get console configs 2026-01-08T15:36:05.908193Z node 20 :IMPORT WARN: schemeshard_import.cpp:440: Table profiles were not loaded |88.3%| [TM] {BAZEL_UPLOAD} ydb/core/tx/replication/ydb_proxy/ut/unittest >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-LocalUser-CreateUser-23 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-LocalUser-CreateUser-24 >> TConsoleTests::TestSetDefaultStorageUnitsQuota [GOOD] >> TConsoleTests::TestSetDefaultComputationalUnitsQuota ------- [TM] {asan, default-linux-x86_64, release} ydb/core/mind/bscontroller/ut_bscontroller/unittest >> BsControllerConfig::MoveGroups [GOOD] Test command err: Leader for TabletID 72057594037932033 is [0:0:0] sender: [1:3064:2106] recipient: [1:2963:2117] IGNORE Leader for TabletID 72057594037932033 is [0:0:0] sender: [1:3064:2106] recipient: [1:2963:2117] Leader for TabletID 72057594037932033 is [1:3066:2119] sender: [1:3067:2106] recipient: [1:2963:2117] 2026-01-08T15:35:33.797982Z node 1 :BS_CONTROLLER DEBUG: {BSC05@impl.h:2080} StateInit event Type# 268828672 Event# NKikimr::TEvTablet::TEvBoot 2026-01-08T15:35:33.799197Z node 1 :BS_CONTROLLER DEBUG: {BSC05@impl.h:2080} StateInit event Type# 268828673 Event# NKikimr::TEvTablet::TEvRestored 2026-01-08T15:35:33.799606Z node 1 :BS_CONTROLLER DEBUG: {BSC22@console_interaction.cpp:14} Console interaction started 2026-01-08T15:35:33.801675Z node 1 :BS_CONTROLLER DEBUG: {BSC05@impl.h:2080} StateInit event Type# 268828684 Event# NKikimr::TEvTablet::TEvFollowerSyncComplete 2026-01-08T15:35:33.802485Z node 1 :BS_CONTROLLER DEBUG: {BSC05@impl.h:2080} StateInit event Type# 268639244 Event# NKikimr::TEvNodeWardenStorageConfig 2026-01-08T15:35:33.802823Z node 1 :BS_CONTROLLER DEBUG: {BSC05@impl.h:2080} StateInit event Type# 131082 Event# NActors::TEvInterconnect::TEvNodesInfo 2026-01-08T15:35:33.802851Z node 1 :BS_CONTROLLER DEBUG: {BSC01@bsc.cpp:829} Handle TEvInterconnect::TEvNodesInfo 2026-01-08T15:35:33.803167Z node 1 :BS_CONTROLLER DEBUG: {BSCTXIS01@init_scheme.cpp:17} TTxInitScheme Execute 2026-01-08T15:35:33.816463Z node 1 :BS_CONTROLLER DEBUG: {BSCTXIS03@init_scheme.cpp:44} TTxInitScheme Complete 2026-01-08T15:35:33.816594Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM01@migrate.cpp:190} Execute tx 2026-01-08T15:35:33.816763Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM02@migrate.cpp:251} Complete tx IncompatibleData# false 2026-01-08T15:35:33.816898Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxTrimUnusedSlots 2026-01-08T15:35:33.816981Z node 1 :BS_CONTROLLER DEBUG: 
{BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxTrimUnusedSlots 2026-01-08T15:35:33.817041Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateSchemaVersion Leader for TabletID 72057594037932033 is [1:3066:2119] sender: [1:3088:2106] recipient: [1:60:2107] 2026-01-08T15:35:33.831140Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateSchemaVersion 2026-01-08T15:35:33.831304Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxGenerateInstanceId 2026-01-08T15:35:33.873477Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxGenerateInstanceId 2026-01-08T15:35:33.873616Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateStaticPDiskInfo 2026-01-08T15:35:33.873697Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateStaticPDiskInfo 2026-01-08T15:35:33.873781Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxFillInNonNullConfigForPDisk 2026-01-08T15:35:33.873984Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxFillInNonNullConfigForPDisk 2026-01-08T15:35:33.874059Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxDropDriveStatus 2026-01-08T15:35:33.874093Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxDropDriveStatus 2026-01-08T15:35:33.874147Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateCompatibilityInfo 2026-01-08T15:35:33.886296Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateCompatibilityInfo 2026-01-08T15:35:33.886460Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateEnableConfigV2 2026-01-08T15:35:33.898350Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateEnableConfigV2 2026-01-08T15:35:33.898510Z node 1 :BS_CONTROLLER DEBUG: {BSCTXLE01@load_everything.cpp:21} TTxLoadEverything Execute 2026-01-08T15:35:33.899940Z node 1 :BS_CONTROLLER DEBUG: {BSCTXLE03@load_everything.cpp:705} TTxLoadEverything Complete 2026-01-08T15:35:33.900007Z node 1 :BS_CONTROLLER DEBUG: {BSC09@impl.h:2211} LoadFinished 2026-01-08T15:35:33.900227Z node 1 :BS_CONTROLLER DEBUG: {BSC18@console_interaction.cpp:31} Console connection service started 2026-01-08T15:35:33.900280Z node 1 :BS_CONTROLLER DEBUG: {BSCTXLE04@load_everything.cpp:710} TTxLoadEverything InitQueue 
processed 2026-01-08T15:35:33.917766Z node 1 :BS_CONTROLLER DEBUG: {BSCTXCC01@config_cmd.cpp:433} Execute TEvControllerConfigRequest Request# {Command { DefineHostConfig { HostConfigId: 1 Drive { Path: "/dev/disk1" } Drive { Path: "/dev/disk2" SharedWithOs: true } Drive { Path: "/dev/disk3" Type: SSD } } } Command { DefineBox { BoxId: 1 Name: "first box" Host { Key { Fqdn: "::1" IcPort: 12001 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12002 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12003 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12004 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12005 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12006 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12007 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12008 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12009 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12010 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12011 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12012 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12013 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12014 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12015 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12016 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12017 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12018 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12019 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12020 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12021 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12022 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12023 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12024 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12025 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12026 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12027 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12028 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12029 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12030 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12031 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12032 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12033 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12034 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12035 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12036 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12037 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12038 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12039 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12040 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12041 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12042 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12043 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12044 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12045 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12046 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12047 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12048 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12049 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12050 } HostConfigId: 1 } } } Command { DefineStoragePool { BoxId: 1 StoragePoolId: 1 Name: "first storage pool" ErasureSpecies: "block-4-2" VDiskKind: "Default" NumGroups: 150 PDiskFilter { Property { Type: ROT } } } } } 2026-01-08T15:35:33.921729Z node 1 :BS_CONTROLLER 
NOTICE: {BSCFP02@config_fit_pdisks.cpp:356} Create new pdisk PDiskId# 1:1000 Path# /dev/disk1 2026-01-08T15:35:33.921830Z node 1 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:356} Create new pdisk PDiskId# 1:1001 Path# /dev/disk2 2026-01-08T15:35:33.921862Z node 1 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:356} Create new pdisk PDiskId# 1:1002 Path# /dev/disk3 2026-01-08T15:35:33.921892Z node 1 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:356} Create new pdisk PDiskId# 2:1000 Path# /dev/disk1 2026-01-08T15:35:33.921917Z node 1 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:356} Create new pdisk PDiskId# 2:1001 Path# /dev/disk2 2026-01-08T15:35:33.921942Z node 1 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:356} Create new pdisk PDiskId# 2:1002 Path# /dev/disk3 2026-01-08T15:35:33.921966Z node 1 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:356} Create new pdisk PDiskId# 3:1000 Path# /dev/disk1 2026-01-08T15:35:33.922006Z node 1 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:356} Create new pdisk PDiskId# 3:1001 Path# /dev/disk2 2026-01-08T15:35:33.922031Z node 1 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:356} Create new pdisk PDiskId# 3:1002 Path# /dev/disk3 2026-01-08T15:35:33.922053Z node 1 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:356} Create new pdisk PDiskId# 4:1000 Path# /dev/disk1 2026-01-08T15:35:33.922077Z node 1 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:356} Create new pdisk PDiskId# 4:1001 Path# /dev/disk2 2026-01-08T15:35:33.922101Z node 1 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:356} Create new pdisk PDiskId# 4:1002 Path# /dev/disk3 2026-01-08T15:35:33.922153Z node 1 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:356} Create new pdisk PDiskId# 5:1000 Path# /dev/disk1 2026-01-08T15:35:33.922193Z node 1 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:356} Create new pdisk PDiskId# 5:1001 Path# /dev/disk2 2026-01-08T15:35:33.922225Z node 1 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:356} Create new pdisk PDiskId# 5:1002 Path# /dev/disk3 2026-01-08T15:35:33.922267Z node 1 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:356} Create new pdisk PDiskId# 6:1000 Path# /dev/disk1 2026-01-08T15:35:33.922291Z node 1 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:356} Create new pdisk PDiskId# 6:1001 Path# /dev/disk2 2026-01-08T15:35:33.922334Z node 1 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:356} Create new pdisk PDiskId# 6:1002 Path# /dev/disk3 2026-01-08T15:35:33.922357Z node 1 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:356} Create new pdisk PDiskId# 7:1000 Path# /dev/disk1 2026-01-08T15:35:33.922380Z node 1 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:356} Create new pdisk PDiskId# 7:1001 Path# /dev/disk2 2026-01-08T15:35:33.922402Z node 1 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:3 ... 
8:1000 Path# /dev/disk1 2026-01-08T15:36:02.716939Z node 151 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:356} Create new pdisk PDiskId# 178:1001 Path# /dev/disk2 2026-01-08T15:36:02.716996Z node 151 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:356} Create new pdisk PDiskId# 178:1002 Path# /dev/disk3 2026-01-08T15:36:02.717050Z node 151 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:356} Create new pdisk PDiskId# 179:1000 Path# /dev/disk1 2026-01-08T15:36:02.717107Z node 151 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:356} Create new pdisk PDiskId# 179:1001 Path# /dev/disk2 2026-01-08T15:36:02.717154Z node 151 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:356} Create new pdisk PDiskId# 179:1002 Path# /dev/disk3 2026-01-08T15:36:02.717185Z node 151 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:356} Create new pdisk PDiskId# 180:1000 Path# /dev/disk1 2026-01-08T15:36:02.717240Z node 151 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:356} Create new pdisk PDiskId# 180:1001 Path# /dev/disk2 2026-01-08T15:36:02.717298Z node 151 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:356} Create new pdisk PDiskId# 180:1002 Path# /dev/disk3 2026-01-08T15:36:02.717356Z node 151 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:356} Create new pdisk PDiskId# 181:1000 Path# /dev/disk1 2026-01-08T15:36:02.719513Z node 151 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:356} Create new pdisk PDiskId# 181:1001 Path# /dev/disk2 2026-01-08T15:36:02.719611Z node 151 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:356} Create new pdisk PDiskId# 181:1002 Path# /dev/disk3 2026-01-08T15:36:02.719684Z node 151 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:356} Create new pdisk PDiskId# 182:1000 Path# /dev/disk1 2026-01-08T15:36:02.719739Z node 151 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:356} Create new pdisk PDiskId# 182:1001 Path# /dev/disk2 2026-01-08T15:36:02.719800Z node 151 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:356} Create new pdisk PDiskId# 182:1002 Path# /dev/disk3 2026-01-08T15:36:02.719851Z node 151 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:356} Create new pdisk PDiskId# 183:1000 Path# /dev/disk1 2026-01-08T15:36:02.719925Z node 151 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:356} Create new pdisk PDiskId# 183:1001 Path# /dev/disk2 2026-01-08T15:36:02.720006Z node 151 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:356} Create new pdisk PDiskId# 183:1002 Path# /dev/disk3 2026-01-08T15:36:02.720109Z node 151 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:356} Create new pdisk PDiskId# 184:1000 Path# /dev/disk1 2026-01-08T15:36:02.720175Z node 151 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:356} Create new pdisk PDiskId# 184:1001 Path# /dev/disk2 2026-01-08T15:36:02.720247Z node 151 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:356} Create new pdisk PDiskId# 184:1002 Path# /dev/disk3 2026-01-08T15:36:02.720307Z node 151 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:356} Create new pdisk PDiskId# 185:1000 Path# /dev/disk1 2026-01-08T15:36:02.720358Z node 151 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:356} Create new pdisk PDiskId# 185:1001 Path# /dev/disk2 2026-01-08T15:36:02.720413Z node 151 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:356} Create new pdisk PDiskId# 185:1002 Path# /dev/disk3 2026-01-08T15:36:02.720474Z node 151 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:356} Create new pdisk PDiskId# 
186:1000 Path# /dev/disk1 2026-01-08T15:36:02.720526Z node 151 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:356} Create new pdisk PDiskId# 186:1001 Path# /dev/disk2 2026-01-08T15:36:02.720565Z node 151 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:356} Create new pdisk PDiskId# 186:1002 Path# /dev/disk3 2026-01-08T15:36:02.720615Z node 151 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:356} Create new pdisk PDiskId# 187:1000 Path# /dev/disk1 2026-01-08T15:36:02.720666Z node 151 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:356} Create new pdisk PDiskId# 187:1001 Path# /dev/disk2 2026-01-08T15:36:02.720718Z node 151 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:356} Create new pdisk PDiskId# 187:1002 Path# /dev/disk3 2026-01-08T15:36:02.720774Z node 151 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:356} Create new pdisk PDiskId# 188:1000 Path# /dev/disk1 2026-01-08T15:36:02.720825Z node 151 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:356} Create new pdisk PDiskId# 188:1001 Path# /dev/disk2 2026-01-08T15:36:02.720873Z node 151 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:356} Create new pdisk PDiskId# 188:1002 Path# /dev/disk3 2026-01-08T15:36:02.720952Z node 151 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:356} Create new pdisk PDiskId# 189:1000 Path# /dev/disk1 2026-01-08T15:36:02.721003Z node 151 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:356} Create new pdisk PDiskId# 189:1001 Path# /dev/disk2 2026-01-08T15:36:02.721052Z node 151 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:356} Create new pdisk PDiskId# 189:1002 Path# /dev/disk3 2026-01-08T15:36:02.721091Z node 151 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:356} Create new pdisk PDiskId# 190:1000 Path# /dev/disk1 2026-01-08T15:36:02.721136Z node 151 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:356} Create new pdisk PDiskId# 190:1001 Path# /dev/disk2 2026-01-08T15:36:02.721187Z node 151 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:356} Create new pdisk PDiskId# 190:1002 Path# /dev/disk3 2026-01-08T15:36:02.721240Z node 151 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:356} Create new pdisk PDiskId# 191:1000 Path# /dev/disk1 2026-01-08T15:36:02.721287Z node 151 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:356} Create new pdisk PDiskId# 191:1001 Path# /dev/disk2 2026-01-08T15:36:02.721338Z node 151 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:356} Create new pdisk PDiskId# 191:1002 Path# /dev/disk3 2026-01-08T15:36:02.721386Z node 151 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:356} Create new pdisk PDiskId# 192:1000 Path# /dev/disk1 2026-01-08T15:36:02.721434Z node 151 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:356} Create new pdisk PDiskId# 192:1001 Path# /dev/disk2 2026-01-08T15:36:02.721485Z node 151 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:356} Create new pdisk PDiskId# 192:1002 Path# /dev/disk3 2026-01-08T15:36:02.721520Z node 151 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:356} Create new pdisk PDiskId# 193:1000 Path# /dev/disk1 2026-01-08T15:36:02.721568Z node 151 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:356} Create new pdisk PDiskId# 193:1001 Path# /dev/disk2 2026-01-08T15:36:02.721618Z node 151 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:356} Create new pdisk PDiskId# 193:1002 Path# /dev/disk3 2026-01-08T15:36:02.721666Z node 151 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:356} Create new pdisk PDiskId# 
194:1000 Path# /dev/disk1 2026-01-08T15:36:02.721725Z node 151 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:356} Create new pdisk PDiskId# 194:1001 Path# /dev/disk2 2026-01-08T15:36:02.721773Z node 151 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:356} Create new pdisk PDiskId# 194:1002 Path# /dev/disk3 2026-01-08T15:36:02.721826Z node 151 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:356} Create new pdisk PDiskId# 195:1000 Path# /dev/disk1 2026-01-08T15:36:02.721877Z node 151 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:356} Create new pdisk PDiskId# 195:1001 Path# /dev/disk2 2026-01-08T15:36:02.721934Z node 151 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:356} Create new pdisk PDiskId# 195:1002 Path# /dev/disk3 2026-01-08T15:36:02.721989Z node 151 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:356} Create new pdisk PDiskId# 196:1000 Path# /dev/disk1 2026-01-08T15:36:02.722024Z node 151 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:356} Create new pdisk PDiskId# 196:1001 Path# /dev/disk2 2026-01-08T15:36:02.722090Z node 151 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:356} Create new pdisk PDiskId# 196:1002 Path# /dev/disk3 2026-01-08T15:36:02.722146Z node 151 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:356} Create new pdisk PDiskId# 197:1000 Path# /dev/disk1 2026-01-08T15:36:02.722197Z node 151 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:356} Create new pdisk PDiskId# 197:1001 Path# /dev/disk2 2026-01-08T15:36:02.722254Z node 151 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:356} Create new pdisk PDiskId# 197:1002 Path# /dev/disk3 2026-01-08T15:36:02.722303Z node 151 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:356} Create new pdisk PDiskId# 198:1000 Path# /dev/disk1 2026-01-08T15:36:02.722378Z node 151 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:356} Create new pdisk PDiskId# 198:1001 Path# /dev/disk2 2026-01-08T15:36:02.722442Z node 151 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:356} Create new pdisk PDiskId# 198:1002 Path# /dev/disk3 2026-01-08T15:36:02.722496Z node 151 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:356} Create new pdisk PDiskId# 199:1000 Path# /dev/disk1 2026-01-08T15:36:02.722552Z node 151 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:356} Create new pdisk PDiskId# 199:1001 Path# /dev/disk2 2026-01-08T15:36:02.722611Z node 151 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:356} Create new pdisk PDiskId# 199:1002 Path# /dev/disk3 2026-01-08T15:36:02.722671Z node 151 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:356} Create new pdisk PDiskId# 200:1000 Path# /dev/disk1 2026-01-08T15:36:02.722718Z node 151 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:356} Create new pdisk PDiskId# 200:1001 Path# /dev/disk2 2026-01-08T15:36:02.722772Z node 151 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:356} Create new pdisk PDiskId# 200:1002 Path# /dev/disk3 2026-01-08T15:36:03.301082Z node 151 :BS_CONTROLLER ERROR: {BSC07@impl.h:2204} ProcessControllerEvent event processing took too much time Type# 268637706 Duration# 0.590632s 2026-01-08T15:36:03.301331Z node 151 :BS_CONTROLLER ERROR: {BSC00@bsc.cpp:1009} StateWork event processing took too much time Type# 2146435078 Duration# 0.590907s 2026-01-08T15:36:03.367815Z node 151 :BS_CONTROLLER DEBUG: {BSCTXCC01@config_cmd.cpp:433} Execute TEvControllerConfigRequest Request# {Command { QueryBaseConfig { } } } 2026-01-08T15:36:03.491574Z node 151 :BS_CONTROLLER DEBUG: 
{BSCTXCC01@config_cmd.cpp:433} Execute TEvControllerConfigRequest Request# {Command { MoveGroups { BoxId: 1 OriginStoragePoolId: 2 OriginStoragePoolGeneration: 1 TargetStoragePoolId: 1 TargetStoragePoolGeneration: 1 ExplicitGroupId: 2147483748 } } } 2026-01-08T15:36:03.524178Z node 151 :BS_CONTROLLER DEBUG: {BSCTXCC01@config_cmd.cpp:433} Execute TEvControllerConfigRequest Request# {Command { QueryBaseConfig { } } } 2026-01-08T15:36:03.645366Z node 151 :BS_CONTROLLER DEBUG: {BSCTXCC01@config_cmd.cpp:433} Execute TEvControllerConfigRequest Request# {Command { MoveGroups { BoxId: 1 OriginStoragePoolId: 2 OriginStoragePoolGeneration: 2 TargetStoragePoolId: 1 TargetStoragePoolGeneration: 2 ExplicitGroupId: 2147483749 } } } 2026-01-08T15:36:03.674475Z node 151 :BS_CONTROLLER DEBUG: {BSCTXCC01@config_cmd.cpp:433} Execute TEvControllerConfigRequest Request# {Command { QueryBaseConfig { } } } 2026-01-08T15:36:03.828769Z node 151 :BS_CONTROLLER DEBUG: {BSCTXCC01@config_cmd.cpp:433} Execute TEvControllerConfigRequest Request# {Command { MoveGroups { BoxId: 1 OriginStoragePoolId: 2 OriginStoragePoolGeneration: 3 TargetStoragePoolId: 1 TargetStoragePoolGeneration: 3 } } } 2026-01-08T15:36:03.854126Z node 151 :BS_CONTROLLER DEBUG: {BSCTXCC01@config_cmd.cpp:433} Execute TEvControllerConfigRequest Request# {Command { QueryBaseConfig { } } } |88.3%| [TM] {BAZEL_UPLOAD} ydb/core/kqp/ut/batch_operations/unittest ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/datashard/ut_volatile/unittest >> DataShardVolatile::GracefulShardRestartNoEarlyReadSetAck [GOOD] Test command err: ... waiting for SysViewsRoster update finished 2026-01-08T15:30:52.579436Z node 1 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:125: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, got undelivered to scheme cache: ActorUnknown 2026-01-08T15:30:52.716106Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:114:2161], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2026-01-08T15:30:52.716502Z node 1 :METADATA_PROVIDER ERROR: log.cpp:841: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2026-01-08T15:30:52.716679Z node 1 :METADATA_PROVIDER ERROR: log.cpp:841: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2026-01-08T15:30:53.101231Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2026-01-08T15:30:53.101404Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2026-01-08T15:30:53.188725Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1374: Notification cookie mismatch for subscription [1:34:2081] 1767886248500093 != 1767886248500097 2026-01-08T15:30:53.202685Z node 1 :IMPORT WARN: schemeshard_import.cpp:440: Table profiles were not loaded 2026-01-08T15:30:53.267877Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2026-01-08T15:30:53.385656Z node 1 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: PathErrorUnknown, path: Root/.metadata/script_executions 2026-01-08T15:30:53.587187Z node 1 :TX_CONVEYOR ERROR: log.cpp:841: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; ... waiting for SysViewsRoster update finished (done) 2026-01-08T15:30:53.858932Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2026-01-08T15:30:54.052735Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:314: actor# [1:68:2115] Handle TEvProposeTransaction 2026-01-08T15:30:54.052821Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:237: actor# [1:68:2115] TxId# 281474976715657 ProcessProposeTransaction 2026-01-08T15:30:54.052957Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:256: actor# [1:68:2115] Cookie# 0 userReqId# "" txid# 281474976715657 SEND to# [1:866:2749] 2026-01-08T15:30:54.226814Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1822: Actor# [1:866:2749] txid# 281474976715657 Bootstrap EvSchemeRequest record: Transaction { ModifyScheme { WorkingDir: "/Root" OperationType: ESchemeOpCreateTable CreateTable { Name: "table-1" Columns { Name: "key" Type: "Uint32" FamilyName: "" NotNull: false } Columns { Name: "value" Type: "Uint32" FamilyName: "" NotNull: false } KeyColumnNames: "key" UniformPartitionsCount: 1 } } } ExecTimeoutPeriod: 18446744073709551615 2026-01-08T15:30:54.226931Z node 1 :TX_PROXY DEBUG: schemereq.cpp:647: Actor# [1:866:2749] txid# 281474976715657 Bootstrap, UserSID: CheckAdministrator: 0 CheckDatabaseAdministrator: 0 2026-01-08T15:30:54.227657Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1887: Actor# [1:866:2749] txid# 281474976715657 HANDLE EvNavigateKeySetResult TFlatSchemeReq marker# P6 2026-01-08T15:30:54.227780Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1877: Actor# [1:866:2749] txid# 281474976715657 
TEvNavigateKeySet requested from SchemeCache 2026-01-08T15:30:54.228185Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1631: Actor# [1:866:2749] txid# 281474976715657 HANDLE EvNavigateKeySetResult TFlatSchemeReq marker# P5 ErrorCount# 0 2026-01-08T15:30:54.228377Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1666: Actor# [1:866:2749] HANDLE EvNavigateKeySetResult, txid# 281474976715657 shardToRequest# 72057594046644480 DomainKey# [OwnerId: 72057594046644480, LocalPathId: 1] DomainInfo.Params# Version: 1 PlanResolution: 1000 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 RedirectRequired# true 2026-01-08T15:30:54.228542Z node 1 :TX_PROXY DEBUG: schemereq.cpp:117: Actor# [1:866:2749] txid# 281474976715657 SEND to# 72057594046644480 shardToRequest {TEvModifySchemeTransaction txid# 281474976715657 TabletId# 72057594046644480} 2026-01-08T15:30:54.230742Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:30:54.231287Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1533: Actor# [1:866:2749] txid# 281474976715657 HANDLE EvClientConnected 2026-01-08T15:30:54.232083Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1555: Actor# [1:866:2749] txid# 281474976715657 Status StatusAccepted HANDLE {TEvModifySchemeTransactionResult Status# StatusAccepted txid# 281474976715657} 2026-01-08T15:30:54.232155Z node 1 :TX_PROXY DEBUG: schemereq.cpp:627: Actor# [1:866:2749] txid# 281474976715657 SEND to# [1:847:2730] Source {TEvProposeTransactionStatus txid# 281474976715657 Status# 53} 2026-01-08T15:30:54.274981Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3119: StateInit, received event# 268828672, Sender [1:882:2764], Recipient [1:890:2770]: NKikimr::TEvTablet::TEvBoot 2026-01-08T15:30:54.276113Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3119: StateInit, received event# 268828673, Sender [1:882:2764], Recipient [1:890:2770]: NKikimr::TEvTablet::TEvRestored 2026-01-08T15:30:54.276435Z node 1 :TX_DATASHARD INFO: datashard.cpp:374: TDataShard::OnActivateExecutor: tablet 72075186224037888 actor [1:890:2770] 2026-01-08T15:30:54.276801Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:661: TxInitSchema.Execute 2026-01-08T15:30:54.324667Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateInactive, received event# 268828684, Sender [1:882:2764], Recipient [1:890:2770]: NKikimr::TEvTablet::TEvFollowerSyncComplete 2026-01-08T15:30:54.325526Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:727: TxInitSchema.Complete 2026-01-08T15:30:54.325664Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:50: TDataShard::TTxInit::Execute 2026-01-08T15:30:54.327641Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1324: LoadChangeRecords: QueueSize: 0, at tablet: 72075186224037888 2026-01-08T15:30:54.327765Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1380: LoadLockChangeRecords at tablet: 72075186224037888 2026-01-08T15:30:54.327826Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1429: LoadChangeRecordCommits at tablet: 72075186224037888 2026-01-08T15:30:54.328264Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:94: TDataShard::TTxInit::Complete 2026-01-08T15:30:54.328430Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:102: TDataShard::TTxInitRestored::Execute 2026-01-08T15:30:54.328529Z node 1 :TX_DATASHARD DEBUG: 
datashard__init.cpp:142: DataShard 72075186224037888 persisting started state actor id [1:906:2770] in generation 1 2026-01-08T15:30:54.339420Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:151: TDataShard::TTxInitRestored::Complete 2026-01-08T15:30:54.397465Z node 1 :TX_DATASHARD INFO: datashard.cpp:418: Switched to work state WaitScheme tabletId 72075186224037888 2026-01-08T15:30:54.397702Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:458: 72075186224037888 not sending time cast registration request in state WaitScheme: missing processing params 2026-01-08T15:30:54.397830Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1259: Change sender created: at tablet: 72075186224037888, actorId: [1:908:2780] 2026-01-08T15:30:54.397873Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1264: Trying to activate change sender: at tablet: 72075186224037888 2026-01-08T15:30:54.397915Z node 1 :TX_DATASHARD INFO: datashard.cpp:1281: Cannot activate change sender: at tablet: 72075186224037888, state: WaitScheme 2026-01-08T15:30:54.397974Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2026-01-08T15:30:54.398143Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3153: StateWork, received event# 2146435072, Sender [1:890:2770], Recipient [1:890:2770]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvProgressTransaction 2026-01-08T15:30:54.398204Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3178: StateWork, processing event TEvPrivate::TEvProgressTransaction 2026-01-08T15:30:54.398655Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 72075186224037888 2026-01-08T15:30:54.398762Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 72075186224037888 2026-01-08T15:30:54.398884Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2026-01-08T15:30:54.398958Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 0 2026-01-08T15:30:54.399028Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:385: Check unit PlanQueue at 72075186224037888 2026-01-08T15:30:54.399076Z node 1 :TX_DATASHARD TRACE: plan_queue_unit.cpp:59: TPlanQueueUnit at 72075186224037888 has no attached operations 2026-01-08T15:30:54.399116Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:403: Unit PlanQueue has no ready operations at 72075186224037888 2026-01-08T15:30:54.399153Z node 1 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186224037888 TxInFly 0 2026-01-08T15:30:54.399218Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2026-01-08T15:30:54.399342Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3153: StateWork, received event# 269877761, Sender [1:893:2771], Recipient [1:890:2770]: NKikimr::TEvTabletPipe::TEvServerConnected 2026-01-08T15:30:54.399395Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3189: StateWork, processing event TEvTabletPipe::TEvServerConnected 2026-01-08T15:30:54.399457Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3740: Server connected at leader tablet# 72075186224037888, clientId# [1:889:2769], serverId# [1:893:2771], sessionId# [0:0:0] 2026-01-08T15:30:54.399860Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3153: StateWork, received event# 269549568, Sender [1:400:2399], Recipient [1:893:2771] 
2026-01-08T15:30:54.399910Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3159: StateWork, processing event TEvDataShard::TEvProposeTransaction 2026-01-08T15:30:54.400041Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037888 2026-01-08T15:30:54.400267Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1848: Trying to execute [0:281474976715657] at 72075186224037888 on unit CheckSchemeTx 2026-01-08T15:30:54.400331Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:133: Propose scheme transaction at tablet 72075186224037888 txId 281474976715657 ssId 72057594046644480 seqNo 2:1 2026-01-08T15:30:54.400421Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:221: Prepared scheme transaction txId 281474976715657 at tablet 72075186224037888 2026-01-08T15:30:54.400470Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1883: Execution status ... 37: Add [0:7] at 72075186224037889 to execution unit ExecuteRead 2026-01-08T15:36:08.688622Z node 29 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1848: Trying to execute [0:7] at 72075186224037889 on unit ExecuteRead 2026-01-08T15:36:08.688744Z node 29 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:1748: 72075186224037889 Execute read# 1, request: { ReadId: 1 TableId { OwnerId: 72057594046644480 TableId: 38 SchemaVersion: 1 } Columns: 1 Columns: 2 Snapshot { Step: 2503 TxId: 18446744073709551615 } ResultFormat: FORMAT_CELLVEC MaxRows: 1000 MaxBytes: 5242880 Reverse: false TotalRowsLimit: 1000 } 2026-01-08T15:36:08.688991Z node 29 :TX_DATASHARD TRACE: datashard.cpp:2488: PromoteImmediatePostExecuteEdges at 72075186224037889 promoting UnprotectedReadEdge to v2503/18446744073709551615 2026-01-08T15:36:08.689046Z node 29 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2396: 72075186224037889 Complete read# {[29:1291:3056], 1} after executionsCount# 1 2026-01-08T15:36:08.689094Z node 29 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2370: 72075186224037889 read iterator# {[29:1291:3056], 1} sends rowCount# 1, bytes# 32, quota rows left# 999, quota bytes left# 5242848, hasUnreadQueries# 0, total queries# 1, firstUnprocessed# 0 2026-01-08T15:36:08.689172Z node 29 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2421: 72075186224037889 read iterator# {[29:1291:3056], 1} finished in read 2026-01-08T15:36:08.689232Z node 29 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1883: Execution status for [0:7] at 72075186224037889 is Executed 2026-01-08T15:36:08.689269Z node 29 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1931: Advance execution plan for [0:7] at 72075186224037889 executing on unit ExecuteRead 2026-01-08T15:36:08.689301Z node 29 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1937: Add [0:7] at 72075186224037889 to execution unit CompletedOperations 2026-01-08T15:36:08.689333Z node 29 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1848: Trying to execute [0:7] at 72075186224037889 on unit CompletedOperations 2026-01-08T15:36:08.689390Z node 29 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1883: Execution status for [0:7] at 72075186224037889 is Executed 2026-01-08T15:36:08.689421Z node 29 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1931: Advance execution plan for [0:7] at 72075186224037889 executing on unit CompletedOperations 2026-01-08T15:36:08.689453Z node 29 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1943: Execution plan for [0:7] at 72075186224037889 has finished 2026-01-08T15:36:08.689487Z node 29 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2932: TTxReadViaPipeline(69) Execute 
with status# Executed at tablet# 72075186224037889 2026-01-08T15:36:08.689583Z node 29 :TABLET_EXECUTOR DEBUG: Leader{72075186224037889:1:16} Tx{33, NKikimr::NDataShard::TDataShard::TTxReadViaPipeline} hope 1 -> done Change{16, redo 0b alter 0b annex 0, ~{ } -{ }, 0 gb} 2026-01-08T15:36:08.689641Z node 29 :TABLET_EXECUTOR DEBUG: Leader{72075186224037889:1:16} Tx{33, NKikimr::NDataShard::TDataShard::TTxReadViaPipeline} release 4194304b of static, Memory{0 dyn 0} 2026-01-08T15:36:08.689686Z node 29 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2981: TTxReadViaPipeline(69) Complete: at tablet# 72075186224037889 2026-01-08T15:36:08.690080Z node 29 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:154: TClient[72075186224037889] send [29:1145:2944] 2026-01-08T15:36:08.690120Z node 29 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:645: TClient[72075186224037889] push event to server [29:1145:2944] 2026-01-08T15:36:08.690424Z node 29 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:49: TClient[72075186224037890] ::Bootstrap [29:1293:3059] 2026-01-08T15:36:08.690548Z node 29 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:542: TClient[72075186224037890] lookup [29:1293:3059] 2026-01-08T15:36:08.690709Z node 29 :TX_DATASHARD TRACE: datashard_impl.h:3153: StateWork, received event# 269553219, Sender [29:1291:3056], Recipient [29:927:2787]: NKikimrTxDataShard.TEvReadCancel ReadId: 1 2026-01-08T15:36:08.690766Z node 29 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:3668: 72075186224037889 ReadCancel: { ReadId: 1 } 2026-01-08T15:36:08.690862Z node 29 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:148: TClient[72075186224037890] queue send [29:1293:3059] 2026-01-08T15:36:08.691008Z node 29 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:188: TClient[72075186224037890] forward result local node, try to connect [29:1293:3059] 2026-01-08T15:36:08.691057Z node 29 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:687: TClient[72075186224037890]::SendEvent [29:1293:3059] 2026-01-08T15:36:08.691257Z node 29 :TX_DATASHARD TRACE: datashard_impl.h:3153: StateWork, received event# 269877761, Sender [29:1294:3060], Recipient [29:1248:3028]: NKikimr::TEvTabletPipe::TEvServerConnected 2026-01-08T15:36:08.691301Z node 29 :TX_DATASHARD TRACE: datashard_impl.h:3189: StateWork, processing event TEvTabletPipe::TEvServerConnected 2026-01-08T15:36:08.691347Z node 29 :TX_DATASHARD DEBUG: datashard.cpp:3740: Server connected at leader tablet# 72075186224037890, clientId# [29:1293:3059], serverId# [29:1294:3060], sessionId# [0:0:0] 2026-01-08T15:36:08.691394Z node 29 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:308: TClient[72075186224037890] connected with status OK role: Leader [29:1293:3059] 2026-01-08T15:36:08.691435Z node 29 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:323: TClient[72075186224037890] send queued [29:1293:3059] 2026-01-08T15:36:08.691467Z node 29 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:645: TClient[72075186224037890] push event to server [29:1293:3059] 2026-01-08T15:36:08.691683Z node 29 :TX_DATASHARD TRACE: datashard_impl.h:3153: StateWork, received event# 269553215, Sender [29:1291:3056], Recipient [29:1248:3028]: NKikimrTxDataShard.TEvRead ReadId: 2 TableId { OwnerId: 72057594046644480 TableId: 38 SchemaVersion: 1 } Columns: 1 Columns: 2 Snapshot { Step: 2503 TxId: 18446744073709551615 } ResultFormat: FORMAT_CELLVEC MaxRows: 999 MaxBytes: 5242880 Reverse: false TotalRowsLimit: 999 RangesSize: 1 2026-01-08T15:36:08.691794Z node 29 :TABLET_EXECUTOR DEBUG: Leader{72075186224037890:2:4} Tx{13, 
NKikimr::NDataShard::TDataShard::TTxReadViaPipeline} queued, type NKikimr::NDataShard::TDataShard::TTxReadViaPipeline 2026-01-08T15:36:08.691857Z node 29 :TABLET_EXECUTOR DEBUG: Leader{72075186224037890:2:4} Tx{13, NKikimr::NDataShard::TDataShard::TTxReadViaPipeline} took 4194304b of static mem, Memory{4194304 dyn 0} 2026-01-08T15:36:08.691936Z node 29 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2697: TTxReadViaPipeline execute: at tablet# 72075186224037890, FollowerId 0 2026-01-08T15:36:08.692008Z node 29 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1848: Trying to execute [0:1] at 72075186224037890 on unit CheckRead 2026-01-08T15:36:08.692079Z node 29 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1883: Execution status for [0:1] at 72075186224037890 is Executed 2026-01-08T15:36:08.692114Z node 29 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1931: Advance execution plan for [0:1] at 72075186224037890 executing on unit CheckRead 2026-01-08T15:36:08.692143Z node 29 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1937: Add [0:1] at 72075186224037890 to execution unit BuildAndWaitDependencies 2026-01-08T15:36:08.692183Z node 29 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1848: Trying to execute [0:1] at 72075186224037890 on unit BuildAndWaitDependencies 2026-01-08T15:36:08.692249Z node 29 :TX_DATASHARD TRACE: datashard_pipeline.cpp:483: Activated operation [0:1] at 72075186224037890 2026-01-08T15:36:08.692299Z node 29 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1883: Execution status for [0:1] at 72075186224037890 is Executed 2026-01-08T15:36:08.692327Z node 29 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1931: Advance execution plan for [0:1] at 72075186224037890 executing on unit BuildAndWaitDependencies 2026-01-08T15:36:08.692357Z node 29 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1937: Add [0:1] at 72075186224037890 to execution unit ExecuteRead 2026-01-08T15:36:08.692391Z node 29 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1848: Trying to execute [0:1] at 72075186224037890 on unit ExecuteRead 2026-01-08T15:36:08.692511Z node 29 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:1748: 72075186224037890 Execute read# 1, request: { ReadId: 2 TableId { OwnerId: 72057594046644480 TableId: 38 SchemaVersion: 1 } Columns: 1 Columns: 2 Snapshot { Step: 2503 TxId: 18446744073709551615 } ResultFormat: FORMAT_CELLVEC MaxRows: 999 MaxBytes: 5242880 Reverse: false TotalRowsLimit: 999 } 2026-01-08T15:36:08.692765Z node 29 :TX_DATASHARD TRACE: datashard.cpp:2488: PromoteImmediatePostExecuteEdges at 72075186224037890 promoting UnprotectedReadEdge to v2503/18446744073709551615 2026-01-08T15:36:08.692821Z node 29 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2396: 72075186224037890 Complete read# {[29:1291:3056], 2} after executionsCount# 1 2026-01-08T15:36:08.692868Z node 29 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2370: 72075186224037890 read iterator# {[29:1291:3056], 2} sends rowCount# 1, bytes# 32, quota rows left# 998, quota bytes left# 5242848, hasUnreadQueries# 0, total queries# 1, firstUnprocessed# 0 2026-01-08T15:36:08.692944Z node 29 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2421: 72075186224037890 read iterator# {[29:1291:3056], 2} finished in read 2026-01-08T15:36:08.693002Z node 29 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1883: Execution status for [0:1] at 72075186224037890 is Executed 2026-01-08T15:36:08.693035Z node 29 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1931: Advance execution plan for [0:1] at 72075186224037890 executing on unit ExecuteRead 
2026-01-08T15:36:08.693066Z node 29 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1937: Add [0:1] at 72075186224037890 to execution unit CompletedOperations 2026-01-08T15:36:08.693099Z node 29 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1848: Trying to execute [0:1] at 72075186224037890 on unit CompletedOperations 2026-01-08T15:36:08.693153Z node 29 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1883: Execution status for [0:1] at 72075186224037890 is Executed 2026-01-08T15:36:08.693182Z node 29 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1931: Advance execution plan for [0:1] at 72075186224037890 executing on unit CompletedOperations 2026-01-08T15:36:08.693213Z node 29 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1943: Execution plan for [0:1] at 72075186224037890 has finished 2026-01-08T15:36:08.693249Z node 29 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2932: TTxReadViaPipeline(69) Execute with status# Executed at tablet# 72075186224037890 2026-01-08T15:36:08.693353Z node 29 :TABLET_EXECUTOR DEBUG: Leader{72075186224037890:2:4} Tx{13, NKikimr::NDataShard::TDataShard::TTxReadViaPipeline} hope 1 -> done Change{17, redo 0b alter 0b annex 0, ~{ } -{ }, 0 gb} 2026-01-08T15:36:08.693411Z node 29 :TABLET_EXECUTOR DEBUG: Leader{72075186224037890:2:4} Tx{13, NKikimr::NDataShard::TDataShard::TTxReadViaPipeline} release 4194304b of static, Memory{0 dyn 0} 2026-01-08T15:36:08.693453Z node 29 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2981: TTxReadViaPipeline(69) Complete: at tablet# 72075186224037890 2026-01-08T15:36:08.695777Z node 29 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:154: TClient[72075186224037890] send [29:1293:3059] 2026-01-08T15:36:08.695828Z node 29 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:645: TClient[72075186224037890] push event to server [29:1293:3059] 2026-01-08T15:36:08.695992Z node 29 :TX_DATASHARD TRACE: datashard_impl.h:3153: StateWork, received event# 269553219, Sender [29:1291:3056], Recipient [29:1248:3028]: NKikimrTxDataShard.TEvReadCancel ReadId: 2 2026-01-08T15:36:08.696045Z node 29 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:3668: 72075186224037890 ReadCancel: { ReadId: 2 } { items { uint32_value: 1 } items { uint32_value: 1 } }, { items { uint32_value: 11 } items { uint32_value: 111 } }, { items { uint32_value: 21 } items { uint32_value: 21 } } |88.3%| [TM] {BAZEL_UPLOAD} ydb/core/mind/bscontroller/ut_bscontroller/unittest >> ColumnStatistics::CountMinSketchStatistics >> TConsoleInMemoryConfigSubscriptionTests::TestSubscriptionCreate [GOOD] >> TConsoleInMemoryConfigSubscriptionTests::TestSubscriptionClient |88.3%| [TM] {BAZEL_UPLOAD} ydb/core/tx/datashard/ut_volatile/unittest >> TConsoleConfigHelpersTests::TestConfigSubscriberAutoTenantMultipleTenants [GOOD] >> TConsoleConfigHelpersTests::TestConfigSubscriberAutoTenantDomain >> TJaegerTracingConfiguratorTests::SamplingByDb [GOOD] >> TJaegerTracingConfiguratorTests::SharedThrottlingLimits >> TNetClassifierUpdaterTest::TestGetUpdatesFromHttpServer [GOOD] >> TNetClassifierUpdaterTest::TestFiltrationByNetboxCustomFieldsAndTags >> TConsoleTests::TestCreateSharedTenant [GOOD] >> TConsoleTests::TestCreateServerlessTenant |88.3%| [TM] {asan, default-linux-x86_64, release} ydb/core/statistics/service/ut/unittest |88.3%| [TM] {BAZEL_UPLOAD} ydb/core/statistics/service/ut/unittest >> TConsoleTests::TestCreateTenant [GOOD] >> TConsoleTests::TestCreateTenantExtSubdomain >> BasicStatistics::PersistenceWithStorageFailuresAndReboots >> TConsoleInMemoryConfigSubscriptionTests::TestSubscriptionClient [GOOD] >> 
TConsoleInMemoryConfigSubscriptionTests::TestSubscriptionClientManyUpdates >> TConsoleConfigSubscriptionTests::TestNotificationForTimeoutedNotificationResponse [GOOD] >> TConsoleConfigSubscriptionTests::TestNotificationForRestartedServer >> TJaegerTracingConfiguratorTests::SharedThrottlingLimits [GOOD] >> TJaegerTracingConfiguratorTests::SharedSamplingLimits >> TAuditTest::OptionsRequestsAreNotAudited [GOOD] |88.3%| [TM] {asan, default-linux-x86_64, release} ydb/core/mon/audit/ut/unittest |88.3%| [TM] {BAZEL_UPLOAD} ydb/core/mon/audit/ut/unittest >> test_s3.py::TestYdbS3TTL::test_s3[table_all_types-pk_types12-all_types12-index12---] [GOOD] |88.3%| [TM] {asan, default-linux-x86_64, release} ydb/core/statistics/service/ut/unittest |88.3%| [TM] {BAZEL_UPLOAD} ydb/core/statistics/service/ut/unittest >> TAuditTest::OtherGetRequestsAreAudited [GOOD] >> TSchemeShardViewTest::EmptyName |88.3%| [TM] {asan, default-linux-x86_64, release} ydb/core/mon/audit/ut/unittest |88.3%| [TM] {BAZEL_UPLOAD} ydb/core/mon/audit/ut/unittest |88.3%| [TM] {asan, default-linux-x86_64, release} ydb/core/mon/audit/ut/unittest >> TAuditTest::OptionsRequestsAreNotAudited [GOOD] |88.3%| [TM] {BAZEL_UPLOAD} ydb/core/mon/audit/ut/unittest >> TConsoleConfigHelpersTests::TestConfigSubscriberAutoTenantDomain [GOOD] >> TConsoleConfigHelpersTests::TestConfigSubscriptionEraser |88.3%| [TM] {asan, default-linux-x86_64, release} ydb/core/mon/audit/ut/unittest >> TAuditTest::OtherGetRequestsAreAudited [GOOD] |88.4%| [TM] {BAZEL_UPLOAD} ydb/core/mon/audit/ut/unittest |88.4%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/scheme_board/ut_cache/ydb-core-tx-scheme_board-ut_cache >> TJaegerTracingConfiguratorTests::SharedSamplingLimits [GOOD] >> TLogSettingsConfiguratorTests::TestNoChanges |88.3%| [LD] {RESULT} $(B)/ydb/core/tx/scheme_board/ut_cache/ydb-core-tx-scheme_board-ut_cache |88.4%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/scheme_board/ut_cache/ydb-core-tx-scheme_board-ut_cache |88.4%| [TM] {asan, default-linux-x86_64, release} ydb/core/mon/audit/ut/unittest |88.4%| [TM] {BAZEL_UPLOAD} ydb/core/mon/audit/ut/unittest |88.4%| [TM] {asan, default-linux-x86_64, release} ydb/library/ycloud/impl/ut/unittest |88.4%| [TM] {BAZEL_UPLOAD} ydb/library/ycloud/impl/ut/unittest >> TUserAccountServiceTest::Get >> FolderServiceTest::TFolderService >> TLogSettingsConfiguratorTests::TestNoChanges [GOOD] >> TLogSettingsConfiguratorTests::TestAddComponentEntries |88.4%| [TM] {asan, default-linux-x86_64, release} ydb/library/ycloud/impl/ut/unittest |88.4%| [TM] {BAZEL_UPLOAD} ydb/library/ycloud/impl/ut/unittest >> TSchemeShardViewTest::EmptyName [GOOD] >> TConsoleConfigHelpersTests::TestConfigSubscriptionEraser [GOOD] >> TConsoleConfigSubscriptionTests::TestAddConfigSubscription >> KqpPg::TableInsert-useSink [GOOD] >> KqpPg::TempTablesSessionsIsolation >> TUrlMatcherTest::MatchRecursive [GOOD] >> TAuditTest::DeniedPathsAreNotAudited [GOOD] |88.4%| [TA] $(B)/ydb/core/tx/datashard/ut_volatile/test-results/unittest/{meta.json ... results_accumulator.log} |88.4%| [TA] {BAZEL_UPLOAD} $(B)/ydb/core/tx/datashard/ut_volatile/test-results/unittest/{meta.json ... 
results_accumulator.log} >> TConsoleTests::TestSetDefaultComputationalUnitsQuota [GOOD] >> TConsoleTests::TestTenantConfigConsistency >> TAuditTest::ModifyingMethodsAlwaysAuditable [GOOD] >> TServiceAccountServiceTest::Get [GOOD] >> TUrlMatcherTest::MatchExactPathOnly [GOOD] >> TConsoleTests::TestRestartConsoleAndPoolsExtSubdomain [GOOD] >> TConsoleTests::TestRemoveTenantWithBorrowedStorageUnits >> TLogSettingsConfiguratorTests::TestAddComponentEntries [GOOD] >> TLogSettingsConfiguratorTests::TestRemoveComponentEntries |88.4%| [TM] {asan, default-linux-x86_64, release} ydb/core/mon/audit/ut/unittest >> TUrlMatcherTest::MatchRecursive [GOOD] |88.4%| [TM] {BAZEL_UPLOAD} ydb/core/mon/audit/ut/unittest >> TAuditTest::AuditDisabledWithoutAppData [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-LocalUser-DropUser-71 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-LocalUser-DropUser-72 >> BsControllerConfig::ExtendBoxAndStoragePool [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_view/unittest >> TSchemeShardViewTest::EmptyName [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2143] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2143] Leader for TabletID 72057594046678944 is [1:129:2154] sender: [1:130:2058] recipient: [1:112:2143] 2026-01-08T15:36:17.247126Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7938: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2026-01-08T15:36:17.247220Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7966: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2026-01-08T15:36:17.247258Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7852: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2026-01-08T15:36:17.247293Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7868: OperationsProcessing config: using default configuration 2026-01-08T15:36:17.247350Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7874: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2026-01-08T15:36:17.247455Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7874: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2026-01-08T15:36:17.247539Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7998: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2026-01-08T15:36:17.247630Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_shred_manager.cpp:39: [RootShredManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, ShredInterval# 604800.000000s, ShredBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2026-01-08T15:36:17.248532Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:8069: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2026-01-08T15:36:17.248846Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2026-01-08T15:36:17.352124Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7755: Cannot subscribe to console configs 2026-01-08T15:36:17.352175Z node 1 :IMPORT 
WARN: schemeshard_import.cpp:440: Table profiles were not loaded 2026-01-08T15:36:17.366919Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2026-01-08T15:36:17.367245Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2026-01-08T15:36:17.367428Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2026-01-08T15:36:17.383347Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2026-01-08T15:36:17.383828Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:464: Clear TempDirsState with owners number: 0 2026-01-08T15:36:17.384633Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1383: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2026-01-08T15:36:17.385412Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2026-01-08T15:36:17.388787Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:160: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2026-01-08T15:36:17.388984Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_shred_manager.cpp:84: [RootShredManager] Stop 2026-01-08T15:36:17.390170Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2026-01-08T15:36:17.390239Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2026-01-08T15:36:17.390428Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2026-01-08T15:36:17.390476Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2026-01-08T15:36:17.390588Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:213: TTxServerlessStorageBilling.Complete 2026-01-08T15:36:17.390745Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:7111: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2026-01-08T15:36:17.540489Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot" OperationType: ESchemeOpMkDir MkDir { Name: ".sys" } Internal: true FailOnExist: false } TxId: 281474976710657 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:36:17.541568Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "auth_permissions" Type: EAuthPermissions } } TxId: 281474976710658 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:36:17.541735Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at 
schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "auth_effective_permissions" Type: EAuthEffectivePermissions } } TxId: 281474976710659 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:36:17.541828Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "auth_users" Type: EAuthUsers } } TxId: 281474976710660 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:36:17.541897Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "pg_tables" Type: EPgTables } } TxId: 281474976710661 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:36:17.541967Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "ds_pdisks" Type: EPDisks } } TxId: 281474976710662 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:36:17.542026Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "auth_group_members" Type: EAuthGroupMembers } } TxId: 281474976710663 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:36:17.542093Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "top_partitions_one_minute" Type: ETopPartitionsByCpuOneMinute } } TxId: 281474976710664 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:36:17.542180Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "top_queries_by_request_units_one_hour" Type: ETopQueriesByRequestUnitsOneHour } } TxId: 281474976710665 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:36:17.542296Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: 
"ds_vslots" Type: EVSlots } } TxId: 281474976710666 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:36:17.542473Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "top_partitions_by_tli_one_hour" Type: ETopPartitionsByTliOneHour } } TxId: 281474976710667 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:36:17.542550Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "nodes" Type: ENodes } } TxId: 281474976710668 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:36:17.542659Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "top_queries_by_cpu_time_one_hour" Type: ETopQueriesByCpuTimeOneHour } } TxId: 281474976710669 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:36:17.542739Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "top_queries_by_read_bytes_one_hour" Type: ETopQueriesByReadBytesOneHour } } TxId: 281474976710670 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:36:17.542823Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "top_queries_by_read_bytes_one_minute" Type: ETopQueriesByR ... 
irst GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2026-01-08T15:36:18.508891Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:460: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2026-01-08T15:36:18.509044Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2026-01-08T15:36:18.509237Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:494: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2026-01-08T15:36:18.509334Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2026-01-08T15:36:18.509374Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2026-01-08T15:36:18.509408Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2700: Change state for txid 1:0 2 -> 3 2026-01-08T15:36:18.511566Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:494: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2026-01-08T15:36:18.511647Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2026-01-08T15:36:18.511716Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2700: Change state for txid 1:0 3 -> 128 2026-01-08T15:36:18.514133Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:494: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2026-01-08T15:36:18.514195Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2026-01-08T15:36:18.514263Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2026-01-08T15:36:18.514341Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1723: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2026-01-08T15:36:18.514577Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1792: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2026-01-08T15:36:18.516695Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:665: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2026-01-08T15:36:18.516829Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1824: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000037 FAKE_COORDINATOR: advance: minStep5000037 State->FrontStep: 5000036 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000037 2026-01-08T15:36:18.517244Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: 
schemeshard__operation.cpp:685: TTxOperationPlanStep Execute, stepId: 5000037, transactions count in step: 1, at schemeshard: 72057594046678944 2026-01-08T15:36:18.517396Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:689: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 136 RawX2: 4294969454 } } Step: 5000037 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2026-01-08T15:36:18.517450Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2026-01-08T15:36:18.517751Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2700: Change state for txid 1:0 128 -> 240 2026-01-08T15:36:18.517807Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2026-01-08T15:36:18.517966Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:600: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 2 2026-01-08T15:36:18.518028Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:412: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2026-01-08T15:36:18.520017Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2026-01-08T15:36:18.520062Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2026-01-08T15:36:18.520256Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2026-01-08T15:36:18.520295Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:210:2211], at schemeshard: 72057594046678944, txId: 1, path id: 1 2026-01-08T15:36:18.520700Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:494: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2026-01-08T15:36:18.520775Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:497: [72057594046678944] TDone opId# 1:0 ProgressState 2026-01-08T15:36:18.520886Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:928: Part operation is done id#1:0 progress is 1/1 2026-01-08T15:36:18.520920Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1714: TOperation IsReadyToDone TxId: 1 ready parts: 1/1 2026-01-08T15:36:18.520959Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:928: Part operation is done id#1:0 progress is 1/1 2026-01-08T15:36:18.520991Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1714: TOperation IsReadyToDone TxId: 1 ready parts: 1/1 2026-01-08T15:36:18.521027Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1679: TOperation IsReadyToNotify, TxId: 1, ready parts: 1/1, is published: false 2026-01-08T15:36:18.521062Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1714: TOperation IsReadyToDone TxId: 1 ready parts: 1/1 
2026-01-08T15:36:18.521096Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:995: Operation and all the parts is done, operation id: 1:0 2026-01-08T15:36:18.521125Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5577: RemoveTx for txid 1:0 2026-01-08T15:36:18.521207Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:611: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 3 2026-01-08T15:36:18.521254Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:1004: Publication still in progress, tx: 1, publications: 1, subscribers: 0 2026-01-08T15:36:18.521286Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:1011: Publication details: tx: 1, [OwnerId: 72057594046678944, LocalPathId: 1], 6 2026-01-08T15:36:18.521749Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6274: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 6 PathOwnerId: 72057594046678944, cookie: 1 2026-01-08T15:36:18.521858Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 6 PathOwnerId: 72057594046678944, cookie: 1 2026-01-08T15:36:18.521910Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046678944, txId: 1 2026-01-08T15:36:18.521942Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 1, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], version: 6 2026-01-08T15:36:18.521979Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:611: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 2 2026-01-08T15:36:18.522060Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 1, subscribers: 0 2026-01-08T15:36:18.524849Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 1 2026-01-08T15:36:18.525315Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:123: NotifyTxCompletion, unknown transaction, txId: 1, at schemeshard: 72057594046678944 TestModificationResults wait txId: 101 2026-01-08T15:36:18.525920Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:433: actor# [1:669:2659] Bootstrap 2026-01-08T15:36:18.527200Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:452: actor# [1:669:2659] Become StateWork (SchemeCache [1:674:2664]) 2026-01-08T15:36:18.530483Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:377: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/MyRoot" OperationType: ESchemeOpCreateView CreateView { Name: "" QueryText: "Some query" } } TxId: 101 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2026-01-08T15:36:18.530685Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_create_view.cpp:118: [72057594046678944] TCreateView Propose, path: /MyRoot/, opId: 101:0 2026-01-08T15:36:18.530775Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_create_view.cpp:124: [72057594046678944] TCreateView Propose, path: /MyRoot/, opId: 101:0, 
viewDescription: Name: "" QueryText: "Some query" 2026-01-08T15:36:18.530915Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:127: IgniteOperation, opId: 101:1, propose status:StatusSchemeError, reason: Check failed: path: '/MyRoot/', error: path part shouldn't be empty, at schemeshard: 72057594046678944 2026-01-08T15:36:18.531992Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:212: actor# [1:669:2659] HANDLE TEvClientConnected success connect from tablet# 72057594046447617 2026-01-08T15:36:18.535603Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:460: TTxOperationPropose Complete, txId: 101, response: Status: StatusSchemeError Reason: "Check failed: path: \'/MyRoot/\', error: path part shouldn\'t be empty" TxId: 101 SchemeshardId: 72057594046678944, at schemeshard: 72057594046678944 2026-01-08T15:36:18.535865Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 101, database: /MyRoot, subject: , status: StatusSchemeError, reason: Check failed: path: '/MyRoot/', error: path part shouldn't be empty, operation: CREATE VIEW, path: /MyRoot/ 2026-01-08T15:36:18.536119Z node 1 :TX_PROXY DEBUG: client.cpp:89: Handle TEvAllocateResult ACCEPTED RangeBegin# 281474976715656 RangeEnd# 281474976720656 txAllocator# 72057594046447617 TestModificationResult got TxId: 101, wait until txId: 101 |88.4%| [TM] {BAZEL_UPLOAD} ydb/core/tx/schemeshard/ut_view/unittest |88.4%| [TM] {asan, default-linux-x86_64, release} ydb/core/mon/audit/ut/unittest >> TAuditTest::DeniedPathsAreNotAudited [GOOD] |88.4%| [TM] {BAZEL_UPLOAD} ydb/core/mon/audit/ut/unittest |88.4%| [TM] {asan, default-linux-x86_64, release} ydb/core/mon/audit/ut/unittest >> TAuditTest::ModifyingMethodsAlwaysAuditable [GOOD] |88.4%| [TM] {BAZEL_UPLOAD} ydb/core/mon/audit/ut/unittest >> TConsoleConfigSubscriptionTests::TestAddConfigSubscription [GOOD] >> TConsoleConfigSubscriptionTests::TestListConfigSubscriptions |88.4%| [TM] {asan, default-linux-x86_64, release} ydb/core/mon/audit/ut/unittest >> TUrlMatcherTest::MatchExactPathOnly [GOOD] |88.4%| [TM] {BAZEL_UPLOAD} ydb/core/mon/audit/ut/unittest >> TConsoleInMemoryConfigSubscriptionTests::TestSubscriptionClientManyUpdates [GOOD] >> TConsoleInMemoryConfigSubscriptionTests::TestSubscriptionClientManyUpdatesAddRemove |88.4%| [TM] {asan, default-linux-x86_64, release} ydb/core/mon/audit/ut/unittest >> TAuditTest::AuditDisabledWithoutAppData [GOOD] |88.4%| [TM] {BAZEL_UPLOAD} ydb/core/mon/audit/ut/unittest |88.4%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_view/unittest |88.4%| [TM] {BAZEL_UPLOAD} ydb/core/tx/schemeshard/ut_view/unittest ------- [TM] {asan, default-linux-x86_64, release} ydb/core/mind/bscontroller/ut_bscontroller/unittest >> BsControllerConfig::ExtendBoxAndStoragePool [GOOD] Test command err: Leader for TabletID 72057594037932033 is [0:0:0] sender: [1:3064:2106] recipient: [1:2963:2117] IGNORE Leader for TabletID 72057594037932033 is [0:0:0] sender: [1:3064:2106] recipient: [1:2963:2117] Leader for TabletID 72057594037932033 is [1:3066:2119] sender: [1:3067:2106] recipient: [1:2963:2117] 2026-01-08T15:35:40.299955Z node 1 :BS_CONTROLLER DEBUG: {BSC05@impl.h:2080} StateInit event Type# 268828672 Event# NKikimr::TEvTablet::TEvBoot 2026-01-08T15:35:40.301111Z node 1 :BS_CONTROLLER DEBUG: {BSC05@impl.h:2080} StateInit event Type# 268828673 Event# NKikimr::TEvTablet::TEvRestored 2026-01-08T15:35:40.301482Z node 1 :BS_CONTROLLER DEBUG: {BSC22@console_interaction.cpp:14} Console interaction started 
2026-01-08T15:35:40.303583Z node 1 :BS_CONTROLLER DEBUG: {BSC05@impl.h:2080} StateInit event Type# 268828684 Event# NKikimr::TEvTablet::TEvFollowerSyncComplete
2026-01-08T15:35:40.304454Z node 1 :BS_CONTROLLER DEBUG: {BSC05@impl.h:2080} StateInit event Type# 268639244 Event# NKikimr::TEvNodeWardenStorageConfig
2026-01-08T15:35:40.304808Z node 1 :BS_CONTROLLER DEBUG: {BSC05@impl.h:2080} StateInit event Type# 131082 Event# NActors::TEvInterconnect::TEvNodesInfo
2026-01-08T15:35:40.304839Z node 1 :BS_CONTROLLER DEBUG: {BSC01@bsc.cpp:829} Handle TEvInterconnect::TEvNodesInfo
2026-01-08T15:35:40.305169Z node 1 :BS_CONTROLLER DEBUG: {BSCTXIS01@init_scheme.cpp:17} TTxInitScheme Execute
2026-01-08T15:35:40.313902Z node 1 :BS_CONTROLLER DEBUG: {BSCTXIS03@init_scheme.cpp:44} TTxInitScheme Complete
2026-01-08T15:35:40.314028Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM01@migrate.cpp:190} Execute tx
2026-01-08T15:35:40.314196Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM02@migrate.cpp:251} Complete tx IncompatibleData# false
2026-01-08T15:35:40.314355Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxTrimUnusedSlots
2026-01-08T15:35:40.314448Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxTrimUnusedSlots
2026-01-08T15:35:40.314519Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateSchemaVersion
Leader for TabletID 72057594037932033 is [1:3066:2119] sender: [1:3088:2106] recipient: [1:60:2107]
2026-01-08T15:35:40.326934Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateSchemaVersion
2026-01-08T15:35:40.327066Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxGenerateInstanceId
2026-01-08T15:35:40.373259Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxGenerateInstanceId
2026-01-08T15:35:40.373425Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateStaticPDiskInfo
2026-01-08T15:35:40.373528Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateStaticPDiskInfo
2026-01-08T15:35:40.373635Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxFillInNonNullConfigForPDisk
2026-01-08T15:35:40.373768Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxFillInNonNullConfigForPDisk
2026-01-08T15:35:40.373853Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxDropDriveStatus
2026-01-08T15:35:40.373896Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxDropDriveStatus
2026-01-08T15:35:40.373960Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateCompatibilityInfo
2026-01-08T15:35:40.385085Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateCompatibilityInfo
2026-01-08T15:35:40.385603Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateEnableConfigV2
2026-01-08T15:35:40.396991Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateEnableConfigV2
2026-01-08T15:35:40.397151Z node 1 :BS_CONTROLLER DEBUG: {BSCTXLE01@load_everything.cpp:21} TTxLoadEverything Execute
2026-01-08T15:35:40.400178Z node 1 :BS_CONTROLLER DEBUG: {BSCTXLE03@load_everything.cpp:705} TTxLoadEverything Complete
2026-01-08T15:35:40.400254Z node 1 :BS_CONTROLLER DEBUG: {BSC09@impl.h:2211} LoadFinished
2026-01-08T15:35:40.400478Z node 1 :BS_CONTROLLER DEBUG: {BSC18@console_interaction.cpp:31} Console connection service started
2026-01-08T15:35:40.400527Z node 1 :BS_CONTROLLER DEBUG: {BSCTXLE04@load_everything.cpp:710} TTxLoadEverything InitQueue processed
2026-01-08T15:35:40.418293Z node 1 :BS_CONTROLLER DEBUG: {BSCTXCC01@config_cmd.cpp:433} Execute TEvControllerConfigRequest Request# {Command { DefineHostConfig { HostConfigId: 1 Drive { Path: "/dev/disk1" } Drive { Path: "/dev/disk2" SharedWithOs: true } Drive { Path: "/dev/disk3" Type: SSD } } } Command { DefineBox { BoxId: 1 Name: "first box" Host { Key { Fqdn: "::1" IcPort: 12001 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12002 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12003 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12004 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12005 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12006 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12007 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12008 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12009 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12010 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12011 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12012 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12013 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12014 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12015 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12016 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12017 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12018 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12019 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12020 } HostConfigId: 1 } } } Command { DefineStoragePool { BoxId: 1 StoragePoolId: 1 Name: "first storage pool" ErasureSpecies: "block-4-2" VDiskKind: "Default" NumGroups: 60 PDiskFilter { Property { Type: ROT } } } } Command { QueryBaseConfig { } } }
2026-01-08T15:35:40.419157Z node 1 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:356} Create new pdisk PDiskId# 1:1000 Path# /dev/disk1
2026-01-08T15:35:40.419216Z node 1 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:356} Create new pdisk PDiskId# 1:1001 Path# /dev/disk2
2026-01-08T15:35:40.419240Z node 1 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:356} Create new pdisk PDiskId# 1:1002 Path# /dev/disk3
2026-01-08T15:35:40.419262Z node 1 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:356} Create new pdisk PDiskId# 2:1000 Path# /dev/disk1
2026-01-08T15:35:40.419284Z node 1 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:356} Create new pdisk PDiskId# 2:1001 Path# /dev/disk2
2026-01-08T15:35:40.419305Z node 1 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:356} Create new pdisk PDiskId# 2:1002 Path# /dev/disk3
2026-01-08T15:35:40.419328Z node 1 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:356} Create new pdisk PDiskId# 3:1000 Path# /dev/disk1
2026-01-08T15:35:40.419366Z node 1 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:356} Create new pdisk PDiskId# 3:1001 Path# /dev/disk2
2026-01-08T15:35:40.419392Z node 1 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:356} Create new pdisk PDiskId# 3:1002 Path# /dev/disk3
2026-01-08T15:35:40.419413Z node 1 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:356} Create new pdisk PDiskId# 4:1000 Path# /dev/disk1
2026-01-08T15:35:40.419451Z node 1 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:356} Create new pdisk PDiskId# 4:1001 Path# /dev/disk2
2026-01-08T15:35:40.419486Z node 1 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:356} Create new pdisk PDiskId# 4:1002 Path# /dev/disk3
2026-01-08T15:35:40.419513Z node 1 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:356} Create new pdisk PDiskId# 5:1000 Path# /dev/disk1
2026-01-08T15:35:40.419534Z node 1 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:356} Create new pdisk PDiskId# 5:1001 Path# /dev/disk2
2026-01-08T15:35:40.419555Z node 1 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:356} Create new pdisk PDiskId# 5:1002 Path# /dev/disk3
2026-01-08T15:35:40.419596Z node 1 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:356} Create new pdisk PDiskId# 6:1000 Path# /dev/disk1
2026-01-08T15:35:40.419635Z node 1 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:356} Create new pdisk PDiskId# 6:1001 Path# /dev/disk2
2026-01-08T15:35:40.419680Z node 1 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:356} Create new pdisk PDiskId# 6:1002 Path# /dev/disk3
2026-01-08T15:35:40.419702Z node 1 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:356} Create new pdisk PDiskId# 7:1000 Path# /dev/disk1
2026-01-08T15:35:40.419730Z node 1 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:356} Create new pdisk PDiskId# 7:1001 Path# /dev/disk2
2026-01-08T15:35:40.419751Z node 1 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:356} Create new pdisk PDiskId# 7:1002 Path# /dev/disk3
2026-01-08T15:35:40.419782Z node 1 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:356} Create new pdisk PDiskId# 8:1000 Path# /dev/disk1
2026-01-08T15:35:40.419811Z node 1 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:356} Create new pdisk PDiskId# 8:1001 Path# /dev/disk2
2026-01-08T15:35:40.419848Z node 1 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:356} Create new pdisk PDiskId# 8:1002 Path# /dev/disk3
2026-01-08T15:35:40.419870Z node 1 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:356} Create new pdisk PDiskId# 9:1000 Path# /dev/disk1
2026-01-08T15:35:40.419891Z node 1 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:356} Create new pdisk PDiskId# 9:1001 Path# /dev/disk2
2026-01-08T15:35:40.419913Z node 1 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:356} Create new pdisk PDiskId# 9:1002 Path# /dev/disk3
2026-01-08T15:35:40.419939Z node 1 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:356} Create new pdisk PDiskId# 10:1000 Path# /dev/disk1
2026-01-08T15:35:40.419962Z node 1 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:356} Create new pdisk PDiskId# 10:1001 Path# /dev/disk2
2026-01-08T15:35:40.419997Z node 1 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:356} Create new pdisk PDiskId# 10:1002 Path# /dev/disk3
2026-01-08T15:35:40.420023Z node 1 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:356} Create new pdisk PDiskId# 11:1000 Path# /dev/disk1
2026-01-08T15:35:40.420045Z node 1 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:356} Create new pdisk PDiskId# 11:1001 Path# /dev/disk2
2026-01-08T15:35:40.420082Z node 1 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:356} Cr ... 356} Create new pdisk PDiskId# 204:1002 Path# /dev/disk3
2026-01-08T15:36:08.930701Z node 161 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:356} Create new pdisk PDiskId# 205:1000 Path# /dev/disk1
2026-01-08T15:36:08.930735Z node 161 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:356} Create new pdisk PDiskId# 205:1001 Path# /dev/disk2
2026-01-08T15:36:08.930766Z node 161 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:356} Create new pdisk PDiskId# 205:1002 Path# /dev/disk3
2026-01-08T15:36:08.930796Z node 161 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:356} Create new pdisk PDiskId# 206:1000 Path# /dev/disk1
2026-01-08T15:36:08.930826Z node 161 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:356} Create new pdisk PDiskId# 206:1001 Path# /dev/disk2
2026-01-08T15:36:08.930855Z node 161 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:356} Create new pdisk PDiskId# 206:1002 Path# /dev/disk3
2026-01-08T15:36:08.930884Z node 161 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:356} Create new pdisk PDiskId# 207:1000 Path# /dev/disk1
2026-01-08T15:36:08.930915Z node 161 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:356} Create new pdisk PDiskId# 207:1001 Path# /dev/disk2
2026-01-08T15:36:08.930948Z node 161 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:356} Create new pdisk PDiskId# 207:1002 Path# /dev/disk3
2026-01-08T15:36:08.930976Z node 161 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:356} Create new pdisk PDiskId# 208:1000 Path# /dev/disk1
2026-01-08T15:36:08.931008Z node 161 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:356} Create new pdisk PDiskId# 208:1001 Path# /dev/disk2
2026-01-08T15:36:08.931039Z node 161 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:356} Create new pdisk PDiskId# 208:1002 Path# /dev/disk3
2026-01-08T15:36:08.931070Z node 161 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:356} Create new pdisk PDiskId# 209:1000 Path# /dev/disk1
2026-01-08T15:36:08.931102Z node 161 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:356} Create new pdisk PDiskId# 209:1001 Path# /dev/disk2
2026-01-08T15:36:08.931140Z node 161 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:356} Create new pdisk PDiskId# 209:1002 Path# /dev/disk3
2026-01-08T15:36:08.931171Z node 161 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:356} Create new pdisk PDiskId# 210:1000 Path# /dev/disk1
2026-01-08T15:36:08.931202Z node 161 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:356} Create new pdisk PDiskId# 210:1001 Path# /dev/disk2
2026-01-08T15:36:08.931234Z node 161 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:356} Create new pdisk PDiskId# 210:1002 Path# /dev/disk3
2026-01-08T15:36:09.324164Z node 161 :BS_CONTROLLER ERROR: {BSC07@impl.h:2204} ProcessControllerEvent event processing took too much time Type# 268637706 Duration# 0.405723s
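For readability, the TEvControllerConfigRequest dumped above reduces to the following shape. This is a minimal restatement of the same proto text (one host shown instead of twenty), not an independent reference for the BSC command schema:

    Command {
      DefineHostConfig {
        HostConfigId: 1
        Drive { Path: "/dev/disk1" }
        Drive { Path: "/dev/disk2" SharedWithOs: true }
        Drive { Path: "/dev/disk3" Type: SSD }
      }
    }
    Command {
      DefineBox {
        BoxId: 1
        Name: "first box"
        Host { Key { Fqdn: "::1" IcPort: 12001 } HostConfigId: 1 }  # repeated for IcPort 12002..12020 in the dump above
      }
    }
    Command {
      DefineStoragePool {
        BoxId: 1
        StoragePoolId: 1
        Name: "first storage pool"
        ErasureSpecies: "block-4-2"
        VDiskKind: "Default"
        NumGroups: 60
        PDiskFilter { Property { Type: ROT } }  # matches only rotational drives, so the SSD at /dev/disk3 is not fitted
      }
    }
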
2026-01-08T15:36:09.324413Z node 161 :BS_CONTROLLER ERROR: {BSC00@bsc.cpp:1009} StateWork event processing took too much time Type# 2146435078 Duration# 0.406000s
2026-01-08T15:36:09.398698Z node 161 :BS_CONTROLLER DEBUG: {BSCTXCC01@config_cmd.cpp:433} Execute TEvControllerConfigRequest Request# {Command { DefineHostConfig { HostConfigId: 4 Drive { Path: "/dev/disk1" } Drive { Path: "/dev/disk2" SharedWithOs: true } Drive { Path: "/dev/disk3" Type: SSD } } } Command { DefineBox { BoxId: 1 Name: "first box" Host { Key { Fqdn: "::1" IcPort: 12001 } HostConfigId: 4 } Host { Key { Fqdn: "::1" IcPort: 12002 } HostConfigId: 4 } Host { Key { Fqdn: "::1" IcPort: 12003 } HostConfigId: 4 } Host { Key { Fqdn: "::1" IcPort: 12004 } HostConfigId: 4 } Host { Key { Fqdn: "::1" IcPort: 12005 } HostConfigId: 4 } Host { Key { Fqdn: "::1" IcPort: 12006 } HostConfigId: 4 } Host { Key { Fqdn: "::1" IcPort: 12007 } HostConfigId: 4 } Host { Key { Fqdn: "::1" IcPort: 12008 } HostConfigId: 4 } Host { Key { Fqdn: "::1" IcPort: 12009 } HostConfigId: 4 } Host { Key { Fqdn: "::1" IcPort: 12010 } HostConfigId: 4 } Host { Key { Fqdn: "::1" IcPort: 12011 } HostConfigId: 4 } Host { Key { Fqdn: "::1" IcPort: 12012 } HostConfigId: 4 } Host { Key { Fqdn: "::1" IcPort: 12013 } HostConfigId: 4 } Host { Key { Fqdn: "::1" IcPort: 12014 } HostConfigId: 4 } Host { Key { Fqdn: "::1" IcPort: 12015 } HostConfigId: 4 } Host { Key { Fqdn: "::1" IcPort: 12016 } HostConfigId: 4 } Host { Key { Fqdn: "::1" IcPort: 12017 } HostConfigId: 4 } Host { Key { Fqdn: "::1" IcPort: 12018 } HostConfigId: 4 } Host { Key { Fqdn: "::1" IcPort: 12019 } HostConfigId: 4 } Host { Key { Fqdn: "::1" IcPort: 12020 } HostConfigId: 4 } Host { Key { Fqdn: "::1" IcPort: 12021 } HostConfigId: 4 } Host { Key { Fqdn: "::1" IcPort: 12022 } HostConfigId: 4 } Host { Key { Fqdn: "::1" IcPort: 12023 } HostConfigId: 4 } Host { Key { Fqdn: "::1" IcPort: 12024 } HostConfigId: 4 } Host { Key { Fqdn: "::1" IcPort: 12025 } HostConfigId: 4 } Host { Key { Fqdn: "::1" IcPort: 12026 } HostConfigId: 4 } Host { Key { Fqdn: "::1" IcPort: 12027 } HostConfigId: 4 } Host { Key { Fqdn: "::1" IcPort: 12028 } HostConfigId: 4 } Host { Key { Fqdn: "::1" IcPort: 12029 } HostConfigId: 4 } Host { Key { Fqdn: "::1" IcPort: 12030 } HostConfigId: 4 } Host { Key { Fqdn: "::1" IcPort: 12031 } HostConfigId: 4 } Host { Key { Fqdn: "::1" IcPort: 12032 } HostConfigId: 4 } Host { Key { Fqdn: "::1" IcPort: 12033 } HostConfigId: 4 } Host { Key { Fqdn: "::1" IcPort: 12034 } HostConfigId: 4 } Host { Key { Fqdn: "::1" IcPort: 12035 } HostConfigId: 4 } Host { Key { Fqdn: "::1" IcPort: 12036 } HostConfigId: 4 } Host { Key { Fqdn: "::1" IcPort: 12037 } HostConfigId: 4 } Host { Key { Fqdn: "::1" IcPort: 12038 } HostConfigId: 4 } Host { Key { Fqdn: "::1" IcPort: 12039 } HostConfigId: 4 } Host { Key { Fqdn: "::1" IcPort: 12040 } HostConfigId: 4 } Host { Key { Fqdn: "::1" IcPort: 12041 } HostConfigId: 4 } Host { Key { Fqdn: "::1" IcPort: 12042 } HostConfigId: 4 } Host { Key { Fqdn: "::1" IcPort: 12043 } HostConfigId: 4 } Host { Key { Fqdn: "::1" IcPort: 12044 } HostConfigId: 4 } Host { Key { Fqdn: "::1" IcPort: 12045 } HostConfigId: 4 } Host { Key { Fqdn: "::1" IcPort: 12046 } HostConfigId: 4 } Host { Key { Fqdn: "::1" IcPort: 12047 } HostConfigId: 4 } Host { Key { Fqdn: "::1" IcPort: 12048 } HostConfigId: 4 } Host { Key { Fqdn: "::1" IcPort: 12049 } HostConfigId: 4 } Host { Key { Fqdn: "::1" IcPort: 12050 } HostConfigId: 4 } Host { Key { Fqdn: "::1" IcPort: 12051 } HostConfigId: 4 } Host { Key { Fqdn: "::1" IcPort: 12052 } HostConfigId: 4 } Host { Key { Fqdn: "::1" IcPort: 12053 } HostConfigId: 4 } Host { Key { Fqdn: "::1" IcPort: 12054 } HostConfigId: 4 } Host { Key { Fqdn: "::1" IcPort: 12055 } HostConfigId: 4 } Host { Key { Fqdn: "::1" IcPort: 12056 } HostConfigId: 4 } Host { Key { Fqdn: "::1" IcPort: 12057 } HostConfigId: 4 } Host { Key { Fqdn: "::1" IcPort: 12058 } HostConfigId: 4 } Host { Key { Fqdn: "::1" IcPort: 12059 } HostConfigId: 4 } Host { Key { Fqdn: "::1" IcPort: 12060 } HostConfigId: 4 } ItemConfigGeneration: 1 } } Command { DefineStoragePool { BoxId: 1 StoragePoolId: 1 Name: "first storage pool" ErasureSpecies: "block-4-2" VDiskKind: "Default" NumGroups: 180 PDiskFilter { Property { Type: ROT } } ItemConfigGeneration: 1 } } Command { QueryBaseConfig { } } }
2026-01-08T15:36:09.407731Z node 161 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:356} Create new pdisk PDiskId# 211:1000 Path# /dev/disk1
2026-01-08T15:36:09.407830Z node 161 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:356} Create new pdisk PDiskId# 211:1001 Path# /dev/disk2
2026-01-08T15:36:09.407896Z node 161 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:356} Create new pdisk PDiskId# 211:1002 Path# /dev/disk3
2026-01-08T15:36:09.407924Z node 161 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:356} Create new pdisk PDiskId# 212:1000 Path# /dev/disk1
2026-01-08T15:36:09.407981Z node 161 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:356} Create new pdisk PDiskId# 212:1001 Path# /dev/disk2
2026-01-08T15:36:09.408011Z node 161 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:356} Create new pdisk PDiskId# 212:1002 Path# /dev/disk3
2026-01-08T15:36:09.408039Z node 161 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:356} Create new pdisk PDiskId# 213:1000 Path# /dev/disk1
2026-01-08T15:36:09.408067Z node 161 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:356} Create new pdisk PDiskId# 213:1001 Path# /dev/disk2
2026-01-08T15:36:09.408094Z node 161 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:356} Create new pdisk PDiskId# 213:1002 Path# /dev/disk3
2026-01-08T15:36:09.408120Z node 161 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:356} Create new pdisk PDiskId# 214:1000 Path# /dev/disk1
2026-01-08T15:36:09.408170Z node 161 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:356} Create new pdisk PDiskId# 214:1001 Path# /dev/disk2
2026-01-08T15:36:09.408198Z node 161 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:356} Create new pdisk PDiskId# 214:1002 Path# /dev/disk3
2026-01-08T15:36:09.408253Z node 161 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:356} Create new pdisk PDiskId# 215:1000 Path# /dev/disk1
2026-01-08T15:36:09.408315Z node 161 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:356} Create new pdisk PDiskId# 215:1001 Path# /dev/disk2
2026-01-08T15:36:09.408353Z node 161 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:356} Create new pdisk PDiskId# 215:1002 Path# /dev/disk3
2026-01-08T15:36:09.408389Z node 161 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:356} Create new pdisk PDiskId# 216:1000 Path# /dev/disk1
2026-01-08T15:36:09.408437Z node 161 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:356} Create new pdisk PDiskId# 216:1001 Path# /dev/disk2
2026-01-08T15:36:09.408483Z node 161 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:356} Create new pdisk PDiskId# 216:1002 Path# /dev/disk3
2026-01-08T15:36:09.408520Z node 161 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:356} Create new pdisk PDiskId# 217:1000 Path# /dev/disk1
2026-01-08T15:36:09.408546Z node 161 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:356} Create new pdisk PDiskId# 217:1001 Path# /dev/disk2
2026-01-08T15:36:09.408584Z node 161 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:356} Create new pdisk PDiskId# 217:1002 Path# /dev/disk3
2026-01-08T15:36:09.408619Z node 161 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:356} Create new pdisk PDiskId# 218:1000 Path# /dev/disk1
2026-01-08T15:36:09.408646Z node 161 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:356} Create new pdisk PDiskId# 218:1001 Path# /dev/disk2
2026-01-08T15:36:09.408673Z node 161 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:356} Create new pdisk PDiskId# 218:1002 Path# /dev/disk3
2026-01-08T15:36:09.408707Z node 161 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:356} Create new pdisk PDiskId# 219:1000 Path# /dev/disk1
2026-01-08T15:36:09.408760Z node 161 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:356} Create new pdisk PDiskId# 219:1001 Path# /dev/disk2
2026-01-08T15:36:09.408791Z node 161 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:356} Create new pdisk PDiskId# 219:1002 Path# /dev/disk3
2026-01-08T15:36:09.408818Z node 161 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:356} Create new pdisk PDiskId# 220:1000 Path# /dev/disk1
2026-01-08T15:36:09.408854Z node 161 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:356} Create new pdisk PDiskId# 220:1001 Path# /dev/disk2
2026-01-08T15:36:09.408891Z node 161 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:356} Create new pdisk PDiskId# 220:1002 Path# /dev/disk3
2026-01-08T15:36:09.525309Z node 161 :BS_CONTROLLER ERROR: {BSC07@impl.h:2204} ProcessControllerEvent event processing took too much time Type# 268637706 Duration# 0.127608s
2026-01-08T15:36:09.525559Z node 161 :BS_CONTROLLER ERROR: {BSC00@bsc.cpp:1009} StateWork event processing took too much time Type# 2146435078 Duration# 0.127886s
|88.4%| [TM] {BAZEL_UPLOAD} ydb/core/mind/bscontroller/ut_bscontroller/unittest
>> TLogSettingsConfiguratorTests::TestRemoveComponentEntries [GOOD]
>> TLogSettingsConfiguratorTests::TestChangeDefaults
|88.4%| [TA] $(B)/ydb/core/mon/audit/ut/test-results/unittest/{meta.json ... results_accumulator.log}
|88.4%| [TA] {BAZEL_UPLOAD} $(B)/ydb/core/mon/audit/ut/test-results/unittest/{meta.json ... results_accumulator.log}
|88.4%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/schemeshard/ut_shred/ydb-core-tx-schemeshard-ut_shred
|88.4%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/schemeshard/ut_shred/ydb-core-tx-schemeshard-ut_shred
|88.4%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/services/metadata/initializer/ut/ydb-services-metadata-initializer-ut
|88.4%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/services/metadata/initializer/ut/ydb-services-metadata-initializer-ut
|88.4%| [TA] {RESULT} $(B)/ydb/core/mon/audit/ut/test-results/unittest/{meta.json ... results_accumulator.log}
|88.4%| [LD] {RESULT} $(B)/ydb/core/tx/schemeshard/ut_shred/ydb-core-tx-schemeshard-ut_shred
|88.4%| [LD] {RESULT} $(B)/ydb/services/metadata/initializer/ut/ydb-services-metadata-initializer-ut
>> TSchemeShardViewTest::AsyncDropSameView
>> TConsoleTests::TestCreateTenantExtSubdomain [GOOD]
>> TConsoleTests::TestCreateTenantWrongName
>> TSchemeShardViewTest::AsyncCreateSameView
|88.4%| [TA] {RESULT} $(B)/ydb/core/tx/datashard/ut_volatile/test-results/unittest/{meta.json ... results_accumulator.log}
|88.4%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/schemeshard/ut_export/ydb-core-tx-schemeshard-ut_export
|88.4%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/schemeshard/ut_export/ydb-core-tx-schemeshard-ut_export
|88.4%| [LD] {RESULT} $(B)/ydb/core/tx/schemeshard/ut_export/ydb-core-tx-schemeshard-ut_export
>> TLogSettingsConfiguratorTests::TestChangeDefaults [GOOD]
>> TModificationsValidatorTests::TestComputeAffectedConfigs_All_DomainAffected_DOMAIN [GOOD]
>> TModificationsValidatorTests::TestComputeAffectedConfigs_All_DomainAffected_TENANTS [GOOD]
>> TModificationsValidatorTests::TestApplyValidators_TENANTS [GOOD]
>> TModificationsValidatorTests::TestApplyValidators_TENANTS_AND_NODE_TYPES [GOOD]
>> TModificationsValidatorTests::TestApplyValidatorsWithOldConfig [GOOD]
>> TModificationsValidatorTests::TestChecksLimitError [GOOD]
>> TModificationsValidatorTests::TestChecksLimitWarning [GOOD]
>> TConsoleConfigSubscriptionTests::TestListConfigSubscriptions [GOOD]
>> TConsoleConfigSubscriptionTests::TestNotificationForNewSubscription
>> TSchemeShardViewTest::CreateView
>> TSchemeShardViewTest::DropView
>> FolderServiceTest::TFolderService [GOOD]
>> TConsoleTests::TestTenantConfigConsistency [GOOD]
>> TConsoleTests::TestSetConfig
>> TCacheTest::RacyRecreateAndSync
>> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-LocalUser-CreateUser-24 [FAIL]
>> TSchemeShardViewTest::AsyncCreateDifferentViews
>> TSchemeShardViewTest::ReadOnlyMode
>> TConsoleTests::TestCreateServerlessTenant [GOOD]
>> TConsoleTests::TestCreateServerlessTenantWrongSharedDb
------- [TM] {asan, default-linux-x86_64, release} ydb/core/cms/console/ut_console/unittest >> TModificationsValidatorTests::TestChecksLimitWarning [GOOD]
Test command err:
2026-01-08T15:36:08.462266Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7755: Cannot subscribe to console configs
2026-01-08T15:36:08.462364Z node 1 :IMPORT WARN: schemeshard_import.cpp:440: Table profiles were not loaded
2026-01-08T15:36:08.699191Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046578944, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311)
2026-01-08T15:36:09.925709Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7755: Cannot subscribe to console configs
2026-01-08T15:36:09.925770Z node 2 :IMPORT WARN: schemeshard_import.cpp:440: Table profiles were not loaded
2026-01-08T15:36:10.011732Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046578944, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311)
2026-01-08T15:36:11.224344Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7755: Cannot subscribe to console configs
2026-01-08T15:36:11.224412Z node 3 :IMPORT WARN: schemeshard_import.cpp:440: Table profiles were not loaded
2026-01-08T15:36:11.308516Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046578944, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311)
2026-01-08T15:36:12.535668Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7755: Cannot subscribe to console configs
2026-01-08T15:36:12.535753Z node 4 :IMPORT WARN: schemeshard_import.cpp:440: Table profiles were not loaded
2026-01-08T15:36:12.623559Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046578944, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311)
2026-01-08T15:36:13.920335Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7755: Cannot subscribe to console configs
2026-01-08T15:36:13.920400Z node 5 :IMPORT WARN: schemeshard_import.cpp:440: Table profiles were not loaded
2026-01-08T15:36:14.016251Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046578944, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311)
2026-01-08T15:36:15.283093Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7755: Cannot subscribe to console configs
2026-01-08T15:36:15.283177Z node 6 :IMPORT WARN: schemeshard_import.cpp:440: Table profiles were not loaded
2026-01-08T15:36:15.380664Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046578944, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311)
2026-01-08T15:36:16.676330Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7755: Cannot subscribe to console configs
2026-01-08T15:36:16.676430Z node 7 :IMPORT WARN: schemeshard_import.cpp:440: Table profiles were not loaded
2026-01-08T15:36:16.767883Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046578944, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311)
E0108 15:36:18.383157137  223602 trace.cc:67] Unknown trace var: 'sdk_authz'
2026-01-08T15:36:18.383943Z node 8 :CMS_CONFIGS NOTICE: log_settings_configurator.cpp:192: TLogSettingsConfigurator: Priority for the component CMS_CONFIGS has been changed from WARN to NOTICE
2026-01-08T15:36:18.384047Z node 8 :CMS_CONFIGS NOTICE: log_settings_configurator.cpp:203: TLogSettingsConfigurator: Sampling priority for the component CMS_CONFIGS has been changed from WARN to DEBUG
2026-01-08T15:36:18.384096Z node 8 :CMS_CONFIGS NOTICE: log_settings_configurator.cpp:192: TLogSettingsConfigurator: Priority for the component CMS_CLUSTER has been changed from WARN to NOTICE
2026-01-08T15:36:18.384130Z node 8 :CMS_CONFIGS NOTICE: log_settings_configurator.cpp:203: TLogSettingsConfigurator: Sampling priority for the component CMS_CLUSTER has been changed from WARN to DEBUG
2026-01-08T15:36:18.384161Z node 8 :CMS_CONFIGS NOTICE: log_settings_configurator.cpp:192: TLogSettingsConfigurator: Priority for the component KQP_PROXY has been changed from WARN to NOTICE
2026-01-08T15:36:18.384195Z node 8 :CMS_CONFIGS NOTICE: log_settings_configurator.cpp:203: TLogSettingsConfigurator: Sampling priority for the component KQP_PROXY has been changed from WARN to DEBUG
2026-01-08T15:36:18.384227Z node 8 :CMS_CONFIGS NOTICE: log_settings_configurator.cpp:192: TLogSettingsConfigurator: Priority for the component KQP_WORKER has been changed from WARN to NOTICE
2026-01-08T15:36:18.384258Z node 8 :CMS_CONFIGS NOTICE: log_settings_configurator.cpp:203: TLogSettingsConfigurator: Sampling priority for the component KQP_WORKER has been changed from WARN to DEBUG
2026-01-08T15:36:18.384290Z node 8 :CMS_CONFIGS NOTICE: log_settings_configurator.cpp:192: TLogSettingsConfigurator: Priority for the component KQP_GATEWAY has been changed from WARN to NOTICE
2026-01-08T15:36:18.384320Z node 8 :CMS_CONFIGS NOTICE: log_settings_configurator.cpp:203: TLogSettingsConfigurator: Sampling priority for the component KQP_GATEWAY has been changed from WARN to DEBUG
2026-01-08T15:36:18.384353Z node 8 :CMS_CONFIGS NOTICE: log_settings_configurator.cpp:192: TLogSettingsConfigurator: Priority for the component KQP_YQL has been changed from WARN to NOTICE
2026-01-08T15:36:18.384385Z node 8 :CMS_CONFIGS NOTICE: log_settings_configurator.cpp:203: TLogSettingsConfigurator: Sampling priority for the component KQP_YQL has been changed from WARN to DEBUG
2026-01-08T15:36:18.384417Z node 8 :CMS_CONFIGS NOTICE: log_settings_configurator.cpp:192: TLogSettingsConfigurator: Priority for the component KQP_EXECUTER has been changed from WARN to NOTICE
2026-01-08T15:36:18.384452Z node 8 :CMS_CONFIGS NOTICE: log_settings_configurator.cpp:203: TLogSettingsConfigurator: Sampling priority for the component KQP_EXECUTER has been changed from WARN to DEBUG
2026-01-08T15:36:18.384485Z node 8 :CMS_CONFIGS NOTICE: log_settings_configurator.cpp:192: TLogSettingsConfigurator: Priority for the component KQP_COMPUTE has been changed from WARN to NOTICE
2026-01-08T15:36:18.384518Z node 8 :CMS_CONFIGS NOTICE: log_settings_configurator.cpp:203: TLogSettingsConfigurator: Sampling priority for the component KQP_COMPUTE has been changed from WARN to DEBUG
2026-01-08T15:36:18.384552Z node 8 :CMS_CONFIGS NOTICE: log_settings_configurator.cpp:192: TLogSettingsConfigurator: Priority for the component KQP_SLOW_LOG has been changed from WARN to NOTICE
2026-01-08T15:36:18.384583Z node 8 :CMS_CONFIGS NOTICE: log_settings_configurator.cpp:203: TLogSettingsConfigurator: Sampling priority for the component KQP_SLOW_LOG has been changed from WARN to DEBUG
2026-01-08T15:36:18.384614Z node 8 :CMS_CONFIGS NOTICE: log_settings_configurator.cpp:192: TLogSettingsConfigurator: Priority for the component KQP_COMPILE_SERVICE has been changed from WARN to NOTICE
2026-01-08T15:36:18.384648Z node 8 :CMS_CONFIGS NOTICE: log_settings_configurator.cpp:203: TLogSettingsConfigurator: Sampling priority for the component KQP_COMPILE_SERVICE has been changed from WARN to DEBUG
2026-01-08T15:36:18.384678Z node 8 :CMS_CONFIGS NOTICE: log_settings_configurator.cpp:192: TLogSettingsConfigurator: Priority for the component KQP_COMPILE_ACTOR has been changed from WARN to NOTICE
2026-01-08T15:36:18.384711Z node 8 :CMS_CONFIGS NOTICE: log_settings_configurator.cpp:203: TLogSettingsConfigurator: Sampling priority for the component KQP_COMPILE_ACTOR has been changed from WARN to DEBUG
2026-01-08T15:36:18.384744Z node 8 :CMS_CONFIGS NOTICE: log_settings_configurator.cpp:192: TLogSettingsConfigurator: Priority for the component KQP_RESOURCE_MANAGER has been changed from WARN to NOTICE
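The TLogSettingsConfigurator records above show per-component log priorities being driven by a console config change (priority WARN -> NOTICE, sampling priority WARN -> DEBUG). As a sketch of the log-config entry that would request such a change, assuming the LogConfig/Entry field names from kikimr's config proto and the NActors priority scale (4 = WARN, 5 = NOTICE, 7 = DEBUG); treat the field names and values as assumptions, not as output of this test:

    LogConfig {
      Entry {
        Component: "KQP_PROXY"
        Level: 5           # NOTICE, matching the "WARN to NOTICE" line above (assumed field name)
        SamplingLevel: 7   # DEBUG, matching the "WARN to DEBUG" sampling line above (assumed field name)
      }
    }
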
2026-01-08T15:36:18.384776Z node 8 :CMS_CONFIGS NOTICE: log_settings_configurator.cpp:203: TLogSettingsConfigurator: Sampling priority for the component KQP_RESOURCE_MANAGER has been changed from WARN to DEBUG 2026-01-08T15:36:18.384809Z node 8 :CMS_CONFIGS NOTICE: log_settings_configurator.cpp:192: TLogSettingsConfigurator: Priority for the component TABLET_RESOURCE_BROKER has been changed from WARN to NOTICE 2026-01-08T15:36:18.384840Z node 8 :CMS_CONFIGS NOTICE: log_settings_configurator.cpp:203: TLogSettingsConfigurator: Sampling priority for the component TABLET_RESOURCE_BROKER has been changed from WARN to DEBUG 2026-01-08T15:36:18.384870Z node 8 :CMS_CONFIGS NOTICE: log_settings_configurator.cpp:192: TLogSettingsConfigurator: Priority for the component KQP_BLOBS_STORAGE has been changed from WARN to NOTICE 2026-01-08T15:36:18.384901Z node 8 :CMS_CONFIGS NOTICE: log_settings_configurator.cpp:203: TLogSettingsConfigurator: Sampling priority for the component KQP_BLOBS_STORAGE has been changed from WARN to DEBUG 2026-01-08T15:36:18.384933Z node 8 :CMS_CONFIGS NOTICE: log_settings_configurator.cpp:192: TLogSettingsConfigurator: Priority for the component KQP_COMPILE_REQUEST has been changed from WARN to NOTICE 2026-01-08T15:36:18.384962Z node 8 :CMS_CONFIGS NOTICE: log_settings_configurator.cpp:203: TLogSettingsConfigurator: Sampling priority for the component KQP_COMPILE_REQUEST has been changed from WARN to DEBUG 2026-01-08T15:36:18.384991Z node 8 :CMS_CONFIGS NOTICE: log_settings_configurator.cpp:192: TLogSettingsConfigurator: Priority for the component KQP_NODE has been changed from WARN to NOTICE 2026-01-08T15:36:18.385023Z node 8 :CMS_CONFIGS NOTICE: log_settings_configurator.cpp:203: TLogSettingsConfigurator: Sampling priority for the component KQP_NODE has been changed from WARN to DEBUG 2026-01-08T15:36:18.385052Z node 8 :CMS_CONFIGS NOTICE: log_settings_configurator.cpp:192: TLogSettingsConfigurator: Priority for the component KQP_LOAD_TEST has been changed from WARN to NOTICE 2026-01-08T15:36:18.385080Z node 8 :CMS_CONFIGS NOTICE: log_settings_configurator.cpp:203: TLogSettingsConfigurator: Sampling priority for the component KQP_LOAD_TEST has been changed from WARN to DEBUG 2026-01-08T15:36:18.385112Z node 8 :CMS_CONFIGS NOTICE: log_settings_configurator.cpp:192: TLogSettingsConfigurator: Priority for the component KQP_SESSION has been changed from WARN to NOTICE 2026-01-08T15:36:18.385146Z node 8 :CMS_CONFIGS NOTICE: log_settings_configurator.cpp:203: TLogSettingsConfigurator: Sampling priority for the component KQP_SESSION has been changed from WARN to DEBUG 2026-01-08T15:36:18.385181Z node 8 :CMS_CONFIGS NOTICE: log_settings_configurator.cpp:192: TLogSettingsConfigurator: Priority for the component KQP_COMPILE_COMPUTATION_PATTERN_SERVICE has been changed from WARN to NOTICE 2026-01-08T15:36:18.385215Z node 8 :CMS_CONFIGS NOTICE: log_settings_configurator.cpp:203: TLogSettingsConfigurator: Sampling priority for the component KQP_COMPILE_COMPUTATION_PATTERN_SERVICE has been changed from WARN to DEBUG 2026-01-08T15:36:18.385244Z node 8 :CMS_CONFIGS NOTICE: log_settings_con ... 
026-01-08T15:36:21.847724Z node 11 :CMS_CONFIGS NOTICE: log_settings_configurator.cpp:203: TLogSettingsConfigurator: Sampling priority for the component BS_VDISK_BALANCING has been changed from WARN to ALERT 2026-01-08T15:36:21.847751Z node 11 :CMS_CONFIGS NOTICE: log_settings_configurator.cpp:209: TLogSettingsConfigurator: Sampling rate for the component BS_VDISK_BALANCING has been changed from 0 to 10 2026-01-08T15:36:21.847780Z node 11 :CMS_CONFIGS NOTICE: log_settings_configurator.cpp:192: TLogSettingsConfigurator: Priority for the component BS_PROXY_GETBLOCK has been changed from WARN to ALERT 2026-01-08T15:36:21.847811Z node 11 :CMS_CONFIGS NOTICE: log_settings_configurator.cpp:203: TLogSettingsConfigurator: Sampling priority for the component BS_PROXY_GETBLOCK has been changed from WARN to ALERT 2026-01-08T15:36:21.847838Z node 11 :CMS_CONFIGS NOTICE: log_settings_configurator.cpp:209: TLogSettingsConfigurator: Sampling rate for the component BS_PROXY_GETBLOCK has been changed from 0 to 10 2026-01-08T15:36:21.847866Z node 11 :CMS_CONFIGS NOTICE: log_settings_configurator.cpp:192: TLogSettingsConfigurator: Priority for the component BS_SHRED has been changed from WARN to ALERT 2026-01-08T15:36:21.847892Z node 11 :CMS_CONFIGS NOTICE: log_settings_configurator.cpp:203: TLogSettingsConfigurator: Sampling priority for the component BS_SHRED has been changed from WARN to ALERT 2026-01-08T15:36:21.847919Z node 11 :CMS_CONFIGS NOTICE: log_settings_configurator.cpp:209: TLogSettingsConfigurator: Sampling rate for the component BS_SHRED has been changed from 0 to 10 2026-01-08T15:36:21.847949Z node 11 :CMS_CONFIGS NOTICE: log_settings_configurator.cpp:192: TLogSettingsConfigurator: Priority for the component BS_PROXY_CHECKINTEGRITY has been changed from WARN to ALERT 2026-01-08T15:36:21.847976Z node 11 :CMS_CONFIGS NOTICE: log_settings_configurator.cpp:203: TLogSettingsConfigurator: Sampling priority for the component BS_PROXY_CHECKINTEGRITY has been changed from WARN to ALERT 2026-01-08T15:36:21.848001Z node 11 :CMS_CONFIGS NOTICE: log_settings_configurator.cpp:209: TLogSettingsConfigurator: Sampling rate for the component BS_PROXY_CHECKINTEGRITY has been changed from 0 to 10 2026-01-08T15:36:21.848032Z node 11 :CMS_CONFIGS NOTICE: log_settings_configurator.cpp:192: TLogSettingsConfigurator: Priority for the component BS_PROXY_BRIDGE has been changed from WARN to ALERT 2026-01-08T15:36:21.848079Z node 11 :CMS_CONFIGS NOTICE: log_settings_configurator.cpp:203: TLogSettingsConfigurator: Sampling priority for the component BS_PROXY_BRIDGE has been changed from WARN to ALERT 2026-01-08T15:36:21.848108Z node 11 :CMS_CONFIGS NOTICE: log_settings_configurator.cpp:209: TLogSettingsConfigurator: Sampling rate for the component BS_PROXY_BRIDGE has been changed from 0 to 10 2026-01-08T15:36:21.848139Z node 11 :CMS_CONFIGS NOTICE: log_settings_configurator.cpp:192: TLogSettingsConfigurator: Priority for the component BS_CLUSTER_BALANCING has been changed from WARN to ALERT 2026-01-08T15:36:21.848172Z node 11 :CMS_CONFIGS NOTICE: log_settings_configurator.cpp:203: TLogSettingsConfigurator: Sampling priority for the component BS_CLUSTER_BALANCING has been changed from WARN to ALERT 2026-01-08T15:36:21.848199Z node 11 :CMS_CONFIGS NOTICE: log_settings_configurator.cpp:209: TLogSettingsConfigurator: Sampling rate for the component BS_CLUSTER_BALANCING has been changed from 0 to 10 2026-01-08T15:36:21.848229Z node 11 :CMS_CONFIGS NOTICE: log_settings_configurator.cpp:192: TLogSettingsConfigurator: Priority 
for the component BS_BRIDGE_SYNC has been changed from WARN to ALERT 2026-01-08T15:36:21.848259Z node 11 :CMS_CONFIGS NOTICE: log_settings_configurator.cpp:203: TLogSettingsConfigurator: Sampling priority for the component BS_BRIDGE_SYNC has been changed from WARN to ALERT 2026-01-08T15:36:21.848287Z node 11 :CMS_CONFIGS NOTICE: log_settings_configurator.cpp:209: TLogSettingsConfigurator: Sampling rate for the component BS_BRIDGE_SYNC has been changed from 0 to 10 2026-01-08T15:36:21.848319Z node 11 :CMS_CONFIGS NOTICE: log_settings_configurator.cpp:192: TLogSettingsConfigurator: Priority for the component BS_PHANTOM_FLAG_STORAGE has been changed from WARN to ALERT 2026-01-08T15:36:21.848348Z node 11 :CMS_CONFIGS NOTICE: log_settings_configurator.cpp:203: TLogSettingsConfigurator: Sampling priority for the component BS_PHANTOM_FLAG_STORAGE has been changed from WARN to ALERT 2026-01-08T15:36:21.848376Z node 11 :CMS_CONFIGS NOTICE: log_settings_configurator.cpp:209: TLogSettingsConfigurator: Sampling rate for the component BS_PHANTOM_FLAG_STORAGE has been changed from 0 to 10 2026-01-08T15:36:21.848412Z node 11 :CMS_CONFIGS NOTICE: log_settings_configurator.cpp:192: TLogSettingsConfigurator: Priority for the component LDAP_AUTH_PROVIDER has been changed from WARN to ALERT 2026-01-08T15:36:21.848446Z node 11 :CMS_CONFIGS NOTICE: log_settings_configurator.cpp:203: TLogSettingsConfigurator: Sampling priority for the component LDAP_AUTH_PROVIDER has been changed from WARN to ALERT 2026-01-08T15:36:21.848477Z node 11 :CMS_CONFIGS NOTICE: log_settings_configurator.cpp:209: TLogSettingsConfigurator: Sampling rate for the component LDAP_AUTH_PROVIDER has been changed from 0 to 10 2026-01-08T15:36:21.848507Z node 11 :CMS_CONFIGS NOTICE: log_settings_configurator.cpp:192: TLogSettingsConfigurator: Priority for the component SASL_AUTH has been changed from WARN to ALERT 2026-01-08T15:36:21.848540Z node 11 :CMS_CONFIGS NOTICE: log_settings_configurator.cpp:203: TLogSettingsConfigurator: Sampling priority for the component SASL_AUTH has been changed from WARN to ALERT 2026-01-08T15:36:21.848568Z node 11 :CMS_CONFIGS NOTICE: log_settings_configurator.cpp:209: TLogSettingsConfigurator: Sampling rate for the component SASL_AUTH has been changed from 0 to 10 2026-01-08T15:36:21.848598Z node 11 :CMS_CONFIGS NOTICE: log_settings_configurator.cpp:192: TLogSettingsConfigurator: Priority for the component GROUPED_MEMORY_LIMITER has been changed from WARN to ALERT 2026-01-08T15:36:21.848628Z node 11 :CMS_CONFIGS NOTICE: log_settings_configurator.cpp:203: TLogSettingsConfigurator: Sampling priority for the component GROUPED_MEMORY_LIMITER has been changed from WARN to ALERT 2026-01-08T15:36:21.848669Z node 11 :CMS_CONFIGS NOTICE: log_settings_configurator.cpp:209: TLogSettingsConfigurator: Sampling rate for the component GROUPED_MEMORY_LIMITER has been changed from 0 to 10 2026-01-08T15:36:21.848706Z node 11 :CMS_CONFIGS NOTICE: log_settings_configurator.cpp:192: TLogSettingsConfigurator: Priority for the component DATA_INTEGRITY has been changed from WARN to ALERT 2026-01-08T15:36:21.848735Z node 11 :CMS_CONFIGS NOTICE: log_settings_configurator.cpp:203: TLogSettingsConfigurator: Sampling priority for the component DATA_INTEGRITY has been changed from WARN to ALERT 2026-01-08T15:36:21.848773Z node 11 :CMS_CONFIGS NOTICE: log_settings_configurator.cpp:209: TLogSettingsConfigurator: Sampling rate for the component DATA_INTEGRITY has been changed from 0 to 10 2026-01-08T15:36:21.848827Z node 11 :CMS_CONFIGS NOTICE: 
log_settings_configurator.cpp:192: TLogSettingsConfigurator: Priority for the component TX_PRIORITIES_QUEUE has been changed from WARN to ALERT 2026-01-08T15:36:21.848867Z node 11 :CMS_CONFIGS NOTICE: log_settings_configurator.cpp:203: TLogSettingsConfigurator: Sampling priority for the component TX_PRIORITIES_QUEUE has been changed from WARN to ALERT 2026-01-08T15:36:21.848896Z node 11 :CMS_CONFIGS NOTICE: log_settings_configurator.cpp:209: TLogSettingsConfigurator: Sampling rate for the component TX_PRIORITIES_QUEUE has been changed from 0 to 10 2026-01-08T15:36:21.848924Z node 11 :CMS_CONFIGS NOTICE: log_settings_configurator.cpp:192: TLogSettingsConfigurator: Priority for the component BSCONFIG has been changed from WARN to ALERT 2026-01-08T15:36:21.848951Z node 11 :CMS_CONFIGS NOTICE: log_settings_configurator.cpp:203: TLogSettingsConfigurator: Sampling priority for the component BSCONFIG has been changed from WARN to ALERT 2026-01-08T15:36:21.848976Z node 11 :CMS_CONFIGS NOTICE: log_settings_configurator.cpp:209: TLogSettingsConfigurator: Sampling rate for the component BSCONFIG has been changed from 0 to 10 2026-01-08T15:36:21.849005Z node 11 :CMS_CONFIGS NOTICE: log_settings_configurator.cpp:192: TLogSettingsConfigurator: Priority for the component NAMESERVICE has been changed from WARN to ALERT 2026-01-08T15:36:21.849033Z node 11 :CMS_CONFIGS NOTICE: log_settings_configurator.cpp:203: TLogSettingsConfigurator: Sampling priority for the component NAMESERVICE has been changed from WARN to ALERT 2026-01-08T15:36:21.849062Z node 11 :CMS_CONFIGS NOTICE: log_settings_configurator.cpp:209: TLogSettingsConfigurator: Sampling rate for the component NAMESERVICE has been changed from 0 to 10 2026-01-08T15:36:21.849129Z node 11 :CMS_CONFIGS NOTICE: log_settings_configurator.cpp:192: TLogSettingsConfigurator: Priority for the component BRIDGE has been changed from WARN to ALERT 2026-01-08T15:36:21.849180Z node 11 :CMS_CONFIGS NOTICE: log_settings_configurator.cpp:203: TLogSettingsConfigurator: Sampling priority for the component BRIDGE has been changed from WARN to ALERT 2026-01-08T15:36:21.849211Z node 11 :CMS_CONFIGS NOTICE: log_settings_configurator.cpp:209: TLogSettingsConfigurator: Sampling rate for the component BRIDGE has been changed from 0 to 10 2026-01-08T15:36:21.849242Z node 11 :CMS_CONFIGS NOTICE: log_settings_configurator.cpp:192: TLogSettingsConfigurator: Priority for the component TRANSFER has been changed from WARN to ALERT 2026-01-08T15:36:21.849272Z node 11 :CMS_CONFIGS NOTICE: log_settings_configurator.cpp:203: TLogSettingsConfigurator: Sampling priority for the component TRANSFER has been changed from WARN to ALERT 2026-01-08T15:36:21.849298Z node 11 :CMS_CONFIGS NOTICE: log_settings_configurator.cpp:209: TLogSettingsConfigurator: Sampling rate for the component TRANSFER has been changed from 0 to 10 2026-01-08T15:36:21.849330Z node 11 :CMS_CONFIGS NOTICE: log_settings_configurator.cpp:192: TLogSettingsConfigurator: Priority for the component TOKEN_MANAGER has been changed from WARN to ALERT 2026-01-08T15:36:21.849357Z node 11 :CMS_CONFIGS NOTICE: log_settings_configurator.cpp:203: TLogSettingsConfigurator: Sampling priority for the component TOKEN_MANAGER has been changed from WARN to ALERT 2026-01-08T15:36:21.849382Z node 11 :CMS_CONFIGS NOTICE: log_settings_configurator.cpp:209: TLogSettingsConfigurator: Sampling rate for the component TOKEN_MANAGER has been changed from 0 to 10 2026-01-08T15:36:21.849418Z node 11 :CMS_CONFIGS NOTICE: log_settings_configurator.cpp:192: 
TLogSettingsConfigurator: Priority for the component LOCAL_DB_BACKUP has been changed from WARN to ALERT 2026-01-08T15:36:21.849447Z node 11 :CMS_CONFIGS NOTICE: log_settings_configurator.cpp:203: TLogSettingsConfigurator: Sampling priority for the component LOCAL_DB_BACKUP has been changed from WARN to ALERT 2026-01-08T15:36:21.849473Z node 11 :CMS_CONFIGS NOTICE: log_settings_configurator.cpp:209: TLogSettingsConfigurator: Sampling rate for the component LOCAL_DB_BACKUP has been changed from 0 to 10 2026-01-08T15:36:21.849516Z node 11 :CMS_CONFIGS NOTICE: log_settings_configurator.cpp:192: TLogSettingsConfigurator: Priority for the component SCHEMA_SECRET_CACHE has been changed from WARN to ALERT 2026-01-08T15:36:21.849552Z node 11 :CMS_CONFIGS NOTICE: log_settings_configurator.cpp:203: TLogSettingsConfigurator: Sampling priority for the component SCHEMA_SECRET_CACHE has been changed from WARN to ALERT 2026-01-08T15:36:21.849581Z node 11 :CMS_CONFIGS NOTICE: log_settings_configurator.cpp:209: TLogSettingsConfigurator: Sampling rate for the component SCHEMA_SECRET_CACHE has been changed from 0 to 10 2026-01-08T15:36:21.849707Z node 11 :CMS_CONFIGS TRACE: log_settings_configurator.cpp:100: TLogSettingsConfigurator: Send TEvConfigNotificationResponse: SubscriptionId: 0 ConfigId { } ... waiting for config update (done) |88.4%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_view/unittest |88.4%| [TM] {BAZEL_UPLOAD} ydb/core/cms/console/ut_console/unittest |88.4%| [TM] {BAZEL_UPLOAD} ydb/core/tx/schemeshard/ut_view/unittest >> TCacheTest::WatchRoot >> TSchemeShardViewTest::AsyncDropSameView [GOOD] >> TUserAccountServiceTest::Get [GOOD] >> TSchemeShardViewTest::EmptyQueryText >> TSchemeShardViewTest::AsyncCreateSameView [GOOD] >> TCacheTest::RacyRecreateAndSync [GOOD] >> TCacheTest::RacyCreateAndSync >> TSchemeShardViewTest::CreateView [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/library/ycloud/impl/ut/unittest >> FolderServiceTest::TFolderService [GOOD] Test command err: 2026-01-08T15:36:18.705105Z node 1 :METADATA_PROVIDER WARN: log.cpp:841: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7593015037131212029:2151];send_to=[0:7307199536658146131:7762515]; 2026-01-08T15:36:18.705426Z node 1 :METADATA_PROVIDER ERROR: log.cpp:841: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2026-01-08T15:36:19.114060Z node 1 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: LookupError, path: Root/.metadata/script_executions 2026-01-08T15:36:19.116496Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2026-01-08T15:36:19.116616Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2026-01-08T15:36:19.177250Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2026-01-08T15:36:19.262174Z node 1 :IMPORT WARN: schemeshard_import.cpp:440: Table profiles were not loaded 2026-01-08T15:36:19.352233Z node 1 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: PathErrorUnknown, path: 
Root/.metadata/script_executions 2026-01-08T15:36:19.527400Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2026-01-08T15:36:19.535196Z node 1 :GRPC_CLIENT DEBUG: grpc_service_client.h:83: [7d34c2d8edd0] Connect to grpc://localhost:20757 2026-01-08T15:36:19.561195Z node 1 :GRPC_CLIENT DEBUG: grpc_service_client.h:122: [7d34c2d8edd0] Request ResolveFoldersRequest { folder_ids: "i_am_not_exists" } 2026-01-08T15:36:19.600675Z node 1 :GRPC_CLIENT DEBUG: grpc_service_client.h:111: [7d34c2d8edd0] Status 14 failed to connect to all addresses; last error: UNKNOWN: ipv4:127.0.0.1:20757: Failed to connect to remote host: Connection refused 2026-01-08T15:36:19.610756Z node 1 :GRPC_CLIENT DEBUG: grpc_service_client.h:122: [7d34c2d8edd0] Request ResolveFoldersRequest { folder_ids: "i_am_not_exists" } 2026-01-08T15:36:19.611373Z node 1 :GRPC_CLIENT DEBUG: grpc_service_client.h:111: [7d34c2d8edd0] Status 14 failed to connect to all addresses; last error: UNKNOWN: ipv4:127.0.0.1:20757: Failed to connect to remote host: Connection refused 2026-01-08T15:36:19.714290Z node 1 :TX_CONVEYOR ERROR: log.cpp:841: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2026-01-08T15:36:20.617419Z node 1 :GRPC_CLIENT DEBUG: grpc_service_client.h:122: [7d34c2d8edd0] Request ResolveFoldersRequest { folder_ids: "i_am_not_exists" } 2026-01-08T15:36:20.634513Z node 1 :GRPC_CLIENT DEBUG: grpc_service_client.h:111: [7d34c2d8edd0] Status 5 Not Found 2026-01-08T15:36:20.642698Z node 1 :GRPC_CLIENT DEBUG: grpc_service_client.h:122: [7d34c2d8edd0] Request ResolveFoldersRequest { folder_ids: "i_am_exists" } 2026-01-08T15:36:20.655753Z node 1 :GRPC_CLIENT DEBUG: grpc_service_client.h:109: [7d34c2d8edd0] Response ResolveFoldersResponse { resolved_folders { cloud_id: "response_cloud_id" } } |88.4%| [TM] {BAZEL_UPLOAD} ydb/library/ycloud/impl/ut/unittest >> TConsoleConfigSubscriptionTests::TestNotificationForRestartedServer [GOOD] >> TConsoleConfigTests::TestAddConfigItem >> TCacheTest::WatchRoot [GOOD] >> TCacheTestWithDrops::LookupErrorUponEviction ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_view/unittest >> TSchemeShardViewTest::AsyncDropSameView [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:119:2058] recipient: [1:113:2143] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:119:2058] recipient: [1:113:2143] Leader for TabletID 72057594046678944 is [1:130:2154] sender: [1:131:2058] recipient: [1:113:2143] 2026-01-08T15:36:21.964398Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7938: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2026-01-08T15:36:21.964514Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7966: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2026-01-08T15:36:21.964563Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7852: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, 
StatsMaxExecuteTime# 0.010000s 2026-01-08T15:36:21.964602Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7868: OperationsProcessing config: using default configuration 2026-01-08T15:36:21.964645Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7874: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2026-01-08T15:36:21.964674Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7874: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2026-01-08T15:36:21.964747Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7998: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2026-01-08T15:36:21.964830Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_shred_manager.cpp:39: [RootShredManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, ShredInterval# 604800.000000s, ShredBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2026-01-08T15:36:21.965737Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:8069: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2026-01-08T15:36:21.966047Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2026-01-08T15:36:22.081153Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7755: Cannot subscribe to console configs 2026-01-08T15:36:22.081208Z node 1 :IMPORT WARN: schemeshard_import.cpp:440: Table profiles were not loaded 2026-01-08T15:36:22.110648Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2026-01-08T15:36:22.111001Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2026-01-08T15:36:22.111197Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2026-01-08T15:36:22.128843Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2026-01-08T15:36:22.129142Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:464: Clear TempDirsState with owners number: 0 2026-01-08T15:36:22.129878Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1383: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2026-01-08T15:36:22.130132Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2026-01-08T15:36:22.139154Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:160: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2026-01-08T15:36:22.139388Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_shred_manager.cpp:84: [RootShredManager] Stop 2026-01-08T15:36:22.140765Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2026-01-08T15:36:22.140829Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2026-01-08T15:36:22.140948Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2026-01-08T15:36:22.140991Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not 
2026-01-08T15:36:22.141031Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:213: TTxServerlessStorageBilling.Complete
2026-01-08T15:36:22.141215Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:7111: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944
2026-01-08T15:36:22.326812Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2214] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot" OperationType: ESchemeOpMkDir MkDir { Name: ".sys" } Internal: true FailOnExist: false } TxId: 281474976710657 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000"
2026-01-08T15:36:22.331133Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2214] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "auth_permissions" Type: EAuthPermissions } } TxId: 281474976710658 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000"
2026-01-08T15:36:22.331340Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2214] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "auth_effective_permissions" Type: EAuthEffectivePermissions } } TxId: 281474976710659 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000"
2026-01-08T15:36:22.331437Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2214] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "auth_users" Type: EAuthUsers } } TxId: 281474976710660 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000"
2026-01-08T15:36:22.331550Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2214] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "pg_tables" Type: EPgTables } } TxId: 281474976710661 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000"
2026-01-08T15:36:22.331625Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2214] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "ds_pdisks" Type: EPDisks } } TxId: 281474976710662 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000"
2026-01-08T15:36:22.331734Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2214] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "auth_group_members" Type: EAuthGroupMembers } } TxId: 281474976710663 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000"
2026-01-08T15:36:22.331844Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2214] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "top_partitions_one_minute" Type: ETopPartitionsByCpuOneMinute } } TxId: 281474976710664 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000"
2026-01-08T15:36:22.331904Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2214] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "top_queries_by_request_units_one_hour" Type: ETopQueriesByRequestUnitsOneHour } } TxId: 281474976710665 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000"
2026-01-08T15:36:22.331963Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2214] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "ds_vslots" Type: EVSlots } } TxId: 281474976710666 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000"
2026-01-08T15:36:22.332008Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2214] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "top_partitions_by_tli_one_hour" Type: ETopPartitionsByTliOneHour } } TxId: 281474976710667 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000"
2026-01-08T15:36:22.332042Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2214] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "nodes" Type: ENodes } } TxId: 281474976710668 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000"
2026-01-08T15:36:22.332116Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2214] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "top_queries_by_cpu_time_one_hour" Type: ETopQueriesByCpuTimeOneHour } } TxId: 281474976710669 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000"
2026-01-08T15:36:22.332165Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2214] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "top_queries_by_read_bytes_one_hour" Type: ETopQueriesByReadBytesOneHour } } TxId: 281474976710670 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000"
2026-01-08T15:36:22.332245Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2214] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "top_queries_by_read_bytes_one_minute" Type: ETopQueriesByR ... d [OwnerId: 72057594046678944, LocalPathId: 1] was 2
2026-01-08T15:36:23.299086Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:600: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 38] was 1
2026-01-08T15:36:23.301340Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944
2026-01-08T15:36:23.301406Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 102, path id: [OwnerId: 72057594046678944, LocalPathId: 1]
2026-01-08T15:36:23.301594Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 102, path id: [OwnerId: 72057594046678944, LocalPathId: 38]
2026-01-08T15:36:23.301734Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944
2026-01-08T15:36:23.301778Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:210:2210], at schemeshard: 72057594046678944, txId: 102, path id: 1
2026-01-08T15:36:23.301819Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:210:2210], at schemeshard: 72057594046678944, txId: 102, path id: 38
FAKE_COORDINATOR: Erasing txId 102
2026-01-08T15:36:23.302194Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:494: TTxOperationProgress Execute, operationId: 102:0, at schemeshard: 72057594046678944
2026-01-08T15:36:23.302237Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:497: [72057594046678944] TDone opId# 102:0 ProgressState
2026-01-08T15:36:23.302363Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:928: Part operation is done id#102:0 progress is 1/1
2026-01-08T15:36:23.302398Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1714: TOperation IsReadyToDone TxId: 102 ready parts: 1/1
2026-01-08T15:36:23.302432Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:928: Part operation is done id#102:0 progress is 1/1
2026-01-08T15:36:23.302476Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1714: TOperation IsReadyToDone TxId: 102 ready parts: 1/1
2026-01-08T15:36:23.302522Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1679: TOperation IsReadyToNotify, TxId: 102, ready parts: 1/1, is published: false
2026-01-08T15:36:23.302574Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1714: TOperation IsReadyToDone TxId: 102 ready parts: 1/1
2026-01-08T15:36:23.302626Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:995: Operation and all the parts is done, operation id: 102:0
2026-01-08T15:36:23.302660Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5577: RemoveTx for txid 102:0
2026-01-08T15:36:23.302732Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:611: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 38] was 2
2026-01-08T15:36:23.302776Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:1004: Publication still in progress, tx: 102, publications: 2, subscribers: 0
2026-01-08T15:36:23.302810Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:1011: Publication details: tx: 102, [OwnerId: 72057594046678944, LocalPathId: 1], 8
2026-01-08T15:36:23.302842Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:1011: Publication details: tx: 102, [OwnerId: 72057594046678944, LocalPathId: 38], 18446744073709551615
2026-01-08T15:36:23.303595Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6274: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 8 PathOwnerId: 72057594046678944, cookie: 102
2026-01-08T15:36:23.303722Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 8 PathOwnerId: 72057594046678944, cookie: 102
2026-01-08T15:36:23.303763Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 2, at schemeshard: 72057594046678944, txId: 102
2026-01-08T15:36:23.303800Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 102, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], version: 8
2026-01-08T15:36:23.303862Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:611: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 3
2026-01-08T15:36:23.304805Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6274: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 38 Version: 18446744073709551615 PathOwnerId: 72057594046678944, cookie: 102
2026-01-08T15:36:23.304886Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 38 Version: 18446744073709551615 PathOwnerId: 72057594046678944, cookie: 102
2026-01-08T15:36:23.304910Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046678944, txId: 102
2026-01-08T15:36:23.304937Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 102, pathId: [OwnerId: 72057594046678944, LocalPathId: 38], version: 18446744073709551615
2026-01-08T15:36:23.304974Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:611: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 38] was 1
2026-01-08T15:36:23.305064Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 102, subscribers: 0
2026-01-08T15:36:23.305976Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:123: TTxCleanDroppedPaths Execute, 1 paths in candidate queue, at schemeshard: 72057594046678944
2026-01-08T15:36:23.306029Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:137: TTxCleanDroppedPaths: PersistRemovePath for PathId# [OwnerId: 72057594046678944, LocalPathId: 38], at schemeshard: 72057594046678944
2026-01-08T15:36:23.306102Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:611: DecrementPathDbRefCount reason remove path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 2
2026-01-08T15:36:23.308303Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 102
2026-01-08T15:36:23.310123Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 102
2026-01-08T15:36:23.310257Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:160: TTxCleanDroppedPaths Complete, done PersistRemovePath for 1 paths, skipped 0, left 0 candidates, at schemeshard: 72057594046678944
TestModificationResult got TxId: 102, wait until txId: 102
TestModificationResults wait txId: 103
TestModificationResult got TxId: 103, wait until txId: 103
TestModificationResults wait txId: 104
TestModificationResult got TxId: 104, wait until txId: 104
TestWaitNotification wait txId: 102
2026-01-08T15:36:23.310637Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:213: tests -- TTxNotificationSubscriber for txId 102: send EvNotifyTxCompletion
2026-01-08T15:36:23.310731Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:259: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 102
TestWaitNotification wait txId: 103
2026-01-08T15:36:23.310823Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:213: tests -- TTxNotificationSubscriber for txId 103: send EvNotifyTxCompletion
2026-01-08T15:36:23.310843Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:259: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 103
TestWaitNotification wait txId: 104
2026-01-08T15:36:23.310885Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:213: tests -- TTxNotificationSubscriber for txId 104: send EvNotifyTxCompletion
2026-01-08T15:36:23.310903Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:259: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 104
2026-01-08T15:36:23.311448Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:123: NotifyTxCompletion, unknown transaction, txId: 102, at schemeshard: 72057594046678944
2026-01-08T15:36:23.311584Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:123: NotifyTxCompletion, unknown transaction, txId: 103, at schemeshard: 72057594046678944
2026-01-08T15:36:23.311630Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:230: tests -- TTxNotificationSubscriber for txId 102: got EvNotifyTxCompletionResult
2026-01-08T15:36:23.311684Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:239: tests -- TTxNotificationSubscriber for txId 102: satisfy waiter [1:731:2720]
2026-01-08T15:36:23.311802Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:230: tests -- TTxNotificationSubscriber for txId 103: got EvNotifyTxCompletionResult
2026-01-08T15:36:23.311826Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:239: tests -- TTxNotificationSubscriber for txId 103: satisfy waiter [1:731:2720]
2026-01-08T15:36:23.311953Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:123: NotifyTxCompletion, unknown transaction, txId: 104, at schemeshard: 72057594046678944
2026-01-08T15:36:23.312076Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:230: tests -- TTxNotificationSubscriber for txId 104: got EvNotifyTxCompletionResult
2026-01-08T15:36:23.312099Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:239: tests -- TTxNotificationSubscriber for txId 104: satisfy waiter [1:731:2720]
TestWaitNotification: OK eventTxId 102
TestWaitNotification: OK eventTxId 103
TestWaitNotification: OK eventTxId 104
2026-01-08T15:36:23.312588Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/MyView" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944
2026-01-08T15:36:23.312787Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/MyView" took 236us result status StatusPathDoesNotExist
2026-01-08T15:36:23.312974Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusPathDoesNotExist Reason: "Check failed: path: \'/MyRoot/MyView\', error: path hasn\'t been resolved, nearest resolved path: \'/MyRoot\' (id: [OwnerId: 72057594046678944, LocalPathId: 1])" Path: "/MyRoot/MyView" PathId: 18446744073709551615 LastExistedPrefixPath: "/MyRoot" LastExistedPrefixPathId: 1 LastExistedPrefixDescription { Self { Name: "MyRoot" PathId: 1 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 5000037 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathSubType: EPathSubTypeEmpty ChildrenExist: true } } PathOwnerId: 18446744073709551615, at schemeshard: 72057594046678944
------- [TM] {asan, default-linux-x86_64, release} ydb/library/ycloud/impl/ut/unittest >> TUserAccountServiceTest::Get [GOOD]
Test command err:
2026-01-08T15:36:18.653517Z node 1 :METADATA_PROVIDER WARN: log.cpp:841: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7593015036035703620:2079];send_to=[0:7307199536658146131:7762515];
2026-01-08T15:36:18.655329Z node 1 :METADATA_PROVIDER ERROR: log.cpp:841: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message;
2026-01-08T15:36:19.023126Z node 1 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: LookupError, path: Root/.metadata/script_executions
2026-01-08T15:36:19.074665Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected
2026-01-08T15:36:19.074781Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting
2026-01-08T15:36:19.133802Z node 1 :IMPORT WARN: schemeshard_import.cpp:440: Table profiles were not loaded
2026-01-08T15:36:19.156101Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected
2026-01-08T15:36:19.222726Z node 1 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: PathErrorUnknown, path: Root/.metadata/script_executions
2026-01-08T15:36:19.463483Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311)
2026-01-08T15:36:19.470018Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:123: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480
|88.4%| [TM] {BAZEL_UPLOAD} ydb/core/tx/schemeshard/ut_view/unittest
|88.4%| [TM] {BAZEL_UPLOAD} ydb/library/ycloud/impl/ut/unittest
>> TCacheTest::RacyCreateAndSync [GOOD]
>> TSchemeShardViewTest::DropView [GOOD]
>> TConsoleInMemoryConfigSubscriptionTests::TestSubscriptionClientManyUpdatesAddRemove [GOOD]
>> TConsoleInMemoryConfigSubscriptionTests::TestSubscriptionClientDeadCausesSubscriptionDeregistration
>> TConsoleTests::TestCreateTenantWrongName [GOOD]
>> TConsoleTests::TestCreateTenantWrongNameExtSubdomain
------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_view/unittest >> TSchemeShardViewTest::AsyncCreateSameView [GOOD]
Test command err:
Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2143]
IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2143]
Leader for TabletID 72057594046678944 is [1:129:2154] sender: [1:130:2058] recipient: [1:112:2143]
2026-01-08T15:36:22.287304Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7938: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1
2026-01-08T15:36:22.287436Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7966: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10
2026-01-08T15:36:22.287483Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7852: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s
2026-01-08T15:36:22.287519Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7868: OperationsProcessing config: using default configuration
2026-01-08T15:36:22.287579Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7874: OperationsProcessing config: type TxSplitTablePartition, limit 10000
2026-01-08T15:36:22.287624Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7874: OperationsProcessing config: type TxMergeTablePartition, limit 10000
2026-01-08T15:36:22.287706Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7998: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10
2026-01-08T15:36:22.287777Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_shred_manager.cpp:39: [RootShredManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, ShredInterval# 604800.000000s, ShredBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false
2026-01-08T15:36:22.288722Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:8069: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources#
2026-01-08T15:36:22.289090Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute
2026-01-08T15:36:22.377745Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7755: Cannot subscribe to console configs
2026-01-08T15:36:22.377827Z node 1 :IMPORT WARN: schemeshard_import.cpp:440: Table profiles were not loaded
2026-01-08T15:36:22.390587Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete
2026-01-08T15:36:22.390948Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute
2026-01-08T15:36:22.391148Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944
2026-01-08T15:36:22.405987Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete
2026-01-08T15:36:22.406514Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:464: Clear TempDirsState with owners number: 0
2026-01-08T15:36:22.407303Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1383: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944
2026-01-08T15:36:22.408262Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944
2026-01-08T15:36:22.412382Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:160: TTxInitRoot DoComplete, at schemeshard: 72057594046678944
2026-01-08T15:36:22.412642Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_shred_manager.cpp:84: [RootShredManager] Stop
2026-01-08T15:36:22.413905Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944
2026-01-08T15:36:22.413979Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944
2026-01-08T15:36:22.414097Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute
2026-01-08T15:36:22.414142Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1]
2026-01-08T15:36:22.414246Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:213: TTxServerlessStorageBilling.Complete
2026-01-08T15:36:22.414441Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:7111: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944
2026-01-08T15:36:22.577488Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot" OperationType: ESchemeOpMkDir MkDir { Name: ".sys" } Internal: true FailOnExist: false } TxId: 281474976710657 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000"
2026-01-08T15:36:22.585571Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "auth_permissions" Type: EAuthPermissions } } TxId: 281474976710658 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000"
2026-01-08T15:36:22.585822Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "auth_effective_permissions" Type: EAuthEffectivePermissions } } TxId: 281474976710659 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000"
2026-01-08T15:36:22.585928Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "auth_users" Type: EAuthUsers } } TxId: 281474976710660 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000"
2026-01-08T15:36:22.586030Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "pg_tables" Type: EPgTables } } TxId: 281474976710661 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000"
2026-01-08T15:36:22.586135Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "ds_pdisks" Type: EPDisks } } TxId: 281474976710662 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000"
2026-01-08T15:36:22.586215Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "auth_group_members" Type: EAuthGroupMembers } } TxId: 281474976710663 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000"
2026-01-08T15:36:22.586299Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "top_partitions_one_minute" Type: ETopPartitionsByCpuOneMinute } } TxId: 281474976710664 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000"
2026-01-08T15:36:22.586415Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "top_queries_by_request_units_one_hour" Type: ETopQueriesByRequestUnitsOneHour } } TxId: 281474976710665 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000"
2026-01-08T15:36:22.586501Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "ds_vslots" Type: EVSlots } } TxId: 281474976710666 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000"
2026-01-08T15:36:22.586565Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "top_partitions_by_tli_one_hour" Type: ETopPartitionsByTliOneHour } } TxId: 281474976710667 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000"
2026-01-08T15:36:22.586635Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "nodes" Type: ENodes } } TxId: 281474976710668 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000"
2026-01-08T15:36:22.586725Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "top_queries_by_cpu_time_one_hour" Type: ETopQueriesByCpuTimeOneHour } } TxId: 281474976710669 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000"
2026-01-08T15:36:22.586818Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "top_queries_by_read_bytes_one_hour" Type: ETopQueriesByReadBytesOneHour } } TxId: 281474976710670 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000"
2026-01-08T15:36:22.586898Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "top_queries_by_read_bytes_one_minute" Type: ETopQueriesByR ... EBUG: schemeshard_impl.cpp:600: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 38] was 1
FAKE_COORDINATOR: Erasing txId 101
2026-01-08T15:36:23.710539Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944
2026-01-08T15:36:23.710635Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 101, path id: [OwnerId: 72057594046678944, LocalPathId: 1]
2026-01-08T15:36:23.710879Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 101, path id: [OwnerId: 72057594046678944, LocalPathId: 38]
2026-01-08T15:36:23.711003Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944
2026-01-08T15:36:23.711050Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:210:2211], at schemeshard: 72057594046678944, txId: 101, path id: 1
2026-01-08T15:36:23.711103Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:210:2211], at schemeshard: 72057594046678944, txId: 101, path id: 38
2026-01-08T15:36:23.711636Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:494: TTxOperationProgress Execute, operationId: 101:0, at schemeshard: 72057594046678944
2026-01-08T15:36:23.711722Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:497: [72057594046678944] TDone opId# 101:0 ProgressState
2026-01-08T15:36:23.711832Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:928: Part operation is done id#101:0 progress is 1/1
2026-01-08T15:36:23.711873Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1714: TOperation IsReadyToDone TxId: 101 ready parts: 1/1
2026-01-08T15:36:23.711916Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:928: Part operation is done id#101:0 progress is 1/1
2026-01-08T15:36:23.711957Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1714: TOperation IsReadyToDone TxId: 101 ready parts: 1/1
2026-01-08T15:36:23.712008Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1679: TOperation IsReadyToNotify, TxId: 101, ready parts: 1/1, is published: false
2026-01-08T15:36:23.712053Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1714: TOperation IsReadyToDone TxId: 101 ready parts: 1/1
2026-01-08T15:36:23.712111Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:995: Operation and all the parts is done, operation id: 101:0
2026-01-08T15:36:23.712160Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5577: RemoveTx for txid 101:0
2026-01-08T15:36:23.712264Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:611: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 38] was 2
2026-01-08T15:36:23.712306Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:1004: Publication still in progress, tx: 101, publications: 2, subscribers: 0
2026-01-08T15:36:23.712340Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:1011: Publication details: tx: 101, [OwnerId: 72057594046678944, LocalPathId: 1], 7
2026-01-08T15:36:23.712372Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:1011: Publication details: tx: 101, [OwnerId: 72057594046678944, LocalPathId: 38], 2
2026-01-08T15:36:23.713174Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6274: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 7 PathOwnerId: 72057594046678944, cookie: 101
2026-01-08T15:36:23.713315Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 7 PathOwnerId: 72057594046678944, cookie: 101
2026-01-08T15:36:23.713362Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 2, at schemeshard: 72057594046678944, txId: 101
2026-01-08T15:36:23.713411Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 101, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], version: 7
2026-01-08T15:36:23.713467Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:611: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 3
2026-01-08T15:36:23.728048Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6274: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 38 Version: 2 PathOwnerId: 72057594046678944, cookie: 101
2026-01-08T15:36:23.728253Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 38 Version: 2 PathOwnerId: 72057594046678944, cookie: 101
2026-01-08T15:36:23.728301Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046678944, txId: 101
2026-01-08T15:36:23.728369Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 101, pathId: [OwnerId: 72057594046678944, LocalPathId: 38], version: 2
2026-01-08T15:36:23.728411Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:611: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 38] was 1
2026-01-08T15:36:23.728520Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 101, subscribers: 0
2026-01-08T15:36:23.733891Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 101
2026-01-08T15:36:23.735173Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 101
TestModificationResult got TxId: 101, wait until txId: 101
TestModificationResults wait txId: 102
TestModificationResult got TxId: 102, wait until txId: 102
TestModificationResults wait txId: 103
TestModificationResult got TxId: 103, wait until txId: 103
TestWaitNotification wait txId: 101
2026-01-08T15:36:23.735487Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:213: tests -- TTxNotificationSubscriber for txId 101: send EvNotifyTxCompletion
2026-01-08T15:36:23.735528Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:259: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 101
TestWaitNotification wait txId: 102
2026-01-08T15:36:23.735625Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:213: tests -- TTxNotificationSubscriber for txId 102: send EvNotifyTxCompletion
2026-01-08T15:36:23.735645Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:259: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 102
TestWaitNotification wait txId: 103
2026-01-08T15:36:23.735721Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:213: tests -- TTxNotificationSubscriber for txId 103: send EvNotifyTxCompletion
2026-01-08T15:36:23.735752Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:259: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 103
2026-01-08T15:36:23.736232Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:123: NotifyTxCompletion, unknown transaction, txId: 101, at schemeshard: 72057594046678944
2026-01-08T15:36:23.736418Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:230: tests -- TTxNotificationSubscriber for txId 101: got EvNotifyTxCompletionResult
2026-01-08T15:36:23.736457Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:239: tests -- TTxNotificationSubscriber for txId 101: satisfy waiter [1:703:2693]
2026-01-08T15:36:23.736650Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:123: NotifyTxCompletion, unknown transaction, txId: 102, at schemeshard: 72057594046678944
2026-01-08T15:36:23.736764Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:123: NotifyTxCompletion, unknown transaction, txId: 103, at schemeshard: 72057594046678944
2026-01-08T15:36:23.736815Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:230: tests -- TTxNotificationSubscriber for txId 102: got EvNotifyTxCompletionResult
2026-01-08T15:36:23.736838Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:239: tests -- TTxNotificationSubscriber for txId 102: satisfy waiter [1:703:2693]
2026-01-08T15:36:23.736920Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:230: tests -- TTxNotificationSubscriber for txId 103: got EvNotifyTxCompletionResult
2026-01-08T15:36:23.737000Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:239: tests -- TTxNotificationSubscriber for txId 103: satisfy waiter [1:703:2693]
TestWaitNotification: OK eventTxId 101
TestWaitNotification: OK eventTxId 102
TestWaitNotification: OK eventTxId 103
2026-01-08T15:36:23.737538Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/MyView" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944
2026-01-08T15:36:23.737762Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/MyView" took 248us result status StatusSuccess
2026-01-08T15:36:23.738180Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/MyView" PathDescription { Self { Name: "MyView" PathId: 38 SchemeshardId: 72057594046678944 PathType: EPathTypeView CreateFinished: true CreateTxId: 101 CreateStep: 5000038 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 ViewVersion: 1 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 37 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } ViewDescription { Name: "MyView" PathId { OwnerId: 72057594046678944 LocalId: 38 } Version: 1 QueryText: "Some query" CapturedContext { } } } PathId: 38 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944
|88.4%| [TM] {BAZEL_UPLOAD} ydb/core/tx/schemeshard/ut_view/unittest
>> TSchemeShardViewTest::AsyncCreateDifferentViews [GOOD]
------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_view/unittest >> TSchemeShardViewTest::CreateView [GOOD]
Test command err:
Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2143]
IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2143]
Leader for TabletID 72057594046678944 is [1:129:2154] sender: [1:130:2058] recipient: [1:112:2143]
2026-01-08T15:36:22.878280Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7938: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1
2026-01-08T15:36:22.878388Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7966: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10
2026-01-08T15:36:22.878425Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7852: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s
2026-01-08T15:36:22.878459Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7868: OperationsProcessing config: using default configuration
2026-01-08T15:36:22.878516Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7874: OperationsProcessing config: type TxMergeTablePartition, limit 10000
2026-01-08T15:36:22.878549Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7874: OperationsProcessing config: type TxSplitTablePartition, limit 10000
2026-01-08T15:36:22.878622Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7998: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10
2026-01-08T15:36:22.878715Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_shred_manager.cpp:39: [RootShredManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, ShredInterval# 604800.000000s, ShredBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false
2026-01-08T15:36:22.879623Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:8069: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources#
2026-01-08T15:36:22.879953Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute
2026-01-08T15:36:22.971027Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7755: Cannot subscribe to console configs
2026-01-08T15:36:22.971081Z node 1 :IMPORT WARN: schemeshard_import.cpp:440: Table profiles were not loaded
2026-01-08T15:36:22.983899Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete
2026-01-08T15:36:22.984231Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute
2026-01-08T15:36:22.984465Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944
2026-01-08T15:36:23.005690Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete
2026-01-08T15:36:23.006119Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:464: Clear TempDirsState with owners number: 0
2026-01-08T15:36:23.006848Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1383: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944
2026-01-08T15:36:23.007699Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944
2026-01-08T15:36:23.010888Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:160: TTxInitRoot DoComplete, at schemeshard: 72057594046678944
2026-01-08T15:36:23.011110Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_shred_manager.cpp:84: [RootShredManager] Stop
2026-01-08T15:36:23.012337Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944
2026-01-08T15:36:23.012400Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944
2026-01-08T15:36:23.012523Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute
2026-01-08T15:36:23.012563Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1]
2026-01-08T15:36:23.012660Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:213: TTxServerlessStorageBilling.Complete
2026-01-08T15:36:23.012806Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:7111: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944
2026-01-08T15:36:23.163337Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot" OperationType: ESchemeOpMkDir MkDir { Name: ".sys" } Internal: true FailOnExist: false } TxId: 281474976710657 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000"
2026-01-08T15:36:23.164386Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "auth_permissions" Type: EAuthPermissions } } TxId: 281474976710658 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000"
2026-01-08T15:36:23.164507Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "auth_effective_permissions" Type: EAuthEffectivePermissions } } TxId: 281474976710659 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000"
2026-01-08T15:36:23.164604Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "auth_users" Type: EAuthUsers } } TxId: 281474976710660 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000"
2026-01-08T15:36:23.164674Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "pg_tables" Type: EPgTables } } TxId: 281474976710661 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000"
2026-01-08T15:36:23.164747Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "ds_pdisks" Type: EPDisks } } TxId: 281474976710662 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000"
2026-01-08T15:36:23.164804Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "auth_group_members" Type: EAuthGroupMembers } } TxId: 281474976710663 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000"
2026-01-08T15:36:23.164868Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "top_partitions_one_minute" Type: ETopPartitionsByCpuOneMinute } } TxId: 281474976710664 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000"
2026-01-08T15:36:23.164947Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "top_queries_by_request_units_one_hour" Type: ETopQueriesByRequestUnitsOneHour } } TxId: 281474976710665 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000"
"top_queries_by_request_units_one_hour" Type: ETopQueriesByRequestUnitsOneHour } } TxId: 281474976710665 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:36:23.165063Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "ds_vslots" Type: EVSlots } } TxId: 281474976710666 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:36:23.165130Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "top_partitions_by_tli_one_hour" Type: ETopPartitionsByTliOneHour } } TxId: 281474976710667 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:36:23.165196Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "nodes" Type: ENodes } } TxId: 281474976710668 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:36:23.165280Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "top_queries_by_cpu_time_one_hour" Type: ETopQueriesByCpuTimeOneHour } } TxId: 281474976710669 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:36:23.165382Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "top_queries_by_read_bytes_one_hour" Type: ETopQueriesByReadBytesOneHour } } TxId: 281474976710670 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:36:23.165461Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "top_queries_by_read_bytes_one_minute" Type: ETopQueriesByR ... 
1 :TX_PROXY DEBUG: client.cpp:89: Handle TEvAllocateResult ACCEPTED RangeBegin# 281474976715656 RangeEnd# 281474976720656 txAllocator# 72057594046447617 2026-01-08T15:36:24.218401Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:665: Send tablet strongly msg operationId: 101:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:101 msg type: 269090816 2026-01-08T15:36:24.218543Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1824: TOperation RegisterRelationByTabletId, TxId: 101, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 101 at step: 5000038 FAKE_COORDINATOR: advance: minStep5000038 State->FrontStep: 5000037 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 101 at step: 5000038 2026-01-08T15:36:24.218896Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:685: TTxOperationPlanStep Execute, stepId: 5000038, transactions count in step: 1, at schemeshard: 72057594046678944 2026-01-08T15:36:24.219034Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:689: TTxOperationPlanStep Execute, message: Transactions { TxId: 101 Coordinator: 72057594046316545 AckTo { RawX1: 136 RawX2: 4294969454 } } Step: 5000038 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2026-01-08T15:36:24.219089Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_create_view.cpp:45: [72057594046678944] TCreateView::TPropose, opId: 101:0 HandleReply TEvPrivate::TEvOperationPlan, step: 5000038 2026-01-08T15:36:24.219225Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2700: Change state for txid 101:0 128 -> 240 2026-01-08T15:36:24.219431Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:600: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 2 2026-01-08T15:36:24.219498Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:600: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 38] was 1 FAKE_COORDINATOR: Erasing txId 101 2026-01-08T15:36:24.222262Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2026-01-08T15:36:24.222354Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 101, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2026-01-08T15:36:24.222527Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 101, path id: [OwnerId: 72057594046678944, LocalPathId: 38] 2026-01-08T15:36:24.222653Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2026-01-08T15:36:24.222700Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:210:2211], at schemeshard: 72057594046678944, txId: 101, path id: 1 2026-01-08T15:36:24.222812Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:210:2211], at schemeshard: 72057594046678944, txId: 101, path id: 38 2026-01-08T15:36:24.223278Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:494: TTxOperationProgress Execute, operationId: 101:0, at 
schemeshard: 72057594046678944 2026-01-08T15:36:24.223333Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:497: [72057594046678944] TDone opId# 101:0 ProgressState 2026-01-08T15:36:24.223444Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:928: Part operation is done id#101:0 progress is 1/1 2026-01-08T15:36:24.223496Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1714: TOperation IsReadyToDone TxId: 101 ready parts: 1/1 2026-01-08T15:36:24.223536Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:928: Part operation is done id#101:0 progress is 1/1 2026-01-08T15:36:24.223573Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1714: TOperation IsReadyToDone TxId: 101 ready parts: 1/1 2026-01-08T15:36:24.223617Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1679: TOperation IsReadyToNotify, TxId: 101, ready parts: 1/1, is published: false 2026-01-08T15:36:24.223675Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1714: TOperation IsReadyToDone TxId: 101 ready parts: 1/1 2026-01-08T15:36:24.223711Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:995: Operation and all the parts is done, operation id: 101:0 2026-01-08T15:36:24.223744Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5577: RemoveTx for txid 101:0 2026-01-08T15:36:24.223811Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:611: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 38] was 2 2026-01-08T15:36:24.223857Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:1004: Publication still in progress, tx: 101, publications: 2, subscribers: 0 2026-01-08T15:36:24.223892Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:1011: Publication details: tx: 101, [OwnerId: 72057594046678944, LocalPathId: 1], 7 2026-01-08T15:36:24.223925Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:1011: Publication details: tx: 101, [OwnerId: 72057594046678944, LocalPathId: 38], 2 2026-01-08T15:36:24.224570Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6274: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 7 PathOwnerId: 72057594046678944, cookie: 101 2026-01-08T15:36:24.224677Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 7 PathOwnerId: 72057594046678944, cookie: 101 2026-01-08T15:36:24.224726Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 2, at schemeshard: 72057594046678944, txId: 101 2026-01-08T15:36:24.224769Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 101, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], version: 7 2026-01-08T15:36:24.224810Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:611: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 3 2026-01-08T15:36:24.225724Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6274: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 38 Version: 2 
PathOwnerId: 72057594046678944, cookie: 101 2026-01-08T15:36:24.225807Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 38 Version: 2 PathOwnerId: 72057594046678944, cookie: 101 2026-01-08T15:36:24.225840Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046678944, txId: 101 2026-01-08T15:36:24.225869Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 101, pathId: [OwnerId: 72057594046678944, LocalPathId: 38], version: 2 2026-01-08T15:36:24.225900Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:611: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 38] was 1 2026-01-08T15:36:24.225969Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 101, subscribers: 0 2026-01-08T15:36:24.228722Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 101 2026-01-08T15:36:24.229946Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 101 TestModificationResult got TxId: 101, wait until txId: 101 TestWaitNotification wait txId: 101 2026-01-08T15:36:24.230186Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:213: tests -- TTxNotificationSubscriber for txId 101: send EvNotifyTxCompletion 2026-01-08T15:36:24.230287Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:259: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 101 2026-01-08T15:36:24.230746Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:123: NotifyTxCompletion, unknown transaction, txId: 101, at schemeshard: 72057594046678944 2026-01-08T15:36:24.230854Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:230: tests -- TTxNotificationSubscriber for txId 101: got EvNotifyTxCompletionResult 2026-01-08T15:36:24.230913Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:239: tests -- TTxNotificationSubscriber for txId 101: satisfy waiter [1:701:2691] TestWaitNotification: OK eventTxId 101 2026-01-08T15:36:24.231410Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/MyView" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2026-01-08T15:36:24.231669Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/MyView" took 257us result status StatusSuccess 2026-01-08T15:36:24.232086Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/MyView" PathDescription { Self { Name: "MyView" PathId: 38 SchemeshardId: 72057594046678944 PathType: EPathTypeView CreateFinished: true CreateTxId: 101 CreateStep: 5000038 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 
EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 ViewVersion: 1 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 37 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } ViewDescription { Name: "MyView" PathId { OwnerId: 72057594046678944 LocalId: 38 } Version: 1 QueryText: "Some query" CapturedContext { } } } PathId: 38 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 |88.4%| [TM] {BAZEL_UPLOAD} ydb/core/tx/schemeshard/ut_view/unittest >> TConsoleConfigSubscriptionTests::TestNotificationForNewSubscription [GOOD] >> TConsoleConfigSubscriptionTests::TestNotificationForNewConfigItem >> TSchemeShardViewTest::ReadOnlyMode [GOOD] >> TSchemeShardViewTest::EmptyQueryText [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/scheme_board/ut_cache/unittest >> TCacheTest::RacyCreateAndSync [GOOD] Test command err: 2026-01-08T15:36:23.395863Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7755: Cannot subscribe to console configs 2026-01-08T15:36:23.395945Z node 1 :IMPORT WARN: schemeshard_import.cpp:440: Table profiles were not loaded ... 
waiting for SysViewsRoster update finished FAKE_COORDINATOR: Add transaction: 281474976710657 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 281474976710657 at step: 5000001 FAKE_COORDINATOR: Erasing txId 281474976710657 FAKE_COORDINATOR: Add transaction: 281474976710671 at step: 5000002 FAKE_COORDINATOR: advance: minStep5000002 State->FrontStep: 5000001 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 281474976710671 at step: 5000002 FAKE_COORDINATOR: Add transaction: 281474976710666 at step: 5000003 FAKE_COORDINATOR: advance: minStep5000002 State->FrontStep: 5000002 FAKE_COORDINATOR: Add transaction: 281474976710670 at step: 5000004 FAKE_COORDINATOR: advance: minStep5000002 State->FrontStep: 5000002 FAKE_COORDINATOR: Add transaction: 281474976710665 at step: 5000005 FAKE_COORDINATOR: advance: minStep5000002 State->FrontStep: 5000002 FAKE_COORDINATOR: Add transaction: 281474976710669 at step: 5000006 FAKE_COORDINATOR: advance: minStep5000002 State->FrontStep: 5000002 FAKE_COORDINATOR: Add transaction: 281474976710664 at step: 5000007 FAKE_COORDINATOR: advance: minStep5000002 State->FrontStep: 5000002 FAKE_COORDINATOR: Add transaction: 281474976710668 at step: 5000008 FAKE_COORDINATOR: advance: minStep5000002 State->FrontStep: 5000002 FAKE_COORDINATOR: Add transaction: 281474976710663 at step: 5000009 FAKE_COORDINATOR: advance: minStep5000002 State->FrontStep: 5000002 FAKE_COORDINATOR: Add transaction: 281474976710667 at step: 5000010 FAKE_COORDINATOR: advance: minStep5000002 State->FrontStep: 5000002 FAKE_COORDINATOR: Add transaction: 281474976710662 at step: 5000011 FAKE_COORDINATOR: advance: minStep5000002 State->FrontStep: 5000002 FAKE_COORDINATOR: Add transaction: 281474976710661 at step: 5000012 FAKE_COORDINATOR: advance: minStep5000002 State->FrontStep: 5000002 FAKE_COORDINATOR: Add transaction: 281474976710660 at step: 5000013 FAKE_COORDINATOR: advance: minStep5000002 State->FrontStep: 5000002 FAKE_COORDINATOR: Add transaction: 281474976710659 at step: 5000014 FAKE_COORDINATOR: advance: minStep5000002 State->FrontStep: 5000002 FAKE_COORDINATOR: Add transaction: 281474976710658 at step: 5000015 FAKE_COORDINATOR: advance: minStep5000002 State->FrontStep: 5000002 FAKE_COORDINATOR: Add transaction: 281474976710692 at step: 5000016 FAKE_COORDINATOR: advance: minStep5000002 State->FrontStep: 5000002 FAKE_COORDINATOR: Add transaction: 281474976710687 at step: 5000017 FAKE_COORDINATOR: advance: minStep5000002 State->FrontStep: 5000002 FAKE_COORDINATOR: Add transaction: 281474976710691 at step: 5000018 FAKE_COORDINATOR: advance: minStep5000002 State->FrontStep: 5000002 FAKE_COORDINATOR: Add transaction: 281474976710686 at step: 5000019 FAKE_COORDINATOR: advance: minStep5000002 State->FrontStep: 5000002 FAKE_COORDINATOR: Add transaction: 281474976710690 at step: 5000020 FAKE_COORDINATOR: advance: minStep5000002 State->FrontStep: 5000002 FAKE_COORDINATOR: Add transaction: 281474976710685 at step: 5000021 FAKE_COORDINATOR: advance: minStep5000002 State->FrontStep: 5000002 FAKE_COORDINATOR: Add transaction: 281474976710689 at step: 5000022 FAKE_COORDINATOR: advance: minStep5000002 State->FrontStep: 5000002 FAKE_COORDINATOR: Add transaction: 281474976710684 at step: 5000023 FAKE_COORDINATOR: advance: minStep5000002 State->FrontStep: 5000002 FAKE_COORDINATOR: Add transaction: 281474976710688 at step: 5000024 FAKE_COORDINATOR: advance: minStep5000002 State->FrontStep: 5000002 
FAKE_COORDINATOR: Add transaction: 281474976710683 at step: 5000025 FAKE_COORDINATOR: advance: minStep5000002 State->FrontStep: 5000002 FAKE_COORDINATOR: Add transaction: 281474976710682 at step: 5000026 FAKE_COORDINATOR: advance: minStep5000002 State->FrontStep: 5000002 FAKE_COORDINATOR: Add transaction: 281474976710677 at step: 5000027 FAKE_COORDINATOR: advance: minStep5000002 State->FrontStep: 5000002 FAKE_COORDINATOR: Add transaction: 281474976710681 at step: 5000028 FAKE_COORDINATOR: advance: minStep5000002 State->FrontStep: 5000002 FAKE_COORDINATOR: Add transaction: 281474976710676 at step: 5000029 FAKE_COORDINATOR: advance: minStep5000002 State->FrontStep: 5000002 FAKE_COORDINATOR: Add transaction: 281474976710680 at step: 5000030 FAKE_COORDINATOR: advance: minStep5000002 State->FrontStep: 5000002 FAKE_COORDINATOR: Add transaction: 281474976710675 at step: 5000031 FAKE_COORDINATOR: advance: minStep5000002 State->FrontStep: 5000002 FAKE_COORDINATOR: Add transaction: 281474976710679 at step: 5000032 FAKE_COORDINATOR: advance: minStep5000002 State->FrontStep: 5000002 FAKE_COORDINATOR: Add transaction: 281474976710674 at step: 5000033 FAKE_COORDINATOR: advance: minStep5000002 State->FrontStep: 5000002 FAKE_COORDINATOR: Add transaction: 281474976710678 at step: 5000034 FAKE_COORDINATOR: advance: minStep5000002 State->FrontStep: 5000002 FAKE_COORDINATOR: Add transaction: 281474976710673 at step: 5000035 FAKE_COORDINATOR: advance: minStep5000002 State->FrontStep: 5000002 FAKE_COORDINATOR: Add transaction: 281474976710672 at step: 5000036 FAKE_COORDINATOR: advance: minStep5000002 State->FrontStep: 5000002 FAKE_COORDINATOR: Erasing txId 281474976710671 FAKE_COORDINATOR: advance: minStep5000003 State->FrontStep: 5000002 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 281474976710666 at step: 5000003 FAKE_COORDINATOR: Erasing txId 281474976710666 FAKE_COORDINATOR: advance: minStep5000004 State->FrontStep: 5000003 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 281474976710670 at step: 5000004 FAKE_COORDINATOR: Erasing txId 281474976710670 FAKE_COORDINATOR: advance: minStep5000005 State->FrontStep: 5000004 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 281474976710665 at step: 5000005 FAKE_COORDINATOR: Erasing txId 281474976710665 FAKE_COORDINATOR: advance: minStep5000006 State->FrontStep: 5000005 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 281474976710669 at step: 5000006 FAKE_COORDINATOR: Erasing txId 281474976710669 FAKE_COORDINATOR: advance: minStep5000007 State->FrontStep: 5000006 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 281474976710664 at step: 5000007 FAKE_COORDINATOR: Erasing txId 281474976710664 FAKE_COORDINATOR: advance: minStep5000008 State->FrontStep: 5000007 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 281474976710668 at step: 5000008 FAKE_COORDINATOR: Erasing txId 281474976710668 FAKE_COORDINATOR: advance: minStep5000009 State->FrontStep: 5000008 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 281474976710663 at step: 5000009 FAKE_COORDINATOR: Erasing txId 281474976710663 FAKE_COORDINATOR: advance: minStep5000010 State->FrontStep: 5000009 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 281474976710667 at step: 5000010 FAKE_COORDINATOR: Erasing txId 281474976710667 FAKE_COORDINATOR: advance: minStep5000011 State->FrontStep: 5000010 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 281474976710662 at step: 
5000011 FAKE_COORDINATOR: Erasing txId 281474976710662 FAKE_COORDINATOR: advance: minStep5000012 State->FrontStep: 5000011 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 281474976710661 at step: 5000012 FAKE_COORDINATOR: Erasing txId 281474976710661 FAKE_COORDINATOR: advance: minStep5000013 State->FrontStep: 5000012 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 281474976710660 at step: 5000013 FAKE_COORDINATOR: Erasing txId 281474976710660 FAKE_COORDINATOR: advance: minStep5000014 State->FrontStep: 5000013 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 281474976710659 at step: 5000014 FAKE_COORDINATOR: Erasing txId 281474976710659 FAKE_COORDINATOR: advance: minStep5000015 State->FrontStep: 5000014 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 281474976710658 at step: 5000015 FAKE_COORDINATOR: Erasing txId 281474976710658 FAKE_COORDINATOR: advance: minStep5000016 State->FrontStep: 5000015 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 281474976710692 at step: 5000016 FAKE_COORDINATOR: Erasing txId 281474976710692 FAKE_COORDINATOR: advance: minStep5000017 State->FrontStep: 5000016 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 281474976710687 at step: 5000017 FAKE_COORDINATOR: Erasing txId 281474976710687 FAKE_COORDINATOR: advance: minStep5000018 State->FrontStep: 5000017 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 281474976710691 at step: 5000018 FAKE_COORDINATOR: Erasing txId 281474976710691 FAKE_COORDINATOR: advance: minStep5000019 State->FrontStep: 5000018 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 281474976710686 at step: 5000019 FAKE_COORDINATOR: Erasing txId 281474976710686 FAKE_COORDINATOR: advance: minStep5000020 State->FrontStep: 5000019 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 281474976710690 at step: 5000020 FAKE_COORDINATOR: Erasing txId 281474976710690 FAKE_COORDINATOR: advance: minStep5000021 State->FrontStep: 5000020 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 281474976710685 at step: 5000021 FAKE_COORDINATOR: Erasing txId 281474976710685 FAKE_COORDINATOR: advance: minStep5000022 State->FrontStep: 5000021 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 281474976710689 at step: 5000022 FAKE_COORDINATOR: Erasing txId 281474976710689 FAKE_COORDINATOR: advance: minStep5000023 State->FrontStep: 5000022 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 281474976710684 at step: 5000023 FAKE_COORDINATOR: Erasing txId 281474976710684 FAKE_COORDINATOR: advance: minStep5000024 State->FrontStep: 5000023 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 281474976710688 at step: 5000024 FAKE_COORDINATOR: Erasing txId 281474976710688 FAKE_COORDINATOR: advance: minStep5000025 State->FrontStep: 5000024 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 281474976710683 at step: 5000025 FAKE_COORDINATOR: Erasing txId 281474976710683 FAKE_COORDINATOR: advance: minStep5000026 State->FrontStep: 5000025 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 281474976710682 at step: 5000026 FAKE_COORDINATOR: Erasing txId 281474976710682 FAKE_COORDINATOR: advance: minStep5000027 State->FrontStep: 5000026 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 281474976710677 at step: 5000027 FAKE_COORDINATOR: Erasing txId 281474976710677 FAKE_COORDINATOR: advance: minStep5000028 State->FrontStep: 5000027 
FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 281474976710681 at step: 5000028 FAKE_COORDINATOR: Erasing txId 281474976710681 FAKE_COORDINATOR: advance: minStep5000029 State->FrontStep: 5000028 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 281474976710676 at step: 5000029 FAKE_COORDINATOR: Erasing txId 281474976710676 FAKE_COORDINATOR: advance: minStep5000030 State->FrontStep: 5000029 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 281474976710680 at step: 5000030 FAKE_COORDINATOR: Erasing txId 281474976710680 FAKE_COORDINATOR: advance: minStep5000031 State->FrontStep: 5000030 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 281474976710675 at step: 5000031 FAKE_COORDINATOR: Erasing txId 281474976710675 FAKE_COORDINATOR: advance: minStep5000032 State->FrontStep: 5000031 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 281474976710679 at step: 5000032 FAKE_COORDINATOR: Erasing txId 281474976710679 FAKE_COORDINATOR: advance: minStep5000033 State->FrontStep: 5000032 FAKE_COORDINATOR: ... 2 State->FrontStep: 5000002 FAKE_COORDINATOR: Add transaction: 281474976710660 at step: 5000013 FAKE_COORDINATOR: advance: minStep5000002 State->FrontStep: 5000002 FAKE_COORDINATOR: Add transaction: 281474976710659 at step: 5000014 FAKE_COORDINATOR: advance: minStep5000002 State->FrontStep: 5000002 FAKE_COORDINATOR: Add transaction: 281474976710658 at step: 5000015 FAKE_COORDINATOR: advance: minStep5000002 State->FrontStep: 5000002 FAKE_COORDINATOR: Add transaction: 281474976710692 at step: 5000016 FAKE_COORDINATOR: advance: minStep5000002 State->FrontStep: 5000002 FAKE_COORDINATOR: Add transaction: 281474976710687 at step: 5000017 FAKE_COORDINATOR: advance: minStep5000002 State->FrontStep: 5000002 FAKE_COORDINATOR: Add transaction: 281474976710691 at step: 5000018 FAKE_COORDINATOR: advance: minStep5000002 State->FrontStep: 5000002 FAKE_COORDINATOR: Add transaction: 281474976710686 at step: 5000019 FAKE_COORDINATOR: advance: minStep5000002 State->FrontStep: 5000002 FAKE_COORDINATOR: Add transaction: 281474976710690 at step: 5000020 FAKE_COORDINATOR: advance: minStep5000002 State->FrontStep: 5000002 FAKE_COORDINATOR: Add transaction: 281474976710685 at step: 5000021 FAKE_COORDINATOR: advance: minStep5000002 State->FrontStep: 5000002 FAKE_COORDINATOR: Add transaction: 281474976710689 at step: 5000022 FAKE_COORDINATOR: advance: minStep5000002 State->FrontStep: 5000002 FAKE_COORDINATOR: Add transaction: 281474976710684 at step: 5000023 FAKE_COORDINATOR: advance: minStep5000002 State->FrontStep: 5000002 FAKE_COORDINATOR: Add transaction: 281474976710688 at step: 5000024 FAKE_COORDINATOR: advance: minStep5000002 State->FrontStep: 5000002 FAKE_COORDINATOR: Add transaction: 281474976710683 at step: 5000025 FAKE_COORDINATOR: advance: minStep5000002 State->FrontStep: 5000002 FAKE_COORDINATOR: Add transaction: 281474976710682 at step: 5000026 FAKE_COORDINATOR: advance: minStep5000002 State->FrontStep: 5000002 FAKE_COORDINATOR: Add transaction: 281474976710677 at step: 5000027 FAKE_COORDINATOR: advance: minStep5000002 State->FrontStep: 5000002 FAKE_COORDINATOR: Add transaction: 281474976710681 at step: 5000028 FAKE_COORDINATOR: advance: minStep5000002 State->FrontStep: 5000002 FAKE_COORDINATOR: Add transaction: 281474976710676 at step: 5000029 FAKE_COORDINATOR: advance: minStep5000002 State->FrontStep: 5000002 FAKE_COORDINATOR: Add transaction: 281474976710680 at step: 5000030 FAKE_COORDINATOR: advance: minStep5000002 
State->FrontStep: 5000002 FAKE_COORDINATOR: Add transaction: 281474976710675 at step: 5000031 FAKE_COORDINATOR: advance: minStep5000002 State->FrontStep: 5000002 FAKE_COORDINATOR: Add transaction: 281474976710679 at step: 5000032 FAKE_COORDINATOR: advance: minStep5000002 State->FrontStep: 5000002 FAKE_COORDINATOR: Add transaction: 281474976710674 at step: 5000033 FAKE_COORDINATOR: advance: minStep5000002 State->FrontStep: 5000002 FAKE_COORDINATOR: Add transaction: 281474976710678 at step: 5000034 FAKE_COORDINATOR: advance: minStep5000002 State->FrontStep: 5000002 FAKE_COORDINATOR: Add transaction: 281474976710673 at step: 5000035 FAKE_COORDINATOR: advance: minStep5000002 State->FrontStep: 5000002 FAKE_COORDINATOR: Add transaction: 281474976710672 at step: 5000036 FAKE_COORDINATOR: advance: minStep5000002 State->FrontStep: 5000002 FAKE_COORDINATOR: Erasing txId 281474976710671 FAKE_COORDINATOR: advance: minStep5000003 State->FrontStep: 5000002 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 281474976710666 at step: 5000003 FAKE_COORDINATOR: Erasing txId 281474976710666 FAKE_COORDINATOR: advance: minStep5000004 State->FrontStep: 5000003 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 281474976710670 at step: 5000004 FAKE_COORDINATOR: Erasing txId 281474976710670 FAKE_COORDINATOR: advance: minStep5000005 State->FrontStep: 5000004 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 281474976710665 at step: 5000005 FAKE_COORDINATOR: Erasing txId 281474976710665 FAKE_COORDINATOR: advance: minStep5000006 State->FrontStep: 5000005 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 281474976710669 at step: 5000006 FAKE_COORDINATOR: Erasing txId 281474976710669 FAKE_COORDINATOR: advance: minStep5000007 State->FrontStep: 5000006 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 281474976710664 at step: 5000007 FAKE_COORDINATOR: Erasing txId 281474976710664 FAKE_COORDINATOR: advance: minStep5000008 State->FrontStep: 5000007 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 281474976710668 at step: 5000008 FAKE_COORDINATOR: Erasing txId 281474976710668 FAKE_COORDINATOR: advance: minStep5000009 State->FrontStep: 5000008 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 281474976710663 at step: 5000009 FAKE_COORDINATOR: Erasing txId 281474976710663 FAKE_COORDINATOR: advance: minStep5000010 State->FrontStep: 5000009 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 281474976710667 at step: 5000010 FAKE_COORDINATOR: Erasing txId 281474976710667 FAKE_COORDINATOR: advance: minStep5000011 State->FrontStep: 5000010 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 281474976710662 at step: 5000011 FAKE_COORDINATOR: Erasing txId 281474976710662 FAKE_COORDINATOR: advance: minStep5000012 State->FrontStep: 5000011 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 281474976710661 at step: 5000012 FAKE_COORDINATOR: Erasing txId 281474976710661 FAKE_COORDINATOR: advance: minStep5000013 State->FrontStep: 5000012 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 281474976710660 at step: 5000013 FAKE_COORDINATOR: Erasing txId 281474976710660 FAKE_COORDINATOR: advance: minStep5000014 State->FrontStep: 5000013 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 281474976710659 at step: 5000014 FAKE_COORDINATOR: Erasing txId 281474976710659 FAKE_COORDINATOR: advance: minStep5000015 State->FrontStep: 5000014 FAKE_COORDINATOR: Send Plan 
to tablet 72057594046678944 for txId: 281474976710658 at step: 5000015 FAKE_COORDINATOR: Erasing txId 281474976710658 FAKE_COORDINATOR: advance: minStep5000016 State->FrontStep: 5000015 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 281474976710692 at step: 5000016 FAKE_COORDINATOR: Erasing txId 281474976710692 FAKE_COORDINATOR: advance: minStep5000017 State->FrontStep: 5000016 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 281474976710687 at step: 5000017 FAKE_COORDINATOR: Erasing txId 281474976710687 FAKE_COORDINATOR: advance: minStep5000018 State->FrontStep: 5000017 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 281474976710691 at step: 5000018 FAKE_COORDINATOR: Erasing txId 281474976710691 FAKE_COORDINATOR: advance: minStep5000019 State->FrontStep: 5000018 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 281474976710686 at step: 5000019 FAKE_COORDINATOR: Erasing txId 281474976710686 FAKE_COORDINATOR: advance: minStep5000020 State->FrontStep: 5000019 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 281474976710690 at step: 5000020 FAKE_COORDINATOR: Erasing txId 281474976710690 FAKE_COORDINATOR: advance: minStep5000021 State->FrontStep: 5000020 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 281474976710685 at step: 5000021 FAKE_COORDINATOR: Erasing txId 281474976710685 FAKE_COORDINATOR: advance: minStep5000022 State->FrontStep: 5000021 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 281474976710689 at step: 5000022 FAKE_COORDINATOR: Erasing txId 281474976710689 FAKE_COORDINATOR: advance: minStep5000023 State->FrontStep: 5000022 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 281474976710684 at step: 5000023 FAKE_COORDINATOR: Erasing txId 281474976710684 FAKE_COORDINATOR: advance: minStep5000024 State->FrontStep: 5000023 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 281474976710688 at step: 5000024 FAKE_COORDINATOR: Erasing txId 281474976710688 FAKE_COORDINATOR: advance: minStep5000025 State->FrontStep: 5000024 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 281474976710683 at step: 5000025 FAKE_COORDINATOR: Erasing txId 281474976710683 FAKE_COORDINATOR: advance: minStep5000026 State->FrontStep: 5000025 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 281474976710682 at step: 5000026 FAKE_COORDINATOR: Erasing txId 281474976710682 FAKE_COORDINATOR: advance: minStep5000027 State->FrontStep: 5000026 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 281474976710677 at step: 5000027 FAKE_COORDINATOR: Erasing txId 281474976710677 FAKE_COORDINATOR: advance: minStep5000028 State->FrontStep: 5000027 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 281474976710681 at step: 5000028 FAKE_COORDINATOR: Erasing txId 281474976710681 FAKE_COORDINATOR: advance: minStep5000029 State->FrontStep: 5000028 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 281474976710676 at step: 5000029 FAKE_COORDINATOR: Erasing txId 281474976710676 FAKE_COORDINATOR: advance: minStep5000030 State->FrontStep: 5000029 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 281474976710680 at step: 5000030 FAKE_COORDINATOR: Erasing txId 281474976710680 FAKE_COORDINATOR: advance: minStep5000031 State->FrontStep: 5000030 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 281474976710675 at step: 5000031 FAKE_COORDINATOR: Erasing txId 281474976710675 
FAKE_COORDINATOR: advance: minStep5000032 State->FrontStep: 5000031 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 281474976710679 at step: 5000032 FAKE_COORDINATOR: Erasing txId 281474976710679 FAKE_COORDINATOR: advance: minStep5000033 State->FrontStep: 5000032 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 281474976710674 at step: 5000033 FAKE_COORDINATOR: Erasing txId 281474976710674 FAKE_COORDINATOR: advance: minStep5000034 State->FrontStep: 5000033 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 281474976710678 at step: 5000034 FAKE_COORDINATOR: Erasing txId 281474976710678 FAKE_COORDINATOR: advance: minStep5000035 State->FrontStep: 5000034 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 281474976710673 at step: 5000035 FAKE_COORDINATOR: Erasing txId 281474976710673 FAKE_COORDINATOR: advance: minStep5000036 State->FrontStep: 5000035 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 281474976710672 at step: 5000036 FAKE_COORDINATOR: Erasing txId 281474976710672 ... waiting for SysViewsRoster update finished (done) TestModificationResults wait txId: 1 2026-01-08T15:36:24.906650Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) FAKE_COORDINATOR: Add transaction: 1 at step: 5000037 FAKE_COORDINATOR: advance: minStep5000037 State->FrontStep: 5000036 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000037 FAKE_COORDINATOR: Erasing txId 1 TestModificationResult got TxId: 1, wait until txId: 1 TestModificationResults wait txId: 101 FAKE_COORDINATOR: Add transaction: 101 at step: 5000038 FAKE_COORDINATOR: advance: minStep5000038 State->FrontStep: 5000037 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 101 at step: 5000038 FAKE_COORDINATOR: Erasing txId 101 TestModificationResult got TxId: 101, wait until txId: 101 TestWaitNotification wait txId: 101 2026-01-08T15:36:24.926280Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:123: NotifyTxCompletion, unknown transaction, txId: 101, at schemeshard: 72057594046678944 TestWaitNotification: OK eventTxId 101 |88.4%| [TM] {BAZEL_UPLOAD} ydb/core/tx/scheme_board/ut_cache/unittest >> KqpService::CloseSessionsWithLoad [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_view/unittest >> TSchemeShardViewTest::AsyncCreateDifferentViews [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2143] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2143] Leader for TabletID 72057594046678944 is [1:129:2154] sender: [1:130:2058] recipient: [1:112:2143] 2026-01-08T15:36:23.394000Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7938: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2026-01-08T15:36:23.394080Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7966: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2026-01-08T15:36:23.394113Z node 1 
:FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7852: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2026-01-08T15:36:23.394147Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7868: OperationsProcessing config: using default configuration 2026-01-08T15:36:23.394202Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7874: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2026-01-08T15:36:23.394249Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7874: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2026-01-08T15:36:23.394302Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7998: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2026-01-08T15:36:23.394436Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_shred_manager.cpp:39: [RootShredManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, ShredInterval# 604800.000000s, ShredBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2026-01-08T15:36:23.395307Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:8069: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2026-01-08T15:36:23.395714Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2026-01-08T15:36:23.487357Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7755: Cannot subscribe to console configs 2026-01-08T15:36:23.487439Z node 1 :IMPORT WARN: schemeshard_import.cpp:440: Table profiles were not loaded 2026-01-08T15:36:23.500920Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2026-01-08T15:36:23.501317Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2026-01-08T15:36:23.501547Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2026-01-08T15:36:23.525134Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2026-01-08T15:36:23.525631Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:464: Clear TempDirsState with owners number: 0 2026-01-08T15:36:23.526463Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1383: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2026-01-08T15:36:23.527423Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2026-01-08T15:36:23.531392Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:160: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2026-01-08T15:36:23.531677Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_shred_manager.cpp:84: [RootShredManager] Stop 2026-01-08T15:36:23.532993Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2026-01-08T15:36:23.533069Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2026-01-08T15:36:23.533190Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2026-01-08T15:36:23.533239Z node 1 
:FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2026-01-08T15:36:23.533362Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:213: TTxServerlessStorageBilling.Complete 2026-01-08T15:36:23.533539Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:7111: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2026-01-08T15:36:23.693319Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot" OperationType: ESchemeOpMkDir MkDir { Name: ".sys" } Internal: true FailOnExist: false } TxId: 281474976710657 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:36:23.694466Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "auth_permissions" Type: EAuthPermissions } } TxId: 281474976710658 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:36:23.694619Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "auth_effective_permissions" Type: EAuthEffectivePermissions } } TxId: 281474976710659 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:36:23.694740Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "auth_users" Type: EAuthUsers } } TxId: 281474976710660 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:36:23.694816Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "pg_tables" Type: EPgTables } } TxId: 281474976710661 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:36:23.694887Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "ds_pdisks" Type: EPDisks } } TxId: 281474976710662 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:36:23.694954Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: 
"/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "auth_group_members" Type: EAuthGroupMembers } } TxId: 281474976710663 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:36:23.695023Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "top_partitions_one_minute" Type: ETopPartitionsByCpuOneMinute } } TxId: 281474976710664 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:36:23.695121Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "top_queries_by_request_units_one_hour" Type: ETopQueriesByRequestUnitsOneHour } } TxId: 281474976710665 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:36:23.695219Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "ds_vslots" Type: EVSlots } } TxId: 281474976710666 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:36:23.695293Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "top_partitions_by_tli_one_hour" Type: ETopPartitionsByTliOneHour } } TxId: 281474976710667 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:36:23.695374Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "nodes" Type: ENodes } } TxId: 281474976710668 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:36:23.695465Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "top_queries_by_cpu_time_one_hour" Type: ETopQueriesByCpuTimeOneHour } } TxId: 281474976710669 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:36:23.695599Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "top_queries_by_read_bytes_one_hour" Type: 
ETopQueriesByReadBytesOneHour } } TxId: 281474976710670 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:36:23.695708Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "top_queries_by_read_bytes_one_minute" Type: ETopQueriesByR ... schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 39 Version: 2 PathOwnerId: 72057594046678944, cookie: 102 2026-01-08T15:36:24.899466Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046678944, txId: 102 2026-01-08T15:36:24.899492Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 102, pathId: [OwnerId: 72057594046678944, LocalPathId: 39], version: 2 2026-01-08T15:36:24.899521Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:611: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 39] was 1 2026-01-08T15:36:24.899584Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 102, subscribers: 0 2026-01-08T15:36:24.903008Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 102 2026-01-08T15:36:24.903150Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 102 TestModificationResult got TxId: 101, wait until txId: 101 TestModificationResults wait txId: 102 TestModificationResult got TxId: 102, wait until txId: 102 TestModificationResults wait txId: 103 TestModificationResult got TxId: 103, wait until txId: 103 TestWaitNotification wait txId: 101 2026-01-08T15:36:24.903499Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:213: tests -- TTxNotificationSubscriber for txId 101: send EvNotifyTxCompletion 2026-01-08T15:36:24.903544Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:259: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 101 TestWaitNotification wait txId: 102 2026-01-08T15:36:24.903637Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:213: tests -- TTxNotificationSubscriber for txId 102: send EvNotifyTxCompletion 2026-01-08T15:36:24.903749Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:259: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 102 TestWaitNotification wait txId: 103 2026-01-08T15:36:24.903815Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:213: tests -- TTxNotificationSubscriber for txId 103: send EvNotifyTxCompletion 2026-01-08T15:36:24.903837Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:259: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 103 2026-01-08T15:36:24.904482Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:123: NotifyTxCompletion, unknown transaction, txId: 101, at schemeshard: 72057594046678944 2026-01-08T15:36:24.904604Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:123: NotifyTxCompletion, unknown transaction, txId: 
102, at schemeshard: 72057594046678944 2026-01-08T15:36:24.904665Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:123: NotifyTxCompletion, unknown transaction, txId: 103, at schemeshard: 72057594046678944 2026-01-08T15:36:24.904743Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:230: tests -- TTxNotificationSubscriber for txId 101: got EvNotifyTxCompletionResult 2026-01-08T15:36:24.904780Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:239: tests -- TTxNotificationSubscriber for txId 101: satisfy waiter [1:732:2722] 2026-01-08T15:36:24.904943Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:230: tests -- TTxNotificationSubscriber for txId 102: got EvNotifyTxCompletionResult 2026-01-08T15:36:24.904978Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:239: tests -- TTxNotificationSubscriber for txId 102: satisfy waiter [1:732:2722] 2026-01-08T15:36:24.905070Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:230: tests -- TTxNotificationSubscriber for txId 103: got EvNotifyTxCompletionResult 2026-01-08T15:36:24.905091Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:239: tests -- TTxNotificationSubscriber for txId 103: satisfy waiter [1:732:2722] TestWaitNotification: OK eventTxId 101 TestWaitNotification: OK eventTxId 102 TestWaitNotification: OK eventTxId 103 2026-01-08T15:36:24.905685Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/SomeDir" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2026-01-08T15:36:24.905916Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/SomeDir" took 259us result status StatusSuccess 2026-01-08T15:36:24.906535Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/SomeDir" PathDescription { Self { Name: "SomeDir" PathId: 38 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 101 CreateStep: 5000038 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 5 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 5 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 4 } ChildrenExist: true } Children { Name: "FirstView" PathId: 39 SchemeshardId: 72057594046678944 PathType: EPathTypeView CreateFinished: true CreateTxId: 102 CreateStep: 5000040 ParentPathId: 38 PathState: EPathStateCreate Owner: "root@builtin" ACL: "" ChildrenExist: false } Children { Name: "SecondView" PathId: 40 SchemeshardId: 72057594046678944 PathType: EPathTypeView CreateFinished: true CreateTxId: 103 CreateStep: 5000039 ParentPathId: 38 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 39 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 
MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 38 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 2026-01-08T15:36:24.907132Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/SomeDir/FirstView" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2026-01-08T15:36:24.907320Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/SomeDir/FirstView" took 195us result status StatusSuccess 2026-01-08T15:36:24.907641Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/SomeDir/FirstView" PathDescription { Self { Name: "FirstView" PathId: 39 SchemeshardId: 72057594046678944 PathType: EPathTypeView CreateFinished: true CreateTxId: 102 CreateStep: 5000040 ParentPathId: 38 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 ViewVersion: 1 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 39 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } ViewDescription { Name: "FirstView" PathId { OwnerId: 72057594046678944 LocalId: 39 } Version: 1 QueryText: "First query" CapturedContext { } } } PathId: 39 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 2026-01-08T15:36:24.908209Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/SomeDir/SecondView" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2026-01-08T15:36:24.908430Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/SomeDir/SecondView" took 231us result status StatusSuccess 2026-01-08T15:36:24.908715Z node 1 
:SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/SomeDir/SecondView" PathDescription { Self { Name: "SecondView" PathId: 40 SchemeshardId: 72057594046678944 PathType: EPathTypeView CreateFinished: true CreateTxId: 103 CreateStep: 5000039 ParentPathId: 38 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 ViewVersion: 1 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 39 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } ViewDescription { Name: "SecondView" PathId { OwnerId: 72057594046678944 LocalId: 40 } Version: 1 QueryText: "Second query" CapturedContext { } } } PathId: 40 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 >> TConsoleTests::TestRemoveTenantWithBorrowedStorageUnits [GOOD] >> TConsoleTests::TestModifyUsedZoneKind >> TConsoleInMemoryConfigSubscriptionTests::TestSubscriptionClientDeadCausesSubscriptionDeregistration [GOOD] >> TConsoleInMemoryConfigSubscriptionTests::TestSubscriptionClientReconnectsOnConnectionLoose |88.4%| [TM] {BAZEL_UPLOAD} ydb/core/tx/schemeshard/ut_view/unittest ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_view/unittest >> TSchemeShardViewTest::DropView [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2143] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2143] Leader for TabletID 72057594046678944 is [1:129:2154] sender: [1:130:2058] recipient: [1:112:2143] 2026-01-08T15:36:23.263059Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7938: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2026-01-08T15:36:23.263170Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7966: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2026-01-08T15:36:23.263207Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7852: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2026-01-08T15:36:23.263243Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7868: OperationsProcessing config: using default 
configuration 2026-01-08T15:36:23.263292Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7874: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2026-01-08T15:36:23.263322Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7874: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2026-01-08T15:36:23.263395Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7998: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2026-01-08T15:36:23.263464Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_shred_manager.cpp:39: [RootShredManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, ShredInterval# 604800.000000s, ShredBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2026-01-08T15:36:23.264342Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:8069: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2026-01-08T15:36:23.264698Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2026-01-08T15:36:23.380104Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7755: Cannot subscribe to console configs 2026-01-08T15:36:23.380170Z node 1 :IMPORT WARN: schemeshard_import.cpp:440: Table profiles were not loaded 2026-01-08T15:36:23.393039Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2026-01-08T15:36:23.393346Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2026-01-08T15:36:23.393539Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2026-01-08T15:36:23.405823Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2026-01-08T15:36:23.406138Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:464: Clear TempDirsState with owners number: 0 2026-01-08T15:36:23.406664Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1383: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2026-01-08T15:36:23.407292Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2026-01-08T15:36:23.409890Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:160: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2026-01-08T15:36:23.410063Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_shred_manager.cpp:84: [RootShredManager] Stop 2026-01-08T15:36:23.411190Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2026-01-08T15:36:23.411257Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2026-01-08T15:36:23.411474Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2026-01-08T15:36:23.411525Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2026-01-08T15:36:23.411626Z node 1 :FLAT_TX_SCHEMESHARD 
DEBUG: schemeshard__serverless_storage_billing.cpp:213: TTxServerlessStorageBilling.Complete 2026-01-08T15:36:23.411811Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:7111: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2026-01-08T15:36:23.568152Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot" OperationType: ESchemeOpMkDir MkDir { Name: ".sys" } Internal: true FailOnExist: false } TxId: 281474976710657 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:36:23.569179Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "auth_permissions" Type: EAuthPermissions } } TxId: 281474976710658 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:36:23.569329Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "auth_effective_permissions" Type: EAuthEffectivePermissions } } TxId: 281474976710659 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:36:23.569439Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "auth_users" Type: EAuthUsers } } TxId: 281474976710660 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:36:23.569563Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "pg_tables" Type: EPgTables } } TxId: 281474976710661 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:36:23.569656Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "ds_pdisks" Type: EPDisks } } TxId: 281474976710662 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:36:23.569727Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "auth_group_members" Type: EAuthGroupMembers } } TxId: 281474976710663 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:36:23.569789Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "top_partitions_one_minute" Type: ETopPartitionsByCpuOneMinute } } TxId: 281474976710664 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:36:23.569876Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "top_queries_by_request_units_one_hour" Type: ETopQueriesByRequestUnitsOneHour } } TxId: 281474976710665 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:36:23.569991Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "ds_vslots" Type: EVSlots } } TxId: 281474976710666 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:36:23.570061Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "top_partitions_by_tli_one_hour" Type: ETopPartitionsByTliOneHour } } TxId: 281474976710667 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:36:23.570133Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "nodes" Type: ENodes } } TxId: 281474976710668 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:36:23.570217Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "top_queries_by_cpu_time_one_hour" Type: ETopQueriesByCpuTimeOneHour } } TxId: 281474976710669 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:36:23.570337Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "top_queries_by_read_bytes_one_hour" Type: ETopQueriesByReadBytesOneHour } } TxId: 281474976710670 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:36:23.570434Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: 
Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "top_queries_by_read_bytes_one_minute" Type: ETopQueriesByR ... tion.cpp:1792: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 102 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2026-01-08T15:36:24.723054Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:665: Send tablet strongly msg operationId: 102:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:102 msg type: 269090816 2026-01-08T15:36:24.723225Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1824: TOperation RegisterRelationByTabletId, TxId: 102, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 102 at step: 5000039 FAKE_COORDINATOR: advance: minStep5000039 State->FrontStep: 5000038 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 102 at step: 5000039 2026-01-08T15:36:24.723625Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:685: TTxOperationPlanStep Execute, stepId: 5000039, transactions count in step: 1, at schemeshard: 72057594046678944 2026-01-08T15:36:24.723782Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:689: TTxOperationPlanStep Execute, message: Transactions { TxId: 102 Coordinator: 72057594046316545 AckTo { RawX1: 136 RawX2: 4294969454 } } Step: 5000039 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2026-01-08T15:36:24.723841Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_drop_view.cpp:43: [72057594046678944] TDropView TPropose, opId: 102:0 HandleReply TEvOperationPlan, step: 5000039 2026-01-08T15:36:24.723997Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2700: Change state for txid 102:0 128 -> 240 2026-01-08T15:36:24.724212Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:600: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 2 2026-01-08T15:36:24.724292Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:600: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 38] was 1 FAKE_COORDINATOR: Erasing txId 102 2026-01-08T15:36:24.726714Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2026-01-08T15:36:24.726778Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 102, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2026-01-08T15:36:24.727002Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 102, path id: [OwnerId: 72057594046678944, LocalPathId: 38] 2026-01-08T15:36:24.727186Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2026-01-08T15:36:24.727237Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:210:2211], at schemeshard: 72057594046678944, txId: 102, path id: 1 2026-01-08T15:36:24.727290Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:210:2211], at schemeshard: 72057594046678944, txId: 102, path id: 38 2026-01-08T15:36:24.727739Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:494: TTxOperationProgress Execute, operationId: 102:0, at schemeshard: 72057594046678944 2026-01-08T15:36:24.727795Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:497: [72057594046678944] TDone opId# 102:0 ProgressState 2026-01-08T15:36:24.727932Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:928: Part operation is done id#102:0 progress is 1/1 2026-01-08T15:36:24.727977Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1714: TOperation IsReadyToDone TxId: 102 ready parts: 1/1 2026-01-08T15:36:24.728048Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:928: Part operation is done id#102:0 progress is 1/1 2026-01-08T15:36:24.728088Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1714: TOperation IsReadyToDone TxId: 102 ready parts: 1/1 2026-01-08T15:36:24.728149Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1679: TOperation IsReadyToNotify, TxId: 102, ready parts: 1/1, is published: false 2026-01-08T15:36:24.728227Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1714: TOperation IsReadyToDone TxId: 102 ready parts: 1/1 2026-01-08T15:36:24.728278Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:995: Operation and all the parts is done, operation id: 102:0 2026-01-08T15:36:24.728314Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5577: RemoveTx for txid 102:0 2026-01-08T15:36:24.728400Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:611: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 38] was 2 2026-01-08T15:36:24.728449Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:1004: Publication still in progress, tx: 102, publications: 2, subscribers: 0 2026-01-08T15:36:24.728483Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:1011: Publication details: tx: 102, [OwnerId: 72057594046678944, LocalPathId: 1], 8 2026-01-08T15:36:24.728517Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:1011: Publication details: tx: 102, [OwnerId: 72057594046678944, LocalPathId: 38], 18446744073709551615 2026-01-08T15:36:24.729466Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6274: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 8 PathOwnerId: 72057594046678944, cookie: 102 2026-01-08T15:36:24.729579Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 8 PathOwnerId: 72057594046678944, cookie: 102 2026-01-08T15:36:24.729621Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 2, at schemeshard: 72057594046678944, txId: 102 2026-01-08T15:36:24.729660Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 102, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], version: 8 2026-01-08T15:36:24.729707Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:611: 
DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 3 2026-01-08T15:36:24.731137Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6274: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 38 Version: 18446744073709551615 PathOwnerId: 72057594046678944, cookie: 102 2026-01-08T15:36:24.731266Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 38 Version: 18446744073709551615 PathOwnerId: 72057594046678944, cookie: 102 2026-01-08T15:36:24.731300Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046678944, txId: 102 2026-01-08T15:36:24.731351Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 102, pathId: [OwnerId: 72057594046678944, LocalPathId: 38], version: 18446744073709551615 2026-01-08T15:36:24.731388Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:611: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 38] was 1 2026-01-08T15:36:24.731499Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 102, subscribers: 0 2026-01-08T15:36:24.731845Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:123: TTxCleanDroppedPaths Execute, 1 paths in candidate queue, at schemeshard: 72057594046678944 2026-01-08T15:36:24.731921Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:137: TTxCleanDroppedPaths: PersistRemovePath for PathId# [OwnerId: 72057594046678944, LocalPathId: 38], at schemeshard: 72057594046678944 2026-01-08T15:36:24.732019Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:611: DecrementPathDbRefCount reason remove path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 2 2026-01-08T15:36:24.734710Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 102 2026-01-08T15:36:24.736802Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 102 2026-01-08T15:36:24.736928Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:160: TTxCleanDroppedPaths Complete, done PersistRemovePath for 1 paths, skipped 0, left 0 candidates, at schemeshard: 72057594046678944 TestModificationResult got TxId: 102, wait until txId: 102 TestWaitNotification wait txId: 102 2026-01-08T15:36:24.737225Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:213: tests -- TTxNotificationSubscriber for txId 102: send EvNotifyTxCompletion 2026-01-08T15:36:24.737274Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:259: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 102 2026-01-08T15:36:24.737697Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:123: NotifyTxCompletion, unknown transaction, txId: 102, at schemeshard: 72057594046678944 2026-01-08T15:36:24.737813Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:230: tests -- TTxNotificationSubscriber for txId 102: got EvNotifyTxCompletionResult 2026-01-08T15:36:24.737860Z node 1 
:FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:239: tests -- TTxNotificationSubscriber for txId 102: satisfy waiter [1:725:2715] TestWaitNotification: OK eventTxId 102 2026-01-08T15:36:24.738393Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/MyView" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2026-01-08T15:36:24.738597Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/MyView" took 244us result status StatusPathDoesNotExist 2026-01-08T15:36:24.738790Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusPathDoesNotExist Reason: "Check failed: path: \'/MyRoot/MyView\', error: path hasn\'t been resolved, nearest resolved path: \'/MyRoot\' (id: [OwnerId: 72057594046678944, LocalPathId: 1])" Path: "/MyRoot/MyView" PathId: 18446744073709551615 LastExistedPrefixPath: "/MyRoot" LastExistedPrefixPathId: 1 LastExistedPrefixDescription { Self { Name: "MyRoot" PathId: 1 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 5000037 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathSubType: EPathSubTypeEmpty ChildrenExist: true } } PathOwnerId: 18446744073709551615, at schemeshard: 72057594046678944 |88.4%| [TM] {BAZEL_UPLOAD} ydb/core/tx/schemeshard/ut_view/unittest >> TCacheTest::Recreate >> TConsoleConfigTests::TestAddConfigItem [GOOD] >> TConsoleConfigTests::TestConfigureOrderConflicts >> TCacheTest::MigrationLostMessage >> TConsoleTests::TestCreateServerlessTenantWrongSharedDb [GOOD] >> TConsoleTests::TestAlterTenantModifyStorageResourcesForPending ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_view/unittest >> TSchemeShardViewTest::ReadOnlyMode [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2143] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2143] Leader for TabletID 72057594046678944 is [1:129:2154] sender: [1:130:2058] recipient: [1:112:2143] 2026-01-08T15:36:23.497485Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7938: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2026-01-08T15:36:23.497582Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7966: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2026-01-08T15:36:23.497623Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7852: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2026-01-08T15:36:23.497663Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7868: OperationsProcessing config: using default configuration 2026-01-08T15:36:23.497732Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7874: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2026-01-08T15:36:23.497770Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7874: OperationsProcessing config: type TxMergeTablePartition, limit 10000 
2026-01-08T15:36:23.497843Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7998: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2026-01-08T15:36:23.497934Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_shred_manager.cpp:39: [RootShredManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, ShredInterval# 604800.000000s, ShredBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2026-01-08T15:36:23.498956Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:8069: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2026-01-08T15:36:23.499283Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2026-01-08T15:36:23.623625Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7755: Cannot subscribe to console configs 2026-01-08T15:36:23.623716Z node 1 :IMPORT WARN: schemeshard_import.cpp:440: Table profiles were not loaded 2026-01-08T15:36:23.634761Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2026-01-08T15:36:23.635039Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2026-01-08T15:36:23.635197Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2026-01-08T15:36:23.650750Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2026-01-08T15:36:23.651234Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:464: Clear TempDirsState with owners number: 0 2026-01-08T15:36:23.652097Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1383: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2026-01-08T15:36:23.653162Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2026-01-08T15:36:23.657301Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:160: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2026-01-08T15:36:23.657520Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_shred_manager.cpp:84: [RootShredManager] Stop 2026-01-08T15:36:23.658836Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2026-01-08T15:36:23.658921Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2026-01-08T15:36:23.659114Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2026-01-08T15:36:23.659173Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2026-01-08T15:36:23.659304Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:213: TTxServerlessStorageBilling.Complete 2026-01-08T15:36:23.659499Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:7111: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2026-01-08T15:36:23.818659Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot" OperationType: ESchemeOpMkDir MkDir { Name: ".sys" } Internal: true FailOnExist: false } TxId: 281474976710657 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:36:23.819797Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "auth_permissions" Type: EAuthPermissions } } TxId: 281474976710658 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:36:23.819949Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "auth_effective_permissions" Type: EAuthEffectivePermissions } } TxId: 281474976710659 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:36:23.820044Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "auth_users" Type: EAuthUsers } } TxId: 281474976710660 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:36:23.820116Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "pg_tables" Type: EPgTables } } TxId: 281474976710661 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:36:23.820202Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "ds_pdisks" Type: EPDisks } } TxId: 281474976710662 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:36:23.820268Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "auth_group_members" Type: EAuthGroupMembers } } TxId: 281474976710663 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:36:23.820345Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "top_partitions_one_minute" Type: 
ETopPartitionsByCpuOneMinute } } TxId: 281474976710664 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:36:23.820431Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "top_queries_by_request_units_one_hour" Type: ETopQueriesByRequestUnitsOneHour } } TxId: 281474976710665 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:36:23.820549Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "ds_vslots" Type: EVSlots } } TxId: 281474976710666 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:36:23.820623Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "top_partitions_by_tli_one_hour" Type: ETopPartitionsByTliOneHour } } TxId: 281474976710667 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:36:23.820717Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "nodes" Type: ENodes } } TxId: 281474976710668 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:36:23.820814Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "top_queries_by_cpu_time_one_hour" Type: ETopQueriesByCpuTimeOneHour } } TxId: 281474976710669 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:36:23.820895Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "top_queries_by_read_bytes_one_hour" Type: ETopQueriesByReadBytesOneHour } } TxId: 281474976710670 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:36:23.821001Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "top_queries_by_read_bytes_one_minute" Type: ETopQueriesByR ... 
less_storage_billing.cpp:213: TTxServerlessStorageBilling.Complete 2026-01-08T15:36:25.285102Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:7111: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 TestModificationResults wait txId: 103 Leader for TabletID 72057594046678944 is [1:786:2755] sender: [1:844:2058] recipient: [1:15:2062] 2026-01-08T15:36:25.332348Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:377: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/MyRoot" OperationType: ESchemeOpCreateView CreateView { Name: "ThirdView" QueryText: "Some query" } } TxId: 103 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2026-01-08T15:36:25.332615Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_create_view.cpp:118: [72057594046678944] TCreateView Propose, path: /MyRoot/ThirdView, opId: 103:0 2026-01-08T15:36:25.332684Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_create_view.cpp:124: [72057594046678944] TCreateView Propose, path: /MyRoot/ThirdView, opId: 103:0, viewDescription: Name: "ThirdView" QueryText: "Some query" 2026-01-08T15:36:25.332839Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:442: AttachChild: child attached as only one child to the parent, parent id: [OwnerId: 72057594046678944, LocalPathId: 1], parent name: MyRoot, child name: ThirdView, child id: [OwnerId: 72057594046678944, LocalPathId: 39], at schemeshard: 72057594046678944 2026-01-08T15:36:25.332927Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:600: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 39] was 0 2026-01-08T15:36:25.332970Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5553: CreateTx for txid 103:0 type: TxCreateView target path: [OwnerId: 72057594046678944, LocalPathId: 39] source path: 2026-01-08T15:36:25.333020Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:127: IgniteOperation, opId: 103:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2026-01-08T15:36:25.348976Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:460: TTxOperationPropose Complete, txId: 103, response: Status: StatusAccepted TxId: 103 SchemeshardId: 72057594046678944 PathId: 39, at schemeshard: 72057594046678944 2026-01-08T15:36:25.349238Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 103, database: /MyRoot, subject: , status: StatusAccepted, operation: CREATE VIEW, path: /MyRoot/ThirdView 2026-01-08T15:36:25.349522Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:494: TTxOperationProgress Execute, operationId: 103:0, at schemeshard: 72057594046678944 2026-01-08T15:36:25.349622Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_create_view.cpp:30: [72057594046678944] TCreateView::TPropose, opId: 103:0 ProgressState 2026-01-08T15:36:25.349681Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1723: TOperation IsReadyToPropose , TxId: 103 ready parts: 1/1 2026-01-08T15:36:25.349801Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1792: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 103 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2026-01-08T15:36:25.354583Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:665: Send tablet 
strongly msg operationId: 103:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:103 msg type: 269090816 2026-01-08T15:36:25.354825Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1824: TOperation RegisterRelationByTabletId, TxId: 103, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 103 at step: 5000039 FAKE_COORDINATOR: advance: minStep5000039 State->FrontStep: 5000038 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 103 at step: 5000039 2026-01-08T15:36:25.355692Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:685: TTxOperationPlanStep Execute, stepId: 5000039, transactions count in step: 1, at schemeshard: 72057594046678944 2026-01-08T15:36:25.355838Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:689: TTxOperationPlanStep Execute, message: Transactions { TxId: 103 Coordinator: 72057594046316545 AckTo { RawX1: 136 RawX2: 4294969454 } } Step: 5000039 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2026-01-08T15:36:25.355900Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_create_view.cpp:45: [72057594046678944] TCreateView::TPropose, opId: 103:0 HandleReply TEvPrivate::TEvOperationPlan, step: 5000039 2026-01-08T15:36:25.356056Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2700: Change state for txid 103:0 128 -> 240 2026-01-08T15:36:25.356249Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:600: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 3 2026-01-08T15:36:25.356335Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:600: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 39] was 1 FAKE_COORDINATOR: Erasing txId 103 2026-01-08T15:36:25.364167Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2026-01-08T15:36:25.364242Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 103, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2026-01-08T15:36:25.364461Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 103, path id: [OwnerId: 72057594046678944, LocalPathId: 39] 2026-01-08T15:36:25.364577Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2026-01-08T15:36:25.364616Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:835:2793], at schemeshard: 72057594046678944, txId: 103, path id: 1 2026-01-08T15:36:25.364686Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:835:2793], at schemeshard: 72057594046678944, txId: 103, path id: 39 2026-01-08T15:36:25.365109Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:494: TTxOperationProgress Execute, operationId: 103:0, at schemeshard: 72057594046678944 2026-01-08T15:36:25.365165Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:497: [72057594046678944] TDone opId# 103:0 ProgressState 2026-01-08T15:36:25.365264Z node 1 :FLAT_TX_SCHEMESHARD INFO: 
schemeshard__operation_side_effects.cpp:928: Part operation is done id#103:0 progress is 1/1 2026-01-08T15:36:25.365298Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1714: TOperation IsReadyToDone TxId: 103 ready parts: 1/1 2026-01-08T15:36:25.365349Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:928: Part operation is done id#103:0 progress is 1/1 2026-01-08T15:36:25.365381Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1714: TOperation IsReadyToDone TxId: 103 ready parts: 1/1 2026-01-08T15:36:25.365420Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1679: TOperation IsReadyToNotify, TxId: 103, ready parts: 1/1, is published: false 2026-01-08T15:36:25.365468Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1714: TOperation IsReadyToDone TxId: 103 ready parts: 1/1 2026-01-08T15:36:25.365502Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:995: Operation and all the parts is done, operation id: 103:0 2026-01-08T15:36:25.365538Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5577: RemoveTx for txid 103:0 2026-01-08T15:36:25.365643Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:611: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 39] was 2 2026-01-08T15:36:25.365682Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:1004: Publication still in progress, tx: 103, publications: 2, subscribers: 0 2026-01-08T15:36:25.365722Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:1011: Publication details: tx: 103, [OwnerId: 72057594046678944, LocalPathId: 1], 8 2026-01-08T15:36:25.365759Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:1011: Publication details: tx: 103, [OwnerId: 72057594046678944, LocalPathId: 39], 2 2026-01-08T15:36:25.367294Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6274: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 4 LocalPathId: 1 Version: 8 PathOwnerId: 72057594046678944, cookie: 103 2026-01-08T15:36:25.367437Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 4 LocalPathId: 1 Version: 8 PathOwnerId: 72057594046678944, cookie: 103 2026-01-08T15:36:25.367490Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 2, at schemeshard: 72057594046678944, txId: 103 2026-01-08T15:36:25.367529Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 103, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], version: 8 2026-01-08T15:36:25.367569Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:611: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 4 2026-01-08T15:36:25.368599Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6274: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 4 LocalPathId: 39 Version: 2 PathOwnerId: 72057594046678944, cookie: 103 2026-01-08T15:36:25.368708Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 
Generation: 4 LocalPathId: 39 Version: 2 PathOwnerId: 72057594046678944, cookie: 103 2026-01-08T15:36:25.368753Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046678944, txId: 103 2026-01-08T15:36:25.368782Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 103, pathId: [OwnerId: 72057594046678944, LocalPathId: 39], version: 2 2026-01-08T15:36:25.368824Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:611: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 39] was 1 2026-01-08T15:36:25.368907Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 103, subscribers: 0 2026-01-08T15:36:25.373088Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 103 2026-01-08T15:36:25.380610Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 103 TestModificationResult got TxId: 103, wait until txId: 103 |88.4%| [TM] {BAZEL_UPLOAD} ydb/core/tx/schemeshard/ut_view/unittest ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_view/unittest >> TSchemeShardViewTest::EmptyQueryText [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:119:2058] recipient: [1:113:2143] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:119:2058] recipient: [1:113:2143] Leader for TabletID 72057594046678944 is [1:130:2154] sender: [1:131:2058] recipient: [1:113:2143] 2026-01-08T15:36:24.444850Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7938: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2026-01-08T15:36:24.444978Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7966: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2026-01-08T15:36:24.445016Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7852: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2026-01-08T15:36:24.445063Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7868: OperationsProcessing config: using default configuration 2026-01-08T15:36:24.445115Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7874: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2026-01-08T15:36:24.445154Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7874: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2026-01-08T15:36:24.445216Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7998: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2026-01-08T15:36:24.445287Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_shred_manager.cpp:39: [RootShredManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, ShredInterval# 604800.000000s, ShredBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, 
IsManualStartup# false 2026-01-08T15:36:24.446416Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:8069: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2026-01-08T15:36:24.446711Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2026-01-08T15:36:24.547387Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7755: Cannot subscribe to console configs 2026-01-08T15:36:24.547440Z node 1 :IMPORT WARN: schemeshard_import.cpp:440: Table profiles were not loaded 2026-01-08T15:36:24.565084Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2026-01-08T15:36:24.566011Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2026-01-08T15:36:24.566206Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2026-01-08T15:36:24.572277Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2026-01-08T15:36:24.572491Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:464: Clear TempDirsState with owners number: 0 2026-01-08T15:36:24.573149Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1383: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2026-01-08T15:36:24.573363Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2026-01-08T15:36:24.575232Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:160: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2026-01-08T15:36:24.575426Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_shred_manager.cpp:84: [RootShredManager] Stop 2026-01-08T15:36:24.576551Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2026-01-08T15:36:24.576624Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2026-01-08T15:36:24.576728Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2026-01-08T15:36:24.576773Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2026-01-08T15:36:24.576813Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:213: TTxServerlessStorageBilling.Complete 2026-01-08T15:36:24.577009Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:7111: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2026-01-08T15:36:24.745923Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2214] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot" OperationType: ESchemeOpMkDir MkDir { Name: ".sys" } Internal: true FailOnExist: false } TxId: 281474976710657 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:36:24.747010Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2214] at 
schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "auth_permissions" Type: EAuthPermissions } } TxId: 281474976710658 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:36:24.747138Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2214] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "auth_effective_permissions" Type: EAuthEffectivePermissions } } TxId: 281474976710659 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:36:24.747212Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2214] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "auth_users" Type: EAuthUsers } } TxId: 281474976710660 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:36:24.747306Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2214] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "pg_tables" Type: EPgTables } } TxId: 281474976710661 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:36:24.747367Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2214] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "ds_pdisks" Type: EPDisks } } TxId: 281474976710662 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:36:24.747432Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2214] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "auth_group_members" Type: EAuthGroupMembers } } TxId: 281474976710663 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:36:24.747499Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2214] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "top_partitions_one_minute" Type: ETopPartitionsByCpuOneMinute } } TxId: 281474976710664 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:36:24.747577Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2214] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "top_queries_by_request_units_one_hour" 
Type: ETopQueriesByRequestUnitsOneHour } } TxId: 281474976710665 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:36:24.747684Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2214] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "ds_vslots" Type: EVSlots } } TxId: 281474976710666 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:36:24.747756Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2214] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "top_partitions_by_tli_one_hour" Type: ETopPartitionsByTliOneHour } } TxId: 281474976710667 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:36:24.747844Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2214] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "nodes" Type: ENodes } } TxId: 281474976710668 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:36:24.747941Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2214] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "top_queries_by_cpu_time_one_hour" Type: ETopQueriesByCpuTimeOneHour } } TxId: 281474976710669 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:36:24.748015Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2214] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "top_queries_by_read_bytes_one_hour" Type: ETopQueriesByReadBytesOneHour } } TxId: 281474976710670 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:36:24.748120Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2214] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "top_queries_by_read_bytes_one_minute" Type: ETopQueriesByR ... 
:2660] Become StateWork (SchemeCache [1:676:2665]) 2026-01-08T15:36:25.795503Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:377: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/MyRoot" OperationType: ESchemeOpCreateView CreateView { Name: "MyView" QueryText: "" } } TxId: 101 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2026-01-08T15:36:25.795767Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_create_view.cpp:118: [72057594046678944] TCreateView Propose, path: /MyRoot/MyView, opId: 101:0 2026-01-08T15:36:25.795853Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_create_view.cpp:124: [72057594046678944] TCreateView Propose, path: /MyRoot/MyView, opId: 101:0, viewDescription: Name: "MyView" QueryText: "" 2026-01-08T15:36:25.796019Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:442: AttachChild: child attached as only one child to the parent, parent id: [OwnerId: 72057594046678944, LocalPathId: 1], parent name: MyRoot, child name: MyView, child id: [OwnerId: 72057594046678944, LocalPathId: 38], at schemeshard: 72057594046678944 2026-01-08T15:36:25.796085Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:600: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 38] was 0 2026-01-08T15:36:25.796127Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5553: CreateTx for txid 101:0 type: TxCreateView target path: [OwnerId: 72057594046678944, LocalPathId: 38] source path: 2026-01-08T15:36:25.796177Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:127: IgniteOperation, opId: 101:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2026-01-08T15:36:25.797327Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:212: actor# [1:671:2660] HANDLE TEvClientConnected success connect from tablet# 72057594046447617 2026-01-08T15:36:25.799682Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:460: TTxOperationPropose Complete, txId: 101, response: Status: StatusAccepted TxId: 101 SchemeshardId: 72057594046678944 PathId: 38, at schemeshard: 72057594046678944 2026-01-08T15:36:25.799964Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 101, database: /MyRoot, subject: , status: StatusAccepted, operation: CREATE VIEW, path: /MyRoot/MyView 2026-01-08T15:36:25.800185Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:494: TTxOperationProgress Execute, operationId: 101:0, at schemeshard: 72057594046678944 2026-01-08T15:36:25.800240Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_create_view.cpp:30: [72057594046678944] TCreateView::TPropose, opId: 101:0 ProgressState 2026-01-08T15:36:25.800289Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1723: TOperation IsReadyToPropose , TxId: 101 ready parts: 1/1 2026-01-08T15:36:25.800404Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1792: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 101 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2026-01-08T15:36:25.802472Z node 1 :TX_PROXY DEBUG: client.cpp:89: Handle TEvAllocateResult ACCEPTED RangeBegin# 281474976715656 RangeEnd# 281474976720656 txAllocator# 72057594046447617 2026-01-08T15:36:25.803434Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:665: Send tablet 
strongly msg operationId: 101:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:101 msg type: 269090816 2026-01-08T15:36:25.803563Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1824: TOperation RegisterRelationByTabletId, TxId: 101, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 101 at step: 5000038 FAKE_COORDINATOR: advance: minStep5000038 State->FrontStep: 5000037 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 101 at step: 5000038 2026-01-08T15:36:25.803927Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:685: TTxOperationPlanStep Execute, stepId: 5000038, transactions count in step: 1, at schemeshard: 72057594046678944 2026-01-08T15:36:25.804065Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:689: TTxOperationPlanStep Execute, message: Transactions { TxId: 101 Coordinator: 72057594046316545 AckTo { RawX1: 138 RawX2: 4294969455 } } Step: 5000038 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2026-01-08T15:36:25.804131Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_create_view.cpp:45: [72057594046678944] TCreateView::TPropose, opId: 101:0 HandleReply TEvPrivate::TEvOperationPlan, step: 5000038 2026-01-08T15:36:25.804278Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2700: Change state for txid 101:0 128 -> 240 2026-01-08T15:36:25.804446Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:600: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 2 2026-01-08T15:36:25.804510Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:600: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 38] was 1 2026-01-08T15:36:25.807228Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2026-01-08T15:36:25.807301Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 101, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2026-01-08T15:36:25.807534Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 101, path id: [OwnerId: 72057594046678944, LocalPathId: 38] 2026-01-08T15:36:25.807672Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2026-01-08T15:36:25.807720Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:210:2210], at schemeshard: 72057594046678944, txId: 101, path id: 1 2026-01-08T15:36:25.807768Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:210:2210], at schemeshard: 72057594046678944, txId: 101, path id: 38 FAKE_COORDINATOR: Erasing txId 101 2026-01-08T15:36:25.808083Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:494: TTxOperationProgress Execute, operationId: 101:0, at schemeshard: 72057594046678944 2026-01-08T15:36:25.808135Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:497: [72057594046678944] TDone opId# 101:0 ProgressState 2026-01-08T15:36:25.808232Z node 1 :FLAT_TX_SCHEMESHARD INFO: 
schemeshard__operation_side_effects.cpp:928: Part operation is done id#101:0 progress is 1/1 2026-01-08T15:36:25.808289Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1714: TOperation IsReadyToDone TxId: 101 ready parts: 1/1 2026-01-08T15:36:25.808331Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:928: Part operation is done id#101:0 progress is 1/1 2026-01-08T15:36:25.808384Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1714: TOperation IsReadyToDone TxId: 101 ready parts: 1/1 2026-01-08T15:36:25.808424Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1679: TOperation IsReadyToNotify, TxId: 101, ready parts: 1/1, is published: false 2026-01-08T15:36:25.808464Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1714: TOperation IsReadyToDone TxId: 101 ready parts: 1/1 2026-01-08T15:36:25.808500Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:995: Operation and all the parts is done, operation id: 101:0 2026-01-08T15:36:25.808529Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5577: RemoveTx for txid 101:0 2026-01-08T15:36:25.808606Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:611: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 38] was 2 2026-01-08T15:36:25.808646Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:1004: Publication still in progress, tx: 101, publications: 2, subscribers: 0 2026-01-08T15:36:25.808706Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:1011: Publication details: tx: 101, [OwnerId: 72057594046678944, LocalPathId: 1], 7 2026-01-08T15:36:25.808744Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:1011: Publication details: tx: 101, [OwnerId: 72057594046678944, LocalPathId: 38], 2 2026-01-08T15:36:25.809598Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6274: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 7 PathOwnerId: 72057594046678944, cookie: 101 2026-01-08T15:36:25.809711Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 7 PathOwnerId: 72057594046678944, cookie: 101 2026-01-08T15:36:25.809750Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 2, at schemeshard: 72057594046678944, txId: 101 2026-01-08T15:36:25.809792Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 101, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], version: 7 2026-01-08T15:36:25.809840Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:611: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 3 2026-01-08T15:36:25.810942Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6274: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 38 Version: 2 PathOwnerId: 72057594046678944, cookie: 101 2026-01-08T15:36:25.811053Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 
Generation: 2 LocalPathId: 38 Version: 2 PathOwnerId: 72057594046678944, cookie: 101 2026-01-08T15:36:25.811089Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046678944, txId: 101 2026-01-08T15:36:25.811128Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 101, pathId: [OwnerId: 72057594046678944, LocalPathId: 38], version: 2 2026-01-08T15:36:25.811167Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:611: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 38] was 1 2026-01-08T15:36:25.811234Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 101, subscribers: 0 2026-01-08T15:36:25.814389Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 101 2026-01-08T15:36:25.815439Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 101 TestModificationResult got TxId: 101, wait until txId: 101 |88.4%| [TM] {BAZEL_UPLOAD} ydb/core/tx/schemeshard/ut_view/unittest >> TCacheTest::Recreate [GOOD] >> TCacheTest::SysLocks >> TCacheTest::List >> TCacheTest::SysLocks [GOOD] >> TCacheTest::Attributes >> TCacheTest::SystemViews |88.4%| [TA] $(B)/ydb/core/tx/schemeshard/ut_view/test-results/unittest/{meta.json ... results_accumulator.log} |88.4%| [TA] {BAZEL_UPLOAD} $(B)/ydb/core/tx/schemeshard/ut_view/test-results/unittest/{meta.json ... 
results_accumulator.log} >> TConsoleInMemoryConfigSubscriptionTests::TestSubscriptionClientReconnectsOnConnectionLoose [GOOD] >> TConsoleInMemoryConfigSubscriptionTests::TestNoYamlWithoutFlag >> TCacheTest::MigrationLostMessage [GOOD] >> TCacheTest::MigrationUndo >> KqpPg::TempTablesSessionsIsolation [FAIL] >> KqpPg::TempTablesDrop >> TConsoleConfigTests::TestConfigureOrderConflicts [GOOD] >> TConsoleConfigTests::TestGetItems ------- [TM] {asan, default-linux-x86_64, release} ydb/library/ycloud/impl/ut/unittest >> TServiceAccountServiceTest::Get [GOOD] Test command err: 2026-01-08T15:36:18.609509Z node 1 :METADATA_PROVIDER WARN: log.cpp:841: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7593015039386460756:2073];send_to=[0:7307199536658146131:7762515]; 2026-01-08T15:36:18.609590Z node 1 :METADATA_PROVIDER ERROR: log.cpp:841: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2026-01-08T15:36:18.932078Z node 1 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: LookupError, path: Root/.metadata/script_executions 2026-01-08T15:36:18.971464Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2026-01-08T15:36:18.971570Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2026-01-08T15:36:19.041143Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2026-01-08T15:36:19.104955Z node 1 :IMPORT WARN: schemeshard_import.cpp:440: Table profiles were not loaded 2026-01-08T15:36:19.130402Z node 1 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: PathErrorUnknown, path: Root/.metadata/script_executions 2026-01-08T15:36:19.337618Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2026-01-08T15:36:22.731082Z node 2 :METADATA_PROVIDER WARN: log.cpp:841: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7593015053562527960:2145];send_to=[0:7307199536658146131:7762515]; 2026-01-08T15:36:22.731329Z node 2 :METADATA_PROVIDER ERROR: log.cpp:841: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2026-01-08T15:36:22.829194Z node 2 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: LookupError, path: Root/.metadata/script_executions 2026-01-08T15:36:23.018603Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1374: Notification cookie mismatch for subscription [2:7593015053562527843:2081] 1767886582707717 != 1767886582707720 2026-01-08T15:36:23.029600Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2026-01-08T15:36:23.029968Z 
node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2026-01-08T15:36:23.030667Z node 2 :IMPORT WARN: schemeshard_import.cpp:440: Table profiles were not loaded 2026-01-08T15:36:23.030827Z node 2 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: LookupError, path: Root/.metadata/script_executions 2026-01-08T15:36:23.037387Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2026-01-08T15:36:23.419900Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2026-01-08T15:36:23.431138Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:123: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2026-01-08T15:36:23.504116Z node 2 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: PathErrorUnknown, path: Root/.metadata/script_executions |88.4%| [TM] {BAZEL_UPLOAD} ydb/library/ycloud/impl/ut/unittest ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/close_with_load/unittest >> KqpService::CloseSessionsWithLoad [GOOD] Test command err: Trying to start YDB, gRPC: 24269, MsgBus: 16846 2026-01-08T15:36:02.956003Z node 1 :METADATA_PROVIDER WARN: log.cpp:841: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7593014970068854105:2068];send_to=[0:7307199536658146131:7762515]; 2026-01-08T15:36:02.956071Z node 1 :METADATA_PROVIDER ERROR: log.cpp:841: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2026-01-08T15:36:03.477303Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2026-01-08T15:36:03.477478Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2026-01-08T15:36:03.536872Z node 1 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: LookupError, path: Root/.metadata/script_executions 2026-01-08T15:36:03.545084Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2026-01-08T15:36:03.613065Z node 1 :IMPORT WARN: schemeshard_import.cpp:440: Table profiles were not loaded 2026-01-08T15:36:03.613833Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1374: Notification cookie mismatch for subscription [1:7593014970068854077:2081] 1767886562949283 != 1767886562949286 2026-01-08T15:36:03.694778Z node 1 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: PathErrorUnknown, path: Root/.metadata/script_executions 2026-01-08T15:36:03.776255Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, 
broken or outdated, will use file: (empty maybe) 2026-01-08T15:36:03.776283Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2026-01-08T15:36:03.776293Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2026-01-08T15:36:03.776396Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2026-01-08T15:36:03.980925Z node 1 :TX_CONVEYOR ERROR: log.cpp:841: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2026-01-08T15:36:04.961958Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2026-01-08T15:36:05.221534Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:36:05.655988Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:36:05.844412Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:36:05.929231Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:36:06.923625Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7593014987248725135:2399], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:36:06.923755Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:263: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:36:06.924043Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7593014987248725145:2400], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:36:06.924103Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:263: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:36:07.450483Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:36:07.521867Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:36:07.568328Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:36:07.604635Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:36:07.690260Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:36:07.763147Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:36:07.845542Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:36:07.905832Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710669:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:36:07.958912Z node 1 :METADATA_PROVIDER ERROR: log.cpp:841: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7593014970068854105:2068];send_to=[0:7307199536658146131:7762515]; 2026-01-08T15:36:07.959169Z node 1 :METADATA_PROVIDER ERROR: log.cpp:841: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2026-01-08T15:36:08.031374Z node 1 :KQP_WORKLOAD_SERVICE WARN: 
scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7593014995838660615:2484], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:36:08.031476Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:263: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:36:08.031913Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7593014995838660621:2488], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:36:08.031950Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7593014995838660620:2487], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:36:08.031979Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:263: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:36:08.037278Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710670:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2026-01-08T15:36:08.095933Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7593014995838660624:2489], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710670 completed, doublechecking } 2026-01-08T15:36:08.168612Z node 1 :TX_PROXY ERROR: schemereq.cpp:624: Actor# [1:7593014995838660678:3774] txid# 281474976710671, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 53], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2026-01-08T15:36:10.420904Z node 1 :KQP_SESSION DEBUG: {KQPSA@kqp_session_actor.cpp:229} SessionId: ydb://session/3?node_id=1&id=NGE4NDBhOTktODFiZDZhZjgtMzVjZTI5YWUtMmJmMmIzNjc=, ActorId: [0:0:0], ActorState: unknown state, Create session actor ydb_session_id# NGE4NDBhOTktODFiZDZhZjgtMzVjZTI5YWUtMmJmMmIzNjc= temp_dir_name# e85cf95e-4743-188f-56dd-13a3af41 ... \"ReadColumns\":[\"Data\",\"Key\",\"Text\"],\"E-Cost\":\"0\",\"ReadRangesExpectedSize\":\"1\"}],\"Node Type\":\"TableRangeScan\"}],\"Operators\":[{\"Inputs\":[{\"ExternalPlanNodeId\":21}],\"Name\":\"Limit\",\"Limit\":\"1001\"}],\"Node Type\":\"Limit\"}],\"Node Type\":\"UnionAll\",\"PlanNodeType\":\"Connection\"}],\"Operators\":[{\"Inputs\":[{\"ExternalPlanNodeId\":23}],\"Name\":\"Limit\",\"Limit\":\"1001\"}],\"Node Type\":\"Limit\"}],\"Node Type\":\"ResultSet_1_1\",\"PlanNodeType\":\"ResultSet\"},{\"PlanNodeId\":20,\"Plans\":[{\"PlanNodeId\":19,\"Plans\":[{\"PlanNodeId\":18,\"Plans\":[{\"PlanNodeId\":17,\"Plans\":[{\"Tables\":[\"EightShard\"],\"PlanNodeId\":16,\"Operators\":[{\"Scan\":\"Sequential\",\"E-Size\":\"0\",\"ReadLimit\":\"1001\",\"Name\":\"TableRangeScan\",\"Inputs\":[],\"Path\":\"\\/Root\\/EightShard\",\"ReadRangesPointPrefixLen\":\"1\",\"E-Rows\":\"0\",\"Table\":\"EightShard\",\"ReadRangesKeys\":[\"Key\"],\"ReadColumns\":[\"Data\",\"Key\",\"Text\"],\"E-Cost\":\"0\",\"ReadRangesExpectedSize\":\"1\"}],\"Node Type\":\"TableRangeScan\"}],\"Operators\":[{\"Inputs\":[{\"ExternalPlanNodeId\":16}],\"Name\":\"Limit\",\"Limit\":\"1001\"}],\"Node Type\":\"Limit\"}],\"Node Type\":\"UnionAll\",\"PlanNodeType\":\"Connection\"}],\"Operators\":[{\"Inputs\":[{\"ExternalPlanNodeId\":18}],\"Name\":\"Limit\",\"Limit\":\"1001\"}],\"Node Type\":\"Limit\"}],\"Node Type\":\"ResultSet_1_2\",\"PlanNodeType\":\"ResultSet\"},{\"PlanNodeId\":15,\"Plans\":[{\"PlanNodeId\":14,\"Plans\":[{\"PlanNodeId\":13,\"Plans\":[{\"PlanNodeId\":12,\"Plans\":[{\"Tables\":[\"EightShard\"],\"PlanNodeId\":11,\"Operators\":[{\"Scan\":\"Sequential\",\"E-Size\":\"0\",\"ReadLimit\":\"1001\",\"Name\":\"TableRangeScan\",\"Inputs\":[],\"Path\":\"\\/Root\\/EightShard\",\"ReadRangesPointPrefixLen\":\"1\",\"E-Rows\":\"0\",\"Table\":\"EightShard\",\"ReadRangesKeys\":[\"Key\"],\"ReadColumns\":[\"Data\",\"Key\",\"Text\"],\"E-Cost\":\"0\",\"ReadRangesExpectedSize\":\"1\"}],\"Node Type\":\"TableRangeScan\"}],\"Operators\":[{\"Inputs\":[{\"ExternalPlanNodeId\":11}],\"Name\":\"Limit\",\"Limit\":\"1001\"}],\"Node Type\":\"Limit\"}],\"Node Type\":\"UnionAll\",\"PlanNodeType\":\"Connection\"}],\"Operators\":[{\"Inputs\":[{\"ExternalPlanNodeId\":13}],\"Name\":\"Limit\",\"Limit\":\"1001\"}],\"Node Type\":\"Limit\"}],\"Node 
Type\":\"ResultSet_1_3\",\"PlanNodeType\":\"ResultSet\"},{\"PlanNodeId\":10,\"Plans\":[{\"PlanNodeId\":9,\"Plans\":[{\"PlanNodeId\":8,\"Plans\":[{\"PlanNodeId\":7,\"Plans\":[{\"Tables\":[\"EightShard\"],\"PlanNodeId\":6,\"Operators\":[{\"Scan\":\"Sequential\",\"E-Size\":\"0\",\"ReadLimit\":\"1001\",\"Name\":\"TableRangeScan\",\"Inputs\":[],\"Path\":\"\\/Root\\/EightShard\",\"ReadRangesPointPrefixLen\":\"1\",\"E-Rows\":\"0\",\"Table\":\"EightShard\",\"ReadRangesKeys\":[\"Key\"],\"ReadColumns\":[\"Data\",\"Key\",\"Text\"],\"E-Cost\":\"0\",\"ReadRangesExpectedSize\":\"1\"}],\"Node Type\":\"TableRangeScan\"}],\"Operators\":[{\"Inputs\":[{\"ExternalPlanNodeId\":6}],\"Name\":\"Limit\",\"Limit\":\"1001\"}],\"Node Type\":\"Limit\"}],\"Node Type\":\"UnionAll\",\"PlanNodeType\":\"Connection\"}],\"Operators\":[{\"Inputs\":[{\"ExternalPlanNodeId\":8}],\"Name\":\"Limit\",\"Limit\":\"1001\"}],\"Node Type\":\"Limit\"}],\"Node Type\":\"ResultSet_1_4\",\"PlanNodeType\":\"ResultSet\"},{\"PlanNodeId\":5,\"Plans\":[{\"PlanNodeId\":4,\"Plans\":[{\"PlanNodeId\":3,\"Plans\":[{\"PlanNodeId\":2,\"Plans\":[{\"Tables\":[\"EightShard\"],\"PlanNodeId\":1,\"Operators\":[{\"Scan\":\"Sequential\",\"E-Size\":\"0\",\"ReadLimit\":\"1001\",\"Name\":\"TableRangeScan\",\"Inputs\":[],\"Path\":\"\\/Root\\/EightShard\",\"ReadRangesPointPrefixLen\":\"1\",\"E-Rows\":\"0\",\"Table\":\"EightShard\",\"ReadRangesKeys\":[\"Key\"],\"ReadColumns\":[\"Data\",\"Key\",\"Text\"],\"E-Cost\":\"0\",\"ReadRangesExpectedSize\":\"1\"}],\"Node Type\":\"TableRangeScan\"}],\"Operators\":[{\"Inputs\":[{\"ExternalPlanNodeId\":1}],\"Name\":\"Limit\",\"Limit\":\"1001\"}],\"Node Type\":\"Limit\"}],\"Node Type\":\"UnionAll\",\"PlanNodeType\":\"Connection\"}],\"Operators\":[{\"Inputs\":[{\"ExternalPlanNodeId\":3}],\"Name\":\"Limit\",\"Limit\":\"1001\"}],\"Node Type\":\"Limit\"}],\"Node Type\":\"ResultSet_1_5\",\"PlanNodeType\":\"ResultSet\"}],\"Node Type\":\"Query\",\"Stats\":{\"ResourcePoolId\":\"default\"},\"PlanNodeType\":\"Query\"},\"meta\":{\"version\":\"0.2\",\"type\":\"query\"},\"tables\":[{\"name\":\"\\/Root\\/EightShard\",\"reads\":[{\"columns\":[\"Data\",\"Key\",\"Text\"],\"limit\":\"1001\",\"type\":\"Scan\"},{\"columns\":[\"Data\",\"Key\",\"Text\"],\"limit\":\"1001\",\"type\":\"Scan\"},{\"columns\":[\"Data\",\"Key\",\"Text\"],\"limit\":\"1001\",\"type\":\"Scan\"},{\"columns\":[\"Data\",\"Key\",\"Text\"],\"limit\":\"1001\",\"type\":\"Scan\"},{\"columns\":[\"Data\",\"Key\",\"Text\"],\"limit\":\"1001\",\"type\":\"Scan\"},{\"columns\":[\"Data\",\"Key\",\"Text\"],\"limit\":\"1001\",\"type\":\"Scan\"}],\"writes\":[{\"columns\":[\"Key\",\"Text\"],\"type\":\"MultiUpsert\"}]}],\"SimplifiedPlan\":{\"PlanNodeId\":0,\"Plans\":[{\"PlanNodeId\":1,\"Operators\":[{\"Name\":\"Upsert\",\"Table\":\"EightShard\",\"SinkType\":\"KqpTableSink\"}],\"Node Type\":\"Upsert\"},{\"PlanNodeId\":2,\"Plans\":[{\"PlanNodeId\":3,\"Plans\":[{\"PlanNodeId\":5,\"Plans\":[{\"PlanNodeId\":6,\"Operators\":[{\"E-Size\":\"0\",\"ReadLimit\":\"1001\",\"Name\":\"TableRangeScan\",\"E-Rows\":\"0\",\"Table\":\"EightShard\",\"ReadRangesKeys\":[\"Key\"],\"ReadColumns\":[\"Data\",\"Key\",\"Text\"],\"E-Cost\":\"0\",\"ReadRangesExpectedSize\":\"1\"}],\"Node Type\":\"TableRangeScan\"}],\"Operators\":[{\"Name\":\"Limit\",\"Limit\":\"1001\"}],\"Node Type\":\"Limit\"}],\"Operators\":[{\"Name\":\"Limit\",\"Limit\":\"1001\"}],\"Node Type\":\"Limit\"}],\"Node 
Type\":\"ResultSet_1_0\",\"PlanNodeType\":\"ResultSet\"},{\"PlanNodeId\":7,\"Plans\":[{\"PlanNodeId\":8,\"Plans\":[{\"PlanNodeId\":10,\"Plans\":[{\"PlanNodeId\":11,\"Operators\":[{\"E-Size\":\"0\",\"ReadLimit\":\"1001\",\"Name\":\"TableRangeScan\",\"E-Rows\":\"0\",\"Table\":\"EightShard\",\"ReadRangesKeys\":[\"Key\"],\"ReadColumns\":[\"Data\",\"Key\",\"Text\"],\"E-Cost\":\"0\",\"ReadRangesExpectedSize\":\"1\"}],\"Node Type\":\"TableRangeScan\"}],\"Operators\":[{\"Name\":\"Limit\",\"Limit\":\"1001\"}],\"Node Type\":\"Limit\"}],\"Operators\":[{\"Name\":\"Limit\",\"Limit\":\"1001\"}],\"Node Type\":\"Limit\"}],\"Node Type\":\"ResultSet_1_1\",\"PlanNodeType\":\"ResultSet\"},{\"PlanNodeId\":12,\"Plans\":[{\"PlanNodeId\":13,\"Plans\":[{\"PlanNodeId\":15,\"Plans\":[{\"PlanNodeId\":16,\"Operators\":[{\"E-Size\":\"0\",\"ReadLimit\":\"1001\",\"Name\":\"TableRangeScan\",\"E-Rows\":\"0\",\"Table\":\"EightShard\",\"ReadRangesKeys\":[\"Key\"],\"ReadColumns\":[\"Data\",\"Key\",\"Text\"],\"E-Cost\":\"0\",\"ReadRangesExpectedSize\":\"1\"}],\"Node Type\":\"TableRangeScan\"}],\"Operators\":[{\"Name\":\"Limit\",\"Limit\":\"1001\"}],\"Node Type\":\"Limit\"}],\"Operators\":[{\"Name\":\"Limit\",\"Limit\":\"1001\"}],\"Node Type\":\"Limit\"}],\"Node Type\":\"ResultSet_1_2\",\"PlanNodeType\":\"ResultSet\"},{\"PlanNodeId\":17,\"Plans\":[{\"PlanNodeId\":18,\"Plans\":[{\"PlanNodeId\":20,\"Plans\":[{\"PlanNodeId\":21,\"Operators\":[{\"E-Size\":\"0\",\"ReadLimit\":\"1001\",\"Name\":\"TableRangeScan\",\"E-Rows\":\"0\",\"Table\":\"EightShard\",\"ReadRangesKeys\":[\"Key\"],\"ReadColumns\":[\"Data\",\"Key\",\"Text\"],\"E-Cost\":\"0\",\"ReadRangesExpectedSize\":\"1\"}],\"Node Type\":\"TableRangeScan\"}],\"Operators\":[{\"Name\":\"Limit\",\"Limit\":\"1001\"}],\"Node Type\":\"Limit\"}],\"Operators\":[{\"Name\":\"Limit\",\"Limit\":\"1001\"}],\"Node Type\":\"Limit\"}],\"Node Type\":\"ResultSet_1_3\",\"PlanNodeType\":\"ResultSet\"},{\"PlanNodeId\":22,\"Plans\":[{\"PlanNodeId\":23,\"Plans\":[{\"PlanNodeId\":25,\"Plans\":[{\"PlanNodeId\":26,\"Operators\":[{\"E-Size\":\"0\",\"ReadLimit\":\"1001\",\"Name\":\"TableRangeScan\",\"E-Rows\":\"0\",\"Table\":\"EightShard\",\"ReadRangesKeys\":[\"Key\"],\"ReadColumns\":[\"Data\",\"Key\",\"Text\"],\"E-Cost\":\"0\",\"ReadRangesExpectedSize\":\"1\"}],\"Node Type\":\"TableRangeScan\"}],\"Operators\":[{\"Name\":\"Limit\",\"Limit\":\"1001\"}],\"Node Type\":\"Limit\"}],\"Operators\":[{\"Name\":\"Limit\",\"Limit\":\"1001\"}],\"Node Type\":\"Limit\"}],\"Node Type\":\"ResultSet_1_4\",\"PlanNodeType\":\"ResultSet\"},{\"PlanNodeId\":27,\"Plans\":[{\"PlanNodeId\":28,\"Plans\":[{\"PlanNodeId\":30,\"Plans\":[{\"PlanNodeId\":31,\"Operators\":[{\"E-Size\":\"0\",\"ReadLimit\":\"1001\",\"Name\":\"TableRangeScan\",\"E-Rows\":\"0\",\"Table\":\"EightShard\",\"ReadRangesKeys\":[\"Key\"],\"ReadColumns\":[\"Data\",\"Key\",\"Text\"],\"E-Cost\":\"0\",\"ReadRangesExpectedSize\":\"1\"}],\"Node Type\":\"TableRangeScan\"}],\"Operators\":[{\"Name\":\"Limit\",\"Limit\":\"1001\"}],\"Node Type\":\"Limit\"}],\"Operators\":[{\"Name\":\"Limit\",\"Limit\":\"1001\"}],\"Node Type\":\"Limit\"}],\"Node Type\":\"ResultSet_1_5\",\"PlanNodeType\":\"ResultSet\"}],\"Node Type\":\"Query\",\"OptimizerStats\":{\"EquiJoinsCount\":0,\"JoinsCount\":0},\"PlanNodeType\":\"Query\"}}","query_id":"dc71185e-b3fbfdd6-6e9d2290-3bc0b869","version":"1.0"} 2026-01-08T15:36:18.100394Z node 1 :KQP_COMPILE_ACTOR DEBUG: kqp_compile_actor.cpp:616: Compilation successful, self: [1:7593015021608465172:2768], duration: 3.463827s 2026-01-08T15:36:18.100444Z node 1 
:KQP_COMPILE_ACTOR DEBUG: kqp_compile_actor.cpp:437: Send response, self: [1:7593015021608465172:2768], owner: [1:7593014987248725096:2385], status: SUCCESS, issues: , uid: dc71185e-b3fbfdd6-6e9d2290-3bc0b869 2026-01-08T15:36:18.100602Z node 1 :KQP_COMPILE_SERVICE DEBUG: kqp_compile_service.cpp:628: Received response, sender: [1:7593015008723562993:2637], status: SUCCESS, compileActor: [1:7593015021608465172:2768] 2026-01-08T15:36:18.100675Z node 1 :KQP_COMPILE_SERVICE DEBUG: kqp_compile_service.cpp:901: Send response, sender: [1:7593015008723562993:2637], queryUid: dc71185e-b3fbfdd6-6e9d2290-3bc0b869, status:SUCCESS still compiling... 0 2026-01-08T15:36:18.382447Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7747: Cannot get console configs 2026-01-08T15:36:18.382492Z node 1 :IMPORT WARN: schemeshard_import.cpp:440: Table profiles were not loaded received non-success status for session 30 received non-success status for session 31 still active sessions ... 0 received non-success status for session 32 received non-success status for session 16 received non-success status for session 2 received non-success status for session 21 received non-success status for session 4 received non-success status for session 0 received non-success status for session 3 received non-success status for session 23 received non-success status for session 27 received non-success status for session 10 received non-success status for session 28 received non-success status for session 29 received non-success status for session 1 received non-success status for session 48 received non-success status for session 18 received non-success status for session 34 received non-success status for session 20 received non-success status for session 37 received non-success status for session 44 received non-success status for session received non-success status for session received non-success status for session 392522 received non-success status for session 24 received non-success status for session 45 received non-success status for session 46 received non-success status for session 41 received non-success status for session 36 received non-success status for session 33 received non-success status for session 9 received non-success status for session received non-success status for session received non-success status for session 4315received non-success status for session 47 received non-success status for session received non-success status for session received non-success status for session 3549 42 38 received non-success status for session 5 received non-success status for session 19 received non-success status for session 26 received non-success status for session 40 received non-success status for session 17 >> TCacheTest::Navigate >> TCacheTest::MigrationCommon |88.4%| [TM] {BAZEL_UPLOAD} ydb/core/kqp/ut/close_with_load/unittest >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-LocalUser-DropUser-72 [FAIL] >> TCacheTest::List [GOOD] >> TCacheTest::MigrationCommit >> TConsoleTests::TestSetConfig [GOOD] >> TConsoleTests::TestTenantGeneration >> TCacheTest::Attributes [GOOD] >> TCacheTest::CheckAccess >> TCacheTest::SystemViews [GOOD] >> TCacheTest::TableSchemaVersion >> HttpRequest::ProbeBaseStats ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/scheme_board/ut_cache/unittest >> TCacheTest::SysLocks [GOOD] Test command err: 2026-01-08T15:36:26.714875Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7755: Cannot subscribe to console configs 2026-01-08T15:36:26.714945Z node 1 :IMPORT WARN: 
schemeshard_import.cpp:440: Table profiles were not loaded ... waiting for SysViewsRoster update finished FAKE_COORDINATOR: Add transaction: 281474976710657 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 281474976710657 at step: 5000001 FAKE_COORDINATOR: Erasing txId 281474976710657 FAKE_COORDINATOR: Add transaction: 281474976710671 at step: 5000002 FAKE_COORDINATOR: advance: minStep5000002 State->FrontStep: 5000001 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 281474976710671 at step: 5000002 FAKE_COORDINATOR: Add transaction: 281474976710666 at step: 5000003 FAKE_COORDINATOR: advance: minStep5000002 State->FrontStep: 5000002 FAKE_COORDINATOR: Add transaction: 281474976710670 at step: 5000004 FAKE_COORDINATOR: advance: minStep5000002 State->FrontStep: 5000002 FAKE_COORDINATOR: Add transaction: 281474976710665 at step: 5000005 FAKE_COORDINATOR: advance: minStep5000002 State->FrontStep: 5000002 FAKE_COORDINATOR: Add transaction: 281474976710669 at step: 5000006 FAKE_COORDINATOR: advance: minStep5000002 State->FrontStep: 5000002 FAKE_COORDINATOR: Add transaction: 281474976710664 at step: 5000007 FAKE_COORDINATOR: advance: minStep5000002 State->FrontStep: 5000002 FAKE_COORDINATOR: Add transaction: 281474976710668 at step: 5000008 FAKE_COORDINATOR: advance: minStep5000002 State->FrontStep: 5000002 FAKE_COORDINATOR: Add transaction: 281474976710663 at step: 5000009 FAKE_COORDINATOR: advance: minStep5000002 State->FrontStep: 5000002 FAKE_COORDINATOR: Add transaction: 281474976710667 at step: 5000010 FAKE_COORDINATOR: advance: minStep5000002 State->FrontStep: 5000002 FAKE_COORDINATOR: Add transaction: 281474976710662 at step: 5000011 FAKE_COORDINATOR: advance: minStep5000002 State->FrontStep: 5000002 FAKE_COORDINATOR: Add transaction: 281474976710661 at step: 5000012 FAKE_COORDINATOR: advance: minStep5000002 State->FrontStep: 5000002 FAKE_COORDINATOR: Add transaction: 281474976710660 at step: 5000013 FAKE_COORDINATOR: advance: minStep5000002 State->FrontStep: 5000002 FAKE_COORDINATOR: Add transaction: 281474976710659 at step: 5000014 FAKE_COORDINATOR: advance: minStep5000002 State->FrontStep: 5000002 FAKE_COORDINATOR: Add transaction: 281474976710658 at step: 5000015 FAKE_COORDINATOR: advance: minStep5000002 State->FrontStep: 5000002 FAKE_COORDINATOR: Add transaction: 281474976710692 at step: 5000016 FAKE_COORDINATOR: advance: minStep5000002 State->FrontStep: 5000002 FAKE_COORDINATOR: Add transaction: 281474976710687 at step: 5000017 FAKE_COORDINATOR: advance: minStep5000002 State->FrontStep: 5000002 FAKE_COORDINATOR: Add transaction: 281474976710691 at step: 5000018 FAKE_COORDINATOR: advance: minStep5000002 State->FrontStep: 5000002 FAKE_COORDINATOR: Add transaction: 281474976710686 at step: 5000019 FAKE_COORDINATOR: advance: minStep5000002 State->FrontStep: 5000002 FAKE_COORDINATOR: Add transaction: 281474976710690 at step: 5000020 FAKE_COORDINATOR: advance: minStep5000002 State->FrontStep: 5000002 FAKE_COORDINATOR: Add transaction: 281474976710685 at step: 5000021 FAKE_COORDINATOR: advance: minStep5000002 State->FrontStep: 5000002 FAKE_COORDINATOR: Add transaction: 281474976710689 at step: 5000022 FAKE_COORDINATOR: advance: minStep5000002 State->FrontStep: 5000002 FAKE_COORDINATOR: Add transaction: 281474976710684 at step: 5000023 FAKE_COORDINATOR: advance: minStep5000002 State->FrontStep: 5000002 FAKE_COORDINATOR: Add transaction: 281474976710688 at step: 5000024 
FAKE_COORDINATOR: advance: minStep5000002 State->FrontStep: 5000002 FAKE_COORDINATOR: Add transaction: 281474976710683 at step: 5000025 FAKE_COORDINATOR: advance: minStep5000002 State->FrontStep: 5000002 FAKE_COORDINATOR: Add transaction: 281474976710682 at step: 5000026 FAKE_COORDINATOR: advance: minStep5000002 State->FrontStep: 5000002 FAKE_COORDINATOR: Add transaction: 281474976710677 at step: 5000027 FAKE_COORDINATOR: advance: minStep5000002 State->FrontStep: 5000002 FAKE_COORDINATOR: Add transaction: 281474976710681 at step: 5000028 FAKE_COORDINATOR: advance: minStep5000002 State->FrontStep: 5000002 FAKE_COORDINATOR: Add transaction: 281474976710676 at step: 5000029 FAKE_COORDINATOR: advance: minStep5000002 State->FrontStep: 5000002 FAKE_COORDINATOR: Add transaction: 281474976710680 at step: 5000030 FAKE_COORDINATOR: advance: minStep5000002 State->FrontStep: 5000002 FAKE_COORDINATOR: Add transaction: 281474976710675 at step: 5000031 FAKE_COORDINATOR: advance: minStep5000002 State->FrontStep: 5000002 FAKE_COORDINATOR: Add transaction: 281474976710679 at step: 5000032 FAKE_COORDINATOR: advance: minStep5000002 State->FrontStep: 5000002 FAKE_COORDINATOR: Add transaction: 281474976710674 at step: 5000033 FAKE_COORDINATOR: advance: minStep5000002 State->FrontStep: 5000002 FAKE_COORDINATOR: Add transaction: 281474976710678 at step: 5000034 FAKE_COORDINATOR: advance: minStep5000002 State->FrontStep: 5000002 FAKE_COORDINATOR: Add transaction: 281474976710673 at step: 5000035 FAKE_COORDINATOR: advance: minStep5000002 State->FrontStep: 5000002 FAKE_COORDINATOR: Add transaction: 281474976710672 at step: 5000036 FAKE_COORDINATOR: advance: minStep5000002 State->FrontStep: 5000002 FAKE_COORDINATOR: Erasing txId 281474976710671 FAKE_COORDINATOR: advance: minStep5000003 State->FrontStep: 5000002 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 281474976710666 at step: 5000003 FAKE_COORDINATOR: Erasing txId 281474976710666 FAKE_COORDINATOR: advance: minStep5000004 State->FrontStep: 5000003 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 281474976710670 at step: 5000004 FAKE_COORDINATOR: Erasing txId 281474976710670 FAKE_COORDINATOR: advance: minStep5000005 State->FrontStep: 5000004 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 281474976710665 at step: 5000005 FAKE_COORDINATOR: Erasing txId 281474976710665 FAKE_COORDINATOR: advance: minStep5000006 State->FrontStep: 5000005 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 281474976710669 at step: 5000006 FAKE_COORDINATOR: Erasing txId 281474976710669 FAKE_COORDINATOR: advance: minStep5000007 State->FrontStep: 5000006 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 281474976710664 at step: 5000007 FAKE_COORDINATOR: Erasing txId 281474976710664 FAKE_COORDINATOR: advance: minStep5000008 State->FrontStep: 5000007 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 281474976710668 at step: 5000008 FAKE_COORDINATOR: Erasing txId 281474976710668 FAKE_COORDINATOR: advance: minStep5000009 State->FrontStep: 5000008 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 281474976710663 at step: 5000009 FAKE_COORDINATOR: Erasing txId 281474976710663 FAKE_COORDINATOR: advance: minStep5000010 State->FrontStep: 5000009 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 281474976710667 at step: 5000010 FAKE_COORDINATOR: Erasing txId 281474976710667 FAKE_COORDINATOR: advance: minStep5000011 State->FrontStep: 5000010 FAKE_COORDINATOR: Send Plan 
to tablet 72057594046678944 for txId: 281474976710662 at step: 5000011 FAKE_COORDINATOR: Erasing txId 281474976710662 FAKE_COORDINATOR: advance: minStep5000012 State->FrontStep: 5000011 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 281474976710661 at step: 5000012 FAKE_COORDINATOR: Erasing txId 281474976710661 FAKE_COORDINATOR: advance: minStep5000013 State->FrontStep: 5000012 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 281474976710660 at step: 5000013 FAKE_COORDINATOR: Erasing txId 281474976710660 FAKE_COORDINATOR: advance: minStep5000014 State->FrontStep: 5000013 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 281474976710659 at step: 5000014 FAKE_COORDINATOR: Erasing txId 281474976710659 FAKE_COORDINATOR: advance: minStep5000015 State->FrontStep: 5000014 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 281474976710658 at step: 5000015 FAKE_COORDINATOR: Erasing txId 281474976710658 FAKE_COORDINATOR: advance: minStep5000016 State->FrontStep: 5000015 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 281474976710692 at step: 5000016 FAKE_COORDINATOR: Erasing txId 281474976710692 FAKE_COORDINATOR: advance: minStep5000017 State->FrontStep: 5000016 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 281474976710687 at step: 5000017 FAKE_COORDINATOR: Erasing txId 281474976710687 FAKE_COORDINATOR: advance: minStep5000018 State->FrontStep: 5000017 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 281474976710691 at step: 5000018 FAKE_COORDINATOR: Erasing txId 281474976710691 FAKE_COORDINATOR: advance: minStep5000019 State->FrontStep: 5000018 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 281474976710686 at step: 5000019 FAKE_COORDINATOR: Erasing txId 281474976710686 FAKE_COORDINATOR: advance: minStep5000020 State->FrontStep: 5000019 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 281474976710690 at step: 5000020 FAKE_COORDINATOR: Erasing txId 281474976710690 FAKE_COORDINATOR: advance: minStep5000021 State->FrontStep: 5000020 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 281474976710685 at step: 5000021 FAKE_COORDINATOR: Erasing txId 281474976710685 FAKE_COORDINATOR: advance: minStep5000022 State->FrontStep: 5000021 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 281474976710689 at step: 5000022 FAKE_COORDINATOR: Erasing txId 281474976710689 FAKE_COORDINATOR: advance: minStep5000023 State->FrontStep: 5000022 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 281474976710684 at step: 5000023 FAKE_COORDINATOR: Erasing txId 281474976710684 FAKE_COORDINATOR: advance: minStep5000024 State->FrontStep: 5000023 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 281474976710688 at step: 5000024 FAKE_COORDINATOR: Erasing txId 281474976710688 FAKE_COORDINATOR: advance: minStep5000025 State->FrontStep: 5000024 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 281474976710683 at step: 5000025 FAKE_COORDINATOR: Erasing txId 281474976710683 FAKE_COORDINATOR: advance: minStep5000026 State->FrontStep: 5000025 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 281474976710682 at step: 5000026 FAKE_COORDINATOR: Erasing txId 281474976710682 FAKE_COORDINATOR: advance: minStep5000027 State->FrontStep: 5000026 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 281474976710677 at step: 5000027 FAKE_COORDINATOR: Erasing txId 281474976710677 
FAKE_COORDINATOR: advance: minStep5000028 State->FrontStep: 5000027 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 281474976710681 at step: 5000028 FAKE_COORDINATOR: Erasing txId 281474976710681 FAKE_COORDINATOR: advance: minStep5000029 State->FrontStep: 5000028 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 281474976710676 at step: 5000029 FAKE_COORDINATOR: Erasing txId 281474976710676 FAKE_COORDINATOR: advance: minStep5000030 State->FrontStep: 5000029 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 281474976710680 at step: 5000030 FAKE_COORDINATOR: Erasing txId 281474976710680 FAKE_COORDINATOR: advance: minStep5000031 State->FrontStep: 5000030 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 281474976710675 at step: 5000031 FAKE_COORDINATOR: Erasing txId 281474976710675 FAKE_COORDINATOR: advance: minStep5000032 State->FrontStep: 5000031 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 281474976710679 at step: 5000032 FAKE_COORDINATOR: Erasing txId 281474976710679 FAKE_COORDINATOR: advance: minStep5000033 State->FrontStep: 5000032 FAKE_COORDINATOR: ... tep: 5000008 FAKE_COORDINATOR: advance: minStep5000002 State->FrontStep: 5000002 FAKE_COORDINATOR: Add transaction: 281474976710663 at step: 5000009 FAKE_COORDINATOR: advance: minStep5000002 State->FrontStep: 5000002 FAKE_COORDINATOR: Add transaction: 281474976710667 at step: 5000010 FAKE_COORDINATOR: advance: minStep5000002 State->FrontStep: 5000002 FAKE_COORDINATOR: Add transaction: 281474976710662 at step: 5000011 FAKE_COORDINATOR: advance: minStep5000002 State->FrontStep: 5000002 FAKE_COORDINATOR: Add transaction: 281474976710661 at step: 5000012 FAKE_COORDINATOR: advance: minStep5000002 State->FrontStep: 5000002 FAKE_COORDINATOR: Add transaction: 281474976710660 at step: 5000013 FAKE_COORDINATOR: advance: minStep5000002 State->FrontStep: 5000002 FAKE_COORDINATOR: Add transaction: 281474976710659 at step: 5000014 FAKE_COORDINATOR: advance: minStep5000002 State->FrontStep: 5000002 FAKE_COORDINATOR: Add transaction: 281474976710658 at step: 5000015 FAKE_COORDINATOR: advance: minStep5000002 State->FrontStep: 5000002 FAKE_COORDINATOR: Add transaction: 281474976710692 at step: 5000016 FAKE_COORDINATOR: advance: minStep5000002 State->FrontStep: 5000002 FAKE_COORDINATOR: Add transaction: 281474976710687 at step: 5000017 FAKE_COORDINATOR: advance: minStep5000002 State->FrontStep: 5000002 FAKE_COORDINATOR: Add transaction: 281474976710691 at step: 5000018 FAKE_COORDINATOR: advance: minStep5000002 State->FrontStep: 5000002 FAKE_COORDINATOR: Add transaction: 281474976710686 at step: 5000019 FAKE_COORDINATOR: advance: minStep5000002 State->FrontStep: 5000002 FAKE_COORDINATOR: Add transaction: 281474976710690 at step: 5000020 FAKE_COORDINATOR: advance: minStep5000002 State->FrontStep: 5000002 FAKE_COORDINATOR: Add transaction: 281474976710685 at step: 5000021 FAKE_COORDINATOR: advance: minStep5000002 State->FrontStep: 5000002 FAKE_COORDINATOR: Add transaction: 281474976710689 at step: 5000022 FAKE_COORDINATOR: advance: minStep5000002 State->FrontStep: 5000002 FAKE_COORDINATOR: Add transaction: 281474976710684 at step: 5000023 FAKE_COORDINATOR: advance: minStep5000002 State->FrontStep: 5000002 FAKE_COORDINATOR: Add transaction: 281474976710688 at step: 5000024 FAKE_COORDINATOR: advance: minStep5000002 State->FrontStep: 5000002 FAKE_COORDINATOR: Add transaction: 281474976710683 at step: 5000025 FAKE_COORDINATOR: advance: minStep5000002 State->FrontStep: 5000002 
FAKE_COORDINATOR: Add transaction: 281474976710682 at step: 5000026 FAKE_COORDINATOR: advance: minStep5000002 State->FrontStep: 5000002 FAKE_COORDINATOR: Add transaction: 281474976710677 at step: 5000027 FAKE_COORDINATOR: advance: minStep5000002 State->FrontStep: 5000002 FAKE_COORDINATOR: Add transaction: 281474976710681 at step: 5000028 FAKE_COORDINATOR: advance: minStep5000002 State->FrontStep: 5000002 FAKE_COORDINATOR: Add transaction: 281474976710676 at step: 5000029 FAKE_COORDINATOR: advance: minStep5000002 State->FrontStep: 5000002 FAKE_COORDINATOR: Add transaction: 281474976710680 at step: 5000030 FAKE_COORDINATOR: advance: minStep5000002 State->FrontStep: 5000002 FAKE_COORDINATOR: Add transaction: 281474976710675 at step: 5000031 FAKE_COORDINATOR: advance: minStep5000002 State->FrontStep: 5000002 FAKE_COORDINATOR: Add transaction: 281474976710679 at step: 5000032 FAKE_COORDINATOR: advance: minStep5000002 State->FrontStep: 5000002 FAKE_COORDINATOR: Add transaction: 281474976710674 at step: 5000033 FAKE_COORDINATOR: advance: minStep5000002 State->FrontStep: 5000002 FAKE_COORDINATOR: Add transaction: 281474976710678 at step: 5000034 FAKE_COORDINATOR: advance: minStep5000002 State->FrontStep: 5000002 FAKE_COORDINATOR: Add transaction: 281474976710673 at step: 5000035 FAKE_COORDINATOR: advance: minStep5000002 State->FrontStep: 5000002 FAKE_COORDINATOR: Add transaction: 281474976710672 at step: 5000036 FAKE_COORDINATOR: advance: minStep5000002 State->FrontStep: 5000002 FAKE_COORDINATOR: Erasing txId 281474976710671 FAKE_COORDINATOR: advance: minStep5000003 State->FrontStep: 5000002 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 281474976710666 at step: 5000003 FAKE_COORDINATOR: Erasing txId 281474976710666 FAKE_COORDINATOR: advance: minStep5000004 State->FrontStep: 5000003 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 281474976710670 at step: 5000004 FAKE_COORDINATOR: Erasing txId 281474976710670 FAKE_COORDINATOR: advance: minStep5000005 State->FrontStep: 5000004 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 281474976710665 at step: 5000005 FAKE_COORDINATOR: Erasing txId 281474976710665 FAKE_COORDINATOR: advance: minStep5000006 State->FrontStep: 5000005 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 281474976710669 at step: 5000006 FAKE_COORDINATOR: Erasing txId 281474976710669 FAKE_COORDINATOR: advance: minStep5000007 State->FrontStep: 5000006 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 281474976710664 at step: 5000007 FAKE_COORDINATOR: Erasing txId 281474976710664 FAKE_COORDINATOR: advance: minStep5000008 State->FrontStep: 5000007 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 281474976710668 at step: 5000008 FAKE_COORDINATOR: Erasing txId 281474976710668 FAKE_COORDINATOR: advance: minStep5000009 State->FrontStep: 5000008 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 281474976710663 at step: 5000009 FAKE_COORDINATOR: Erasing txId 281474976710663 FAKE_COORDINATOR: advance: minStep5000010 State->FrontStep: 5000009 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 281474976710667 at step: 5000010 FAKE_COORDINATOR: Erasing txId 281474976710667 FAKE_COORDINATOR: advance: minStep5000011 State->FrontStep: 5000010 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 281474976710662 at step: 5000011 FAKE_COORDINATOR: Erasing txId 281474976710662 FAKE_COORDINATOR: advance: minStep5000012 State->FrontStep: 5000011 FAKE_COORDINATOR: 
Send Plan to tablet 72057594046678944 for txId: 281474976710661 at step: 5000012 FAKE_COORDINATOR: Erasing txId 281474976710661 FAKE_COORDINATOR: advance: minStep5000013 State->FrontStep: 5000012 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 281474976710660 at step: 5000013 FAKE_COORDINATOR: Erasing txId 281474976710660 FAKE_COORDINATOR: advance: minStep5000014 State->FrontStep: 5000013 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 281474976710659 at step: 5000014 FAKE_COORDINATOR: Erasing txId 281474976710659 FAKE_COORDINATOR: advance: minStep5000015 State->FrontStep: 5000014 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 281474976710658 at step: 5000015 FAKE_COORDINATOR: Erasing txId 281474976710658 FAKE_COORDINATOR: advance: minStep5000016 State->FrontStep: 5000015 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 281474976710692 at step: 5000016 FAKE_COORDINATOR: Erasing txId 281474976710692 FAKE_COORDINATOR: advance: minStep5000017 State->FrontStep: 5000016 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 281474976710687 at step: 5000017 FAKE_COORDINATOR: Erasing txId 281474976710687 FAKE_COORDINATOR: advance: minStep5000018 State->FrontStep: 5000017 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 281474976710691 at step: 5000018 FAKE_COORDINATOR: Erasing txId 281474976710691 FAKE_COORDINATOR: advance: minStep5000019 State->FrontStep: 5000018 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 281474976710686 at step: 5000019 FAKE_COORDINATOR: Erasing txId 281474976710686 FAKE_COORDINATOR: advance: minStep5000020 State->FrontStep: 5000019 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 281474976710690 at step: 5000020 FAKE_COORDINATOR: Erasing txId 281474976710690 FAKE_COORDINATOR: advance: minStep5000021 State->FrontStep: 5000020 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 281474976710685 at step: 5000021 FAKE_COORDINATOR: Erasing txId 281474976710685 FAKE_COORDINATOR: advance: minStep5000022 State->FrontStep: 5000021 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 281474976710689 at step: 5000022 FAKE_COORDINATOR: Erasing txId 281474976710689 FAKE_COORDINATOR: advance: minStep5000023 State->FrontStep: 5000022 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 281474976710684 at step: 5000023 FAKE_COORDINATOR: Erasing txId 281474976710684 FAKE_COORDINATOR: advance: minStep5000024 State->FrontStep: 5000023 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 281474976710688 at step: 5000024 FAKE_COORDINATOR: Erasing txId 281474976710688 FAKE_COORDINATOR: advance: minStep5000025 State->FrontStep: 5000024 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 281474976710683 at step: 5000025 FAKE_COORDINATOR: Erasing txId 281474976710683 FAKE_COORDINATOR: advance: minStep5000026 State->FrontStep: 5000025 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 281474976710682 at step: 5000026 FAKE_COORDINATOR: Erasing txId 281474976710682 FAKE_COORDINATOR: advance: minStep5000027 State->FrontStep: 5000026 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 281474976710677 at step: 5000027 FAKE_COORDINATOR: Erasing txId 281474976710677 FAKE_COORDINATOR: advance: minStep5000028 State->FrontStep: 5000027 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 281474976710681 at step: 5000028 FAKE_COORDINATOR: Erasing txId 281474976710681 
FAKE_COORDINATOR: advance: minStep5000029 State->FrontStep: 5000028 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 281474976710676 at step: 5000029 FAKE_COORDINATOR: Erasing txId 281474976710676 FAKE_COORDINATOR: advance: minStep5000030 State->FrontStep: 5000029 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 281474976710680 at step: 5000030 FAKE_COORDINATOR: Erasing txId 281474976710680 FAKE_COORDINATOR: advance: minStep5000031 State->FrontStep: 5000030 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 281474976710675 at step: 5000031 FAKE_COORDINATOR: Erasing txId 281474976710675 FAKE_COORDINATOR: advance: minStep5000032 State->FrontStep: 5000031 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 281474976710679 at step: 5000032 FAKE_COORDINATOR: Erasing txId 281474976710679 FAKE_COORDINATOR: advance: minStep5000033 State->FrontStep: 5000032 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 281474976710674 at step: 5000033 FAKE_COORDINATOR: Erasing txId 281474976710674 FAKE_COORDINATOR: advance: minStep5000034 State->FrontStep: 5000033 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 281474976710678 at step: 5000034 FAKE_COORDINATOR: Erasing txId 281474976710678 FAKE_COORDINATOR: advance: minStep5000035 State->FrontStep: 5000034 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 281474976710673 at step: 5000035 FAKE_COORDINATOR: Erasing txId 281474976710673 FAKE_COORDINATOR: advance: minStep5000036 State->FrontStep: 5000035 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 281474976710672 at step: 5000036 FAKE_COORDINATOR: Erasing txId 281474976710672 ... waiting for SysViewsRoster update finished (done) TestModificationResults wait txId: 1 2026-01-08T15:36:27.793352Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) FAKE_COORDINATOR: Add transaction: 1 at step: 5000037 FAKE_COORDINATOR: advance: minStep5000037 State->FrontStep: 5000036 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000037 FAKE_COORDINATOR: Erasing txId 1 TestModificationResult got TxId: 1, wait until txId: 1 |88.4%| [TM] {BAZEL_UPLOAD} ydb/core/tx/scheme_board/ut_cache/unittest >> TConsoleTests::TestCreateTenantWrongNameExtSubdomain [GOOD] >> TConsoleTests::TestCreateTenantWrongPool >> TxUsage::Sinks_Oltp_WriteToTopics_3_Table [GOOD] >> TCacheTest::Navigate [GOOD] >> TCacheTest::PathBelongsToDomain >> TCacheTestWithoutRealSystemViewPaths::SystemViews >> TConsoleInMemoryConfigSubscriptionTests::TestNoYamlWithoutFlag [GOOD] >> TConsoleInMemoryConfigSubscriptionTests::TestConsoleRestart >> TCacheTest::CheckAccess [GOOD] >> TCacheTest::CheckSystemViewAccess >> TConsoleConfigTests::TestGetItems [GOOD] >> TConsoleConfigTests::TestGetNodeItems >> TCacheTest::PathBelongsToDomain [GOOD] >> TCacheTestWithoutRealSystemViewPaths::SystemViews [GOOD] >> TCacheTestWithoutRealSystemViewPaths::CheckSystemViewAccess >> TCacheTest::TableSchemaVersion [GOOD] >> TCacheTest::MigrationUndo [GOOD] >> TxUsage::Sinks_Oltp_WriteToTopics_3_Query >> TCacheTest::CheckSystemViewAccess [GOOD] >> TCacheTestWithoutRealSystemViewPaths::CheckSystemViewAccess [GOOD] >> TCacheTest::MigrationCommon [GOOD] >> 
TCacheTest::MigrationDeletedPathNavigate >> TCacheTest::MigrationCommit [GOOD] >> TCacheTest::CookiesArePreserved >> TConsoleConfigSubscriptionTests::TestNotificationForNewConfigItem [GOOD] >> TConsoleConfigSubscriptionTests::TestNotificationForModifiedConfigItem ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/scheme_board/ut_cache/unittest >> TCacheTest::PathBelongsToDomain [GOOD] Test command err: 2026-01-08T15:36:28.783664Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7755: Cannot subscribe to console configs 2026-01-08T15:36:28.783731Z node 1 :IMPORT WARN: schemeshard_import.cpp:440: Table profiles were not loaded ... waiting for SysViewsRoster update finished
waiting for SysViewsRoster update finished (done) TestModificationResults wait txId: 1 2026-01-08T15:36:30.325234Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) FAKE_COORDINATOR: Add transaction: 1 at step: 5000037 FAKE_COORDINATOR: advance: minStep5000037 State->FrontStep: 5000036 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000037 FAKE_COORDINATOR: Erasing txId 1 TestModificationResult got TxId: 1, wait until txId: 1 TestModificationResults wait txId: 101 FAKE_COORDINATOR: Add transaction: 101 at step: 5000038 FAKE_COORDINATOR: advance: minStep5000038 State->FrontStep: 5000037 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 101 at step: 5000038 FAKE_COORDINATOR: Erasing txId 101 TestModificationResult got TxId: 101, wait until txId: 101 TestWaitNotification wait txId: 101 2026-01-08T15:36:30.337320Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:123: NotifyTxCompletion, unknown transaction, txId: 101, at schemeshard: 72057594046678944 TestWaitNotification: OK eventTxId 101 TestModificationResults wait txId: 102 2026-01-08T15:36:30.338504Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateSubDomain, opId: 102:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_subdomain.cpp:259) FAKE_COORDINATOR: Add transaction: 102 at step: 5000039 FAKE_COORDINATOR: advance: minStep5000039 State->FrontStep: 5000038 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 102 at step: 5000039 FAKE_COORDINATOR: Erasing txId 102 TestModificationResult got TxId: 102, wait until txId: 102 TestModificationResults wait txId: 103 FAKE_COORDINATOR: Add transaction: 103 at step: 5000040 FAKE_COORDINATOR: advance: minStep5000040 State->FrontStep: 5000039 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 103 at step: 5000040 FAKE_COORDINATOR: Erasing txId 103 TestModificationResult got TxId: 103, wait until txId: 103 TestWaitNotification wait txId: 102 TestWaitNotification wait txId: 103 2026-01-08T15:36:30.353446Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:123: NotifyTxCompletion, unknown transaction, txId: 102, at schemeshard: 72057594046678944 2026-01-08T15:36:30.353677Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:123: NotifyTxCompletion, unknown transaction, txId: 103, at schemeshard: 72057594046678944 TestWaitNotification: OK eventTxId 102 TestWaitNotification: OK eventTxId 103 TestModificationResults wait txId: 104 2026-01-08T15:36:30.355088Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateSubDomain, opId: 104:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_subdomain.cpp:259) FAKE_COORDINATOR: Add transaction: 104 at step: 5000041 FAKE_COORDINATOR: advance: minStep5000041 State->FrontStep: 5000040 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 104 at step: 5000041 FAKE_COORDINATOR: Erasing txId 
104 TestModificationResult got TxId: 104, wait until txId: 104 TestModificationResults wait txId: 105 FAKE_COORDINATOR: Add transaction: 105 at step: 5000042 FAKE_COORDINATOR: advance: minStep5000042 State->FrontStep: 5000041 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 105 at step: 5000042 FAKE_COORDINATOR: Erasing txId 105 TestModificationResult got TxId: 105, wait until txId: 105 TestWaitNotification wait txId: 104 TestWaitNotification wait txId: 105 2026-01-08T15:36:30.370425Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:123: NotifyTxCompletion, unknown transaction, txId: 104, at schemeshard: 72057594046678944 2026-01-08T15:36:30.370737Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:123: NotifyTxCompletion, unknown transaction, txId: 105, at schemeshard: 72057594046678944 TestWaitNotification: OK eventTxId 104 TestWaitNotification: OK eventTxId 105 2026-01-08T15:36:30.378835Z node 2 :TX_PROXY_SCHEME_CACHE WARN: cache.cpp:307: Path does not belong to the specified domain: self# [2:505:2473], domain# [OwnerId: 72057594046678944, LocalPathId: 1], path's domain# [OwnerId: 72057594046678944, LocalPathId: 39] 2026-01-08T15:36:30.379259Z node 2 :TX_PROXY_SCHEME_CACHE WARN: cache.cpp:307: Path does not belong to the specified domain: self# [2:507:2475], domain# [OwnerId: 72057594046678944, LocalPathId: 39], path's domain# [OwnerId: 72057594046678944, LocalPathId: 1] 2026-01-08T15:36:30.379630Z node 2 :TX_PROXY_SCHEME_CACHE WARN: cache.cpp:307: Path does not belong to the specified domain: self# [2:509:2477], domain# [OwnerId: 72057594046678944, LocalPathId: 39], path's domain# [OwnerId: 72057594046678944, LocalPathId: 1] 2026-01-08T15:36:30.381822Z node 2 :TX_PROXY_SCHEME_CACHE WARN: cache.cpp:307: Path does not belong to the specified domain: self# [2:522:2484], domain# [OwnerId: 72057594046678944, LocalPathId: 39], path's domain# [OwnerId: 72057594046678944, LocalPathId: 41] 2026-01-08T15:36:30.383354Z node 2 :TX_PROXY_SCHEME_CACHE WARN: cache.cpp:307: Path does not belong to the specified domain: self# [2:531:2487], domain# [OwnerId: 72057594046678944, LocalPathId: 39], path's domain# [OwnerId: 72057594046678944, LocalPathId: 41] 2026-01-08T15:36:30.384735Z node 2 :TX_PROXY_SCHEME_CACHE WARN: cache.cpp:307: Path does not belong to the specified domain: self# [2:539:2495], domain# [OwnerId: 72057594046678944, LocalPathId: 1], path's domain# [OwnerId: 72057594046678944, LocalPathId: 39] 2026-01-08T15:36:30.385075Z node 2 :TX_PROXY_SCHEME_CACHE WARN: cache.cpp:307: Path does not belong to the specified domain: self# [2:541:2497], domain# [OwnerId: 72057594046678944, LocalPathId: 39], path's domain# [OwnerId: 72057594046678944, LocalPathId: 1] 2026-01-08T15:36:30.385394Z node 2 :TX_PROXY_SCHEME_CACHE WARN: cache.cpp:307: Path does not belong to the specified domain: self# [2:543:2499], domain# [OwnerId: 72057594046678944, LocalPathId: 39], path's domain# [OwnerId: 72057594046678944, LocalPathId: 1] 2026-01-08T15:36:30.386306Z node 2 :TX_PROXY_SCHEME_CACHE WARN: cache.cpp:307: Path does not belong to the specified domain: self# [2:549:2505], domain# [OwnerId: 72057594046678944, LocalPathId: 39], path's domain# [OwnerId: 72057594046678944, LocalPathId: 41] 2026-01-08T15:36:30.386705Z node 2 :TX_PROXY_SCHEME_CACHE WARN: cache.cpp:307: Path does not belong to the specified domain: self# [2:551:2507], domain# [OwnerId: 72057594046678944, LocalPathId: 39], path's domain# [OwnerId: 72057594046678944, LocalPathId: 41] |88.4%| [TM] {BAZEL_UPLOAD} 
ydb/core/tx/scheme_board/ut_cache/unittest ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/scheme_board/ut_cache/unittest >> TCacheTestWithoutRealSystemViewPaths::CheckSystemViewAccess [GOOD] Test command err: 2026-01-08T15:36:30.349792Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7755: Cannot subscribe to console configs 2026-01-08T15:36:30.349865Z node 1 :IMPORT WARN: schemeshard_import.cpp:440: Table profiles were not loaded TestModificationResults wait txId: 1 2026-01-08T15:36:30.409459Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 FAKE_COORDINATOR: Erasing txId 1 TestModificationResult got TxId: 1, wait until txId: 1 2026-01-08T15:36:31.030311Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7755: Cannot subscribe to console configs 2026-01-08T15:36:31.030399Z node 2 :IMPORT WARN: schemeshard_import.cpp:440: Table profiles were not loaded TestModificationResults wait txId: 1 2026-01-08T15:36:31.072494Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 FAKE_COORDINATOR: Erasing txId 1 TestModificationResult got TxId: 1, wait until txId: 1 TestModificationResults wait txId: 101 2026-01-08T15:36:31.080769Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateSubDomain, opId: 101:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_subdomain.cpp:259) FAKE_COORDINATOR: Add transaction: 101 at step: 5000002 FAKE_COORDINATOR: advance: minStep5000002 State->FrontStep: 5000001 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 101 at step: 5000002 FAKE_COORDINATOR: Erasing txId 101 TestModificationResult got TxId: 101, wait until txId: 101 TestWaitNotification wait txId: 101 2026-01-08T15:36:31.089276Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:123: NotifyTxCompletion, unknown transaction, txId: 101, at schemeshard: 72057594046678944 TestWaitNotification: OK eventTxId 101 TestModificationResults wait txId: 102 2026-01-08T15:36:31.090191Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 102:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) TestModificationResult got TxId: 102, wait until txId: 102 2026-01-08T15:36:31.096855Z node 2 :TX_PROXY_SCHEME_CACHE 
WARN: cache.cpp:324: Access denied: self# [2:214:2199], for# user1@builtin, access# DescribeSchema 2026-01-08T15:36:31.098036Z node 2 :TX_PROXY_SCHEME_CACHE WARN: cache.cpp:324: Access denied: self# [2:220:2205], for# user1@builtin, access# ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/scheme_board/ut_cache/unittest >> TCacheTest::TableSchemaVersion [GOOD] Test command err: 2026-01-08T15:36:28.331985Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7755: Cannot subscribe to console configs 2026-01-08T15:36:28.332065Z node 1 :IMPORT WARN: schemeshard_import.cpp:440: Table profiles were not loaded ... waiting for SysViewsRoster update finished
FAKE_COORDINATOR: Erasing txId 281474976710672 ... waiting for SysViewsRoster update finished (done) TestModificationResults wait txId: 1 2026-01-08T15:36:30.337690Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) FAKE_COORDINATOR: Add transaction: 1 at step: 5000037 FAKE_COORDINATOR: advance: minStep5000037 State->FrontStep: 5000036 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000037 FAKE_COORDINATOR: Erasing txId 1 TestModificationResult got TxId: 1, wait until txId: 1 TestModificationResults wait txId: 101 2026-01-08T15:36:30.400323Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 101:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) FAKEHIVE 72057594037968897 TEvCreateTablet Owner: 72057594046678944 OwnerIdx: 1 TabletType: DataShard ObjectDomain { SchemeShard: 72057594046678944 PathId: 1 } ObjectId: 38 BindedChannels { StoragePoolName: "pool-1" StoragePoolKind: "pool-kind-1" } BindedChannels { StoragePoolName: "pool-1" StoragePoolKind: "pool-kind-1" } AllowedDomains { SchemeShard: 72057594046678944 PathId: 1 } TestModificationResult got TxId: 101, wait until txId: 101 TestWaitNotification wait txId: 101 FAKE_COORDINATOR: Add transaction: 101 at step: 5000038 FAKE_COORDINATOR: advance: minStep5000038 State->FrontStep: 5000037 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 101 at step: 5000038 FAKE_COORDINATOR: Send Plan to tablet 72075186233409546 for txId: 101 at step: 5000038 FAKE_COORDINATOR: advance: minStep5000038 State->FrontStep: 5000038 FAKE_COORDINATOR: Erasing txId 101 TestWaitNotification: OK eventTxId 101 TestModificationResults wait txId: 102 2026-01-08T15:36:30.594865Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterTable, opId: 102:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_alter_table.cpp:187) TestModificationResult got TxId: 102, wait until txId: 102 TestWaitNotification wait txId: 102 FAKE_COORDINATOR: Add transaction: 102 at step: 5000039 FAKE_COORDINATOR: advance: minStep5000039 State->FrontStep: 5000038 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 102 at step: 5000039 FAKE_COORDINATOR: Send Plan to tablet 72075186233409546 for txId: 102 at step: 5000039 FAKE_COORDINATOR: advance: minStep5000039 State->FrontStep: 5000039 FAKE_COORDINATOR: Erasing txId 102 TestWaitNotification: OK eventTxId 102 |88.4%| [TM] {BAZEL_UPLOAD} ydb/core/tx/scheme_board/ut_cache/unittest |88.4%| [TM] {BAZEL_UPLOAD} ydb/core/tx/scheme_board/ut_cache/unittest >> TConsoleInMemoryConfigSubscriptionTests::TestConsoleRestart [GOOD] >> TConsoleInMemoryConfigSubscriptionTests::TestConsoleRestartSimplified >> TCacheTest::CookiesArePreserved [GOOD] >> TConsoleTests::TestModifyUsedZoneKind [GOOD] >> TConsoleTests::TestRemoveTenant ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/scheme_board/ut_cache/unittest 
>> TCacheTest::CheckSystemViewAccess [GOOD] Test command err: 2026-01-08T15:36:28.309059Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7755: Cannot subscribe to console configs 2026-01-08T15:36:28.309125Z node 1 :IMPORT WARN: schemeshard_import.cpp:440: Table profiles were not loaded ... waiting for SysViewsRoster update finished FAKE_COORDINATOR: Add transaction: 281474976710657 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 281474976710657 at step: 5000001 FAKE_COORDINATOR: Erasing txId 281474976710657 FAKE_COORDINATOR: Add transaction: 281474976710671 at step: 5000002 FAKE_COORDINATOR: advance: minStep5000002 State->FrontStep: 5000001 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 281474976710671 at step: 5000002 FAKE_COORDINATOR: Add transaction: 281474976710666 at step: 5000003 FAKE_COORDINATOR: advance: minStep5000002 State->FrontStep: 5000002 FAKE_COORDINATOR: Add transaction: 281474976710670 at step: 5000004 FAKE_COORDINATOR: advance: minStep5000002 State->FrontStep: 5000002 FAKE_COORDINATOR: Add transaction: 281474976710665 at step: 5000005 FAKE_COORDINATOR: advance: minStep5000002 State->FrontStep: 5000002 FAKE_COORDINATOR: Add transaction: 281474976710669 at step: 5000006 FAKE_COORDINATOR: advance: minStep5000002 State->FrontStep: 5000002 FAKE_COORDINATOR: Add transaction: 281474976710664 at step: 5000007 FAKE_COORDINATOR: advance: minStep5000002 State->FrontStep: 5000002 FAKE_COORDINATOR: Add transaction: 281474976710668 at step: 5000008 FAKE_COORDINATOR: advance: minStep5000002 State->FrontStep: 5000002 FAKE_COORDINATOR: Add transaction: 281474976710663 at step: 5000009 FAKE_COORDINATOR: advance: minStep5000002 State->FrontStep: 5000002 FAKE_COORDINATOR: Add transaction: 281474976710667 at step: 5000010 FAKE_COORDINATOR: advance: minStep5000002 State->FrontStep: 5000002 FAKE_COORDINATOR: Add transaction: 281474976710662 at step: 5000011 FAKE_COORDINATOR: advance: minStep5000002 State->FrontStep: 5000002 FAKE_COORDINATOR: Add transaction: 281474976710661 at step: 5000012 FAKE_COORDINATOR: advance: minStep5000002 State->FrontStep: 5000002 FAKE_COORDINATOR: Add transaction: 281474976710660 at step: 5000013 FAKE_COORDINATOR: advance: minStep5000002 State->FrontStep: 5000002 FAKE_COORDINATOR: Add transaction: 281474976710659 at step: 5000014 FAKE_COORDINATOR: advance: minStep5000002 State->FrontStep: 5000002 FAKE_COORDINATOR: Add transaction: 281474976710658 at step: 5000015 FAKE_COORDINATOR: advance: minStep5000002 State->FrontStep: 5000002 FAKE_COORDINATOR: Add transaction: 281474976710692 at step: 5000016 FAKE_COORDINATOR: advance: minStep5000002 State->FrontStep: 5000002 FAKE_COORDINATOR: Add transaction: 281474976710687 at step: 5000017 FAKE_COORDINATOR: advance: minStep5000002 State->FrontStep: 5000002 FAKE_COORDINATOR: Add transaction: 281474976710691 at step: 5000018 FAKE_COORDINATOR: advance: minStep5000002 State->FrontStep: 5000002 FAKE_COORDINATOR: Add transaction: 281474976710686 at step: 5000019 FAKE_COORDINATOR: advance: minStep5000002 State->FrontStep: 5000002 FAKE_COORDINATOR: Add transaction: 281474976710690 at step: 5000020 FAKE_COORDINATOR: advance: minStep5000002 State->FrontStep: 5000002 FAKE_COORDINATOR: Add transaction: 281474976710685 at step: 5000021 FAKE_COORDINATOR: advance: minStep5000002 State->FrontStep: 5000002 FAKE_COORDINATOR: Add transaction: 281474976710689 at step: 5000022 FAKE_COORDINATOR: advance: minStep5000002 
State->FrontStep: 5000002 FAKE_COORDINATOR: Add transaction: 281474976710684 at step: 5000023 FAKE_COORDINATOR: advance: minStep5000002 State->FrontStep: 5000002 FAKE_COORDINATOR: Add transaction: 281474976710688 at step: 5000024 FAKE_COORDINATOR: advance: minStep5000002 State->FrontStep: 5000002 FAKE_COORDINATOR: Add transaction: 281474976710683 at step: 5000025 FAKE_COORDINATOR: advance: minStep5000002 State->FrontStep: 5000002 FAKE_COORDINATOR: Add transaction: 281474976710682 at step: 5000026 FAKE_COORDINATOR: advance: minStep5000002 State->FrontStep: 5000002 FAKE_COORDINATOR: Add transaction: 281474976710677 at step: 5000027 FAKE_COORDINATOR: advance: minStep5000002 State->FrontStep: 5000002 FAKE_COORDINATOR: Add transaction: 281474976710681 at step: 5000028 FAKE_COORDINATOR: advance: minStep5000002 State->FrontStep: 5000002 FAKE_COORDINATOR: Add transaction: 281474976710676 at step: 5000029 FAKE_COORDINATOR: advance: minStep5000002 State->FrontStep: 5000002 FAKE_COORDINATOR: Add transaction: 281474976710680 at step: 5000030 FAKE_COORDINATOR: advance: minStep5000002 State->FrontStep: 5000002 FAKE_COORDINATOR: Add transaction: 281474976710675 at step: 5000031 FAKE_COORDINATOR: advance: minStep5000002 State->FrontStep: 5000002 FAKE_COORDINATOR: Add transaction: 281474976710679 at step: 5000032 FAKE_COORDINATOR: advance: minStep5000002 State->FrontStep: 5000002 FAKE_COORDINATOR: Add transaction: 281474976710674 at step: 5000033 FAKE_COORDINATOR: advance: minStep5000002 State->FrontStep: 5000002 FAKE_COORDINATOR: Add transaction: 281474976710678 at step: 5000034 FAKE_COORDINATOR: advance: minStep5000002 State->FrontStep: 5000002 FAKE_COORDINATOR: Add transaction: 281474976710673 at step: 5000035 FAKE_COORDINATOR: advance: minStep5000002 State->FrontStep: 5000002 FAKE_COORDINATOR: Add transaction: 281474976710672 at step: 5000036 FAKE_COORDINATOR: advance: minStep5000002 State->FrontStep: 5000002 FAKE_COORDINATOR: Erasing txId 281474976710671 FAKE_COORDINATOR: advance: minStep5000003 State->FrontStep: 5000002 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 281474976710666 at step: 5000003 FAKE_COORDINATOR: Erasing txId 281474976710666 FAKE_COORDINATOR: advance: minStep5000004 State->FrontStep: 5000003 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 281474976710670 at step: 5000004 FAKE_COORDINATOR: Erasing txId 281474976710670 FAKE_COORDINATOR: advance: minStep5000005 State->FrontStep: 5000004 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 281474976710665 at step: 5000005 FAKE_COORDINATOR: Erasing txId 281474976710665 FAKE_COORDINATOR: advance: minStep5000006 State->FrontStep: 5000005 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 281474976710669 at step: 5000006 FAKE_COORDINATOR: Erasing txId 281474976710669 FAKE_COORDINATOR: advance: minStep5000007 State->FrontStep: 5000006 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 281474976710664 at step: 5000007 FAKE_COORDINATOR: Erasing txId 281474976710664 FAKE_COORDINATOR: advance: minStep5000008 State->FrontStep: 5000007 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 281474976710668 at step: 5000008 FAKE_COORDINATOR: Erasing txId 281474976710668 FAKE_COORDINATOR: advance: minStep5000009 State->FrontStep: 5000008 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 281474976710663 at step: 5000009 FAKE_COORDINATOR: Erasing txId 281474976710663 FAKE_COORDINATOR: advance: minStep5000010 State->FrontStep: 5000009 
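[Editor's note] The planning pattern visible in this dump — every transaction is first registered with "Add transaction: <txId> at step: <step>", and only later does the coordinator advance its front step and emit "Send Plan ... / Erasing txId ..." strictly in step order — can be modeled as a step-ordered queue. The following is a minimal illustrative sketch of that protocol, not the actual FAKE_COORDINATOR implementation; all names are hypothetical:

#include <cstdint>
#include <iostream>
#include <map>

// Toy model of the plan-step queue implied by the log: transactions are
// registered at a target step, and Advance() releases them in step order,
// moving FrontStep forward and "erasing" each txId once it is planned.
class ToyCoordinator {
public:
    void AddTransaction(uint64_t txId, uint64_t step) {
        Queue[step] = txId;
        std::cout << "Add transaction: " << txId << " at step: " << step << "\n";
    }

    // Release every queued transaction whose step is <= upToStep.
    void Advance(uint64_t upToStep) {
        while (!Queue.empty() && Queue.begin()->first <= upToStep) {
            auto [step, txId] = *Queue.begin();
            std::cout << "Send Plan for txId: " << txId << " at step: " << step << "\n";
            Queue.erase(Queue.begin());   // "Erasing txId ..."
            FrontStep = step;             // State->FrontStep advances monotonically
        }
    }

    // minStep in the log: the smallest step still waiting in the queue.
    uint64_t MinStep() const {
        return Queue.empty() ? FrontStep + 1 : Queue.begin()->first;
    }

private:
    std::map<uint64_t, uint64_t> Queue; // step -> txId (one tx per step here)
    uint64_t FrontStep = 0;
};

int main() {
    ToyCoordinator c;
    c.AddTransaction(281474976710666, 5000003);
    c.AddTransaction(281474976710670, 5000004);
    c.Advance(5000004); // plans both transactions in step order
}

This also explains why the dump shows a long run of "Add transaction" lines with an unchanged FrontStep (5000002) before any "Send Plan" appears: registration and planning are decoupled, and the front step only moves once the earliest queued step is released.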
FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 281474976710667 at step: 5000010 FAKE_COORDINATOR: Erasing txId 281474976710667 FAKE_COORDINATOR: advance: minStep5000011 State->FrontStep: 5000010 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 281474976710662 at step: 5000011 FAKE_COORDINATOR: Erasing txId 281474976710662 FAKE_COORDINATOR: advance: minStep5000012 State->FrontStep: 5000011 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 281474976710661 at step: 5000012 FAKE_COORDINATOR: Erasing txId 281474976710661 FAKE_COORDINATOR: advance: minStep5000013 State->FrontStep: 5000012 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 281474976710660 at step: 5000013 FAKE_COORDINATOR: Erasing txId 281474976710660 FAKE_COORDINATOR: advance: minStep5000014 State->FrontStep: 5000013 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 281474976710659 at step: 5000014 FAKE_COORDINATOR: Erasing txId 281474976710659 FAKE_COORDINATOR: advance: minStep5000015 State->FrontStep: 5000014 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 281474976710658 at step: 5000015 FAKE_COORDINATOR: Erasing txId 281474976710658 FAKE_COORDINATOR: advance: minStep5000016 State->FrontStep: 5000015 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 281474976710692 at step: 5000016 FAKE_COORDINATOR: Erasing txId 281474976710692 FAKE_COORDINATOR: advance: minStep5000017 State->FrontStep: 5000016 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 281474976710687 at step: 5000017 FAKE_COORDINATOR: Erasing txId 281474976710687 FAKE_COORDINATOR: advance: minStep5000018 State->FrontStep: 5000017 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 281474976710691 at step: 5000018 FAKE_COORDINATOR: Erasing txId 281474976710691 FAKE_COORDINATOR: advance: minStep5000019 State->FrontStep: 5000018 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 281474976710686 at step: 5000019 FAKE_COORDINATOR: Erasing txId 281474976710686 FAKE_COORDINATOR: advance: minStep5000020 State->FrontStep: 5000019 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 281474976710690 at step: 5000020 FAKE_COORDINATOR: Erasing txId 281474976710690 FAKE_COORDINATOR: advance: minStep5000021 State->FrontStep: 5000020 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 281474976710685 at step: 5000021 FAKE_COORDINATOR: Erasing txId 281474976710685 FAKE_COORDINATOR: advance: minStep5000022 State->FrontStep: 5000021 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 281474976710689 at step: 5000022 FAKE_COORDINATOR: Erasing txId 281474976710689 FAKE_COORDINATOR: advance: minStep5000023 State->FrontStep: 5000022 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 281474976710684 at step: 5000023 FAKE_COORDINATOR: Erasing txId 281474976710684 FAKE_COORDINATOR: advance: minStep5000024 State->FrontStep: 5000023 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 281474976710688 at step: 5000024 FAKE_COORDINATOR: Erasing txId 281474976710688 FAKE_COORDINATOR: advance: minStep5000025 State->FrontStep: 5000024 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 281474976710683 at step: 5000025 FAKE_COORDINATOR: Erasing txId 281474976710683 FAKE_COORDINATOR: advance: minStep5000026 State->FrontStep: 5000025 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 281474976710682 at step: 5000026 FAKE_COORDINATOR: Erasing txId 
281474976710682 FAKE_COORDINATOR: advance: minStep5000027 State->FrontStep: 5000026 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 281474976710677 at step: 5000027 FAKE_COORDINATOR: Erasing txId 281474976710677 FAKE_COORDINATOR: advance: minStep5000028 State->FrontStep: 5000027 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 281474976710681 at step: 5000028 FAKE_COORDINATOR: Erasing txId 281474976710681 FAKE_COORDINATOR: advance: minStep5000029 State->FrontStep: 5000028 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 281474976710676 at step: 5000029 FAKE_COORDINATOR: Erasing txId 281474976710676 FAKE_COORDINATOR: advance: minStep5000030 State->FrontStep: 5000029 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 281474976710680 at step: 5000030 FAKE_COORDINATOR: Erasing txId 281474976710680 FAKE_COORDINATOR: advance: minStep5000031 State->FrontStep: 5000030 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 281474976710675 at step: 5000031 FAKE_COORDINATOR: Erasing txId 281474976710675 FAKE_COORDINATOR: advance: minStep5000032 State->FrontStep: 5000031 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 281474976710679 at step: 5000032 FAKE_COORDINATOR: Erasing txId 281474976710679 FAKE_COORDINATOR: advance: minStep5000033 State->FrontStep: 5000032 FAKE_COORDINATOR: ... ate->FrontStep: 5000002 FAKE_COORDINATOR: Add transaction: 281474976710692 at step: 5000016 FAKE_COORDINATOR: advance: minStep5000002 State->FrontStep: 5000002 FAKE_COORDINATOR: Add transaction: 281474976710687 at step: 5000017 FAKE_COORDINATOR: advance: minStep5000002 State->FrontStep: 5000002 FAKE_COORDINATOR: Add transaction: 281474976710691 at step: 5000018 FAKE_COORDINATOR: advance: minStep5000002 State->FrontStep: 5000002 FAKE_COORDINATOR: Add transaction: 281474976710686 at step: 5000019 FAKE_COORDINATOR: advance: minStep5000002 State->FrontStep: 5000002 FAKE_COORDINATOR: Add transaction: 281474976710690 at step: 5000020 FAKE_COORDINATOR: advance: minStep5000002 State->FrontStep: 5000002 FAKE_COORDINATOR: Add transaction: 281474976710685 at step: 5000021 FAKE_COORDINATOR: advance: minStep5000002 State->FrontStep: 5000002 FAKE_COORDINATOR: Add transaction: 281474976710689 at step: 5000022 FAKE_COORDINATOR: advance: minStep5000002 State->FrontStep: 5000002 FAKE_COORDINATOR: Add transaction: 281474976710684 at step: 5000023 FAKE_COORDINATOR: advance: minStep5000002 State->FrontStep: 5000002 FAKE_COORDINATOR: Add transaction: 281474976710688 at step: 5000024 FAKE_COORDINATOR: advance: minStep5000002 State->FrontStep: 5000002 FAKE_COORDINATOR: Add transaction: 281474976710683 at step: 5000025 FAKE_COORDINATOR: advance: minStep5000002 State->FrontStep: 5000002 FAKE_COORDINATOR: Add transaction: 281474976710682 at step: 5000026 FAKE_COORDINATOR: advance: minStep5000002 State->FrontStep: 5000002 FAKE_COORDINATOR: Add transaction: 281474976710677 at step: 5000027 FAKE_COORDINATOR: advance: minStep5000002 State->FrontStep: 5000002 FAKE_COORDINATOR: Add transaction: 281474976710681 at step: 5000028 FAKE_COORDINATOR: advance: minStep5000002 State->FrontStep: 5000002 FAKE_COORDINATOR: Add transaction: 281474976710676 at step: 5000029 FAKE_COORDINATOR: advance: minStep5000002 State->FrontStep: 5000002 FAKE_COORDINATOR: Add transaction: 281474976710680 at step: 5000030 FAKE_COORDINATOR: advance: minStep5000002 State->FrontStep: 5000002 FAKE_COORDINATOR: Add transaction: 281474976710675 at step: 5000031 FAKE_COORDINATOR: advance: 
minStep5000002 State->FrontStep: 5000002 FAKE_COORDINATOR: Add transaction: 281474976710679 at step: 5000032 FAKE_COORDINATOR: advance: minStep5000002 State->FrontStep: 5000002 FAKE_COORDINATOR: Add transaction: 281474976710674 at step: 5000033 FAKE_COORDINATOR: advance: minStep5000002 State->FrontStep: 5000002 FAKE_COORDINATOR: Add transaction: 281474976710678 at step: 5000034 FAKE_COORDINATOR: advance: minStep5000002 State->FrontStep: 5000002 FAKE_COORDINATOR: Add transaction: 281474976710673 at step: 5000035 FAKE_COORDINATOR: advance: minStep5000002 State->FrontStep: 5000002 FAKE_COORDINATOR: Add transaction: 281474976710672 at step: 5000036 FAKE_COORDINATOR: advance: minStep5000002 State->FrontStep: 5000002 FAKE_COORDINATOR: Erasing txId 281474976710671 FAKE_COORDINATOR: advance: minStep5000003 State->FrontStep: 5000002 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 281474976710666 at step: 5000003 FAKE_COORDINATOR: Erasing txId 281474976710666 FAKE_COORDINATOR: advance: minStep5000004 State->FrontStep: 5000003 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 281474976710670 at step: 5000004 FAKE_COORDINATOR: Erasing txId 281474976710670 FAKE_COORDINATOR: advance: minStep5000005 State->FrontStep: 5000004 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 281474976710665 at step: 5000005 FAKE_COORDINATOR: Erasing txId 281474976710665 FAKE_COORDINATOR: advance: minStep5000006 State->FrontStep: 5000005 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 281474976710669 at step: 5000006 FAKE_COORDINATOR: Erasing txId 281474976710669 FAKE_COORDINATOR: advance: minStep5000007 State->FrontStep: 5000006 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 281474976710664 at step: 5000007 FAKE_COORDINATOR: Erasing txId 281474976710664 FAKE_COORDINATOR: advance: minStep5000008 State->FrontStep: 5000007 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 281474976710668 at step: 5000008 FAKE_COORDINATOR: Erasing txId 281474976710668 FAKE_COORDINATOR: advance: minStep5000009 State->FrontStep: 5000008 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 281474976710663 at step: 5000009 FAKE_COORDINATOR: Erasing txId 281474976710663 FAKE_COORDINATOR: advance: minStep5000010 State->FrontStep: 5000009 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 281474976710667 at step: 5000010 FAKE_COORDINATOR: Erasing txId 281474976710667 FAKE_COORDINATOR: advance: minStep5000011 State->FrontStep: 5000010 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 281474976710662 at step: 5000011 FAKE_COORDINATOR: Erasing txId 281474976710662 FAKE_COORDINATOR: advance: minStep5000012 State->FrontStep: 5000011 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 281474976710661 at step: 5000012 FAKE_COORDINATOR: Erasing txId 281474976710661 FAKE_COORDINATOR: advance: minStep5000013 State->FrontStep: 5000012 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 281474976710660 at step: 5000013 FAKE_COORDINATOR: Erasing txId 281474976710660 FAKE_COORDINATOR: advance: minStep5000014 State->FrontStep: 5000013 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 281474976710659 at step: 5000014 FAKE_COORDINATOR: Erasing txId 281474976710659 FAKE_COORDINATOR: advance: minStep5000015 State->FrontStep: 5000014 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 281474976710658 at step: 5000015 FAKE_COORDINATOR: Erasing txId 281474976710658 
FAKE_COORDINATOR: advance: minStep5000016 State->FrontStep: 5000015 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 281474976710692 at step: 5000016 FAKE_COORDINATOR: Erasing txId 281474976710692 FAKE_COORDINATOR: advance: minStep5000017 State->FrontStep: 5000016 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 281474976710687 at step: 5000017 FAKE_COORDINATOR: Erasing txId 281474976710687 FAKE_COORDINATOR: advance: minStep5000018 State->FrontStep: 5000017 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 281474976710691 at step: 5000018 FAKE_COORDINATOR: Erasing txId 281474976710691 FAKE_COORDINATOR: advance: minStep5000019 State->FrontStep: 5000018 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 281474976710686 at step: 5000019 FAKE_COORDINATOR: Erasing txId 281474976710686 FAKE_COORDINATOR: advance: minStep5000020 State->FrontStep: 5000019 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 281474976710690 at step: 5000020 FAKE_COORDINATOR: Erasing txId 281474976710690 FAKE_COORDINATOR: advance: minStep5000021 State->FrontStep: 5000020 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 281474976710685 at step: 5000021 FAKE_COORDINATOR: Erasing txId 281474976710685 FAKE_COORDINATOR: advance: minStep5000022 State->FrontStep: 5000021 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 281474976710689 at step: 5000022 FAKE_COORDINATOR: Erasing txId 281474976710689 FAKE_COORDINATOR: advance: minStep5000023 State->FrontStep: 5000022 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 281474976710684 at step: 5000023 FAKE_COORDINATOR: Erasing txId 281474976710684 FAKE_COORDINATOR: advance: minStep5000024 State->FrontStep: 5000023 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 281474976710688 at step: 5000024 FAKE_COORDINATOR: Erasing txId 281474976710688 FAKE_COORDINATOR: advance: minStep5000025 State->FrontStep: 5000024 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 281474976710683 at step: 5000025 FAKE_COORDINATOR: Erasing txId 281474976710683 FAKE_COORDINATOR: advance: minStep5000026 State->FrontStep: 5000025 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 281474976710682 at step: 5000026 FAKE_COORDINATOR: Erasing txId 281474976710682 FAKE_COORDINATOR: advance: minStep5000027 State->FrontStep: 5000026 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 281474976710677 at step: 5000027 FAKE_COORDINATOR: Erasing txId 281474976710677 FAKE_COORDINATOR: advance: minStep5000028 State->FrontStep: 5000027 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 281474976710681 at step: 5000028 FAKE_COORDINATOR: Erasing txId 281474976710681 FAKE_COORDINATOR: advance: minStep5000029 State->FrontStep: 5000028 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 281474976710676 at step: 5000029 FAKE_COORDINATOR: Erasing txId 281474976710676 FAKE_COORDINATOR: advance: minStep5000030 State->FrontStep: 5000029 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 281474976710680 at step: 5000030 FAKE_COORDINATOR: Erasing txId 281474976710680 FAKE_COORDINATOR: advance: minStep5000031 State->FrontStep: 5000030 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 281474976710675 at step: 5000031 FAKE_COORDINATOR: Erasing txId 281474976710675 FAKE_COORDINATOR: advance: minStep5000032 State->FrontStep: 5000031 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for 
txId: 281474976710679 at step: 5000032 FAKE_COORDINATOR: Erasing txId 281474976710679 FAKE_COORDINATOR: advance: minStep5000033 State->FrontStep: 5000032 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 281474976710674 at step: 5000033 FAKE_COORDINATOR: Erasing txId 281474976710674 FAKE_COORDINATOR: advance: minStep5000034 State->FrontStep: 5000033 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 281474976710678 at step: 5000034 FAKE_COORDINATOR: Erasing txId 281474976710678 FAKE_COORDINATOR: advance: minStep5000035 State->FrontStep: 5000034 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 281474976710673 at step: 5000035 FAKE_COORDINATOR: Erasing txId 281474976710673 FAKE_COORDINATOR: advance: minStep5000036 State->FrontStep: 5000035 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 281474976710672 at step: 5000036 FAKE_COORDINATOR: Erasing txId 281474976710672 ... waiting for SysViewsRoster update finished (done) TestModificationResults wait txId: 1 2026-01-08T15:36:31.017845Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) FAKE_COORDINATOR: Add transaction: 1 at step: 5000037 FAKE_COORDINATOR: advance: minStep5000037 State->FrontStep: 5000036 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000037 FAKE_COORDINATOR: Erasing txId 1 TestModificationResult got TxId: 1, wait until txId: 1 TestModificationResults wait txId: 101 2026-01-08T15:36:31.030509Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 101:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) TestModificationResult got TxId: 101, wait until txId: 101 TestWaitNotification wait txId: 101 2026-01-08T15:36:31.042373Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:123: NotifyTxCompletion, unknown transaction, txId: 101, at schemeshard: 72057594046678944 TestWaitNotification: OK eventTxId 101 2026-01-08T15:36:31.046543Z node 3 :TX_PROXY_SCHEME_CACHE WARN: cache.cpp:324: Access denied: self# [3:430:2410], for# user1@builtin, access# DescribeSchema 2026-01-08T15:36:31.047322Z node 3 :TX_PROXY_SCHEME_CACHE WARN: cache.cpp:324: Access denied: self# [3:436:2416], for# user1@builtin, access# |88.4%| [TM] {BAZEL_UPLOAD} ydb/core/tx/scheme_board/ut_cache/unittest >> TConsoleConfigTests::TestGetNodeItems [GOOD] >> TConsoleConfigTests::TestGetNodeConfig ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/scheme_board/ut_cache/unittest >> TCacheTest::MigrationUndo [GOOD] Test command err: 2026-01-08T15:36:26.864956Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7755: Cannot subscribe to console configs 2026-01-08T15:36:26.865015Z node 1 :IMPORT WARN: schemeshard_import.cpp:440: Table profiles were not loaded ... 
waiting for SysViewsRoster update finished FAKE_COORDINATOR: Add transaction: 281474976710657 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 281474976710657 at step: 5000001 FAKE_COORDINATOR: Erasing txId 281474976710657 FAKE_COORDINATOR: Add transaction: 281474976710671 at step: 5000002 FAKE_COORDINATOR: advance: minStep5000002 State->FrontStep: 5000001 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 281474976710671 at step: 5000002 FAKE_COORDINATOR: Add transaction: 281474976710666 at step: 5000003 FAKE_COORDINATOR: advance: minStep5000002 State->FrontStep: 5000002 FAKE_COORDINATOR: Add transaction: 281474976710670 at step: 5000004 FAKE_COORDINATOR: advance: minStep5000002 State->FrontStep: 5000002 FAKE_COORDINATOR: Add transaction: 281474976710665 at step: 5000005 FAKE_COORDINATOR: advance: minStep5000002 State->FrontStep: 5000002 FAKE_COORDINATOR: Add transaction: 281474976710669 at step: 5000006 FAKE_COORDINATOR: advance: minStep5000002 State->FrontStep: 5000002 FAKE_COORDINATOR: Add transaction: 281474976710664 at step: 5000007 FAKE_COORDINATOR: advance: minStep5000002 State->FrontStep: 5000002 FAKE_COORDINATOR: Add transaction: 281474976710668 at step: 5000008 FAKE_COORDINATOR: advance: minStep5000002 State->FrontStep: 5000002 FAKE_COORDINATOR: Add transaction: 281474976710663 at step: 5000009 FAKE_COORDINATOR: advance: minStep5000002 State->FrontStep: 5000002 FAKE_COORDINATOR: Add transaction: 281474976710667 at step: 5000010 FAKE_COORDINATOR: advance: minStep5000002 State->FrontStep: 5000002 FAKE_COORDINATOR: Add transaction: 281474976710662 at step: 5000011 FAKE_COORDINATOR: advance: minStep5000002 State->FrontStep: 5000002 FAKE_COORDINATOR: Add transaction: 281474976710661 at step: 5000012 FAKE_COORDINATOR: advance: minStep5000002 State->FrontStep: 5000002 FAKE_COORDINATOR: Add transaction: 281474976710660 at step: 5000013 FAKE_COORDINATOR: advance: minStep5000002 State->FrontStep: 5000002 FAKE_COORDINATOR: Add transaction: 281474976710659 at step: 5000014 FAKE_COORDINATOR: advance: minStep5000002 State->FrontStep: 5000002 FAKE_COORDINATOR: Add transaction: 281474976710658 at step: 5000015 FAKE_COORDINATOR: advance: minStep5000002 State->FrontStep: 5000002 FAKE_COORDINATOR: Add transaction: 281474976710692 at step: 5000016 FAKE_COORDINATOR: advance: minStep5000002 State->FrontStep: 5000002 FAKE_COORDINATOR: Add transaction: 281474976710687 at step: 5000017 FAKE_COORDINATOR: advance: minStep5000002 State->FrontStep: 5000002 FAKE_COORDINATOR: Add transaction: 281474976710691 at step: 5000018 FAKE_COORDINATOR: advance: minStep5000002 State->FrontStep: 5000002 FAKE_COORDINATOR: Add transaction: 281474976710686 at step: 5000019 FAKE_COORDINATOR: advance: minStep5000002 State->FrontStep: 5000002 FAKE_COORDINATOR: Add transaction: 281474976710690 at step: 5000020 FAKE_COORDINATOR: advance: minStep5000002 State->FrontStep: 5000002 FAKE_COORDINATOR: Add transaction: 281474976710685 at step: 5000021 FAKE_COORDINATOR: advance: minStep5000002 State->FrontStep: 5000002 FAKE_COORDINATOR: Add transaction: 281474976710689 at step: 5000022 FAKE_COORDINATOR: advance: minStep5000002 State->FrontStep: 5000002 FAKE_COORDINATOR: Add transaction: 281474976710684 at step: 5000023 FAKE_COORDINATOR: advance: minStep5000002 State->FrontStep: 5000002 FAKE_COORDINATOR: Add transaction: 281474976710688 at step: 5000024 FAKE_COORDINATOR: advance: minStep5000002 State->FrontStep: 5000002 
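[Editor's note] The harness messages interleaved with the coordinator output ("TestWaitNotification wait txId: N" followed eventually by "TestWaitNotification: OK eventTxId N") amount to blocking on a completion event keyed by txId. A minimal sketch of such a waiter, assuming a promise/future registry — hypothetical names, not the actual YDB test harness:

#include <cstdint>
#include <future>
#include <iostream>
#include <map>
#include <mutex>

// Completion registry keyed by txId: test code blocks on the future returned
// by Subscribe(txId) until another component reports completion via Notify(txId).
class TxNotifier {
public:
    std::future<void> Subscribe(uint64_t txId) {
        std::lock_guard<std::mutex> lock(Mutex);
        return Pending[txId].get_future();
    }

    void Notify(uint64_t txId) {
        std::lock_guard<std::mutex> lock(Mutex);
        auto it = Pending.find(txId);
        if (it != Pending.end()) {
            it->second.set_value(); // wake the waiter; no-op if nobody subscribed
            Pending.erase(it);
        }
    }

private:
    std::mutex Mutex;
    std::map<uint64_t, std::promise<void>> Pending;
};

int main() {
    TxNotifier n;
    auto done = n.Subscribe(101); // "TestWaitNotification wait txId: 101"
    n.Notify(101);                // e.g. the schemeshard reports the operation finished
    done.wait();
    std::cout << "OK eventTxId 101\n";
}

Note the "NotifyTxCompletion, unknown transaction" WARN lines elsewhere in these dumps: they correspond to the case where a waiter subscribes for a txId the schemeshard no longer tracks, which the harness tolerates by completing immediately.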
FAKE_COORDINATOR: Add transaction: 281474976710683 at step: 5000025 FAKE_COORDINATOR: advance: minStep5000002 State->FrontStep: 5000002 FAKE_COORDINATOR: Add transaction: 281474976710682 at step: 5000026 FAKE_COORDINATOR: advance: minStep5000002 State->FrontStep: 5000002 FAKE_COORDINATOR: Add transaction: 281474976710677 at step: 5000027 FAKE_COORDINATOR: advance: minStep5000002 State->FrontStep: 5000002 FAKE_COORDINATOR: Add transaction: 281474976710681 at step: 5000028 FAKE_COORDINATOR: advance: minStep5000002 State->FrontStep: 5000002 FAKE_COORDINATOR: Add transaction: 281474976710676 at step: 5000029 FAKE_COORDINATOR: advance: minStep5000002 State->FrontStep: 5000002 FAKE_COORDINATOR: Add transaction: 281474976710680 at step: 5000030 FAKE_COORDINATOR: advance: minStep5000002 State->FrontStep: 5000002 FAKE_COORDINATOR: Add transaction: 281474976710675 at step: 5000031 FAKE_COORDINATOR: advance: minStep5000002 State->FrontStep: 5000002 FAKE_COORDINATOR: Add transaction: 281474976710679 at step: 5000032 FAKE_COORDINATOR: advance: minStep5000002 State->FrontStep: 5000002 FAKE_COORDINATOR: Add transaction: 281474976710674 at step: 5000033 FAKE_COORDINATOR: advance: minStep5000002 State->FrontStep: 5000002 FAKE_COORDINATOR: Add transaction: 281474976710678 at step: 5000034 FAKE_COORDINATOR: advance: minStep5000002 State->FrontStep: 5000002 FAKE_COORDINATOR: Add transaction: 281474976710673 at step: 5000035 FAKE_COORDINATOR: advance: minStep5000002 State->FrontStep: 5000002 FAKE_COORDINATOR: Add transaction: 281474976710672 at step: 5000036 FAKE_COORDINATOR: advance: minStep5000002 State->FrontStep: 5000002 FAKE_COORDINATOR: Erasing txId 281474976710671 FAKE_COORDINATOR: advance: minStep5000003 State->FrontStep: 5000002 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 281474976710666 at step: 5000003 FAKE_COORDINATOR: Erasing txId 281474976710666 FAKE_COORDINATOR: advance: minStep5000004 State->FrontStep: 5000003 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 281474976710670 at step: 5000004 FAKE_COORDINATOR: Erasing txId 281474976710670 FAKE_COORDINATOR: advance: minStep5000005 State->FrontStep: 5000004 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 281474976710665 at step: 5000005 FAKE_COORDINATOR: Erasing txId 281474976710665 FAKE_COORDINATOR: advance: minStep5000006 State->FrontStep: 5000005 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 281474976710669 at step: 5000006 FAKE_COORDINATOR: Erasing txId 281474976710669 FAKE_COORDINATOR: advance: minStep5000007 State->FrontStep: 5000006 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 281474976710664 at step: 5000007 FAKE_COORDINATOR: Erasing txId 281474976710664 FAKE_COORDINATOR: advance: minStep5000008 State->FrontStep: 5000007 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 281474976710668 at step: 5000008 FAKE_COORDINATOR: Erasing txId 281474976710668 FAKE_COORDINATOR: advance: minStep5000009 State->FrontStep: 5000008 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 281474976710663 at step: 5000009 FAKE_COORDINATOR: Erasing txId 281474976710663 FAKE_COORDINATOR: advance: minStep5000010 State->FrontStep: 5000009 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 281474976710667 at step: 5000010 FAKE_COORDINATOR: Erasing txId 281474976710667 FAKE_COORDINATOR: advance: minStep5000011 State->FrontStep: 5000010 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 281474976710662 at step: 
5000011 FAKE_COORDINATOR: Erasing txId 281474976710662 FAKE_COORDINATOR: advance: minStep5000012 State->FrontStep: 5000011 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 281474976710661 at step: 5000012 FAKE_COORDINATOR: Erasing txId 281474976710661 FAKE_COORDINATOR: advance: minStep5000013 State->FrontStep: 5000012 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 281474976710660 at step: 5000013 FAKE_COORDINATOR: Erasing txId 281474976710660 FAKE_COORDINATOR: advance: minStep5000014 State->FrontStep: 5000013 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 281474976710659 at step: 5000014 FAKE_COORDINATOR: Erasing txId 281474976710659 FAKE_COORDINATOR: advance: minStep5000015 State->FrontStep: 5000014 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 281474976710658 at step: 5000015 FAKE_COORDINATOR: Erasing txId 281474976710658 FAKE_COORDINATOR: advance: minStep5000016 State->FrontStep: 5000015 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 281474976710692 at step: 5000016 FAKE_COORDINATOR: Erasing txId 281474976710692 FAKE_COORDINATOR: advance: minStep5000017 State->FrontStep: 5000016 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 281474976710687 at step: 5000017 FAKE_COORDINATOR: Erasing txId 281474976710687 FAKE_COORDINATOR: advance: minStep5000018 State->FrontStep: 5000017 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 281474976710691 at step: 5000018 FAKE_COORDINATOR: Erasing txId 281474976710691 FAKE_COORDINATOR: advance: minStep5000019 State->FrontStep: 5000018 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 281474976710686 at step: 5000019 FAKE_COORDINATOR: Erasing txId 281474976710686 FAKE_COORDINATOR: advance: minStep5000020 State->FrontStep: 5000019 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 281474976710690 at step: 5000020 FAKE_COORDINATOR: Erasing txId 281474976710690 FAKE_COORDINATOR: advance: minStep5000021 State->FrontStep: 5000020 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 281474976710685 at step: 5000021 FAKE_COORDINATOR: Erasing txId 281474976710685 FAKE_COORDINATOR: advance: minStep5000022 State->FrontStep: 5000021 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 281474976710689 at step: 5000022 FAKE_COORDINATOR: Erasing txId 281474976710689 FAKE_COORDINATOR: advance: minStep5000023 State->FrontStep: 5000022 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 281474976710684 at step: 5000023 FAKE_COORDINATOR: Erasing txId 281474976710684 FAKE_COORDINATOR: advance: minStep5000024 State->FrontStep: 5000023 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 281474976710688 at step: 5000024 FAKE_COORDINATOR: Erasing txId 281474976710688 FAKE_COORDINATOR: advance: minStep5000025 State->FrontStep: 5000024 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 281474976710683 at step: 5000025 FAKE_COORDINATOR: Erasing txId 281474976710683 FAKE_COORDINATOR: advance: minStep5000026 State->FrontStep: 5000025 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 281474976710682 at step: 5000026 FAKE_COORDINATOR: Erasing txId 281474976710682 FAKE_COORDINATOR: advance: minStep5000027 State->FrontStep: 5000026 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 281474976710677 at step: 5000027 FAKE_COORDINATOR: Erasing txId 281474976710677 FAKE_COORDINATOR: advance: minStep5000028 State->FrontStep: 5000027 
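[Editor's note] Just below, the MigrationUndo dump shows every SysViewsRosterUpdate proposal bouncing off the tenant schemeshard with "Schema modification rejected because of ReadOnly mode". The gate being exercised reduces to a mode check applied before any schema mutation is accepted; a minimal sketch of that idea (illustrative only, not schemeshard_impl.cpp):

#include <cstdint>
#include <iostream>
#include <string>

enum class SchemaMode { Normal, ReadOnly };

struct ToySchemeshard {
    SchemaMode Mode = SchemaMode::Normal;

    // Every schema-mutating proposal passes this gate first; while a
    // migration holds the shard in ReadOnly, all of them are rejected.
    bool ProposeModification(uint64_t txId, const std::string& path) {
        if (Mode == SchemaMode::ReadOnly) {
            std::cout << "Schema modification rejected because of ReadOnly mode,"
                      << " txid: " << txId << " (" << path << ")\n";
            return false;
        }
        std::cout << "accepted txid: " << txId << "\n";
        return true;
    }
};

int main() {
    ToySchemeshard shard;
    shard.Mode = SchemaMode::ReadOnly; // migration in progress
    shard.ProposeModification(281474976720667, "/Root/USER_0/.sys/nodes");
}

The paired ERROR lines ("SysViewsRosterUpdate# ... failed to create sys view ..., reason: Schema is in ReadOnly mode") are the caller-side view of the same rejection: the roster updater retries each sys-view creation and records the refusal per path.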
FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 281474976710681 at step: 5000028 FAKE_COORDINATOR: Erasing txId 281474976710681 FAKE_COORDINATOR: advance: minStep5000029 State->FrontStep: 5000028 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 281474976710676 at step: 5000029 FAKE_COORDINATOR: Erasing txId 281474976710676 FAKE_COORDINATOR: advance: minStep5000030 State->FrontStep: 5000029 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 281474976710680 at step: 5000030 FAKE_COORDINATOR: Erasing txId 281474976710680 FAKE_COORDINATOR: advance: minStep5000031 State->FrontStep: 5000030 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 281474976710675 at step: 5000031 FAKE_COORDINATOR: Erasing txId 281474976710675 FAKE_COORDINATOR: advance: minStep5000032 State->FrontStep: 5000031 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 281474976710679 at step: 5000032 FAKE_COORDINATOR: Erasing txId 281474976710679 FAKE_COORDINATOR: advance: minStep5000033 State->FrontStep: 5000032 FAKE_COORDINATOR: ... :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:6049: Schema modification rejected because of ReadOnly mode, at tablet: 72075186233409549 txid: 281474976720667 2026-01-08T15:36:30.536602Z node 2 :FLAT_TX_SCHEMESHARD ERROR: schemeshard_sysviews_update.cpp:189: SysViewsRosterUpdate# [2:688:2595] at schemeshard: 72075186233409549, failed to create sys view '/Root/USER_0/.sys/top_queries_by_request_units_one_minute', reason: Schema is in ReadOnly mode 2026-01-08T15:36:30.536676Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:6049: Schema modification rejected because of ReadOnly mode, at tablet: 72075186233409549 txid: 281474976720668 2026-01-08T15:36:30.536730Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:6049: Schema modification rejected because of ReadOnly mode, at tablet: 72075186233409549 txid: 281474976720669 2026-01-08T15:36:30.536783Z node 2 :FLAT_TX_SCHEMESHARD ERROR: schemeshard_sysviews_update.cpp:189: SysViewsRosterUpdate# [2:688:2595] at schemeshard: 72075186233409549, failed to create sys view '/Root/USER_0/.sys/top_queries_by_read_bytes_one_minute', reason: Schema is in ReadOnly mode 2026-01-08T15:36:30.536891Z node 2 :FLAT_TX_SCHEMESHARD ERROR: schemeshard_sysviews_update.cpp:189: SysViewsRosterUpdate# [2:688:2595] at schemeshard: 72075186233409549, failed to create sys view '/Root/USER_0/.sys/resource_pool_classifiers', reason: Schema is in ReadOnly mode 2026-01-08T15:36:30.537237Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:6049: Schema modification rejected because of ReadOnly mode, at tablet: 72075186233409549 txid: 281474976720670 2026-01-08T15:36:30.537288Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:6049: Schema modification rejected because of ReadOnly mode, at tablet: 72075186233409549 txid: 281474976720671 2026-01-08T15:36:30.537338Z node 2 :FLAT_TX_SCHEMESHARD ERROR: schemeshard_sysviews_update.cpp:189: SysViewsRosterUpdate# [2:688:2595] at schemeshard: 72075186233409549, failed to create sys view '/Root/USER_0/.sys/nodes', reason: Schema is in ReadOnly mode 2026-01-08T15:36:30.537487Z node 2 :FLAT_TX_SCHEMESHARD ERROR: schemeshard_sysviews_update.cpp:189: SysViewsRosterUpdate# [2:688:2595] at schemeshard: 72075186233409549, failed to create sys view '/Root/USER_0/.sys/top_queries_by_cpu_time_one_hour', reason: Schema is in ReadOnly mode 2026-01-08T15:36:30.537681Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:6049: Schema modification rejected 
because of ReadOnly mode, at tablet: 72075186233409549 txid: 281474976720672 2026-01-08T15:36:30.537740Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:6049: Schema modification rejected because of ReadOnly mode, at tablet: 72075186233409549 txid: 281474976720673 2026-01-08T15:36:30.537778Z node 2 :FLAT_TX_SCHEMESHARD ERROR: schemeshard_sysviews_update.cpp:189: SysViewsRosterUpdate# [2:688:2595] at schemeshard: 72075186233409549, failed to create sys view '/Root/USER_0/.sys/top_partitions_by_tli_one_minute', reason: Schema is in ReadOnly mode 2026-01-08T15:36:30.537951Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:6049: Schema modification rejected because of ReadOnly mode, at tablet: 72075186233409549 txid: 281474976720674 2026-01-08T15:36:30.538007Z node 2 :FLAT_TX_SCHEMESHARD ERROR: schemeshard_sysviews_update.cpp:189: SysViewsRosterUpdate# [2:688:2595] at schemeshard: 72075186233409549, failed to create sys view '/Root/USER_0/.sys/top_queries_by_cpu_time_one_minute', reason: Schema is in ReadOnly mode 2026-01-08T15:36:30.538118Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:6049: Schema modification rejected because of ReadOnly mode, at tablet: 72075186233409549 txid: 281474976720675 2026-01-08T15:36:30.538163Z node 2 :FLAT_TX_SCHEMESHARD ERROR: schemeshard_sysviews_update.cpp:189: SysViewsRosterUpdate# [2:688:2595] at schemeshard: 72075186233409549, failed to create sys view '/Root/USER_0/.sys/top_partitions_by_tli_one_hour', reason: Schema is in ReadOnly mode 2026-01-08T15:36:30.538269Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:6049: Schema modification rejected because of ReadOnly mode, at tablet: 72075186233409549 txid: 281474976720676 2026-01-08T15:36:30.538310Z node 2 :FLAT_TX_SCHEMESHARD ERROR: schemeshard_sysviews_update.cpp:189: SysViewsRosterUpdate# [2:688:2595] at schemeshard: 72075186233409549, failed to create sys view '/Root/USER_0/.sys/top_queries_by_request_units_one_hour', reason: Schema is in ReadOnly mode 2026-01-08T15:36:30.545124Z node 2 :FLAT_TX_SCHEMESHARD ERROR: schemeshard_sysviews_update.cpp:189: SysViewsRosterUpdate# [2:688:2595] at schemeshard: 72075186233409549, failed to create sys view '/Root/USER_0/.sys/top_partitions_one_hour', reason: Schema is in ReadOnly mode 2026-01-08T15:36:30.545298Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:6049: Schema modification rejected because of ReadOnly mode, at tablet: 72075186233409549 txid: 281474976720677 2026-01-08T15:36:30.545435Z node 2 :FLAT_TX_SCHEMESHARD ERROR: schemeshard_sysviews_update.cpp:189: SysViewsRosterUpdate# [2:688:2595] at schemeshard: 72075186233409549, failed to create sys view '/Root/USER_0/.sys/pg_tables', reason: Schema is in ReadOnly mode 2026-01-08T15:36:30.545546Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:6049: Schema modification rejected because of ReadOnly mode, at tablet: 72075186233409549 txid: 281474976720678 2026-01-08T15:36:30.545592Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:6049: Schema modification rejected because of ReadOnly mode, at tablet: 72075186233409549 txid: 281474976720679 2026-01-08T15:36:30.545699Z node 2 :FLAT_TX_SCHEMESHARD ERROR: schemeshard_sysviews_update.cpp:189: SysViewsRosterUpdate# [2:688:2595] at schemeshard: 72075186233409549, failed to create sys view '/Root/USER_0/.sys/top_queries_by_duration_one_minute', reason: Schema is in ReadOnly mode 2026-01-08T15:36:30.545784Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:6049: Schema modification rejected because of ReadOnly 
mode, at tablet: 72075186233409549 txid: 281474976720680 2026-01-08T15:36:30.545831Z node 2 :FLAT_TX_SCHEMESHARD ERROR: schemeshard_sysviews_update.cpp:189: SysViewsRosterUpdate# [2:688:2595] at schemeshard: 72075186233409549, failed to create sys view '/Root/USER_0/.sys/auth_group_members', reason: Schema is in ReadOnly mode 2026-01-08T15:36:30.545896Z node 2 :FLAT_TX_SCHEMESHARD ERROR: schemeshard_sysviews_update.cpp:189: SysViewsRosterUpdate# [2:688:2595] at schemeshard: 72075186233409549, failed to create sys view '/Root/USER_0/.sys/auth_users', reason: Schema is in ReadOnly mode 2026-01-08T15:36:30.545971Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:6049: Schema modification rejected because of ReadOnly mode, at tablet: 72075186233409549 txid: 281474976720681 2026-01-08T15:36:30.546026Z node 2 :FLAT_TX_SCHEMESHARD ERROR: schemeshard_sysviews_update.cpp:189: SysViewsRosterUpdate# [2:688:2595] at schemeshard: 72075186233409549, failed to create sys view '/Root/USER_0/.sys/resource_pools', reason: Schema is in ReadOnly mode 2026-01-08T15:36:30.546079Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:6049: Schema modification rejected because of ReadOnly mode, at tablet: 72075186233409549 txid: 281474976720682 2026-01-08T15:36:30.546125Z node 2 :FLAT_TX_SCHEMESHARD ERROR: schemeshard_sysviews_update.cpp:189: SysViewsRosterUpdate# [2:688:2595] at schemeshard: 72075186233409549, failed to create sys view '/Root/USER_0/.sys/streaming_queries', reason: Schema is in ReadOnly mode 2026-01-08T15:36:30.546184Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:6049: Schema modification rejected because of ReadOnly mode, at tablet: 72075186233409549 txid: 281474976720683 2026-01-08T15:36:30.546230Z node 2 :FLAT_TX_SCHEMESHARD ERROR: schemeshard_sysviews_update.cpp:189: SysViewsRosterUpdate# [2:688:2595] at schemeshard: 72075186233409549, failed to create sys view '/Root/USER_0/.sys/auth_permissions', reason: Schema is in ReadOnly mode 2026-01-08T15:36:30.546271Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:6049: Schema modification rejected because of ReadOnly mode, at tablet: 72075186233409549 txid: 281474976720684 2026-01-08T15:36:30.546328Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:6049: Schema modification rejected because of ReadOnly mode, at tablet: 72075186233409549 txid: 281474976720685 2026-01-08T15:36:30.546392Z node 2 :FLAT_TX_SCHEMESHARD ERROR: schemeshard_sysviews_update.cpp:189: SysViewsRosterUpdate# [2:688:2595] at schemeshard: 72075186233409549, failed to create sys view '/Root/USER_0/.sys/top_partitions_one_minute', reason: Schema is in ReadOnly mode 2026-01-08T15:36:30.546497Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:6049: Schema modification rejected because of ReadOnly mode, at tablet: 72075186233409549 txid: 281474976720686 2026-01-08T15:36:30.546538Z node 2 :FLAT_TX_SCHEMESHARD ERROR: schemeshard_sysviews_update.cpp:189: SysViewsRosterUpdate# [2:688:2595] at schemeshard: 72075186233409549, failed to create sys view '/Root/USER_0/.sys/auth_effective_permissions', reason: Schema is in ReadOnly mode 2026-01-08T15:36:30.552818Z node 2 :FLAT_TX_SCHEMESHARD ERROR: schemeshard_sysviews_update.cpp:189: SysViewsRosterUpdate# [2:688:2595] at schemeshard: 72075186233409549, failed to create sys view '/Root/USER_0/.sys/auth_groups', reason: Schema is in ReadOnly mode TestModificationResult got TxId: 105, wait until txId: 105 TestWaitNotification wait txId: 105 TestWaitNotification: OK eventTxId 105 TestModificationResults wait 
txId: 106 2026-01-08T15:36:30.556067Z node 2 :FLAT_TX_SCHEMESHARD ERROR: schemeshard__operation_upgrade_subdomain.cpp:1466: TWait ProgressState, dependent transaction: 106, parent transaction: 105, at schemeshard: 72057594046678944 2026-01-08T15:36:30.556256Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpUpgradeSubDomainDecision, opId: 106:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_upgrade_subdomain.cpp:601) 2026-01-08T15:36:30.569021Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:6292: Got TEvUpdateAck for unknown txId 105, at schemeshard: 72057594046678944 FAKEHIVE 72057594037968897 TEvDeleteTablet ShardOwnerId: 72057594046678944 ShardLocalIdx: 4 TxId_Deprecated: 0 TabletID: 72075186233409549 Forgetting tablet 72075186233409549 2026-01-08T15:36:30.587188Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:6292: Got TEvUpdateAck for unknown txId 105, at schemeshard: 72057594046678944 2026-01-08T15:36:30.589929Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:6292: Got TEvUpdateAck for unknown txId 105, at schemeshard: 72057594046678944 2026-01-08T15:36:30.594516Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:6292: Got TEvUpdateAck for unknown txId 105, at schemeshard: 72057594046678944 TestModificationResult got TxId: 106, wait until txId: 106 TestWaitNotification wait txId: 106 2026-01-08T15:36:30.597246Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:123: NotifyTxCompletion, unknown transaction, txId: 106, at schemeshard: 72057594046678944 TestWaitNotification: OK eventTxId 106 2026-01-08T15:36:30.606095Z node 2 :TX_DATASHARD ERROR: datashard.cpp:3603: Datashard's schemeshard pipe destroyed while no messages to sent at 72075186233409548 Leader for TabletID 72057594046678944 is [2:408:2397] sender: [2:733:2067] recipient: [2:49:2096] Leader for TabletID 72057594046678944 is [2:408:2397] sender: [2:736:2067] recipient: [2:735:2628] Leader for TabletID 72057594046678944 is [2:737:2629] sender: [2:738:2067] recipient: [2:735:2628] 2026-01-08T15:36:30.749866Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7755: Cannot subscribe to console configs 2026-01-08T15:36:30.749928Z node 2 :IMPORT WARN: schemeshard_import.cpp:440: Table profiles were not loaded |88.4%| [TM] {BAZEL_UPLOAD} ydb/core/tx/scheme_board/ut_cache/unittest >> HttpRequest::AnalyzeServerless [GOOD] >> TExportToS3Tests::CancelUponCreatingExportDirShouldSucceed >> TSchemeShardExportToFsTests::ShouldSucceedCreateExportToFs ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/scheme_board/ut_cache/unittest >> TCacheTest::CookiesArePreserved [GOOD] Test command err: 2026-01-08T15:36:27.924107Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7755: Cannot subscribe to console configs 2026-01-08T15:36:27.924151Z node 1 :IMPORT WARN: schemeshard_import.cpp:440: Table profiles were not loaded ... 
waiting for SysViewsRoster update finished FAKE_COORDINATOR: Add transaction: 281474976710657 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 281474976710657 at step: 5000001 FAKE_COORDINATOR: Erasing txId 281474976710657 FAKE_COORDINATOR: Add transaction: 281474976710671 at step: 5000002 FAKE_COORDINATOR: advance: minStep5000002 State->FrontStep: 5000001 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 281474976710671 at step: 5000002 FAKE_COORDINATOR: Add transaction: 281474976710666 at step: 5000003 FAKE_COORDINATOR: advance: minStep5000002 State->FrontStep: 5000002 FAKE_COORDINATOR: Add transaction: 281474976710670 at step: 5000004 FAKE_COORDINATOR: advance: minStep5000002 State->FrontStep: 5000002 FAKE_COORDINATOR: Add transaction: 281474976710665 at step: 5000005 FAKE_COORDINATOR: advance: minStep5000002 State->FrontStep: 5000002 FAKE_COORDINATOR: Add transaction: 281474976710669 at step: 5000006 FAKE_COORDINATOR: advance: minStep5000002 State->FrontStep: 5000002 FAKE_COORDINATOR: Add transaction: 281474976710664 at step: 5000007 FAKE_COORDINATOR: advance: minStep5000002 State->FrontStep: 5000002 FAKE_COORDINATOR: Add transaction: 281474976710668 at step: 5000008 FAKE_COORDINATOR: advance: minStep5000002 State->FrontStep: 5000002 FAKE_COORDINATOR: Add transaction: 281474976710663 at step: 5000009 FAKE_COORDINATOR: advance: minStep5000002 State->FrontStep: 5000002 FAKE_COORDINATOR: Add transaction: 281474976710667 at step: 5000010 FAKE_COORDINATOR: advance: minStep5000002 State->FrontStep: 5000002 FAKE_COORDINATOR: Add transaction: 281474976710662 at step: 5000011 FAKE_COORDINATOR: advance: minStep5000002 State->FrontStep: 5000002 FAKE_COORDINATOR: Add transaction: 281474976710661 at step: 5000012 FAKE_COORDINATOR: advance: minStep5000002 State->FrontStep: 5000002 FAKE_COORDINATOR: Add transaction: 281474976710660 at step: 5000013 FAKE_COORDINATOR: advance: minStep5000002 State->FrontStep: 5000002 FAKE_COORDINATOR: Add transaction: 281474976710659 at step: 5000014 FAKE_COORDINATOR: advance: minStep5000002 State->FrontStep: 5000002 FAKE_COORDINATOR: Add transaction: 281474976710658 at step: 5000015 FAKE_COORDINATOR: advance: minStep5000002 State->FrontStep: 5000002 FAKE_COORDINATOR: Add transaction: 281474976710692 at step: 5000016 FAKE_COORDINATOR: advance: minStep5000002 State->FrontStep: 5000002 FAKE_COORDINATOR: Add transaction: 281474976710687 at step: 5000017 FAKE_COORDINATOR: advance: minStep5000002 State->FrontStep: 5000002 FAKE_COORDINATOR: Add transaction: 281474976710691 at step: 5000018 FAKE_COORDINATOR: advance: minStep5000002 State->FrontStep: 5000002 FAKE_COORDINATOR: Add transaction: 281474976710686 at step: 5000019 FAKE_COORDINATOR: advance: minStep5000002 State->FrontStep: 5000002 FAKE_COORDINATOR: Add transaction: 281474976710690 at step: 5000020 FAKE_COORDINATOR: advance: minStep5000002 State->FrontStep: 5000002 FAKE_COORDINATOR: Add transaction: 281474976710685 at step: 5000021 FAKE_COORDINATOR: advance: minStep5000002 State->FrontStep: 5000002 FAKE_COORDINATOR: Add transaction: 281474976710689 at step: 5000022 FAKE_COORDINATOR: advance: minStep5000002 State->FrontStep: 5000002 FAKE_COORDINATOR: Add transaction: 281474976710684 at step: 5000023 FAKE_COORDINATOR: advance: minStep5000002 State->FrontStep: 5000002 FAKE_COORDINATOR: Add transaction: 281474976710688 at step: 5000024 FAKE_COORDINATOR: advance: minStep5000002 State->FrontStep: 5000002 
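[Editor's note] Each dump in this section opens with the same WARN — "Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOp..., first GetDB called at: ..." (seen just above for ESchemeOpUpgradeSubDomainDecision, and earlier for ESchemeOpAlterSubDomain, ESchemeOpCreateTable, ESchemeOpAlterTable, ESchemeOpModifyACL). It flags a sub-operation whose Propose succeeded but performed side effects that could not be rolled back if a later part of the compound operation failed. A sketch of the general pattern, under the assumption that each part reports its own undo-safety (hypothetical types, not the YDB operation framework):

#include <functional>
#include <iostream>
#include <string>
#include <vector>

// Each sub-operation proposes independently; Propose() returns success plus
// a flag saying whether its side effects could be undone if a later part
// of the compound operation failed.
struct ProposeResult {
    bool Ok = false;
    bool UndoSafe = true;
};

struct SubOperation {
    std::string Type;
    std::function<ProposeResult()> Propose;
};

// Compound propose: succeed only if every part succeeds, and warn when a
// successful part is not undo-safe (mirrors the recurring WARN in the log).
bool ProposeAll(const std::vector<SubOperation>& parts) {
    for (const auto& part : parts) {
        ProposeResult r = part.Propose();
        if (!r.Ok) {
            return false; // earlier undo-unsafe parts cannot be cleanly reverted
        }
        if (!r.UndoSafe) {
            std::cout << "WARN: part proposed ok, but propose itself is undo unsafe,"
                      << " suboperation type: " << part.Type << "\n";
        }
    }
    return true;
}

int main() {
    std::vector<SubOperation> parts = {
        {"ESchemeOpAlterSubDomain", [] { return ProposeResult{true, false}; }},
    };
    ProposeAll(parts);
}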
FAKE_COORDINATOR: Add transaction: 281474976710683 at step: 5000025 FAKE_COORDINATOR: advance: minStep5000002 State->FrontStep: 5000002 FAKE_COORDINATOR: Add transaction: 281474976710682 at step: 5000026 FAKE_COORDINATOR: advance: minStep5000002 State->FrontStep: 5000002 FAKE_COORDINATOR: Add transaction: 281474976710677 at step: 5000027 FAKE_COORDINATOR: advance: minStep5000002 State->FrontStep: 5000002 FAKE_COORDINATOR: Add transaction: 281474976710681 at step: 5000028 FAKE_COORDINATOR: advance: minStep5000002 State->FrontStep: 5000002 FAKE_COORDINATOR: Add transaction: 281474976710676 at step: 5000029 FAKE_COORDINATOR: advance: minStep5000002 State->FrontStep: 5000002 FAKE_COORDINATOR: Add transaction: 281474976710680 at step: 5000030 FAKE_COORDINATOR: advance: minStep5000002 State->FrontStep: 5000002 FAKE_COORDINATOR: Add transaction: 281474976710675 at step: 5000031 FAKE_COORDINATOR: advance: minStep5000002 State->FrontStep: 5000002 FAKE_COORDINATOR: Add transaction: 281474976710679 at step: 5000032 FAKE_COORDINATOR: advance: minStep5000002 State->FrontStep: 5000002 FAKE_COORDINATOR: Add transaction: 281474976710674 at step: 5000033 FAKE_COORDINATOR: advance: minStep5000002 State->FrontStep: 5000002 FAKE_COORDINATOR: Add transaction: 281474976710678 at step: 5000034 FAKE_COORDINATOR: advance: minStep5000002 State->FrontStep: 5000002 FAKE_COORDINATOR: Add transaction: 281474976710673 at step: 5000035 FAKE_COORDINATOR: advance: minStep5000002 State->FrontStep: 5000002 FAKE_COORDINATOR: Add transaction: 281474976710672 at step: 5000036 FAKE_COORDINATOR: advance: minStep5000002 State->FrontStep: 5000002 FAKE_COORDINATOR: Erasing txId 281474976710671 FAKE_COORDINATOR: advance: minStep5000003 State->FrontStep: 5000002 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 281474976710666 at step: 5000003 FAKE_COORDINATOR: Erasing txId 281474976710666 FAKE_COORDINATOR: advance: minStep5000004 State->FrontStep: 5000003 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 281474976710670 at step: 5000004 FAKE_COORDINATOR: Erasing txId 281474976710670 FAKE_COORDINATOR: advance: minStep5000005 State->FrontStep: 5000004 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 281474976710665 at step: 5000005 FAKE_COORDINATOR: Erasing txId 281474976710665 FAKE_COORDINATOR: advance: minStep5000006 State->FrontStep: 5000005 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 281474976710669 at step: 5000006 FAKE_COORDINATOR: Erasing txId 281474976710669 FAKE_COORDINATOR: advance: minStep5000007 State->FrontStep: 5000006 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 281474976710664 at step: 5000007 FAKE_COORDINATOR: Erasing txId 281474976710664 FAKE_COORDINATOR: advance: minStep5000008 State->FrontStep: 5000007 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 281474976710668 at step: 5000008 FAKE_COORDINATOR: Erasing txId 281474976710668 FAKE_COORDINATOR: advance: minStep5000009 State->FrontStep: 5000008 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 281474976710663 at step: 5000009 FAKE_COORDINATOR: Erasing txId 281474976710663 FAKE_COORDINATOR: advance: minStep5000010 State->FrontStep: 5000009 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 281474976710667 at step: 5000010 FAKE_COORDINATOR: Erasing txId 281474976710667 FAKE_COORDINATOR: advance: minStep5000011 State->FrontStep: 5000010 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 281474976710662 at step: 
5000011 FAKE_COORDINATOR: Erasing txId 281474976710662 FAKE_COORDINATOR: advance: minStep5000012 State->FrontStep: 5000011 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 281474976710661 at step: 5000012 FAKE_COORDINATOR: Erasing txId 281474976710661 FAKE_COORDINATOR: advance: minStep5000013 State->FrontStep: 5000012 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 281474976710660 at step: 5000013 FAKE_COORDINATOR: Erasing txId 281474976710660 FAKE_COORDINATOR: advance: minStep5000014 State->FrontStep: 5000013 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 281474976710659 at step: 5000014 FAKE_COORDINATOR: Erasing txId 281474976710659 FAKE_COORDINATOR: advance: minStep5000015 State->FrontStep: 5000014 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 281474976710658 at step: 5000015 FAKE_COORDINATOR: Erasing txId 281474976710658 FAKE_COORDINATOR: advance: minStep5000016 State->FrontStep: 5000015 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 281474976710692 at step: 5000016 FAKE_COORDINATOR: Erasing txId 281474976710692 FAKE_COORDINATOR: advance: minStep5000017 State->FrontStep: 5000016 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 281474976710687 at step: 5000017 FAKE_COORDINATOR: Erasing txId 281474976710687 FAKE_COORDINATOR: advance: minStep5000018 State->FrontStep: 5000017 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 281474976710691 at step: 5000018 FAKE_COORDINATOR: Erasing txId 281474976710691 FAKE_COORDINATOR: advance: minStep5000019 State->FrontStep: 5000018 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 281474976710686 at step: 5000019 FAKE_COORDINATOR: Erasing txId 281474976710686 FAKE_COORDINATOR: advance: minStep5000020 State->FrontStep: 5000019 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 281474976710690 at step: 5000020 FAKE_COORDINATOR: Erasing txId 281474976710690 FAKE_COORDINATOR: advance: minStep5000021 State->FrontStep: 5000020 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 281474976710685 at step: 5000021 FAKE_COORDINATOR: Erasing txId 281474976710685 FAKE_COORDINATOR: advance: minStep5000022 State->FrontStep: 5000021 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 281474976710689 at step: 5000022 FAKE_COORDINATOR: Erasing txId 281474976710689 FAKE_COORDINATOR: advance: minStep5000023 State->FrontStep: 5000022 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 281474976710684 at step: 5000023 FAKE_COORDINATOR: Erasing txId 281474976710684 FAKE_COORDINATOR: advance: minStep5000024 State->FrontStep: 5000023 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 281474976710688 at step: 5000024 FAKE_COORDINATOR: Erasing txId 281474976710688 FAKE_COORDINATOR: advance: minStep5000025 State->FrontStep: 5000024 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 281474976710683 at step: 5000025 FAKE_COORDINATOR: Erasing txId 281474976710683 FAKE_COORDINATOR: advance: minStep5000026 State->FrontStep: 5000025 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 281474976710682 at step: 5000026 FAKE_COORDINATOR: Erasing txId 281474976710682 FAKE_COORDINATOR: advance: minStep5000027 State->FrontStep: 5000026 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 281474976710677 at step: 5000027 FAKE_COORDINATOR: Erasing txId 281474976710677 FAKE_COORDINATOR: advance: minStep5000028 State->FrontStep: 5000027 
FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 281474976710681 at step: 5000028 FAKE_COORDINATOR: Erasing txId 281474976710681 FAKE_COORDINATOR: advance: minStep5000029 State->FrontStep: 5000028 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 281474976710676 at step: 5000029 FAKE_COORDINATOR: Erasing txId 281474976710676 FAKE_COORDINATOR: advance: minStep5000030 State->FrontStep: 5000029 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 281474976710680 at step: 5000030 FAKE_COORDINATOR: Erasing txId 281474976710680 FAKE_COORDINATOR: advance: minStep5000031 State->FrontStep: 5000030 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 281474976710675 at step: 5000031 FAKE_COORDINATOR: Erasing txId 281474976710675 FAKE_COORDINATOR: advance: minStep5000032 State->FrontStep: 5000031 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 281474976710679 at step: 5000032 FAKE_COORDINATOR: Erasing txId 281474976710679 FAKE_COORDINATOR: advance: minStep5000033 State->FrontStep: 5000032 FAKE_COORDINATOR: ... FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 281474976710674 at step: 5000033 FAKE_COORDINATOR: Erasing txId 281474976710674 FAKE_COORDINATOR: advance: minStep5000034 State->FrontStep: 5000033 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 281474976710678 at step: 5000034 FAKE_COORDINATOR: Erasing txId 281474976710678 FAKE_COORDINATOR: advance: minStep5000035 State->FrontStep: 5000034 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 281474976710673 at step: 5000035 FAKE_COORDINATOR: Erasing txId 281474976710673 FAKE_COORDINATOR: advance: minStep5000036 State->FrontStep: 5000035 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 281474976710672 at step: 5000036 FAKE_COORDINATOR: Erasing txId 281474976710672 ... waiting for SysViewsRoster update finished (done) TestModificationResults wait txId: 1 2026-01-08T15:36:31.911081Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) FAKE_COORDINATOR: Add transaction: 1 at step: 5000037 FAKE_COORDINATOR: advance: minStep5000037 State->FrontStep: 5000036 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000037 FAKE_COORDINATOR: Erasing txId 1 TestModificationResult got TxId: 1, wait until txId: 1 TestModificationResults wait txId: 101 2026-01-08T15:36:31.919168Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateSubDomain, opId: 101:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_subdomain.cpp:259) FAKE_COORDINATOR: Add transaction: 101 at step: 5000038 FAKE_COORDINATOR: advance: minStep5000038 State->FrontStep: 5000037 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 101 at step: 5000038 FAKE_COORDINATOR: Erasing txId 101 TestModificationResult got TxId: 101, wait until txId: 101 TestWaitNotification wait txId: 101 2026-01-08T15:36:31.935976Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:123: NotifyTxCompletion, unknown transaction, txId: 101, at schemeshard: 72057594046678944 TestWaitNotification: OK eventTxId 101 TestModificationResults wait txId: 102 FAKE_COORDINATOR: Add transaction: 102 at step: 5000039 FAKE_COORDINATOR: advance: minStep5000039 State->FrontStep: 5000038 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 102 at step: 5000039 FAKE_COORDINATOR: Erasing txId 102 TestModificationResult got TxId: 102, wait until txId: 102 |88.4%| [TM] {BAZEL_UPLOAD} ydb/core/tx/scheme_board/ut_cache/unittest >> TExportToS3Tests::IndexMaterializationDisabled >> TConsoleTests::TestCreateTenantWrongPool [GOOD] >> TConsoleTests::TestCreateTenantWrongPoolExtSubdomain >> TExportToS3Tests::DropSourceTableBeforeTransferring |88.4%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/schemeshard/ut_index_build_reboots/ydb-core-tx-schemeshard-ut_index_build_reboots |88.4%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/schemeshard/ut_index_build_reboots/ydb-core-tx-schemeshard-ut_index_build_reboots |88.4%| [TA] {RESULT} $(B)/ydb/core/tx/schemeshard/ut_view/test-results/unittest/{meta.json ... 
results_accumulator.log} |88.4%| [LD] {RESULT} $(B)/ydb/core/tx/schemeshard/ut_index_build_reboots/ydb-core-tx-schemeshard-ut_index_build_reboots >> TExportToS3Tests::CancelUponTransferringSingleShardTableShouldSucceed >> TConsoleTests::TestAlterTenantModifyStorageResourcesForPending [GOOD] >> TConsoleTests::TestAlterTenantModifyStorageResourcesForPendingExtSubdomain >> TExportToS3Tests::DropCopiesBeforeTransferring1 >> TConsoleConfigTests::TestGetNodeConfig [GOOD] >> TConsoleConfigTests::TestAutoOrder >> TConsoleInMemoryConfigSubscriptionTests::TestConsoleRestartSimplified [GOOD] >> TConsoleInMemoryConfigSubscriptionTests::TestComplexYamlConfigChanges |88.5%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/kqp/ut/perf/ydb-core-kqp-ut-perf |88.5%| [LD] {RESULT} $(B)/ydb/core/kqp/ut/perf/ydb-core-kqp-ut-perf |88.5%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/kqp/ut/perf/ydb-core-kqp-ut-perf >> TConsoleTests::TestTenantGeneration [GOOD] >> TConsoleTests::TestTenantGenerationExtSubdomain >> TExportToS3Tests::CancelUponCreatingExportDirShouldSucceed [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/statistics/service/ut/unittest >> HttpRequest::AnalyzeServerless [GOOD] Test command err: ... waiting for SysViewsRoster update finished 2026-01-08T15:36:15.077433Z node 1 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:125: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, got undelivered to scheme cache: ActorUnknown 2026-01-08T15:36:15.178429Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:125: [WorkloadService] [Service] Failed to discover tenant nodes 2026-01-08T15:36:15.187932Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:449:2406], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2026-01-08T15:36:15.188298Z node 1 :METADATA_PROVIDER ERROR: log.cpp:841: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2026-01-08T15:36:15.188564Z node 1 :METADATA_PROVIDER ERROR: log.cpp:841: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2026-01-08T15:36:15.602822Z node 1 :IMPORT WARN: schemeshard_import.cpp:440: Table profiles were not loaded 2026-01-08T15:36:15.721163Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2026-01-08T15:36:15.721335Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2026-01-08T15:36:15.765329Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2026-01-08T15:36:15.928050Z node 1 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: PathErrorUnknown, path: Root/.metadata/script_executions ... waiting for SysViewsRoster update finished (done) 2026-01-08T15:36:16.765236Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2026-01-08T15:36:16.766576Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2026-01-08T15:36:16.766635Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2026-01-08T15:36:16.766668Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2026-01-08T15:36:16.767208Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2026-01-08T15:36:16.832548Z node 1 :TX_CONVEYOR ERROR: log.cpp:841: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2026-01-08T15:36:17.487302Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateExtSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_extsubdomain.cpp:194) 2026-01-08T15:36:21.308149Z node 2 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:125: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, got undelivered to scheme cache: ActorUnknown 2026-01-08T15:36:21.313171Z node 2 :STATISTICS INFO: service_impl.cpp:234: Subscribed for config changes on node 2 2026-01-08T15:36:21.321542Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:125: [WorkloadService] [Service] Failed to discover tenant nodes 2026-01-08T15:36:21.370989Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 
2026-01-08T15:36:21.371123Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2026-01-08T15:36:21.405941Z node 1 :HIVE WARN: hive_impl.cpp:814: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 2 Cookie 2 2026-01-08T15:36:21.408315Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2026-01-08T15:36:21.746758Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2026-01-08T15:36:21.746883Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2026-01-08T15:36:21.748901Z node 2 :HIVE WARN: tx__create_tablet.cpp:348: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2026-01-08T15:36:21.750945Z node 2 :HIVE WARN: tx__create_tablet.cpp:348: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2026-01-08T15:36:21.753916Z node 2 :HIVE WARN: tx__create_tablet.cpp:348: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2026-01-08T15:36:21.755298Z node 2 :HIVE WARN: tx__create_tablet.cpp:348: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2026-01-08T15:36:21.756082Z node 2 :HIVE WARN: tx__create_tablet.cpp:348: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2026-01-08T15:36:21.756310Z node 2 :HIVE WARN: tx__create_tablet.cpp:348: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2026-01-08T15:36:21.757122Z node 2 :HIVE WARN: tx__create_tablet.cpp:348: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2026-01-08T15:36:21.757265Z node 2 :HIVE WARN: tx__create_tablet.cpp:348: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2026-01-08T15:36:21.757769Z node 2 :HIVE WARN: tx__create_tablet.cpp:348: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2026-01-08T15:36:21.776246Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2026-01-08T15:36:22.112361Z node 2 :IMPORT WARN: schemeshard_import.cpp:440: Table profiles were not loaded 2026-01-08T15:36:22.186369Z node 2 :STATISTICS INFO: aggregator_impl.cpp:43: [72075186224037894] OnActivateExecutor 2026-01-08T15:36:22.186516Z node 2 :STATISTICS DEBUG: tx_init_schema.cpp:13: [72075186224037894] TTxInitSchema::Execute 2026-01-08T15:36:22.228181Z node 2 :STATISTICS DEBUG: tx_init_schema.cpp:34: [72075186224037894] TTxInitSchema::Complete 2026-01-08T15:36:22.229913Z node 2 :STATISTICS DEBUG: tx_init.cpp:18: [72075186224037894] TTxInit::Execute 2026-01-08T15:36:22.230281Z node 2 :STATISTICS DEBUG: tx_init.cpp:124: [72075186224037894] Loaded BaseStatistics: schemeshard count# 0 2026-01-08T15:36:22.230396Z node 2 :STATISTICS DEBUG: tx_init.cpp:149: [72075186224037894] Loaded ColumnStatistics: column count# 0 2026-01-08T15:36:22.230479Z node 2 :STATISTICS DEBUG: tx_init.cpp:188: [72075186224037894] Loaded ScheduleTraversals: table count# 0 2026-01-08T15:36:22.230555Z node 2 :STATISTICS DEBUG: tx_init.cpp:224: [72075186224037894] Loaded ForceTraversalOperations: table count# 0 2026-01-08T15:36:22.230622Z node 2 :STATISTICS DEBUG: tx_init.cpp:273: [72075186224037894] Loaded ForceTraversalTables: table count# 0 2026-01-08T15:36:22.230691Z node 2 :STATISTICS DEBUG: 
tx_init.cpp:280: [72075186224037894] TTxInit::Complete 2026-01-08T15:36:22.231767Z node 2 :STATISTICS INFO: aggregator_impl.cpp:69: [72075186224037894] Subscribed for config changes 2026-01-08T15:36:22.240074Z node 2 :STATISTICS DEBUG: schemeshard_impl.cpp:8331: ResolveSA(), StatisticsAggregatorId=72075186224037894, at schemeshard: 72075186224037897 2026-01-08T15:36:22.240181Z node 2 :STATISTICS DEBUG: schemeshard_impl.cpp:8361: ConnectToSA(), pipe client id: [2:2112:2577], at schemeshard: 72075186224037897, StatisticsAggregatorId: 72075186224037894, at schemeshard: 72075186224037897 2026-01-08T15:36:22.268939Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:97: [72075186224037894] EvServerConnected, pipe server id = [2:2151:2614] 2026-01-08T15:36:22.270446Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:224: [72075186224037894] EvConnectSchemeShard, pipe server id = [2:2151:2614], schemeshard id = 72075186224037897 2026-01-08T15:36:22.376289Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:97: [72075186224037894] EvServerConnected, pipe server id = [2:2227:2652] 2026-01-08T15:36:22.379404Z node 2 :STATISTICS DEBUG: tx_configure.cpp:21: [72075186224037894] TTxConfigure::Execute: database# /Root/Shared 2026-01-08T15:36:22.416064Z node 2 :STATISTICS DEBUG: table_creator.cpp:191: Table _statistics updater. SelfId: [2:2240:2661] Owner: [2:2236:2660]. Describe result: PathErrorUnknown 2026-01-08T15:36:22.416137Z node 2 :STATISTICS NOTICE: table_creator.cpp:211: Table _statistics updater. SelfId: [2:2240:2661] Owner: [2:2236:2660]. Creating table 2026-01-08T15:36:22.416218Z node 2 :STATISTICS DEBUG: table_creator.cpp:112: Table _statistics updater. SelfId: [2:2240:2661] Owner: [2:2236:2660]. Created ESchemeOpCreateTable transaction for path: /Root/Shared/.metadata/_statistics 2026-01-08T15:36:22.425633Z node 2 :TX_PROXY_SCHEME_CACHE WARN: cache.cpp:307: Path does not belong to the specified domain: self# [2:2311:2693], domain# [OwnerId: 72057594046644480, LocalPathId: 38], path's domain# [OwnerId: 72057594046644480, LocalPathId: 1] 2026-01-08T15:36:22.430934Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976720657:1, at schemeshard: 72075186224037897, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:36:22.439288Z node 2 :STATISTICS DEBUG: table_creator.cpp:229: Table _statistics updater. SelfId: [2:2240:2661] Owner: [2:2236:2660]. TEvProposeTransactionStatus: { Status: 53 TxId: 281474976720657 SchemeShardStatus: 1 SchemeShardTabletId: 72075186224037897 PathId: 33 } 2026-01-08T15:36:22.439456Z node 2 :STATISTICS DEBUG: table_creator.cpp:304: Table _statistics updater. SelfId: [2:2240:2661] Owner: [2:2236:2660]. Subscribe on create table tx: 281474976720657 2026-01-08T15:36:22.454074Z node 2 :STATISTICS DEBUG: table_creator.cpp:329: Table _statistics updater. SelfId: [2:2240:2661] Owner: [2:2236:2660]. 
Subscribe on tx: 281474976720657 registered 2026-01-08T15:36:22.625503Z node 2 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: PathErrorUnknown, path: Root/Shared/.metadata/script_executions 2026-01-08T15:36:22.664201Z node 2 :STATISTICS DEBUG: tx_configure.cpp:36: [72075186224037894] TTxConfigure::Complete 2026-01-08T15:36:23.243613Z node 2 :STATISTICS DEBUG: table_creator.cpp:334: Table _statistics updater. SelfId: [2:2240:2661] Owner: [2:2236:2660]. Request: create. Transaction completed: 281474976720657. Doublechecking... 2026-01-08T15:36:23.371084Z node 2 :STATISTICS DEBUG: table_creator.cpp:221: Table _statistics updater. SelfId: [2:2240:2661] Owner: [2:2236:2660]. Table already exists, number of columns: 5, has SecurityObject: true 2026-01-08T15:36:23.371191Z node 2 :STATISTICS DEBUG: table_creator.cpp:441: Table _statistics updater. SelfId: [2:2240:2661] Owner: [2:2236:2660]. Column diff is empty, finishing 2026-01-08T15:36:24.051298Z node 2 :TX_CONVEYOR ERROR: log.cpp:841: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2026- ... src=[2:2908:3024];cookie=101:1;;fline=tables_manager.cpp:57;method=resolve_internal_path_id;ss_local=32;result=not_found; 2026-01-08T15:36:27.833607Z node 2 :TX_COLUMNSHARD WARN: log.cpp:841: tablet_id=72075186224037911;self_id=[2:3596:3454];ev=NActors::IEventHandle;tablet_id=72075186224037911;tx_id=281474976715661;this=137005579217792;method=TTxController::StartProposeOnExecute;tx_info=281474976715661:TX_KIND_SCHEMA;min=4509;max=18446744073709551615;plan=0;src=[2:2908:3024];cookie=131:1;;fline=tables_manager.cpp:57;method=resolve_internal_path_id;ss_local=32;result=not_found; 2026-01-08T15:36:27.834209Z node 2 :TX_COLUMNSHARD WARN: log.cpp:841: tablet_id=72075186224037910;self_id=[2:3600:3457];ev=NActors::IEventHandle;tablet_id=72075186224037910;tx_id=281474976715661;this=137005579226528;method=TTxController::StartProposeOnExecute;tx_info=281474976715661:TX_KIND_SCHEMA;min=4509;max=18446744073709551615;plan=0;src=[2:2908:3024];cookie=121:1;;fline=tables_manager.cpp:57;method=resolve_internal_path_id;ss_local=32;result=not_found; 2026-01-08T15:36:27.877331Z node 2 :TX_COLUMNSHARD WARN: log.cpp:841: tablet_id=72075186224037909;self_id=[2:3615:3465];ev=NActors::IEventHandle;tablet_id=72075186224037909;tx_id=281474976715661;this=137005579214208;method=TTxController::StartProposeOnExecute;tx_info=281474976715661:TX_KIND_SCHEMA;min=4510;max=18446744073709551615;plan=0;src=[2:2908:3024];cookie=111:1;;fline=tables_manager.cpp:57;method=resolve_internal_path_id;ss_local=32;result=not_found; 2026-01-08T15:36:27.877947Z node 2 :TX_COLUMNSHARD WARN: log.cpp:841: tablet_id=72075186224037907;self_id=[2:3617:3467];ev=NActors::IEventHandle;tablet_id=72075186224037907;tx_id=281474976715661;this=137005579222496;method=TTxController::StartProposeOnExecute;tx_info=281474976715661:TX_KIND_SCHEMA;min=4510;max=18446744073709551615;plan=0;src=[2:2908:3024];cookie=91:1;;fline=tables_manager.cpp:57;method=resolve_internal_path_id;ss_local=32;result=not_found; 2026-01-08T15:36:27.878487Z node 2 :TX_COLUMNSHARD WARN: log.cpp:841: 
tablet_id=72075186224037912;self_id=[2:3618:3468];ev=NActors::IEventHandle;tablet_id=72075186224037912;tx_id=281474976715661;this=137005579212864;method=TTxController::StartProposeOnExecute;tx_info=281474976715661:TX_KIND_SCHEMA;min=4510;max=18446744073709551615;plan=0;src=[2:2908:3024];cookie=141:1;;fline=tables_manager.cpp:57;method=resolve_internal_path_id;ss_local=32;result=not_found; 2026-01-08T15:36:27.878994Z node 2 :TX_COLUMNSHARD WARN: log.cpp:841: tablet_id=72075186224037913;self_id=[2:3619:3469];ev=NActors::IEventHandle;tablet_id=72075186224037913;tx_id=281474976715661;this=137005579212416;method=TTxController::StartProposeOnExecute;tx_info=281474976715661:TX_KIND_SCHEMA;min=4510;max=18446744073709551615;plan=0;src=[2:2908:3024];cookie=151:1;;fline=tables_manager.cpp:57;method=resolve_internal_path_id;ss_local=32;result=not_found; 2026-01-08T15:36:27.900638Z node 2 :TX_COLUMNSHARD WARN: log.cpp:841: tablet_id=72075186224037914;self_id=[2:3623:3472];ev=NActors::IEventHandle;tablet_id=72075186224037914;tx_id=281474976715661;this=137005579231680;method=TTxController::StartProposeOnExecute;tx_info=281474976715661:TX_KIND_SCHEMA;min=4511;max=18446744073709551615;plan=0;src=[2:2908:3024];cookie=161:1;;fline=tables_manager.cpp:57;method=resolve_internal_path_id;ss_local=32;result=not_found; 2026-01-08T15:36:28.143837Z node 2 :TX_COLUMNSHARD WARN: log.cpp:841: tablet_id=72075186224037905;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715661;fline=tables_manager.cpp:57;method=resolve_internal_path_id;ss_local=32;result=not_found; 2026-01-08T15:36:28.143997Z node 2 :TX_COLUMNSHARD WARN: log.cpp:841: tablet_id=72075186224037905;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715661;fline=tables_manager.cpp:57;method=resolve_internal_path_id;ss_local=32;result=not_found; 2026-01-08T15:36:28.144064Z node 2 :TX_COLUMNSHARD WARN: log.cpp:841: tablet_id=72075186224037905;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715661;fline=tables_manager.cpp:57;method=resolve_internal_path_id;ss_local=32;result=not_found; 2026-01-08T15:36:28.189614Z node 2 :TX_COLUMNSHARD WARN: log.cpp:841: tablet_id=72075186224037906;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715661;fline=tables_manager.cpp:57;method=resolve_internal_path_id;ss_local=32;result=not_found; 2026-01-08T15:36:28.189716Z node 2 :TX_COLUMNSHARD WARN: log.cpp:841: tablet_id=72075186224037906;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715661;fline=tables_manager.cpp:57;method=resolve_internal_path_id;ss_local=32;result=not_found; 2026-01-08T15:36:28.189753Z node 2 :TX_COLUMNSHARD WARN: log.cpp:841: tablet_id=72075186224037906;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715661;fline=tables_manager.cpp:57;method=resolve_internal_path_id;ss_local=32;result=not_found; 2026-01-08T15:36:28.201119Z node 2 :TX_COLUMNSHARD WARN: log.cpp:841: tablet_id=72075186224037912;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715661;fline=tables_manager.cpp:57;method=resolve_internal_path_id;ss_local=32;result=not_found; 2026-01-08T15:36:28.201220Z node 2 :TX_COLUMNSHARD WARN: log.cpp:841: tablet_id=72075186224037912;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715661;fline=tables_manager.cpp:57;method=resolve_internal_path_id;ss_local=32;result=not_found; 2026-01-08T15:36:28.201258Z node 2 :TX_COLUMNSHARD WARN: log.cpp:841: 
tablet_id=72075186224037912;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715661;fline=tables_manager.cpp:57;method=resolve_internal_path_id;ss_local=32;result=not_found; 2026-01-08T15:36:28.217392Z node 2 :TX_COLUMNSHARD WARN: log.cpp:841: tablet_id=72075186224037908;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715661;fline=tables_manager.cpp:57;method=resolve_internal_path_id;ss_local=32;result=not_found; 2026-01-08T15:36:28.217504Z node 2 :TX_COLUMNSHARD WARN: log.cpp:841: tablet_id=72075186224037908;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715661;fline=tables_manager.cpp:57;method=resolve_internal_path_id;ss_local=32;result=not_found; 2026-01-08T15:36:28.217542Z node 2 :TX_COLUMNSHARD WARN: log.cpp:841: tablet_id=72075186224037908;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715661;fline=tables_manager.cpp:57;method=resolve_internal_path_id;ss_local=32;result=not_found; 2026-01-08T15:36:28.230309Z node 2 :TX_COLUMNSHARD WARN: log.cpp:841: tablet_id=72075186224037907;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715661;fline=tables_manager.cpp:57;method=resolve_internal_path_id;ss_local=32;result=not_found; 2026-01-08T15:36:28.230438Z node 2 :TX_COLUMNSHARD WARN: log.cpp:841: tablet_id=72075186224037907;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715661;fline=tables_manager.cpp:57;method=resolve_internal_path_id;ss_local=32;result=not_found; 2026-01-08T15:36:28.230479Z node 2 :TX_COLUMNSHARD WARN: log.cpp:841: tablet_id=72075186224037907;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715661;fline=tables_manager.cpp:57;method=resolve_internal_path_id;ss_local=32;result=not_found; 2026-01-08T15:36:28.246454Z node 2 :TX_COLUMNSHARD WARN: log.cpp:841: tablet_id=72075186224037914;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715661;fline=tables_manager.cpp:57;method=resolve_internal_path_id;ss_local=32;result=not_found; 2026-01-08T15:36:28.246567Z node 2 :TX_COLUMNSHARD WARN: log.cpp:841: tablet_id=72075186224037914;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715661;fline=tables_manager.cpp:57;method=resolve_internal_path_id;ss_local=32;result=not_found; 2026-01-08T15:36:28.246605Z node 2 :TX_COLUMNSHARD WARN: log.cpp:841: tablet_id=72075186224037914;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715661;fline=tables_manager.cpp:57;method=resolve_internal_path_id;ss_local=32;result=not_found; 2026-01-08T15:36:28.257202Z node 2 :TX_COLUMNSHARD WARN: log.cpp:841: tablet_id=72075186224037909;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715661;fline=tables_manager.cpp:57;method=resolve_internal_path_id;ss_local=32;result=not_found; 2026-01-08T15:36:28.257316Z node 2 :TX_COLUMNSHARD WARN: log.cpp:841: tablet_id=72075186224037909;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715661;fline=tables_manager.cpp:57;method=resolve_internal_path_id;ss_local=32;result=not_found; 2026-01-08T15:36:28.257356Z node 2 :TX_COLUMNSHARD WARN: log.cpp:841: tablet_id=72075186224037909;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715661;fline=tables_manager.cpp:57;method=resolve_internal_path_id;ss_local=32;result=not_found; 2026-01-08T15:36:28.266961Z node 2 :TX_COLUMNSHARD WARN: log.cpp:841: tablet_id=72075186224037910;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715661;fline=tables_manager.cpp:57;method=resolve_internal_path_id;ss_local=32;result=not_found; 2026-01-08T15:36:28.267070Z node 2 :TX_COLUMNSHARD 
WARN: log.cpp:841: tablet_id=72075186224037910;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715661;fline=tables_manager.cpp:57;method=resolve_internal_path_id;ss_local=32;result=not_found; 2026-01-08T15:36:28.267110Z node 2 :TX_COLUMNSHARD WARN: log.cpp:841: tablet_id=72075186224037910;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715661;fline=tables_manager.cpp:57;method=resolve_internal_path_id;ss_local=32;result=not_found; 2026-01-08T15:36:28.275742Z node 2 :TX_COLUMNSHARD WARN: log.cpp:841: tablet_id=72075186224037913;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715661;fline=tables_manager.cpp:57;method=resolve_internal_path_id;ss_local=32;result=not_found; 2026-01-08T15:36:28.275851Z node 2 :TX_COLUMNSHARD WARN: log.cpp:841: tablet_id=72075186224037913;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715661;fline=tables_manager.cpp:57;method=resolve_internal_path_id;ss_local=32;result=not_found; 2026-01-08T15:36:28.275889Z node 2 :TX_COLUMNSHARD WARN: log.cpp:841: tablet_id=72075186224037913;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715661;fline=tables_manager.cpp:57;method=resolve_internal_path_id;ss_local=32;result=not_found; 2026-01-08T15:36:28.288047Z node 2 :TX_COLUMNSHARD WARN: log.cpp:841: tablet_id=72075186224037911;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715661;fline=tables_manager.cpp:57;method=resolve_internal_path_id;ss_local=32;result=not_found; 2026-01-08T15:36:28.288158Z node 2 :TX_COLUMNSHARD WARN: log.cpp:841: tablet_id=72075186224037911;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715661;fline=tables_manager.cpp:57;method=resolve_internal_path_id;ss_local=32;result=not_found; 2026-01-08T15:36:28.288195Z node 2 :TX_COLUMNSHARD WARN: log.cpp:841: tablet_id=72075186224037911;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715661;fline=tables_manager.cpp:57;method=resolve_internal_path_id;ss_local=32;result=not_found; waiting actualization: 0/0.000021s 2026-01-08T15:36:31.131101Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:97: [72075186224037894] EvServerConnected, pipe server id = [2:4478:4114] 2026-01-08T15:36:31.161686Z node 2 :STATISTICS DEBUG: tx_analyze.cpp:24: [72075186224037894] TTxAnalyze::Execute. ReplyToActorId [1:4451:3715] , Record { OperationId: "\000\000\000\000\026\021#\203&0<\266\023\357\264\371" Tables { PathId { OwnerId: 72057594046644480 LocalId: 38 } } Database: "" } 2026-01-08T15:36:31.161784Z node 2 :STATISTICS DEBUG: tx_analyze.cpp:55: [72075186224037894] TTxAnalyze::Execute. Create new force traversal operation, OperationId: `#&0<', DatabaseName: `', Types: 2026-01-08T15:36:31.161841Z node 2 :STATISTICS DEBUG: tx_analyze.cpp:78: [72075186224037894] TTxAnalyze::Execute. Create new force traversal table, OperationId: `#&0<', PathId: [OwnerId: 72057594046644480, LocalPathId: 38], ColumnTags: Answer: 'Analyze sent. 
OperationId: 00000005gh4e1jcc1wpr9yzd7s' |88.5%| [TM] {BAZEL_UPLOAD} ydb/core/statistics/service/ut/unittest >> TExportToS3Tests::CancelUponCopyingTablesShouldSucceed >> TExportToS3Tests::IndexMaterializationDisabled [GOOD] >> TExportToS3Tests::DropSourceTableBeforeTransferring [GOOD] >> TSchemeShardExportToFsTests::ShouldSucceedCreateExportToFs [GOOD] >> TSchemeShardExportToFsTests::ShouldFailOnNonExistentPath >> TExportToS3Tests::ShouldSucceedOnManyTables >> TExportToS3Tests::ExportStartTime >> TExportToS3Tests::IndexMaterialization >> TConsoleConfigTests::TestAutoOrder [GOOD] >> TConsoleConfigTests::TestAutoKind >> TExportToS3Tests::RebootDuringCompletion |88.5%| [TM] {asan, default-linux-x86_64, release} ydb/services/metadata/initializer/ut/unittest |88.5%| [TM] {BAZEL_UPLOAD} ydb/services/metadata/initializer/ut/unittest |88.5%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/public/sdk/cpp/src/client/persqueue_public/ut/ydb-public-sdk-cpp-src-client-persqueue_public-ut |88.5%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/public/sdk/cpp/src/client/persqueue_public/ut/ydb-public-sdk-cpp-src-client-persqueue_public-ut |88.5%| [LD] {RESULT} $(B)/ydb/public/sdk/cpp/src/client/persqueue_public/ut/ydb-public-sdk-cpp-src-client-persqueue_public-ut >> TExportToS3Tests::ShouldOmitNonStrictStorageSettings >> TExportToS3Tests::ShouldSucceedOnSingleShardTable >> TExportToS3Tests::DropCopiesBeforeTransferring1 [GOOD] >> TConsoleInMemoryConfigSubscriptionTests::TestComplexYamlConfigChanges [GOOD] >> TConsoleInMemoryConfigSubscriptionTests::TestNoYamlResend >> TExportToS3Tests::UidAsIdempotencyKey >> TConsoleConfigSubscriptionTests::TestNotificationForModifiedConfigItem [GOOD] >> TConsoleConfigSubscriptionTests::TestNotificationForModifiedConfigItemScope >> TExportToS3Tests::DropCopiesBeforeTransferring2 |88.5%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/datashard/ut_erase_rows/ydb-core-tx-datashard-ut_erase_rows |88.5%| [LD] {RESULT} $(B)/ydb/core/tx/datashard/ut_erase_rows/ydb-core-tx-datashard-ut_erase_rows |88.5%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/datashard/ut_erase_rows/ydb-core-tx-datashard-ut_erase_rows |88.5%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/s3/py3test >> test_s3.py::TestYdbS3TTL::test_s3[table_all_types-pk_types12-all_types12-index12---] [GOOD] |88.5%| [TM] {BAZEL_UPLOAD} ydb/tests/datashard/s3/py3test >> TSchemeShardExportToFsTests::ShouldFailOnNonExistentPath [GOOD] >> TSchemeShardExportToFsTests::ShouldFailOnDeletedPath >> KqpPg::TempTablesDrop [FAIL] >> KqpPg::TempTablesWithCache |88.5%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/schemeshard/ut_reboots/ydb-core-tx-schemeshard-ut_reboots |88.5%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/schemeshard/ut_reboots/ydb-core-tx-schemeshard-ut_reboots |88.5%| [LD] {RESULT} $(B)/ydb/core/tx/schemeshard/ut_reboots/ydb-core-tx-schemeshard-ut_reboots >> TConsoleConfigTests::TestAutoKind [GOOD] >> TConsoleConfigTests::TestAutoSplit >> TConsoleTests::TestCreateTenantWrongPoolExtSubdomain [GOOD] >> TConsoleTests::TestCreateTenantAlreadyExists >> TExportToS3Tests::ExportStartTime [GOOD] >> test_s3.py::TestYdbS3TTL::test_s3[table_ttl_DyNumber-pk_types13-all_types13-index13-DyNumber--] [GOOD] >> TExportToS3Tests::IndexMaterialization [GOOD] |88.5%| [TM] {asan, default-linux-x86_64, release} ydb/services/metadata/initializer/ut/unittest |88.5%| [TM] {BAZEL_UPLOAD} ydb/services/metadata/initializer/ut/unittest >> TExportToS3Tests::ExportPartitioningSettings >> 
TExportToS3Tests::IndexMaterializationGlobal >> TConsoleInMemoryConfigSubscriptionTests::TestNoYamlResend [GOOD] >> TConsoleInMemoryConfigSubscriptionTests::TestSubscribeAfterConfigApply >> TExportToS3Tests::ShouldSucceedOnManyTables [GOOD] >> TExportToS3Tests::DropCopiesBeforeTransferring2 [GOOD] >> TExportToS3Tests::RebootDuringCompletion [GOOD] >> TSchemeShardExportToFsTests::ShouldFailOnDeletedPath [GOOD] >> TSchemeShardExportToFsTests::ShouldExportDataAndSchemaToFs >> TExportToS3Tests::UidAsIdempotencyKey [GOOD] |88.5%| [TM] {asan, default-linux-x86_64, release} ydb/services/metadata/initializer/ut/unittest |88.5%| [TM] {BAZEL_UPLOAD} ydb/services/metadata/initializer/ut/unittest >> TExportToS3Tests::ShouldPreserveIncrBackupFlag >> TExportToS3Tests::ShouldOmitNonStrictStorageSettings [GOOD] >> TExportToS3Tests::ShouldSucceedOnSingleShardTable [GOOD] >> TExportToS3Tests::CorruptedDyNumber >> TConsoleConfigTests::TestAutoSplit [GOOD] >> TConsoleConfigTests::TestAllowedScopes >> TExportToS3Tests::RebootDuringAbortion >> TExportToS3Tests::UserSID >> TExportToS3Tests::ShouldExcludeBackupTableFromStats >> TExportToS3Tests::ShouldSucceedOnMultiShardTable |88.5%| [TM] {asan, default-linux-x86_64, release} ydb/services/metadata/initializer/ut/unittest |88.5%| [TM] {BAZEL_UPLOAD} ydb/services/metadata/initializer/ut/unittest >> YdbIndexTable::MultiShardTableOneIndexDataColumn [GOOD] >> YdbIndexTable::MultiShardTableOneIndexIndexOverlap >> TConsoleTests::TestAlterTenantModifyStorageResourcesForPendingExtSubdomain [GOOD] >> TConsoleTests::TestAlterTenantModifyStorageResourcesForRunning >> TExportToS3Tests::ExportPartitioningSettings [GOOD] >> TConsoleInMemoryConfigSubscriptionTests::TestSubscribeAfterConfigApply [GOOD] >> TConsoleInMemoryConfigSubscriptionTests::TestSubscribeAfterConfigApplyWithDb >> TConsoleTests::TestTenantGenerationExtSubdomain [GOOD] >> TConsoleTxProcessorTests::TestTxProcessorSingle >> TExportToS3Tests::IndexMaterializationGlobal [GOOD] >> TExportToS3Tests::CancelUponCopyingTablesShouldSucceed [GOOD] >> TExportToS3Tests::UserSID [GOOD] >> TExportToS3Tests::ExportIndexTablePartitioningSettings >> TExportToS3Tests::ShouldPreserveIncrBackupFlag [GOOD] |88.5%| [TM] {asan, default-linux-x86_64, release} ydb/services/metadata/initializer/ut/unittest |88.5%| [TM] {BAZEL_UPLOAD} ydb/services/metadata/initializer/ut/unittest >> TExportToS3Tests::CorruptedDyNumber [GOOD] >> TExportToS3Tests::CancelUponTransferringMultiShardTableShouldSucceed >> TExportToS3Tests::TransferExportNoConnString >> TConsoleConfigTests::TestAllowedScopes [GOOD] >> TConsoleConfigTests::TestCheckConfigUpdates >> TExportToS3Tests::IndexMaterializationGlobalAsync >> TExportToS3Tests::ShouldRestartOnScanErrors >> TExportToS3Tests::CancelUponTransferringSingleShardTableShouldSucceed [GOOD] >> TExportToS3Tests::CompletedExportEndTime >> TSchemeShardExportToFsTests::ShouldExportDataAndSchemaToFs [GOOD] >> TSchemeShardExportToFsTests::ShouldExportMultipleTablesWithData >> TExportToS3Tests::CancelUponTransferringSingleTableShouldSucceed [GOOD] >> TExportToS3Tests::CheckItemProgress >> TConsoleInMemoryConfigSubscriptionTests::TestSubscribeAfterConfigApplyWithDb [GOOD] >> TConsoleInMemoryConfigSubscriptionTests::TestSubscribeAfterConfigApplyWithKnownConfig >> TExportToS3Tests::RebootDuringAbortion [GOOD] |88.5%| [TM] {asan, default-linux-x86_64, release} ydb/services/metadata/initializer/ut/unittest |88.5%| [TM] {BAZEL_UPLOAD} ydb/services/metadata/initializer/ut/unittest >> 
TExportToS3Tests::ShouldSucceedOnMultiShardTable [GOOD] >> TExportToS3Tests::ReplicationExportWithStaticCredentials >> TExportToS3Tests::TablePermissions |88.5%| [TM] {asan, default-linux-x86_64, release} ydb/services/metadata/initializer/ut/unittest |88.5%| [TM] {BAZEL_UPLOAD} ydb/services/metadata/initializer/ut/unittest >> TConsoleConfigSubscriptionTests::TestNotificationForModifiedConfigItemScope [GOOD] >> TConsoleConfigSubscriptionTests::TestNotificationForRemovedConfigItem >> TConsoleConfigTests::TestCheckConfigUpdates [GOOD] >> TConsoleConfigTests::TestManageValidators >> TExportToS3Tests::TransferExportNoConnString [GOOD] >> TExportToS3Tests::ExportIndexTablePartitioningSettings [GOOD] >> TConsoleTests::TestRemoveTenant [GOOD] >> TConsoleTests::TestRemoveTenantExtSubdomain >> TConsoleInMemoryConfigSubscriptionTests::TestSubscribeAfterConfigApplyWithKnownConfig [GOOD] >> TExportToS3Tests::TransferExportWithConnString >> KqpQueryService::CloseSessionsWithLoad [GOOD] >> TExportToS3Tests::EnableChecksumsPersistance >> TExportToS3Tests::IndexMaterializationGlobalAsync [GOOD] |88.5%| [TM] {asan, default-linux-x86_64, release} ydb/services/metadata/initializer/ut/unittest |88.5%| [TM] {BAZEL_UPLOAD} ydb/services/metadata/initializer/ut/unittest >> TConsoleTests::TestCreateTenantAlreadyExists [GOOD] >> TConsoleTests::TestCreateTenantAlreadyExistsExtSubdomain >> TExportToS3Tests::ReplicationExportWithStaticCredentials [GOOD] >> TExportToS3Tests::IndexMaterializationGlobalVectorKmeansTree >> TExportToS3Tests::ShouldRestartOnScanErrors [GOOD] >> TExportToS3Tests::ShouldSucceedOnConcurrentTxs >> TExportToS3Tests::ReplicationExportWithOAuthCredentials ------- [TM] {asan, default-linux-x86_64, release} ydb/core/cms/console/ut_console/unittest >> TConsoleInMemoryConfigSubscriptionTests::TestSubscribeAfterConfigApplyWithKnownConfig [GOOD] Test command err: 2026-01-08T15:36:07.992096Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7755: Cannot subscribe to console configs 2026-01-08T15:36:07.992165Z node 1 :IMPORT WARN: schemeshard_import.cpp:440: Table profiles were not loaded 2026-01-08T15:36:08.250936Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046578944, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2026-01-08T15:36:09.689690Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7755: Cannot subscribe to console configs 2026-01-08T15:36:09.689768Z node 2 :IMPORT WARN: schemeshard_import.cpp:440: Table profiles were not loaded 2026-01-08T15:36:09.787129Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046578944, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2026-01-08T15:36:10.999388Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7755: Cannot subscribe to console configs 2026-01-08T15:36:10.999456Z node 3 :IMPORT WARN: schemeshard_import.cpp:440: Table profiles were not loaded 2026-01-08T15:36:11.104598Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 
72057594046578944, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2026-01-08T15:36:12.231820Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7755: Cannot subscribe to console configs 2026-01-08T15:36:12.231903Z node 4 :IMPORT WARN: schemeshard_import.cpp:440: Table profiles were not loaded 2026-01-08T15:36:12.338850Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046578944, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2026-01-08T15:36:13.553374Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7755: Cannot subscribe to console configs 2026-01-08T15:36:13.553457Z node 5 :IMPORT WARN: schemeshard_import.cpp:440: Table profiles were not loaded 2026-01-08T15:36:13.679784Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046578944, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2026-01-08T15:36:14.926577Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7755: Cannot subscribe to console configs 2026-01-08T15:36:14.926653Z node 6 :IMPORT WARN: schemeshard_import.cpp:440: Table profiles were not loaded 2026-01-08T15:36:15.143895Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046578944, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2026-01-08T15:36:16.553193Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7755: Cannot subscribe to console configs 2026-01-08T15:36:16.553283Z node 7 :IMPORT WARN: schemeshard_import.cpp:440: Table profiles were not loaded 2026-01-08T15:36:16.695805Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046578944, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2026-01-08T15:36:21.083487Z node 8 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7755: Cannot subscribe to console configs 2026-01-08T15:36:21.083565Z node 8 :IMPORT WARN: schemeshard_import.cpp:440: Table profiles were not loaded 2026-01-08T15:36:21.204802Z node 8 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046578944, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2026-01-08T15:36:25.491340Z node 9 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7755: Cannot subscribe to console configs 2026-01-08T15:36:25.491448Z node 9 :IMPORT WARN: schemeshard_import.cpp:440: Table profiles were not loaded 2026-01-08T15:36:25.591868Z node 9 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: 
ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046578944, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2026-01-08T15:36:26.901037Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7755: Cannot subscribe to console configs 2026-01-08T15:36:26.901123Z node 10 :IMPORT WARN: schemeshard_import.cpp:440: Table profiles were not loaded 2026-01-08T15:36:27.051851Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046578944, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2026-01-08T15:36:27.819038Z node 10 :PIPE_SERVER ERROR: tablet_pipe_server.cpp:228: [72057594046578946] NodeDisconnected NodeId# 11 2026-01-08T15:36:27.819893Z node 10 :PIPE_SERVER ERROR: tablet_pipe_server.cpp:228: [72057594037936131] NodeDisconnected NodeId# 11 2026-01-08T15:36:27.819979Z node 10 :PIPE_SERVER ERROR: tablet_pipe_server.cpp:228: [72057594046447617] NodeDisconnected NodeId# 11 2026-01-08T15:36:27.820376Z node 11 :TX_PROXY WARN: proxy_impl.cpp:226: actor# [11:465:2089] HANDLE TEvClientDestroyed from tablet# 72057594046447617 2026-01-08T15:36:28.604053Z node 12 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7755: Cannot subscribe to console configs 2026-01-08T15:36:28.604136Z node 12 :IMPORT WARN: schemeshard_import.cpp:440: Table profiles were not loaded 2026-01-08T15:36:28.728311Z node 12 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046578944, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2026-01-08T15:36:30.718938Z node 14 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7755: Cannot subscribe to console configs 2026-01-08T15:36:30.719035Z node 14 :IMPORT WARN: schemeshard_import.cpp:440: Table profiles were not loaded 2026-01-08T15:36:30.848836Z node 14 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046578944, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2026-01-08T15:36:33.264128Z node 16 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7755: Cannot subscribe to console configs 2026-01-08T15:36:33.264216Z node 16 :IMPORT WARN: schemeshard_import.cpp:440: Table profiles were not loaded 2026-01-08T15:36:33.448098Z node 16 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046578944, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2026-01-08T15:36:35.692181Z node 18 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7755: Cannot subscribe to console configs 2026-01-08T15:36:35.692274Z node 18 :IMPORT WARN: schemeshard_import.cpp:440: Table profiles were not loaded 2026-01-08T15:36:35.817143Z node 18 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: 
ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046578944, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2026-01-08T15:36:37.543667Z node 20 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7755: Cannot subscribe to console configs 2026-01-08T15:36:37.543761Z node 20 :IMPORT WARN: schemeshard_import.cpp:440: Table profiles were not loaded 2026-01-08T15:36:37.650770Z node 20 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046578944, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2026-01-08T15:36:39.145799Z node 21 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7755: Cannot subscribe to console configs 2026-01-08T15:36:39.145877Z node 21 :IMPORT WARN: schemeshard_import.cpp:440: Table profiles were not loaded 2026-01-08T15:36:39.241739Z node 21 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046578944, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2026-01-08T15:36:41.414586Z node 23 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7755: Cannot subscribe to console configs 2026-01-08T15:36:41.414679Z node 23 :IMPORT WARN: schemeshard_import.cpp:440: Table profiles were not loaded 2026-01-08T15:36:41.519804Z node 23 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046578944, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) |88.5%| [TM] {BAZEL_UPLOAD} ydb/core/cms/console/ut_console/unittest >> TExportToS3Tests::CheckItemProgress [GOOD] >> YdbIndexTable::MultiShardTableOneIndexPkOverlap [GOOD] >> TExportToS3Tests::TablePermissions [GOOD] |88.5%| [TM] {asan, default-linux-x86_64, release} ydb/services/metadata/initializer/ut/unittest |88.5%| [TM] {BAZEL_UPLOAD} ydb/services/metadata/initializer/ut/unittest >> TExportToS3Tests::TopicExport >> TSchemeShardExportToFsTests::ShouldExportMultipleTablesWithData [GOOD] >> TSchemeShardExportToFsTests::ShouldExportWithCompressionToFs >> TConsoleConfigTests::TestManageValidators [GOOD] >> TConsoleConfigTests::TestDryRun >> TExportToS3Tests::CancelledExportEndTime >> TExportToS3Tests::TransferExportWithConnString [GOOD] >> TConsoleTxProcessorTests::TestTxProcessorSingle [GOOD] >> TConsoleTxProcessorTests::TestTxProcessorSubProcessor >> BsControllerConfig::ManyPDisksRestarts [GOOD] >> BsControllerConfig::MergeBoxes >> TExportToS3Tests::TransferExportWithConsumer >> DistributedEraseTests::ConditionalEraseRowsShouldErase ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/close_with_load/unittest >> KqpQueryService::CloseSessionsWithLoad [GOOD] Test command err: Trying to start YDB, gRPC: 16750, MsgBus: 62782 2026-01-08T15:36:00.925463Z node 1 :METADATA_PROVIDER WARN: log.cpp:841: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7593014962191953486:2065];send_to=[0:7307199536658146131:7762515]; 2026-01-08T15:36:00.925521Z node 1 :METADATA_PROVIDER ERROR: log.cpp:841: 
fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2026-01-08T15:36:01.076715Z node 1 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:125: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, got undelivered to scheme cache: ActorUnknown 2026-01-08T15:36:01.922462Z node 1 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: LookupError, path: Root/.metadata/script_executions 2026-01-08T15:36:01.951543Z node 1 :TX_CONVEYOR ERROR: log.cpp:841: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2026-01-08T15:36:02.019805Z node 1 :METADATA_PROVIDER ERROR: log.cpp:841: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2026-01-08T15:36:02.225675Z node 1 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: LookupError, path: Root/.metadata/script_executions 2026-01-08T15:36:02.370682Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2026-01-08T15:36:02.370786Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2026-01-08T15:36:02.414611Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2026-01-08T15:36:02.521287Z node 1 :IMPORT WARN: schemeshard_import.cpp:440: Table profiles were not loaded 2026-01-08T15:36:02.801926Z node 1 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: PathErrorUnknown, path: Root/.metadata/script_executions 2026-01-08T15:36:02.834478Z node 1 :BS_CONTROLLER ERROR: {BSC07@impl.h:2204} ProcessControllerEvent event processing took too much time Type# 268637706 Duration# 0.239983s 2026-01-08T15:36:02.834566Z node 1 :BS_CONTROLLER ERROR: {BSC00@bsc.cpp:1009} StateWork event processing took too much time Type# 2146435078 Duration# 0.240103s 2026-01-08T15:36:03.771112Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2026-01-08T15:36:03.771160Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2026-01-08T15:36:03.771173Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2026-01-08T15:36:03.771293Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2026-01-08T15:36:04.957870Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2026-01-08T15:36:04.984755Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:123: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 
2026-01-08T15:36:05.220998Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:36:05.642804Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:36:05.757829Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:36:05.812225Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:36:05.937612Z node 1 :METADATA_PROVIDER ERROR: log.cpp:841: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7593014962191953486:2065];send_to=[0:7307199536658146131:7762515]; 2026-01-08T15:36:05.938158Z node 1 :METADATA_PROVIDER ERROR: log.cpp:841: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2026-01-08T15:36:06.182751Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7593014987961759132:2402], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:36:06.186463Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:263: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:36:06.190488Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7593014987961759142:2403], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:36:06.190596Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:263: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:36:06.900868Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:36:06.983742Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:36:07.037912Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:36:07.128690Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:36:07.184337Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:36:07.252767Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:36:07.303080Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:36:07.370447Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710669:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:36:07.505368Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7593014992256727318:2488], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:36:07.505462Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:263: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:36:07.505654Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7593014992256727320:2489], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:36:07.505678Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:263: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:36:07.507888Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7593014992256727325:2492], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:36:07.514489Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710670:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_resource_poo ... 9))))))))))) )))) ) trace_id# 2026-01-08T15:36:39.595730Z node 1 :KQP_EXECUTER DEBUG: kqp_tasks_graph.cpp:340: Create result channelId: 1 from task: 1 with index: 0 2026-01-08T15:36:39.595740Z node 1 :KQP_EXECUTER DEBUG: kqp_tasks_graph.cpp:340: Create result channelId: 2 from task: 2 with index: 0 2026-01-08T15:36:39.595748Z node 1 :KQP_EXECUTER DEBUG: kqp_tasks_graph.cpp:340: Create result channelId: 3 from task: 3 with index: 0 2026-01-08T15:36:39.595754Z node 1 :KQP_EXECUTER DEBUG: kqp_tasks_graph.cpp:340: Create result channelId: 4 from task: 4 with index: 0 2026-01-08T15:36:39.595761Z node 1 :KQP_EXECUTER DEBUG: kqp_tasks_graph.cpp:340: Create result channelId: 5 from task: 5 with index: 0 2026-01-08T15:36:39.595767Z node 1 :KQP_EXECUTER DEBUG: kqp_tasks_graph.cpp:340: Create result channelId: 6 from task: 6 with index: 0 2026-01-08T15:36:39.601596Z node 1 :KQP_EXECUTER DEBUG: {KQPLIT@kqp_literal_executer.cpp:293} ActorId: [0:0:0] TxId: 0. Ctx: { TraceId: , Database: , SessionId: , PoolId: , IsStreamingQuery: 0}. Execution is complete results_size# 6 trace_id# 2026-01-08T15:36:39.627891Z node 1 :KQP_COMPILE_ACTOR DEBUG: kqp_compile_actor.cpp:425: [[1:7593015125400715526:3149]]: Built the replay message {"guc_settings":{"session_settings":{"ydb_user":"","ydb_database":"Root"},"settings":{"ydb_user":"","ydb_database":"Root"},"rollback_settings":{}},"query_database":"/Root","query_parameter_types":{},"table_metadata":"[\"CAESAmRiGhAvUm9vdC9FaWdodFNoYXJkIgAqDAiAgpSEgICAgAEQJzABOAFKMwoERGF0YRADGgVJbnQzMiABMABCAEgAUgBYAGIWCP///////////wEQ////////////AUozCgNLZXkQARoGVWludDY0IAQwAEIASABSAFgAYhYI////////////ARD///////////8BSjUKBFRleHQQAhoGU3RyaW5nIIEgMABCAEgAUgBYAGIWCP///////////wEQ////////////AVIDS2V5aABwAHgA\"]","table_meta_serialization_type":1,"created_at":"1767886599","query_text":"\\n SELECT Key, Text, Data FROM `/Root/EightShard` WHERE Key=-580133304 + 0;\\n SELECT Key, Data, Text FROM `/Root/EightShard` WHERE Key=-580133304 + 1;\\n SELECT Text, Key, Data FROM `/Root/EightShard` WHERE Key=-580133304 + 2;\\n SELECT Text, Data, Key FROM `/Root/EightShard` WHERE Key=-580133304 + 3;\\n SELECT Data, Key, Text FROM `/Root/EightShard` WHERE Key=-580133304 + 4;\\n SELECT Data, Text, Key FROM `/Root/EightShard` WHERE Key=-580133304 + 5;\\n\\n UPSERT INTO `/Root/EightShard` (Key, Text) VALUES\\n (-785830837ul, \\\"New\\\");\\n ","query_type":"QUERY_TYPE_SQL_GENERIC_CONCURRENT_QUERY","query_syntax":"1","query_cluster":"db","query_plan":"{\"Plan\":{\"Plans\":[{\"Tables\":[\"EightShard\"],\"PlanNodeId\":21,\"Plans\":[{\"PlanNodeId\":20,\"Operators\":[{\"Inputs\":[],\"Iterator\":\"[{Key: 18446744072923720779,Text: \\\"New\\\"}]\",\"Name\":\"Iterator\"}],\"Node Type\":\"ConstantExpr\"}],\"Operators\":[{\"Inputs\":[],\"Path\":\"\\/Root\\/EightShard\",\"Name\":\"Upsert\",\"Table\":\"EightShard\",\"SinkType\":\"KqpTableSink\"}],\"Node 
Type\":\"Sink\"},{\"PlanNodeId\":18,\"Plans\":[{\"PlanNodeId\":17,\"Plans\":[{\"Tables\":[\"EightShard\"],\"PlanNodeId\":16,\"Operators\":[{\"Scan\":\"Parallel\",\"E-Size\":\"0\",\"Name\":\"TableRangeScan\",\"Inputs\":[],\"Path\":\"\\/Root\\/EightShard\",\"ReadRangesPointPrefixLen\":\"1\",\"E-Rows\":\"0\",\"Table\":\"EightShard\",\"ReadRangesKeys\":[\"Key\"],\"ReadColumns\":[\"Data\",\"Key\",\"Text\"],\"E-Cost\":\"0\",\"ReadRangesExpectedSize\":\"1\"}],\"Node Type\":\"TableRangeScan\"}],\"Node Type\":\"Collect\"}],\"Node Type\":\"ResultSet_1_0\",\"PlanNodeType\":\"ResultSet\"},{\"PlanNodeId\":15,\"Plans\":[{\"PlanNodeId\":14,\"Plans\":[{\"Tables\":[\"EightShard\"],\"PlanNodeId\":13,\"Operators\":[{\"Scan\":\"Parallel\",\"E-Size\":\"0\",\"Name\":\"TableRangeScan\",\"Inputs\":[],\"Path\":\"\\/Root\\/EightShard\",\"ReadRangesPointPrefixLen\":\"1\",\"E-Rows\":\"0\",\"Table\":\"EightShard\",\"ReadRangesKeys\":[\"Key\"],\"ReadColumns\":[\"Data\",\"Key\",\"Text\"],\"E-Cost\":\"0\",\"ReadRangesExpectedSize\":\"1\"}],\"Node Type\":\"TableRangeScan\"}],\"Node Type\":\"Collect\"}],\"Node Type\":\"ResultSet_1_1\",\"PlanNodeType\":\"ResultSet\"},{\"PlanNodeId\":12,\"Plans\":[{\"PlanNodeId\":11,\"Plans\":[{\"Tables\":[\"EightShard\"],\"PlanNodeId\":10,\"Operators\":[{\"Scan\":\"Parallel\",\"E-Size\":\"0\",\"Name\":\"TableRangeScan\",\"Inputs\":[],\"Path\":\"\\/Root\\/EightShard\",\"ReadRangesPointPrefixLen\":\"1\",\"E-Rows\":\"0\",\"Table\":\"EightShard\",\"ReadRangesKeys\":[\"Key\"],\"ReadColumns\":[\"Data\",\"Key\",\"Text\"],\"E-Cost\":\"0\",\"ReadRangesExpectedSize\":\"1\"}],\"Node Type\":\"TableRangeScan\"}],\"Node Type\":\"Collect\"}],\"Node Type\":\"ResultSet_1_2\",\"PlanNodeType\":\"ResultSet\"},{\"PlanNodeId\":9,\"Plans\":[{\"PlanNodeId\":8,\"Plans\":[{\"Tables\":[\"EightShard\"],\"PlanNodeId\":7,\"Operators\":[{\"Scan\":\"Parallel\",\"E-Size\":\"0\",\"Name\":\"TableRangeScan\",\"Inputs\":[],\"Path\":\"\\/Root\\/EightShard\",\"ReadRangesPointPrefixLen\":\"1\",\"E-Rows\":\"0\",\"Table\":\"EightShard\",\"ReadRangesKeys\":[\"Key\"],\"ReadColumns\":[\"Data\",\"Key\",\"Text\"],\"E-Cost\":\"0\",\"ReadRangesExpectedSize\":\"1\"}],\"Node Type\":\"TableRangeScan\"}],\"Node Type\":\"Collect\"}],\"Node Type\":\"ResultSet_1_3\",\"PlanNodeType\":\"ResultSet\"},{\"PlanNodeId\":6,\"Plans\":[{\"PlanNodeId\":5,\"Plans\":[{\"Tables\":[\"EightShard\"],\"PlanNodeId\":4,\"Operators\":[{\"Scan\":\"Parallel\",\"E-Size\":\"0\",\"Name\":\"TableRangeScan\",\"Inputs\":[],\"Path\":\"\\/Root\\/EightShard\",\"ReadRangesPointPrefixLen\":\"1\",\"E-Rows\":\"0\",\"Table\":\"EightShard\",\"ReadRangesKeys\":[\"Key\"],\"ReadColumns\":[\"Data\",\"Key\",\"Text\"],\"E-Cost\":\"0\",\"ReadRangesExpectedSize\":\"1\"}],\"Node Type\":\"TableRangeScan\"}],\"Node Type\":\"Collect\"}],\"Node Type\":\"ResultSet_1_4\",\"PlanNodeType\":\"ResultSet\"},{\"PlanNodeId\":3,\"Plans\":[{\"PlanNodeId\":2,\"Plans\":[{\"Tables\":[\"EightShard\"],\"PlanNodeId\":1,\"Operators\":[{\"Scan\":\"Parallel\",\"E-Size\":\"0\",\"Name\":\"TableRangeScan\",\"Inputs\":[],\"Path\":\"\\/Root\\/EightShard\",\"ReadRangesPointPrefixLen\":\"1\",\"E-Rows\":\"0\",\"Table\":\"EightShard\",\"ReadRangesKeys\":[\"Key\"],\"ReadColumns\":[\"Data\",\"Key\",\"Text\"],\"E-Cost\":\"0\",\"ReadRangesExpectedSize\":\"1\"}],\"Node Type\":\"TableRangeScan\"}],\"Node Type\":\"Collect\"}],\"Node Type\":\"ResultSet_1_5\",\"PlanNodeType\":\"ResultSet\"}],\"Node 
Type\":\"Query\",\"Stats\":{\"ResourcePoolId\":\"default\"},\"PlanNodeType\":\"Query\"},\"meta\":{\"version\":\"0.2\",\"type\":\"query\"},\"tables\":[{\"name\":\"\\/Root\\/EightShard\",\"reads\":[{\"columns\":[\"Data\",\"Key\",\"Text\"],\"type\":\"Scan\"},{\"columns\":[\"Data\",\"Key\",\"Text\"],\"type\":\"Scan\"},{\"columns\":[\"Data\",\"Key\",\"Text\"],\"type\":\"Scan\"},{\"columns\":[\"Data\",\"Key\",\"Text\"],\"type\":\"Scan\"},{\"columns\":[\"Data\",\"Key\",\"Text\"],\"type\":\"Scan\"},{\"columns\":[\"Data\",\"Key\",\"Text\"],\"type\":\"Scan\"}],\"writes\":[{\"columns\":[\"Key\",\"Text\"],\"type\":\"MultiUpsert\"}]}],\"SimplifiedPlan\":{\"PlanNodeId\":0,\"Plans\":[{\"PlanNodeId\":1,\"Operators\":[{\"Name\":\"Upsert\",\"Table\":\"EightShard\",\"SinkType\":\"KqpTableSink\"}],\"Node Type\":\"Upsert\"},{\"PlanNodeId\":2,\"Plans\":[{\"PlanNodeId\":4,\"Operators\":[{\"E-Size\":\"0\",\"Name\":\"TableRangeScan\",\"E-Rows\":\"0\",\"Table\":\"EightShard\",\"ReadRangesKeys\":[\"Key\"],\"ReadColumns\":[\"Data\",\"Key\",\"Text\"],\"E-Cost\":\"0\",\"ReadRangesExpectedSize\":\"1\"}],\"Node Type\":\"TableRangeScan\"}],\"Node Type\":\"ResultSet_1_0\",\"PlanNodeType\":\"ResultSet\"},{\"PlanNodeId\":5,\"Plans\":[{\"PlanNodeId\":7,\"Operators\":[{\"E-Size\":\"0\",\"Name\":\"TableRangeScan\",\"E-Rows\":\"0\",\"Table\":\"EightShard\",\"ReadRangesKeys\":[\"Key\"],\"ReadColumns\":[\"Data\",\"Key\",\"Text\"],\"E-Cost\":\"0\",\"ReadRangesExpectedSize\":\"1\"}],\"Node Type\":\"TableRangeScan\"}],\"Node Type\":\"ResultSet_1_1\",\"PlanNodeType\":\"ResultSet\"},{\"PlanNodeId\":8,\"Plans\":[{\"PlanNodeId\":10,\"Operators\":[{\"E-Size\":\"0\",\"Name\":\"TableRangeScan\",\"E-Rows\":\"0\",\"Table\":\"EightShard\",\"ReadRangesKeys\":[\"Key\"],\"ReadColumns\":[\"Data\",\"Key\",\"Text\"],\"E-Cost\":\"0\",\"ReadRangesExpectedSize\":\"1\"}],\"Node Type\":\"TableRangeScan\"}],\"Node Type\":\"ResultSet_1_2\",\"PlanNodeType\":\"ResultSet\"},{\"PlanNodeId\":11,\"Plans\":[{\"PlanNodeId\":13,\"Operators\":[{\"E-Size\":\"0\",\"Name\":\"TableRangeScan\",\"E-Rows\":\"0\",\"Table\":\"EightShard\",\"ReadRangesKeys\":[\"Key\"],\"ReadColumns\":[\"Data\",\"Key\",\"Text\"],\"E-Cost\":\"0\",\"ReadRangesExpectedSize\":\"1\"}],\"Node Type\":\"TableRangeScan\"}],\"Node Type\":\"ResultSet_1_3\",\"PlanNodeType\":\"ResultSet\"},{\"PlanNodeId\":14,\"Plans\":[{\"PlanNodeId\":16,\"Operators\":[{\"E-Size\":\"0\",\"Name\":\"TableRangeScan\",\"E-Rows\":\"0\",\"Table\":\"EightShard\",\"ReadRangesKeys\":[\"Key\"],\"ReadColumns\":[\"Data\",\"Key\",\"Text\"],\"E-Cost\":\"0\",\"ReadRangesExpectedSize\":\"1\"}],\"Node Type\":\"TableRangeScan\"}],\"Node Type\":\"ResultSet_1_4\",\"PlanNodeType\":\"ResultSet\"},{\"PlanNodeId\":17,\"Plans\":[{\"PlanNodeId\":19,\"Operators\":[{\"E-Size\":\"0\",\"Name\":\"TableRangeScan\",\"E-Rows\":\"0\",\"Table\":\"EightShard\",\"ReadRangesKeys\":[\"Key\"],\"ReadColumns\":[\"Data\",\"Key\",\"Text\"],\"E-Cost\":\"0\",\"ReadRangesExpectedSize\":\"1\"}],\"Node Type\":\"TableRangeScan\"}],\"Node Type\":\"ResultSet_1_5\",\"PlanNodeType\":\"ResultSet\"}],\"Node Type\":\"Query\",\"OptimizerStats\":{\"EquiJoinsCount\":0,\"JoinsCount\":0},\"PlanNodeType\":\"Query\"}}","query_id":"f62d23af-cc645730-6c8d1063-37ba1945","version":"1.0"} 2026-01-08T15:36:39.630248Z node 1 :KQP_COMPILE_ACTOR DEBUG: kqp_compile_actor.cpp:616: Compilation successful, self: [1:7593015125400715526:3149], duration: 1.468989s 2026-01-08T15:36:39.630294Z node 1 :KQP_COMPILE_ACTOR DEBUG: kqp_compile_actor.cpp:437: Send response, self: [1:7593015125400715526:3149], 
owner: [1:7593014983666790804:2310], status: SUCCESS, issues: , uid: f62d23af-cc645730-6c8d1063-37ba1945 2026-01-08T15:36:39.630420Z node 1 :KQP_COMPILE_SERVICE DEBUG: kqp_compile_service.cpp:628: Received response, sender: [1:7593015009436597046:2692], status: SUCCESS, compileActor: [1:7593015125400715526:3149] 2026-01-08T15:36:39.630603Z node 1 :KQP_COMPILE_SERVICE DEBUG: kqp_compile_service.cpp:719: Insert query into compile cache, queryId: {Cluster: db, Database: /Root, DatabaseId: /Root, UserSid: , Text: \n SELECT Key, Text, Data FROM `/Root/EightShard` WHERE Key=-580133304 + 0;\n SELECT Key, Data, Text FROM `/Root/EightShard` WHERE Key=-580133304 + 1;\n SELECT Text, Key, Data FROM `/Root/EightShard` WHERE Key=-580133304 + 2;\n SELECT Text, Data, Key FROM `/Root/EightShard` WHERE Key=-580133304 + 3;\n SELECT Data, Key, Text FROM `/Root/EightShard` WHERE Key=-580133304 + 4;\n SELECT Data, Text, Key FROM `/Root/EightShard` WHERE Key=-580133304 + 5;\n\n UPSERT INTO `/Root/EightShard` (Key, Text) VALUES\n (-785830837ul, \"New\");\n , Settings: {DocumentApiRestricted: 1, IsInternalCall: 0, QueryType: QUERY_TYPE_SQL_GENERIC_CONCURRENT_QUERY, RuntimeParameterSizeLimit: 9999RuntimeParameterSizeLimitSatisfied: 1}, QueryParameterTypes: , GUCSettings: { "guc_settings": { "session_settings": { "ydb_user":"", "ydb_database":"Root" }, "settings": { "ydb_user":"", "ydb_database":"Root" }, "rollback_settings": { } } }} 2026-01-08T15:36:39.630807Z node 1 :KQP_COMPILE_SERVICE DEBUG: kqp_compile_service.cpp:901: Send response, sender: [1:7593015009436597046:2692], queryUid: f62d23af-cc645730-6c8d1063-37ba1945, status:SUCCESS still compiling... 0 still active sessions ... 0 |88.5%| [TM] {BAZEL_UPLOAD} ydb/core/kqp/ut/close_with_load/unittest >> TExportToS3Tests::EnableChecksumsPersistance [GOOD] >> TExportToS3Tests::EncryptedExport >> TExportToS3Tests::ReplicationExportWithOAuthCredentials [GOOD] |88.5%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/blobstorage/dsproxy/ut/ydb-core-blobstorage-dsproxy-ut >> PgCatalog::CheckSetConfig [GOOD] |88.5%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/blobstorage/dsproxy/ut/ydb-core-blobstorage-dsproxy-ut >> PgCatalog::PgDatabase+useSink |88.5%| [LD] {RESULT} $(B)/ydb/core/blobstorage/dsproxy/ut/ydb-core-blobstorage-dsproxy-ut >> TExportToS3Tests::ShouldSucceedOnConcurrentTxs [GOOD] >> TxUsage::Sinks_Olap_WriteToTopicAndTable_4_Table [GOOD] >> TConsoleConfigTests::TestDryRun [GOOD] >> TConsoleConfigTests::TestAffectedConfigs >> TExportToS3Tests::ReplicationExportMultipleItems >> TExportToS3Tests::ShouldSucceedOnConcurrentExport >> EraseRowsTests::ConditionalEraseRowsShouldEraseOnDyNumberSeconds ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/idx_test/unittest >> YdbIndexTable::MultiShardTableOneIndexPkOverlap [GOOD] Test command err: Trying to start YDB, gRPC: 7357, MsgBus: 26602 2026-01-08T15:34:16.809032Z node 1 :METADATA_PROVIDER WARN: log.cpp:841: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7593014513036208109:2082];send_to=[0:7307199536658146131:7762515]; 2026-01-08T15:34:16.810453Z node 1 :METADATA_PROVIDER ERROR: log.cpp:841: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2026-01-08T15:34:16.841730Z node 1 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:125: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, got undelivered to scheme 
cache: ActorUnknown 2026-01-08T15:34:17.338450Z node 1 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: LookupError, path: Root/.metadata/script_executions 2026-01-08T15:34:17.341905Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2026-01-08T15:34:17.342012Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2026-01-08T15:34:17.347486Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2026-01-08T15:34:17.483303Z node 1 :IMPORT WARN: schemeshard_import.cpp:440: Table profiles were not loaded 2026-01-08T15:34:17.498487Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1374: Notification cookie mismatch for subscription [1:7593014513036208055:2081] 1767886456772745 != 1767886456772748 2026-01-08T15:34:17.672234Z node 1 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: PathErrorUnknown, path: Root/.metadata/script_executions 2026-01-08T15:34:17.796052Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2026-01-08T15:34:17.796079Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2026-01-08T15:34:17.796087Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2026-01-08T15:34:17.796192Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2026-01-08T15:34:17.821702Z node 1 :TX_CONVEYOR ERROR: log.cpp:841: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2026-01-08T15:34:18.828800Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2026-01-08T15:34:18.981605Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:34:19.178866Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:34:19.347323Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:34:19.455105Z node 1 :FLAT_TX_SCHEMESHARD WARN: 
schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:34:21.777872Z node 1 :METADATA_PROVIDER ERROR: log.cpp:841: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7593014513036208109:2082];send_to=[0:7307199536658146131:7762515]; 2026-01-08T15:34:21.777946Z node 1 :METADATA_PROVIDER ERROR: log.cpp:841: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2026-01-08T15:34:21.872772Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7593014534511046416:2401], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:34:21.872891Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:263: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:34:21.875647Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7593014534511046425:2402], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:34:21.875737Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:263: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:34:22.244589Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:34:22.313171Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:34:22.354504Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:34:22.409864Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:34:22.465271Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:34:22.567145Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:34:22.641560Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:34:22.721973Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710669:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:34:22.916215Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7593014538806014602:2487], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:34:22.916339Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:263: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:34:22.916674Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7593014538806014608:2491], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:34:22.916740Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7593014538806014607:2490], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:34:22.916769Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:263: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:34:22.922100Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710670:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2026-01-08T15:34:22.961261Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7593014538806014611:2492], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710670 completed, doublechecking } 2026-01-08T15:34:23.032200Z node 1 :TX_PROXY ERROR: schemereq.cpp:624: Actor# [1:7593014543100981958:3776] txid# 281474976710671, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 53], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2026-01-08T15:34:24.825062Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but ... S finished with status: SUCCESS finished with status: SUCCESS Trying to start YDB, gRPC: 13062, MsgBus: 2773 2026-01-08T15:35:34.575636Z node 2 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: LookupError, path: Root/.metadata/script_executions 2026-01-08T15:35:34.575737Z node 2 :METADATA_PROVIDER ERROR: log.cpp:841: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2026-01-08T15:35:34.693281Z node 2 :IMPORT WARN: schemeshard_import.cpp:440: Table profiles were not loaded 2026-01-08T15:35:34.698417Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1374: Notification cookie mismatch for subscription [2:7593014847782450455:2081] 1767886534440009 != 1767886534440012 2026-01-08T15:35:34.738100Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2026-01-08T15:35:34.738961Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2026-01-08T15:35:34.789706Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2026-01-08T15:35:34.878442Z node 2 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: PathErrorUnknown, path: Root/.metadata/script_executions 2026-01-08T15:35:34.930569Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2026-01-08T15:35:34.930596Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2026-01-08T15:35:34.930606Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2026-01-08T15:35:34.930700Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2026-01-08T15:35:35.438494Z node 2 :TX_CONVEYOR ERROR: log.cpp:841: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2026-01-08T15:35:35.767664Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2026-01-08T15:35:35.781625Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:123: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2026-01-08T15:35:35.793368Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: 
Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:35:35.915866Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:35:36.195896Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:35:36.294845Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:35:38.718056Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7593014864962321526:2400], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:35:38.718168Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:263: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:35:38.718521Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7593014864962321535:2401], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:35:38.718581Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:263: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:35:38.804948Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:35:38.885324Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:35:38.957337Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:35:39.033254Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:35:39.124924Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:35:39.213534Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:35:39.286179Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:35:39.416707Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710669:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:35:39.591239Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7593014869257289709:2484], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:35:39.591333Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:263: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:35:39.591924Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7593014869257289714:2487], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:35:39.591923Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7593014869257289715:2488], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:35:39.591997Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:263: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:35:39.595614Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710670:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2026-01-08T15:35:39.608089Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7593014869257289718:2489], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710670 completed, doublechecking } 2026-01-08T15:35:39.672098Z node 2 :TX_PROXY ERROR: schemereq.cpp:624: Actor# [2:7593014869257289769:3777] txid# 281474976710671, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 53], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2026-01-08T15:35:42.011102Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710673:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:35:49.684131Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7747: Cannot get console configs 2026-01-08T15:35:49.684162Z node 2 :IMPORT WARN: schemeshard_import.cpp:440: Table profiles were not loaded finished with status: SUCCESS finished with status: SUCCESS finished with status: SUCCESS finished with status: SUCCESS finished with status: SUCCESS >> TExportToS3Tests::IndexMaterializationGlobalVectorKmeansTree [GOOD] >> TExportToS3Tests::TopicExport [GOOD] |88.6%| [TM] {BAZEL_UPLOAD} ydb/core/kqp/ut/idx_test/unittest >> TExportToS3Tests::IndexMaterializationGlobalVectorKmeansTreePrefix >> TExportToS3Tests::TopicWithPermissionsExport >> TExportToS3Tests::CancelledExportEndTime [GOOD] >> TExportToS3Tests::TransferExportWithConsumer [GOOD] >> EraseRowsTests::ConditionalEraseRowsShouldEraseOnPgInt4Seconds >> EraseRowsTests::ConditionalEraseRowsShouldEraseOnDyNumberMicroSeconds >> TSchemeShardExportToFsTests::ShouldExportWithCompressionToFs [GOOD] >> TExportToS3Tests::Checksums >> TConsoleTests::TestAlterTenantModifyStorageResourcesForRunning [GOOD] >> TConsoleTests::TestAlterTenantModifyStorageResourcesForRunningExtSubdomain >> TSchemeShardExportToFsTests::ShouldAcceptCompressionForFs >> TConsoleConfigSubscriptionTests::TestNotificationForRemovedConfigItem [GOOD] >> TConsoleConfigSubscriptionTests::TestNotificationForRestartedClient >> TExportToS3Tests::CancelUponTransferringMultiShardTableShouldSucceed [GOOD] >> KqpPg::TempTablesWithCache [GOOD] >> KqpPg::TableDeleteWhere+useSink >> TExportToS3Tests::CancelUponTransferringManyTablesShouldSucceed >> TConsoleConfigTests::TestAffectedConfigs [GOOD] >> TNetClassifierUpdaterTest::TestFiltrationByNetboxCustomFieldsAndTags [GOOD] >> TNetClassifierUpdaterTest::TestFiltrationByNetboxCustomFieldsOnly >> TExportToS3Tests::ReplicationExportMultipleItems [GOOD] >> TExportToS3Tests::ReplicationExportGlobalConsistency |88.6%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/grpc_services/grpc_request_check_actor_ut/core-grpc_services-grpc_request_check_actor_ut |88.6%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/grpc_services/grpc_request_check_actor_ut/core-grpc_services-grpc_request_check_actor_ut |88.6%| [LD] {RESULT} $(B)/ydb/core/grpc_services/grpc_request_check_actor_ut/core-grpc_services-grpc_request_check_actor_ut >> EraseRowsTests::EraseRowsShouldSuccess |88.6%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/schemeshard/ut_bsvolume/ydb-core-tx-schemeshard-ut_bsvolume |88.6%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/schemeshard/ut_bsvolume/ydb-core-tx-schemeshard-ut_bsvolume |88.6%| [LD] {RESULT} $(B)/ydb/core/tx/schemeshard/ut_bsvolume/ydb-core-tx-schemeshard-ut_bsvolume >> 
TExportToS3Tests::EncryptedExport [GOOD] >> TExportToS3Tests::TopicWithPermissionsExport [GOOD] >> TxUsage::Sinks_Olap_WriteToTopicAndTable_4_Query >> TExportToS3Tests::ExportTableWithUniqueIndex >> TExportToS3Tests::TopicsExport >> TConsoleTests::TestCreateTenantAlreadyExistsExtSubdomain [GOOD] >> TConsoleTests::TestGetUnknownTenantStatus >> ColumnShardTiers::DSConfigsStub [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/cms/console/ut_console/unittest >> TConsoleConfigTests::TestAffectedConfigs [GOOD] Test command err: 2026-01-08T15:36:08.632303Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7755: Cannot subscribe to console configs 2026-01-08T15:36:08.632879Z node 1 :IMPORT WARN: schemeshard_import.cpp:440: Table profiles were not loaded 2026-01-08T15:36:08.884433Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046578944, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2026-01-08T15:36:10.242245Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7755: Cannot subscribe to console configs 2026-01-08T15:36:10.242311Z node 2 :IMPORT WARN: schemeshard_import.cpp:440: Table profiles were not loaded 2026-01-08T15:36:10.330243Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046578944, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2026-01-08T15:36:11.495061Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7755: Cannot subscribe to console configs 2026-01-08T15:36:11.495109Z node 3 :IMPORT WARN: schemeshard_import.cpp:440: Table profiles were not loaded 2026-01-08T15:36:11.569253Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046578944, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2026-01-08T15:36:12.765993Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7755: Cannot subscribe to console configs 2026-01-08T15:36:12.766087Z node 4 :IMPORT WARN: schemeshard_import.cpp:440: Table profiles were not loaded 2026-01-08T15:36:12.880717Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046578944, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2026-01-08T15:36:13.925054Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7747: Cannot get console configs 2026-01-08T15:36:13.925114Z node 4 :IMPORT WARN: schemeshard_import.cpp:440: Table profiles were not loaded 2026-01-08T15:36:14.006588Z node 4 :CMS_CONFIGS ERROR: console_configs_provider.cpp:1206: Couldn't deliver config notification for subscription id=1 tabletid=8651011 serviceid=[0:0:0] nodeid=1 host=host1 tenant=tenant1 nodetype=type1 kinds=2 lastprovidedconfig= 2026-01-08T15:36:14.557784Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7755: Cannot subscribe to console configs 
2026-01-08T15:36:14.557897Z node 5 :IMPORT WARN: schemeshard_import.cpp:440: Table profiles were not loaded 2026-01-08T15:36:14.672072Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046578944, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2026-01-08T15:36:15.768665Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7747: Cannot get console configs 2026-01-08T15:36:15.768760Z node 5 :IMPORT WARN: schemeshard_import.cpp:440: Table profiles were not loaded 2026-01-08T15:36:15.863403Z node 5 :CMS_CONFIGS ERROR: console_configs_provider.cpp:1206: Couldn't deliver config notification for subscription id=1 tabletid=0 serviceid=[5:8246204620103118691:7960687] nodeid=1 host=host1 tenant=tenant1 nodetype=type1 kinds=2 lastprovidedconfig= 2026-01-08T15:36:16.545000Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7755: Cannot subscribe to console configs 2026-01-08T15:36:16.545087Z node 6 :IMPORT WARN: schemeshard_import.cpp:440: Table profiles were not loaded 2026-01-08T15:36:16.672391Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046578944, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2026-01-08T15:36:20.805419Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7755: Cannot subscribe to console configs 2026-01-08T15:36:20.805496Z node 7 :IMPORT WARN: schemeshard_import.cpp:440: Table profiles were not loaded 2026-01-08T15:36:20.987717Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046578944, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2026-01-08T15:36:25.236518Z node 8 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7755: Cannot subscribe to console configs 2026-01-08T15:36:25.236598Z node 8 :IMPORT WARN: schemeshard_import.cpp:440: Table profiles were not loaded 2026-01-08T15:36:25.402577Z node 8 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046578944, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2026-01-08T15:36:26.988240Z node 9 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7755: Cannot subscribe to console configs 2026-01-08T15:36:26.988351Z node 9 :IMPORT WARN: schemeshard_import.cpp:440: Table profiles were not loaded 2026-01-08T15:36:27.102832Z node 9 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046578944, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2026-01-08T15:36:28.784970Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7755: Cannot subscribe to console configs 2026-01-08T15:36:28.785072Z node 10 :IMPORT WARN: 
schemeshard_import.cpp:440: Table profiles were not loaded 2026-01-08T15:36:28.911180Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046578944, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2026-01-08T15:36:31.165141Z node 11 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7755: Cannot subscribe to console configs 2026-01-08T15:36:31.165244Z node 11 :IMPORT WARN: schemeshard_import.cpp:440: Table profiles were not loaded 2026-01-08T15:36:31.298293Z node 11 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046578944, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2026-01-08T15:36:33.140549Z node 12 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7755: Cannot subscribe to console configs 2026-01-08T15:36:33.140636Z node 12 :IMPORT WARN: schemeshard_import.cpp:440: Table profiles were not loaded 2026-01-08T15:36:33.325285Z node 12 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046578944, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2026-01-08T15:36:35.435203Z node 13 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7755: Cannot subscribe to console configs 2026-01-08T15:36:35.435287Z node 13 :IMPORT WARN: schemeshard_import.cpp:440: Table profiles were not loaded 2026-01-08T15:36:35.599037Z node 13 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046578944, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2026-01-08T15:36:37.053238Z node 14 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7755: Cannot subscribe to console configs 2026-01-08T15:36:37.053334Z node 14 :IMPORT WARN: schemeshard_import.cpp:440: Table profiles were not loaded 2026-01-08T15:36:37.164279Z node 14 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046578944, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2026-01-08T15:36:38.361683Z node 15 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7755: Cannot subscribe to console configs 2026-01-08T15:36:38.361778Z node 15 :IMPORT WARN: schemeshard_import.cpp:440: Table profiles were not loaded 2026-01-08T15:36:38.470508Z node 15 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046578944, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2026-01-08T15:36:39.644694Z node 16 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7755: Cannot subscribe to console 
configs 2026-01-08T15:36:39.644782Z node 16 :IMPORT WARN: schemeshard_import.cpp:440: Table profiles were not loaded 2026-01-08T15:36:39.751631Z node 16 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046578944, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2026-01-08T15:36:41.033813Z node 17 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7755: Cannot subscribe to console configs 2026-01-08T15:36:41.033920Z node 17 :IMPORT WARN: schemeshard_import.cpp:440: Table profiles were not loaded 2026-01-08T15:36:41.116025Z node 17 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046578944, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2026-01-08T15:36:42.435558Z node 18 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7755: Cannot subscribe to console configs 2026-01-08T15:36:42.435673Z node 18 :IMPORT WARN: schemeshard_import.cpp:440: Table profiles were not loaded 2026-01-08T15:36:42.551629Z node 18 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046578944, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2026-01-08T15:36:43.995192Z node 19 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7755: Cannot subscribe to console configs 2026-01-08T15:36:43.995296Z node 19 :IMPORT WARN: schemeshard_import.cpp:440: Table profiles were not loaded 2026-01-08T15:36:44.129794Z node 19 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046578944, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2026-01-08T15:36:45.468822Z node 20 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7755: Cannot subscribe to console configs 2026-01-08T15:36:45.468947Z node 20 :IMPORT WARN: schemeshard_import.cpp:440: Table profiles were not loaded 2026-01-08T15:36:45.587111Z node 20 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046578944, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) |88.6%| [TM] {BAZEL_UPLOAD} ydb/core/cms/console/ut_console/unittest ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_export/unittest >> TSchemeShardExportToFsTests::ShouldExportWithCompressionToFs [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2143] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2143] Leader for TabletID 72057594046678944 is [1:129:2154] sender: [1:130:2058] recipient: [1:112:2143] 2026-01-08T15:36:34.067568Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7938: BackgroundCompactionQueue 
configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2026-01-08T15:36:34.067686Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7966: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2026-01-08T15:36:34.067730Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7852: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2026-01-08T15:36:34.067784Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7868: OperationsProcessing config: using default configuration 2026-01-08T15:36:34.067838Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7874: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2026-01-08T15:36:34.067872Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7874: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2026-01-08T15:36:34.067937Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7998: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2026-01-08T15:36:34.068026Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_shred_manager.cpp:39: [RootShredManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, ShredInterval# 604800.000000s, ShredBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2026-01-08T15:36:34.069065Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:8069: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2026-01-08T15:36:34.069362Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2026-01-08T15:36:34.215015Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7755: Cannot subscribe to console configs 2026-01-08T15:36:34.215118Z node 1 :IMPORT WARN: schemeshard_import.cpp:440: Table profiles were not loaded 2026-01-08T15:36:34.227658Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2026-01-08T15:36:34.228026Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2026-01-08T15:36:34.228255Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2026-01-08T15:36:34.245245Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2026-01-08T15:36:34.245669Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:464: Clear TempDirsState with owners number: 0 2026-01-08T15:36:34.246533Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1383: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2026-01-08T15:36:34.247258Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2026-01-08T15:36:34.251225Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:160: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2026-01-08T15:36:34.251457Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_shred_manager.cpp:84: [RootShredManager] Stop 2026-01-08T15:36:34.252753Z node 1 :FLAT_TX_SCHEMESHARD INFO: 
schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2026-01-08T15:36:34.252826Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2026-01-08T15:36:34.253001Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2026-01-08T15:36:34.253066Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2026-01-08T15:36:34.253114Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:213: TTxServerlessStorageBilling.Complete 2026-01-08T15:36:34.253298Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:7111: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2026-01-08T15:36:34.439141Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot" OperationType: ESchemeOpMkDir MkDir { Name: ".sys" } Internal: true FailOnExist: false } TxId: 281474976710657 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:36:34.440404Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "auth_permissions" Type: EAuthPermissions } } TxId: 281474976710658 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:36:34.440600Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "auth_effective_permissions" Type: EAuthEffectivePermissions } } TxId: 281474976710659 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:36:34.440693Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "auth_users" Type: EAuthUsers } } TxId: 281474976710660 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:36:34.450450Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "pg_tables" Type: EPgTables } } TxId: 281474976710661 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:36:34.450718Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { 
WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "ds_pdisks" Type: EPDisks } } TxId: 281474976710662 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:36:34.450803Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "auth_group_members" Type: EAuthGroupMembers } } TxId: 281474976710663 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:36:34.450926Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "top_partitions_one_minute" Type: ETopPartitionsByCpuOneMinute } } TxId: 281474976710664 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:36:34.451032Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "top_queries_by_request_units_one_hour" Type: ETopQueriesByRequestUnitsOneHour } } TxId: 281474976710665 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:36:34.451139Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "ds_vslots" Type: EVSlots } } TxId: 281474976710666 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:36:34.451238Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "top_partitions_by_tli_one_hour" Type: ETopPartitionsByTliOneHour } } TxId: 281474976710667 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:36:34.451354Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "nodes" Type: ENodes } } TxId: 281474976710668 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:36:34.451439Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "top_queries_by_cpu_time_one_hour" Type: ETopQueriesByCpuTimeOneHour } } TxId: 
281474976710669 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:36:34.451527Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "top_queries_by_read_bytes_one_hour" Type: ETopQueriesByReadBytesOneHour } } TxId: 281474976710670 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:36:34.451604Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "top_queries_by_read_bytes_one_minute" Type: ETopQueriesByR ... : 39 Version: 7 PathOwnerId: 72057594046678944, cookie: 281474976710761 2026-01-08T15:36:45.502908Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 39 Version: 7 PathOwnerId: 72057594046678944, cookie: 281474976710761 2026-01-08T15:36:45.502940Z node 6 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:108: Operation in-flight, at schemeshard: 72057594046678944, txId: 281474976710761 2026-01-08T15:36:45.502975Z node 6 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 281474976710761, pathId: [OwnerId: 72057594046678944, LocalPathId: 39], version: 7 2026-01-08T15:36:45.503026Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:611: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 39] was 2 2026-01-08T15:36:45.503157Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1679: TOperation IsReadyToNotify, TxId: 281474976710761, ready parts: 0/1, is published: true 2026-01-08T15:36:45.505776Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:665: Send tablet strongly msg operationId: 281474976710761:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:281474976710761 msg type: 269090816 2026-01-08T15:36:45.505900Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1824: TOperation RegisterRelationByTabletId, TxId: 281474976710761, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 281474976710761 at step: 5000043 FAKE_COORDINATOR: advance: minStep5000043 State->FrontStep: 5000042 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 281474976710761 at step: 5000043 2026-01-08T15:36:45.506554Z node 6 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:685: TTxOperationPlanStep Execute, stepId: 5000043, transactions count in step: 1, at schemeshard: 72057594046678944 2026-01-08T15:36:45.506667Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:689: TTxOperationPlanStep Execute, message: Transactions { TxId: 281474976710761 Coordinator: 72057594046316545 AckTo { RawX1: 136 RawX2: 25769805934 } } Step: 5000043 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2026-01-08T15:36:45.506729Z node 6 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_rmdir.cpp:128: TRmDir 
HandleReply TEvOperationPlan, opId: 281474976710761:0, step: 5000043, at schemeshard: 72057594046678944 2026-01-08T15:36:45.506902Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_rmdir.cpp:179: RmDir is done, opId: 281474976710761:0, at schemeshard: 72057594046678944 2026-01-08T15:36:45.506989Z node 6 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:928: Part operation is done id#281474976710761:0 progress is 1/1 2026-01-08T15:36:45.507040Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1714: TOperation IsReadyToDone TxId: 281474976710761 ready parts: 1/1 2026-01-08T15:36:45.507097Z node 6 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:928: Part operation is done id#281474976710761:0 progress is 1/1 2026-01-08T15:36:45.507149Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1714: TOperation IsReadyToDone TxId: 281474976710761 ready parts: 1/1 2026-01-08T15:36:45.507212Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:600: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 3 2026-01-08T15:36:45.507302Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:600: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 39] was 1 2026-01-08T15:36:45.507350Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1679: TOperation IsReadyToNotify, TxId: 281474976710761, ready parts: 1/1, is published: false 2026-01-08T15:36:45.507414Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1714: TOperation IsReadyToDone TxId: 281474976710761 ready parts: 1/1 2026-01-08T15:36:45.507476Z node 6 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:995: Operation and all the parts is done, operation id: 281474976710761:0 2026-01-08T15:36:45.507516Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5577: RemoveTx for txid 281474976710761:0 2026-01-08T15:36:45.507606Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:611: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 39] was 2 2026-01-08T15:36:45.507667Z node 6 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:1004: Publication still in progress, tx: 281474976710761, publications: 2, subscribers: 1 2026-01-08T15:36:45.507724Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:1011: Publication details: tx: 281474976710761, [OwnerId: 72057594046678944, LocalPathId: 1], 14 2026-01-08T15:36:45.507778Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:1011: Publication details: tx: 281474976710761, [OwnerId: 72057594046678944, LocalPathId: 39], 18446744073709551615 2026-01-08T15:36:45.509675Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 281474976710761 2026-01-08T15:36:45.509861Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 281474976710761 2026-01-08T15:36:45.511385Z node 6 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2026-01-08T15:36:45.511447Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 
72057594046678944, txId: 281474976710761, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2026-01-08T15:36:45.511633Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 281474976710761, path id: [OwnerId: 72057594046678944, LocalPathId: 39] 2026-01-08T15:36:45.511787Z node 6 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2026-01-08T15:36:45.511829Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [6:206:2207], at schemeshard: 72057594046678944, txId: 281474976710761, path id: 1 2026-01-08T15:36:45.511877Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [6:206:2207], at schemeshard: 72057594046678944, txId: 281474976710761, path id: 39 FAKE_COORDINATOR: Erasing txId 281474976710761 2026-01-08T15:36:45.512747Z node 6 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6274: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 14 PathOwnerId: 72057594046678944, cookie: 281474976710761 2026-01-08T15:36:45.512848Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 14 PathOwnerId: 72057594046678944, cookie: 281474976710761 2026-01-08T15:36:45.512898Z node 6 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 2, at schemeshard: 72057594046678944, txId: 281474976710761 2026-01-08T15:36:45.512966Z node 6 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 281474976710761, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], version: 14 2026-01-08T15:36:45.513028Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:611: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 4 2026-01-08T15:36:45.513599Z node 6 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6274: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 39 Version: 18446744073709551615 PathOwnerId: 72057594046678944, cookie: 281474976710761 2026-01-08T15:36:45.513689Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 39 Version: 18446744073709551615 PathOwnerId: 72057594046678944, cookie: 281474976710761 2026-01-08T15:36:45.513725Z node 6 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046678944, txId: 281474976710761 2026-01-08T15:36:45.513763Z node 6 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 281474976710761, pathId: [OwnerId: 72057594046678944, LocalPathId: 39], version: 18446744073709551615 2026-01-08T15:36:45.513798Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:611: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 39] was 1 
2026-01-08T15:36:45.513893Z node 6 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 281474976710761, subscribers: 1 2026-01-08T15:36:45.513961Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:212: TTxAckPublishToSchemeBoard Notify send TEvNotifyTxCompletionResult, at schemeshard: 72057594046678944, to actorId: [6:129:2154] 2026-01-08T15:36:45.514751Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:123: TTxCleanDroppedPaths Execute, 1 paths in candidate queue, at schemeshard: 72057594046678944 2026-01-08T15:36:45.514796Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:137: TTxCleanDroppedPaths: PersistRemovePath for PathId# [OwnerId: 72057594046678944, LocalPathId: 39], at schemeshard: 72057594046678944 2026-01-08T15:36:45.514869Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:611: DecrementPathDbRefCount reason remove path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 3 2026-01-08T15:36:45.517160Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 281474976710761 2026-01-08T15:36:45.518879Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 281474976710761 2026-01-08T15:36:45.518986Z node 6 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:160: TTxCleanDroppedPaths Complete, done PersistRemovePath for 1 paths, skipped 0, left 0 candidates, at schemeshard: 72057594046678944 2026-01-08T15:36:45.519044Z node 6 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:7196: Handle: TEvNotifyTxCompletionResult: txId# 281474976710761 2026-01-08T15:36:45.519098Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:7198: Message: TxId: 281474976710761 2026-01-08T15:36:45.521348Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:230: tests -- TTxNotificationSubscriber for txId 112: got EvNotifyTxCompletionResult 2026-01-08T15:36:45.521421Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:239: tests -- TTxNotificationSubscriber for txId 112: satisfy waiter [6:919:2877] TestWaitNotification: OK eventTxId 112 >> TSchemeShardExportToFsTests::ShouldAcceptCompressionForFs [GOOD] >> TSchemeShardExportToFsTests::FsExportWithMultipleTables >> TExportToS3Tests::ShouldSucceedOnConcurrentExport [GOOD] >> TConsoleTxProcessorTests::TestTxProcessorSubProcessor [GOOD] >> TConsoleTxProcessorTests::TestTxProcessorTemporary |88.6%| [TM] {BAZEL_UPLOAD} ydb/core/tx/schemeshard/ut_export/unittest >> DistributedEraseTests::ConditionalEraseRowsShouldNotErase >> TExportToS3Tests::Checksums [GOOD] >> TExportToS3Tests::ShouldSucceedOnConcurrentImport >> TExportToS3Tests::ChecksumsWithCompression >> TExportToS3Tests::IndexMaterializationGlobalVectorKmeansTreePrefix [GOOD] >> TExportToS3Tests::ReplicationExportGlobalConsistency [GOOD] >> TExportToS3Tests::IndexMaterializationTwoTables >> TExportToS3Tests::ReplicatedTableExport ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/tiering/ut/unittest >> ColumnShardTiers::DSConfigsStub [GOOD] Test command err: ... 
waiting for SysViewsRoster update finished 2026-01-08T15:34:52.272447Z node 1 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:125: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, got undelivered to scheme cache: ActorUnknown 2026-01-08T15:34:52.418296Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:114:2161], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2026-01-08T15:34:52.418762Z node 1 :METADATA_PROVIDER ERROR: log.cpp:841: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2026-01-08T15:34:52.418923Z node 1 :METADATA_PROVIDER ERROR: log.cpp:841: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2026-01-08T15:34:52.775842Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2026-01-08T15:34:52.775967Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2026-01-08T15:34:52.906263Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1374: Notification cookie mismatch for subscription [1:34:2081] 1767886487836733 != 1767886487836737 2026-01-08T15:34:52.916877Z node 1 :IMPORT WARN: schemeshard_import.cpp:440: Table profiles were not loaded 2026-01-08T15:34:52.979735Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2026-01-08T15:34:53.128909Z node 1 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: PathErrorUnknown, path: Root/.metadata/script_executions ... waiting for SysViewsRoster update finished (done) 2026-01-08T15:34:54.241812Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2026-01-08T15:34:54.245101Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2026-01-08T15:34:54.245166Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2026-01-08T15:34:54.245198Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2026-01-08T15:34:54.245450Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2026-01-08T15:34:54.276067Z node 1 :TX_CONVEYOR ERROR: log.cpp:841: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2026-01-08T15:34:54.446730Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateColumnStore, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/olap/operations/create_store.cpp:461) 2026-01-08T15:34:54.665199Z node 1 :TX_COLUMNSHARD WARN: log.cpp:841: tablet_id=72075186224037888;self_id=[1:947:2806];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2026-01-08T15:34:54.665613Z node 1 :TX_COLUMNSHARD WARN: log.cpp:841: 
tablet_id=72075186224037888;self_id=[1:947:2806];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2026-01-08T15:34:54.665864Z node 1 :TX_COLUMNSHARD WARN: log.cpp:841: tablet_id=72075186224037888;self_id=[1:947:2806];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2026-01-08T15:34:54.665977Z node 1 :TX_COLUMNSHARD WARN: log.cpp:841: tablet_id=72075186224037888;self_id=[1:947:2806];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2026-01-08T15:34:54.666106Z node 1 :TX_COLUMNSHARD WARN: log.cpp:841: tablet_id=72075186224037888;self_id=[1:947:2806];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2026-01-08T15:34:54.666245Z node 1 :TX_COLUMNSHARD WARN: log.cpp:841: tablet_id=72075186224037888;self_id=[1:947:2806];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2026-01-08T15:34:54.666489Z node 1 :TX_COLUMNSHARD WARN: log.cpp:841: tablet_id=72075186224037888;self_id=[1:947:2806];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2026-01-08T15:34:54.666626Z node 1 :TX_COLUMNSHARD WARN: log.cpp:841: tablet_id=72075186224037888;self_id=[1:947:2806];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2026-01-08T15:34:54.666773Z node 1 :TX_COLUMNSHARD WARN: log.cpp:841: tablet_id=72075186224037888;self_id=[1:947:2806];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2026-01-08T15:34:54.666890Z node 1 :TX_COLUMNSHARD WARN: log.cpp:841: tablet_id=72075186224037888;self_id=[1:947:2806];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2026-01-08T15:34:54.666996Z node 1 :TX_COLUMNSHARD WARN: log.cpp:841: tablet_id=72075186224037888;self_id=[1:947:2806];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2026-01-08T15:34:54.667152Z node 1 :TX_COLUMNSHARD WARN: log.cpp:841: tablet_id=72075186224037888;self_id=[1:947:2806];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CopyBlobIdsToV2; 2026-01-08T15:34:54.667290Z node 1 :TX_COLUMNSHARD WARN: log.cpp:841: tablet_id=72075186224037888;self_id=[1:947:2806];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreAppearanceSnapshot; 2026-01-08T15:34:54.692892Z node 1 :TX_TIERING INFO: log.cpp:841: fline=manager.cpp:140;event=start_subscribing_metadata; 2026-01-08T15:34:54.696370Z node 1 :TX_COLUMNSHARD WARN: log.cpp:841: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Granules;id=1; 2026-01-08T15:34:54.696482Z node 1 :TX_COLUMNSHARD WARN: log.cpp:841: 
tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=Chunks;id=Chunks; 2026-01-08T15:34:54.696615Z node 1 :TX_COLUMNSHARD WARN: log.cpp:841: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Chunks;id=2; 2026-01-08T15:34:54.696661Z node 1 :TX_COLUMNSHARD WARN: log.cpp:841: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=TablesCleaner;id=TablesCleaner; 2026-01-08T15:34:54.696988Z node 1 :TX_COLUMNSHARD WARN: log.cpp:841: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=TablesCleaner;id=4; 2026-01-08T15:34:54.697047Z node 1 :TX_COLUMNSHARD WARN: log.cpp:841: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanGranuleId;id=CleanGranuleId; 2026-01-08T15:34:54.697179Z node 1 :TX_COLUMNSHARD WARN: log.cpp:841: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanGranuleId;id=6; 2026-01-08T15:34:54.697234Z node 1 :TX_COLUMNSHARD WARN: log.cpp:841: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=GCCountersNormalizer;id=GCCountersNormalizer; 2026-01-08T15:34:54.697319Z node 1 :TX_COLUMNSHARD WARN: log.cpp:841: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=GCCountersNormalizer;id=9; 2026-01-08T15:34:54.697379Z node 1 :TX_COLUMNSHARD WARN: log.cpp:841: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=SyncPortionFromChunks;id=SyncPortionFromChunks; 2026-01-08T15:34:54.697432Z node 1 :TX_COLUMNSHARD WARN: log.cpp:841: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=SyncPortionFromChunks;id=11; 2026-01-08T15:34:54.697500Z node 1 :TX_COLUMNSHARD WARN: log.cpp:841: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV1Chunks_V2;id=RestoreV1Chunks_V2; 2026-01-08T15:34:54.697726Z node 1 :TX_COLUMNSHARD WARN: log.cpp:841: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV1Chunks_V2;id=15; 2026-01-08T15:34:54.697780Z node 1 :TX_COLUMNSHARD WARN: log.cpp:841: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV2Chunks;id=RestoreV2Chunks; 2026-01-08T15:34:54.697914Z node 1 :TX_COLUMNSHARD WARN: log.cpp:841: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV2Chunks;id=16; 2026-01-08T15:34:54.697959Z node 1 :TX_COLUMNSHARD WARN: log.cpp:841: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanDeprecatedSnapshot;id=CleanDeprecatedSnapshot; 2026-01-08T15:34:54.698012Z node 1 :TX_COLUMNSHARD WARN: log.cpp:841: 
tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanDeprecatedSnapshot;id=17; 2026-01-08T15:34:54.698063Z node 1 :TX_COLUMNSHARD WARN: log.cpp:841: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV0ChunksMeta;id=RestoreV0ChunksMeta; 2026-01-08T15:34:54.698113Z node 1 :TX_COLUMNSHARD WARN: log.cpp:841: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV0ChunksMeta;id=18; 2026-01-08T15:34:54.698149Z node 1 :TX_COLUMNSHARD WARN: log.cpp:841: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CopyBlobIdsToV2;id=CopyBlobIdsToV2; 2026-01-08T15:34:54.698507Z node 1 :TX_COLUMNSHARD WARN: log.cpp:841: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CopyBlobIdsToV2;id=19; 2026-01-08T ... ted at tablet 72075186224037888 2026-01-08T15:36:33.140540Z node 1 :TX_TIERING DEBUG: log.cpp:841: fline=manager.cpp:217;event=configs_updated;configs=TIERS={id=/Root/tier2;has_config=1}{id=/Root/tier1;has_config=0};SECRETS={USId:root@builtin:accessKey;USId:root@builtin:secretKey;}; 2026-01-08T15:36:33.140636Z node 1 :TX_TIERING DEBUG: log.cpp:841: fline=manager.cpp:101;component=tiering_manager;event=object_deleted;name=/Root/tier1; 2026-01-08T15:36:33.140674Z node 1 :TX_TIERING INFO: log.cpp:841: fline=manager.cpp:295;event=update_tier_config;name=/Root/tier1;tablet=72075186224037889;has_config=0; 2026-01-08T15:36:33.140711Z node 1 :TX_TIERING DEBUG: log.cpp:841: fline=manager.cpp:208;event=skip_tier_manager_reloading;tier=/Root/tier1;has_secrets=1;found_tier_config=1; 2026-01-08T15:36:33.140737Z node 1 :TX_TIERING DEBUG: log.h:466: manager.cpp:154 :Restarting tier '/Root/tier2' at tablet 72075186224037889 2026-01-08T15:36:33.140768Z node 1 :TX_TIERING DEBUG: log.h:466: manager.cpp:162 :Tier '/Root/tier2' stopped at tablet 72075186224037889 2026-01-08T15:36:33.140806Z node 1 :TX_TIERING DEBUG: log.h:466: manager.cpp:174 :Tier '/Root/tier2' started at tablet 72075186224037889 2026-01-08T15:36:33.140850Z node 1 :TX_TIERING DEBUG: log.cpp:841: fline=manager.cpp:217;event=configs_updated;configs=TIERS={id=/Root/tier2;has_config=1}{id=/Root/tier1;has_config=0};SECRETS={USId:root@builtin:accessKey;USId:root@builtin:secretKey;}; 2026-01-08T15:36:33.140911Z node 1 :TX_TIERING DEBUG: log.cpp:841: fline=manager.cpp:101;component=tiering_manager;event=object_deleted;name=/Root/tier1; 2026-01-08T15:36:33.140943Z node 1 :TX_TIERING INFO: log.cpp:841: fline=manager.cpp:295;event=update_tier_config;name=/Root/tier1;tablet=72075186224037890;has_config=0; 2026-01-08T15:36:33.140972Z node 1 :TX_TIERING DEBUG: log.cpp:841: fline=manager.cpp:208;event=skip_tier_manager_reloading;tier=/Root/tier1;has_secrets=1;found_tier_config=1; 2026-01-08T15:36:33.140998Z node 1 :TX_TIERING DEBUG: log.h:466: manager.cpp:154 :Restarting tier '/Root/tier2' at tablet 72075186224037890 2026-01-08T15:36:33.141026Z node 1 :TX_TIERING DEBUG: log.h:466: manager.cpp:162 :Tier '/Root/tier2' stopped at tablet 72075186224037890 2026-01-08T15:36:33.141061Z node 1 :TX_TIERING DEBUG: log.h:466: manager.cpp:174 :Tier '/Root/tier2' started at tablet 72075186224037890 2026-01-08T15:36:33.141103Z node 1 :TX_TIERING DEBUG: log.cpp:841: 
fline=manager.cpp:217;event=configs_updated;configs=TIERS={id=/Root/tier2;has_config=1}{id=/Root/tier1;has_config=0};SECRETS={USId:root@builtin:accessKey;USId:root@builtin:secretKey;}; 2026-01-08T15:36:33.141309Z node 1 :TX_TIERING DEBUG: log.cpp:841: fline=manager.cpp:101;component=tiering_manager;event=object_deleted;name=/Root/tier1; 2026-01-08T15:36:33.141343Z node 1 :TX_TIERING INFO: log.cpp:841: fline=manager.cpp:295;event=update_tier_config;name=/Root/tier1;tablet=0;has_config=0; 2026-01-08T15:36:33.141373Z node 1 :TX_TIERING DEBUG: log.cpp:841: fline=manager.cpp:208;event=skip_tier_manager_reloading;tier=/Root/tier1;has_secrets=1;found_tier_config=1; 2026-01-08T15:36:33.141401Z node 1 :TX_TIERING DEBUG: log.h:466: manager.cpp:154 :Restarting tier '/Root/tier2' at tablet 0 2026-01-08T15:36:33.141431Z node 1 :TX_TIERING DEBUG: log.h:466: manager.cpp:162 :Tier '/Root/tier2' stopped at tablet 0 2026-01-08T15:36:33.141469Z node 1 :TX_TIERING DEBUG: log.h:466: manager.cpp:174 :Tier '/Root/tier2' started at tablet 0 2026-01-08T15:36:33.141509Z node 1 :TX_TIERING DEBUG: log.cpp:841: fline=manager.cpp:217;event=configs_updated;configs=TIERS={id=/Root/tier2;has_config=1}{id=/Root/tier1;has_config=0};SECRETS={USId:root@builtin:accessKey;USId:root@builtin:secretKey;}; 2026-01-08T15:36:33.141719Z node 1 :TX_TIERING DEBUG: log.cpp:841: tablet_id=72075186224037888;self_id=[1:947:2806];ev=NKikimr::NColumnShard::TEvPrivate::TEvTieringModified;fline=tiering.cpp:244;event=refresh_tiering;has_tiering=0;tiers=0;had_tiering_before=1; 2026-01-08T15:36:33.141820Z node 1 :TX_TIERING DEBUG: log.cpp:841: tablet_id=72075186224037889;self_id=[1:953:2808];ev=NKikimr::NColumnShard::TEvPrivate::TEvTieringModified;fline=tiering.cpp:244;event=refresh_tiering;has_tiering=0;tiers=0;had_tiering_before=1; 2026-01-08T15:36:33.141944Z node 1 :TX_TIERING DEBUG: log.cpp:841: tablet_id=72075186224037890;self_id=[1:961:2814];ev=NKikimr::NColumnShard::TEvPrivate::TEvTieringModified;fline=tiering.cpp:244;event=refresh_tiering;has_tiering=0;tiers=0;had_tiering_before=1; REQUEST=DROP EXTERNAL DATA SOURCE `/Root/tier1`;RESULT=;EXPECTATION=1 FINISHED_REQUEST=DROP EXTERNAL DATA SOURCE `/Root/tier1`;EXPECTATION=1;WAITING=1 REQUEST=DROP EXTERNAL DATA SOURCE `/Root/tier2`;EXPECTATION=1;WAITING=1 2026-01-08T15:36:45.194578Z node 1 :TX_TIERING DEBUG: log.cpp:841: fline=fetcher.h:149;event=object_deleted;path=/Root/tier2; 2026-01-08T15:36:45.195272Z node 1 :TX_TIERING DEBUG: log.cpp:841: fline=fetcher.h:149;event=object_deleted;path=/Root/tier2; 2026-01-08T15:36:45.195446Z node 1 :TX_TIERING DEBUG: log.cpp:841: fline=manager.cpp:101;component=tiering_manager;event=object_deleted;name=/Root/tier2; 2026-01-08T15:36:45.195511Z node 1 :TX_TIERING INFO: log.cpp:841: fline=manager.cpp:295;event=update_tier_config;name=/Root/tier2;tablet=72075186224037888;has_config=0; 2026-01-08T15:36:45.195565Z node 1 :TX_TIERING DEBUG: log.cpp:841: fline=manager.cpp:208;event=skip_tier_manager_reloading;tier=/Root/tier1;has_secrets=1;found_tier_config=1; 2026-01-08T15:36:45.195613Z node 1 :TX_TIERING DEBUG: log.cpp:841: fline=manager.cpp:208;event=skip_tier_manager_reloading;tier=/Root/tier2;has_secrets=1;found_tier_config=1; 2026-01-08T15:36:45.195708Z node 1 :TX_TIERING DEBUG: log.cpp:841: fline=manager.cpp:217;event=configs_updated;configs=TIERS={id=/Root/tier2;has_config=0}{id=/Root/tier1;has_config=0};SECRETS={USId:root@builtin:accessKey;USId:root@builtin:secretKey;}; 2026-01-08T15:36:45.195772Z node 1 :TX_TIERING DEBUG: log.cpp:841: 
fline=fetcher.h:149;event=object_deleted;path=/Root/tier2; 2026-01-08T15:36:45.195815Z node 1 :TX_TIERING DEBUG: log.cpp:841: fline=fetcher.h:149;event=object_deleted;path=/Root/tier2; 2026-01-08T15:36:45.196106Z node 1 :TX_TIERING DEBUG: log.cpp:841: fline=manager.cpp:101;component=tiering_manager;event=object_deleted;name=/Root/tier2; 2026-01-08T15:36:45.196147Z node 1 :TX_TIERING INFO: log.cpp:841: fline=manager.cpp:295;event=update_tier_config;name=/Root/tier2;tablet=0;has_config=0; 2026-01-08T15:36:45.196182Z node 1 :TX_TIERING DEBUG: log.cpp:841: fline=manager.cpp:208;event=skip_tier_manager_reloading;tier=/Root/tier1;has_secrets=1;found_tier_config=1; 2026-01-08T15:36:45.196211Z node 1 :TX_TIERING DEBUG: log.cpp:841: fline=manager.cpp:208;event=skip_tier_manager_reloading;tier=/Root/tier2;has_secrets=1;found_tier_config=1; 2026-01-08T15:36:45.196251Z node 1 :TX_TIERING DEBUG: log.cpp:841: fline=manager.cpp:217;event=configs_updated;configs=TIERS={id=/Root/tier2;has_config=0}{id=/Root/tier1;has_config=0};SECRETS={USId:root@builtin:accessKey;USId:root@builtin:secretKey;}; 2026-01-08T15:36:45.196527Z node 1 :TX_TIERING DEBUG: log.cpp:841: tablet_id=72075186224037888;self_id=[1:947:2806];ev=NKikimr::NColumnShard::TEvPrivate::TEvTieringModified;fline=tiering.cpp:244;event=refresh_tiering;has_tiering=0;tiers=0;had_tiering_before=0; 2026-01-08T15:36:45.196589Z node 1 :TX_TIERING DEBUG: log.cpp:841: fline=manager.cpp:101;component=tiering_manager;event=object_deleted;name=/Root/tier2; 2026-01-08T15:36:45.196621Z node 1 :TX_TIERING INFO: log.cpp:841: fline=manager.cpp:295;event=update_tier_config;name=/Root/tier2;tablet=72075186224037889;has_config=0; 2026-01-08T15:36:45.196655Z node 1 :TX_TIERING DEBUG: log.cpp:841: fline=manager.cpp:208;event=skip_tier_manager_reloading;tier=/Root/tier1;has_secrets=1;found_tier_config=1; 2026-01-08T15:36:45.196682Z node 1 :TX_TIERING DEBUG: log.cpp:841: fline=manager.cpp:208;event=skip_tier_manager_reloading;tier=/Root/tier2;has_secrets=1;found_tier_config=1; 2026-01-08T15:36:45.196728Z node 1 :TX_TIERING DEBUG: log.cpp:841: fline=manager.cpp:217;event=configs_updated;configs=TIERS={id=/Root/tier2;has_config=0}{id=/Root/tier1;has_config=0};SECRETS={USId:root@builtin:accessKey;USId:root@builtin:secretKey;}; 2026-01-08T15:36:45.196764Z node 1 :TX_TIERING DEBUG: log.cpp:841: fline=manager.cpp:101;component=tiering_manager;event=object_deleted;name=/Root/tier2; 2026-01-08T15:36:45.196791Z node 1 :TX_TIERING INFO: log.cpp:841: fline=manager.cpp:295;event=update_tier_config;name=/Root/tier2;tablet=72075186224037890;has_config=0; 2026-01-08T15:36:45.196820Z node 1 :TX_TIERING DEBUG: log.cpp:841: fline=manager.cpp:208;event=skip_tier_manager_reloading;tier=/Root/tier1;has_secrets=1;found_tier_config=1; 2026-01-08T15:36:45.196848Z node 1 :TX_TIERING DEBUG: log.cpp:841: fline=manager.cpp:208;event=skip_tier_manager_reloading;tier=/Root/tier2;has_secrets=1;found_tier_config=1; 2026-01-08T15:36:45.196892Z node 1 :TX_TIERING DEBUG: log.cpp:841: fline=manager.cpp:217;event=configs_updated;configs=TIERS={id=/Root/tier2;has_config=0}{id=/Root/tier1;has_config=0};SECRETS={USId:root@builtin:accessKey;USId:root@builtin:secretKey;}; 2026-01-08T15:36:45.197158Z node 1 :TX_TIERING DEBUG: log.cpp:841: tablet_id=72075186224037889;self_id=[1:953:2808];ev=NKikimr::NColumnShard::TEvPrivate::TEvTieringModified;fline=tiering.cpp:244;event=refresh_tiering;has_tiering=0;tiers=0;had_tiering_before=0; 2026-01-08T15:36:45.197248Z node 1 :TX_TIERING DEBUG: log.cpp:841: 
tablet_id=72075186224037890;self_id=[1:961:2814];ev=NKikimr::NColumnShard::TEvPrivate::TEvTieringModified;fline=tiering.cpp:244;event=refresh_tiering;has_tiering=0;tiers=0;had_tiering_before=0; REQUEST=DROP EXTERNAL DATA SOURCE `/Root/tier2`;RESULT=;EXPECTATION=1 2026-01-08T15:36:46.174610Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7747: Cannot get console configs 2026-01-08T15:36:46.174695Z node 1 :IMPORT WARN: schemeshard_import.cpp:440: Table profiles were not loaded FINISHED_REQUEST=DROP EXTERNAL DATA SOURCE `/Root/tier2`;EXPECTATION=1;WAITING=1 FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=1600;manager.cpp:162 :Tier '/Root/tier1' stopped at tablet 72075186224037888 FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=1600;manager.cpp:162 :Tier '/Root/tier2' stopped at tablet 72075186224037888 FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=1600;manager.cpp:162 :Tier '/Root/tier2' stopped at tablet 72075186224037888 FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=1600;manager.cpp:162 :Tier '/Root/tier1' stopped at tablet 72075186224037888 FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=1600;manager.cpp:162 :Tier '/Root/tier1' stopped at tablet 72075186224037889 FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=1600;manager.cpp:162 :Tier '/Root/tier2' stopped at tablet 72075186224037889 FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=1600;manager.cpp:162 :Tier '/Root/tier2' stopped at tablet 72075186224037889 FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=1600;manager.cpp:162 :Tier '/Root/tier1' stopped at tablet 72075186224037889 FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=1600;manager.cpp:162 :Tier '/Root/tier1' stopped at tablet 72075186224037890 FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=1600;manager.cpp:162 :Tier '/Root/tier2' stopped at tablet 72075186224037890 FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=1600;manager.cpp:162 :Tier '/Root/tier2' stopped at tablet 72075186224037890 FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=1600;manager.cpp:162 :Tier '/Root/tier1' stopped at tablet 72075186224037890 FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=1600;manager.cpp:162 :Tier '/Root/tier1' stopped at tablet 0 FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=1600;manager.cpp:162 :Tier '/Root/tier2' stopped at tablet 0 FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=1600;manager.cpp:162 :Tier '/Root/tier2' stopped at tablet 0 FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=1600;manager.cpp:162 :Tier '/Root/tier1' stopped at tablet 0 |88.6%| [TM] {BAZEL_UPLOAD} ydb/core/tx/tiering/ut/unittest >> TSchemeShardExportToFsTests::FsExportWithMultipleTables [GOOD] >> TExportToS3Tests::ExportTableWithUniqueIndex [GOOD] >> TExportToS3Tests::TopicsExport [GOOD] >> TExportToS3Tests::ChecksumsWithCompression [GOOD] >> TExportToS3Tests::TopicsWithPermissionsExport >> KqpPg::TableDeleteAllData-useSink [GOOD] >> KqpPg::PgUpdateCompoundKey+useSink >> TExportToS3Tests::Changefeeds >> EraseRowsTests::ConditionalEraseRowsShouldEraseOnUint64Seconds >> EraseRowsTests::ConditionalEraseRowsShouldEraseOnPgInt8Seconds >> TExportToS3Tests::ReplicatedTableExport [GOOD] >> TConsoleConfigSubscriptionTests::TestNotificationForRestartedClient [GOOD] >> TConsoleConfigSubscriptionTests::TestAddSubscriptionIdempotency >> EraseRowsTests::ConditionalEraseRowsShouldEraseOnDyNumberSeconds [GOOD] >> EraseRowsTests::ConditionalEraseRowsShouldEraseOnDyNumberMilliSeconds >> TExportToS3Tests::ShouldSucceedOnConcurrentImport [GOOD] >> DistributedEraseTests::ConditionalEraseRowsShouldErase [GOOD] >> 
DistributedEraseTests::ConditionalEraseRowsCheckLimits >> TExportToS3Tests::ShouldRetryAtFinalStage >> TConsoleTests::TestGetUnknownTenantStatus [GOOD] >> TConsoleTests::TestGetUnknownTenantStatusExtSubdomain ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_export/unittest >> TSchemeShardExportToFsTests::FsExportWithMultipleTables [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2143] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2143] Leader for TabletID 72057594046678944 is [1:129:2154] sender: [1:130:2058] recipient: [1:112:2143] 2026-01-08T15:36:37.437462Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7938: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2026-01-08T15:36:37.437544Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7966: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2026-01-08T15:36:37.437584Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7852: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2026-01-08T15:36:37.437631Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7868: OperationsProcessing config: using default configuration 2026-01-08T15:36:37.437677Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7874: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2026-01-08T15:36:37.437707Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7874: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2026-01-08T15:36:37.437764Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7998: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2026-01-08T15:36:37.437834Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_shred_manager.cpp:39: [RootShredManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, ShredInterval# 604800.000000s, ShredBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2026-01-08T15:36:37.438714Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:8069: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2026-01-08T15:36:37.438995Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2026-01-08T15:36:37.527475Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7755: Cannot subscribe to console configs 2026-01-08T15:36:37.527536Z node 1 :IMPORT WARN: schemeshard_import.cpp:440: Table profiles were not loaded 2026-01-08T15:36:37.540093Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2026-01-08T15:36:37.540407Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2026-01-08T15:36:37.540617Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2026-01-08T15:36:37.552467Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2026-01-08T15:36:37.552858Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:464: Clear TempDirsState with 
owners number: 0 2026-01-08T15:36:37.553568Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1383: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2026-01-08T15:36:37.554206Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2026-01-08T15:36:37.557302Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:160: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2026-01-08T15:36:37.557521Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_shred_manager.cpp:84: [RootShredManager] Stop 2026-01-08T15:36:37.558610Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2026-01-08T15:36:37.558679Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2026-01-08T15:36:37.558815Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2026-01-08T15:36:37.558884Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2026-01-08T15:36:37.558930Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:213: TTxServerlessStorageBilling.Complete 2026-01-08T15:36:37.559088Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:7111: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2026-01-08T15:36:37.706485Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot" OperationType: ESchemeOpMkDir MkDir { Name: ".sys" } Internal: true FailOnExist: false } TxId: 281474976710657 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:36:37.707432Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "auth_permissions" Type: EAuthPermissions } } TxId: 281474976710658 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:36:37.707574Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "auth_effective_permissions" Type: EAuthEffectivePermissions } } TxId: 281474976710659 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:36:37.707682Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "auth_users" Type: EAuthUsers } } TxId: 
281474976710660 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:36:37.707749Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "pg_tables" Type: EPgTables } } TxId: 281474976710661 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:36:37.707827Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "ds_pdisks" Type: EPDisks } } TxId: 281474976710662 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:36:37.707894Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "auth_group_members" Type: EAuthGroupMembers } } TxId: 281474976710663 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:36:37.707978Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "top_partitions_one_minute" Type: ETopPartitionsByCpuOneMinute } } TxId: 281474976710664 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:36:37.708070Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "top_queries_by_request_units_one_hour" Type: ETopQueriesByRequestUnitsOneHour } } TxId: 281474976710665 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:36:37.708180Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "ds_vslots" Type: EVSlots } } TxId: 281474976710666 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:36:37.708273Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "top_partitions_by_tli_one_hour" Type: ETopPartitionsByTliOneHour } } TxId: 281474976710667 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:36:37.708361Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: 
SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "nodes" Type: ENodes } } TxId: 281474976710668 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:36:37.708433Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "top_queries_by_cpu_time_one_hour" Type: ETopQueriesByCpuTimeOneHour } } TxId: 281474976710669 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:36:37.708497Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "top_queries_by_read_bytes_one_hour" Type: ETopQueriesByReadBytesOneHour } } TxId: 281474976710670 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:36:37.708560Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "top_queries_by_read_bytes_one_minute" Type: ETopQueriesByR ... rd: 72057594046678944, txId: 281474976710758, pathId: [OwnerId: 72057594046678944, LocalPathId: 42], version: 1 2026-01-08T15:36:48.775981Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:611: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 42] was 4 2026-01-08T15:36:48.776045Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1679: TOperation IsReadyToNotify, TxId: 281474976710758, ready parts: 0/2, is published: true 2026-01-08T15:36:48.776407Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:665: Send tablet strongly msg operationId: 281474976710758:1 from tablet: 72057594046678944 to tablet: 72057594037968897 cookie: 72057594046678944:4 msg type: 268697601 2026-01-08T15:36:48.776548Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1824: TOperation RegisterRelationByTabletId, TxId: 281474976710758, partId: 1, tablet: 72057594037968897 2026-01-08T15:36:48.776591Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1852: TOperation RegisterRelationByShardIdx, TxId: 281474976710758, shardIdx: 72057594046678944:4, partId: 1 2026-01-08T15:36:48.776876Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:665: Send tablet strongly msg operationId: 281474976710758:0 from tablet: 72057594046678944 to tablet: 72057594037968897 cookie: 72057594046678944:3 msg type: 268697601 2026-01-08T15:36:48.777112Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1813: TOperation RegisterRelationByTabletId collision in routes has found, TxId# 281474976710758, partId# 0, prevPartId# 1, tablet# 72057594037968897, guessDefaultRootHive# yes, prevTx# WorkingDir: "/MyRoot/export-103" OperationType: ESchemeOpCreateTable CreateTable { 
Name: "1" CopyFromTable: "/MyRoot/Table2" OmitFollowers: true IsBackup: true AllowUnderSameOperation: false OmitIndexes: true } FailOnExist: true, newTx# WorkingDir: "/MyRoot/export-103" OperationType: ESchemeOpCreateTable CreateTable { Name: "0" CopyFromTable: "/MyRoot/Table1" OmitFollowers: true IsBackup: true AllowUnderSameOperation: false OmitIndexes: true } FailOnExist: true 2026-01-08T15:36:48.777173Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1852: TOperation RegisterRelationByShardIdx, TxId: 281474976710758, shardIdx: 72057594046678944:3, partId: 0 2026-01-08T15:36:48.777751Z node 7 :HIVE INFO: tablet_helpers.cpp:1293: [72057594037968897] TEvCreateTablet, msg: Owner: 72057594046678944 OwnerIdx: 4 TabletType: DataShard FollowerGroups { } ObjectDomain { SchemeShard: 72057594046678944 PathId: 1 } ObjectId: 42 BindedChannels { StoragePoolName: "pool-1" } BindedChannels { StoragePoolName: "pool-1" } BindedChannels { StoragePoolName: "pool-1" } AllowedDomains { SchemeShard: 72057594046678944 PathId: 1 } FAKEHIVE 72057594037968897 TEvCreateTablet Owner: 72057594046678944 OwnerIdx: 4 TabletType: DataShard FollowerGroups { } ObjectDomain { SchemeShard: 72057594046678944 PathId: 1 } ObjectId: 42 BindedChannels { StoragePoolName: "pool-1" } BindedChannels { StoragePoolName: "pool-1" } BindedChannels { StoragePoolName: "pool-1" } AllowedDomains { SchemeShard: 72057594046678944 PathId: 1 } 2026-01-08T15:36:48.777984Z node 7 :HIVE INFO: tablet_helpers.cpp:1357: [72057594037968897] TEvCreateTablet, Owner 72057594046678944, OwnerIdx 4, type DataShard, boot OK, tablet id 72075186233409548 2026-01-08T15:36:48.778299Z node 7 :HIVE INFO: tablet_helpers.cpp:1293: [72057594037968897] TEvCreateTablet, msg: Owner: 72057594046678944 OwnerIdx: 3 TabletType: DataShard FollowerGroups { } ObjectDomain { SchemeShard: 72057594046678944 PathId: 1 } ObjectId: 41 BindedChannels { StoragePoolName: "pool-1" } BindedChannels { StoragePoolName: "pool-1" } BindedChannels { StoragePoolName: "pool-1" } AllowedDomains { SchemeShard: 72057594046678944 PathId: 1 } FAKEHIVE 72057594037968897 TEvCreateTablet Owner: 72057594046678944 OwnerIdx: 3 TabletType: DataShard FollowerGroups { } ObjectDomain { SchemeShard: 72057594046678944 PathId: 1 } ObjectId: 41 BindedChannels { StoragePoolName: "pool-1" } BindedChannels { StoragePoolName: "pool-1" } BindedChannels { StoragePoolName: "pool-1" } AllowedDomains { SchemeShard: 72057594046678944 PathId: 1 } 2026-01-08T15:36:48.778468Z node 7 :HIVE INFO: tablet_helpers.cpp:1357: [72057594037968897] TEvCreateTablet, Owner 72057594046678944, OwnerIdx 3, type DataShard, boot OK, tablet id 72075186233409549 2026-01-08T15:36:48.778866Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6307: Handle TEvCreateTabletReply at schemeshard: 72057594046678944 message: Status: OK Owner: 72057594046678944 OwnerIdx: 4 TabletID: 72075186233409548 Origin: 72057594037968897 2026-01-08T15:36:48.778927Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1866: TOperation FindRelatedPartByShardIdx, TxId: 281474976710758, shardIdx: 72057594046678944:4, partId: 1 2026-01-08T15:36:48.779096Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:628: TTxOperationReply execute, operationId: 281474976710758:1, at schemeshard: 72057594046678944, message: Status: OK Owner: 72057594046678944 OwnerIdx: 4 TabletID: 72075186233409548 Origin: 72057594037968897 2026-01-08T15:36:48.779166Z node 7 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:177: TCreateParts opId# 
281474976710758:1 HandleReply TEvCreateTabletReply, at tabletId: 72057594046678944 2026-01-08T15:36:48.779255Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:180: TCreateParts opId# 281474976710758:1 HandleReply TEvCreateTabletReply, message: Status: OK Owner: 72057594046678944 OwnerIdx: 4 TabletID: 72075186233409548 Origin: 72057594037968897 2026-01-08T15:36:48.779348Z node 7 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2700: Change state for txid 281474976710758:1 2 -> 3 2026-01-08T15:36:48.779756Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6307: Handle TEvCreateTabletReply at schemeshard: 72057594046678944 message: Status: OK Owner: 72057594046678944 OwnerIdx: 3 TabletID: 72075186233409549 Origin: 72057594037968897 2026-01-08T15:36:48.779796Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1866: TOperation FindRelatedPartByShardIdx, TxId: 281474976710758, shardIdx: 72057594046678944:3, partId: 0 2026-01-08T15:36:48.779902Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:628: TTxOperationReply execute, operationId: 281474976710758:0, at schemeshard: 72057594046678944, message: Status: OK Owner: 72057594046678944 OwnerIdx: 3 TabletID: 72075186233409549 Origin: 72057594037968897 2026-01-08T15:36:48.779941Z node 7 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:177: TCreateParts opId# 281474976710758:0 HandleReply TEvCreateTabletReply, at tabletId: 72057594046678944 2026-01-08T15:36:48.779994Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:180: TCreateParts opId# 281474976710758:0 HandleReply TEvCreateTabletReply, message: Status: OK Owner: 72057594046678944 OwnerIdx: 3 TabletID: 72075186233409549 Origin: 72057594037968897 2026-01-08T15:36:48.780049Z node 7 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2700: Change state for txid 281474976710758:0 2 -> 3 2026-01-08T15:36:48.782227Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 281474976710758 2026-01-08T15:36:48.782486Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 281474976710758 2026-01-08T15:36:48.784675Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 281474976710758 2026-01-08T15:36:48.785002Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 281474976710758 2026-01-08T15:36:48.786787Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:656: TTxOperationReply complete, operationId: 281474976710758:1, at schemeshard: 72057594046678944 2026-01-08T15:36:48.787358Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:494: TTxOperationProgress Execute, operationId: 281474976710758:1, at schemeshard: 72057594046678944 2026-01-08T15:36:48.787429Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_copy_table.cpp:70: TCopyTable TConfigureParts operationId# 281474976710758:1 ProgressState at tablet# 72057594046678944 2026-01-08T15:36:48.787490Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_copy_table.cpp:102: TCopyTable TConfigureParts operationId# 281474976710758:1 Propose modify scheme on dstDatashard# 72075186233409548 idx# 72057594046678944:4 srcDatashard# 
72075186233409547 idx# 72057594046678944:2 operationId# 281474976710758:1 seqNo# 2:3 at tablet# 72057594046678944 2026-01-08T15:36:48.788186Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:656: TTxOperationReply complete, operationId: 281474976710758:0, at schemeshard: 72057594046678944 2026-01-08T15:36:48.788519Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:494: TTxOperationProgress Execute, operationId: 281474976710758:0, at schemeshard: 72057594046678944 2026-01-08T15:36:48.788568Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_copy_table.cpp:70: TCopyTable TConfigureParts operationId# 281474976710758:0 ProgressState at tablet# 72057594046678944 2026-01-08T15:36:48.788614Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_copy_table.cpp:102: TCopyTable TConfigureParts operationId# 281474976710758:0 Propose modify scheme on dstDatashard# 72075186233409549 idx# 72057594046678944:3 srcDatashard# 72075186233409546 idx# 72057594046678944:1 operationId# 281474976710758:0 seqNo# 2:4 at tablet# 72057594046678944 2026-01-08T15:36:48.793650Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:665: Send tablet strongly msg operationId: 281474976710758:1 from tablet: 72057594046678944 to tablet: 72075186233409548 cookie: 72057594046678944:4 msg type: 269549568 2026-01-08T15:36:48.793811Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:665: Send tablet strongly msg operationId: 281474976710758:1 from tablet: 72057594046678944 to tablet: 72075186233409547 cookie: 72057594046678944:2 msg type: 269549568 2026-01-08T15:36:48.793895Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1824: TOperation RegisterRelationByTabletId, TxId: 281474976710758, partId: 1, tablet: 72075186233409548 2026-01-08T15:36:48.793925Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1824: TOperation RegisterRelationByTabletId, TxId: 281474976710758, partId: 1, tablet: 72075186233409547 2026-01-08T15:36:48.794567Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:665: Send tablet strongly msg operationId: 281474976710758:0 from tablet: 72057594046678944 to tablet: 72075186233409549 cookie: 72057594046678944:3 msg type: 269549568 2026-01-08T15:36:48.794661Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:665: Send tablet strongly msg operationId: 281474976710758:0 from tablet: 72057594046678944 to tablet: 72075186233409546 cookie: 72057594046678944:1 msg type: 269549568 2026-01-08T15:36:48.794726Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1824: TOperation RegisterRelationByTabletId, TxId: 281474976710758, partId: 0, tablet: 72075186233409549 2026-01-08T15:36:48.794753Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1824: TOperation RegisterRelationByTabletId, TxId: 281474976710758, partId: 0, tablet: 72075186233409546 |88.6%| [TM] {BAZEL_UPLOAD} ydb/core/tx/schemeshard/ut_export/unittest >> TExportToS3Tests::IndexMaterializationTwoTables [GOOD] >> EraseRowsTests::ConditionalEraseRowsShouldEraseOnPgInt4Seconds [GOOD] >> EraseRowsTests::ConditionalEraseRowsShouldEraseOnPgInt8Microseconds ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_export/unittest >> TExportToS3Tests::ExportTableWithUniqueIndex [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2143] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] 
recipient: [1:112:2143] Leader for TabletID 72057594046678944 is [1:129:2154] sender: [1:130:2058] recipient: [1:112:2143] 2026-01-08T15:36:34.302459Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7938: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2026-01-08T15:36:34.302654Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7966: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2026-01-08T15:36:34.302726Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7852: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2026-01-08T15:36:34.302787Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7868: OperationsProcessing config: using default configuration 2026-01-08T15:36:34.302833Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7874: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2026-01-08T15:36:34.302863Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7874: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2026-01-08T15:36:34.302922Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7998: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2026-01-08T15:36:34.302987Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_shred_manager.cpp:39: [RootShredManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, ShredInterval# 604800.000000s, ShredBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2026-01-08T15:36:34.304119Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:8069: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2026-01-08T15:36:34.304454Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2026-01-08T15:36:34.433677Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7755: Cannot subscribe to console configs 2026-01-08T15:36:34.433741Z node 1 :IMPORT WARN: schemeshard_import.cpp:440: Table profiles were not loaded 2026-01-08T15:36:34.445446Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2026-01-08T15:36:34.445801Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2026-01-08T15:36:34.446056Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2026-01-08T15:36:34.460366Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2026-01-08T15:36:34.460827Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:464: Clear TempDirsState with owners number: 0 2026-01-08T15:36:34.461559Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1383: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2026-01-08T15:36:34.463186Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2026-01-08T15:36:34.466707Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:160: TTxInitRoot DoComplete, at schemeshard: 
72057594046678944 2026-01-08T15:36:34.466956Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_shred_manager.cpp:84: [RootShredManager] Stop 2026-01-08T15:36:34.468193Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2026-01-08T15:36:34.468256Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2026-01-08T15:36:34.468507Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2026-01-08T15:36:34.468573Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2026-01-08T15:36:34.468621Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:213: TTxServerlessStorageBilling.Complete 2026-01-08T15:36:34.468764Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:7111: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2026-01-08T15:36:34.667842Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot" OperationType: ESchemeOpMkDir MkDir { Name: ".sys" } Internal: true FailOnExist: false } TxId: 281474976710657 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:36:34.668946Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "auth_permissions" Type: EAuthPermissions } } TxId: 281474976710658 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:36:34.669100Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "auth_effective_permissions" Type: EAuthEffectivePermissions } } TxId: 281474976710659 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:36:34.669220Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "auth_users" Type: EAuthUsers } } TxId: 281474976710660 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:36:34.669299Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "pg_tables" Type: EPgTables } } TxId: 281474976710661 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 
2026-01-08T15:36:34.669382Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "ds_pdisks" Type: EPDisks } } TxId: 281474976710662 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:36:34.669455Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "auth_group_members" Type: EAuthGroupMembers } } TxId: 281474976710663 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:36:34.669560Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "top_partitions_one_minute" Type: ETopPartitionsByCpuOneMinute } } TxId: 281474976710664 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:36:34.669647Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "top_queries_by_request_units_one_hour" Type: ETopQueriesByRequestUnitsOneHour } } TxId: 281474976710665 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:36:34.669751Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "ds_vslots" Type: EVSlots } } TxId: 281474976710666 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:36:34.669844Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "top_partitions_by_tli_one_hour" Type: ETopPartitionsByTliOneHour } } TxId: 281474976710667 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:36:34.669954Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "nodes" Type: ENodes } } TxId: 281474976710668 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:36:34.670020Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: 
Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "top_queries_by_cpu_time_one_hour" Type: ETopQueriesByCpuTimeOneHour } } TxId: 281474976710669 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:36:34.670125Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "top_queries_by_read_bytes_one_hour" Type: ETopQueriesByReadBytesOneHour } } TxId: 281474976710670 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:36:34.670210Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "top_queries_by_read_bytes_one_minute" Type: ETopQueriesByR ... operation id: 281474976710761:0 2026-01-08T15:36:48.983770Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5577: RemoveTx for txid 281474976710761:0 2026-01-08T15:36:48.983850Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:611: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 41] was 3 2026-01-08T15:36:48.983903Z node 7 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:1004: Publication still in progress, tx: 281474976710761, publications: 2, subscribers: 1 2026-01-08T15:36:48.983954Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:1011: Publication details: tx: 281474976710761, [OwnerId: 72057594046678944, LocalPathId: 1], 15 2026-01-08T15:36:48.984005Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:1011: Publication details: tx: 281474976710761, [OwnerId: 72057594046678944, LocalPathId: 41], 18446744073709551615 2026-01-08T15:36:48.984490Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 281474976710761 2026-01-08T15:36:48.984963Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 281474976710761 2026-01-08T15:36:48.986402Z node 7 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2026-01-08T15:36:48.986448Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 281474976710761, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2026-01-08T15:36:48.986589Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 281474976710761, path id: [OwnerId: 72057594046678944, LocalPathId: 41] 2026-01-08T15:36:48.986711Z node 7 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2026-01-08T15:36:48.986747Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [7:210:2211], at schemeshard: 72057594046678944, txId: 281474976710761, path id: 1 2026-01-08T15:36:48.986789Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [7:210:2211], at schemeshard: 72057594046678944, txId: 281474976710761, path id: 41 FAKE_COORDINATOR: Erasing txId 281474976710761 2026-01-08T15:36:48.987561Z node 7 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6274: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 15 PathOwnerId: 72057594046678944, cookie: 281474976710761 2026-01-08T15:36:48.987685Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 15 PathOwnerId: 72057594046678944, cookie: 281474976710761 2026-01-08T15:36:48.987738Z node 7 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 2, at schemeshard: 72057594046678944, txId: 281474976710761 2026-01-08T15:36:48.987801Z node 7 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 281474976710761, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], version: 15 2026-01-08T15:36:48.987866Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:611: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 4 2026-01-08T15:36:48.988421Z node 7 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6274: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 41 Version: 18446744073709551615 PathOwnerId: 72057594046678944, cookie: 281474976710761 2026-01-08T15:36:48.988504Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 41 Version: 18446744073709551615 PathOwnerId: 72057594046678944, cookie: 281474976710761 2026-01-08T15:36:48.988534Z node 7 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046678944, txId: 281474976710761 2026-01-08T15:36:48.988569Z node 7 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 281474976710761, pathId: [OwnerId: 72057594046678944, LocalPathId: 41], version: 18446744073709551615 2026-01-08T15:36:48.988602Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:611: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 41] was 2 2026-01-08T15:36:48.988674Z node 7 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 281474976710761, subscribers: 1 2026-01-08T15:36:48.988740Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:212: TTxAckPublishToSchemeBoard Notify send TEvNotifyTxCompletionResult, at schemeshard: 72057594046678944, to actorId: [7:128:2153] 2026-01-08T15:36:48.991310Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard 
Complete, at schemeshard: 72057594046678944, cookie: 281474976710761 2026-01-08T15:36:48.991509Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 281474976710761 2026-01-08T15:36:48.991583Z node 7 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:7196: Handle: TEvNotifyTxCompletionResult: txId# 281474976710761 2026-01-08T15:36:48.991626Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:7198: Message: TxId: 281474976710761 2026-01-08T15:36:48.994792Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:230: tests -- TTxNotificationSubscriber for txId 102: got EvNotifyTxCompletionResult 2026-01-08T15:36:48.994868Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:239: tests -- TTxNotificationSubscriber for txId 102: satisfy waiter [7:953:2901] TestWaitNotification: OK eventTxId 102 2026-01-08T15:36:48.995498Z node 7 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/Table/ByValue" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: true }, at schemeshard: 72057594046678944 2026-01-08T15:36:48.995875Z node 7 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/Table/ByValue" took 412us result status StatusSuccess 2026-01-08T15:36:48.996791Z node 7 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/Table/ByValue" PathDescription { Self { Name: "ByValue" PathId: 39 SchemeshardId: 72057594046678944 PathType: EPathTypeTableIndex CreateFinished: true CreateTxId: 101 CreateStep: 5000038 ParentPathId: 38 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableIndexVersion: 1 } ChildrenExist: true } Children { Name: "indexImplTable" PathId: 40 SchemeshardId: 72057594046678944 PathType: EPathTypeTable CreateFinished: true CreateTxId: 101 CreateStep: 5000038 ParentPathId: 39 PathState: EPathStateCreate Owner: "root@builtin" ACL: "" PathSubType: EPathSubTypeSyncIndexImplTable Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 1 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 39 PathsLimit: 10000 ShardsInside: 3 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 
MaxColumnTableColumns: 10000 } } TableIndex { Name: "ByValue" LocalPathId: 39 Type: EIndexTypeGlobalUnique State: EIndexStateReady KeyColumnNames: "value" SchemaVersion: 1 PathOwnerId: 72057594046678944 DataSize: 0 IndexImplTableDescriptions { PartitionConfig { CompactionPolicy { InMemSizeToSnapshot: 4194304 InMemStepsToSnapshot: 300 InMemForceStepsToSnapshot: 500 InMemForceSizeToSnapshot: 16777216 InMemCompactionBrokerQueue: 0 ReadAheadHiThreshold: 67108864 ReadAheadLoThreshold: 16777216 MinDataPageSize: 7168 SnapBrokerQueue: 0 Generation { GenerationId: 0 SizeToCompact: 0 CountToCompact: 8 ForceCountToCompact: 8 ForceSizeToCompact: 134217728 CompactionBrokerQueue: 4294967295 KeepInCache: true BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen1" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 0 UpliftPartSize: 0 } Generation { GenerationId: 1 SizeToCompact: 41943040 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 536870912 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen2" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 8388608 UpliftPartSize: 8388608 } Generation { GenerationId: 2 SizeToCompact: 419430400 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 17179869184 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen3" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 83886080 UpliftPartSize: 83886080 } BackupBrokerQueue: 1 DefaultTaskPriority: 5 BackgroundSnapshotPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } InMemResourceBrokerTask: "compaction_gen0" SnapshotResourceBrokerTask: "compaction_gen0" BackupResourceBrokerTask: "scan" LogOverheadSizeToSnapshot: 16777216 LogOverheadCountToSnapshot: 500 DroppedRowsPercentToCompact: 50 MinBTreeIndexNodeSize: 7168 MinBTreeIndexNodeKeys: 6 } PartitioningPolicy { SizeToSplit: 2147483648 MinPartitionsCount: 1 } } } } } PathId: 39 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 |88.6%| [TM] {BAZEL_UPLOAD} ydb/core/tx/schemeshard/ut_export/unittest >> TConsoleTxProcessorTests::TestTxProcessorTemporary [GOOD] >> TConsoleTxProcessorTests::TestTxProcessorRandom >> TConsoleTests::TestRemoveTenantExtSubdomain [GOOD] >> TConsoleTests::TestRemoveSharedTenantWoServerlessTenants >> EraseRowsTests::ConditionalEraseRowsShouldNotEraseModifiedRows >> EraseRowsTests::ConditionalEraseRowsShouldEraseOnDyNumberMicroSeconds [GOOD] >> EraseRowsTests::ConditionalEraseRowsShouldEraseOnDate32 >> TExportToS3Tests::TopicsWithPermissionsExport [GOOD] >> TExportToS3Tests::Changefeeds [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_export/unittest >> TExportToS3Tests::ReplicatedTableExport [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2143] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2143] Leader for TabletID 
72057594046678944 is [1:129:2154] sender: [1:130:2058] recipient: [1:112:2143] 2026-01-08T15:36:36.820927Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7938: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2026-01-08T15:36:36.821023Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7966: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2026-01-08T15:36:36.821061Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7852: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2026-01-08T15:36:36.821109Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7868: OperationsProcessing config: using default configuration 2026-01-08T15:36:36.821156Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7874: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2026-01-08T15:36:36.821189Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7874: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2026-01-08T15:36:36.821257Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7998: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2026-01-08T15:36:36.821329Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_shred_manager.cpp:39: [RootShredManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, ShredInterval# 604800.000000s, ShredBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2026-01-08T15:36:36.822244Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:8069: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2026-01-08T15:36:36.822565Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2026-01-08T15:36:36.907295Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7755: Cannot subscribe to console configs 2026-01-08T15:36:36.907372Z node 1 :IMPORT WARN: schemeshard_import.cpp:440: Table profiles were not loaded 2026-01-08T15:36:36.919992Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2026-01-08T15:36:36.920289Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2026-01-08T15:36:36.920491Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2026-01-08T15:36:36.932493Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2026-01-08T15:36:36.932856Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:464: Clear TempDirsState with owners number: 0 2026-01-08T15:36:36.933807Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1383: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2026-01-08T15:36:36.934431Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2026-01-08T15:36:36.937903Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:160: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2026-01-08T15:36:36.938128Z node 1 
:FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_shred_manager.cpp:84: [RootShredManager] Stop 2026-01-08T15:36:36.939358Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2026-01-08T15:36:36.939421Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2026-01-08T15:36:36.939574Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2026-01-08T15:36:36.939664Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2026-01-08T15:36:36.939715Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:213: TTxServerlessStorageBilling.Complete 2026-01-08T15:36:36.939877Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:7111: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2026-01-08T15:36:37.088709Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot" OperationType: ESchemeOpMkDir MkDir { Name: ".sys" } Internal: true FailOnExist: false } TxId: 281474976710657 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:36:37.089747Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "auth_permissions" Type: EAuthPermissions } } TxId: 281474976710658 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:36:37.089889Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "auth_effective_permissions" Type: EAuthEffectivePermissions } } TxId: 281474976710659 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:36:37.089967Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "auth_users" Type: EAuthUsers } } TxId: 281474976710660 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:36:37.090059Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "pg_tables" Type: EPgTables } } TxId: 281474976710661 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:36:37.090156Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "ds_pdisks" Type: EPDisks } } TxId: 281474976710662 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:36:37.090254Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "auth_group_members" Type: EAuthGroupMembers } } TxId: 281474976710663 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:36:37.090349Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "top_partitions_one_minute" Type: ETopPartitionsByCpuOneMinute } } TxId: 281474976710664 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:36:37.090429Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "top_queries_by_request_units_one_hour" Type: ETopQueriesByRequestUnitsOneHour } } TxId: 281474976710665 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:36:37.090538Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "ds_vslots" Type: EVSlots } } TxId: 281474976710666 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:36:37.090635Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "top_partitions_by_tli_one_hour" Type: ETopPartitionsByTliOneHour } } TxId: 281474976710667 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:36:37.090739Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "nodes" Type: ENodes } } TxId: 281474976710668 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:36:37.090808Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: 
ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "top_queries_by_cpu_time_one_hour" Type: ETopQueriesByCpuTimeOneHour } } TxId: 281474976710669 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:36:37.090891Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "top_queries_by_read_bytes_one_hour" Type: ETopQueriesByReadBytesOneHour } } TxId: 281474976710670 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:36:37.090968Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "top_queries_by_read_bytes_one_minute" Type: ETopQueriesByR ... 4046678944, LocalPathId: 1], version: 8 2026-01-08T15:36:49.938083Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:611: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 3 FAKE_COORDINATOR: Erasing txId 101 2026-01-08T15:36:49.947178Z node 7 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6274: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 38 Version: 3 PathOwnerId: 72057594046678944, cookie: 101 2026-01-08T15:36:49.947310Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 38 Version: 3 PathOwnerId: 72057594046678944, cookie: 101 2026-01-08T15:36:49.947353Z node 7 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:108: Operation in-flight, at schemeshard: 72057594046678944, txId: 101 2026-01-08T15:36:49.947394Z node 7 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 101, pathId: [OwnerId: 72057594046678944, LocalPathId: 38], version: 3 2026-01-08T15:36:49.947440Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:611: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 38] was 4 2026-01-08T15:36:49.947549Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1679: TOperation IsReadyToNotify, TxId: 101, ready parts: 0/1, is published: true 2026-01-08T15:36:49.949003Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6725: Handle TEvProposeTransactionResult, at schemeshard: 72057594046678944, message: TxKind: TX_KIND_SCHEME Origin: 72075186233409546 Status: COMPLETE TxId: 101 Step: 5000038 OrderId: 101 ExecLatency: 0 ProposeLatency: 3 DomainCoordinators: 72057594046316545 TxStats { PerShardStats { ShardId: 72075186233409546 CpuTimeUsec: 1348 } } CommitVersion { Step: 5000038 TxId: 101 } 2026-01-08T15:36:49.949046Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1837: TOperation FindRelatedPartByTabletId, TxId: 101, tablet: 72075186233409546, partId: 0 2026-01-08T15:36:49.949172Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:628: TTxOperationReply execute, operationId: 101:0, at 
schemeshard: 72057594046678944, message: TxKind: TX_KIND_SCHEME Origin: 72075186233409546 Status: COMPLETE TxId: 101 Step: 5000038 OrderId: 101 ExecLatency: 0 ProposeLatency: 3 DomainCoordinators: 72057594046316545 TxStats { PerShardStats { ShardId: 72075186233409546 CpuTimeUsec: 1348 } } CommitVersion { Step: 5000038 TxId: 101 } 2026-01-08T15:36:49.949313Z node 7 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_part.cpp:108: HandleReply TEvDataShard::TEvProposeTransactionResult Ignore message: tablet# 72057594046678944, ev# TxKind: TX_KIND_SCHEME Origin: 72075186233409546 Status: COMPLETE TxId: 101 Step: 5000038 OrderId: 101 ExecLatency: 0 ProposeLatency: 3 DomainCoordinators: 72057594046316545 TxStats { PerShardStats { ShardId: 72075186233409546 CpuTimeUsec: 1348 } } CommitVersion { Step: 5000038 TxId: 101 } debug: NTableState::TProposedWaitParts operationId# 101:0 2026-01-08T15:36:49.950900Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5926: Handle TEvSchemaChanged, tabletId: 72057594046678944, at schemeshard: 72057594046678944, message: Source { RawX1: 721 RawX2: 30064773779 } Origin: 72075186233409546 State: 2 TxId: 101 Step: 0 Generation: 2 2026-01-08T15:36:49.950952Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1837: TOperation FindRelatedPartByTabletId, TxId: 101, tablet: 72075186233409546, partId: 0 2026-01-08T15:36:49.951112Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:628: TTxOperationReply execute, operationId: 101:0, at schemeshard: 72057594046678944, message: Source { RawX1: 721 RawX2: 30064773779 } Origin: 72075186233409546 State: 2 TxId: 101 Step: 0 Generation: 2 2026-01-08T15:36:49.951191Z node 7 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:1081: NTableState::TProposedWaitParts operationId# 101:0 HandleReply TEvDataShard::TEvSchemaChanged at tablet: 72057594046678944 2026-01-08T15:36:49.951300Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:1085: NTableState::TProposedWaitParts operationId# 101:0 HandleReply TEvDataShard::TEvSchemaChanged at tablet: 72057594046678944 message: Source { RawX1: 721 RawX2: 30064773779 } Origin: 72075186233409546 State: 2 TxId: 101 Step: 0 Generation: 2 2026-01-08T15:36:49.951370Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:673: CollectSchemaChanged accept TEvDataShard::TEvSchemaChanged, operationId: 101:0, shardIdx: 72057594046678944:1, shard: 72075186233409546, left await: 0, txState.State: ProposedWaitParts, txState.ReadyForNotifications: 1, at schemeshard: 72057594046678944 2026-01-08T15:36:49.951418Z node 7 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:710: all shard schema changes has been received, operationId: 101:0, at schemeshard: 72057594046678944 2026-01-08T15:36:49.951466Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:722: send schema changes ack message, operation: 101:0, datashard: 72075186233409546, at schemeshard: 72057594046678944 2026-01-08T15:36:49.951535Z node 7 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2700: Change state for txid 101:0 129 -> 240 2026-01-08T15:36:49.952428Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 101 2026-01-08T15:36:49.959366Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 101 2026-01-08T15:36:49.959746Z node 7 
:FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:656: TTxOperationReply complete, operationId: 101:0, at schemeshard: 72057594046678944 2026-01-08T15:36:49.961029Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:656: TTxOperationReply complete, operationId: 101:0, at schemeshard: 72057594046678944 2026-01-08T15:36:49.961227Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:494: TTxOperationProgress Execute, operationId: 101:0, at schemeshard: 72057594046678944 2026-01-08T15:36:49.961288Z node 7 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:497: [72057594046678944] TDone opId# 101:0 ProgressState 2026-01-08T15:36:49.961468Z node 7 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:928: Part operation is done id#101:0 progress is 1/1 2026-01-08T15:36:49.961529Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1714: TOperation IsReadyToDone TxId: 101 ready parts: 1/1 2026-01-08T15:36:49.961588Z node 7 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:928: Part operation is done id#101:0 progress is 1/1 2026-01-08T15:36:49.961625Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1714: TOperation IsReadyToDone TxId: 101 ready parts: 1/1 2026-01-08T15:36:49.961667Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1679: TOperation IsReadyToNotify, TxId: 101, ready parts: 1/1, is published: true 2026-01-08T15:36:49.961731Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1702: TOperation DoNotify send TEvNotifyTxCompletionResult to actorId: [7:749:2727] message: TxId: 101 2026-01-08T15:36:49.961783Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1714: TOperation IsReadyToDone TxId: 101 ready parts: 1/1 2026-01-08T15:36:49.961829Z node 7 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:995: Operation and all the parts is done, operation id: 101:0 2026-01-08T15:36:49.961868Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5577: RemoveTx for txid 101:0 2026-01-08T15:36:49.962021Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:611: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 38] was 3 2026-01-08T15:36:49.964162Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:230: tests -- TTxNotificationSubscriber for txId 101: got EvNotifyTxCompletionResult 2026-01-08T15:36:49.964228Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:239: tests -- TTxNotificationSubscriber for txId 101: satisfy waiter [7:750:2728] TestWaitNotification: OK eventTxId 101 2026-01-08T15:36:49.964801Z node 7 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/Table" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2026-01-08T15:36:49.965075Z node 7 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/Table" took 310us result status StatusSuccess 2026-01-08T15:36:49.965733Z node 7 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/Table" PathDescription { Self { Name: "Table" PathId: 38 SchemeshardId: 72057594046678944 PathType: EPathTypeTable CreateFinished: true CreateTxId: 101 CreateStep: 5000038 ParentPathId: 1 PathState: EPathStateNoChanges Owner: 
"root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "Table" Columns { Name: "key" Type: "Uint64" TypeId: 4 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "value" Type: "Uint64" TypeId: 4 Id: 2 NotNull: false IsBuildInProgress: false } KeyColumnNames: "key" KeyColumnIds: 1 TableSchemaVersion: 1 IsBackup: false ReplicationConfig { Mode: REPLICATION_MODE_READ_ONLY } IsRestore: false } TableStats { DataSize: 0 RowCount: 0 IndexSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 0 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 1 RangeReadRows: 0 StoragePools { } ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 0 Memory: 0 Network: 0 Storage: 0 ReadThroughput: 0 WriteThroughput: 0 ReadIops: 0 WriteIops: 0 } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 37 PathsLimit: 10000 ShardsInside: 1 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } UserAttributes { Key: "__async_replica" Value: "true" } } PathId: 38 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 |88.6%| [TM] {BAZEL_UPLOAD} ydb/core/tx/schemeshard/ut_export/unittest >> EraseRowsTests::EraseRowsShouldSuccess [GOOD] >> EraseRowsTests::EraseRowsShouldFailOnVariousErrors >> TExportToS3Tests::CancelUponTransferringManyTablesShouldSucceed [GOOD] >> TConsoleTests::TestAlterTenantModifyStorageResourcesForRunningExtSubdomain [GOOD] >> TConsoleTests::TestAlterUnknownTenant >> TExportToS3Tests::AuditCompletedExport >> PgCatalog::PgDatabase+useSink [GOOD] >> PgCatalog::PgDatabase-useSink |88.6%| [TM] {asan, default-linux-x86_64, release} ydb/services/metadata/initializer/ut/unittest ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_export/unittest >> TExportToS3Tests::IndexMaterializationTwoTables [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2143] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2143] Leader for TabletID 72057594046678944 is [1:129:2154] sender: [1:130:2058] recipient: [1:112:2143] 2026-01-08T15:36:34.564028Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7938: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, 
Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2026-01-08T15:36:34.564142Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7966: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2026-01-08T15:36:34.564189Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7852: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2026-01-08T15:36:34.564254Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7868: OperationsProcessing config: using default configuration 2026-01-08T15:36:34.564314Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7874: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2026-01-08T15:36:34.564348Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7874: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2026-01-08T15:36:34.564412Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7998: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2026-01-08T15:36:34.564514Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_shred_manager.cpp:39: [RootShredManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, ShredInterval# 604800.000000s, ShredBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2026-01-08T15:36:34.565542Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:8069: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2026-01-08T15:36:34.565881Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2026-01-08T15:36:34.807265Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7755: Cannot subscribe to console configs 2026-01-08T15:36:34.807348Z node 1 :IMPORT WARN: schemeshard_import.cpp:440: Table profiles were not loaded 2026-01-08T15:36:34.824965Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2026-01-08T15:36:34.825370Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2026-01-08T15:36:34.825600Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2026-01-08T15:36:34.839347Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2026-01-08T15:36:34.839819Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:464: Clear TempDirsState with owners number: 0 2026-01-08T15:36:34.840716Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1383: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2026-01-08T15:36:34.841502Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2026-01-08T15:36:34.853225Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:160: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2026-01-08T15:36:34.853818Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_shred_manager.cpp:84: [RootShredManager] Stop 2026-01-08T15:36:34.856717Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 
72057594046678944 2026-01-08T15:36:34.856867Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2026-01-08T15:36:34.857150Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2026-01-08T15:36:34.857319Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2026-01-08T15:36:34.857463Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:213: TTxServerlessStorageBilling.Complete 2026-01-08T15:36:34.857861Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:7111: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2026-01-08T15:36:35.025425Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot" OperationType: ESchemeOpMkDir MkDir { Name: ".sys" } Internal: true FailOnExist: false } TxId: 281474976710657 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:36:35.026582Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "auth_permissions" Type: EAuthPermissions } } TxId: 281474976710658 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:36:35.026740Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "auth_effective_permissions" Type: EAuthEffectivePermissions } } TxId: 281474976710659 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:36:35.026822Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "auth_users" Type: EAuthUsers } } TxId: 281474976710660 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:36:35.026890Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "pg_tables" Type: EPgTables } } TxId: 281474976710661 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:36:35.026977Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false 
CreateSysView { Name: "ds_pdisks" Type: EPDisks } } TxId: 281474976710662 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:36:35.027052Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "auth_group_members" Type: EAuthGroupMembers } } TxId: 281474976710663 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:36:35.027191Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "top_partitions_one_minute" Type: ETopPartitionsByCpuOneMinute } } TxId: 281474976710664 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:36:35.027279Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "top_queries_by_request_units_one_hour" Type: ETopQueriesByRequestUnitsOneHour } } TxId: 281474976710665 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:36:35.027375Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "ds_vslots" Type: EVSlots } } TxId: 281474976710666 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:36:35.027468Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "top_partitions_by_tli_one_hour" Type: ETopPartitionsByTliOneHour } } TxId: 281474976710667 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:36:35.027572Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "nodes" Type: ENodes } } TxId: 281474976710668 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:36:35.027660Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "top_queries_by_cpu_time_one_hour" Type: ETopQueriesByCpuTimeOneHour } } TxId: 281474976710669 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 
2026-01-08T15:36:35.027730Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "top_queries_by_read_bytes_one_hour" Type: ETopQueriesByReadBytesOneHour } } TxId: 281474976710670 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:36:35.027800Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "top_queries_by_read_bytes_one_minute" Type: ETopQueriesByR ... e publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 5 2026-01-08T15:36:50.727112Z node 7 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6274: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 44 Version: 15 PathOwnerId: 72057594046678944, cookie: 281474976710765 2026-01-08T15:36:50.727176Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 44 Version: 15 PathOwnerId: 72057594046678944, cookie: 281474976710765 2026-01-08T15:36:50.727198Z node 7 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:108: Operation in-flight, at schemeshard: 72057594046678944, txId: 281474976710765 2026-01-08T15:36:50.727221Z node 7 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 281474976710765, pathId: [OwnerId: 72057594046678944, LocalPathId: 44], version: 15 2026-01-08T15:36:50.727252Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:611: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 44] was 4 2026-01-08T15:36:50.727324Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1679: TOperation IsReadyToNotify, TxId: 281474976710765, ready parts: 0/1, is published: true 2026-01-08T15:36:50.729993Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__notify.cpp:31: NotifyTxCompletion operation in-flight, txId: 281474976710765, at schemeshard: 72057594046678944 2026-01-08T15:36:50.730029Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1679: TOperation IsReadyToNotify, TxId: 281474976710765, ready parts: 0/1, is published: true 2026-01-08T15:36:50.730055Z node 7 :FLAT_TX_SCHEMESHARD INFO: schemeshard__notify.cpp:132: NotifyTxCompletion transaction is registered, txId: 281474976710765, at schemeshard: 72057594046678944 2026-01-08T15:36:50.730494Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:665: Send tablet strongly msg operationId: 281474976710765:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:281474976710765 msg type: 269090816 2026-01-08T15:36:50.730598Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1824: TOperation RegisterRelationByTabletId, TxId: 281474976710765, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 281474976710765 at step: 5000048 FAKE_COORDINATOR: advance: minStep5000048 State->FrontStep: 
5000047 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 281474976710765 at step: 5000048 2026-01-08T15:36:50.730867Z node 7 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:685: TTxOperationPlanStep Execute, stepId: 5000048, transactions count in step: 1, at schemeshard: 72057594046678944 2026-01-08T15:36:50.730951Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:689: TTxOperationPlanStep Execute, message: Transactions { TxId: 281474976710765 Coordinator: 72057594046316545 AckTo { RawX1: 138 RawX2: 30064773232 } } Step: 5000048 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2026-01-08T15:36:50.730990Z node 7 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_rmdir.cpp:128: TRmDir HandleReply TEvOperationPlan, opId: 281474976710765:0, step: 5000048, at schemeshard: 72057594046678944 2026-01-08T15:36:50.731107Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_rmdir.cpp:179: RmDir is done, opId: 281474976710765:0, at schemeshard: 72057594046678944 2026-01-08T15:36:50.731163Z node 7 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:928: Part operation is done id#281474976710765:0 progress is 1/1 2026-01-08T15:36:50.731199Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1714: TOperation IsReadyToDone TxId: 281474976710765 ready parts: 1/1 2026-01-08T15:36:50.731236Z node 7 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:928: Part operation is done id#281474976710765:0 progress is 1/1 2026-01-08T15:36:50.731278Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1714: TOperation IsReadyToDone TxId: 281474976710765 ready parts: 1/1 2026-01-08T15:36:50.731325Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:600: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 4 2026-01-08T15:36:50.731388Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:600: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 44] was 3 2026-01-08T15:36:50.731425Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1679: TOperation IsReadyToNotify, TxId: 281474976710765, ready parts: 1/1, is published: false 2026-01-08T15:36:50.731467Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1714: TOperation IsReadyToDone TxId: 281474976710765 ready parts: 1/1 2026-01-08T15:36:50.731540Z node 7 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:995: Operation and all the parts is done, operation id: 281474976710765:0 2026-01-08T15:36:50.731574Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5577: RemoveTx for txid 281474976710765:0 2026-01-08T15:36:50.731629Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:611: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 44] was 4 2026-01-08T15:36:50.731675Z node 7 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:1004: Publication still in progress, tx: 281474976710765, publications: 2, subscribers: 1 2026-01-08T15:36:50.731719Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:1011: Publication details: tx: 281474976710765, [OwnerId: 72057594046678944, LocalPathId: 1], 18 2026-01-08T15:36:50.731754Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:1011: Publication details: tx: 281474976710765, [OwnerId: 72057594046678944, LocalPathId: 44], 18446744073709551615 
2026-01-08T15:36:50.732313Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 281474976710765 2026-01-08T15:36:50.732384Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 281474976710765 2026-01-08T15:36:50.733894Z node 7 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2026-01-08T15:36:50.733936Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 281474976710765, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2026-01-08T15:36:50.734085Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 281474976710765, path id: [OwnerId: 72057594046678944, LocalPathId: 44] 2026-01-08T15:36:50.734175Z node 7 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2026-01-08T15:36:50.734207Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [7:210:2211], at schemeshard: 72057594046678944, txId: 281474976710765, path id: 1 2026-01-08T15:36:50.734236Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [7:210:2211], at schemeshard: 72057594046678944, txId: 281474976710765, path id: 44 FAKE_COORDINATOR: Erasing txId 281474976710765 2026-01-08T15:36:50.734941Z node 7 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6274: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 18 PathOwnerId: 72057594046678944, cookie: 281474976710765 2026-01-08T15:36:50.735021Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 18 PathOwnerId: 72057594046678944, cookie: 281474976710765 2026-01-08T15:36:50.735054Z node 7 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 2, at schemeshard: 72057594046678944, txId: 281474976710765 2026-01-08T15:36:50.735098Z node 7 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 281474976710765, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], version: 18 2026-01-08T15:36:50.735141Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:611: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 5 2026-01-08T15:36:50.735532Z node 7 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6274: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 44 Version: 18446744073709551615 PathOwnerId: 72057594046678944, cookie: 281474976710765 2026-01-08T15:36:50.735582Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 44 
Version: 18446744073709551615 PathOwnerId: 72057594046678944, cookie: 281474976710765 2026-01-08T15:36:50.735604Z node 7 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046678944, txId: 281474976710765 2026-01-08T15:36:50.735630Z node 7 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 281474976710765, pathId: [OwnerId: 72057594046678944, LocalPathId: 44], version: 18446744073709551615 2026-01-08T15:36:50.735679Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:611: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 44] was 3 2026-01-08T15:36:50.735741Z node 7 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 281474976710765, subscribers: 1 2026-01-08T15:36:50.735787Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:212: TTxAckPublishToSchemeBoard Notify send TEvNotifyTxCompletionResult, at schemeshard: 72057594046678944, to actorId: [7:128:2153] 2026-01-08T15:36:50.738255Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 281474976710765 2026-01-08T15:36:50.739102Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 281474976710765 2026-01-08T15:36:50.739204Z node 7 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:7196: Handle: TEvNotifyTxCompletionResult: txId# 281474976710765 2026-01-08T15:36:50.739260Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:7198: Message: TxId: 281474976710765 2026-01-08T15:36:50.741046Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:230: tests -- TTxNotificationSubscriber for txId 103: got EvNotifyTxCompletionResult 2026-01-08T15:36:50.741108Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:239: tests -- TTxNotificationSubscriber for txId 103: satisfy waiter [7:1235:3127] TestWaitNotification: OK eventTxId 103 |88.6%| [TM] {BAZEL_UPLOAD} ydb/services/metadata/initializer/ut/unittest |88.6%| [TM] {BAZEL_UPLOAD} ydb/core/tx/schemeshard/ut_export/unittest |88.6%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/s3/py3test >> test_s3.py::TestYdbS3TTL::test_s3[table_ttl_DyNumber-pk_types13-all_types13-index13-DyNumber--] [GOOD] |88.6%| [TM] {BAZEL_UPLOAD} ydb/tests/datashard/s3/py3test >> KqpQueryPerf::Replace+QueryService-UseSink >> TConsoleConfigSubscriptionTests::TestAddSubscriptionIdempotency [GOOD] >> TConsoleConfigSubscriptionTests::TestConfigNotificationRetries ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_export/unittest >> TExportToS3Tests::TopicsWithPermissionsExport [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2143] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2143] Leader for TabletID 72057594046678944 is [1:129:2154] sender: [1:130:2058] recipient: [1:112:2143] 2026-01-08T15:36:37.188365Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7938: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, 
MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2026-01-08T15:36:37.188484Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7966: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2026-01-08T15:36:37.188531Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7852: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2026-01-08T15:36:37.188566Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7868: OperationsProcessing config: using default configuration 2026-01-08T15:36:37.188612Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7874: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2026-01-08T15:36:37.188644Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7874: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2026-01-08T15:36:37.188718Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7998: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2026-01-08T15:36:37.188805Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_shred_manager.cpp:39: [RootShredManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, ShredInterval# 604800.000000s, ShredBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2026-01-08T15:36:37.189723Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:8069: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2026-01-08T15:36:37.190057Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2026-01-08T15:36:37.281735Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7755: Cannot subscribe to console configs 2026-01-08T15:36:37.281852Z node 1 :IMPORT WARN: schemeshard_import.cpp:440: Table profiles were not loaded 2026-01-08T15:36:37.292171Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2026-01-08T15:36:37.292458Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2026-01-08T15:36:37.292607Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2026-01-08T15:36:37.302041Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2026-01-08T15:36:37.302421Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:464: Clear TempDirsState with owners number: 0 2026-01-08T15:36:37.303039Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1383: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2026-01-08T15:36:37.303568Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2026-01-08T15:36:37.306055Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:160: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2026-01-08T15:36:37.306293Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_shred_manager.cpp:84: [RootShredManager] Stop 2026-01-08T15:36:37.307407Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2026-01-08T15:36:37.307470Z node 1 :FLAT_TX_SCHEMESHARD INFO: 
schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2026-01-08T15:36:37.307610Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2026-01-08T15:36:37.307685Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2026-01-08T15:36:37.307738Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:213: TTxServerlessStorageBilling.Complete 2026-01-08T15:36:37.307882Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:7111: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2026-01-08T15:36:37.464373Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot" OperationType: ESchemeOpMkDir MkDir { Name: ".sys" } Internal: true FailOnExist: false } TxId: 281474976710657 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:36:37.465586Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "auth_permissions" Type: EAuthPermissions } } TxId: 281474976710658 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:36:37.465728Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "auth_effective_permissions" Type: EAuthEffectivePermissions } } TxId: 281474976710659 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:36:37.465839Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "auth_users" Type: EAuthUsers } } TxId: 281474976710660 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:36:37.465973Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "pg_tables" Type: EPgTables } } TxId: 281474976710661 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:36:37.466050Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "ds_pdisks" Type: EPDisks } } TxId: 281474976710662 Owner: 
"metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:36:37.466171Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "auth_group_members" Type: EAuthGroupMembers } } TxId: 281474976710663 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:36:37.466255Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "top_partitions_one_minute" Type: ETopPartitionsByCpuOneMinute } } TxId: 281474976710664 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:36:37.466403Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "top_queries_by_request_units_one_hour" Type: ETopQueriesByRequestUnitsOneHour } } TxId: 281474976710665 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:36:37.466525Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "ds_vslots" Type: EVSlots } } TxId: 281474976710666 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:36:37.466625Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "top_partitions_by_tli_one_hour" Type: ETopPartitionsByTliOneHour } } TxId: 281474976710667 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:36:37.466734Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "nodes" Type: ENodes } } TxId: 281474976710668 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:36:37.466819Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "top_queries_by_cpu_time_one_hour" Type: ETopQueriesByCpuTimeOneHour } } TxId: 281474976710669 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:36:37.466893Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: 
SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "top_queries_by_read_bytes_one_hour" Type: ETopQueriesByReadBytesOneHour } } TxId: 281474976710670 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:36:37.466986Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "top_queries_by_read_bytes_one_minute" Type: ETopQueriesByR ... 6-01-08T15:36:51.379223Z node 7 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 281474976710758, pathId: [OwnerId: 72057594046678944, LocalPathId: 43], version: 3 2026-01-08T15:36:51.379247Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:611: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 43] was 2 2026-01-08T15:36:51.379307Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1679: TOperation IsReadyToNotify, TxId: 281474976710758, ready parts: 0/1, is published: true 2026-01-08T15:36:51.380533Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__notify.cpp:31: NotifyTxCompletion operation in-flight, txId: 281474976710758, at schemeshard: 72057594046678944 2026-01-08T15:36:51.380569Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1679: TOperation IsReadyToNotify, TxId: 281474976710758, ready parts: 0/1, is published: true 2026-01-08T15:36:51.380598Z node 7 :FLAT_TX_SCHEMESHARD INFO: schemeshard__notify.cpp:132: NotifyTxCompletion transaction is registered, txId: 281474976710758, at schemeshard: 72057594046678944 2026-01-08T15:36:51.381587Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:665: Send tablet strongly msg operationId: 281474976710758:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:281474976710758 msg type: 269090816 2026-01-08T15:36:51.381676Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1824: TOperation RegisterRelationByTabletId, TxId: 281474976710758, partId: 4294967295, tablet: 72057594046316545 2026-01-08T15:36:51.382328Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 281474976710758 FAKE_COORDINATOR: Add transaction: 281474976710758 at step: 5000044 FAKE_COORDINATOR: advance: minStep5000044 State->FrontStep: 5000043 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 281474976710758 at step: 5000044 2026-01-08T15:36:51.382539Z node 7 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:685: TTxOperationPlanStep Execute, stepId: 5000044, transactions count in step: 1, at schemeshard: 72057594046678944 2026-01-08T15:36:51.382607Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:689: TTxOperationPlanStep Execute, message: Transactions { TxId: 281474976710758 Coordinator: 72057594046316545 AckTo { RawX1: 138 RawX2: 30064773232 } } Step: 5000044 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2026-01-08T15:36:51.382638Z node 7 :FLAT_TX_SCHEMESHARD INFO: 
schemeshard__operation_rmdir.cpp:128: TRmDir HandleReply TEvOperationPlan, opId: 281474976710758:0, step: 5000044, at schemeshard: 72057594046678944 2026-01-08T15:36:51.382706Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_rmdir.cpp:179: RmDir is done, opId: 281474976710758:0, at schemeshard: 72057594046678944 2026-01-08T15:36:51.382751Z node 7 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:928: Part operation is done id#281474976710758:0 progress is 1/1 2026-01-08T15:36:51.382775Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1714: TOperation IsReadyToDone TxId: 281474976710758 ready parts: 1/1 2026-01-08T15:36:51.382804Z node 7 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:928: Part operation is done id#281474976710758:0 progress is 1/1 2026-01-08T15:36:51.382831Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1714: TOperation IsReadyToDone TxId: 281474976710758 ready parts: 1/1 2026-01-08T15:36:51.382872Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:600: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 7 2026-01-08T15:36:51.382933Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:600: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 43] was 1 2026-01-08T15:36:51.382960Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1679: TOperation IsReadyToNotify, TxId: 281474976710758, ready parts: 1/1, is published: false 2026-01-08T15:36:51.382993Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1714: TOperation IsReadyToDone TxId: 281474976710758 ready parts: 1/1 2026-01-08T15:36:51.383017Z node 7 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:995: Operation and all the parts is done, operation id: 281474976710758:0 2026-01-08T15:36:51.383038Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5577: RemoveTx for txid 281474976710758:0 2026-01-08T15:36:51.383084Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:611: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 43] was 2 2026-01-08T15:36:51.383116Z node 7 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:1004: Publication still in progress, tx: 281474976710758, publications: 2, subscribers: 1 2026-01-08T15:36:51.383143Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:1011: Publication details: tx: 281474976710758, [OwnerId: 72057594046678944, LocalPathId: 1], 20 2026-01-08T15:36:51.383176Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:1011: Publication details: tx: 281474976710758, [OwnerId: 72057594046678944, LocalPathId: 43], 18446744073709551615 2026-01-08T15:36:51.383662Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 281474976710758 2026-01-08T15:36:51.384720Z node 7 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2026-01-08T15:36:51.384752Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 281474976710758, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2026-01-08T15:36:51.384878Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 281474976710758, path id: [OwnerId: 72057594046678944, LocalPathId: 43] 2026-01-08T15:36:51.384969Z node 7 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2026-01-08T15:36:51.385007Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [7:210:2211], at schemeshard: 72057594046678944, txId: 281474976710758, path id: 1 2026-01-08T15:36:51.385037Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [7:210:2211], at schemeshard: 72057594046678944, txId: 281474976710758, path id: 43 FAKE_COORDINATOR: Erasing txId 281474976710758 2026-01-08T15:36:51.385621Z node 7 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6274: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 20 PathOwnerId: 72057594046678944, cookie: 281474976710758 2026-01-08T15:36:51.385689Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 20 PathOwnerId: 72057594046678944, cookie: 281474976710758 2026-01-08T15:36:51.385729Z node 7 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 2, at schemeshard: 72057594046678944, txId: 281474976710758 2026-01-08T15:36:51.385759Z node 7 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 281474976710758, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], version: 20 2026-01-08T15:36:51.385784Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:611: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 8 2026-01-08T15:36:51.386059Z node 7 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6274: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 43 Version: 18446744073709551615 PathOwnerId: 72057594046678944, cookie: 281474976710758 2026-01-08T15:36:51.386114Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 43 Version: 18446744073709551615 PathOwnerId: 72057594046678944, cookie: 281474976710758 2026-01-08T15:36:51.386139Z node 7 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046678944, txId: 281474976710758 2026-01-08T15:36:51.386159Z node 7 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 281474976710758, pathId: [OwnerId: 72057594046678944, LocalPathId: 43], version: 18446744073709551615 2026-01-08T15:36:51.386189Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:611: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 43] was 1 2026-01-08T15:36:51.386236Z node 7 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 
72057594046678944, txId: 281474976710758, subscribers: 1 2026-01-08T15:36:51.386271Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:212: TTxAckPublishToSchemeBoard Notify send TEvNotifyTxCompletionResult, at schemeshard: 72057594046678944, to actorId: [7:128:2153] 2026-01-08T15:36:51.386667Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:123: TTxCleanDroppedPaths Execute, 1 paths in candidate queue, at schemeshard: 72057594046678944 2026-01-08T15:36:51.386724Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:137: TTxCleanDroppedPaths: PersistRemovePath for PathId# [OwnerId: 72057594046678944, LocalPathId: 43], at schemeshard: 72057594046678944 2026-01-08T15:36:51.386806Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:611: DecrementPathDbRefCount reason remove path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 7 2026-01-08T15:36:51.389368Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 281474976710758 2026-01-08T15:36:51.389718Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 281474976710758 2026-01-08T15:36:51.389769Z node 7 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:160: TTxCleanDroppedPaths Complete, done PersistRemovePath for 1 paths, skipped 0, left 0 candidates, at schemeshard: 72057594046678944 2026-01-08T15:36:51.389844Z node 7 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:7196: Handle: TEvNotifyTxCompletionResult: txId# 281474976710758 2026-01-08T15:36:51.389894Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:7198: Message: TxId: 281474976710758 2026-01-08T15:36:51.391093Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:230: tests -- TTxNotificationSubscriber for txId 106: got EvNotifyTxCompletionResult 2026-01-08T15:36:51.391126Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:239: tests -- TTxNotificationSubscriber for txId 106: satisfy waiter [7:1500:3323] TestWaitNotification: OK eventTxId 106 |88.6%| [TM] {BAZEL_UPLOAD} ydb/core/tx/schemeshard/ut_export/unittest >> DistributedEraseTests::ConditionalEraseRowsShouldNotErase [GOOD] >> DistributedEraseTests::ConditionalEraseRowsShouldFailOnVariousErrors ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_export/unittest >> TExportToS3Tests::Changefeeds [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2143] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2143] Leader for TabletID 72057594046678944 is [1:129:2154] sender: [1:130:2058] recipient: [1:112:2143] 2026-01-08T15:36:34.956953Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7938: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2026-01-08T15:36:34.957044Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7966: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2026-01-08T15:36:34.957094Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7852: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 
2026-01-08T15:36:34.957140Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7868: OperationsProcessing config: using default configuration 2026-01-08T15:36:34.957180Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7874: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2026-01-08T15:36:34.957212Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7874: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2026-01-08T15:36:34.957283Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7998: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2026-01-08T15:36:34.957391Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_shred_manager.cpp:39: [RootShredManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, ShredInterval# 604800.000000s, ShredBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2026-01-08T15:36:34.958270Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:8069: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2026-01-08T15:36:34.981456Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2026-01-08T15:36:35.182338Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7755: Cannot subscribe to console configs 2026-01-08T15:36:35.182413Z node 1 :IMPORT WARN: schemeshard_import.cpp:440: Table profiles were not loaded 2026-01-08T15:36:35.196088Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2026-01-08T15:36:35.196443Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2026-01-08T15:36:35.196643Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2026-01-08T15:36:35.223426Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2026-01-08T15:36:35.223825Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:464: Clear TempDirsState with owners number: 0 2026-01-08T15:36:35.224561Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1383: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2026-01-08T15:36:35.234724Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2026-01-08T15:36:35.240474Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:160: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2026-01-08T15:36:35.240721Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_shred_manager.cpp:84: [RootShredManager] Stop 2026-01-08T15:36:35.241873Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2026-01-08T15:36:35.241982Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2026-01-08T15:36:35.242136Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2026-01-08T15:36:35.242202Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 
72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2026-01-08T15:36:35.242258Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:213: TTxServerlessStorageBilling.Complete 2026-01-08T15:36:35.242476Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:7111: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2026-01-08T15:36:35.423661Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot" OperationType: ESchemeOpMkDir MkDir { Name: ".sys" } Internal: true FailOnExist: false } TxId: 281474976710657 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:36:35.424449Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "auth_permissions" Type: EAuthPermissions } } TxId: 281474976710658 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:36:35.424600Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "auth_effective_permissions" Type: EAuthEffectivePermissions } } TxId: 281474976710659 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:36:35.424659Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "auth_users" Type: EAuthUsers } } TxId: 281474976710660 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:36:35.424739Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "pg_tables" Type: EPgTables } } TxId: 281474976710661 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:36:35.424831Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "ds_pdisks" Type: EPDisks } } TxId: 281474976710662 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:36:35.424873Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "auth_group_members" Type: EAuthGroupMembers } } TxId: 281474976710663 
Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:36:35.424910Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "top_partitions_one_minute" Type: ETopPartitionsByCpuOneMinute } } TxId: 281474976710664 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:36:35.424953Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "top_queries_by_request_units_one_hour" Type: ETopQueriesByRequestUnitsOneHour } } TxId: 281474976710665 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:36:35.425024Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "ds_vslots" Type: EVSlots } } TxId: 281474976710666 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:36:35.425088Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "top_partitions_by_tli_one_hour" Type: ETopPartitionsByTliOneHour } } TxId: 281474976710667 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:36:35.425140Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "nodes" Type: ENodes } } TxId: 281474976710668 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:36:35.425205Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "top_queries_by_cpu_time_one_hour" Type: ETopQueriesByCpuTimeOneHour } } TxId: 281474976710669 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:36:35.425291Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "top_queries_by_read_bytes_one_hour" Type: ETopQueriesByReadBytesOneHour } } TxId: 281474976710670 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000" 2026-01-08T15:36:35.425344Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "top_queries_by_read_bytes_one_minute" Type: ETopQueriesByR ... node 6 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 2, at schemeshard: 72057594046678944, txId: 281474976710761 2026-01-08T15:36:51.868619Z node 6 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 281474976710761, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], version: 15 2026-01-08T15:36:51.868669Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:611: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 4 2026-01-08T15:36:51.869386Z node 6 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6274: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 45 Version: 18446744073709551615 PathOwnerId: 72057594046678944, cookie: 281474976710761 2026-01-08T15:36:51.869491Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 45 Version: 18446744073709551615 PathOwnerId: 72057594046678944, cookie: 281474976710761 2026-01-08T15:36:51.869531Z node 6 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046678944, txId: 281474976710761 2026-01-08T15:36:51.869583Z node 6 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 281474976710761, pathId: [OwnerId: 72057594046678944, LocalPathId: 45], version: 18446744073709551615 2026-01-08T15:36:51.869625Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:611: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 45] was 2 2026-01-08T15:36:51.869723Z node 6 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 281474976710761, subscribers: 1 2026-01-08T15:36:51.869785Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:212: TTxAckPublishToSchemeBoard Notify send TEvNotifyTxCompletionResult, at schemeshard: 72057594046678944, to actorId: [6:129:2154] 2026-01-08T15:36:51.873216Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 281474976710761 2026-01-08T15:36:51.873609Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 281474976710761 2026-01-08T15:36:51.873720Z node 6 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:7196: Handle: TEvNotifyTxCompletionResult: txId# 281474976710761 2026-01-08T15:36:51.873790Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:7198: Message: TxId: 281474976710761 2026-01-08T15:36:51.873844Z node 6 :EXPORT DEBUG: schemeshard_export__create.cpp:372: TExport::TTxProgress: DoExecute 2026-01-08T15:36:51.873875Z node 6 :EXPORT DEBUG: 
schemeshard_export__create.cpp:1333: TExport::TTxProgress: OnNotifyResult: txId# 281474976710761 2026-01-08T15:36:51.873909Z node 6 :EXPORT DEBUG: schemeshard_export__create.cpp:1364: TExport::TTxProgress: OnNotifyResult: txId# 281474976710761, id# 105, itemIdx# 4294967295 2026-01-08T15:36:51.875931Z node 6 :EXPORT DEBUG: schemeshard_export__create.cpp:392: TExport::TTxProgress: DoComplete 2026-01-08T15:36:51.876042Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:230: tests -- TTxNotificationSubscriber for txId 105: got EvNotifyTxCompletionResult 2026-01-08T15:36:51.876094Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:239: tests -- TTxNotificationSubscriber for txId 105: satisfy waiter [6:1534:3326] TestWaitNotification: OK eventTxId 105 2026-01-08T15:36:51.877452Z node 6 :EXPORT DEBUG: schemeshard_export__forget.cpp:79: TExport::TTxForget, dropping export tables, info: { Id: 105 Uid: '' Kind: S3 DomainPathId: [OwnerId: 72057594046678944, LocalPathId: 1] ExportPathId: [OwnerId: 72057594046678944, LocalPathId: 45] UserSID: '(empty maybe)' PeerName: '' State: Done WaitTxId: 281474976710761 Issue: '' Items: 1 PendingItems: 0 PendingDropItems: 0 } 2026-01-08T15:36:51.879892Z node 6 :EXPORT DEBUG: schemeshard_export__create.cpp:372: TExport::TTxProgress: DoExecute 2026-01-08T15:36:51.879953Z node 6 :EXPORT DEBUG: schemeshard_export__create.cpp:817: TExport::TTxProgress: Resume: id# 105 2026-01-08T15:36:51.880050Z node 6 :EXPORT INFO: schemeshard_export__create.cpp:598: TExport::TTxProgress: Allocate txId: info# { Id: 105 Uid: '' Kind: S3 DomainPathId: [OwnerId: 72057594046678944, LocalPathId: 1] ExportPathId: [OwnerId: 72057594046678944, LocalPathId: 45] UserSID: '(empty maybe)' PeerName: '' State: Dropping WaitTxId: 0 Issue: '' Items: 1 PendingItems: 0 PendingDropItems: 0 } 2026-01-08T15:36:51.880159Z node 6 :EXPORT DEBUG: schemeshard_export__create.cpp:392: TExport::TTxProgress: DoComplete 2026-01-08T15:36:51.880267Z node 6 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:7111: Handle: TEvAllocateResult: Cookie# 105, at schemeshard: 72057594046678944 2026-01-08T15:36:51.880332Z node 6 :EXPORT DEBUG: schemeshard_export__create.cpp:372: TExport::TTxProgress: DoExecute 2026-01-08T15:36:51.880376Z node 6 :EXPORT DEBUG: schemeshard_export__create.cpp:938: TExport::TTxProgress: OnAllocateResult: txId# 281474976710762, id# 105 2026-01-08T15:36:51.880449Z node 6 :EXPORT INFO: schemeshard_export__create.cpp:590: TExport::TTxProgress: Drop propose: info# { Id: 105 Uid: '' Kind: S3 DomainPathId: [OwnerId: 72057594046678944, LocalPathId: 1] ExportPathId: [OwnerId: 72057594046678944, LocalPathId: 45] UserSID: '(empty maybe)' PeerName: '' State: Dropping WaitTxId: 0 Issue: '' Items: 1 PendingItems: 0 PendingDropItems: 0 }, txId# 281474976710762 2026-01-08T15:36:51.880581Z node 6 :EXPORT DEBUG: schemeshard_export__create.cpp:392: TExport::TTxProgress: DoComplete 2026-01-08T15:36:51.883848Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:377: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/MyRoot" OperationType: ESchemeOpRmDir Drop { Name: "export-105" } Internal: true } TxId: 281474976710762 TabletId: 72057594046678944 PeerName: "" SanitizedToken: "" , at schemeshard: 72057594046678944 2026-01-08T15:36:51.884052Z node 6 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_rmdir.cpp:28: TRmDir Propose, path: /MyRoot/export-105, pathId: 0, opId: 281474976710762:0, at schemeshard: 72057594046678944 2026-01-08T15:36:51.884328Z node 6 :FLAT_TX_SCHEMESHARD NOTICE: 
schemeshard__operation.cpp:127: IgniteOperation, opId: 281474976710762:1, propose status:StatusPathDoesNotExist, reason: Check failed: path: '/MyRoot/export-105', error: path has been deleted (id: [OwnerId: 72057594046678944, LocalPathId: 45], type: EPathTypeDir, state: EPathStateNotExist), drop stepId: 5000046, drop txId: 281474976710761, at schemeshard: 72057594046678944 2026-01-08T15:36:51.887133Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:460: TTxOperationPropose Complete, txId: 281474976710762, response: Status: StatusPathDoesNotExist Reason: "Check failed: path: \'/MyRoot/export-105\', error: path has been deleted (id: [OwnerId: 72057594046678944, LocalPathId: 45], type: EPathTypeDir, state: EPathStateNotExist), drop stepId: 5000046, drop txId: 281474976710761" TxId: 281474976710762 SchemeshardId: 72057594046678944 PathId: 45 PathDropTxId: 281474976710761, at schemeshard: 72057594046678944 2026-01-08T15:36:51.887468Z node 6 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 281474976710762, database: /MyRoot, subject: , status: StatusPathDoesNotExist, reason: Check failed: path: '/MyRoot/export-105', error: path has been deleted (id: [OwnerId: 72057594046678944, LocalPathId: 45], type: EPathTypeDir, state: EPathStateNotExist), drop stepId: 5000046, drop txId: 281474976710761, operation: DROP DIRECTORY, path: /MyRoot/export-105 2026-01-08T15:36:51.887711Z node 6 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:7145: Handle: TEvModifySchemeTransactionResult: txId# 281474976710762, status# StatusPathDoesNotExist 2026-01-08T15:36:51.887836Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:7147: Message: Status: StatusPathDoesNotExist Reason: "Check failed: path: \'/MyRoot/export-105\', error: path has been deleted (id: [OwnerId: 72057594046678944, LocalPathId: 45], type: EPathTypeDir, state: EPathStateNotExist), drop stepId: 5000046, drop txId: 281474976710761" TxId: 281474976710762 SchemeshardId: 72057594046678944 PathId: 45 PathDropTxId: 281474976710761 2026-01-08T15:36:51.887922Z node 6 :EXPORT DEBUG: schemeshard_export__create.cpp:372: TExport::TTxProgress: DoExecute 2026-01-08T15:36:51.887978Z node 6 :EXPORT DEBUG: schemeshard_export__create.cpp:999: TExport::TTxProgress: OnModifyResult: txId# 281474976710762, status# StatusPathDoesNotExist 2026-01-08T15:36:51.888079Z node 6 :EXPORT TRACE: schemeshard_export__create.cpp:1000: Message: Status: StatusPathDoesNotExist Reason: "Check failed: path: \'/MyRoot/export-105\', error: path has been deleted (id: [OwnerId: 72057594046678944, LocalPathId: 45], type: EPathTypeDir, state: EPathStateNotExist), drop stepId: 5000046, drop txId: 281474976710761" TxId: 281474976710762 SchemeshardId: 72057594046678944 PathId: 45 PathDropTxId: 281474976710761 2026-01-08T15:36:51.888230Z node 6 :EXPORT INFO: schemeshard_export__create.cpp:1181: TExport::TTxProgress: Wait for completion: info# { Id: 105 Uid: '' Kind: S3 DomainPathId: [OwnerId: 72057594046678944, LocalPathId: 1] ExportPathId: [OwnerId: 72057594046678944, LocalPathId: 45] UserSID: '(empty maybe)' PeerName: '' State: Dropping WaitTxId: 281474976710761 Issue: '' Items: 1 PendingItems: 0 PendingDropItems: 0 }, itemIdx# 4294967295, txId# 281474976710761 2026-01-08T15:36:51.895201Z node 6 :EXPORT DEBUG: schemeshard_export__create.cpp:392: TExport::TTxProgress: DoComplete 2026-01-08T15:36:51.895432Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:123: NotifyTxCompletion, unknown transaction, txId: 281474976710761, at schemeshard: 
72057594046678944 2026-01-08T15:36:51.895648Z node 6 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:7196: Handle: TEvNotifyTxCompletionResult: txId# 281474976710761 2026-01-08T15:36:51.895750Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:7198: Message: TxId: 281474976710761 2026-01-08T15:36:51.895820Z node 6 :EXPORT DEBUG: schemeshard_export__create.cpp:372: TExport::TTxProgress: DoExecute 2026-01-08T15:36:51.895889Z node 6 :EXPORT DEBUG: schemeshard_export__create.cpp:1333: TExport::TTxProgress: OnNotifyResult: txId# 281474976710761 2026-01-08T15:36:51.895942Z node 6 :EXPORT DEBUG: schemeshard_export__create.cpp:1364: TExport::TTxProgress: OnNotifyResult: txId# 281474976710761, id# 105, itemIdx# 4294967295 2026-01-08T15:36:51.897852Z node 6 :EXPORT DEBUG: schemeshard_export__create.cpp:392: TExport::TTxProgress: DoComplete TestWaitNotification wait txId: 105 2026-01-08T15:36:51.898097Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:213: tests -- TTxNotificationSubscriber for txId 105: send EvNotifyTxCompletion 2026-01-08T15:36:51.898155Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:259: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 105 2026-01-08T15:36:51.898654Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:123: NotifyTxCompletion, unknown transaction, txId: 105, at schemeshard: 72057594046678944 2026-01-08T15:36:51.898765Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:230: tests -- TTxNotificationSubscriber for txId 105: got EvNotifyTxCompletionResult 2026-01-08T15:36:51.898804Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:239: tests -- TTxNotificationSubscriber for txId 105: satisfy waiter [6:1754:3542] TestWaitNotification: OK eventTxId 105 |88.6%| [TM] {BAZEL_UPLOAD} ydb/core/tx/schemeshard/ut_export/unittest >> TConsoleTests::TestGetUnknownTenantStatusExtSubdomain [GOOD] >> TConsoleTests::TestListTenants >> EraseRowsTests::ConditionalEraseRowsShouldNotErase >> EraseRowsTests::ConditionalEraseRowsShouldEraseOnUint64Seconds [GOOD] >> EraseRowsTests::ConditionalEraseRowsShouldEraseOnUint64NanoSeconds >> TBSV::ShouldLimitBlockStoreVolumeDropRate >> EraseRowsTests::ConditionalEraseRowsShouldEraseOnPgInt8Seconds [GOOD] >> EraseRowsTests::ConditionalEraseRowsShouldEraseOnPgInt8Milliseconds >> TestSetCloudPermissions::CanSetPermissionsForRootDb >> EraseRowsTests::ConditionalEraseRowsShouldEraseOnDyNumberMilliSeconds [GOOD] >> EraseRowsTests::ConditionalEraseRowsShouldEraseOnDyNumberNanoSeconds |88.6%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_bsvolume/unittest |88.6%| [TM] {BAZEL_UPLOAD} ydb/core/tx/schemeshard/ut_bsvolume/unittest >> EraseRowsTests::ConditionalEraseRowsShouldEraseOnPgInt8Microseconds [GOOD] >> EraseRowsTests::ConditionalEraseRowsShouldEraseOnPgDate |88.6%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_bsvolume/unittest |88.6%| [TM] {BAZEL_UPLOAD} ydb/core/tx/schemeshard/ut_bsvolume/unittest >> TConsoleTests::TestAlterUnknownTenant [GOOD] >> TConsoleTests::TestAlterUnknownTenantExtSubdomain >> EraseRowsTests::ConditionalEraseRowsShouldNotEraseModifiedRows [GOOD] >> EraseRowsTests::EraseRowsFromReplicatedTable >> EraseRowsTests::ConditionalEraseRowsShouldEraseOnDate32 [GOOD] >> EraseRowsTests::ConditionalEraseRowsShouldEraseOnDatetime64 |88.6%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_bsvolume/unittest |88.6%| [TM] {BAZEL_UPLOAD} ydb/core/tx/schemeshard/ut_bsvolume/unittest >> EraseRowsTests::EraseRowsShouldFailOnVariousErrors [GOOD] |88.6%| [TM] 
{asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_bsvolume/unittest |88.6%| [TM] {BAZEL_UPLOAD} ydb/core/tx/schemeshard/ut_bsvolume/unittest >> TConsoleTests::TestRemoveSharedTenantWoServerlessTenants [GOOD] >> TConsoleTests::TestRemoveSharedTenantWithServerlessTenants |88.6%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_bsvolume/unittest |88.6%| [TM] {BAZEL_UPLOAD} ydb/core/tx/schemeshard/ut_bsvolume/unittest >> TCacheTestWithDrops::LookupErrorUponEviction [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/datashard/ut_erase_rows/unittest >> EraseRowsTests::EraseRowsShouldFailOnVariousErrors [GOOD] Test command err: ... waiting for SysViewsRoster update finished 2026-01-08T15:36:49.780722Z node 1 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:125: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, got undelivered to scheme cache: ActorUnknown 2026-01-08T15:36:49.903266Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:114:2161], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2026-01-08T15:36:49.903605Z node 1 :METADATA_PROVIDER ERROR: log.cpp:841: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2026-01-08T15:36:49.903737Z node 1 :METADATA_PROVIDER ERROR: log.cpp:841: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2026-01-08T15:36:50.183078Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2026-01-08T15:36:50.183189Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2026-01-08T15:36:50.261565Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1374: Notification cookie mismatch for subscription [1:34:2081] 1767886606950539 != 1767886606950543 2026-01-08T15:36:50.270631Z node 1 :IMPORT WARN: schemeshard_import.cpp:440: Table profiles were not loaded 2026-01-08T15:36:50.314337Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2026-01-08T15:36:50.418311Z node 1 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: PathErrorUnknown, path: Root/.metadata/script_executions ... waiting for SysViewsRoster update finished (done) 2026-01-08T15:36:50.687123Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2026-01-08T15:36:50.700846Z node 1 :TX_CONVEYOR ERROR: log.cpp:841: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2026-01-08T15:36:50.805686Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:36:50.842159Z node 1 :TX_DATASHARD INFO: datashard.cpp:374: TDataShard::OnActivateExecutor: tablet 72075186224037888 actor [1:886:2765] 2026-01-08T15:36:50.842447Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:661: TxInitSchema.Execute 2026-01-08T15:36:50.883092Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:727: TxInitSchema.Complete 2026-01-08T15:36:50.883202Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:50: TDataShard::TTxInit::Execute 2026-01-08T15:36:50.884578Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1324: LoadChangeRecords: QueueSize: 0, at tablet: 72075186224037888 2026-01-08T15:36:50.884648Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1380: LoadLockChangeRecords at tablet: 72075186224037888 2026-01-08T15:36:50.884695Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1429: LoadChangeRecordCommits at tablet: 72075186224037888 2026-01-08T15:36:50.884997Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:94: TDataShard::TTxInit::Complete 2026-01-08T15:36:50.885119Z node 1 :TX_DATASHARD 
DEBUG: datashard__init.cpp:102: TDataShard::TTxInitRestored::Execute 2026-01-08T15:36:50.885224Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:142: DataShard 72075186224037888 persisting started state actor id [1:901:2765] in generation 1 2026-01-08T15:36:50.895891Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:151: TDataShard::TTxInitRestored::Complete 2026-01-08T15:36:50.919354Z node 1 :TX_DATASHARD INFO: datashard.cpp:418: Switched to work state WaitScheme tabletId 72075186224037888 2026-01-08T15:36:50.919541Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:458: 72075186224037888 not sending time cast registration request in state WaitScheme: missing processing params 2026-01-08T15:36:50.919632Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1259: Change sender created: at tablet: 72075186224037888, actorId: [1:903:2775] 2026-01-08T15:36:50.919695Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1264: Trying to activate change sender: at tablet: 72075186224037888 2026-01-08T15:36:50.919724Z node 1 :TX_DATASHARD INFO: datashard.cpp:1281: Cannot activate change sender: at tablet: 72075186224037888, state: WaitScheme 2026-01-08T15:36:50.919751Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2026-01-08T15:36:50.920182Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 72075186224037888 2026-01-08T15:36:50.920262Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 72075186224037888 2026-01-08T15:36:50.920330Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2026-01-08T15:36:50.920359Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 0 2026-01-08T15:36:50.920394Z node 1 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186224037888 TxInFly 0 2026-01-08T15:36:50.920434Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2026-01-08T15:36:50.920748Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3740: Server connected at leader tablet# 72075186224037888, clientId# [1:882:2762], serverId# [1:887:2766], sessionId# [0:0:0] 2026-01-08T15:36:50.920879Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037888 2026-01-08T15:36:50.921062Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:133: Propose scheme transaction at tablet 72075186224037888 txId 281474976715657 ssId 72057594046644480 seqNo 2:1 2026-01-08T15:36:50.921147Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:221: Prepared scheme transaction txId 281474976715657 at tablet 72075186224037888 2026-01-08T15:36:50.922498Z node 1 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:180: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2026-01-08T15:36:50.933039Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 72075186224037888 2026-01-08T15:36:50.933144Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:468: 72075186224037888 not sending time cast registration request in state WaitScheme 2026-01-08T15:36:51.050450Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3740: Server connected at leader tablet# 72075186224037888, clientId# [1:917:2783], serverId# 
[1:919:2785], sessionId# [0:0:0] 2026-01-08T15:36:51.054680Z node 1 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:69: Planned transaction txId 281474976715657 at step 2000 at tablet 72075186224037888 { Transactions { TxId: 281474976715657 AckTo { RawX1: 0 RawX2: 0 } } Step: 2000 MediatorID: 72057594046382081 TabletID: 72075186224037888 } 2026-01-08T15:36:51.054751Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2026-01-08T15:36:51.054957Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2026-01-08T15:36:51.055003Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 1 2026-01-08T15:36:51.055047Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:333: Found ready operation [2000:281474976715657] in PlanQueue unit at 72075186224037888 2026-01-08T15:36:51.055262Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:634: LoadTxDetails at 72075186224037888 loaded tx from db 2000:281474976715657 keys extracted: 0 2026-01-08T15:36:51.055385Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 (dry run) active 1 active planned 1 immediate 0 planned 1 2026-01-08T15:36:51.055997Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2026-01-08T15:36:51.056057Z node 1 :TX_DATASHARD INFO: create_table_unit.cpp:69: Trying to CREATE TABLE at 72075186224037888 tableId# [OwnerId: 72057594046644480, LocalPathId: 38] schema version# 1 2026-01-08T15:36:51.057897Z node 1 :TX_DATASHARD INFO: datashard.cpp:476: Send registration request to time cast Ready tabletId 72075186224037888 mediators count is 1 coordinators count is 1 buckets per mediator 2 2026-01-08T15:36:51.058303Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 (dry run) active 0 active planned 0 immediate 0 planned 0 2026-01-08T15:36:51.059969Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3772: Got TEvMediatorTimecast::TEvRegisterTabletResult at 72075186224037888 time 0 2026-01-08T15:36:51.060018Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2026-01-08T15:36:51.060531Z node 1 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:98: Sending '{TEvPlanStepAccepted TabletId# 72075186224037888 step# 2000} 2026-01-08T15:36:51.060599Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2026-01-08T15:36:51.061416Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2026-01-08T15:36:51.061463Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1264: Trying to activate change sender: at tablet: 72075186224037888 2026-01-08T15:36:51.061507Z node 1 :TX_DATASHARD INFO: datashard.cpp:1302: Change sender activated: at tablet: 72075186224037888 2026-01-08T15:36:51.061557Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:810: Complete [2000 : 281474976715657] from 72075186224037888 at tablet 72075186224037888 send result to client [1:400:2399], exec latency: 0 ms, propose latency: 0 ms 2026-01-08T15:36:51.061598Z node 1 :TX_DATASHARD INFO: datashard.cpp:1599: 72075186224037888 Sending notify to schemeshard 72057594046644480 txId 281474976715657 state Ready TxInFly 0 2026-01-08T15:36:51.061668Z node 1 :TX_DATASHARD DEBUG: 
cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2026-01-08T15:36:51.073294Z node 1 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:180: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2026-01-08T15:36:51.074924Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:2965: Handle TEvSchemaChangedResult 281474976715657 datashard 72075186224037888 state Ready 2026-01-08T15:36:51.075014Z node 1 :TX_DATASHARD DEBUG: datashard__schema_changed.cpp:22: 72075186224037888 Got TEvSchemaChangedResult from SS at 72075186224037888 2026-01-08T15:36:51.075932Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3790: Got TEvMediatorTimecast::TEvSubscribeReadStepResult at 72075186224037888 coordinator 72057594046316545 last step 0 next step 2000 2026-01-08T15:36:51.092370Z node 1 :KQP_WORKLOAD_SERV ... hard.cpp:1264: Trying to activate change sender: at tablet: 72075186224037888 2026-01-08T15:36:56.138170Z node 2 :TX_DATASHARD INFO: datashard.cpp:1281: Cannot activate change sender: at tablet: 72075186224037888, state: WaitScheme 2026-01-08T15:36:56.138201Z node 2 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2026-01-08T15:36:56.138627Z node 2 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 72075186224037888 2026-01-08T15:36:56.138759Z node 2 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 72075186224037888 2026-01-08T15:36:56.138862Z node 2 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2026-01-08T15:36:56.138910Z node 2 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 0 2026-01-08T15:36:56.138955Z node 2 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186224037888 TxInFly 0 2026-01-08T15:36:56.139009Z node 2 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2026-01-08T15:36:56.139113Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:3740: Server connected at leader tablet# 72075186224037888, clientId# [2:883:2763], serverId# [2:888:2766], sessionId# [0:0:0] 2026-01-08T15:36:56.139621Z node 2 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037888 2026-01-08T15:36:56.139919Z node 2 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:133: Propose scheme transaction at tablet 72075186224037888 txId 281474976715657 ssId 72057594046644480 seqNo 2:1 2026-01-08T15:36:56.140012Z node 2 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:221: Prepared scheme transaction txId 281474976715657 at tablet 72075186224037888 2026-01-08T15:36:56.141905Z node 2 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:180: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2026-01-08T15:36:56.153067Z node 2 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 72075186224037888 2026-01-08T15:36:56.153221Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:468: 72075186224037888 not sending time cast registration request in state WaitScheme 2026-01-08T15:36:56.271260Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:3740: Server connected at leader tablet# 72075186224037888, clientId# [2:917:2783], 
serverId# [2:919:2785], sessionId# [0:0:0] 2026-01-08T15:36:56.271928Z node 2 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:69: Planned transaction txId 281474976715657 at step 2000 at tablet 72075186224037888 { Transactions { TxId: 281474976715657 AckTo { RawX1: 0 RawX2: 0 } } Step: 2000 MediatorID: 72057594046382081 TabletID: 72075186224037888 } 2026-01-08T15:36:56.271993Z node 2 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2026-01-08T15:36:56.272602Z node 2 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2026-01-08T15:36:56.272666Z node 2 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 1 2026-01-08T15:36:56.272717Z node 2 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:333: Found ready operation [2000:281474976715657] in PlanQueue unit at 72075186224037888 2026-01-08T15:36:56.273015Z node 2 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:634: LoadTxDetails at 72075186224037888 loaded tx from db 2000:281474976715657 keys extracted: 0 2026-01-08T15:36:56.273164Z node 2 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 (dry run) active 1 active planned 1 immediate 0 planned 1 2026-01-08T15:36:56.273981Z node 2 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2026-01-08T15:36:56.274065Z node 2 :TX_DATASHARD INFO: create_table_unit.cpp:69: Trying to CREATE TABLE at 72075186224037888 tableId# [OwnerId: 72057594046644480, LocalPathId: 38] schema version# 1 2026-01-08T15:36:56.274599Z node 2 :TX_DATASHARD INFO: datashard.cpp:476: Send registration request to time cast Ready tabletId 72075186224037888 mediators count is 1 coordinators count is 1 buckets per mediator 2 2026-01-08T15:36:56.275103Z node 2 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 (dry run) active 0 active planned 0 immediate 0 planned 0 2026-01-08T15:36:56.277227Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:3772: Got TEvMediatorTimecast::TEvRegisterTabletResult at 72075186224037888 time 0 2026-01-08T15:36:56.277298Z node 2 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2026-01-08T15:36:56.277919Z node 2 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:98: Sending '{TEvPlanStepAccepted TabletId# 72075186224037888 step# 2000} 2026-01-08T15:36:56.278004Z node 2 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2026-01-08T15:36:56.279302Z node 2 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2026-01-08T15:36:56.279358Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:1264: Trying to activate change sender: at tablet: 72075186224037888 2026-01-08T15:36:56.279408Z node 2 :TX_DATASHARD INFO: datashard.cpp:1302: Change sender activated: at tablet: 72075186224037888 2026-01-08T15:36:56.279496Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:810: Complete [2000 : 281474976715657] from 72075186224037888 at tablet 72075186224037888 send result to client [2:399:2398], exec latency: 0 ms, propose latency: 0 ms 2026-01-08T15:36:56.279548Z node 2 :TX_DATASHARD INFO: datashard.cpp:1599: 72075186224037888 Sending notify to schemeshard 72057594046644480 txId 281474976715657 state Ready TxInFly 0 2026-01-08T15:36:56.279677Z node 2 :TX_DATASHARD DEBUG: 
cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2026-01-08T15:36:56.281033Z node 2 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:180: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2026-01-08T15:36:56.283011Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:3790: Got TEvMediatorTimecast::TEvSubscribeReadStepResult at 72075186224037888 coordinator 72057594046316545 last step 0 next step 2000 2026-01-08T15:36:56.283337Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:2965: Handle TEvSchemaChangedResult 281474976715657 datashard 72075186224037888 state Ready 2026-01-08T15:36:56.283404Z node 2 :TX_DATASHARD DEBUG: datashard__schema_changed.cpp:22: 72075186224037888 Got TEvSchemaChangedResult from SS at 72075186224037888 2026-01-08T15:36:56.288796Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:3740: Server connected at leader tablet# 72075186224037888, clientId# [2:953:2811], serverId# [2:954:2812], sessionId# [0:0:0] 2026-01-08T15:36:56.288927Z node 2 :TX_DATASHARD INFO: datashard__op_rows.cpp:27: TTxDirectBase(48) Execute: at tablet# 72075186224037888 2026-01-08T15:36:56.300040Z node 2 :TX_DATASHARD INFO: datashard__op_rows.cpp:81: TTxDirectBase(48) Complete: at tablet# 72075186224037888 2026-01-08T15:36:56.300143Z node 2 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2026-01-08T15:36:56.300540Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:3758: Server disconnected at leader tablet# 72075186224037888, clientId# [2:953:2811], serverId# [2:954:2812], sessionId# [0:0:0] 2026-01-08T15:36:56.302853Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:3740: Server connected at leader tablet# 72075186224037888, clientId# [2:959:2817], serverId# [2:960:2818], sessionId# [0:0:0] 2026-01-08T15:36:56.302988Z node 2 :TX_DATASHARD INFO: datashard__op_rows.cpp:27: TTxDirectBase(48) Execute: at tablet# 72075186224037888 2026-01-08T15:36:56.303203Z node 2 :TX_DATASHARD INFO: datashard__op_rows.cpp:81: TTxDirectBase(48) Complete: at tablet# 72075186224037888 2026-01-08T15:36:56.303255Z node 2 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2026-01-08T15:36:56.303480Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:3758: Server disconnected at leader tablet# 72075186224037888, clientId# [2:959:2817], serverId# [2:960:2818], sessionId# [0:0:0] 2026-01-08T15:36:56.305421Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:3740: Server connected at leader tablet# 72075186224037888, clientId# [2:964:2822], serverId# [2:965:2823], sessionId# [0:0:0] 2026-01-08T15:36:56.305611Z node 2 :TX_DATASHARD INFO: datashard__op_rows.cpp:27: TTxDirectBase(48) Execute: at tablet# 72075186224037888 2026-01-08T15:36:56.305811Z node 2 :TX_DATASHARD INFO: datashard__op_rows.cpp:81: TTxDirectBase(48) Complete: at tablet# 72075186224037888 2026-01-08T15:36:56.305857Z node 2 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2026-01-08T15:36:56.306075Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:3758: Server disconnected at leader tablet# 72075186224037888, clientId# [2:964:2822], serverId# [2:965:2823], sessionId# [0:0:0] 2026-01-08T15:36:56.307917Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:3740: Server connected at leader tablet# 72075186224037888, clientId# [2:969:2827], serverId# [2:970:2828], sessionId# [0:0:0] 2026-01-08T15:36:56.308072Z node 2 
:TX_DATASHARD INFO: datashard__op_rows.cpp:27: TTxDirectBase(48) Execute: at tablet# 72075186224037888 2026-01-08T15:36:56.308256Z node 2 :TX_DATASHARD INFO: datashard__op_rows.cpp:81: TTxDirectBase(48) Complete: at tablet# 72075186224037888 2026-01-08T15:36:56.308302Z node 2 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2026-01-08T15:36:56.308508Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:3758: Server disconnected at leader tablet# 72075186224037888, clientId# [2:969:2827], serverId# [2:970:2828], sessionId# [0:0:0] 2026-01-08T15:36:56.310356Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:3740: Server connected at leader tablet# 72075186224037888, clientId# [2:974:2832], serverId# [2:975:2833], sessionId# [0:0:0] 2026-01-08T15:36:56.310519Z node 2 :TX_DATASHARD INFO: datashard__op_rows.cpp:27: TTxDirectBase(48) Execute: at tablet# 72075186224037888 2026-01-08T15:36:56.310828Z node 2 :TX_DATASHARD INFO: datashard__op_rows.cpp:81: TTxDirectBase(48) Complete: at tablet# 72075186224037888 2026-01-08T15:36:56.310882Z node 2 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2026-01-08T15:36:56.311089Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:3758: Server disconnected at leader tablet# 72075186224037888, clientId# [2:974:2832], serverId# [2:975:2833], sessionId# [0:0:0] 2026-01-08T15:36:56.313028Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:3740: Server connected at leader tablet# 72075186224037888, clientId# [2:979:2837], serverId# [2:980:2838], sessionId# [0:0:0] 2026-01-08T15:36:56.313181Z node 2 :TX_DATASHARD INFO: datashard__op_rows.cpp:27: TTxDirectBase(48) Execute: at tablet# 72075186224037888 2026-01-08T15:36:56.313382Z node 2 :TX_DATASHARD INFO: datashard__op_rows.cpp:81: TTxDirectBase(48) Complete: at tablet# 72075186224037888 2026-01-08T15:36:56.313428Z node 2 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2026-01-08T15:36:56.313632Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:3758: Server disconnected at leader tablet# 72075186224037888, clientId# [2:979:2837], serverId# [2:980:2838], sessionId# [0:0:0] |88.6%| [TM] {BAZEL_UPLOAD} ydb/core/tx/datashard/ut_erase_rows/unittest |88.6%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_bsvolume/unittest |88.6%| [TM] {BAZEL_UPLOAD} ydb/core/tx/schemeshard/ut_bsvolume/unittest >> DistributedEraseTests::ConditionalEraseRowsShouldFailOnVariousErrors [GOOD] >> DistributedEraseTests::ConditionalEraseRowsShouldFailOnSplit >> TestSetCloudPermissions::CanSetPermissionsForRootDb [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/scheme_board/ut_cache/unittest >> TCacheTestWithDrops::LookupErrorUponEviction [GOOD] Test command err: 2026-01-08T15:36:23.897038Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7755: Cannot subscribe to console configs 2026-01-08T15:36:23.897108Z node 1 :IMPORT WARN: schemeshard_import.cpp:440: Table profiles were not loaded ... 
waiting for SysViewsRoster update finished FAKE_COORDINATOR: Add transaction: 281474976710657 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 281474976710657 at step: 5000001 FAKE_COORDINATOR: Erasing txId 281474976710657 FAKE_COORDINATOR: Add transaction: 281474976710671 at step: 5000002 FAKE_COORDINATOR: advance: minStep5000002 State->FrontStep: 5000001 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 281474976710671 at step: 5000002 FAKE_COORDINATOR: Add transaction: 281474976710666 at step: 5000003 FAKE_COORDINATOR: advance: minStep5000002 State->FrontStep: 5000002 FAKE_COORDINATOR: Add transaction: 281474976710670 at step: 5000004 FAKE_COORDINATOR: advance: minStep5000002 State->FrontStep: 5000002 FAKE_COORDINATOR: Add transaction: 281474976710665 at step: 5000005 FAKE_COORDINATOR: advance: minStep5000002 State->FrontStep: 5000002 FAKE_COORDINATOR: Add transaction: 281474976710669 at step: 5000006 FAKE_COORDINATOR: advance: minStep5000002 State->FrontStep: 5000002 FAKE_COORDINATOR: Add transaction: 281474976710664 at step: 5000007 FAKE_COORDINATOR: advance: minStep5000002 State->FrontStep: 5000002 FAKE_COORDINATOR: Add transaction: 281474976710668 at step: 5000008 FAKE_COORDINATOR: advance: minStep5000002 State->FrontStep: 5000002 FAKE_COORDINATOR: Add transaction: 281474976710663 at step: 5000009 FAKE_COORDINATOR: advance: minStep5000002 State->FrontStep: 5000002 FAKE_COORDINATOR: Add transaction: 281474976710667 at step: 5000010 FAKE_COORDINATOR: advance: minStep5000002 State->FrontStep: 5000002 FAKE_COORDINATOR: Add transaction: 281474976710662 at step: 5000011 FAKE_COORDINATOR: advance: minStep5000002 State->FrontStep: 5000002 FAKE_COORDINATOR: Add transaction: 281474976710661 at step: 5000012 FAKE_COORDINATOR: advance: minStep5000002 State->FrontStep: 5000002 FAKE_COORDINATOR: Add transaction: 281474976710660 at step: 5000013 FAKE_COORDINATOR: advance: minStep5000002 State->FrontStep: 5000002 FAKE_COORDINATOR: Add transaction: 281474976710659 at step: 5000014 FAKE_COORDINATOR: advance: minStep5000002 State->FrontStep: 5000002 FAKE_COORDINATOR: Add transaction: 281474976710658 at step: 5000015 FAKE_COORDINATOR: advance: minStep5000002 State->FrontStep: 5000002 FAKE_COORDINATOR: Add transaction: 281474976710692 at step: 5000016 FAKE_COORDINATOR: advance: minStep5000002 State->FrontStep: 5000002 FAKE_COORDINATOR: Add transaction: 281474976710687 at step: 5000017 FAKE_COORDINATOR: advance: minStep5000002 State->FrontStep: 5000002 FAKE_COORDINATOR: Add transaction: 281474976710691 at step: 5000018 FAKE_COORDINATOR: advance: minStep5000002 State->FrontStep: 5000002 FAKE_COORDINATOR: Add transaction: 281474976710686 at step: 5000019 FAKE_COORDINATOR: advance: minStep5000002 State->FrontStep: 5000002 FAKE_COORDINATOR: Add transaction: 281474976710690 at step: 5000020 FAKE_COORDINATOR: advance: minStep5000002 State->FrontStep: 5000002 FAKE_COORDINATOR: Add transaction: 281474976710685 at step: 5000021 FAKE_COORDINATOR: advance: minStep5000002 State->FrontStep: 5000002 FAKE_COORDINATOR: Add transaction: 281474976710689 at step: 5000022 FAKE_COORDINATOR: advance: minStep5000002 State->FrontStep: 5000002 FAKE_COORDINATOR: Add transaction: 281474976710684 at step: 5000023 FAKE_COORDINATOR: advance: minStep5000002 State->FrontStep: 5000002 FAKE_COORDINATOR: Add transaction: 281474976710688 at step: 5000024 FAKE_COORDINATOR: advance: minStep5000002 State->FrontStep: 5000002 
FAKE_COORDINATOR: Add transaction: 281474976710683 at step: 5000025 FAKE_COORDINATOR: advance: minStep5000002 State->FrontStep: 5000002 FAKE_COORDINATOR: Add transaction: 281474976710682 at step: 5000026 FAKE_COORDINATOR: advance: minStep5000002 State->FrontStep: 5000002 FAKE_COORDINATOR: Add transaction: 281474976710677 at step: 5000027 FAKE_COORDINATOR: advance: minStep5000002 State->FrontStep: 5000002 FAKE_COORDINATOR: Add transaction: 281474976710681 at step: 5000028 FAKE_COORDINATOR: advance: minStep5000002 State->FrontStep: 5000002 FAKE_COORDINATOR: Add transaction: 281474976710676 at step: 5000029 FAKE_COORDINATOR: advance: minStep5000002 State->FrontStep: 5000002 FAKE_COORDINATOR: Add transaction: 281474976710680 at step: 5000030 FAKE_COORDINATOR: advance: minStep5000002 State->FrontStep: 5000002 FAKE_COORDINATOR: Add transaction: 281474976710675 at step: 5000031 FAKE_COORDINATOR: advance: minStep5000002 State->FrontStep: 5000002 FAKE_COORDINATOR: Add transaction: 281474976710679 at step: 5000032 FAKE_COORDINATOR: advance: minStep5000002 State->FrontStep: 5000002 FAKE_COORDINATOR: Add transaction: 281474976710674 at step: 5000033 FAKE_COORDINATOR: advance: minStep5000002 State->FrontStep: 5000002 FAKE_COORDINATOR: Add transaction: 281474976710678 at step: 5000034 FAKE_COORDINATOR: advance: minStep5000002 State->FrontStep: 5000002 FAKE_COORDINATOR: Add transaction: 281474976710673 at step: 5000035 FAKE_COORDINATOR: advance: minStep5000002 State->FrontStep: 5000002 FAKE_COORDINATOR: Add transaction: 281474976710672 at step: 5000036 FAKE_COORDINATOR: advance: minStep5000002 State->FrontStep: 5000002 FAKE_COORDINATOR: Erasing txId 281474976710671 FAKE_COORDINATOR: advance: minStep5000003 State->FrontStep: 5000002 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 281474976710666 at step: 5000003 FAKE_COORDINATOR: Erasing txId 281474976710666 FAKE_COORDINATOR: advance: minStep5000004 State->FrontStep: 5000003 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 281474976710670 at step: 5000004 FAKE_COORDINATOR: Erasing txId 281474976710670 FAKE_COORDINATOR: advance: minStep5000005 State->FrontStep: 5000004 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 281474976710665 at step: 5000005 FAKE_COORDINATOR: Erasing txId 281474976710665 FAKE_COORDINATOR: advance: minStep5000006 State->FrontStep: 5000005 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 281474976710669 at step: 5000006 FAKE_COORDINATOR: Erasing txId 281474976710669 FAKE_COORDINATOR: advance: minStep5000007 State->FrontStep: 5000006 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 281474976710664 at step: 5000007 FAKE_COORDINATOR: Erasing txId 281474976710664 FAKE_COORDINATOR: advance: minStep5000008 State->FrontStep: 5000007 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 281474976710668 at step: 5000008 FAKE_COORDINATOR: Erasing txId 281474976710668 FAKE_COORDINATOR: advance: minStep5000009 State->FrontStep: 5000008 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 281474976710663 at step: 5000009 FAKE_COORDINATOR: Erasing txId 281474976710663 FAKE_COORDINATOR: advance: minStep5000010 State->FrontStep: 5000009 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 281474976710667 at step: 5000010 FAKE_COORDINATOR: Erasing txId 281474976710667 FAKE_COORDINATOR: advance: minStep5000011 State->FrontStep: 5000010 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 281474976710662 at step: 
5000011 FAKE_COORDINATOR: Erasing txId 281474976710662 FAKE_COORDINATOR: advance: minStep5000012 State->FrontStep: 5000011 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 281474976710661 at step: 5000012 FAKE_COORDINATOR: Erasing txId 281474976710661 FAKE_COORDINATOR: advance: minStep5000013 State->FrontStep: 5000012 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 281474976710660 at step: 5000013 FAKE_COORDINATOR: Erasing txId 281474976710660 FAKE_COORDINATOR: advance: minStep5000014 State->FrontStep: 5000013 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 281474976710659 at step: 5000014 FAKE_COORDINATOR: Erasing txId 281474976710659 FAKE_COORDINATOR: advance: minStep5000015 State->FrontStep: 5000014 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 281474976710658 at step: 5000015 FAKE_COORDINATOR: Erasing txId 281474976710658 FAKE_COORDINATOR: advance: minStep5000016 State->FrontStep: 5000015 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 281474976710692 at step: 5000016 FAKE_COORDINATOR: Erasing txId 281474976710692 FAKE_COORDINATOR: advance: minStep5000017 State->FrontStep: 5000016 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 281474976710687 at step: 5000017 FAKE_COORDINATOR: Erasing txId 281474976710687 FAKE_COORDINATOR: advance: minStep5000018 State->FrontStep: 5000017 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 281474976710691 at step: 5000018 FAKE_COORDINATOR: Erasing txId 281474976710691 FAKE_COORDINATOR: advance: minStep5000019 State->FrontStep: 5000018 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 281474976710686 at step: 5000019 FAKE_COORDINATOR: Erasing txId 281474976710686 FAKE_COORDINATOR: advance: minStep5000020 State->FrontStep: 5000019 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 281474976710690 at step: 5000020 FAKE_COORDINATOR: Erasing txId 281474976710690 FAKE_COORDINATOR: advance: minStep5000021 State->FrontStep: 5000020 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 281474976710685 at step: 5000021 FAKE_COORDINATOR: Erasing txId 281474976710685 FAKE_COORDINATOR: advance: minStep5000022 State->FrontStep: 5000021 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 281474976710689 at step: 5000022 FAKE_COORDINATOR: Erasing txId 281474976710689 FAKE_COORDINATOR: advance: minStep5000023 State->FrontStep: 5000022 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 281474976710684 at step: 5000023 FAKE_COORDINATOR: Erasing txId 281474976710684 FAKE_COORDINATOR: advance: minStep5000024 State->FrontStep: 5000023 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 281474976710688 at step: 5000024 FAKE_COORDINATOR: Erasing txId 281474976710688 FAKE_COORDINATOR: advance: minStep5000025 State->FrontStep: 5000024 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 281474976710683 at step: 5000025 FAKE_COORDINATOR: Erasing txId 281474976710683 FAKE_COORDINATOR: advance: minStep5000026 State->FrontStep: 5000025 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 281474976710682 at step: 5000026 FAKE_COORDINATOR: Erasing txId 281474976710682 FAKE_COORDINATOR: advance: minStep5000027 State->FrontStep: 5000026 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 281474976710677 at step: 5000027 FAKE_COORDINATOR: Erasing txId 281474976710677 FAKE_COORDINATOR: advance: minStep5000028 State->FrontStep: 5000027 
FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 281474976710681 at step: 5000028 FAKE_COORDINATOR: Erasing txId 281474976710681 FAKE_COORDINATOR: advance: minStep5000029 State->FrontStep: 5000028 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 281474976710676 at step: 5000029 FAKE_COORDINATOR: Erasing txId 281474976710676 FAKE_COORDINATOR: advance: minStep5000030 State->FrontStep: 5000029 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 281474976710680 at step: 5000030 FAKE_COORDINATOR: Erasing txId 281474976710680 FAKE_COORDINATOR: advance: minStep5000031 State->FrontStep: 5000030 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 281474976710675 at step: 5000031 FAKE_COORDINATOR: Erasing txId 281474976710675 FAKE_COORDINATOR: advance: minStep5000032 State->FrontStep: 5000031 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 281474976710679 at step: 5000032 FAKE_COORDINATOR: Erasing txId 281474976710679 FAKE_COORDINATOR: advance: minStep5000033 State->FrontStep: 5000032 FAKE_COORDINATOR: ... tep: 5000008 FAKE_COORDINATOR: advance: minStep5000002 State->FrontStep: 5000002 FAKE_COORDINATOR: Add transaction: 281474976710663 at step: 5000009 FAKE_COORDINATOR: advance: minStep5000002 State->FrontStep: 5000002 FAKE_COORDINATOR: Add transaction: 281474976710667 at step: 5000010 FAKE_COORDINATOR: advance: minStep5000002 State->FrontStep: 5000002 FAKE_COORDINATOR: Add transaction: 281474976710662 at step: 5000011 FAKE_COORDINATOR: advance: minStep5000002 State->FrontStep: 5000002 FAKE_COORDINATOR: Add transaction: 281474976710661 at step: 5000012 FAKE_COORDINATOR: advance: minStep5000002 State->FrontStep: 5000002 FAKE_COORDINATOR: Add transaction: 281474976710660 at step: 5000013 FAKE_COORDINATOR: advance: minStep5000002 State->FrontStep: 5000002 FAKE_COORDINATOR: Add transaction: 281474976710659 at step: 5000014 FAKE_COORDINATOR: advance: minStep5000002 State->FrontStep: 5000002 FAKE_COORDINATOR: Add transaction: 281474976710658 at step: 5000015 FAKE_COORDINATOR: advance: minStep5000002 State->FrontStep: 5000002 FAKE_COORDINATOR: Add transaction: 281474976710692 at step: 5000016 FAKE_COORDINATOR: advance: minStep5000002 State->FrontStep: 5000002 FAKE_COORDINATOR: Add transaction: 281474976710687 at step: 5000017 FAKE_COORDINATOR: advance: minStep5000002 State->FrontStep: 5000002 FAKE_COORDINATOR: Add transaction: 281474976710691 at step: 5000018 FAKE_COORDINATOR: advance: minStep5000002 State->FrontStep: 5000002 FAKE_COORDINATOR: Add transaction: 281474976710686 at step: 5000019 FAKE_COORDINATOR: advance: minStep5000002 State->FrontStep: 5000002 FAKE_COORDINATOR: Add transaction: 281474976710690 at step: 5000020 FAKE_COORDINATOR: advance: minStep5000002 State->FrontStep: 5000002 FAKE_COORDINATOR: Add transaction: 281474976710685 at step: 5000021 FAKE_COORDINATOR: advance: minStep5000002 State->FrontStep: 5000002 FAKE_COORDINATOR: Add transaction: 281474976710689 at step: 5000022 FAKE_COORDINATOR: advance: minStep5000002 State->FrontStep: 5000002 FAKE_COORDINATOR: Add transaction: 281474976710684 at step: 5000023 FAKE_COORDINATOR: advance: minStep5000002 State->FrontStep: 5000002 FAKE_COORDINATOR: Add transaction: 281474976710688 at step: 5000024 FAKE_COORDINATOR: advance: minStep5000002 State->FrontStep: 5000002 FAKE_COORDINATOR: Add transaction: 281474976710683 at step: 5000025 FAKE_COORDINATOR: advance: minStep5000002 State->FrontStep: 5000002 FAKE_COORDINATOR: Add transaction: 281474976710682 at step: 5000026 
FAKE_COORDINATOR: advance: minStep5000002 State->FrontStep: 5000002 FAKE_COORDINATOR: Add transaction: 281474976710677 at step: 5000027 FAKE_COORDINATOR: advance: minStep5000002 State->FrontStep: 5000002 FAKE_COORDINATOR: Add transaction: 281474976710681 at step: 5000028 FAKE_COORDINATOR: advance: minStep5000002 State->FrontStep: 5000002 FAKE_COORDINATOR: Add transaction: 281474976710676 at step: 5000029 FAKE_COORDINATOR: advance: minStep5000002 State->FrontStep: 5000002 FAKE_COORDINATOR: Add transaction: 281474976710680 at step: 5000030 FAKE_COORDINATOR: advance: minStep5000002 State->FrontStep: 5000002 FAKE_COORDINATOR: Add transaction: 281474976710675 at step: 5000031 FAKE_COORDINATOR: advance: minStep5000002 State->FrontStep: 5000002 FAKE_COORDINATOR: Add transaction: 281474976710679 at step: 5000032 FAKE_COORDINATOR: advance: minStep5000002 State->FrontStep: 5000002 FAKE_COORDINATOR: Add transaction: 281474976710674 at step: 5000033 FAKE_COORDINATOR: advance: minStep5000002 State->FrontStep: 5000002 FAKE_COORDINATOR: Add transaction: 281474976710678 at step: 5000034 FAKE_COORDINATOR: advance: minStep5000002 State->FrontStep: 5000002 FAKE_COORDINATOR: Add transaction: 281474976710673 at step: 5000035 FAKE_COORDINATOR: advance: minStep5000002 State->FrontStep: 5000002 FAKE_COORDINATOR: Add transaction: 281474976710672 at step: 5000036 FAKE_COORDINATOR: advance: minStep5000002 State->FrontStep: 5000002 FAKE_COORDINATOR: Erasing txId 281474976710671 FAKE_COORDINATOR: advance: minStep5000003 State->FrontStep: 5000002 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 281474976710666 at step: 5000003 FAKE_COORDINATOR: Erasing txId 281474976710666 FAKE_COORDINATOR: advance: minStep5000004 State->FrontStep: 5000003 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 281474976710670 at step: 5000004 FAKE_COORDINATOR: Erasing txId 281474976710670 FAKE_COORDINATOR: advance: minStep5000005 State->FrontStep: 5000004 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 281474976710665 at step: 5000005 FAKE_COORDINATOR: Erasing txId 281474976710665 FAKE_COORDINATOR: advance: minStep5000006 State->FrontStep: 5000005 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 281474976710669 at step: 5000006 FAKE_COORDINATOR: Erasing txId 281474976710669 FAKE_COORDINATOR: advance: minStep5000007 State->FrontStep: 5000006 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 281474976710664 at step: 5000007 FAKE_COORDINATOR: Erasing txId 281474976710664 FAKE_COORDINATOR: advance: minStep5000008 State->FrontStep: 5000007 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 281474976710668 at step: 5000008 FAKE_COORDINATOR: Erasing txId 281474976710668 FAKE_COORDINATOR: advance: minStep5000009 State->FrontStep: 5000008 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 281474976710663 at step: 5000009 FAKE_COORDINATOR: Erasing txId 281474976710663 FAKE_COORDINATOR: advance: minStep5000010 State->FrontStep: 5000009 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 281474976710667 at step: 5000010 FAKE_COORDINATOR: Erasing txId 281474976710667 FAKE_COORDINATOR: advance: minStep5000011 State->FrontStep: 5000010 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 281474976710662 at step: 5000011 FAKE_COORDINATOR: Erasing txId 281474976710662 FAKE_COORDINATOR: advance: minStep5000012 State->FrontStep: 5000011 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 281474976710661 at 
step: 5000012 FAKE_COORDINATOR: Erasing txId 281474976710661 FAKE_COORDINATOR: advance: minStep5000013 State->FrontStep: 5000012 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 281474976710660 at step: 5000013 FAKE_COORDINATOR: Erasing txId 281474976710660 FAKE_COORDINATOR: advance: minStep5000014 State->FrontStep: 5000013 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 281474976710659 at step: 5000014 FAKE_COORDINATOR: Erasing txId 281474976710659 FAKE_COORDINATOR: advance: minStep5000015 State->FrontStep: 5000014 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 281474976710658 at step: 5000015 FAKE_COORDINATOR: Erasing txId 281474976710658 FAKE_COORDINATOR: advance: minStep5000016 State->FrontStep: 5000015 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 281474976710692 at step: 5000016 FAKE_COORDINATOR: Erasing txId 281474976710692 FAKE_COORDINATOR: advance: minStep5000017 State->FrontStep: 5000016 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 281474976710687 at step: 5000017 FAKE_COORDINATOR: Erasing txId 281474976710687 FAKE_COORDINATOR: advance: minStep5000018 State->FrontStep: 5000017 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 281474976710691 at step: 5000018 FAKE_COORDINATOR: Erasing txId 281474976710691 FAKE_COORDINATOR: advance: minStep5000019 State->FrontStep: 5000018 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 281474976710686 at step: 5000019 FAKE_COORDINATOR: Erasing txId 281474976710686 FAKE_COORDINATOR: advance: minStep5000020 State->FrontStep: 5000019 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 281474976710690 at step: 5000020 FAKE_COORDINATOR: Erasing txId 281474976710690 FAKE_COORDINATOR: advance: minStep5000021 State->FrontStep: 5000020 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 281474976710685 at step: 5000021 FAKE_COORDINATOR: Erasing txId 281474976710685 FAKE_COORDINATOR: advance: minStep5000022 State->FrontStep: 5000021 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 281474976710689 at step: 5000022 FAKE_COORDINATOR: Erasing txId 281474976710689 FAKE_COORDINATOR: advance: minStep5000023 State->FrontStep: 5000022 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 281474976710684 at step: 5000023 FAKE_COORDINATOR: Erasing txId 281474976710684 FAKE_COORDINATOR: advance: minStep5000024 State->FrontStep: 5000023 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 281474976710688 at step: 5000024 FAKE_COORDINATOR: Erasing txId 281474976710688 FAKE_COORDINATOR: advance: minStep5000025 State->FrontStep: 5000024 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 281474976710683 at step: 5000025 FAKE_COORDINATOR: Erasing txId 281474976710683 FAKE_COORDINATOR: advance: minStep5000026 State->FrontStep: 5000025 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 281474976710682 at step: 5000026 FAKE_COORDINATOR: Erasing txId 281474976710682 FAKE_COORDINATOR: advance: minStep5000027 State->FrontStep: 5000026 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 281474976710677 at step: 5000027 FAKE_COORDINATOR: Erasing txId 281474976710677 FAKE_COORDINATOR: advance: minStep5000028 State->FrontStep: 5000027 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 281474976710681 at step: 5000028 FAKE_COORDINATOR: Erasing txId 281474976710681 FAKE_COORDINATOR: advance: minStep5000029 State->FrontStep: 5000028 
FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 281474976710676 at step: 5000029 FAKE_COORDINATOR: Erasing txId 281474976710676 FAKE_COORDINATOR: advance: minStep5000030 State->FrontStep: 5000029 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 281474976710680 at step: 5000030 FAKE_COORDINATOR: Erasing txId 281474976710680 FAKE_COORDINATOR: advance: minStep5000031 State->FrontStep: 5000030 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 281474976710675 at step: 5000031 FAKE_COORDINATOR: Erasing txId 281474976710675 FAKE_COORDINATOR: advance: minStep5000032 State->FrontStep: 5000031 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 281474976710679 at step: 5000032 FAKE_COORDINATOR: Erasing txId 281474976710679 FAKE_COORDINATOR: advance: minStep5000033 State->FrontStep: 5000032 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 281474976710674 at step: 5000033 FAKE_COORDINATOR: Erasing txId 281474976710674 FAKE_COORDINATOR: advance: minStep5000034 State->FrontStep: 5000033 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 281474976710678 at step: 5000034 FAKE_COORDINATOR: Erasing txId 281474976710678 FAKE_COORDINATOR: advance: minStep5000035 State->FrontStep: 5000034 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 281474976710673 at step: 5000035 FAKE_COORDINATOR: Erasing txId 281474976710673 FAKE_COORDINATOR: advance: minStep5000036 State->FrontStep: 5000035 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 281474976710672 at step: 5000036 FAKE_COORDINATOR: Erasing txId 281474976710672 ... waiting for SysViewsRoster update finished (done) TestModificationResults wait txId: 1 2026-01-08T15:36:25.286727Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) FAKE_COORDINATOR: Add transaction: 1 at step: 5000037 FAKE_COORDINATOR: advance: minStep5000037 State->FrontStep: 5000036 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000037 FAKE_COORDINATOR: Erasing txId 1 TestModificationResult got TxId: 1, wait until txId: 1 |88.6%| [TM] {BAZEL_UPLOAD} ydb/core/tx/scheme_board/ut_cache/unittest >> KqpPg::PgUpdateCompoundKey+useSink [GOOD] >> KqpPg::PgUpdateCompoundKey-useSink >> TConsoleTests::TestListTenants [GOOD] >> TConsoleTests::TestListTenantsExtSubdomain >> EraseRowsTests::ConditionalEraseRowsShouldNotErase [GOOD] >> EraseRowsTests::ConditionalEraseRowsShouldFailOnVariousErrors ------- [TM] {asan, default-linux-x86_64, release} ydb/core/grpc_services/grpc_request_check_actor_ut/unittest >> TestSetCloudPermissions::CanSetPermissionsForRootDb [GOOD] Test command err: 2026-01-08T15:36:55.231947Z node 1 :METADATA_PROVIDER WARN: log.cpp:841: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7593015196148984874:2069];send_to=[0:7307199536658146131:7762515]; 2026-01-08T15:36:55.232010Z node 1 :METADATA_PROVIDER ERROR: log.cpp:841: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2026-01-08T15:36:55.472878Z node 1 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to 
check script execution tables existence, scheme status: LookupError, path: Root/.metadata/script_executions 2026-01-08T15:36:55.478927Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2026-01-08T15:36:55.479021Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2026-01-08T15:36:55.511924Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2026-01-08T15:36:55.600364Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1374: Notification cookie mismatch for subscription [1:7593015196148984845:2081] 1767886615230438 != 1767886615230441 2026-01-08T15:36:55.607851Z node 1 :IMPORT WARN: schemeshard_import.cpp:440: Table profiles were not loaded 2026-01-08T15:36:55.658362Z node 1 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: PathErrorUnknown, path: Root/.metadata/script_executions 2026-01-08T15:36:55.667002Z node 1 :TICKET_PARSER TRACE: ticket_parser_impl.h:519: Ticket **** (8E120919) asking for AccessServiceBulkAuthorization( ydb.schemas.getMetadata ydb.clusters.get ydb.clusters.manage ydb.databases.list ydb.clusters.monitor ydb.streams.write ydb.tables.write ydb.databases.create ydb.databases.connect ydb.tables.select) 2026-01-08T15:36:55.667096Z node 1 :GRPC_CLIENT DEBUG: grpc_service_client.h:83: [7ce6fe0f07d0] Connect to grpc://localhost:11964 2026-01-08T15:36:55.669987Z node 1 :GRPC_CLIENT DEBUG: grpc_service_client.h:122: [7ce6fe0f07d0] Request BulkAuthorizeRequest { iam_token: "**** (8E120919)" actions { items { resource_path { id: "cluster.folder98765" type: "resource-manager.folder" } permission: "ydb.schemas.getMetadata" } items { resource_path { id: "cluster.folder98765" type: "resource-manager.folder" } permission: "ydb.clusters.get" } items { resource_path { id: "cluster.folder98765" type: "resource-manager.folder" } permission: "ydb.clusters.manage" } items { resource_path { id: "cluster.folder98765" type: "resource-manager.folder" } permission: "ydb.databases.l...(truncated) } 2026-01-08T15:36:55.698644Z node 1 :GRPC_CLIENT DEBUG: grpc_service_client.h:109: [7ce6fe0f07d0] Response BulkAuthorizeResponse { subject { user_account { id: "user1" } } } 2026-01-08T15:36:55.699728Z node 1 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1875: Ticket **** (8E120919) () has now valid token of user1@as 2026-01-08T15:36:55.700084Z node 1 :GRPC_PROXY_NO_CONNECT_ACCESS DEBUG: grpc_request_check_actor.h:718: Skip check permission connect db, user is a admin, database: /Root, user: user1@as, from ip: |88.6%| [TM] {BAZEL_UPLOAD} ydb/core/grpc_services/grpc_request_check_actor_ut/unittest |88.6%| [TM] {asan, default-linux-x86_64, release} ydb/core/grpc_services/grpc_request_check_actor_ut/unittest |88.6%| [TM] {BAZEL_UPLOAD} ydb/core/grpc_services/grpc_request_check_actor_ut/unittest >> TBSV::CleanupDroppedVolumesOnRestart >> TConsoleTests::TestAlterUnknownTenantExtSubdomain [GOOD] >> TConsoleTests::TestAlterBorrowedStorage >> EraseRowsTests::ConditionalEraseRowsShouldEraseOnPgInt8Milliseconds [GOOD] >> EraseRowsTests::ConditionalEraseRowsShouldEraseOnPgTimestamp >> EraseRowsTests::ConditionalEraseRowsShouldEraseOnDyNumberNanoSeconds [GOOD] >> KqpQueryPerf::Replace+QueryService-UseSink [GOOD] >> EraseRowsTests::ConditionalEraseRowsShouldEraseOnUint64NanoSeconds [GOOD] >> 
PgCatalog::PgDatabase-useSink [GOOD]
>> PgCatalog::PgRoles
|88.6%| [TM] {asan, default-linux-x86_64, release} ydb/core/grpc_services/grpc_request_check_actor_ut/unittest
|88.6%| [TM] {BAZEL_UPLOAD} ydb/core/grpc_services/grpc_request_check_actor_ut/unittest
>> TestSetCloudPermissions::CanSetAllPermissions
>> EraseRowsTests::ConditionalEraseRowsShouldEraseOnPgDate [GOOD]
>> TBSV::CleanupDroppedVolumesOnRestart [GOOD]
>> EraseRowsTests::EraseRowsFromReplicatedTable [GOOD]
|88.6%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_bsvolume/unittest
|88.6%| [TM] {BAZEL_UPLOAD} ydb/core/tx/schemeshard/ut_bsvolume/unittest
------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/datashard/ut_erase_rows/unittest >> EraseRowsTests::ConditionalEraseRowsShouldEraseOnUint64NanoSeconds [GOOD]
Test command err: ... waiting for SysViewsRoster update finished
2026-01-08T15:36:52.241956Z node 1 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:125: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, got undelivered to scheme cache: ActorUnknown
2026-01-08T15:36:52.353890Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:114:2161], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2026-01-08T15:36:52.354119Z node 1 :METADATA_PROVIDER ERROR: log.cpp:841: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2026-01-08T15:36:52.354202Z node 1 :METADATA_PROVIDER ERROR: log.cpp:841: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2026-01-08T15:36:52.606161Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2026-01-08T15:36:52.606298Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2026-01-08T15:36:52.677608Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1374: Notification cookie mismatch for subscription [1:34:2081] 1767886609795278 != 1767886609795282 2026-01-08T15:36:52.690168Z node 1 :IMPORT WARN: schemeshard_import.cpp:440: Table profiles were not loaded 2026-01-08T15:36:52.737723Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2026-01-08T15:36:52.834240Z node 1 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: PathErrorUnknown, path: Root/.metadata/script_executions ... waiting for SysViewsRoster update finished (done) 2026-01-08T15:36:53.122123Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2026-01-08T15:36:53.135818Z node 1 :TX_CONVEYOR ERROR: log.cpp:841: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2026-01-08T15:36:53.242820Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:36:53.288056Z node 1 :TX_DATASHARD INFO: datashard.cpp:374: TDataShard::OnActivateExecutor: tablet 72075186224037888 actor [1:886:2765] 2026-01-08T15:36:53.288368Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:661: TxInitSchema.Execute 2026-01-08T15:36:53.336903Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:727: TxInitSchema.Complete 2026-01-08T15:36:53.337040Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:50: TDataShard::TTxInit::Execute 2026-01-08T15:36:53.338839Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1324: LoadChangeRecords: QueueSize: 0, at tablet: 72075186224037888 2026-01-08T15:36:53.338937Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1380: LoadLockChangeRecords at tablet: 72075186224037888 2026-01-08T15:36:53.339004Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1429: LoadChangeRecordCommits at tablet: 72075186224037888 2026-01-08T15:36:53.339405Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:94: TDataShard::TTxInit::Complete 2026-01-08T15:36:53.339577Z node 1 :TX_DATASHARD 
DEBUG: datashard__init.cpp:102: TDataShard::TTxInitRestored::Execute 2026-01-08T15:36:53.339692Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:142: DataShard 72075186224037888 persisting started state actor id [1:901:2765] in generation 1 2026-01-08T15:36:53.350531Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:151: TDataShard::TTxInitRestored::Complete 2026-01-08T15:36:53.402951Z node 1 :TX_DATASHARD INFO: datashard.cpp:418: Switched to work state WaitScheme tabletId 72075186224037888 2026-01-08T15:36:53.403154Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:458: 72075186224037888 not sending time cast registration request in state WaitScheme: missing processing params 2026-01-08T15:36:53.403270Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1259: Change sender created: at tablet: 72075186224037888, actorId: [1:903:2775] 2026-01-08T15:36:53.403329Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1264: Trying to activate change sender: at tablet: 72075186224037888 2026-01-08T15:36:53.403370Z node 1 :TX_DATASHARD INFO: datashard.cpp:1281: Cannot activate change sender: at tablet: 72075186224037888, state: WaitScheme 2026-01-08T15:36:53.403422Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2026-01-08T15:36:53.403930Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 72075186224037888 2026-01-08T15:36:53.404060Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 72075186224037888 2026-01-08T15:36:53.404165Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2026-01-08T15:36:53.404206Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 0 2026-01-08T15:36:53.404244Z node 1 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186224037888 TxInFly 0 2026-01-08T15:36:53.404287Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2026-01-08T15:36:53.404653Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3740: Server connected at leader tablet# 72075186224037888, clientId# [1:882:2762], serverId# [1:887:2766], sessionId# [0:0:0] 2026-01-08T15:36:53.404828Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037888 2026-01-08T15:36:53.405087Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:133: Propose scheme transaction at tablet 72075186224037888 txId 281474976715657 ssId 72057594046644480 seqNo 2:1 2026-01-08T15:36:53.405190Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:221: Prepared scheme transaction txId 281474976715657 at tablet 72075186224037888 2026-01-08T15:36:53.413191Z node 1 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:180: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2026-01-08T15:36:53.424013Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 72075186224037888 2026-01-08T15:36:53.424157Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:468: 72075186224037888 not sending time cast registration request in state WaitScheme 2026-01-08T15:36:53.545677Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3740: Server connected at leader tablet# 72075186224037888, clientId# [1:917:2783], serverId# 
[1:919:2785], sessionId# [0:0:0] 2026-01-08T15:36:53.559986Z node 1 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:69: Planned transaction txId 281474976715657 at step 2000 at tablet 72075186224037888 { Transactions { TxId: 281474976715657 AckTo { RawX1: 0 RawX2: 0 } } Step: 2000 MediatorID: 72057594046382081 TabletID: 72075186224037888 } 2026-01-08T15:36:53.560086Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2026-01-08T15:36:53.560398Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2026-01-08T15:36:53.560448Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 1 2026-01-08T15:36:53.560503Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:333: Found ready operation [2000:281474976715657] in PlanQueue unit at 72075186224037888 2026-01-08T15:36:53.560769Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:634: LoadTxDetails at 72075186224037888 loaded tx from db 2000:281474976715657 keys extracted: 0 2026-01-08T15:36:53.560970Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 (dry run) active 1 active planned 1 immediate 0 planned 1 2026-01-08T15:36:53.565742Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2026-01-08T15:36:53.565879Z node 1 :TX_DATASHARD INFO: create_table_unit.cpp:69: Trying to CREATE TABLE at 72075186224037888 tableId# [OwnerId: 72057594046644480, LocalPathId: 38] schema version# 1 2026-01-08T15:36:53.568222Z node 1 :TX_DATASHARD INFO: datashard.cpp:476: Send registration request to time cast Ready tabletId 72075186224037888 mediators count is 1 coordinators count is 1 buckets per mediator 2 2026-01-08T15:36:53.568740Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 (dry run) active 0 active planned 0 immediate 0 planned 0 2026-01-08T15:36:53.571858Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3772: Got TEvMediatorTimecast::TEvRegisterTabletResult at 72075186224037888 time 0 2026-01-08T15:36:53.571929Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2026-01-08T15:36:53.572705Z node 1 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:98: Sending '{TEvPlanStepAccepted TabletId# 72075186224037888 step# 2000} 2026-01-08T15:36:53.572783Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2026-01-08T15:36:53.574071Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2026-01-08T15:36:53.574148Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1264: Trying to activate change sender: at tablet: 72075186224037888 2026-01-08T15:36:53.574225Z node 1 :TX_DATASHARD INFO: datashard.cpp:1302: Change sender activated: at tablet: 72075186224037888 2026-01-08T15:36:53.574285Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:810: Complete [2000 : 281474976715657] from 72075186224037888 at tablet 72075186224037888 send result to client [1:400:2399], exec latency: 0 ms, propose latency: 0 ms 2026-01-08T15:36:53.574756Z node 1 :TX_DATASHARD INFO: datashard.cpp:1599: 72075186224037888 Sending notify to schemeshard 72057594046644480 txId 281474976715657 state Ready TxInFly 0 2026-01-08T15:36:53.574856Z node 1 :TX_DATASHARD DEBUG: 
cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2026-01-08T15:36:53.580290Z node 1 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:180: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2026-01-08T15:36:53.584198Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:2965: Handle TEvSchemaChangedResult 281474976715657 datashard 72075186224037888 state Ready 2026-01-08T15:36:53.584275Z node 1 :TX_DATASHARD DEBUG: datashard__schema_changed.cpp:22: 72075186224037888 Got TEvSchemaChangedResult from SS at 72075186224037888 2026-01-08T15:36:53.585012Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3790: Got TEvMediatorTimecast::TEvSubscribeReadStepResult at 72075186224037888 coordinator 72057594046316545 last step 0 next step 2000 2026-01-08T15:36:53.596106Z node 1 :KQP_WORKLOAD_SERV ... pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 1 2026-01-08T15:36:58.935362Z node 2 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:333: Found ready operation [2000:281474976715657] in PlanQueue unit at 72075186224037888 2026-01-08T15:36:58.935546Z node 2 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:634: LoadTxDetails at 72075186224037888 loaded tx from db 2000:281474976715657 keys extracted: 0 2026-01-08T15:36:58.935659Z node 2 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 (dry run) active 1 active planned 1 immediate 0 planned 1 2026-01-08T15:36:58.936166Z node 2 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2026-01-08T15:36:58.936227Z node 2 :TX_DATASHARD INFO: create_table_unit.cpp:69: Trying to CREATE TABLE at 72075186224037888 tableId# [OwnerId: 72057594046644480, LocalPathId: 38] schema version# 1 2026-01-08T15:36:58.936530Z node 2 :TX_DATASHARD INFO: datashard.cpp:476: Send registration request to time cast Ready tabletId 72075186224037888 mediators count is 1 coordinators count is 1 buckets per mediator 2 2026-01-08T15:36:58.936870Z node 2 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 (dry run) active 0 active planned 0 immediate 0 planned 0 2026-01-08T15:36:58.938840Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:3772: Got TEvMediatorTimecast::TEvRegisterTabletResult at 72075186224037888 time 0 2026-01-08T15:36:58.938881Z node 2 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2026-01-08T15:36:58.939254Z node 2 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:98: Sending '{TEvPlanStepAccepted TabletId# 72075186224037888 step# 2000} 2026-01-08T15:36:58.939303Z node 2 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2026-01-08T15:36:58.940259Z node 2 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2026-01-08T15:36:58.940303Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:1264: Trying to activate change sender: at tablet: 72075186224037888 2026-01-08T15:36:58.940343Z node 2 :TX_DATASHARD INFO: datashard.cpp:1302: Change sender activated: at tablet: 72075186224037888 2026-01-08T15:36:58.940402Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:810: Complete [2000 : 281474976715657] from 72075186224037888 at tablet 72075186224037888 send result to client [2:399:2398], exec latency: 0 ms, propose latency: 0 ms 2026-01-08T15:36:58.940454Z node 2 :TX_DATASHARD 
INFO: datashard.cpp:1599: 72075186224037888 Sending notify to schemeshard 72057594046644480 txId 281474976715657 state Ready TxInFly 0
2026-01-08T15:36:58.940534Z node 2 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888
2026-01-08T15:36:58.941832Z node 2 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:180: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888
2026-01-08T15:36:58.943582Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:3790: Got TEvMediatorTimecast::TEvSubscribeReadStepResult at 72075186224037888 coordinator 72057594046316545 last step 0 next step 2000
2026-01-08T15:36:58.943904Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:2965: Handle TEvSchemaChangedResult 281474976715657 datashard 72075186224037888 state Ready
2026-01-08T15:36:58.943975Z node 2 :TX_DATASHARD DEBUG: datashard__schema_changed.cpp:22: 72075186224037888 Got TEvSchemaChangedResult from SS at 72075186224037888
2026-01-08T15:36:58.952539Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:951:2809], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions }
2026-01-08T15:36:58.952646Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:961:2814], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions }
2026-01-08T15:36:58.952738Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:263: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions }
2026-01-08T15:36:58.953697Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:967:2819], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions }
2026-01-08T15:36:58.953880Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:263: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions }
2026-01-08T15:36:58.957927Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179)
2026-01-08T15:36:58.964356Z node 2 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:180: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888
2026-01-08T15:36:59.080426Z node 2 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:180: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888
2026-01-08T15:36:59.083502Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:965:2817], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715658 completed, doublechecking }
2026-01-08T15:36:59.117940Z node 2 :TX_PROXY ERROR: schemereq.cpp:624: Actor# [2:1023:2856] txid# 281474976715659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 42], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 }
2026-01-08T15:36:59.198260Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:3740: Server connected at leader tablet# 72075186224037888, clientId# [2:1053:2873], serverId# [2:1054:2874], sessionId# [0:0:0]
2026-01-08T15:36:59.198712Z node 2 :TX_DATASHARD DEBUG: execute_write_unit.cpp:262: Executing write operation for [0:2] at 72075186224037888
2026-01-08T15:36:59.198871Z node 2 :TX_DATASHARD DEBUG: execute_write_unit.cpp:465: Executed write operation for [0:2] at 72075186224037888, row count=4
2026-01-08T15:36:59.209883Z node 2 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888
2026-01-08T15:36:59.213987Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:3740: Server connected at leader tablet# 72075186224037888, clientId# [2:1061:2880], serverId# [2:1062:2881], sessionId# [0:0:0]
2026-01-08T15:36:59.214730Z node 2 :TX_DATASHARD INFO: datashard__op_rows.cpp:27: TTxDirectBase(48) Execute: at tablet# 72075186224037888
2026-01-08T15:36:59.225687Z node 2 :TX_DATASHARD INFO: datashard__op_rows.cpp:81: TTxDirectBase(48) Complete: at tablet# 72075186224037888
2026-01-08T15:36:59.225749Z node 2 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888
2026-01-08T15:36:59.225937Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:4486: FullScan complete at 72075186224037888
2026-01-08T15:36:59.225980Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:4502: Conditional erase complete: cookie: 3, at: 72075186224037888
2026-01-08T15:36:59.226064Z node 2 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888
2026-01-08T15:36:59.226097Z node 2 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 0
2026-01-08T15:36:59.226130Z node 2 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186224037888 TxInFly 0
2026-01-08T15:36:59.226173Z node 2 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888
2026-01-08T15:36:59.226390Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:3758: Server disconnected at leader tablet# 72075186224037888, clientId# [2:1061:2880], serverId# [2:1062:2881], sessionId# [0:0:0]
2026-01-08T15:36:59.227109Z node 2 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037888
2026-01-08T15:36:59.227333Z node 2 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 72075186224037888
2026-01-08T15:36:59.227443Z node 2 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888
2026-01-08T15:36:59.227470Z node 2 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 1 active planned 0 immediate 1 planned 0
2026-01-08T15:36:59.227501Z node 2 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:307: Found ready candidate operation [0:281474976715661] at 72075186224037888 for WaitForStreamClearance
2026-01-08T15:36:59.227670Z node 2 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 (dry run) active 1 active planned 0 immediate 1 planned 0
2026-01-08T15:36:59.227733Z node 2 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888
2026-01-08T15:36:59.228225Z node 2 :TX_DATASHARD DEBUG: read_table_scan.cpp:514: Got quota for read table scan ShardId: 72075186224037888, TxId: 281474976715661, MessageQuota: 1
2026-01-08T15:36:59.228425Z node 2 :TX_DATASHARD DEBUG: read_table_scan.cpp:662: Send response data ShardId: 72075186224037888, TxId: 281474976715661, Size: 36, Rows: 0, PendingAcks: 1, MessageQuota: 0
2026-01-08T15:36:59.228569Z node 2 :TX_DATASHARD DEBUG: read_table_scan.cpp:483: Got stream data ack ShardId: 72075186224037888, TxId: 281474976715661, PendingAcks: 0
2026-01-08T15:36:59.228605Z node 2 :TX_DATASHARD DEBUG: read_table_scan.cpp:718: Finish scan ShardId: 72075186224037888, TxId: 281474976715661, MessageQuota: 0
2026-01-08T15:36:59.230268Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:4486: FullScan complete at 72075186224037888
2026-01-08T15:36:59.230311Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:4492: Found op: cookie: 281474976715661, at: 72075186224037888
2026-01-08T15:36:59.230393Z node 2 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888
2026-01-08T15:36:59.230424Z node 2 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 1 active planned 0 immediate 1 planned 0
2026-01-08T15:36:59.230456Z node 2 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:307: Found ready candidate operation [0:281474976715661] at 72075186224037888 for ReadTableScan
2026-01-08T15:36:59.230553Z node 2 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 (dry run) active 0 active planned 0 immediate 0 planned 0
2026-01-08T15:36:59.230594Z node 2 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888
2026-01-08T15:36:59.230628Z node 2 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888
|88.6%| [TM] {BAZEL_UPLOAD} ydb/core/tx/datashard/ut_erase_rows/unittest
------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/datashard/ut_erase_rows/unittest >> EraseRowsTests::ConditionalEraseRowsShouldEraseOnDyNumberNanoSeconds [GOOD]
Test command err: ... waiting for SysViewsRoster update finished
2026-01-08T15:36:47.965268Z node 1 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:125: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, got undelivered to scheme cache: ActorUnknown
2026-01-08T15:36:48.071107Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:114:2161], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2026-01-08T15:36:48.071410Z node 1 :METADATA_PROVIDER ERROR: log.cpp:841: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2026-01-08T15:36:48.071572Z node 1 :METADATA_PROVIDER ERROR: log.cpp:841: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2026-01-08T15:36:48.340726Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2026-01-08T15:36:48.340901Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2026-01-08T15:36:48.405355Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1374: Notification cookie mismatch for subscription [1:34:2081] 1767886605123792 != 1767886605123796 2026-01-08T15:36:48.423981Z node 1 :IMPORT WARN: schemeshard_import.cpp:440: Table profiles were not loaded 2026-01-08T15:36:48.468742Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2026-01-08T15:36:48.551050Z node 1 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: PathErrorUnknown, path: Root/.metadata/script_executions ... waiting for SysViewsRoster update finished (done) 2026-01-08T15:36:48.831034Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2026-01-08T15:36:48.844127Z node 1 :TX_CONVEYOR ERROR: log.cpp:841: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2026-01-08T15:36:48.945758Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:36:48.975751Z node 1 :TX_DATASHARD INFO: datashard.cpp:374: TDataShard::OnActivateExecutor: tablet 72075186224037888 actor [1:886:2765] 2026-01-08T15:36:48.975965Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:661: TxInitSchema.Execute 2026-01-08T15:36:49.020115Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:727: TxInitSchema.Complete 2026-01-08T15:36:49.020242Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:50: TDataShard::TTxInit::Execute 2026-01-08T15:36:49.021999Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1324: LoadChangeRecords: QueueSize: 0, at tablet: 72075186224037888 2026-01-08T15:36:49.022095Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1380: LoadLockChangeRecords at tablet: 72075186224037888 2026-01-08T15:36:49.022169Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1429: LoadChangeRecordCommits at tablet: 72075186224037888 2026-01-08T15:36:49.022568Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:94: TDataShard::TTxInit::Complete 2026-01-08T15:36:49.022736Z node 1 :TX_DATASHARD 
DEBUG: datashard__init.cpp:102: TDataShard::TTxInitRestored::Execute 2026-01-08T15:36:49.022826Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:142: DataShard 72075186224037888 persisting started state actor id [1:901:2765] in generation 1 2026-01-08T15:36:49.033609Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:151: TDataShard::TTxInitRestored::Complete 2026-01-08T15:36:49.079589Z node 1 :TX_DATASHARD INFO: datashard.cpp:418: Switched to work state WaitScheme tabletId 72075186224037888 2026-01-08T15:36:49.079790Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:458: 72075186224037888 not sending time cast registration request in state WaitScheme: missing processing params 2026-01-08T15:36:49.079888Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1259: Change sender created: at tablet: 72075186224037888, actorId: [1:903:2775] 2026-01-08T15:36:49.079930Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1264: Trying to activate change sender: at tablet: 72075186224037888 2026-01-08T15:36:49.079977Z node 1 :TX_DATASHARD INFO: datashard.cpp:1281: Cannot activate change sender: at tablet: 72075186224037888, state: WaitScheme 2026-01-08T15:36:49.080019Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2026-01-08T15:36:49.080493Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 72075186224037888 2026-01-08T15:36:49.080608Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 72075186224037888 2026-01-08T15:36:49.080692Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2026-01-08T15:36:49.080726Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 0 2026-01-08T15:36:49.080781Z node 1 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186224037888 TxInFly 0 2026-01-08T15:36:49.080818Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2026-01-08T15:36:49.081188Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3740: Server connected at leader tablet# 72075186224037888, clientId# [1:882:2762], serverId# [1:887:2766], sessionId# [0:0:0] 2026-01-08T15:36:49.081341Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037888 2026-01-08T15:36:49.081598Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:133: Propose scheme transaction at tablet 72075186224037888 txId 281474976715657 ssId 72057594046644480 seqNo 2:1 2026-01-08T15:36:49.081708Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:221: Prepared scheme transaction txId 281474976715657 at tablet 72075186224037888 2026-01-08T15:36:49.083365Z node 1 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:180: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2026-01-08T15:36:49.094082Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 72075186224037888 2026-01-08T15:36:49.094220Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:468: 72075186224037888 not sending time cast registration request in state WaitScheme 2026-01-08T15:36:49.211664Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3740: Server connected at leader tablet# 72075186224037888, clientId# [1:917:2783], serverId# 
[1:919:2785], sessionId# [0:0:0] 2026-01-08T15:36:49.216987Z node 1 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:69: Planned transaction txId 281474976715657 at step 2000 at tablet 72075186224037888 { Transactions { TxId: 281474976715657 AckTo { RawX1: 0 RawX2: 0 } } Step: 2000 MediatorID: 72057594046382081 TabletID: 72075186224037888 } 2026-01-08T15:36:49.217067Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2026-01-08T15:36:49.217333Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2026-01-08T15:36:49.217379Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 1 2026-01-08T15:36:49.217430Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:333: Found ready operation [2000:281474976715657] in PlanQueue unit at 72075186224037888 2026-01-08T15:36:49.217749Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:634: LoadTxDetails at 72075186224037888 loaded tx from db 2000:281474976715657 keys extracted: 0 2026-01-08T15:36:49.217908Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 (dry run) active 1 active planned 1 immediate 0 planned 1 2026-01-08T15:36:49.218758Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2026-01-08T15:36:49.218829Z node 1 :TX_DATASHARD INFO: create_table_unit.cpp:69: Trying to CREATE TABLE at 72075186224037888 tableId# [OwnerId: 72057594046644480, LocalPathId: 38] schema version# 1 2026-01-08T15:36:49.220987Z node 1 :TX_DATASHARD INFO: datashard.cpp:476: Send registration request to time cast Ready tabletId 72075186224037888 mediators count is 1 coordinators count is 1 buckets per mediator 2 2026-01-08T15:36:49.221540Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 (dry run) active 0 active planned 0 immediate 0 planned 0 2026-01-08T15:36:49.225702Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3772: Got TEvMediatorTimecast::TEvRegisterTabletResult at 72075186224037888 time 0 2026-01-08T15:36:49.225782Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2026-01-08T15:36:49.226685Z node 1 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:98: Sending '{TEvPlanStepAccepted TabletId# 72075186224037888 step# 2000} 2026-01-08T15:36:49.226776Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2026-01-08T15:36:49.228189Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2026-01-08T15:36:49.228250Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1264: Trying to activate change sender: at tablet: 72075186224037888 2026-01-08T15:36:49.228304Z node 1 :TX_DATASHARD INFO: datashard.cpp:1302: Change sender activated: at tablet: 72075186224037888 2026-01-08T15:36:49.228380Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:810: Complete [2000 : 281474976715657] from 72075186224037888 at tablet 72075186224037888 send result to client [1:400:2399], exec latency: 0 ms, propose latency: 0 ms 2026-01-08T15:36:49.228469Z node 1 :TX_DATASHARD INFO: datashard.cpp:1599: 72075186224037888 Sending notify to schemeshard 72057594046644480 txId 281474976715657 state Ready TxInFly 0 2026-01-08T15:36:49.228576Z node 1 :TX_DATASHARD DEBUG: 
cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2026-01-08T15:36:49.234664Z node 1 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:180: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2026-01-08T15:36:49.236698Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:2965: Handle TEvSchemaChangedResult 281474976715657 datashard 72075186224037888 state Ready 2026-01-08T15:36:49.236806Z node 1 :TX_DATASHARD DEBUG: datashard__schema_changed.cpp:22: 72075186224037888 Got TEvSchemaChangedResult from SS at 72075186224037888 2026-01-08T15:36:49.237848Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3790: Got TEvMediatorTimecast::TEvSubscribeReadStepResult at 72075186224037888 coordinator 72057594046316545 last step 0 next step 2000 2026-01-08T15:36:49.249320Z node 1 :KQP_WORKLOAD_SERV ... pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 1 2026-01-08T15:36:58.906886Z node 3 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:333: Found ready operation [2000:281474976715657] in PlanQueue unit at 72075186224037888 2026-01-08T15:36:58.907114Z node 3 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:634: LoadTxDetails at 72075186224037888 loaded tx from db 2000:281474976715657 keys extracted: 0 2026-01-08T15:36:58.907214Z node 3 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 (dry run) active 1 active planned 1 immediate 0 planned 1 2026-01-08T15:36:58.907386Z node 3 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2026-01-08T15:36:58.907454Z node 3 :TX_DATASHARD INFO: create_table_unit.cpp:69: Trying to CREATE TABLE at 72075186224037888 tableId# [OwnerId: 72057594046644480, LocalPathId: 38] schema version# 1 2026-01-08T15:36:58.907833Z node 3 :TX_DATASHARD INFO: datashard.cpp:476: Send registration request to time cast Ready tabletId 72075186224037888 mediators count is 1 coordinators count is 1 buckets per mediator 2 2026-01-08T15:36:58.908183Z node 3 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 (dry run) active 0 active planned 0 immediate 0 planned 0 2026-01-08T15:36:58.909828Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:3772: Got TEvMediatorTimecast::TEvRegisterTabletResult at 72075186224037888 time 0 2026-01-08T15:36:58.909877Z node 3 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2026-01-08T15:36:58.910829Z node 3 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:98: Sending '{TEvPlanStepAccepted TabletId# 72075186224037888 step# 2000} 2026-01-08T15:36:58.910920Z node 3 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2026-01-08T15:36:58.911925Z node 3 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2026-01-08T15:36:58.911970Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:1264: Trying to activate change sender: at tablet: 72075186224037888 2026-01-08T15:36:58.912014Z node 3 :TX_DATASHARD INFO: datashard.cpp:1302: Change sender activated: at tablet: 72075186224037888 2026-01-08T15:36:58.912078Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:810: Complete [2000 : 281474976715657] from 72075186224037888 at tablet 72075186224037888 send result to client [3:399:2398], exec latency: 0 ms, propose latency: 0 ms 2026-01-08T15:36:58.912119Z node 3 :TX_DATASHARD 
INFO: datashard.cpp:1599: 72075186224037888 Sending notify to schemeshard 72057594046644480 txId 281474976715657 state Ready TxInFly 0 2026-01-08T15:36:58.912197Z node 3 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2026-01-08T15:36:58.913725Z node 3 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:180: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2026-01-08T15:36:58.915034Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:3790: Got TEvMediatorTimecast::TEvSubscribeReadStepResult at 72075186224037888 coordinator 72057594046316545 last step 0 next step 2000 2026-01-08T15:36:58.915159Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:2965: Handle TEvSchemaChangedResult 281474976715657 datashard 72075186224037888 state Ready 2026-01-08T15:36:58.915200Z node 3 :TX_DATASHARD DEBUG: datashard__schema_changed.cpp:22: 72075186224037888 Got TEvSchemaChangedResult from SS at 72075186224037888 2026-01-08T15:36:58.922819Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:951:2809], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:36:58.922909Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:962:2814], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:36:58.923254Z node 3 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:263: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:36:58.924037Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:967:2819], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:36:58.924289Z node 3 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:263: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:36:58.927908Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2026-01-08T15:36:58.933389Z node 3 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:180: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2026-01-08T15:36:59.047391Z node 3 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:180: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2026-01-08T15:36:59.049674Z node 3 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [3:965:2817], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715658 completed, doublechecking } 2026-01-08T15:36:59.084396Z node 3 :TX_PROXY ERROR: schemereq.cpp:624: Actor# [3:1023:2856] txid# 281474976715659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 42], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2026-01-08T15:36:59.158946Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:3740: Server connected at leader tablet# 72075186224037888, clientId# [3:1053:2873], serverId# [3:1054:2874], sessionId# [0:0:0] 2026-01-08T15:36:59.159246Z node 3 :TX_DATASHARD DEBUG: execute_write_unit.cpp:262: Executing write operation for [0:2] at 72075186224037888 2026-01-08T15:36:59.159359Z node 3 :TX_DATASHARD DEBUG: execute_write_unit.cpp:465: Executed write operation for [0:2] at 72075186224037888, row count=4 2026-01-08T15:36:59.170110Z node 3 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2026-01-08T15:36:59.173520Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:3740: Server connected at leader tablet# 72075186224037888, clientId# [3:1061:2880], serverId# [3:1062:2881], sessionId# [0:0:0] 2026-01-08T15:36:59.174237Z node 3 :TX_DATASHARD INFO: datashard__op_rows.cpp:27: TTxDirectBase(48) Execute: at tablet# 72075186224037888 2026-01-08T15:36:59.185346Z node 3 :TX_DATASHARD INFO: datashard__op_rows.cpp:81: TTxDirectBase(48) Complete: at tablet# 72075186224037888 2026-01-08T15:36:59.185421Z node 3 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2026-01-08T15:36:59.185767Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:4486: FullScan complete at 72075186224037888 2026-01-08T15:36:59.185805Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:4502: Conditional erase complete: cookie: 3, at: 72075186224037888 2026-01-08T15:36:59.185925Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:3758: Server disconnected at leader tablet# 72075186224037888, clientId# [3:1061:2880], serverId# [3:1062:2881], sessionId# [0:0:0] 2026-01-08T15:36:59.185994Z node 3 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2026-01-08T15:36:59.186028Z node 3 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 0 2026-01-08T15:36:59.186064Z node 3 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186224037888 TxInFly 0 2026-01-08T15:36:59.186104Z node 3 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2026-01-08T15:36:59.187038Z node 3 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037888 2026-01-08T15:36:59.187352Z node 3 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 72075186224037888 2026-01-08T15:36:59.187516Z node 3 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2026-01-08T15:36:59.187550Z node 3 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 1 active planned 0 immediate 1 planned 0 2026-01-08T15:36:59.187606Z node 3 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:307: Found ready candidate operation [0:281474976715661] at 72075186224037888 for WaitForStreamClearance 
2026-01-08T15:36:59.187778Z node 3 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 (dry run) active 1 active planned 0 immediate 1 planned 0 2026-01-08T15:36:59.187834Z node 3 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2026-01-08T15:36:59.188388Z node 3 :TX_DATASHARD DEBUG: read_table_scan.cpp:514: Got quota for read table scan ShardId: 72075186224037888, TxId: 281474976715661, MessageQuota: 1 2026-01-08T15:36:59.188568Z node 3 :TX_DATASHARD DEBUG: read_table_scan.cpp:662: Send response data ShardId: 72075186224037888, TxId: 281474976715661, Size: 37, Rows: 0, PendingAcks: 1, MessageQuota: 0 2026-01-08T15:36:59.188677Z node 3 :TX_DATASHARD DEBUG: read_table_scan.cpp:483: Got stream data ack ShardId: 72075186224037888, TxId: 281474976715661, PendingAcks: 0 2026-01-08T15:36:59.188718Z node 3 :TX_DATASHARD DEBUG: read_table_scan.cpp:718: Finish scan ShardId: 72075186224037888, TxId: 281474976715661, MessageQuota: 0 2026-01-08T15:36:59.228302Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:4486: FullScan complete at 72075186224037888 2026-01-08T15:36:59.228354Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:4492: Found op: cookie: 281474976715661, at: 72075186224037888 2026-01-08T15:36:59.228429Z node 3 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2026-01-08T15:36:59.228453Z node 3 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 1 active planned 0 immediate 1 planned 0 2026-01-08T15:36:59.228480Z node 3 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:307: Found ready candidate operation [0:281474976715661] at 72075186224037888 for ReadTableScan 2026-01-08T15:36:59.228569Z node 3 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 (dry run) active 0 active planned 0 immediate 0 planned 0 2026-01-08T15:36:59.228606Z node 3 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2026-01-08T15:36:59.228639Z node 3 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 |88.6%| [TM] {BAZEL_UPLOAD} ydb/core/tx/datashard/ut_erase_rows/unittest ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/perf/unittest >> KqpQueryPerf::Replace+QueryService-UseSink [GOOD] Test command err: Trying to start YDB, gRPC: 15181, MsgBus: 32686 2026-01-08T15:36:53.200391Z node 1 :METADATA_PROVIDER WARN: log.cpp:841: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7593015187076249799:2078];send_to=[0:7307199536658146131:7762515]; 2026-01-08T15:36:53.201206Z node 1 :METADATA_PROVIDER ERROR: log.cpp:841: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2026-01-08T15:36:53.467495Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2026-01-08T15:36:53.467623Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2026-01-08T15:36:53.505585Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2026-01-08T15:36:53.517483Z node 1 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check 
script execution tables existence, scheme status: LookupError, path: Root/.metadata/script_executions 2026-01-08T15:36:53.604671Z node 1 :IMPORT WARN: schemeshard_import.cpp:440: Table profiles were not loaded 2026-01-08T15:36:53.614449Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1374: Notification cookie mismatch for subscription [1:7593015187076249759:2081] 1767886613196803 != 1767886613196806 2026-01-08T15:36:53.746342Z node 1 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: PathErrorUnknown, path: Root/.metadata/script_executions 2026-01-08T15:36:53.766978Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2026-01-08T15:36:53.767005Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2026-01-08T15:36:53.767011Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2026-01-08T15:36:53.767082Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2026-01-08T15:36:54.189178Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2026-01-08T15:36:54.204447Z node 1 :TX_CONVEYOR ERROR: log.cpp:841: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2026-01-08T15:36:54.249734Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:36:54.401352Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:36:54.563727Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:36:54.629186Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:36:56.377270Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7593015199961153523:2399], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:36:56.377395Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:263: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:36:56.377626Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7593015199961153533:2400], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:36:56.377657Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:263: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:36:56.691264Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:36:56.719560Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:36:56.744293Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:36:56.768190Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:36:56.792182Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:36:56.824355Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:36:56.856825Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:36:56.927441Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710669:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:36:56.989576Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7593015199961154406:2482], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:36:56.989655Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7593015199961154411:2485], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:36:56.989660Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:263: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:36:56.989870Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7593015199961154413:2486], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:36:56.989921Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:263: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:36:56.993330Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710670:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2026-01-08T15:36:57.003911Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7593015199961154414:2487], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710670 completed, doublechecking } 2026-01-08T15:36:57.067128Z node 1 :TX_PROXY ERROR: schemereq.cpp:624: Actor# [1:7593015204256121762:3773] txid# 281474976710671, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 53], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2026-01-08T15:36:58.199518Z node 1 :METADATA_PROVIDER ERROR: log.cpp:841: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7593015187076249799:2078];send_to=[0:7307199536658146131:7762515]; 2026-01-08T15:36:58.199599Z node 1 :METADATA_PROVIDER ERROR: log.cpp:841: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; |88.6%| [TM] {BAZEL_UPLOAD} ydb/core/kqp/ut/perf/unittest ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_bsvolume/unittest >> TBSV::CleanupDroppedVolumesOnRestart [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2143] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2143] Leader for TabletID 72057594046678944 is [1:129:2154] sender: [1:130:2058] recipient: [1:112:2143] 2026-01-08T15:36:59.570363Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7938: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2026-01-08T15:36:59.570435Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7966: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2026-01-08T15:36:59.570463Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7852: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2026-01-08T15:36:59.570491Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7868: OperationsProcessing config: using default configuration 2026-01-08T15:36:59.570534Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7874: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2026-01-08T15:36:59.570556Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7874: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2026-01-08T15:36:59.570606Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7998: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2026-01-08T15:36:59.570690Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_shred_manager.cpp:39: [RootShredManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, ShredInterval# 604800.000000s, ShredBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2026-01-08T15:36:59.571350Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:8069: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2026-01-08T15:36:59.571555Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2026-01-08T15:36:59.655603Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7755: Cannot subscribe to console configs 2026-01-08T15:36:59.655691Z node 1 :IMPORT WARN: 
schemeshard_import.cpp:440: Table profiles were not loaded 2026-01-08T15:36:59.667549Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2026-01-08T15:36:59.668008Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2026-01-08T15:36:59.668245Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2026-01-08T15:36:59.681448Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2026-01-08T15:36:59.681946Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:464: Clear TempDirsState with owners number: 0 2026-01-08T15:36:59.682803Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1383: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2026-01-08T15:36:59.684070Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2026-01-08T15:36:59.688030Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:160: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2026-01-08T15:36:59.688180Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_shred_manager.cpp:84: [RootShredManager] Stop 2026-01-08T15:36:59.689042Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2026-01-08T15:36:59.689087Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2026-01-08T15:36:59.689168Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2026-01-08T15:36:59.689196Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2026-01-08T15:36:59.689226Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:213: TTxServerlessStorageBilling.Complete 2026-01-08T15:36:59.689317Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:7111: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2026-01-08T15:36:59.694900Z node 1 :HIVE INFO: tablet_helpers.cpp:1241: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:129:2154] sender: [1:243:2058] recipient: [1:15:2062] 2026-01-08T15:36:59.811405Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:377: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2026-01-08T15:36:59.811597Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2026-01-08T15:36:59.811784Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:600: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 
2026-01-08T15:36:59.811820Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5553: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2026-01-08T15:36:59.812051Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:127: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2026-01-08T15:36:59.812163Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2026-01-08T15:36:59.814687Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:460: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2026-01-08T15:36:59.814935Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2026-01-08T15:36:59.815238Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:494: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2026-01-08T15:36:59.815321Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2026-01-08T15:36:59.815364Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2026-01-08T15:36:59.815395Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2700: Change state for txid 1:0 2 -> 3 2026-01-08T15:36:59.817351Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:494: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2026-01-08T15:36:59.817407Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2026-01-08T15:36:59.817443Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2700: Change state for txid 1:0 3 -> 128 2026-01-08T15:36:59.818906Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:494: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2026-01-08T15:36:59.818951Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2026-01-08T15:36:59.818984Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2026-01-08T15:36:59.819021Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1723: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2026-01-08T15:36:59.822277Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1792: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2026-01-08T15:36:59.824067Z node 1 :FLAT_TX_SCHEMESHARD 
DEBUG: schemeshard__operation_side_effects.cpp:665: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2026-01-08T15:36:59.824242Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1824: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2026-01-08T15:36:59.825478Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:685: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2026-01-08T15:36:59.825599Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:689: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 136 RawX2: 4294969454 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2026-01-08T15:36:59.825649Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2026-01-08T15:36:59.825906Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2700: Change state for txid 1:0 128 -> 240 2026-01-08T15:36:59.825958Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2026-01-08T15:36:59.826123Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:600: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2026-01-08T15:36:59.826199Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:412: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2026-01-08T15:36:59.828237Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2026-01-08T15:36:59.828280Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard Desc ... 
[1:15:2062] 2026-01-08T15:37:00.064144Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/BSVolume" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2026-01-08T15:37:00.064362Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/BSVolume" took 226us result status StatusPathDoesNotExist 2026-01-08T15:37:00.064480Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusPathDoesNotExist Reason: "Check failed: path: \'/MyRoot/BSVolume\', error: path hasn\'t been resolved, nearest resolved path: \'/MyRoot\' (id: [OwnerId: 72057594046678944, LocalPathId: 1])" Path: "/MyRoot/BSVolume" PathId: 18446744073709551615 LastExistedPrefixPath: "/MyRoot" LastExistedPrefixPathId: 1 LastExistedPrefixDescription { Self { Name: "MyRoot" PathId: 1 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 5000001 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathSubType: EPathSubTypeEmpty ChildrenExist: false } } PathOwnerId: 18446744073709551615, at schemeshard: 72057594046678944 2026-01-08T15:37:00.065463Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:464: Clear TempDirsState with owners number: 0 Leader for TabletID 72057594046678944 is [1:406:2382] sender: [1:473:2058] recipient: [1:106:2140] Leader for TabletID 72057594046678944 is [1:406:2382] sender: [1:476:2058] recipient: [1:475:2434] Leader for TabletID 72057594046678944 is [1:477:2435] sender: [1:478:2058] recipient: [1:475:2434] 2026-01-08T15:37:00.102146Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7938: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2026-01-08T15:37:00.102274Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7966: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2026-01-08T15:37:00.102309Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7852: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2026-01-08T15:37:00.102361Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7868: OperationsProcessing config: using default configuration 2026-01-08T15:37:00.102401Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7874: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2026-01-08T15:37:00.102444Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7874: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2026-01-08T15:37:00.102493Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7998: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2026-01-08T15:37:00.102551Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_shred_manager.cpp:39: [RootShredManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, ShredInterval# 604800.000000s, ShredBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 
2026-01-08T15:37:00.103165Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:8069: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2026-01-08T15:37:00.103456Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2026-01-08T15:37:00.114657Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2026-01-08T15:37:00.116193Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2026-01-08T15:37:00.116392Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2026-01-08T15:37:00.116556Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7755: Cannot subscribe to console configs 2026-01-08T15:37:00.116603Z node 1 :IMPORT WARN: schemeshard_import.cpp:440: Table profiles were not loaded 2026-01-08T15:37:00.116817Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:464: Clear TempDirsState with owners number: 0 2026-01-08T15:37:00.117612Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1418: TTxInit for Paths, read records: 1, at schemeshard: 72057594046678944 2026-01-08T15:37:00.117684Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1492: TTxInit for UserAttributes, read records: 0, at schemeshard: 72057594046678944 2026-01-08T15:37:00.117744Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1518: TTxInit for UserAttributesAlterData, read records: 0, at schemeshard: 72057594046678944 2026-01-08T15:37:00.118014Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1820: TTxInit for Tables, read records: 0, at schemeshard: 72057594046678944 2026-01-08T15:37:00.118086Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__root_shred_manager.cpp:452: [RootShredManager] Restore: Generation# 0, Status# 0, WakeupInterval# 604800 s, NumberShredTenantsInRunning# 0 2026-01-08T15:37:00.118251Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:2093: TTxInit for Columns, read records: 0, at schemeshard: 72057594046678944 2026-01-08T15:37:00.118331Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:2153: TTxInit for ColumnsAlters, read records: 0, at schemeshard: 72057594046678944 2026-01-08T15:37:00.118407Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:2211: TTxInit for Shards, read records: 0, at schemeshard: 72057594046678944 2026-01-08T15:37:00.118492Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:2297: TTxInit for TablePartitions, read records: 0, at schemeshard: 72057594046678944 2026-01-08T15:37:00.118561Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:2363: TTxInit for TableShardPartitionConfigs, read records: 0, at schemeshard: 72057594046678944 2026-01-08T15:37:00.118667Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:2514: TTxInit for ChannelsBinding, read records: 0, at schemeshard: 72057594046678944 2026-01-08T15:37:00.118864Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:2893: TTxInit for TableIndexes, read records: 0, at schemeshard: 72057594046678944 2026-01-08T15:37:00.118939Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:2972: TTxInit for TableIndexKeys, read records: 0, at schemeshard: 72057594046678944 2026-01-08T15:37:00.119264Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:3473: TTxInit for KesusInfos, read records: 0, at schemeshard: 72057594046678944 2026-01-08T15:37:00.119313Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:3509: TTxInit 
for KesusAlters, read records: 0, at schemeshard: 72057594046678944 2026-01-08T15:37:00.119451Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:3754: TTxInit for TxShards, read records: 0, at schemeshard: 72057594046678944 2026-01-08T15:37:00.119573Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:3899: TTxInit for ShardToDelete, read records: 0, at schemeshard: 72057594046678944 2026-01-08T15:37:00.119625Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:3916: TTxInit for SystemShardToDelete, read records: 0, at schemeshard: 72057594046678944 2026-01-08T15:37:00.119709Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:3933: TTxInit for BackupSettings, read records: 0, at schemeshard: 72057594046678944 2026-01-08T15:37:00.119847Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4094: TTxInit for ShardBackupStatus, read records: 0, at schemeshard: 72057594046678944 2026-01-08T15:37:00.119915Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4110: TTxInit for CompletedBackup, read records: 0, at schemeshard: 72057594046678944 2026-01-08T15:37:00.120016Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4395: TTxInit for Publications, read records: 0, at schemeshard: 72057594046678944 2026-01-08T15:37:00.120188Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4762: IndexBuild , records: 0, at schemeshard: 72057594046678944 2026-01-08T15:37:00.120252Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4828: KMeansTreeSample records: 0, at schemeshard: 72057594046678944 2026-01-08T15:37:00.120296Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4887: KMeansTreeCluster records: 0, at schemeshard: 72057594046678944 2026-01-08T15:37:00.120478Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4976: SnapshotTables: snapshots: 0 tables: 0, at schemeshard: 72057594046678944 2026-01-08T15:37:00.120514Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:5003: SnapshotSteps: snapshots: 0, at schemeshard: 72057594046678944 2026-01-08T15:37:00.120550Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:5030: LongLocks: records: 0, at schemeshard: 72057594046678944 2026-01-08T15:37:00.124040Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_shred_manager.cpp:84: [RootShredManager] Stop 2026-01-08T15:37:00.126276Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2026-01-08T15:37:00.126380Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2026-01-08T15:37:00.126462Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2026-01-08T15:37:00.126512Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2026-01-08T15:37:00.126559Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:213: TTxServerlessStorageBilling.Complete 2026-01-08T15:37:00.128972Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:7111: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 Leader for TabletID 72057594046678944 is [1:477:2435] sender: [1:539:2058] recipient: [1:15:2062] 2026-01-08T15:37:00.160940Z node 1 
:SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/BSVolume" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2026-01-08T15:37:00.161181Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/BSVolume" took 222us result status StatusPathDoesNotExist 2026-01-08T15:37:00.161302Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusPathDoesNotExist Reason: "Check failed: path: \'/MyRoot/BSVolume\', error: path hasn\'t been resolved, nearest resolved path: \'/MyRoot\' (id: [OwnerId: 72057594046678944, LocalPathId: 1])" Path: "/MyRoot/BSVolume" PathId: 18446744073709551615 LastExistedPrefixPath: "/MyRoot" LastExistedPrefixPathId: 1 LastExistedPrefixDescription { Self { Name: "MyRoot" PathId: 1 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 5000001 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathSubType: EPathSubTypeEmpty ChildrenExist: false } } PathOwnerId: 18446744073709551615, at schemeshard: 72057594046678944 |88.6%| [TM] {BAZEL_UPLOAD} ydb/core/tx/schemeshard/ut_bsvolume/unittest |88.6%| [TM] {asan, default-linux-x86_64, release} ydb/core/grpc_services/grpc_request_check_actor_ut/unittest >> EraseRowsTests::ConditionalEraseRowsShouldEraseOnDatetime64 [GOOD] >> TCacheTest::MigrationDeletedPathNavigate [GOOD] >> TestSetCloudPermissions::CanSetPermissionsForDbWithoutCloudUserAttributes |88.6%| [TM] {BAZEL_UPLOAD} ydb/core/grpc_services/grpc_request_check_actor_ut/unittest ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/datashard/ut_erase_rows/unittest >> EraseRowsTests::EraseRowsFromReplicatedTable [GOOD] Test command err: ... waiting for SysViewsRoster update finished 2026-01-08T15:36:53.842903Z node 1 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:125: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, got undelivered to scheme cache: ActorUnknown 2026-01-08T15:36:53.971628Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:114:2161], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2026-01-08T15:36:53.972059Z node 1 :METADATA_PROVIDER ERROR: log.cpp:841: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2026-01-08T15:36:53.972217Z node 1 :METADATA_PROVIDER ERROR: log.cpp:841: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2026-01-08T15:36:54.256681Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2026-01-08T15:36:54.256833Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2026-01-08T15:36:54.336220Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1374: Notification cookie mismatch for subscription [1:34:2081] 1767886611132869 != 1767886611132873 2026-01-08T15:36:54.350709Z node 1 :IMPORT WARN: schemeshard_import.cpp:440: Table profiles were not loaded 2026-01-08T15:36:54.395210Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2026-01-08T15:36:54.500436Z node 1 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: PathErrorUnknown, path: Root/.metadata/script_executions ... waiting for SysViewsRoster update finished (done) 2026-01-08T15:36:54.786629Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2026-01-08T15:36:54.803021Z node 1 :TX_CONVEYOR ERROR: log.cpp:841: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2026-01-08T15:36:54.909639Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:36:54.948004Z node 1 :TX_DATASHARD INFO: datashard.cpp:374: TDataShard::OnActivateExecutor: tablet 72075186224037888 actor [1:886:2765] 2026-01-08T15:36:54.948265Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:661: TxInitSchema.Execute 2026-01-08T15:36:54.990348Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:727: TxInitSchema.Complete 2026-01-08T15:36:54.990493Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:50: TDataShard::TTxInit::Execute 2026-01-08T15:36:54.992192Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1324: LoadChangeRecords: QueueSize: 0, at tablet: 72075186224037888 2026-01-08T15:36:54.992269Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1380: LoadLockChangeRecords at tablet: 72075186224037888 2026-01-08T15:36:54.992316Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1429: LoadChangeRecordCommits at tablet: 72075186224037888 2026-01-08T15:36:54.992666Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:94: TDataShard::TTxInit::Complete 2026-01-08T15:36:54.992801Z node 1 :TX_DATASHARD 
DEBUG: datashard__init.cpp:102: TDataShard::TTxInitRestored::Execute 2026-01-08T15:36:54.992876Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:142: DataShard 72075186224037888 persisting started state actor id [1:901:2765] in generation 1 2026-01-08T15:36:55.003472Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:151: TDataShard::TTxInitRestored::Complete 2026-01-08T15:36:55.029058Z node 1 :TX_DATASHARD INFO: datashard.cpp:418: Switched to work state WaitScheme tabletId 72075186224037888 2026-01-08T15:36:55.029277Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:458: 72075186224037888 not sending time cast registration request in state WaitScheme: missing processing params 2026-01-08T15:36:55.029385Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1259: Change sender created: at tablet: 72075186224037888, actorId: [1:903:2775] 2026-01-08T15:36:55.029425Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1264: Trying to activate change sender: at tablet: 72075186224037888 2026-01-08T15:36:55.029461Z node 1 :TX_DATASHARD INFO: datashard.cpp:1281: Cannot activate change sender: at tablet: 72075186224037888, state: WaitScheme 2026-01-08T15:36:55.029494Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2026-01-08T15:36:55.030011Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 72075186224037888 2026-01-08T15:36:55.030146Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 72075186224037888 2026-01-08T15:36:55.030222Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2026-01-08T15:36:55.030255Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 0 2026-01-08T15:36:55.030293Z node 1 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186224037888 TxInFly 0 2026-01-08T15:36:55.030349Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2026-01-08T15:36:55.030740Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3740: Server connected at leader tablet# 72075186224037888, clientId# [1:882:2762], serverId# [1:887:2766], sessionId# [0:0:0] 2026-01-08T15:36:55.030870Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037888 2026-01-08T15:36:55.031098Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:133: Propose scheme transaction at tablet 72075186224037888 txId 281474976715657 ssId 72057594046644480 seqNo 2:1 2026-01-08T15:36:55.031194Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:221: Prepared scheme transaction txId 281474976715657 at tablet 72075186224037888 2026-01-08T15:36:55.032639Z node 1 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:180: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2026-01-08T15:36:55.043357Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 72075186224037888 2026-01-08T15:36:55.043477Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:468: 72075186224037888 not sending time cast registration request in state WaitScheme 2026-01-08T15:36:55.160402Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3740: Server connected at leader tablet# 72075186224037888, clientId# [1:917:2783], serverId# 
[1:919:2785], sessionId# [0:0:0] 2026-01-08T15:36:55.165631Z node 1 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:69: Planned transaction txId 281474976715657 at step 2000 at tablet 72075186224037888 { Transactions { TxId: 281474976715657 AckTo { RawX1: 0 RawX2: 0 } } Step: 2000 MediatorID: 72057594046382081 TabletID: 72075186224037888 } 2026-01-08T15:36:55.165748Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2026-01-08T15:36:55.166061Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2026-01-08T15:36:55.166109Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 1 2026-01-08T15:36:55.166181Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:333: Found ready operation [2000:281474976715657] in PlanQueue unit at 72075186224037888 2026-01-08T15:36:55.166481Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:634: LoadTxDetails at 72075186224037888 loaded tx from db 2000:281474976715657 keys extracted: 0 2026-01-08T15:36:55.166673Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 (dry run) active 1 active planned 1 immediate 0 planned 1 2026-01-08T15:36:55.167423Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2026-01-08T15:36:55.167504Z node 1 :TX_DATASHARD INFO: create_table_unit.cpp:69: Trying to CREATE TABLE at 72075186224037888 tableId# [OwnerId: 72057594046644480, LocalPathId: 38] schema version# 1 2026-01-08T15:36:55.169901Z node 1 :TX_DATASHARD INFO: datashard.cpp:476: Send registration request to time cast Ready tabletId 72075186224037888 mediators count is 1 coordinators count is 1 buckets per mediator 2 2026-01-08T15:36:55.170412Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 (dry run) active 0 active planned 0 immediate 0 planned 0 2026-01-08T15:36:55.172585Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3772: Got TEvMediatorTimecast::TEvRegisterTabletResult at 72075186224037888 time 0 2026-01-08T15:36:55.172656Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2026-01-08T15:36:55.173389Z node 1 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:98: Sending '{TEvPlanStepAccepted TabletId# 72075186224037888 step# 2000} 2026-01-08T15:36:55.173465Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2026-01-08T15:36:55.174651Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2026-01-08T15:36:55.174699Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1264: Trying to activate change sender: at tablet: 72075186224037888 2026-01-08T15:36:55.174746Z node 1 :TX_DATASHARD INFO: datashard.cpp:1302: Change sender activated: at tablet: 72075186224037888 2026-01-08T15:36:55.174818Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:810: Complete [2000 : 281474976715657] from 72075186224037888 at tablet 72075186224037888 send result to client [1:400:2399], exec latency: 0 ms, propose latency: 0 ms 2026-01-08T15:36:55.174871Z node 1 :TX_DATASHARD INFO: datashard.cpp:1599: 72075186224037888 Sending notify to schemeshard 72057594046644480 txId 281474976715657 state Ready TxInFly 0 2026-01-08T15:36:55.174949Z node 1 :TX_DATASHARD DEBUG: 
cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2026-01-08T15:36:55.180441Z node 1 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:180: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2026-01-08T15:36:55.182146Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:2965: Handle TEvSchemaChangedResult 281474976715657 datashard 72075186224037888 state Ready 2026-01-08T15:36:55.182223Z node 1 :TX_DATASHARD DEBUG: datashard__schema_changed.cpp:22: 72075186224037888 Got TEvSchemaChangedResult from SS at 72075186224037888 2026-01-08T15:36:55.183127Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3790: Got TEvMediatorTimecast::TEvSubscribeReadStepResult at 72075186224037888 coordinator 72057594046316545 last step 0 next step 2000 2026-01-08T15:36:55.193494Z node 1 :KQP_WORKLOAD_SERV ... 6-01-08T15:36:58.958881Z node 2 :METADATA_PROVIDER ERROR: log.cpp:841: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2026-01-08T15:36:58.959028Z node 2 :METADATA_PROVIDER ERROR: log.cpp:841: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2026-01-08T15:36:59.138532Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2026-01-08T15:36:59.138637Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2026-01-08T15:36:59.156693Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1374: Notification cookie mismatch for subscription [2:34:2081] 1767886616309001 != 1767886616309004 2026-01-08T15:36:59.163876Z node 2 :IMPORT WARN: schemeshard_import.cpp:440: Table profiles were not loaded 2026-01-08T15:36:59.205299Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2026-01-08T15:36:59.273684Z node 2 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: PathErrorUnknown, path: Root/.metadata/script_executions ... 
waiting for SysViewsRoster update finished (done) 2026-01-08T15:36:59.600189Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2026-01-08T15:36:59.613552Z node 2 :TX_CONVEYOR ERROR: log.cpp:841: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2026-01-08T15:36:59.717382Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:36:59.738411Z node 2 :TX_DATASHARD INFO: datashard.cpp:374: TDataShard::OnActivateExecutor: tablet 72075186224037888 actor [2:886:2765] 2026-01-08T15:36:59.738632Z node 2 :TX_DATASHARD DEBUG: datashard__init.cpp:661: TxInitSchema.Execute 2026-01-08T15:36:59.775936Z node 2 :TX_DATASHARD DEBUG: datashard__init.cpp:727: TxInitSchema.Complete 2026-01-08T15:36:59.776080Z node 2 :TX_DATASHARD DEBUG: datashard__init.cpp:50: TDataShard::TTxInit::Execute 2026-01-08T15:36:59.777633Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:1324: LoadChangeRecords: QueueSize: 0, at tablet: 72075186224037888 2026-01-08T15:36:59.777728Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:1380: LoadLockChangeRecords at tablet: 72075186224037888 2026-01-08T15:36:59.777790Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:1429: LoadChangeRecordCommits at tablet: 72075186224037888 2026-01-08T15:36:59.778149Z node 2 :TX_DATASHARD DEBUG: datashard__init.cpp:94: TDataShard::TTxInit::Complete 2026-01-08T15:36:59.778341Z node 2 :TX_DATASHARD DEBUG: datashard__init.cpp:102: TDataShard::TTxInitRestored::Execute 2026-01-08T15:36:59.778421Z node 2 :TX_DATASHARD DEBUG: datashard__init.cpp:142: DataShard 72075186224037888 persisting started state actor id [2:901:2765] in generation 1 2026-01-08T15:36:59.789192Z node 2 :TX_DATASHARD DEBUG: datashard__init.cpp:151: TDataShard::TTxInitRestored::Complete 2026-01-08T15:36:59.789295Z node 2 :TX_DATASHARD INFO: datashard.cpp:418: Switched to work state WaitScheme tabletId 72075186224037888 2026-01-08T15:36:59.789421Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:458: 72075186224037888 not sending time cast registration request in state WaitScheme: missing processing params 2026-01-08T15:36:59.789507Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:1259: Change sender created: at tablet: 72075186224037888, actorId: [2:903:2775] 2026-01-08T15:36:59.789554Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:1264: Trying to activate change sender: at tablet: 72075186224037888 2026-01-08T15:36:59.789596Z node 2 :TX_DATASHARD INFO: datashard.cpp:1281: Cannot activate change sender: at tablet: 72075186224037888, state: WaitScheme 2026-01-08T15:36:59.789637Z node 2 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2026-01-08T15:36:59.789997Z node 2 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 72075186224037888 2026-01-08T15:36:59.790098Z node 2 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 72075186224037888 
2026-01-08T15:36:59.790190Z node 2 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2026-01-08T15:36:59.790241Z node 2 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 0 2026-01-08T15:36:59.790284Z node 2 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186224037888 TxInFly 0 2026-01-08T15:36:59.790355Z node 2 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2026-01-08T15:36:59.790465Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:3740: Server connected at leader tablet# 72075186224037888, clientId# [2:883:2763], serverId# [2:888:2766], sessionId# [0:0:0] 2026-01-08T15:36:59.790941Z node 2 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037888 2026-01-08T15:36:59.791196Z node 2 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:133: Propose scheme transaction at tablet 72075186224037888 txId 281474976715657 ssId 72057594046644480 seqNo 2:1 2026-01-08T15:36:59.791281Z node 2 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:221: Prepared scheme transaction txId 281474976715657 at tablet 72075186224037888 2026-01-08T15:36:59.793113Z node 2 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:180: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2026-01-08T15:36:59.803893Z node 2 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 72075186224037888 2026-01-08T15:36:59.804020Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:468: 72075186224037888 not sending time cast registration request in state WaitScheme 2026-01-08T15:36:59.921155Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:3740: Server connected at leader tablet# 72075186224037888, clientId# [2:917:2783], serverId# [2:919:2785], sessionId# [0:0:0] 2026-01-08T15:36:59.921781Z node 2 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:69: Planned transaction txId 281474976715657 at step 2000 at tablet 72075186224037888 { Transactions { TxId: 281474976715657 AckTo { RawX1: 0 RawX2: 0 } } Step: 2000 MediatorID: 72057594046382081 TabletID: 72075186224037888 } 2026-01-08T15:36:59.921841Z node 2 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2026-01-08T15:36:59.922509Z node 2 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2026-01-08T15:36:59.922569Z node 2 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 1 2026-01-08T15:36:59.922631Z node 2 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:333: Found ready operation [2000:281474976715657] in PlanQueue unit at 72075186224037888 2026-01-08T15:36:59.922930Z node 2 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:634: LoadTxDetails at 72075186224037888 loaded tx from db 2000:281474976715657 keys extracted: 0 2026-01-08T15:36:59.923073Z node 2 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 (dry run) active 1 active planned 1 immediate 0 planned 1 2026-01-08T15:36:59.923730Z node 2 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2026-01-08T15:36:59.923795Z node 2 :TX_DATASHARD INFO: create_table_unit.cpp:69: Trying to CREATE TABLE at 
72075186224037888 tableId# [OwnerId: 72057594046644480, LocalPathId: 38] schema version# 1 2026-01-08T15:36:59.924148Z node 2 :TX_DATASHARD INFO: datashard.cpp:476: Send registration request to time cast Ready tabletId 72075186224037888 mediators count is 1 coordinators count is 1 buckets per mediator 2 2026-01-08T15:36:59.924526Z node 2 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 (dry run) active 0 active planned 0 immediate 0 planned 0 2026-01-08T15:36:59.926011Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:3772: Got TEvMediatorTimecast::TEvRegisterTabletResult at 72075186224037888 time 0 2026-01-08T15:36:59.926057Z node 2 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2026-01-08T15:36:59.926609Z node 2 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:98: Sending '{TEvPlanStepAccepted TabletId# 72075186224037888 step# 2000} 2026-01-08T15:36:59.926712Z node 2 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2026-01-08T15:36:59.927667Z node 2 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2026-01-08T15:36:59.927707Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:1264: Trying to activate change sender: at tablet: 72075186224037888 2026-01-08T15:36:59.927746Z node 2 :TX_DATASHARD INFO: datashard.cpp:1302: Change sender activated: at tablet: 72075186224037888 2026-01-08T15:36:59.927801Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:810: Complete [2000 : 281474976715657] from 72075186224037888 at tablet 72075186224037888 send result to client [2:399:2398], exec latency: 0 ms, propose latency: 0 ms 2026-01-08T15:36:59.927847Z node 2 :TX_DATASHARD INFO: datashard.cpp:1599: 72075186224037888 Sending notify to schemeshard 72057594046644480 txId 281474976715657 state Ready TxInFly 0 2026-01-08T15:36:59.927943Z node 2 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2026-01-08T15:36:59.928977Z node 2 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:180: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2026-01-08T15:36:59.930298Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:3790: Got TEvMediatorTimecast::TEvSubscribeReadStepResult at 72075186224037888 coordinator 72057594046316545 last step 0 next step 2000 2026-01-08T15:36:59.930648Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:2965: Handle TEvSchemaChangedResult 281474976715657 datashard 72075186224037888 state Ready 2026-01-08T15:36:59.930718Z node 2 :TX_DATASHARD DEBUG: datashard__schema_changed.cpp:22: 72075186224037888 Got TEvSchemaChangedResult from SS at 72075186224037888 2026-01-08T15:36:59.936544Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:3740: Server connected at leader tablet# 72075186224037888, clientId# [2:953:2811], serverId# [2:954:2812], sessionId# [0:0:0] 2026-01-08T15:36:59.936655Z node 2 :TX_DATASHARD NOTICE: datashard__op_rows.cpp:209: Rejecting erase request on datashard: tablet# 72075186224037888, error# Can't execute erase at replicated table 2026-01-08T15:36:59.936833Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:3758: Server disconnected at leader tablet# 72075186224037888, clientId# [2:953:2811], serverId# [2:954:2812], sessionId# [0:0:0] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/datashard/ut_erase_rows/unittest >> 
EraseRowsTests::ConditionalEraseRowsShouldEraseOnPgDate [GOOD] Test command err: ... waiting for SysViewsRoster update finished 2026-01-08T15:36:48.582331Z node 1 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:125: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, got undelivered to scheme cache: ActorUnknown 2026-01-08T15:36:48.689063Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:114:2161], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2026-01-08T15:36:48.689496Z node 1 :METADATA_PROVIDER ERROR: log.cpp:841: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2026-01-08T15:36:48.689700Z node 1 :METADATA_PROVIDER ERROR: log.cpp:841: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2026-01-08T15:36:48.968241Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2026-01-08T15:36:48.968414Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2026-01-08T15:36:49.056882Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1374: Notification cookie mismatch for subscription [1:34:2081] 1767886605813959 != 1767886605813963 2026-01-08T15:36:49.068917Z node 1 :IMPORT WARN: schemeshard_import.cpp:440: Table profiles were not loaded 2026-01-08T15:36:49.116088Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2026-01-08T15:36:49.208323Z node 1 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: PathErrorUnknown, path: Root/.metadata/script_executions ... waiting for SysViewsRoster update finished (done) 2026-01-08T15:36:49.477806Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2026-01-08T15:36:49.490781Z node 1 :TX_CONVEYOR ERROR: log.cpp:841: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2026-01-08T15:36:49.593356Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:36:49.630078Z node 1 :TX_DATASHARD INFO: datashard.cpp:374: TDataShard::OnActivateExecutor: tablet 72075186224037888 actor [1:886:2765] 2026-01-08T15:36:49.630269Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:661: TxInitSchema.Execute 2026-01-08T15:36:49.666476Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:727: TxInitSchema.Complete 2026-01-08T15:36:49.666580Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:50: TDataShard::TTxInit::Execute 2026-01-08T15:36:49.667927Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1324: LoadChangeRecords: QueueSize: 0, at tablet: 72075186224037888 2026-01-08T15:36:49.668001Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1380: LoadLockChangeRecords at tablet: 72075186224037888 2026-01-08T15:36:49.668047Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1429: LoadChangeRecordCommits at tablet: 72075186224037888 2026-01-08T15:36:49.668323Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:94: TDataShard::TTxInit::Complete 2026-01-08T15:36:49.668436Z node 1 :TX_DATASHARD 
DEBUG: datashard__init.cpp:102: TDataShard::TTxInitRestored::Execute 2026-01-08T15:36:49.668500Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:142: DataShard 72075186224037888 persisting started state actor id [1:901:2765] in generation 1 2026-01-08T15:36:49.680113Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:151: TDataShard::TTxInitRestored::Complete 2026-01-08T15:36:49.712469Z node 1 :TX_DATASHARD INFO: datashard.cpp:418: Switched to work state WaitScheme tabletId 72075186224037888 2026-01-08T15:36:49.712709Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:458: 72075186224037888 not sending time cast registration request in state WaitScheme: missing processing params 2026-01-08T15:36:49.712889Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1259: Change sender created: at tablet: 72075186224037888, actorId: [1:903:2775] 2026-01-08T15:36:49.712930Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1264: Trying to activate change sender: at tablet: 72075186224037888 2026-01-08T15:36:49.712970Z node 1 :TX_DATASHARD INFO: datashard.cpp:1281: Cannot activate change sender: at tablet: 72075186224037888, state: WaitScheme 2026-01-08T15:36:49.713014Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2026-01-08T15:36:49.713537Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 72075186224037888 2026-01-08T15:36:49.713669Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 72075186224037888 2026-01-08T15:36:49.713777Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2026-01-08T15:36:49.713833Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 0 2026-01-08T15:36:49.713878Z node 1 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186224037888 TxInFly 0 2026-01-08T15:36:49.713916Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2026-01-08T15:36:49.714275Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3740: Server connected at leader tablet# 72075186224037888, clientId# [1:882:2762], serverId# [1:887:2766], sessionId# [0:0:0] 2026-01-08T15:36:49.714441Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037888 2026-01-08T15:36:49.714726Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:133: Propose scheme transaction at tablet 72075186224037888 txId 281474976715657 ssId 72057594046644480 seqNo 2:1 2026-01-08T15:36:49.714852Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:221: Prepared scheme transaction txId 281474976715657 at tablet 72075186224037888 2026-01-08T15:36:49.716431Z node 1 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:180: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2026-01-08T15:36:49.727175Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 72075186224037888 2026-01-08T15:36:49.727301Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:468: 72075186224037888 not sending time cast registration request in state WaitScheme 2026-01-08T15:36:49.845602Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3740: Server connected at leader tablet# 72075186224037888, clientId# [1:917:2783], serverId# 
[1:919:2785], sessionId# [0:0:0] 2026-01-08T15:36:49.850343Z node 1 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:69: Planned transaction txId 281474976715657 at step 2000 at tablet 72075186224037888 { Transactions { TxId: 281474976715657 AckTo { RawX1: 0 RawX2: 0 } } Step: 2000 MediatorID: 72057594046382081 TabletID: 72075186224037888 } 2026-01-08T15:36:49.850426Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2026-01-08T15:36:49.850677Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2026-01-08T15:36:49.850729Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 1 2026-01-08T15:36:49.850782Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:333: Found ready operation [2000:281474976715657] in PlanQueue unit at 72075186224037888 2026-01-08T15:36:49.851084Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:634: LoadTxDetails at 72075186224037888 loaded tx from db 2000:281474976715657 keys extracted: 0 2026-01-08T15:36:49.851264Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 (dry run) active 1 active planned 1 immediate 0 planned 1 2026-01-08T15:36:49.852045Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2026-01-08T15:36:49.852124Z node 1 :TX_DATASHARD INFO: create_table_unit.cpp:69: Trying to CREATE TABLE at 72075186224037888 tableId# [OwnerId: 72057594046644480, LocalPathId: 38] schema version# 1 2026-01-08T15:36:49.854110Z node 1 :TX_DATASHARD INFO: datashard.cpp:476: Send registration request to time cast Ready tabletId 72075186224037888 mediators count is 1 coordinators count is 1 buckets per mediator 2 2026-01-08T15:36:49.854621Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 (dry run) active 0 active planned 0 immediate 0 planned 0 2026-01-08T15:36:49.856823Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3772: Got TEvMediatorTimecast::TEvRegisterTabletResult at 72075186224037888 time 0 2026-01-08T15:36:49.856876Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2026-01-08T15:36:49.857615Z node 1 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:98: Sending '{TEvPlanStepAccepted TabletId# 72075186224037888 step# 2000} 2026-01-08T15:36:49.857694Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2026-01-08T15:36:49.858865Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2026-01-08T15:36:49.858910Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1264: Trying to activate change sender: at tablet: 72075186224037888 2026-01-08T15:36:49.859034Z node 1 :TX_DATASHARD INFO: datashard.cpp:1302: Change sender activated: at tablet: 72075186224037888 2026-01-08T15:36:49.859113Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:810: Complete [2000 : 281474976715657] from 72075186224037888 at tablet 72075186224037888 send result to client [1:400:2399], exec latency: 0 ms, propose latency: 0 ms 2026-01-08T15:36:49.859167Z node 1 :TX_DATASHARD INFO: datashard.cpp:1599: 72075186224037888 Sending notify to schemeshard 72057594046644480 txId 281474976715657 state Ready TxInFly 0 2026-01-08T15:36:49.859243Z node 1 :TX_DATASHARD DEBUG: 
cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2026-01-08T15:36:49.864547Z node 1 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:180: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2026-01-08T15:36:49.866389Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:2965: Handle TEvSchemaChangedResult 281474976715657 datashard 72075186224037888 state Ready 2026-01-08T15:36:49.866468Z node 1 :TX_DATASHARD DEBUG: datashard__schema_changed.cpp:22: 72075186224037888 Got TEvSchemaChangedResult from SS at 72075186224037888 2026-01-08T15:36:49.867390Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3790: Got TEvMediatorTimecast::TEvSubscribeReadStepResult at 72075186224037888 coordinator 72057594046316545 last step 0 next step 2000 2026-01-08T15:36:49.877381Z node 1 :KQP_WORKLOAD_SERV ... pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 1 2026-01-08T15:36:59.570129Z node 3 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:333: Found ready operation [2000:281474976715657] in PlanQueue unit at 72075186224037888 2026-01-08T15:36:59.570485Z node 3 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:634: LoadTxDetails at 72075186224037888 loaded tx from db 2000:281474976715657 keys extracted: 0 2026-01-08T15:36:59.570670Z node 3 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 (dry run) active 1 active planned 1 immediate 0 planned 1 2026-01-08T15:36:59.570910Z node 3 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2026-01-08T15:36:59.570978Z node 3 :TX_DATASHARD INFO: create_table_unit.cpp:69: Trying to CREATE TABLE at 72075186224037888 tableId# [OwnerId: 72057594046644480, LocalPathId: 38] schema version# 1 2026-01-08T15:36:59.571554Z node 3 :TX_DATASHARD INFO: datashard.cpp:476: Send registration request to time cast Ready tabletId 72075186224037888 mediators count is 1 coordinators count is 1 buckets per mediator 2 2026-01-08T15:36:59.572083Z node 3 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 (dry run) active 0 active planned 0 immediate 0 planned 0 2026-01-08T15:36:59.574638Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:3772: Got TEvMediatorTimecast::TEvRegisterTabletResult at 72075186224037888 time 0 2026-01-08T15:36:59.574703Z node 3 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2026-01-08T15:36:59.575991Z node 3 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:98: Sending '{TEvPlanStepAccepted TabletId# 72075186224037888 step# 2000} 2026-01-08T15:36:59.576089Z node 3 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2026-01-08T15:36:59.577515Z node 3 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2026-01-08T15:36:59.577574Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:1264: Trying to activate change sender: at tablet: 72075186224037888 2026-01-08T15:36:59.577631Z node 3 :TX_DATASHARD INFO: datashard.cpp:1302: Change sender activated: at tablet: 72075186224037888 2026-01-08T15:36:59.577714Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:810: Complete [2000 : 281474976715657] from 72075186224037888 at tablet 72075186224037888 send result to client [3:399:2398], exec latency: 0 ms, propose latency: 0 ms 2026-01-08T15:36:59.577787Z node 3 :TX_DATASHARD 
INFO: datashard.cpp:1599: 72075186224037888 Sending notify to schemeshard 72057594046644480 txId 281474976715657 state Ready TxInFly 0 2026-01-08T15:36:59.577903Z node 3 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2026-01-08T15:36:59.579939Z node 3 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:180: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2026-01-08T15:36:59.581946Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:3790: Got TEvMediatorTimecast::TEvSubscribeReadStepResult at 72075186224037888 coordinator 72057594046316545 last step 0 next step 2000 2026-01-08T15:36:59.582172Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:2965: Handle TEvSchemaChangedResult 281474976715657 datashard 72075186224037888 state Ready 2026-01-08T15:36:59.582246Z node 3 :TX_DATASHARD DEBUG: datashard__schema_changed.cpp:22: 72075186224037888 Got TEvSchemaChangedResult from SS at 72075186224037888 2026-01-08T15:36:59.593397Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:951:2809], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:36:59.593526Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:962:2814], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:36:59.594043Z node 3 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:263: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:36:59.595066Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:967:2819], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:36:59.595376Z node 3 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:263: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:36:59.603440Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2026-01-08T15:36:59.611094Z node 3 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:180: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2026-01-08T15:36:59.733181Z node 3 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:180: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2026-01-08T15:36:59.736261Z node 3 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [3:965:2817], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715658 completed, doublechecking } 2026-01-08T15:36:59.770827Z node 3 :TX_PROXY ERROR: schemereq.cpp:624: Actor# [3:1023:2856] txid# 281474976715659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 42], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2026-01-08T15:36:59.910518Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:3740: Server connected at leader tablet# 72075186224037888, clientId# [3:1053:2873], serverId# [3:1054:2874], sessionId# [0:0:0] 2026-01-08T15:36:59.910950Z node 3 :TX_DATASHARD DEBUG: execute_write_unit.cpp:262: Executing write operation for [0:2] at 72075186224037888 2026-01-08T15:36:59.911160Z node 3 :TX_DATASHARD DEBUG: execute_write_unit.cpp:465: Executed write operation for [0:2] at 72075186224037888, row count=5 2026-01-08T15:36:59.922064Z node 3 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2026-01-08T15:36:59.926342Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:3740: Server connected at leader tablet# 72075186224037888, clientId# [3:1061:2880], serverId# [3:1062:2881], sessionId# [0:0:0] 2026-01-08T15:36:59.927486Z node 3 :TX_DATASHARD INFO: datashard__op_rows.cpp:27: TTxDirectBase(48) Execute: at tablet# 72075186224037888 2026-01-08T15:36:59.938883Z node 3 :TX_DATASHARD INFO: datashard__op_rows.cpp:81: TTxDirectBase(48) Complete: at tablet# 72075186224037888 2026-01-08T15:36:59.938961Z node 3 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2026-01-08T15:36:59.939311Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:4486: FullScan complete at 72075186224037888 2026-01-08T15:36:59.939362Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:4502: Conditional erase complete: cookie: 3, at: 72075186224037888 2026-01-08T15:36:59.939503Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:3758: Server disconnected at leader tablet# 72075186224037888, clientId# [3:1061:2880], serverId# [3:1062:2881], sessionId# [0:0:0] 2026-01-08T15:36:59.939591Z node 3 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2026-01-08T15:36:59.939650Z node 3 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 0 2026-01-08T15:36:59.939704Z node 3 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186224037888 TxInFly 0 2026-01-08T15:36:59.939762Z node 3 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2026-01-08T15:36:59.940719Z node 3 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037888 2026-01-08T15:36:59.941081Z node 3 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 72075186224037888 2026-01-08T15:36:59.941285Z node 3 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2026-01-08T15:36:59.941332Z node 3 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 1 active planned 0 immediate 1 planned 0 2026-01-08T15:36:59.941378Z node 3 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:307: Found ready candidate operation [0:281474976715661] at 72075186224037888 for WaitForStreamClearance 
2026-01-08T15:36:59.941606Z node 3 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 (dry run) active 1 active planned 0 immediate 1 planned 0 2026-01-08T15:36:59.941670Z node 3 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2026-01-08T15:36:59.942245Z node 3 :TX_DATASHARD DEBUG: read_table_scan.cpp:514: Got quota for read table scan ShardId: 72075186224037888, TxId: 281474976715661, MessageQuota: 1 2026-01-08T15:36:59.942602Z node 3 :TX_DATASHARD DEBUG: read_table_scan.cpp:662: Send response data ShardId: 72075186224037888, TxId: 281474976715661, Size: 43, Rows: 0, PendingAcks: 1, MessageQuota: 0 2026-01-08T15:36:59.942763Z node 3 :TX_DATASHARD DEBUG: read_table_scan.cpp:483: Got stream data ack ShardId: 72075186224037888, TxId: 281474976715661, PendingAcks: 0 2026-01-08T15:36:59.942814Z node 3 :TX_DATASHARD DEBUG: read_table_scan.cpp:718: Finish scan ShardId: 72075186224037888, TxId: 281474976715661, MessageQuota: 0 2026-01-08T15:36:59.972168Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:4486: FullScan complete at 72075186224037888 2026-01-08T15:36:59.972229Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:4492: Found op: cookie: 281474976715661, at: 72075186224037888 2026-01-08T15:36:59.972326Z node 3 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2026-01-08T15:36:59.972358Z node 3 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 1 active planned 0 immediate 1 planned 0 2026-01-08T15:36:59.972392Z node 3 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:307: Found ready candidate operation [0:281474976715661] at 72075186224037888 for ReadTableScan 2026-01-08T15:36:59.972506Z node 3 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 (dry run) active 0 active planned 0 immediate 0 planned 0 2026-01-08T15:36:59.972556Z node 3 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2026-01-08T15:36:59.972600Z node 3 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 |88.6%| [TM] {BAZEL_UPLOAD} ydb/core/tx/datashard/ut_erase_rows/unittest |88.6%| [TM] {BAZEL_UPLOAD} ydb/core/tx/datashard/ut_erase_rows/unittest |88.6%| [TM] {asan, default-linux-x86_64, release} ydb/core/grpc_services/grpc_request_check_actor_ut/unittest |88.6%| [TM] {BAZEL_UPLOAD} ydb/core/grpc_services/grpc_request_check_actor_ut/unittest >> DistributedEraseTests::ConditionalEraseRowsCheckLimits [GOOD] >> DistributedEraseTests::ConditionalEraseRowsAsyncIndex ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/scheme_board/ut_cache/unittest >> TCacheTest::MigrationDeletedPathNavigate [GOOD] Test command err: 2026-01-08T15:36:28.944354Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7755: Cannot subscribe to console configs 2026-01-08T15:36:28.944433Z node 1 :IMPORT WARN: schemeshard_import.cpp:440: Table profiles were not loaded ... 
waiting for SysViewsRoster update finished FAKE_COORDINATOR: Add transaction: 281474976710657 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 281474976710657 at step: 5000001 FAKE_COORDINATOR: Erasing txId 281474976710657 FAKE_COORDINATOR: Add transaction: 281474976710671 at step: 5000002 FAKE_COORDINATOR: advance: minStep5000002 State->FrontStep: 5000001 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 281474976710671 at step: 5000002 FAKE_COORDINATOR: Add transaction: 281474976710666 at step: 5000003 FAKE_COORDINATOR: advance: minStep5000002 State->FrontStep: 5000002 FAKE_COORDINATOR: Add transaction: 281474976710670 at step: 5000004 FAKE_COORDINATOR: advance: minStep5000002 State->FrontStep: 5000002 FAKE_COORDINATOR: Add transaction: 281474976710665 at step: 5000005 FAKE_COORDINATOR: advance: minStep5000002 State->FrontStep: 5000002 FAKE_COORDINATOR: Add transaction: 281474976710669 at step: 5000006 FAKE_COORDINATOR: advance: minStep5000002 State->FrontStep: 5000002 FAKE_COORDINATOR: Add transaction: 281474976710664 at step: 5000007 FAKE_COORDINATOR: advance: minStep5000002 State->FrontStep: 5000002 FAKE_COORDINATOR: Add transaction: 281474976710668 at step: 5000008 FAKE_COORDINATOR: advance: minStep5000002 State->FrontStep: 5000002 FAKE_COORDINATOR: Add transaction: 281474976710663 at step: 5000009 FAKE_COORDINATOR: advance: minStep5000002 State->FrontStep: 5000002 FAKE_COORDINATOR: Add transaction: 281474976710667 at step: 5000010 FAKE_COORDINATOR: advance: minStep5000002 State->FrontStep: 5000002 FAKE_COORDINATOR: Add transaction: 281474976710662 at step: 5000011 FAKE_COORDINATOR: advance: minStep5000002 State->FrontStep: 5000002 FAKE_COORDINATOR: Add transaction: 281474976710661 at step: 5000012 FAKE_COORDINATOR: advance: minStep5000002 State->FrontStep: 5000002 FAKE_COORDINATOR: Add transaction: 281474976710660 at step: 5000013 FAKE_COORDINATOR: advance: minStep5000002 State->FrontStep: 5000002 FAKE_COORDINATOR: Add transaction: 281474976710659 at step: 5000014 FAKE_COORDINATOR: advance: minStep5000002 State->FrontStep: 5000002 FAKE_COORDINATOR: Add transaction: 281474976710658 at step: 5000015 FAKE_COORDINATOR: advance: minStep5000002 State->FrontStep: 5000002 FAKE_COORDINATOR: Add transaction: 281474976710692 at step: 5000016 FAKE_COORDINATOR: advance: minStep5000002 State->FrontStep: 5000002 FAKE_COORDINATOR: Add transaction: 281474976710687 at step: 5000017 FAKE_COORDINATOR: advance: minStep5000002 State->FrontStep: 5000002 FAKE_COORDINATOR: Add transaction: 281474976710691 at step: 5000018 FAKE_COORDINATOR: advance: minStep5000002 State->FrontStep: 5000002 FAKE_COORDINATOR: Add transaction: 281474976710686 at step: 5000019 FAKE_COORDINATOR: advance: minStep5000002 State->FrontStep: 5000002 FAKE_COORDINATOR: Add transaction: 281474976710690 at step: 5000020 FAKE_COORDINATOR: advance: minStep5000002 State->FrontStep: 5000002 FAKE_COORDINATOR: Add transaction: 281474976710685 at step: 5000021 FAKE_COORDINATOR: advance: minStep5000002 State->FrontStep: 5000002 FAKE_COORDINATOR: Add transaction: 281474976710689 at step: 5000022 FAKE_COORDINATOR: advance: minStep5000002 State->FrontStep: 5000002 FAKE_COORDINATOR: Add transaction: 281474976710684 at step: 5000023 FAKE_COORDINATOR: advance: minStep5000002 State->FrontStep: 5000002 FAKE_COORDINATOR: Add transaction: 281474976710688 at step: 5000024 FAKE_COORDINATOR: advance: minStep5000002 State->FrontStep: 5000002 
FAKE_COORDINATOR: Add transaction: 281474976710683 at step: 5000025 FAKE_COORDINATOR: advance: minStep5000002 State->FrontStep: 5000002 FAKE_COORDINATOR: Add transaction: 281474976710682 at step: 5000026 FAKE_COORDINATOR: advance: minStep5000002 State->FrontStep: 5000002 FAKE_COORDINATOR: Add transaction: 281474976710677 at step: 5000027 FAKE_COORDINATOR: advance: minStep5000002 State->FrontStep: 5000002 FAKE_COORDINATOR: Add transaction: 281474976710681 at step: 5000028 FAKE_COORDINATOR: advance: minStep5000002 State->FrontStep: 5000002 FAKE_COORDINATOR: Add transaction: 281474976710676 at step: 5000029 FAKE_COORDINATOR: advance: minStep5000002 State->FrontStep: 5000002 FAKE_COORDINATOR: Add transaction: 281474976710680 at step: 5000030 FAKE_COORDINATOR: advance: minStep5000002 State->FrontStep: 5000002 FAKE_COORDINATOR: Add transaction: 281474976710675 at step: 5000031 FAKE_COORDINATOR: advance: minStep5000002 State->FrontStep: 5000002 FAKE_COORDINATOR: Add transaction: 281474976710679 at step: 5000032 FAKE_COORDINATOR: advance: minStep5000002 State->FrontStep: 5000002 FAKE_COORDINATOR: Add transaction: 281474976710674 at step: 5000033 FAKE_COORDINATOR: advance: minStep5000002 State->FrontStep: 5000002 FAKE_COORDINATOR: Add transaction: 281474976710678 at step: 5000034 FAKE_COORDINATOR: advance: minStep5000002 State->FrontStep: 5000002 FAKE_COORDINATOR: Add transaction: 281474976710673 at step: 5000035 FAKE_COORDINATOR: advance: minStep5000002 State->FrontStep: 5000002 FAKE_COORDINATOR: Add transaction: 281474976710672 at step: 5000036 FAKE_COORDINATOR: advance: minStep5000002 State->FrontStep: 5000002 FAKE_COORDINATOR: Erasing txId 281474976710671 FAKE_COORDINATOR: advance: minStep5000003 State->FrontStep: 5000002 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 281474976710666 at step: 5000003 FAKE_COORDINATOR: Erasing txId 281474976710666 FAKE_COORDINATOR: advance: minStep5000004 State->FrontStep: 5000003 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 281474976710670 at step: 5000004 FAKE_COORDINATOR: Erasing txId 281474976710670 FAKE_COORDINATOR: advance: minStep5000005 State->FrontStep: 5000004 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 281474976710665 at step: 5000005 FAKE_COORDINATOR: Erasing txId 281474976710665 FAKE_COORDINATOR: advance: minStep5000006 State->FrontStep: 5000005 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 281474976710669 at step: 5000006 FAKE_COORDINATOR: Erasing txId 281474976710669 FAKE_COORDINATOR: advance: minStep5000007 State->FrontStep: 5000006 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 281474976710664 at step: 5000007 FAKE_COORDINATOR: Erasing txId 281474976710664 FAKE_COORDINATOR: advance: minStep5000008 State->FrontStep: 5000007 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 281474976710668 at step: 5000008 FAKE_COORDINATOR: Erasing txId 281474976710668 FAKE_COORDINATOR: advance: minStep5000009 State->FrontStep: 5000008 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 281474976710663 at step: 5000009 FAKE_COORDINATOR: Erasing txId 281474976710663 FAKE_COORDINATOR: advance: minStep5000010 State->FrontStep: 5000009 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 281474976710667 at step: 5000010 FAKE_COORDINATOR: Erasing txId 281474976710667 FAKE_COORDINATOR: advance: minStep5000011 State->FrontStep: 5000010 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 281474976710662 at step: 
5000011 FAKE_COORDINATOR: Erasing txId 281474976710662 FAKE_COORDINATOR: advance: minStep5000012 State->FrontStep: 5000011 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 281474976710661 at step: 5000012 FAKE_COORDINATOR: Erasing txId 281474976710661 FAKE_COORDINATOR: advance: minStep5000013 State->FrontStep: 5000012 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 281474976710660 at step: 5000013 FAKE_COORDINATOR: Erasing txId 281474976710660 FAKE_COORDINATOR: advance: minStep5000014 State->FrontStep: 5000013 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 281474976710659 at step: 5000014 FAKE_COORDINATOR: Erasing txId 281474976710659 FAKE_COORDINATOR: advance: minStep5000015 State->FrontStep: 5000014 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 281474976710658 at step: 5000015 FAKE_COORDINATOR: Erasing txId 281474976710658 FAKE_COORDINATOR: advance: minStep5000016 State->FrontStep: 5000015 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 281474976710692 at step: 5000016 FAKE_COORDINATOR: Erasing txId 281474976710692 FAKE_COORDINATOR: advance: minStep5000017 State->FrontStep: 5000016 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 281474976710687 at step: 5000017 FAKE_COORDINATOR: Erasing txId 281474976710687 FAKE_COORDINATOR: advance: minStep5000018 State->FrontStep: 5000017 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 281474976710691 at step: 5000018 FAKE_COORDINATOR: Erasing txId 281474976710691 FAKE_COORDINATOR: advance: minStep5000019 State->FrontStep: 5000018 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 281474976710686 at step: 5000019 FAKE_COORDINATOR: Erasing txId 281474976710686 FAKE_COORDINATOR: advance: minStep5000020 State->FrontStep: 5000019 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 281474976710690 at step: 5000020 FAKE_COORDINATOR: Erasing txId 281474976710690 FAKE_COORDINATOR: advance: minStep5000021 State->FrontStep: 5000020 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 281474976710685 at step: 5000021 FAKE_COORDINATOR: Erasing txId 281474976710685 FAKE_COORDINATOR: advance: minStep5000022 State->FrontStep: 5000021 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 281474976710689 at step: 5000022 FAKE_COORDINATOR: Erasing txId 281474976710689 FAKE_COORDINATOR: advance: minStep5000023 State->FrontStep: 5000022 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 281474976710684 at step: 5000023 FAKE_COORDINATOR: Erasing txId 281474976710684 FAKE_COORDINATOR: advance: minStep5000024 State->FrontStep: 5000023 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 281474976710688 at step: 5000024 FAKE_COORDINATOR: Erasing txId 281474976710688 FAKE_COORDINATOR: advance: minStep5000025 State->FrontStep: 5000024 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 281474976710683 at step: 5000025 FAKE_COORDINATOR: Erasing txId 281474976710683 FAKE_COORDINATOR: advance: minStep5000026 State->FrontStep: 5000025 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 281474976710682 at step: 5000026 FAKE_COORDINATOR: Erasing txId 281474976710682 FAKE_COORDINATOR: advance: minStep5000027 State->FrontStep: 5000026 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 281474976710677 at step: 5000027 FAKE_COORDINATOR: Erasing txId 281474976710677 FAKE_COORDINATOR: advance: minStep5000028 State->FrontStep: 5000027 
FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 281474976710681 at step: 5000028 FAKE_COORDINATOR: Erasing txId 281474976710681 FAKE_COORDINATOR: advance: minStep5000029 State->FrontStep: 5000028 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 281474976710676 at step: 5000029 FAKE_COORDINATOR: Erasing txId 281474976710676 FAKE_COORDINATOR: advance: minStep5000030 State->FrontStep: 5000029 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 281474976710680 at step: 5000030 FAKE_COORDINATOR: Erasing txId 281474976710680 FAKE_COORDINATOR: advance: minStep5000031 State->FrontStep: 5000030 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 281474976710675 at step: 5000031 FAKE_COORDINATOR: Erasing txId 281474976710675 FAKE_COORDINATOR: advance: minStep5000032 State->FrontStep: 5000031 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 281474976710679 at step: 5000032 FAKE_COORDINATOR: Erasing txId 281474976710679 FAKE_COORDINATOR: advance: minStep5000033 State->FrontStep: 5000032 FAKE_COORDINATOR: ... 0/.sys/top_queries_by_read_bytes_one_minute', reason: Schema is in ReadOnly mode 2026-01-08T15:36:32.242982Z node 2 :FLAT_TX_SCHEMESHARD ERROR: schemeshard_sysviews_update.cpp:189: SysViewsRosterUpdate# [2:688:2595] at schemeshard: 72075186233409549, failed to create sys view '/Root/USER_0/.sys/resource_pool_classifiers', reason: Schema is in ReadOnly mode 2026-01-08T15:36:32.243333Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:6049: Schema modification rejected because of ReadOnly mode, at tablet: 72075186233409549 txid: 281474976720670 2026-01-08T15:36:32.243381Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:6049: Schema modification rejected because of ReadOnly mode, at tablet: 72075186233409549 txid: 281474976720671 2026-01-08T15:36:32.243417Z node 2 :FLAT_TX_SCHEMESHARD ERROR: schemeshard_sysviews_update.cpp:189: SysViewsRosterUpdate# [2:688:2595] at schemeshard: 72075186233409549, failed to create sys view '/Root/USER_0/.sys/nodes', reason: Schema is in ReadOnly mode 2026-01-08T15:36:32.243594Z node 2 :FLAT_TX_SCHEMESHARD ERROR: schemeshard_sysviews_update.cpp:189: SysViewsRosterUpdate# [2:688:2595] at schemeshard: 72075186233409549, failed to create sys view '/Root/USER_0/.sys/top_queries_by_cpu_time_one_hour', reason: Schema is in ReadOnly mode 2026-01-08T15:36:32.243837Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:6049: Schema modification rejected because of ReadOnly mode, at tablet: 72075186233409549 txid: 281474976720672 2026-01-08T15:36:32.243904Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:6049: Schema modification rejected because of ReadOnly mode, at tablet: 72075186233409549 txid: 281474976720673 2026-01-08T15:36:32.243964Z node 2 :FLAT_TX_SCHEMESHARD ERROR: schemeshard_sysviews_update.cpp:189: SysViewsRosterUpdate# [2:688:2595] at schemeshard: 72075186233409549, failed to create sys view '/Root/USER_0/.sys/top_partitions_by_tli_one_minute', reason: Schema is in ReadOnly mode 2026-01-08T15:36:32.244146Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:6049: Schema modification rejected because of ReadOnly mode, at tablet: 72075186233409549 txid: 281474976720674 2026-01-08T15:36:32.244193Z node 2 :FLAT_TX_SCHEMESHARD ERROR: schemeshard_sysviews_update.cpp:189: SysViewsRosterUpdate# [2:688:2595] at schemeshard: 72075186233409549, failed to create sys view '/Root/USER_0/.sys/top_queries_by_cpu_time_one_minute', reason: Schema is in ReadOnly mode 
2026-01-08T15:36:32.244312Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:6049: Schema modification rejected because of ReadOnly mode, at tablet: 72075186233409549 txid: 281474976720675 2026-01-08T15:36:32.244353Z node 2 :FLAT_TX_SCHEMESHARD ERROR: schemeshard_sysviews_update.cpp:189: SysViewsRosterUpdate# [2:688:2595] at schemeshard: 72075186233409549, failed to create sys view '/Root/USER_0/.sys/top_partitions_by_tli_one_hour', reason: Schema is in ReadOnly mode 2026-01-08T15:36:32.244478Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:6049: Schema modification rejected because of ReadOnly mode, at tablet: 72075186233409549 txid: 281474976720676 2026-01-08T15:36:32.244536Z node 2 :FLAT_TX_SCHEMESHARD ERROR: schemeshard_sysviews_update.cpp:189: SysViewsRosterUpdate# [2:688:2595] at schemeshard: 72075186233409549, failed to create sys view '/Root/USER_0/.sys/top_queries_by_request_units_one_hour', reason: Schema is in ReadOnly mode 2026-01-08T15:36:32.244715Z node 2 :FLAT_TX_SCHEMESHARD ERROR: schemeshard_sysviews_update.cpp:189: SysViewsRosterUpdate# [2:688:2595] at schemeshard: 72075186233409549, failed to create sys view '/Root/USER_0/.sys/top_partitions_one_hour', reason: Schema is in ReadOnly mode 2026-01-08T15:36:32.244901Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:6049: Schema modification rejected because of ReadOnly mode, at tablet: 72075186233409549 txid: 281474976720677 2026-01-08T15:36:32.245013Z node 2 :FLAT_TX_SCHEMESHARD ERROR: schemeshard_sysviews_update.cpp:189: SysViewsRosterUpdate# [2:688:2595] at schemeshard: 72075186233409549, failed to create sys view '/Root/USER_0/.sys/pg_tables', reason: Schema is in ReadOnly mode 2026-01-08T15:36:32.245140Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:6049: Schema modification rejected because of ReadOnly mode, at tablet: 72075186233409549 txid: 281474976720678 2026-01-08T15:36:32.245178Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:6049: Schema modification rejected because of ReadOnly mode, at tablet: 72075186233409549 txid: 281474976720679 2026-01-08T15:36:32.245263Z node 2 :FLAT_TX_SCHEMESHARD ERROR: schemeshard_sysviews_update.cpp:189: SysViewsRosterUpdate# [2:688:2595] at schemeshard: 72075186233409549, failed to create sys view '/Root/USER_0/.sys/top_queries_by_duration_one_minute', reason: Schema is in ReadOnly mode 2026-01-08T15:36:32.245333Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:6049: Schema modification rejected because of ReadOnly mode, at tablet: 72075186233409549 txid: 281474976720680 2026-01-08T15:36:32.245376Z node 2 :FLAT_TX_SCHEMESHARD ERROR: schemeshard_sysviews_update.cpp:189: SysViewsRosterUpdate# [2:688:2595] at schemeshard: 72075186233409549, failed to create sys view '/Root/USER_0/.sys/auth_group_members', reason: Schema is in ReadOnly mode 2026-01-08T15:36:32.245438Z node 2 :FLAT_TX_SCHEMESHARD ERROR: schemeshard_sysviews_update.cpp:189: SysViewsRosterUpdate# [2:688:2595] at schemeshard: 72075186233409549, failed to create sys view '/Root/USER_0/.sys/auth_users', reason: Schema is in ReadOnly mode 2026-01-08T15:36:32.245502Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:6049: Schema modification rejected because of ReadOnly mode, at tablet: 72075186233409549 txid: 281474976720681 2026-01-08T15:36:32.245546Z node 2 :FLAT_TX_SCHEMESHARD ERROR: schemeshard_sysviews_update.cpp:189: SysViewsRosterUpdate# [2:688:2595] at schemeshard: 72075186233409549, failed to create sys view '/Root/USER_0/.sys/resource_pools', reason: Schema is in 
ReadOnly mode 2026-01-08T15:36:32.245590Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:6049: Schema modification rejected because of ReadOnly mode, at tablet: 72075186233409549 txid: 281474976720682 2026-01-08T15:36:32.245624Z node 2 :FLAT_TX_SCHEMESHARD ERROR: schemeshard_sysviews_update.cpp:189: SysViewsRosterUpdate# [2:688:2595] at schemeshard: 72075186233409549, failed to create sys view '/Root/USER_0/.sys/streaming_queries', reason: Schema is in ReadOnly mode 2026-01-08T15:36:32.245671Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:6049: Schema modification rejected because of ReadOnly mode, at tablet: 72075186233409549 txid: 281474976720683 2026-01-08T15:36:32.245731Z node 2 :FLAT_TX_SCHEMESHARD ERROR: schemeshard_sysviews_update.cpp:189: SysViewsRosterUpdate# [2:688:2595] at schemeshard: 72075186233409549, failed to create sys view '/Root/USER_0/.sys/auth_permissions', reason: Schema is in ReadOnly mode 2026-01-08T15:36:32.245813Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:6049: Schema modification rejected because of ReadOnly mode, at tablet: 72075186233409549 txid: 281474976720684 2026-01-08T15:36:32.245864Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:6049: Schema modification rejected because of ReadOnly mode, at tablet: 72075186233409549 txid: 281474976720685 2026-01-08T15:36:32.245907Z node 2 :FLAT_TX_SCHEMESHARD ERROR: schemeshard_sysviews_update.cpp:189: SysViewsRosterUpdate# [2:688:2595] at schemeshard: 72075186233409549, failed to create sys view '/Root/USER_0/.sys/top_partitions_one_minute', reason: Schema is in ReadOnly mode 2026-01-08T15:36:32.245992Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:6049: Schema modification rejected because of ReadOnly mode, at tablet: 72075186233409549 txid: 281474976720686 2026-01-08T15:36:32.246034Z node 2 :FLAT_TX_SCHEMESHARD ERROR: schemeshard_sysviews_update.cpp:189: SysViewsRosterUpdate# [2:688:2595] at schemeshard: 72075186233409549, failed to create sys view '/Root/USER_0/.sys/auth_effective_permissions', reason: Schema is in ReadOnly mode 2026-01-08T15:36:32.246309Z node 2 :FLAT_TX_SCHEMESHARD ERROR: schemeshard_sysviews_update.cpp:189: SysViewsRosterUpdate# [2:688:2595] at schemeshard: 72075186233409549, failed to create sys view '/Root/USER_0/.sys/auth_groups', reason: Schema is in ReadOnly mode TestModificationResult got TxId: 105, wait until txId: 105 TestWaitNotification wait txId: 105 TestWaitNotification: OK eventTxId 105 TestModificationResults wait txId: 106 2026-01-08T15:36:32.251923Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:5805: Mark as Migrated path id [OwnerId: 72057594046678944, LocalPathId: 40] 2026-01-08T15:36:32.252034Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:5805: Mark as Migrated path id [OwnerId: 72057594046678944, LocalPathId: 39] 2026-01-08T15:36:32.252470Z node 2 :FLAT_TX_SCHEMESHARD ERROR: schemeshard__operation_upgrade_subdomain.cpp:1466: TWait ProgressState, dependent transaction: 106, parent transaction: 105, at schemeshard: 72057594046678944 2026-01-08T15:36:32.252648Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpUpgradeSubDomainDecision, opId: 106:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_upgrade_subdomain.cpp:573) TestModificationResult got TxId: 106, wait until txId: 106 TestWaitNotification wait txId: 106 
2026-01-08T15:36:32.284364Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:6292: Got TEvUpdateAck for unknown txId 105, at schemeshard: 72057594046678944 2026-01-08T15:36:32.285120Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:6292: Got TEvUpdateAck for unknown txId 105, at schemeshard: 72057594046678944 TestWaitNotification: OK eventTxId 106 TestModificationResults wait txId: 107 TestModificationResult got TxId: 107, wait until txId: 107 TestWaitNotification wait txId: 107 TestWaitNotification: OK eventTxId 107 TestModificationResults wait txId: 108 2026-01-08T15:36:32.361695Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 108:0, at schemeshard: 72075186233409549, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) FAKEHIVE 72057594037968897 TEvCreateTablet Owner: 72075186233409549 OwnerIdx: 4 TabletType: DataShard ObjectDomain { SchemeShard: 72057594046678944 PathId: 38 } ObjectId: 2 BindedChannels { StoragePoolName: "pool-1" } AllowedDomains { SchemeShard: 72057594046678944 PathId: 38 } Leader for TabletID 72075186233409550 is [0:0:0] sender: [2:777:2067] recipient: [2:774:2668] IGNORE Leader for TabletID 72075186233409550 is [0:0:0] sender: [2:777:2067] recipient: [2:774:2668] Leader for TabletID 72075186233409550 is [0:0:0] sender: [2:780:2067] recipient: [2:24:2071] IGNORE Leader for TabletID 72075186233409550 is [0:0:0] sender: [2:780:2067] recipient: [2:24:2071] Leader for TabletID 72075186233409550 is [2:781:2672] sender: [2:782:2067] recipient: [2:774:2668] TestModificationResult got TxId: 108, wait until txId: 108 TestWaitNotification wait txId: 108 FAKEHIVE 72057594037968897 TEvDeleteTablet ShardOwnerId: 72057594046678944 ShardLocalIdx: 3 TxId_Deprecated: 0 TabletID: 72075186233409548 Forgetting tablet 72075186233409548 TestWaitNotification: OK eventTxId 108 2026-01-08T15:36:35.107141Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7747: Cannot get console configs 2026-01-08T15:36:35.107231Z node 2 :IMPORT WARN: schemeshard_import.cpp:440: Table profiles were not loaded 2026-01-08T15:36:35.170621Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7747: Cannot get console configs 2026-01-08T15:36:35.170694Z node 2 :IMPORT WARN: schemeshard_import.cpp:440: Table profiles were not loaded |88.6%| [TM] {BAZEL_UPLOAD} ydb/core/tx/scheme_board/ut_cache/unittest ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/datashard/ut_erase_rows/unittest >> EraseRowsTests::ConditionalEraseRowsShouldEraseOnDatetime64 [GOOD] Test command err: ... waiting for SysViewsRoster update finished 2026-01-08T15:36:48.819958Z node 1 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:125: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, got undelivered to scheme cache: ActorUnknown 2026-01-08T15:36:48.948277Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:114:2161], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2026-01-08T15:36:48.948656Z node 1 :METADATA_PROVIDER ERROR: log.cpp:841: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2026-01-08T15:36:48.948806Z node 1 :METADATA_PROVIDER ERROR: log.cpp:841: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2026-01-08T15:36:49.230070Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2026-01-08T15:36:49.230178Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2026-01-08T15:36:49.289436Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1374: Notification cookie mismatch for subscription [1:34:2081] 1767886605947463 != 1767886605947467 2026-01-08T15:36:49.300173Z node 1 :IMPORT WARN: schemeshard_import.cpp:440: Table profiles were not loaded 2026-01-08T15:36:49.343668Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2026-01-08T15:36:49.441357Z node 1 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: PathErrorUnknown, path: Root/.metadata/script_executions ... waiting for SysViewsRoster update finished (done) 2026-01-08T15:36:49.725431Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2026-01-08T15:36:49.738420Z node 1 :TX_CONVEYOR ERROR: log.cpp:841: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2026-01-08T15:36:49.842344Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:36:49.878208Z node 1 :TX_DATASHARD INFO: datashard.cpp:374: TDataShard::OnActivateExecutor: tablet 72075186224037888 actor [1:886:2765] 2026-01-08T15:36:49.878480Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:661: TxInitSchema.Execute 2026-01-08T15:36:49.928962Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:727: TxInitSchema.Complete 2026-01-08T15:36:49.929079Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:50: TDataShard::TTxInit::Execute 2026-01-08T15:36:49.930739Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1324: LoadChangeRecords: QueueSize: 0, at tablet: 72075186224037888 2026-01-08T15:36:49.930821Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1380: LoadLockChangeRecords at tablet: 72075186224037888 2026-01-08T15:36:49.930896Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1429: LoadChangeRecordCommits at tablet: 72075186224037888 2026-01-08T15:36:49.931272Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:94: TDataShard::TTxInit::Complete 2026-01-08T15:36:49.931408Z node 1 :TX_DATASHARD 
DEBUG: datashard__init.cpp:102: TDataShard::TTxInitRestored::Execute 2026-01-08T15:36:49.931495Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:142: DataShard 72075186224037888 persisting started state actor id [1:901:2765] in generation 1 2026-01-08T15:36:49.942668Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:151: TDataShard::TTxInitRestored::Complete 2026-01-08T15:36:49.975776Z node 1 :TX_DATASHARD INFO: datashard.cpp:418: Switched to work state WaitScheme tabletId 72075186224037888 2026-01-08T15:36:49.975961Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:458: 72075186224037888 not sending time cast registration request in state WaitScheme: missing processing params 2026-01-08T15:36:49.976047Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1259: Change sender created: at tablet: 72075186224037888, actorId: [1:903:2775] 2026-01-08T15:36:49.976105Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1264: Trying to activate change sender: at tablet: 72075186224037888 2026-01-08T15:36:49.976136Z node 1 :TX_DATASHARD INFO: datashard.cpp:1281: Cannot activate change sender: at tablet: 72075186224037888, state: WaitScheme 2026-01-08T15:36:49.976174Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2026-01-08T15:36:49.976553Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 72075186224037888 2026-01-08T15:36:49.976627Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 72075186224037888 2026-01-08T15:36:49.976688Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2026-01-08T15:36:49.976717Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 0 2026-01-08T15:36:49.976746Z node 1 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186224037888 TxInFly 0 2026-01-08T15:36:49.976772Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2026-01-08T15:36:49.977057Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3740: Server connected at leader tablet# 72075186224037888, clientId# [1:882:2762], serverId# [1:887:2766], sessionId# [0:0:0] 2026-01-08T15:36:49.977196Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037888 2026-01-08T15:36:49.977389Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:133: Propose scheme transaction at tablet 72075186224037888 txId 281474976715657 ssId 72057594046644480 seqNo 2:1 2026-01-08T15:36:49.977499Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:221: Prepared scheme transaction txId 281474976715657 at tablet 72075186224037888 2026-01-08T15:36:49.978869Z node 1 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:180: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2026-01-08T15:36:49.989652Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 72075186224037888 2026-01-08T15:36:49.989781Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:468: 72075186224037888 not sending time cast registration request in state WaitScheme 2026-01-08T15:36:50.106997Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3740: Server connected at leader tablet# 72075186224037888, clientId# [1:917:2783], serverId# 
[1:919:2785], sessionId# [0:0:0] 2026-01-08T15:36:50.111803Z node 1 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:69: Planned transaction txId 281474976715657 at step 2000 at tablet 72075186224037888 { Transactions { TxId: 281474976715657 AckTo { RawX1: 0 RawX2: 0 } } Step: 2000 MediatorID: 72057594046382081 TabletID: 72075186224037888 } 2026-01-08T15:36:50.111903Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2026-01-08T15:36:50.112166Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2026-01-08T15:36:50.112211Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 1 2026-01-08T15:36:50.112260Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:333: Found ready operation [2000:281474976715657] in PlanQueue unit at 72075186224037888 2026-01-08T15:36:50.112522Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:634: LoadTxDetails at 72075186224037888 loaded tx from db 2000:281474976715657 keys extracted: 0 2026-01-08T15:36:50.112707Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 (dry run) active 1 active planned 1 immediate 0 planned 1 2026-01-08T15:36:50.113472Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2026-01-08T15:36:50.113548Z node 1 :TX_DATASHARD INFO: create_table_unit.cpp:69: Trying to CREATE TABLE at 72075186224037888 tableId# [OwnerId: 72057594046644480, LocalPathId: 38] schema version# 1 2026-01-08T15:36:50.122500Z node 1 :TX_DATASHARD INFO: datashard.cpp:476: Send registration request to time cast Ready tabletId 72075186224037888 mediators count is 1 coordinators count is 1 buckets per mediator 2 2026-01-08T15:36:50.123024Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 (dry run) active 0 active planned 0 immediate 0 planned 0 2026-01-08T15:36:50.125272Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3772: Got TEvMediatorTimecast::TEvRegisterTabletResult at 72075186224037888 time 0 2026-01-08T15:36:50.125334Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2026-01-08T15:36:50.126013Z node 1 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:98: Sending '{TEvPlanStepAccepted TabletId# 72075186224037888 step# 2000} 2026-01-08T15:36:50.126091Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2026-01-08T15:36:50.127217Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2026-01-08T15:36:50.127264Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1264: Trying to activate change sender: at tablet: 72075186224037888 2026-01-08T15:36:50.127312Z node 1 :TX_DATASHARD INFO: datashard.cpp:1302: Change sender activated: at tablet: 72075186224037888 2026-01-08T15:36:50.127376Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:810: Complete [2000 : 281474976715657] from 72075186224037888 at tablet 72075186224037888 send result to client [1:400:2399], exec latency: 0 ms, propose latency: 0 ms 2026-01-08T15:36:50.127428Z node 1 :TX_DATASHARD INFO: datashard.cpp:1599: 72075186224037888 Sending notify to schemeshard 72057594046644480 txId 281474976715657 state Ready TxInFly 0 2026-01-08T15:36:50.127529Z node 1 :TX_DATASHARD DEBUG: 
cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2026-01-08T15:36:50.132627Z node 1 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:180: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2026-01-08T15:36:50.134245Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:2965: Handle TEvSchemaChangedResult 281474976715657 datashard 72075186224037888 state Ready 2026-01-08T15:36:50.134345Z node 1 :TX_DATASHARD DEBUG: datashard__schema_changed.cpp:22: 72075186224037888 Got TEvSchemaChangedResult from SS at 72075186224037888 2026-01-08T15:36:50.135155Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3790: Got TEvMediatorTimecast::TEvSubscribeReadStepResult at 72075186224037888 coordinator 72057594046316545 last step 0 next step 2000 2026-01-08T15:36:50.144856Z node 1 :KQP_WORKLOAD_SERV ... pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 1 2026-01-08T15:37:00.458005Z node 3 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:333: Found ready operation [2000:281474976715657] in PlanQueue unit at 72075186224037888 2026-01-08T15:37:00.458334Z node 3 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:634: LoadTxDetails at 72075186224037888 loaded tx from db 2000:281474976715657 keys extracted: 0 2026-01-08T15:37:00.458472Z node 3 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 (dry run) active 1 active planned 1 immediate 0 planned 1 2026-01-08T15:37:00.458686Z node 3 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2026-01-08T15:37:00.458750Z node 3 :TX_DATASHARD INFO: create_table_unit.cpp:69: Trying to CREATE TABLE at 72075186224037888 tableId# [OwnerId: 72057594046644480, LocalPathId: 38] schema version# 1 2026-01-08T15:37:00.459260Z node 3 :TX_DATASHARD INFO: datashard.cpp:476: Send registration request to time cast Ready tabletId 72075186224037888 mediators count is 1 coordinators count is 1 buckets per mediator 2 2026-01-08T15:37:00.459724Z node 3 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 (dry run) active 0 active planned 0 immediate 0 planned 0 2026-01-08T15:37:00.461854Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:3772: Got TEvMediatorTimecast::TEvRegisterTabletResult at 72075186224037888 time 0 2026-01-08T15:37:00.461911Z node 3 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2026-01-08T15:37:00.462972Z node 3 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:98: Sending '{TEvPlanStepAccepted TabletId# 72075186224037888 step# 2000} 2026-01-08T15:37:00.463328Z node 3 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2026-01-08T15:37:00.464408Z node 3 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2026-01-08T15:37:00.464451Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:1264: Trying to activate change sender: at tablet: 72075186224037888 2026-01-08T15:37:00.464496Z node 3 :TX_DATASHARD INFO: datashard.cpp:1302: Change sender activated: at tablet: 72075186224037888 2026-01-08T15:37:00.464556Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:810: Complete [2000 : 281474976715657] from 72075186224037888 at tablet 72075186224037888 send result to client [3:399:2398], exec latency: 0 ms, propose latency: 0 ms 2026-01-08T15:37:00.464611Z node 3 :TX_DATASHARD 
INFO: datashard.cpp:1599: 72075186224037888 Sending notify to schemeshard 72057594046644480 txId 281474976715657 state Ready TxInFly 0 2026-01-08T15:37:00.464705Z node 3 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2026-01-08T15:37:00.466268Z node 3 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:180: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2026-01-08T15:37:00.467989Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:3790: Got TEvMediatorTimecast::TEvSubscribeReadStepResult at 72075186224037888 coordinator 72057594046316545 last step 0 next step 2000 2026-01-08T15:37:00.468154Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:2965: Handle TEvSchemaChangedResult 281474976715657 datashard 72075186224037888 state Ready 2026-01-08T15:37:00.468216Z node 3 :TX_DATASHARD DEBUG: datashard__schema_changed.cpp:22: 72075186224037888 Got TEvSchemaChangedResult from SS at 72075186224037888 2026-01-08T15:37:00.477833Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:951:2809], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:37:00.477938Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:962:2814], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:37:00.478374Z node 3 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:263: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:37:00.479330Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:967:2819], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:37:00.479667Z node 3 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:263: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:37:00.483921Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2026-01-08T15:37:00.490800Z node 3 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:180: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2026-01-08T15:37:00.606749Z node 3 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:180: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2026-01-08T15:37:00.609821Z node 3 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [3:965:2817], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715658 completed, doublechecking } 2026-01-08T15:37:00.644807Z node 3 :TX_PROXY ERROR: schemereq.cpp:624: Actor# [3:1023:2856] txid# 281474976715659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 42], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2026-01-08T15:37:00.754927Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:3740: Server connected at leader tablet# 72075186224037888, clientId# [3:1053:2873], serverId# [3:1054:2874], sessionId# [0:0:0] 2026-01-08T15:37:00.755441Z node 3 :TX_DATASHARD DEBUG: execute_write_unit.cpp:262: Executing write operation for [0:2] at 72075186224037888 2026-01-08T15:37:00.755685Z node 3 :TX_DATASHARD DEBUG: execute_write_unit.cpp:465: Executed write operation for [0:2] at 72075186224037888, row count=5 2026-01-08T15:37:00.766752Z node 3 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2026-01-08T15:37:00.771506Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:3740: Server connected at leader tablet# 72075186224037888, clientId# [3:1061:2880], serverId# [3:1062:2881], sessionId# [0:0:0] 2026-01-08T15:37:00.772571Z node 3 :TX_DATASHARD INFO: datashard__op_rows.cpp:27: TTxDirectBase(48) Execute: at tablet# 72075186224037888 2026-01-08T15:37:00.784100Z node 3 :TX_DATASHARD INFO: datashard__op_rows.cpp:81: TTxDirectBase(48) Complete: at tablet# 72075186224037888 2026-01-08T15:37:00.784193Z node 3 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2026-01-08T15:37:00.784605Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:4486: FullScan complete at 72075186224037888 2026-01-08T15:37:00.784657Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:4502: Conditional erase complete: cookie: 3, at: 72075186224037888 2026-01-08T15:37:00.784830Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:3758: Server disconnected at leader tablet# 72075186224037888, clientId# [3:1061:2880], serverId# [3:1062:2881], sessionId# [0:0:0] 2026-01-08T15:37:00.784929Z node 3 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2026-01-08T15:37:00.784981Z node 3 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 0 2026-01-08T15:37:00.785039Z node 3 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186224037888 TxInFly 0 2026-01-08T15:37:00.785117Z node 3 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2026-01-08T15:37:00.786203Z node 3 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037888 2026-01-08T15:37:00.786618Z node 3 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 72075186224037888 2026-01-08T15:37:00.786850Z node 3 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2026-01-08T15:37:00.786900Z node 3 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 1 active planned 0 immediate 1 planned 0 2026-01-08T15:37:00.786952Z node 3 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:307: Found ready candidate operation [0:281474976715661] at 72075186224037888 for WaitForStreamClearance 
2026-01-08T15:37:00.787212Z node 3 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 (dry run) active 1 active planned 0 immediate 1 planned 0 2026-01-08T15:37:00.787274Z node 3 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2026-01-08T15:37:00.787923Z node 3 :TX_DATASHARD DEBUG: read_table_scan.cpp:514: Got quota for read table scan ShardId: 72075186224037888, TxId: 281474976715661, MessageQuota: 1 2026-01-08T15:37:00.788212Z node 3 :TX_DATASHARD DEBUG: read_table_scan.cpp:662: Send response data ShardId: 72075186224037888, TxId: 281474976715661, Size: 36, Rows: 0, PendingAcks: 1, MessageQuota: 0 2026-01-08T15:37:00.788398Z node 3 :TX_DATASHARD DEBUG: read_table_scan.cpp:483: Got stream data ack ShardId: 72075186224037888, TxId: 281474976715661, PendingAcks: 0 2026-01-08T15:37:00.788471Z node 3 :TX_DATASHARD DEBUG: read_table_scan.cpp:718: Finish scan ShardId: 72075186224037888, TxId: 281474976715661, MessageQuota: 0 2026-01-08T15:37:00.790597Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:4486: FullScan complete at 72075186224037888 2026-01-08T15:37:00.790657Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:4492: Found op: cookie: 281474976715661, at: 72075186224037888 2026-01-08T15:37:00.790765Z node 3 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2026-01-08T15:37:00.790805Z node 3 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 1 active planned 0 immediate 1 planned 0 2026-01-08T15:37:00.790847Z node 3 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:307: Found ready candidate operation [0:281474976715661] at 72075186224037888 for ReadTableScan 2026-01-08T15:37:00.790976Z node 3 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 (dry run) active 0 active planned 0 immediate 0 planned 0 2026-01-08T15:37:00.791041Z node 3 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2026-01-08T15:37:00.791094Z node 3 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 |88.6%| [TM] {BAZEL_UPLOAD} ydb/core/tx/datashard/ut_erase_rows/unittest >> ColumnShardTiers::DSConfigsWithQueryServiceDdl [GOOD] >> EraseRowsTests::ConditionalEraseRowsShouldEraseOnUint64MilliSeconds >> TestSetCloudPermissions::CanSetAllPermissions [GOOD] >> DistributedEraseTests::ConditionalEraseRowsShouldSuccessOnShardedIndex >> DistributedEraseTests::ConditionalEraseRowsShouldEraseOnUint32 |88.6%| [TM] {asan, default-linux-x86_64, release} ydb/core/grpc_services/grpc_request_check_actor_ut/unittest |88.6%| [TM] {BAZEL_UPLOAD} ydb/core/grpc_services/grpc_request_check_actor_ut/unittest >> TestSetCloudPermissions::CanSetPermissionsWithoutGizmoResourse |88.6%| [TA] $(B)/ydb/core/tx/scheme_board/ut_cache/test-results/unittest/{meta.json ... results_accumulator.log} |88.6%| [TM] {asan, default-linux-x86_64, release} ydb/core/grpc_services/grpc_request_check_actor_ut/unittest |88.6%| [TM] {BAZEL_UPLOAD} ydb/core/grpc_services/grpc_request_check_actor_ut/unittest |88.6%| [TA] {BAZEL_UPLOAD} $(B)/ydb/core/tx/scheme_board/ut_cache/test-results/unittest/{meta.json ... 
results_accumulator.log} >> TConsoleTests::TestListTenantsExtSubdomain [GOOD] >> TConsoleTests::TestMergeConfig >> DistributedEraseTests::ConditionalEraseRowsShouldFailOnSplit [GOOD] >> TConsoleTests::TestRemoveSharedTenantWithServerlessTenants [GOOD] >> TConsoleTests::TestRemoveSharedTenantAfterRemoveServerlessTenant >> BsControllerConfig::MergeBoxes [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/grpc_services/grpc_request_check_actor_ut/unittest >> TestSetCloudPermissions::CanSetAllPermissions [GOOD] Test command err: 2026-01-08T15:37:00.126084Z node 1 :METADATA_PROVIDER WARN: log.cpp:841: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7593015216934295576:2067];send_to=[0:7307199536658146131:7762515]; 2026-01-08T15:37:00.126152Z node 1 :METADATA_PROVIDER ERROR: log.cpp:841: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2026-01-08T15:37:00.369057Z node 1 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: LookupError, path: Root/.metadata/script_executions 2026-01-08T15:37:00.381241Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2026-01-08T15:37:00.381368Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2026-01-08T15:37:00.396021Z node 1 :IMPORT WARN: schemeshard_import.cpp:440: Table profiles were not loaded 2026-01-08T15:37:00.397309Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2026-01-08T15:37:00.397756Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1374: Notification cookie mismatch for subscription [1:7593015216934295549:2081] 1767886620124510 != 1767886620124513 2026-01-08T15:37:00.476292Z node 1 :TICKET_PARSER TRACE: ticket_parser_impl.h:519: Ticket **** (8E120919) asking for AccessServiceBulkAuthorization( ydb.schemas.getMetadata ydb.clusters.get ydb.clusters.manage ydb.databases.list ydb.clusters.monitor ydb.streams.write ydb.developerApi.update ydb.tables.write ydb.databases.create ydb.databases.connect ydb.developerApi.get ydb.tables.select) 2026-01-08T15:37:00.476366Z node 1 :GRPC_CLIENT DEBUG: grpc_service_client.h:83: [7cab67b003d0] Connect to grpc://localhost:23191 2026-01-08T15:37:00.485601Z node 1 :GRPC_CLIENT DEBUG: grpc_service_client.h:122: [7cab67b003d0] Request BulkAuthorizeRequest { iam_token: "**** (8E120919)" actions { items { resource_path { id: "database12345" type: "ydb.database" } resource_path { id: "folder12345" type: "resource-manager.folder" } permission: "ydb.schemas.getMetadata" } items { resource_path { id: "cluster.folder98765" type: "resource-manager.folder" } permission: "ydb.clusters.get" } items { resource_path { id: "cluster.folder98765" type: "resource-manager.folder" } permission: "ydb.clusters.manage" } items { resource_path { id: "database12345" type: "ydb.databa...(truncated) } 2026-01-08T15:37:00.494811Z node 1 :GRPC_CLIENT DEBUG: grpc_service_client.h:109: [7cab67b003d0] Response BulkAuthorizeResponse { subject { user_account { id: "user1" } } } 2026-01-08T15:37:00.495185Z node 1 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1875: Ticket **** (8E120919) () has now valid token of user1@as 2026-01-08T15:37:00.495317Z node 1 
:GRPC_PROXY_NO_CONNECT_ACCESS DEBUG: grpc_request_check_actor.h:718: Skip check permission connect db, user is a admin, database: /Root/db, user: user1@as, from ip: |88.6%| [TM] {BAZEL_UPLOAD} ydb/core/grpc_services/grpc_request_check_actor_ut/unittest >> EraseRowsTests::ConditionalEraseRowsShouldEraseOnUint32 >> DistributedEraseTests::DistributedEraseTxShouldFailOnVariousErrors ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/tiering/ut/unittest >> ColumnShardTiers::DSConfigsWithQueryServiceDdl [GOOD] Test command err: ... waiting for SysViewsRoster update finished 2026-01-08T15:34:55.471094Z node 1 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:125: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, got undelivered to scheme cache: ActorUnknown 2026-01-08T15:34:55.744970Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:114:2161], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2026-01-08T15:34:55.745449Z node 1 :METADATA_PROVIDER ERROR: log.cpp:841: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2026-01-08T15:34:55.745636Z node 1 :METADATA_PROVIDER ERROR: log.cpp:841: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2026-01-08T15:34:56.370828Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2026-01-08T15:34:56.370961Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2026-01-08T15:34:56.479018Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1374: Notification cookie mismatch for subscription [1:34:2081] 1767886491167967 != 1767886491167971 2026-01-08T15:34:56.496107Z node 1 :IMPORT WARN: schemeshard_import.cpp:440: Table profiles were not loaded 2026-01-08T15:34:56.547730Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2026-01-08T15:34:56.646014Z node 1 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: PathErrorUnknown, path: Root/.metadata/script_executions ... waiting for SysViewsRoster update finished (done) 2026-01-08T15:34:57.359298Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2026-01-08T15:34:57.360447Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2026-01-08T15:34:57.360506Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2026-01-08T15:34:57.360541Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2026-01-08T15:34:57.360735Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2026-01-08T15:34:57.376194Z node 1 :TX_CONVEYOR ERROR: log.cpp:841: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; Initialization finished REQUEST= UPSERT OBJECT `accessKey` (TYPE SECRET) WITH (value = `secretAccessKey`); UPSERT OBJECT `secretKey` (TYPE SECRET) WITH (value = `fakeSecret`); ;EXPECTATION=1;WAITING=1 2026-01-08T15:35:09.459034Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:953:2820], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:35:09.459203Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:964:2825], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:35:09.459310Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:263: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:35:09.460639Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:969:2830], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:35:09.461090Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:263: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:35:09.466218Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715657:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2026-01-08T15:35:09.494020Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:967:2828], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715657 completed, doublechecking } 2026-01-08T15:35:09.563242Z node 1 :TX_PROXY ERROR: schemereq.cpp:624: Actor# [1:1020:2862] txid# 281474976715658, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 41], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2026-01-08T15:35:10.097942Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:1, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:35:11.946148Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) 2026-01-08T15:35:12.538179Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:1, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:35:13.545686Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715670:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:35:14.478344Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715673:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) 2026-01-08T15:35:15.188997Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715676:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) 2026-01-08T15:35:16.675750Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpInitiateBuildIndexImplTable, opId: 281474976710758:2, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:35:17.068120Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpFinalizeBuildIndexMainTable, opId: 281474976710759:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_finalize_build_index.cpp:383) 2026-01-08T15:35:22.577953Z node 1 :KQP_SLOW_LOG WARN: kqp_worker_common.cpp:132: TraceId: 
"01kef3ync6dgmtdy6c0tvkf5js", SessionId: ydb://session/3?node_id=1&id=N2E4M2JiMjgtNDZhN2QwNjctNDY4MzA5MjUtYmU3NWYzM2Q=, Slow query, duration: 13.121504s, status: STATUS_CODE_UNSPECIFIED, user: root@builtin, results: 0b, text: "\n UPSERT OBJECT `accessKey` (TYPE SECRET) WITH (value = `secretAccessKey`);\n UPSERT OBJECT `secretKey` (TYPE SECRET) WITH (value = `fakeSecret`);\n ", parameters: 0b REQUEST= UPSERT OBJECT `accessKey` (TYPE SECRET) WITH (value = `secretAccessKey`); UPSERT OBJECT `secretKey` (TYPE SECRET) WITH (value = `fakeSecret`); ;RESULT=;EXPECTATION=1 2026-01-08T15:35:22.607333Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7747: Cannot get console configs 2026-01-08T15:35:22.607415Z node 1 :IMPORT WARN: schemeshard_import.cpp:440: Table profiles were not loaded FINISHED_REQUEST= UPSERT OBJECT `accessKey` (TYPE SECRET) WITH (value = `secretAccessKey`); UPSERT OBJECT `secretKey` (TYPE SECRET) WITH (value = `fakeSecret`); ;EXPECTATION=1;WAITING=1 REQUEST= CREATE EXTERNAL DATA SOURCE `/Root/tier1` WITH ( SOURCE_TYPE="ObjectStorage", LOCATION="http://fake.fake/abc1", AUTH_METHOD="AWS", AWS_ACCESS_KEY_ID_SECRET_NAME="accessKey", AWS_SECRET_ACCESS_KEY_SECRET_NAME="secretKey", AWS_REGION="ru-central1" ); ;EXPECTATION=1;WAITING=1 2026-01-08T15:35:33.787655Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateExternalDataSource, opId: 281474976715702:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_external_data_source.cpp:268) REQUEST= CREATE EXTERNAL DATA SOURCE `/Root/tier1` WITH ( SOURCE_TYPE="ObjectStorage", LOCATION="http://fake.fake/abc1", AUTH_METHOD="AWS", AWS_ACCESS_KEY_ID_SECRET_NAME="accessKey", AWS_SECRET_ACCESS_KEY_SECRET_NAME="secretKey", AWS_REGION="ru-central1" ); ;RESULT=;EXPECTATION=1 FINISHED_REQUEST= CREATE EXTERNAL DATA SOURCE `/Root/tier1` WITH ( SOURCE_TYPE="ObjectStorage", LOCATION="http://fake.fake/abc1", AUTH_METHOD="AWS", AWS_ACCESS_KEY_ID_SECRET_NAME="accessKey", AWS_SECRET_ACCESS_KEY_SECRET_NAME="secretKey", AWS_REGION="ru-central1" ); ;EXPECTATION=1;WAITING=1 2026-01-08T15:35:35.340559Z node 1 :TX_TIERING DEBUG: log.cpp:841: fline=manager.cpp:227;event=skip_tier_manager_start;tier=/Root/tier1;has_secrets=1;tier_config=0; 2026-01-08T15:35:35.340664Z node 1 :TX_TIERING DEBUG: log.cpp:841: fl ... 
ated;configs=TIERS={id=/Root/tier2;has_config=0}{id=/Root/tier1;has_config=1};SECRETS={USId:root@builtin:accessKey;USId:root@builtin:secretKey;}; 2026-01-08T15:36:49.497887Z node 1 :TX_TIERING DEBUG: log.cpp:841: fline=manager.cpp:101;component=tiering_manager;event=object_deleted;name=/Root/tier2; 2026-01-08T15:36:49.497917Z node 1 :TX_TIERING INFO: log.cpp:841: fline=manager.cpp:295;event=update_tier_config;name=/Root/tier2;tablet=0;has_config=0; 2026-01-08T15:36:49.497946Z node 1 :TX_TIERING DEBUG: log.h:466: manager.cpp:154 :Restarting tier '/Root/tier1' at tablet 0 2026-01-08T15:36:49.497975Z node 1 :TX_TIERING DEBUG: log.h:466: manager.cpp:162 :Tier '/Root/tier1' stopped at tablet 0 2026-01-08T15:36:49.498008Z node 1 :TX_TIERING DEBUG: log.h:466: manager.cpp:174 :Tier '/Root/tier1' started at tablet 0 2026-01-08T15:36:49.498036Z node 1 :TX_TIERING DEBUG: log.cpp:841: fline=manager.cpp:208;event=skip_tier_manager_reloading;tier=/Root/tier2;has_secrets=1;found_tier_config=1; 2026-01-08T15:36:49.498077Z node 1 :TX_TIERING DEBUG: log.cpp:841: fline=manager.cpp:217;event=configs_updated;configs=TIERS={id=/Root/tier2;has_config=0}{id=/Root/tier1;has_config=1};SECRETS={USId:root@builtin:accessKey;USId:root@builtin:secretKey;}; 2026-01-08T15:36:49.498502Z node 1 :TX_TIERING DEBUG: log.cpp:841: tablet_id=72075186224037892;self_id=[1:3144:4502];ev=NKikimr::NColumnShard::TEvPrivate::TEvTieringModified;fline=tiering.cpp:244;event=refresh_tiering;has_tiering=0;tiers=0;had_tiering_before=1; 2026-01-08T15:36:49.498635Z node 1 :TX_TIERING DEBUG: log.cpp:841: tablet_id=72075186224037893;self_id=[1:3148:4504];ev=NKikimr::NColumnShard::TEvPrivate::TEvTieringModified;fline=tiering.cpp:244;event=refresh_tiering;has_tiering=0;tiers=0;had_tiering_before=1; 2026-01-08T15:36:49.498735Z node 1 :TX_TIERING DEBUG: log.cpp:841: tablet_id=72075186224037894;self_id=[1:3157:4511];ev=NKikimr::NColumnShard::TEvPrivate::TEvTieringModified;fline=tiering.cpp:244;event=refresh_tiering;has_tiering=0;tiers=0;had_tiering_before=1; REQUEST=DROP EXTERNAL DATA SOURCE `/Root/tier2`;RESULT=;EXPECTATION=1 FINISHED_REQUEST=DROP EXTERNAL DATA SOURCE `/Root/tier2`;EXPECTATION=1;WAITING=1 REQUEST=DROP EXTERNAL DATA SOURCE `/Root/tier1`;EXPECTATION=1;WAITING=1 2026-01-08T15:37:00.690743Z node 1 :TX_TIERING DEBUG: log.cpp:841: fline=fetcher.h:149;event=object_deleted;path=/Root/tier1; 2026-01-08T15:37:00.690839Z node 1 :TX_TIERING DEBUG: log.cpp:841: fline=fetcher.h:149;event=object_deleted;path=/Root/tier1; 2026-01-08T15:37:00.690880Z node 1 :TX_TIERING DEBUG: log.cpp:841: fline=fetcher.h:149;event=object_deleted;path=/Root/tier1; 2026-01-08T15:37:00.690931Z node 1 :TX_TIERING DEBUG: log.cpp:841: fline=fetcher.h:149;event=object_deleted;path=/Root/tier1; 2026-01-08T15:37:00.691081Z node 1 :TX_TIERING DEBUG: log.cpp:841: fline=fetcher.h:149;event=object_deleted;path=/Root/tier1; 2026-01-08T15:37:00.691241Z node 1 :TX_TIERING DEBUG: log.cpp:841: fline=fetcher.h:149;event=object_deleted;path=/Root/tier1; 2026-01-08T15:37:00.691586Z node 1 :TX_TIERING DEBUG: log.cpp:841: fline=manager.cpp:101;component=tiering_manager;event=object_deleted;name=/Root/tier1; 2026-01-08T15:37:00.691672Z node 1 :TX_TIERING INFO: log.cpp:841: fline=manager.cpp:295;event=update_tier_config;name=/Root/tier1;tablet=0;has_config=0; 2026-01-08T15:37:00.691737Z node 1 :TX_TIERING DEBUG: log.cpp:841: fline=manager.cpp:208;event=skip_tier_manager_reloading;tier=/Root/tier1;has_secrets=1;found_tier_config=1; 2026-01-08T15:37:00.691778Z node 1 :TX_TIERING DEBUG: 
log.cpp:841: fline=manager.cpp:208;event=skip_tier_manager_reloading;tier=/Root/tier2;has_secrets=1;found_tier_config=1; 2026-01-08T15:37:00.691848Z node 1 :TX_TIERING DEBUG: log.cpp:841: fline=manager.cpp:217;event=configs_updated;configs=TIERS={id=/Root/tier2;has_config=0}{id=/Root/tier1;has_config=0};SECRETS={USId:root@builtin:accessKey;USId:root@builtin:secretKey;}; 2026-01-08T15:37:00.692163Z node 1 :TX_TIERING DEBUG: log.cpp:841: fline=manager.cpp:101;component=tiering_manager;event=object_deleted;name=/Root/tier1; 2026-01-08T15:37:00.692204Z node 1 :TX_TIERING INFO: log.cpp:841: fline=manager.cpp:295;event=update_tier_config;name=/Root/tier1;tablet=0;has_config=0; 2026-01-08T15:37:00.692237Z node 1 :TX_TIERING DEBUG: log.cpp:841: fline=manager.cpp:208;event=skip_tier_manager_reloading;tier=/Root/tier1;has_secrets=1;found_tier_config=1; 2026-01-08T15:37:00.692280Z node 1 :TX_TIERING DEBUG: log.cpp:841: fline=manager.cpp:217;event=configs_updated;configs=TIERS={id=/Root/tier1;has_config=0};SECRETS={USId:root@builtin:accessKey;USId:root@builtin:secretKey;}; 2026-01-08T15:37:00.692439Z node 1 :TX_TIERING DEBUG: log.cpp:841: fline=manager.cpp:101;component=tiering_manager;event=object_deleted;name=/Root/tier1; 2026-01-08T15:37:00.692470Z node 1 :TX_TIERING INFO: log.cpp:841: fline=manager.cpp:295;event=update_tier_config;name=/Root/tier1;tablet=0;has_config=0; 2026-01-08T15:37:00.692499Z node 1 :TX_TIERING DEBUG: log.cpp:841: fline=manager.cpp:208;event=skip_tier_manager_reloading;tier=/Root/tier1;has_secrets=1;found_tier_config=1; 2026-01-08T15:37:00.692526Z node 1 :TX_TIERING DEBUG: log.cpp:841: fline=manager.cpp:208;event=skip_tier_manager_reloading;tier=/Root/tier2;has_secrets=1;found_tier_config=1; 2026-01-08T15:37:00.692563Z node 1 :TX_TIERING DEBUG: log.cpp:841: fline=manager.cpp:217;event=configs_updated;configs=TIERS={id=/Root/tier2;has_config=0}{id=/Root/tier1;has_config=0};SECRETS={USId:root@builtin:accessKey;USId:root@builtin:secretKey;}; 2026-01-08T15:37:00.693039Z node 1 :TX_TIERING DEBUG: log.cpp:841: fline=manager.cpp:101;component=tiering_manager;event=object_deleted;name=/Root/tier1; 2026-01-08T15:37:00.693083Z node 1 :TX_TIERING INFO: log.cpp:841: fline=manager.cpp:295;event=update_tier_config;name=/Root/tier1;tablet=72075186224037892;has_config=0; 2026-01-08T15:37:00.693113Z node 1 :TX_TIERING DEBUG: log.cpp:841: fline=manager.cpp:208;event=skip_tier_manager_reloading;tier=/Root/tier1;has_secrets=1;found_tier_config=1; 2026-01-08T15:37:00.693141Z node 1 :TX_TIERING DEBUG: log.cpp:841: fline=manager.cpp:208;event=skip_tier_manager_reloading;tier=/Root/tier2;has_secrets=1;found_tier_config=1; 2026-01-08T15:37:00.693188Z node 1 :TX_TIERING DEBUG: log.cpp:841: fline=manager.cpp:217;event=configs_updated;configs=TIERS={id=/Root/tier2;has_config=0}{id=/Root/tier1;has_config=0};SECRETS={USId:root@builtin:accessKey;USId:root@builtin:secretKey;}; 2026-01-08T15:37:00.693226Z node 1 :TX_TIERING DEBUG: log.cpp:841: fline=manager.cpp:101;component=tiering_manager;event=object_deleted;name=/Root/tier1; 2026-01-08T15:37:00.693252Z node 1 :TX_TIERING INFO: log.cpp:841: fline=manager.cpp:295;event=update_tier_config;name=/Root/tier1;tablet=72075186224037893;has_config=0; 2026-01-08T15:37:00.693281Z node 1 :TX_TIERING DEBUG: log.cpp:841: fline=manager.cpp:208;event=skip_tier_manager_reloading;tier=/Root/tier1;has_secrets=1;found_tier_config=1; 2026-01-08T15:37:00.693306Z node 1 :TX_TIERING DEBUG: log.cpp:841: 
fline=manager.cpp:208;event=skip_tier_manager_reloading;tier=/Root/tier2;has_secrets=1;found_tier_config=1; 2026-01-08T15:37:00.693345Z node 1 :TX_TIERING DEBUG: log.cpp:841: fline=manager.cpp:217;event=configs_updated;configs=TIERS={id=/Root/tier2;has_config=0}{id=/Root/tier1;has_config=0};SECRETS={USId:root@builtin:accessKey;USId:root@builtin:secretKey;}; 2026-01-08T15:37:00.693382Z node 1 :TX_TIERING DEBUG: log.cpp:841: fline=manager.cpp:101;component=tiering_manager;event=object_deleted;name=/Root/tier1; 2026-01-08T15:37:00.693411Z node 1 :TX_TIERING INFO: log.cpp:841: fline=manager.cpp:295;event=update_tier_config;name=/Root/tier1;tablet=72075186224037894;has_config=0; 2026-01-08T15:37:00.693439Z node 1 :TX_TIERING DEBUG: log.cpp:841: fline=manager.cpp:208;event=skip_tier_manager_reloading;tier=/Root/tier1;has_secrets=1;found_tier_config=1; 2026-01-08T15:37:00.693467Z node 1 :TX_TIERING DEBUG: log.cpp:841: fline=manager.cpp:208;event=skip_tier_manager_reloading;tier=/Root/tier2;has_secrets=1;found_tier_config=1; 2026-01-08T15:37:00.693508Z node 1 :TX_TIERING DEBUG: log.cpp:841: fline=manager.cpp:217;event=configs_updated;configs=TIERS={id=/Root/tier2;has_config=0}{id=/Root/tier1;has_config=0};SECRETS={USId:root@builtin:accessKey;USId:root@builtin:secretKey;}; 2026-01-08T15:37:00.694066Z node 1 :TX_TIERING DEBUG: log.cpp:841: tablet_id=72075186224037892;self_id=[1:3144:4502];ev=NKikimr::NColumnShard::TEvPrivate::TEvTieringModified;fline=tiering.cpp:244;event=refresh_tiering;has_tiering=0;tiers=0;had_tiering_before=0; 2026-01-08T15:37:00.694193Z node 1 :TX_TIERING DEBUG: log.cpp:841: tablet_id=72075186224037893;self_id=[1:3148:4504];ev=NKikimr::NColumnShard::TEvPrivate::TEvTieringModified;fline=tiering.cpp:244;event=refresh_tiering;has_tiering=0;tiers=0;had_tiering_before=0; 2026-01-08T15:37:00.694394Z node 1 :TX_TIERING DEBUG: log.cpp:841: tablet_id=72075186224037894;self_id=[1:3157:4511];ev=NKikimr::NColumnShard::TEvPrivate::TEvTieringModified;fline=tiering.cpp:244;event=refresh_tiering;has_tiering=0;tiers=0;had_tiering_before=0; REQUEST=DROP EXTERNAL DATA SOURCE `/Root/tier1`;RESULT=;EXPECTATION=1 FINISHED_REQUEST=DROP EXTERNAL DATA SOURCE `/Root/tier1`;EXPECTATION=1;WAITING=1 FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=1600;manager.cpp:162 :Tier '/Root/tier1' stopped at tablet 0 FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=1600;manager.cpp:162 :Tier '/Root/tier1' stopped at tablet 0 FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=1600;manager.cpp:162 :Tier '/Root/tier1' stopped at tablet 0 FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=1600;manager.cpp:162 :Tier '/Root/tier2' stopped at tablet 0 FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=1600;manager.cpp:162 :Tier '/Root/tier2' stopped at tablet 0 FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=1600;manager.cpp:162 :Tier '/Root/tier1' stopped at tablet 0 FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=1600;manager.cpp:162 :Tier '/Root/tier1' stopped at tablet 72075186224037892 FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=1600;manager.cpp:162 :Tier '/Root/tier2' stopped at tablet 72075186224037892 FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=1600;manager.cpp:162 :Tier '/Root/tier2' stopped at tablet 72075186224037892 FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=1600;manager.cpp:162 :Tier '/Root/tier1' stopped at tablet 72075186224037892 FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=1600;manager.cpp:162 :Tier '/Root/tier1' stopped at tablet 72075186224037893 
FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=1600;manager.cpp:162 :Tier '/Root/tier2' stopped at tablet 72075186224037893 FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=1600;manager.cpp:162 :Tier '/Root/tier2' stopped at tablet 72075186224037893 FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=1600;manager.cpp:162 :Tier '/Root/tier1' stopped at tablet 72075186224037893 FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=1600;manager.cpp:162 :Tier '/Root/tier1' stopped at tablet 72075186224037894 FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=1600;manager.cpp:162 :Tier '/Root/tier2' stopped at tablet 72075186224037894 FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=1600;manager.cpp:162 :Tier '/Root/tier2' stopped at tablet 72075186224037894 FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=1600;manager.cpp:162 :Tier '/Root/tier1' stopped at tablet 72075186224037894 FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=1600;manager.cpp:162 :Tier '/Root/tier1' stopped at tablet 0 FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=1600;manager.cpp:162 :Tier '/Root/tier2' stopped at tablet 0 FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=1600;manager.cpp:162 :Tier '/Root/tier2' stopped at tablet 0 FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=1600;manager.cpp:162 :Tier '/Root/tier1' stopped at tablet 0 E0108 15:37:02.505237613 205430 backup_poller.cc:113] run_poller: UNKNOWN:Timer list shutdown {created_time:"2026-01-08T15:37:02.505003591+00:00"} |88.6%| [TM] {BAZEL_UPLOAD} ydb/core/tx/tiering/ut/unittest >> KqpQueryPerf::IndexInsert+QueryService-UseSink >> EraseRowsTests::ConditionalEraseRowsShouldEraseOnPgTimestamp [GOOD] >> TestSetCloudPermissions::CanSetPermissionsForDbWithoutCloudUserAttributes [GOOD] >> ReadSessionImplTest::PacksBatches_BatchABitBiggerThanLimit [GOOD] >> ReadSessionImplTest::PacksBatches_ExactlyTwoMessagesInBatch [GOOD] >> ReadSessionImplTest::PacksBatches_BatchesEqualToServerBatches [GOOD] >> ReadSessionImplTest::PacksBatches_OneMessageInEveryBatch [GOOD] >> ReadSessionImplTest::PacksBatches_BigBatchDecompressWithTwoBatchTasks >> ReadSessionImplTest::UsesOnRetryStateDuringRetries [GOOD] >> RetryPolicy::TWriteSession_TestPolicy >> TConsoleTests::TestAlterBorrowedStorage [GOOD] >> TConsoleTests::TestAlterStorageUnitsOfSharedTenant ------- [TM] {asan, default-linux-x86_64, release} ydb/core/mind/bscontroller/ut_bscontroller/unittest >> BsControllerConfig::MergeBoxes [GOOD] Test command err: Leader for TabletID 72057594037932033 is [0:0:0] sender: [1:11114:2156] recipient: [1:10914:2167] IGNORE Leader for TabletID 72057594037932033 is [0:0:0] sender: [1:11114:2156] recipient: [1:10914:2167] Leader for TabletID 72057594037932033 is [1:11116:2169] sender: [1:11117:2156] recipient: [1:10914:2167] 2026-01-08T15:35:39.820850Z node 1 :BS_CONTROLLER DEBUG: {BSC05@impl.h:2080} StateInit event Type# 268828672 Event# NKikimr::TEvTablet::TEvBoot 2026-01-08T15:35:39.822180Z node 1 :BS_CONTROLLER DEBUG: {BSC05@impl.h:2080} StateInit event Type# 268828673 Event# NKikimr::TEvTablet::TEvRestored 2026-01-08T15:35:39.822600Z node 1 :BS_CONTROLLER DEBUG: {BSC22@console_interaction.cpp:14} Console interaction started 2026-01-08T15:35:39.825074Z node 1 :BS_CONTROLLER DEBUG: {BSC05@impl.h:2080} StateInit event Type# 268828684 Event# NKikimr::TEvTablet::TEvFollowerSyncComplete 2026-01-08T15:35:39.825566Z node 1 :BS_CONTROLLER DEBUG: {BSC05@impl.h:2080} StateInit event Type# 268639244 Event# NKikimr::TEvNodeWardenStorageConfig 2026-01-08T15:35:39.826033Z node 1 :BS_CONTROLLER DEBUG: {BSC05@impl.h:2080} 
StateInit event Type# 131082 Event# NActors::TEvInterconnect::TEvNodesInfo 2026-01-08T15:35:39.826064Z node 1 :BS_CONTROLLER DEBUG: {BSC01@bsc.cpp:829} Handle TEvInterconnect::TEvNodesInfo 2026-01-08T15:35:39.826575Z node 1 :BS_CONTROLLER DEBUG: {BSCTXIS01@init_scheme.cpp:17} TTxInitScheme Execute 2026-01-08T15:35:39.836262Z node 1 :BS_CONTROLLER DEBUG: {BSCTXIS03@init_scheme.cpp:44} TTxInitScheme Complete 2026-01-08T15:35:39.836412Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM01@migrate.cpp:190} Execute tx 2026-01-08T15:35:39.836630Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM02@migrate.cpp:251} Complete tx IncompatibleData# false 2026-01-08T15:35:39.836750Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxTrimUnusedSlots 2026-01-08T15:35:39.836845Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxTrimUnusedSlots 2026-01-08T15:35:39.836916Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateSchemaVersion Leader for TabletID 72057594037932033 is [1:11116:2169] sender: [1:11138:2156] recipient: [1:110:2157] 2026-01-08T15:35:39.851048Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateSchemaVersion 2026-01-08T15:35:39.851205Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxGenerateInstanceId 2026-01-08T15:35:39.916053Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxGenerateInstanceId 2026-01-08T15:35:39.916239Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateStaticPDiskInfo 2026-01-08T15:35:39.916320Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateStaticPDiskInfo 2026-01-08T15:35:39.916392Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxFillInNonNullConfigForPDisk 2026-01-08T15:35:39.916535Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxFillInNonNullConfigForPDisk 2026-01-08T15:35:39.916603Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxDropDriveStatus 2026-01-08T15:35:39.916639Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxDropDriveStatus 2026-01-08T15:35:39.916707Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateCompatibilityInfo 2026-01-08T15:35:39.929798Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateCompatibilityInfo 2026-01-08T15:35:39.929967Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx 
from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateEnableConfigV2 2026-01-08T15:35:39.940905Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateEnableConfigV2 2026-01-08T15:35:39.941073Z node 1 :BS_CONTROLLER DEBUG: {BSCTXLE01@load_everything.cpp:21} TTxLoadEverything Execute 2026-01-08T15:35:39.942587Z node 1 :BS_CONTROLLER DEBUG: {BSCTXLE03@load_everything.cpp:705} TTxLoadEverything Complete 2026-01-08T15:35:39.942644Z node 1 :BS_CONTROLLER DEBUG: {BSC09@impl.h:2211} LoadFinished 2026-01-08T15:35:39.942860Z node 1 :BS_CONTROLLER DEBUG: {BSC18@console_interaction.cpp:31} Console connection service started 2026-01-08T15:35:39.942923Z node 1 :BS_CONTROLLER DEBUG: {BSCTXLE04@load_everything.cpp:710} TTxLoadEverything InitQueue processed 2026-01-08T15:35:39.962490Z node 1 :BS_CONTROLLER DEBUG: {BSCTXCC01@config_cmd.cpp:433} Execute TEvControllerConfigRequest Request# {Command { DefineHostConfig { HostConfigId: 1 Drive { Path: "/dev/disk0" } Drive { Path: "/dev/disk1" } Drive { Path: "/dev/disk2" } Drive { Path: "/dev/disk3" } Drive { Path: "/dev/disk4" } Drive { Path: "/dev/disk5" } Drive { Path: "/dev/disk6" } Drive { Path: "/dev/disk7" } Drive { Path: "/dev/disk8" Type: SSD } Drive { Path: "/dev/disk9" Type: SSD } Drive { Path: "/dev/disk10" Type: SSD } Drive { Path: "/dev/disk11" Type: SSD } Drive { Path: "/dev/disk12" Type: SSD } Drive { Path: "/dev/disk13" Type: SSD } Drive { Path: "/dev/disk14" Type: SSD } Drive { Path: "/dev/disk15" Type: SSD } } } Command { DefineBox { BoxId: 1 Name: "test box" Host { Key { Fqdn: "::1" IcPort: 12001 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12002 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12003 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12004 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12005 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12006 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12007 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12008 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12009 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12010 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12011 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12012 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12013 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12014 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12015 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12016 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12017 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12018 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12019 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12020 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12021 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12022 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12023 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12024 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12025 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12026 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12027 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12028 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12029 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12030 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12031 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12032 } HostConfigId: 1 } 
Host { Key { Fqdn: "::1" IcPort: 12033 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12034 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12035 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12036 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12037 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12038 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12039 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12040 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12041 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12042 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12043 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12044 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12045 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12046 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12047 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12048 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12049 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12050 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12051 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12052 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12053 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12054 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12055 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12056 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12057 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12058 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12059 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12060 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12061 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12062 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12063 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12064 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12065 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12066 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12067 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12068 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12069 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12070 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12071 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12072 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12073 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12074 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12075 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12076 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12077 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12078 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12079 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12080 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12081 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12082 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12083 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12084 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12085 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12086 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12087 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12088 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12089 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12090 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12091 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12092 } HostConfigId: 1 } Host { Key { 
Fqdn: "::1" IcPort: 12093 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12094 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12095 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12096 } HostConfigId: 1 } Host { Ke ... } Create new pdisk PDiskId# 275:1002 Path# /dev/disk3 2026-01-08T15:36:54.382124Z node 251 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:356} Create new pdisk PDiskId# 276:1000 Path# /dev/disk1 2026-01-08T15:36:54.382158Z node 251 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:356} Create new pdisk PDiskId# 276:1001 Path# /dev/disk2 2026-01-08T15:36:54.382185Z node 251 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:356} Create new pdisk PDiskId# 276:1002 Path# /dev/disk3 2026-01-08T15:36:54.382226Z node 251 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:356} Create new pdisk PDiskId# 277:1000 Path# /dev/disk1 2026-01-08T15:36:54.382277Z node 251 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:356} Create new pdisk PDiskId# 277:1001 Path# /dev/disk2 2026-01-08T15:36:54.382306Z node 251 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:356} Create new pdisk PDiskId# 277:1002 Path# /dev/disk3 2026-01-08T15:36:54.382355Z node 251 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:356} Create new pdisk PDiskId# 278:1000 Path# /dev/disk1 2026-01-08T15:36:54.382384Z node 251 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:356} Create new pdisk PDiskId# 278:1001 Path# /dev/disk2 2026-01-08T15:36:54.382410Z node 251 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:356} Create new pdisk PDiskId# 278:1002 Path# /dev/disk3 2026-01-08T15:36:54.382439Z node 251 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:356} Create new pdisk PDiskId# 279:1000 Path# /dev/disk1 2026-01-08T15:36:54.382469Z node 251 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:356} Create new pdisk PDiskId# 279:1001 Path# /dev/disk2 2026-01-08T15:36:54.382501Z node 251 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:356} Create new pdisk PDiskId# 279:1002 Path# /dev/disk3 2026-01-08T15:36:54.382548Z node 251 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:356} Create new pdisk PDiskId# 280:1000 Path# /dev/disk1 2026-01-08T15:36:54.382604Z node 251 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:356} Create new pdisk PDiskId# 280:1001 Path# /dev/disk2 2026-01-08T15:36:54.382654Z node 251 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:356} Create new pdisk PDiskId# 280:1002 Path# /dev/disk3 2026-01-08T15:36:54.382686Z node 251 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:356} Create new pdisk PDiskId# 281:1000 Path# /dev/disk1 2026-01-08T15:36:54.382714Z node 251 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:356} Create new pdisk PDiskId# 281:1001 Path# /dev/disk2 2026-01-08T15:36:54.382741Z node 251 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:356} Create new pdisk PDiskId# 281:1002 Path# /dev/disk3 2026-01-08T15:36:54.382767Z node 251 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:356} Create new pdisk PDiskId# 282:1000 Path# /dev/disk1 2026-01-08T15:36:54.382795Z node 251 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:356} Create new pdisk PDiskId# 282:1001 Path# /dev/disk2 2026-01-08T15:36:54.382842Z node 251 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:356} Create new pdisk PDiskId# 282:1002 Path# /dev/disk3 2026-01-08T15:36:54.382903Z node 251 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:356} Create new pdisk PDiskId# 283:1000 Path# /dev/disk1 
2026-01-08T15:36:54.382941Z node 251 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:356} Create new pdisk PDiskId# 283:1001 Path# /dev/disk2 2026-01-08T15:36:54.382971Z node 251 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:356} Create new pdisk PDiskId# 283:1002 Path# /dev/disk3 2026-01-08T15:36:54.382998Z node 251 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:356} Create new pdisk PDiskId# 284:1000 Path# /dev/disk1 2026-01-08T15:36:54.383029Z node 251 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:356} Create new pdisk PDiskId# 284:1001 Path# /dev/disk2 2026-01-08T15:36:54.383065Z node 251 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:356} Create new pdisk PDiskId# 284:1002 Path# /dev/disk3 2026-01-08T15:36:54.383108Z node 251 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:356} Create new pdisk PDiskId# 285:1000 Path# /dev/disk1 2026-01-08T15:36:54.383155Z node 251 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:356} Create new pdisk PDiskId# 285:1001 Path# /dev/disk2 2026-01-08T15:36:54.383188Z node 251 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:356} Create new pdisk PDiskId# 285:1002 Path# /dev/disk3 2026-01-08T15:36:54.383231Z node 251 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:356} Create new pdisk PDiskId# 286:1000 Path# /dev/disk1 2026-01-08T15:36:54.383267Z node 251 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:356} Create new pdisk PDiskId# 286:1001 Path# /dev/disk2 2026-01-08T15:36:54.383296Z node 251 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:356} Create new pdisk PDiskId# 286:1002 Path# /dev/disk3 2026-01-08T15:36:54.383328Z node 251 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:356} Create new pdisk PDiskId# 287:1000 Path# /dev/disk1 2026-01-08T15:36:54.383357Z node 251 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:356} Create new pdisk PDiskId# 287:1001 Path# /dev/disk2 2026-01-08T15:36:54.383389Z node 251 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:356} Create new pdisk PDiskId# 287:1002 Path# /dev/disk3 2026-01-08T15:36:54.383419Z node 251 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:356} Create new pdisk PDiskId# 288:1000 Path# /dev/disk1 2026-01-08T15:36:54.383449Z node 251 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:356} Create new pdisk PDiskId# 288:1001 Path# /dev/disk2 2026-01-08T15:36:54.383478Z node 251 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:356} Create new pdisk PDiskId# 288:1002 Path# /dev/disk3 2026-01-08T15:36:54.383509Z node 251 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:356} Create new pdisk PDiskId# 289:1000 Path# /dev/disk1 2026-01-08T15:36:54.383539Z node 251 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:356} Create new pdisk PDiskId# 289:1001 Path# /dev/disk2 2026-01-08T15:36:54.383569Z node 251 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:356} Create new pdisk PDiskId# 289:1002 Path# /dev/disk3 2026-01-08T15:36:54.383596Z node 251 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:356} Create new pdisk PDiskId# 290:1000 Path# /dev/disk1 2026-01-08T15:36:54.383623Z node 251 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:356} Create new pdisk PDiskId# 290:1001 Path# /dev/disk2 2026-01-08T15:36:54.383689Z node 251 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:356} Create new pdisk PDiskId# 290:1002 Path# /dev/disk3 2026-01-08T15:36:54.383720Z node 251 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:356} Create new pdisk PDiskId# 291:1000 Path# /dev/disk1 
2026-01-08T15:36:54.383757Z node 251 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:356} Create new pdisk PDiskId# 291:1001 Path# /dev/disk2 2026-01-08T15:36:54.383796Z node 251 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:356} Create new pdisk PDiskId# 291:1002 Path# /dev/disk3 2026-01-08T15:36:54.383831Z node 251 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:356} Create new pdisk PDiskId# 292:1000 Path# /dev/disk1 2026-01-08T15:36:54.383864Z node 251 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:356} Create new pdisk PDiskId# 292:1001 Path# /dev/disk2 2026-01-08T15:36:54.383920Z node 251 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:356} Create new pdisk PDiskId# 292:1002 Path# /dev/disk3 2026-01-08T15:36:54.383973Z node 251 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:356} Create new pdisk PDiskId# 293:1000 Path# /dev/disk1 2026-01-08T15:36:54.384020Z node 251 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:356} Create new pdisk PDiskId# 293:1001 Path# /dev/disk2 2026-01-08T15:36:54.384056Z node 251 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:356} Create new pdisk PDiskId# 293:1002 Path# /dev/disk3 2026-01-08T15:36:54.384087Z node 251 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:356} Create new pdisk PDiskId# 294:1000 Path# /dev/disk1 2026-01-08T15:36:54.384117Z node 251 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:356} Create new pdisk PDiskId# 294:1001 Path# /dev/disk2 2026-01-08T15:36:54.384146Z node 251 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:356} Create new pdisk PDiskId# 294:1002 Path# /dev/disk3 2026-01-08T15:36:54.384175Z node 251 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:356} Create new pdisk PDiskId# 295:1000 Path# /dev/disk1 2026-01-08T15:36:54.384224Z node 251 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:356} Create new pdisk PDiskId# 295:1001 Path# /dev/disk2 2026-01-08T15:36:54.384259Z node 251 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:356} Create new pdisk PDiskId# 295:1002 Path# /dev/disk3 2026-01-08T15:36:54.384307Z node 251 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:356} Create new pdisk PDiskId# 296:1000 Path# /dev/disk1 2026-01-08T15:36:54.384338Z node 251 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:356} Create new pdisk PDiskId# 296:1001 Path# /dev/disk2 2026-01-08T15:36:54.384365Z node 251 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:356} Create new pdisk PDiskId# 296:1002 Path# /dev/disk3 2026-01-08T15:36:54.384394Z node 251 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:356} Create new pdisk PDiskId# 297:1000 Path# /dev/disk1 2026-01-08T15:36:54.384422Z node 251 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:356} Create new pdisk PDiskId# 297:1001 Path# /dev/disk2 2026-01-08T15:36:54.384451Z node 251 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:356} Create new pdisk PDiskId# 297:1002 Path# /dev/disk3 2026-01-08T15:36:54.384478Z node 251 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:356} Create new pdisk PDiskId# 298:1000 Path# /dev/disk1 2026-01-08T15:36:54.384505Z node 251 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:356} Create new pdisk PDiskId# 298:1001 Path# /dev/disk2 2026-01-08T15:36:54.384534Z node 251 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:356} Create new pdisk PDiskId# 298:1002 Path# /dev/disk3 2026-01-08T15:36:54.384581Z node 251 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:356} Create new pdisk PDiskId# 299:1000 Path# /dev/disk1 
2026-01-08T15:36:54.384612Z node 251 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:356} Create new pdisk PDiskId# 299:1001 Path# /dev/disk2 2026-01-08T15:36:54.384653Z node 251 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:356} Create new pdisk PDiskId# 299:1002 Path# /dev/disk3 2026-01-08T15:36:54.384684Z node 251 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:356} Create new pdisk PDiskId# 300:1000 Path# /dev/disk1 2026-01-08T15:36:54.384713Z node 251 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:356} Create new pdisk PDiskId# 300:1001 Path# /dev/disk2 2026-01-08T15:36:54.384753Z node 251 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:356} Create new pdisk PDiskId# 300:1002 Path# /dev/disk3 2026-01-08T15:36:54.669534Z node 251 :BS_CONTROLLER ERROR: {BSC07@impl.h:2204} ProcessControllerEvent event processing took too much time Type# 268637706 Duration# 0.289875s 2026-01-08T15:36:54.669738Z node 251 :BS_CONTROLLER ERROR: {BSC00@bsc.cpp:1009} StateWork event processing took too much time Type# 2146435078 Duration# 0.290104s 2026-01-08T15:36:54.712588Z node 251 :BS_CONTROLLER DEBUG: {BSCTXCC01@config_cmd.cpp:433} Execute TEvControllerConfigRequest Request# {Command { MergeBoxes { OriginBoxId: 2 OriginBoxGeneration: 1 TargetBoxId: 1 TargetBoxGeneration: 1 StoragePoolIdMap { OriginStoragePoolId: 1 TargetStoragePoolId: 2 } } } } 2026-01-08T15:36:54.736955Z node 251 :BS_CONTROLLER DEBUG: {BSCTXCC01@config_cmd.cpp:433} Execute TEvControllerConfigRequest Request# {Command { ReadBox { BoxId: 1 } } Command { QueryBaseConfig { } } } |88.6%| [TM] {BAZEL_UPLOAD} ydb/core/mind/bscontroller/ut_bscontroller/unittest >> EraseRowsTests::ConditionalEraseRowsShouldFailOnVariousErrors [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/datashard/ut_erase_rows/unittest >> DistributedEraseTests::ConditionalEraseRowsShouldFailOnSplit [GOOD] Test command err: ... waiting for SysViewsRoster update finished 2026-01-08T15:36:50.147094Z node 1 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:125: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, got undelivered to scheme cache: ActorUnknown 2026-01-08T15:36:50.273518Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:114:2161], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2026-01-08T15:36:50.273798Z node 1 :METADATA_PROVIDER ERROR: log.cpp:841: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2026-01-08T15:36:50.273893Z node 1 :METADATA_PROVIDER ERROR: log.cpp:841: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2026-01-08T15:36:50.561206Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2026-01-08T15:36:50.561348Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2026-01-08T15:36:50.640346Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1374: Notification cookie mismatch for subscription [1:34:2081] 1767886607678077 != 1767886607678081 2026-01-08T15:36:50.651428Z node 1 :IMPORT WARN: schemeshard_import.cpp:440: Table profiles were not loaded 2026-01-08T15:36:50.697885Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2026-01-08T15:36:50.795516Z node 1 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: PathErrorUnknown, path: Root/.metadata/script_executions ... waiting for SysViewsRoster update finished (done) 2026-01-08T15:36:51.071833Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2026-01-08T15:36:51.085343Z node 1 :TX_CONVEYOR ERROR: log.cpp:841: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2026-01-08T15:36:51.188837Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:36:51.236631Z node 1 :TX_DATASHARD INFO: datashard.cpp:374: TDataShard::OnActivateExecutor: tablet 72075186224037888 actor [1:908:2781] 2026-01-08T15:36:51.237025Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:661: TxInitSchema.Execute 2026-01-08T15:36:51.287862Z node 1 :TX_DATASHARD INFO: datashard.cpp:374: TDataShard::OnActivateExecutor: tablet 72075186224037889 actor [1:911:2783] 2026-01-08T15:36:51.288145Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:661: TxInitSchema.Execute 2026-01-08T15:36:51.297607Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:727: TxInitSchema.Complete 2026-01-08T15:36:51.297966Z node 1 :TX_DATASHARD INFO: datashard.cpp:374: TDataShard::OnActivateExecutor: tablet 72075186224037890 actor [1:915:2787] 2026-01-08T15:36:51.298179Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:661: TxInitSchema.Execute 2026-01-08T15:36:51.307720Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:50: TDataShard::TTxInit::Execute 2026-01-08T15:36:51.309693Z node 1 :TX_DATASHARD 
DEBUG: datashard.cpp:1324: LoadChangeRecords: QueueSize: 0, at tablet: 72075186224037888 2026-01-08T15:36:51.309807Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1380: LoadLockChangeRecords at tablet: 72075186224037888 2026-01-08T15:36:51.309890Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1429: LoadChangeRecordCommits at tablet: 72075186224037888 2026-01-08T15:36:51.310287Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:94: TDataShard::TTxInit::Complete 2026-01-08T15:36:51.310759Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:102: TDataShard::TTxInitRestored::Execute 2026-01-08T15:36:51.310863Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:142: DataShard 72075186224037888 persisting started state actor id [1:955:2781] in generation 1 2026-01-08T15:36:51.311542Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:727: TxInitSchema.Complete 2026-01-08T15:36:51.311698Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:50: TDataShard::TTxInit::Execute 2026-01-08T15:36:51.313243Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1324: LoadChangeRecords: QueueSize: 0, at tablet: 72075186224037889 2026-01-08T15:36:51.313312Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1380: LoadLockChangeRecords at tablet: 72075186224037889 2026-01-08T15:36:51.313362Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1429: LoadChangeRecordCommits at tablet: 72075186224037889 2026-01-08T15:36:51.313669Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:94: TDataShard::TTxInit::Complete 2026-01-08T15:36:51.313791Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:102: TDataShard::TTxInitRestored::Execute 2026-01-08T15:36:51.313854Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:142: DataShard 72075186224037889 persisting started state actor id [1:956:2783] in generation 1 2026-01-08T15:36:51.314336Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:727: TxInitSchema.Complete 2026-01-08T15:36:51.314405Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:50: TDataShard::TTxInit::Execute 2026-01-08T15:36:51.315930Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1324: LoadChangeRecords: QueueSize: 0, at tablet: 72075186224037890 2026-01-08T15:36:51.316019Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1380: LoadLockChangeRecords at tablet: 72075186224037890 2026-01-08T15:36:51.316080Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1429: LoadChangeRecordCommits at tablet: 72075186224037890 2026-01-08T15:36:51.316429Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:94: TDataShard::TTxInit::Complete 2026-01-08T15:36:51.316538Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:102: TDataShard::TTxInitRestored::Execute 2026-01-08T15:36:51.316618Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:142: DataShard 72075186224037890 persisting started state actor id [1:957:2787] in generation 1 2026-01-08T15:36:51.327762Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:151: TDataShard::TTxInitRestored::Complete 2026-01-08T15:36:51.355826Z node 1 :TX_DATASHARD INFO: datashard.cpp:418: Switched to work state WaitScheme tabletId 72075186224037888 2026-01-08T15:36:51.356057Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:458: 72075186224037888 not sending time cast registration request in state WaitScheme: missing processing params 2026-01-08T15:36:51.356206Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1259: Change sender created: at tablet: 72075186224037888, actorId: [1:961:2813] 2026-01-08T15:36:51.356263Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1264: Trying to activate change sender: at tablet: 72075186224037888 2026-01-08T15:36:51.356302Z node 1 :TX_DATASHARD INFO: 
datashard.cpp:1281: Cannot activate change sender: at tablet: 72075186224037888, state: WaitScheme 2026-01-08T15:36:51.356341Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2026-01-08T15:36:51.356693Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:151: TDataShard::TTxInitRestored::Complete 2026-01-08T15:36:51.356744Z node 1 :TX_DATASHARD INFO: datashard.cpp:418: Switched to work state WaitScheme tabletId 72075186224037889 2026-01-08T15:36:51.356826Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:458: 72075186224037889 not sending time cast registration request in state WaitScheme: missing processing params 2026-01-08T15:36:51.356912Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1259: Change sender created: at tablet: 72075186224037889, actorId: [1:962:2814] 2026-01-08T15:36:51.356938Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1264: Trying to activate change sender: at tablet: 72075186224037889 2026-01-08T15:36:51.356967Z node 1 :TX_DATASHARD INFO: datashard.cpp:1281: Cannot activate change sender: at tablet: 72075186224037889, state: WaitScheme 2026-01-08T15:36:51.356991Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037889 2026-01-08T15:36:51.357431Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:151: TDataShard::TTxInitRestored::Complete 2026-01-08T15:36:51.357470Z node 1 :TX_DATASHARD INFO: datashard.cpp:418: Switched to work state WaitScheme tabletId 72075186224037890 2026-01-08T15:36:51.357543Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:458: 72075186224037890 not sending time cast registration request in state WaitScheme: missing processing params 2026-01-08T15:36:51.357597Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1259: Change sender created: at tablet: 72075186224037890, actorId: [1:963:2815] 2026-01-08T15:36:51.357644Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1264: Trying to activate change sender: at tablet: 72075186224037890 2026-01-08T15:36:51.357672Z node 1 :TX_DATASHARD INFO: datashard.cpp:1281: Cannot activate change sender: at tablet: 72075186224037890, state: WaitScheme 2026-01-08T15:36:51.357702Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037890 2026-01-08T15:36:51.357888Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 72075186224037888 2026-01-08T15:36:51.357989Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 72075186224037888 2026-01-08T15:36:51.358262Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2026-01-08T15:36:51.358305Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 0 2026-01-08T15:36:51.358368Z node 1 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186224037888 TxInFly 0 2026-01-08T15:36:51.358412Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2026-01-08T15:36:51.358496Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 72075186224037889 2026-01-08T15:36:51.358562Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 72075186224037889 2026-01-08T15:36:51.358713Z node 1 
:TX_DATASHARD DEBUG: datashard.cpp:3740: Server connected at leader tablet# 72075186224037888, clientId# [1:898:2776], serverId# [1:912:2784], sessionId# [0:0:0] 2026-01-08T15:36:51.358759Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037889 2026-01-08T15:36:51.358785Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037889 active 0 active planned 0 immediate 0 planned 0 2026-01-08T15:36:51.358809Z node 1 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186224037889 TxInFly 0 2026-01-08T15:36:51.358838Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037889 2026-01-08T15:36:51.358869Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 72075186224037890 2026-01-08T15: ... node 3 :TX_DATASHARD DEBUG: datashard__compaction.cpp:203: CompactionComplete of tablet# 72075186224037889, table# 7, finished edge# 0, ts 1970-01-01T00:00:00.000000Z 2026-01-08T15:37:03.242074Z node 3 :TX_DATASHARD DEBUG: datashard__compaction.cpp:240: ReplyCompactionWaiters of tablet# 72075186224037889, table# 7, finished edge# 0, front# 0 2026-01-08T15:37:03.243934Z node 3 :TX_DATASHARD DEBUG: datashard__compaction.cpp:203: CompactionComplete of tablet# 72075186224037889, table# 8, finished edge# 0, ts 1970-01-01T00:00:00.000000Z 2026-01-08T15:37:03.243965Z node 3 :TX_DATASHARD DEBUG: datashard__compaction.cpp:240: ReplyCompactionWaiters of tablet# 72075186224037889, table# 8, finished edge# 0, front# 0 2026-01-08T15:37:03.244937Z node 3 :TX_DATASHARD DEBUG: datashard__compaction.cpp:203: CompactionComplete of tablet# 72075186224037889, table# 1001, finished edge# 0, ts 1970-01-01T00:00:00.000000Z 2026-01-08T15:37:03.244979Z node 3 :TX_DATASHARD DEBUG: datashard__compaction.cpp:240: ReplyCompactionWaiters of tablet# 72075186224037889, table# 1001, finished edge# 0, front# 0 2026-01-08T15:37:03.245342Z node 3 :TX_DATASHARD DEBUG: datashard_split_src.cpp:256: 72075186224037889 snapshot complete for split OpId 281474976715663 2026-01-08T15:37:03.245528Z node 3 :TX_DATASHARD DEBUG: datashard_split_src.cpp:332: 72075186224037889 BorrowSnapshot: table 3 snapshot size is 12 total snapshot size is 12 for split OpId 281474976715663 2026-01-08T15:37:03.245584Z node 3 :TX_DATASHARD DEBUG: datashard_split_src.cpp:332: 72075186224037889 BorrowSnapshot: table 4 snapshot size is 12 total snapshot size is 24 for split OpId 281474976715663 2026-01-08T15:37:03.245612Z node 3 :TX_DATASHARD DEBUG: datashard_split_src.cpp:332: 72075186224037889 BorrowSnapshot: table 7 snapshot size is 12 total snapshot size is 36 for split OpId 281474976715663 2026-01-08T15:37:03.245636Z node 3 :TX_DATASHARD DEBUG: datashard_split_src.cpp:332: 72075186224037889 BorrowSnapshot: table 8 snapshot size is 12 total snapshot size is 48 for split OpId 281474976715663 2026-01-08T15:37:03.246050Z node 3 :TX_DATASHARD DEBUG: datashard_split_src.cpp:332: 72075186224037889 BorrowSnapshot: table 1001 snapshot size is 146 total snapshot size is 194 for split OpId 281474976715663 2026-01-08T15:37:03.246240Z node 3 :TX_DATASHARD DEBUG: datashard_split_src.cpp:332: 72075186224037889 BorrowSnapshot: table 3 snapshot size is 12 total snapshot size is 206 for split OpId 281474976715663 2026-01-08T15:37:03.246271Z node 3 :TX_DATASHARD DEBUG: datashard_split_src.cpp:332: 72075186224037889 BorrowSnapshot: table 4 snapshot size is 12 total snapshot size is 218 
for split OpId 281474976715663 2026-01-08T15:37:03.246297Z node 3 :TX_DATASHARD DEBUG: datashard_split_src.cpp:332: 72075186224037889 BorrowSnapshot: table 7 snapshot size is 12 total snapshot size is 230 for split OpId 281474976715663 2026-01-08T15:37:03.246344Z node 3 :TX_DATASHARD DEBUG: datashard_split_src.cpp:332: 72075186224037889 BorrowSnapshot: table 8 snapshot size is 12 total snapshot size is 242 for split OpId 281474976715663 2026-01-08T15:37:03.246453Z node 3 :TX_DATASHARD DEBUG: datashard_split_src.cpp:332: 72075186224037889 BorrowSnapshot: table 1001 snapshot size is 155 total snapshot size is 397 for split OpId 281474976715663 2026-01-08T15:37:03.246897Z node 3 :TX_DATASHARD DEBUG: datashard_split_src.cpp:424: 72075186224037889 Sending snapshots from src for split OpId 281474976715663 2026-01-08T15:37:03.247083Z node 3 :TX_DATASHARD DEBUG: datashard_impl.h:2361: Sending snapshot for split opId 281474976715663 from datashard 72075186224037889 to datashard 72075186224037892 size 221 2026-01-08T15:37:03.247183Z node 3 :TX_DATASHARD DEBUG: datashard_impl.h:2361: Sending snapshot for split opId 281474976715663 from datashard 72075186224037889 to datashard 72075186224037891 size 215 2026-01-08T15:37:03.247464Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:3740: Server connected at leader tablet# 72075186224037892, clientId# [3:1381:3096], serverId# [3:1383:3098], sessionId# [0:0:0] 2026-01-08T15:37:03.247543Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:3740: Server connected at leader tablet# 72075186224037891, clientId# [3:1382:3097], serverId# [3:1384:3099], sessionId# [0:0:0] 2026-01-08T15:37:03.247623Z node 3 :TX_DATASHARD DEBUG: datashard_split_dst.cpp:175: 72075186224037892 Received snapshot for split/merge TxId 281474976715663 from tabletId 72075186224037889 2026-01-08T15:37:03.248391Z node 3 :TX_DATASHARD DEBUG: datashard_split_dst.cpp:175: 72075186224037891 Received snapshot for split/merge TxId 281474976715663 from tabletId 72075186224037889 2026-01-08T15:37:03.249946Z node 3 :TX_DATASHARD DEBUG: datashard_split_dst.cpp:304: 72075186224037892 ack snapshot OpId 281474976715663 2026-01-08T15:37:03.250123Z node 3 :TX_DATASHARD INFO: datashard.cpp:418: Switched to work state Ready tabletId 72075186224037892 2026-01-08T15:37:03.250239Z node 3 :TX_DATASHARD INFO: datashard.cpp:476: Send registration request to time cast Ready tabletId 72075186224037892 mediators count is 1 coordinators count is 1 buckets per mediator 2 2026-01-08T15:37:03.250366Z node 3 :TX_DATASHARD INFO: datashard.cpp:1310: Change sender killed: at tablet: 72075186224037892 2026-01-08T15:37:03.250464Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:1259: Change sender created: at tablet: 72075186224037892, actorId: [3:1387:3102] 2026-01-08T15:37:03.250512Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:1264: Trying to activate change sender: at tablet: 72075186224037892 2026-01-08T15:37:03.250568Z node 3 :TX_DATASHARD INFO: datashard.cpp:1302: Change sender activated: at tablet: 72075186224037892 2026-01-08T15:37:03.250609Z node 3 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037892 2026-01-08T15:37:03.250761Z node 3 :TX_DATASHARD DEBUG: datashard_split_src.cpp:461: 72075186224037889 Received snapshot Ack from dst 72075186224037892 for split OpId 281474976715663 2026-01-08T15:37:03.251606Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:3772: Got TEvMediatorTimecast::TEvRegisterTabletResult at 72075186224037892 time 3000 2026-01-08T15:37:03.251704Z node 3
:TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037892 2026-01-08T15:37:03.251960Z node 3 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037892 2026-01-08T15:37:03.252000Z node 3 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037892 active 0 active planned 0 immediate 0 planned 0 2026-01-08T15:37:03.252030Z node 3 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186224037892 TxInFly 0 2026-01-08T15:37:03.252060Z node 3 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037892 2026-01-08T15:37:03.252194Z node 3 :TX_DATASHARD DEBUG: datashard_split_dst.cpp:304: 72075186224037891 ack snapshot OpId 281474976715663 2026-01-08T15:37:03.252287Z node 3 :TX_DATASHARD INFO: datashard.cpp:418: Switched to work state Ready tabletId 72075186224037891 2026-01-08T15:37:03.252350Z node 3 :TX_DATASHARD INFO: datashard.cpp:476: Send registration request to time cast Ready tabletId 72075186224037891 mediators count is 1 coordinators count is 1 buckets per mediator 2 2026-01-08T15:37:03.252397Z node 3 :TX_DATASHARD INFO: datashard.cpp:1310: Change sender killed: at tablet: 72075186224037891 2026-01-08T15:37:03.252441Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:1259: Change sender created: at tablet: 72075186224037891, actorId: [3:1389:3104] 2026-01-08T15:37:03.252463Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:1264: Trying to activate change sender: at tablet: 72075186224037891 2026-01-08T15:37:03.252500Z node 3 :TX_DATASHARD INFO: datashard.cpp:1302: Change sender activated: at tablet: 72075186224037891 2026-01-08T15:37:03.252524Z node 3 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037891 2026-01-08T15:37:03.252577Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:3758: Server disconnected at leader tablet# 72075186224037892, clientId# [3:1381:3096], serverId# [3:1383:3098], sessionId# [0:0:0] 2026-01-08T15:37:03.252702Z node 3 :TX_DATASHARD DEBUG: datashard_split_src.cpp:461: 72075186224037889 Received snapshot Ack from dst 72075186224037891 for split OpId 281474976715663 2026-01-08T15:37:03.253317Z node 3 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037891 2026-01-08T15:37:03.253348Z node 3 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037891 active 0 active planned 0 immediate 0 planned 0 2026-01-08T15:37:03.253370Z node 3 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186224037891 TxInFly 0 2026-01-08T15:37:03.253393Z node 3 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037891 2026-01-08T15:37:03.253440Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:3758: Server disconnected at leader tablet# 72075186224037891, clientId# [3:1382:3097], serverId# [3:1384:3099], sessionId# [0:0:0] 2026-01-08T15:37:03.253612Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:3772: Got TEvMediatorTimecast::TEvRegisterTabletResult at 72075186224037891 time 3000 2026-01-08T15:37:03.253639Z node 3 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037891 2026-01-08T15:37:03.253669Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:3790: Got TEvMediatorTimecast::TEvSubscribeReadStepResult at 72075186224037892 coordinator 72057594046316545 last step 2500 
next step 3000 2026-01-08T15:37:03.253724Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:2824: CheckMediatorStateRestored at 72075186224037892: waitStep# 3000 readStep# 3000 observedStep# 3000 2026-01-08T15:37:03.254016Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:3790: Got TEvMediatorTimecast::TEvSubscribeReadStepResult at 72075186224037891 coordinator 72057594046316545 last step 2500 next step 3000 2026-01-08T15:37:03.254056Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:2824: CheckMediatorStateRestored at 72075186224037891: waitStep# 3000 readStep# 3000 observedStep# 3000 2026-01-08T15:37:03.275799Z node 3 :TX_DATASHARD DEBUG: datashard_split_src.cpp:485: 72075186224037889 ack split to schemeshard 281474976715663 2026-01-08T15:37:03.279389Z node 3 :TX_DATASHARD DEBUG: datashard_split_src.cpp:565: Got TEvSplitPartitioningChanged: opId: 281474976715663, at datashard: 72075186224037889, state: SplitSrcWaitForPartitioningChanged 2026-01-08T15:37:03.282412Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:4486: FullScan complete at 72075186224037889 2026-01-08T15:37:03.282490Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:4502: Conditional erase complete: cookie: 4, at: 72075186224037889 2026-01-08T15:37:03.282616Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:3758: Server disconnected at leader tablet# 72075186224037889, clientId# [3:1275:3017], serverId# [3:1276:3018], sessionId# [0:0:0] 2026-01-08T15:37:03.282793Z node 3 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037889 2026-01-08T15:37:03.282842Z node 3 :TX_DATASHARD INFO: datashard__progress_tx.cpp:21: Progress tx at non-ready tablet 72075186224037889 state 5 2026-01-08T15:37:03.283052Z node 3 :TX_DATASHARD DEBUG: datashard_split_src.cpp:532: 72075186224037889 ack split partitioning changed to schemeshard 281474976715663 2026-01-08T15:37:03.283129Z node 3 :TX_DATASHARD DEBUG: datashard_loans.cpp:220: 72075186224037889 in PreOffline state HasSharedBobs: 1 SchemaOperations: [ ] OutReadSets count: 0 ChangesQueue size: 0 ChangeExchangeSplit: 1 siblings to be activated: wait to activation from: 2026-01-08T15:37:03.283183Z node 3 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037889 |88.6%| [TM] {BAZEL_UPLOAD} ydb/core/tx/datashard/ut_erase_rows/unittest >> ReadSessionImplTest::CreatePartitionStream [GOOD] >> ReadSessionImplTest::DecompressRaw [GOOD] >> ReadSessionImplTest::DecompressGzip [GOOD] >> ReadSessionImplTest::DecompressGzipEmptyMessage [GOOD] >> ReadSessionImplTest::CommitOffsetTwiceIsError [GOOD] >> ReadSessionImplTest::DataReceivedCallbackReal ------- [TM] {asan, default-linux-x86_64, release} ydb/core/grpc_services/grpc_request_check_actor_ut/unittest >> TestSetCloudPermissions::CanSetPermissionsForDbWithoutCloudUserAttributes [GOOD] Test command err: 2026-01-08T15:37:01.376744Z node 1 :METADATA_PROVIDER WARN: log.cpp:841: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7593015224047164272:2069];send_to=[0:7307199536658146131:7762515]; 2026-01-08T15:37:01.376800Z node 1 :METADATA_PROVIDER ERROR: log.cpp:841: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2026-01-08T15:37:01.592113Z node 1 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: LookupError, path: Root/.metadata/script_executions 
2026-01-08T15:37:01.616515Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2026-01-08T15:37:01.616644Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2026-01-08T15:37:01.659782Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2026-01-08T15:37:01.720963Z node 1 :IMPORT WARN: schemeshard_import.cpp:440: Table profiles were not loaded 2026-01-08T15:37:01.722279Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1374: Notification cookie mismatch for subscription [1:7593015224047164243:2081] 1767886621375440 != 1767886621375443 2026-01-08T15:37:01.753375Z node 1 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: PathErrorUnknown, path: Root/.metadata/script_executions 2026-01-08T15:37:01.755085Z node 1 :TICKET_PARSER TRACE: ticket_parser_impl.h:519: Ticket **** (8E120919) asking for AccessServiceBulkAuthorization( ydb.clusters.get ydb.clusters.manage ydb.clusters.monitor) 2026-01-08T15:37:01.755153Z node 1 :GRPC_CLIENT DEBUG: grpc_service_client.h:83: [7c807b7003d0] Connect to grpc://localhost:22798 2026-01-08T15:37:01.763467Z node 1 :GRPC_CLIENT DEBUG: grpc_service_client.h:122: [7c807b7003d0] Request BulkAuthorizeRequest { iam_token: "**** (8E120919)" actions { items { resource_path { id: "cluster.folder98765" type: "resource-manager.folder" } permission: "ydb.clusters.get" } items { resource_path { id: "cluster.folder98765" type: "resource-manager.folder" } permission: "ydb.clusters.manage" } items { resource_path { id: "cluster.folder98765" type: "resource-manager.folder" } permission: "ydb.clusters.monitor" } } result_filter: ALL_FAILED } 2026-01-08T15:37:01.771738Z node 1 :GRPC_CLIENT DEBUG: grpc_service_client.h:109: [7c807b7003d0] Response BulkAuthorizeResponse { subject { user_account { id: "user1" } } } 2026-01-08T15:37:01.772026Z node 1 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1875: Ticket **** (8E120919) () has now valid token of user1@as 2026-01-08T15:37:01.772160Z node 1 :GRPC_PROXY_NO_CONNECT_ACCESS DEBUG: grpc_request_check_actor.h:718: Skip check permission connect db, user is a admin, database: /Root/db, user: user1@as, from ip: |88.6%| [TM] {BAZEL_UPLOAD} ydb/core/grpc_services/grpc_request_check_actor_ut/unittest >> ApplyClusterEndpointTest::NoPorts [GOOD] >> ApplyClusterEndpointTest::PortFromCds [GOOD] >> ApplyClusterEndpointTest::PortFromDriver [GOOD] >> BasicUsage::MaxByteSizeEqualZero ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/datashard/ut_erase_rows/unittest >> EraseRowsTests::ConditionalEraseRowsShouldEraseOnPgTimestamp [GOOD] Test command err: ... waiting for SysViewsRoster update finished 2026-01-08T15:36:52.500693Z node 1 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:125: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, got undelivered to scheme cache: ActorUnknown 2026-01-08T15:36:52.598704Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:114:2161], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2026-01-08T15:36:52.599130Z node 1 :METADATA_PROVIDER ERROR: log.cpp:841: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2026-01-08T15:36:52.599314Z node 1 :METADATA_PROVIDER ERROR: log.cpp:841: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2026-01-08T15:36:52.891928Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2026-01-08T15:36:52.892087Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2026-01-08T15:36:52.968187Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1374: Notification cookie mismatch for subscription [1:34:2081] 1767886609860438 != 1767886609860442 2026-01-08T15:36:52.981061Z node 1 :IMPORT WARN: schemeshard_import.cpp:440: Table profiles were not loaded 2026-01-08T15:36:53.025676Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2026-01-08T15:36:53.125842Z node 1 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: PathErrorUnknown, path: Root/.metadata/script_executions ... waiting for SysViewsRoster update finished (done) 2026-01-08T15:36:53.440716Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2026-01-08T15:36:53.455456Z node 1 :TX_CONVEYOR ERROR: log.cpp:841: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2026-01-08T15:36:53.574776Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:36:53.627484Z node 1 :TX_DATASHARD INFO: datashard.cpp:374: TDataShard::OnActivateExecutor: tablet 72075186224037888 actor [1:886:2765] 2026-01-08T15:36:53.627791Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:661: TxInitSchema.Execute 2026-01-08T15:36:53.675869Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:727: TxInitSchema.Complete 2026-01-08T15:36:53.675994Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:50: TDataShard::TTxInit::Execute 2026-01-08T15:36:53.677990Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1324: LoadChangeRecords: QueueSize: 0, at tablet: 72075186224037888 2026-01-08T15:36:53.678097Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1380: LoadLockChangeRecords at tablet: 72075186224037888 2026-01-08T15:36:53.678155Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1429: LoadChangeRecordCommits at tablet: 72075186224037888 2026-01-08T15:36:53.678578Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:94: TDataShard::TTxInit::Complete 2026-01-08T15:36:53.678763Z node 1 :TX_DATASHARD 
DEBUG: datashard__init.cpp:102: TDataShard::TTxInitRestored::Execute 2026-01-08T15:36:53.678865Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:142: DataShard 72075186224037888 persisting started state actor id [1:901:2765] in generation 1 2026-01-08T15:36:53.689786Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:151: TDataShard::TTxInitRestored::Complete 2026-01-08T15:36:53.720261Z node 1 :TX_DATASHARD INFO: datashard.cpp:418: Switched to work state WaitScheme tabletId 72075186224037888 2026-01-08T15:36:53.720514Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:458: 72075186224037888 not sending time cast registration request in state WaitScheme: missing processing params 2026-01-08T15:36:53.720662Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1259: Change sender created: at tablet: 72075186224037888, actorId: [1:903:2775] 2026-01-08T15:36:53.720711Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1264: Trying to activate change sender: at tablet: 72075186224037888 2026-01-08T15:36:53.720747Z node 1 :TX_DATASHARD INFO: datashard.cpp:1281: Cannot activate change sender: at tablet: 72075186224037888, state: WaitScheme 2026-01-08T15:36:53.720801Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2026-01-08T15:36:53.721381Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 72075186224037888 2026-01-08T15:36:53.721522Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 72075186224037888 2026-01-08T15:36:53.721625Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2026-01-08T15:36:53.721674Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 0 2026-01-08T15:36:53.721724Z node 1 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186224037888 TxInFly 0 2026-01-08T15:36:53.721766Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2026-01-08T15:36:53.722188Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3740: Server connected at leader tablet# 72075186224037888, clientId# [1:882:2762], serverId# [1:887:2766], sessionId# [0:0:0] 2026-01-08T15:36:53.722383Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037888 2026-01-08T15:36:53.722733Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:133: Propose scheme transaction at tablet 72075186224037888 txId 281474976715657 ssId 72057594046644480 seqNo 2:1 2026-01-08T15:36:53.722863Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:221: Prepared scheme transaction txId 281474976715657 at tablet 72075186224037888 2026-01-08T15:36:53.724842Z node 1 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:180: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2026-01-08T15:36:53.735780Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 72075186224037888 2026-01-08T15:36:53.735921Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:468: 72075186224037888 not sending time cast registration request in state WaitScheme 2026-01-08T15:36:53.861988Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3740: Server connected at leader tablet# 72075186224037888, clientId# [1:917:2783], serverId# 
[1:919:2785], sessionId# [0:0:0] 2026-01-08T15:36:53.867771Z node 1 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:69: Planned transaction txId 281474976715657 at step 2000 at tablet 72075186224037888 { Transactions { TxId: 281474976715657 AckTo { RawX1: 0 RawX2: 0 } } Step: 2000 MediatorID: 72057594046382081 TabletID: 72075186224037888 } 2026-01-08T15:36:53.867849Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2026-01-08T15:36:53.868132Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2026-01-08T15:36:53.868181Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 1 2026-01-08T15:36:53.868238Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:333: Found ready operation [2000:281474976715657] in PlanQueue unit at 72075186224037888 2026-01-08T15:36:53.868592Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:634: LoadTxDetails at 72075186224037888 loaded tx from db 2000:281474976715657 keys extracted: 0 2026-01-08T15:36:53.868784Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 (dry run) active 1 active planned 1 immediate 0 planned 1 2026-01-08T15:36:53.869549Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2026-01-08T15:36:53.869611Z node 1 :TX_DATASHARD INFO: create_table_unit.cpp:69: Trying to CREATE TABLE at 72075186224037888 tableId# [OwnerId: 72057594046644480, LocalPathId: 38] schema version# 1 2026-01-08T15:36:53.871949Z node 1 :TX_DATASHARD INFO: datashard.cpp:476: Send registration request to time cast Ready tabletId 72075186224037888 mediators count is 1 coordinators count is 1 buckets per mediator 2 2026-01-08T15:36:53.872528Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 (dry run) active 0 active planned 0 immediate 0 planned 0 2026-01-08T15:36:53.874637Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3772: Got TEvMediatorTimecast::TEvRegisterTabletResult at 72075186224037888 time 0 2026-01-08T15:36:53.874688Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2026-01-08T15:36:53.875378Z node 1 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:98: Sending '{TEvPlanStepAccepted TabletId# 72075186224037888 step# 2000} 2026-01-08T15:36:53.875460Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2026-01-08T15:36:53.876674Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2026-01-08T15:36:53.876715Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1264: Trying to activate change sender: at tablet: 72075186224037888 2026-01-08T15:36:53.876827Z node 1 :TX_DATASHARD INFO: datashard.cpp:1302: Change sender activated: at tablet: 72075186224037888 2026-01-08T15:36:53.876907Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:810: Complete [2000 : 281474976715657] from 72075186224037888 at tablet 72075186224037888 send result to client [1:400:2399], exec latency: 0 ms, propose latency: 0 ms 2026-01-08T15:36:53.876963Z node 1 :TX_DATASHARD INFO: datashard.cpp:1599: 72075186224037888 Sending notify to schemeshard 72057594046644480 txId 281474976715657 state Ready TxInFly 0 2026-01-08T15:36:53.877051Z node 1 :TX_DATASHARD DEBUG: 
cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2026-01-08T15:36:53.882186Z node 1 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:180: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2026-01-08T15:36:53.884008Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:2965: Handle TEvSchemaChangedResult 281474976715657 datashard 72075186224037888 state Ready 2026-01-08T15:36:53.884087Z node 1 :TX_DATASHARD DEBUG: datashard__schema_changed.cpp:22: 72075186224037888 Got TEvSchemaChangedResult from SS at 72075186224037888 2026-01-08T15:36:53.884833Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3790: Got TEvMediatorTimecast::TEvSubscribeReadStepResult at 72075186224037888 coordinator 72057594046316545 last step 0 next step 2000 2026-01-08T15:36:53.894841Z node 1 :KQP_WORKLOAD_SERV ... pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 1 2026-01-08T15:37:03.327226Z node 3 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:333: Found ready operation [2000:281474976715657] in PlanQueue unit at 72075186224037888 2026-01-08T15:37:03.327567Z node 3 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:634: LoadTxDetails at 72075186224037888 loaded tx from db 2000:281474976715657 keys extracted: 0 2026-01-08T15:37:03.327751Z node 3 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 (dry run) active 1 active planned 1 immediate 0 planned 1 2026-01-08T15:37:03.327990Z node 3 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2026-01-08T15:37:03.328062Z node 3 :TX_DATASHARD INFO: create_table_unit.cpp:69: Trying to CREATE TABLE at 72075186224037888 tableId# [OwnerId: 72057594046644480, LocalPathId: 38] schema version# 1 2026-01-08T15:37:03.328568Z node 3 :TX_DATASHARD INFO: datashard.cpp:476: Send registration request to time cast Ready tabletId 72075186224037888 mediators count is 1 coordinators count is 1 buckets per mediator 2 2026-01-08T15:37:03.329104Z node 3 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 (dry run) active 0 active planned 0 immediate 0 planned 0 2026-01-08T15:37:03.331675Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:3772: Got TEvMediatorTimecast::TEvRegisterTabletResult at 72075186224037888 time 0 2026-01-08T15:37:03.331743Z node 3 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2026-01-08T15:37:03.332925Z node 3 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:98: Sending '{TEvPlanStepAccepted TabletId# 72075186224037888 step# 2000} 2026-01-08T15:37:03.333012Z node 3 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2026-01-08T15:37:03.334373Z node 3 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2026-01-08T15:37:03.334424Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:1264: Trying to activate change sender: at tablet: 72075186224037888 2026-01-08T15:37:03.334480Z node 3 :TX_DATASHARD INFO: datashard.cpp:1302: Change sender activated: at tablet: 72075186224037888 2026-01-08T15:37:03.334556Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:810: Complete [2000 : 281474976715657] from 72075186224037888 at tablet 72075186224037888 send result to client [3:399:2398], exec latency: 0 ms, propose latency: 0 ms 2026-01-08T15:37:03.334614Z node 3 :TX_DATASHARD 
INFO: datashard.cpp:1599: 72075186224037888 Sending notify to schemeshard 72057594046644480 txId 281474976715657 state Ready TxInFly 0 2026-01-08T15:37:03.334752Z node 3 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2026-01-08T15:37:03.336739Z node 3 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:180: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2026-01-08T15:37:03.338807Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:3790: Got TEvMediatorTimecast::TEvSubscribeReadStepResult at 72075186224037888 coordinator 72057594046316545 last step 0 next step 2000 2026-01-08T15:37:03.339002Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:2965: Handle TEvSchemaChangedResult 281474976715657 datashard 72075186224037888 state Ready 2026-01-08T15:37:03.339079Z node 3 :TX_DATASHARD DEBUG: datashard__schema_changed.cpp:22: 72075186224037888 Got TEvSchemaChangedResult from SS at 72075186224037888 2026-01-08T15:37:03.350129Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:951:2809], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:37:03.350271Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:962:2814], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:37:03.350717Z node 3 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:263: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:37:03.351734Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:967:2819], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:37:03.352063Z node 3 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:263: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:37:03.356846Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2026-01-08T15:37:03.364569Z node 3 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:180: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2026-01-08T15:37:03.483101Z node 3 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:180: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2026-01-08T15:37:03.486526Z node 3 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [3:965:2817], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715658 completed, doublechecking } 2026-01-08T15:37:03.521400Z node 3 :TX_PROXY ERROR: schemereq.cpp:624: Actor# [3:1023:2856] txid# 281474976715659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 42], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2026-01-08T15:37:03.668212Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:3740: Server connected at leader tablet# 72075186224037888, clientId# [3:1053:2873], serverId# [3:1054:2874], sessionId# [0:0:0] 2026-01-08T15:37:03.668700Z node 3 :TX_DATASHARD DEBUG: execute_write_unit.cpp:262: Executing write operation for [0:2] at 72075186224037888 2026-01-08T15:37:03.668942Z node 3 :TX_DATASHARD DEBUG: execute_write_unit.cpp:465: Executed write operation for [0:2] at 72075186224037888, row count=5 2026-01-08T15:37:03.680091Z node 3 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2026-01-08T15:37:03.685330Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:3740: Server connected at leader tablet# 72075186224037888, clientId# [3:1061:2880], serverId# [3:1062:2881], sessionId# [0:0:0] 2026-01-08T15:37:03.686820Z node 3 :TX_DATASHARD INFO: datashard__op_rows.cpp:27: TTxDirectBase(48) Execute: at tablet# 72075186224037888 2026-01-08T15:37:03.698594Z node 3 :TX_DATASHARD INFO: datashard__op_rows.cpp:81: TTxDirectBase(48) Complete: at tablet# 72075186224037888 2026-01-08T15:37:03.698698Z node 3 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2026-01-08T15:37:03.699160Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:4486: FullScan complete at 72075186224037888 2026-01-08T15:37:03.699225Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:4502: Conditional erase complete: cookie: 3, at: 72075186224037888 2026-01-08T15:37:03.699429Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:3758: Server disconnected at leader tablet# 72075186224037888, clientId# [3:1061:2880], serverId# [3:1062:2881], sessionId# [0:0:0] 2026-01-08T15:37:03.699549Z node 3 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2026-01-08T15:37:03.699605Z node 3 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 0 2026-01-08T15:37:03.699681Z node 3 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186224037888 TxInFly 0 2026-01-08T15:37:03.699755Z node 3 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2026-01-08T15:37:03.700925Z node 3 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037888 2026-01-08T15:37:03.701745Z node 3 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 72075186224037888 2026-01-08T15:37:03.701992Z node 3 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2026-01-08T15:37:03.702050Z node 3 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 1 active planned 0 immediate 1 planned 0 2026-01-08T15:37:03.702123Z node 3 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:307: Found ready candidate operation [0:281474976715661] at 72075186224037888 for WaitForStreamClearance 
2026-01-08T15:37:03.702430Z node 3 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 (dry run) active 1 active planned 0 immediate 1 planned 0 2026-01-08T15:37:03.702507Z node 3 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2026-01-08T15:37:03.703160Z node 3 :TX_DATASHARD DEBUG: read_table_scan.cpp:514: Got quota for read table scan ShardId: 72075186224037888, TxId: 281474976715661, MessageQuota: 1 2026-01-08T15:37:03.703548Z node 3 :TX_DATASHARD DEBUG: read_table_scan.cpp:662: Send response data ShardId: 72075186224037888, TxId: 281474976715661, Size: 48, Rows: 0, PendingAcks: 1, MessageQuota: 0 2026-01-08T15:37:03.703745Z node 3 :TX_DATASHARD DEBUG: read_table_scan.cpp:483: Got stream data ack ShardId: 72075186224037888, TxId: 281474976715661, PendingAcks: 0 2026-01-08T15:37:03.703808Z node 3 :TX_DATASHARD DEBUG: read_table_scan.cpp:718: Finish scan ShardId: 72075186224037888, TxId: 281474976715661, MessageQuota: 0 2026-01-08T15:37:03.706012Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:4486: FullScan complete at 72075186224037888 2026-01-08T15:37:03.706070Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:4492: Found op: cookie: 281474976715661, at: 72075186224037888 2026-01-08T15:37:03.706175Z node 3 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2026-01-08T15:37:03.706215Z node 3 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 1 active planned 0 immediate 1 planned 0 2026-01-08T15:37:03.706257Z node 3 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:307: Found ready candidate operation [0:281474976715661] at 72075186224037888 for ReadTableScan 2026-01-08T15:37:03.706417Z node 3 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 (dry run) active 0 active planned 0 immediate 0 planned 0 2026-01-08T15:37:03.706482Z node 3 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2026-01-08T15:37:03.706535Z node 3 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 |88.6%| [TM] {BAZEL_UPLOAD} ydb/core/tx/datashard/ut_erase_rows/unittest >> ReadSessionImplTest::SuccessfulInit [GOOD] >> ReadSessionImplTest::SuccessfulInitAndThenTimeoutCallback [GOOD] >> ReadSessionImplTest::StopsRetryAfterFailedAttempt [GOOD] >> ReadSessionImplTest::StopsRetryAfterTimeout [GOOD] >> ReadSessionImplTest::UnpackBigBatchWithTwoPartitions [GOOD] >> ReadSessionImplTest::SimpleDataHandlersWithGracefulRelease >> ReadSessionImplTest::PacksBatches_BigBatchDecompressWithTwoBatchTasks [GOOD] >> ReadSessionImplTest::PacksBatches_DecompressesOneMessagePerTime ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/datashard/ut_erase_rows/unittest >> EraseRowsTests::ConditionalEraseRowsShouldFailOnVariousErrors [GOOD] Test command err: ... waiting for SysViewsRoster update finished 2026-01-08T15:36:56.819629Z node 1 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:125: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, got undelivered to scheme cache: ActorUnknown 2026-01-08T15:36:56.910068Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:114:2161], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2026-01-08T15:36:56.910379Z node 1 :METADATA_PROVIDER ERROR: log.cpp:841: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2026-01-08T15:36:56.910477Z node 1 :METADATA_PROVIDER ERROR: log.cpp:841: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2026-01-08T15:36:57.123137Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2026-01-08T15:36:57.123233Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2026-01-08T15:36:57.185092Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1374: Notification cookie mismatch for subscription [1:34:2081] 1767886614302685 != 1767886614302689 2026-01-08T15:36:57.192396Z node 1 :IMPORT WARN: schemeshard_import.cpp:440: Table profiles were not loaded 2026-01-08T15:36:57.237223Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2026-01-08T15:36:57.340737Z node 1 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: PathErrorUnknown, path: Root/.metadata/script_executions ... waiting for SysViewsRoster update finished (done) 2026-01-08T15:36:57.643169Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2026-01-08T15:36:57.656513Z node 1 :TX_CONVEYOR ERROR: log.cpp:841: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2026-01-08T15:36:57.776914Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:36:57.823740Z node 1 :TX_DATASHARD INFO: datashard.cpp:374: TDataShard::OnActivateExecutor: tablet 72075186224037888 actor [1:886:2765] 2026-01-08T15:36:57.824015Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:661: TxInitSchema.Execute 2026-01-08T15:36:57.872958Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:727: TxInitSchema.Complete 2026-01-08T15:36:57.873088Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:50: TDataShard::TTxInit::Execute 2026-01-08T15:36:57.874933Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1324: LoadChangeRecords: QueueSize: 0, at tablet: 72075186224037888 2026-01-08T15:36:57.875023Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1380: LoadLockChangeRecords at tablet: 72075186224037888 2026-01-08T15:36:57.875090Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1429: LoadChangeRecordCommits at tablet: 72075186224037888 2026-01-08T15:36:57.875485Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:94: TDataShard::TTxInit::Complete 2026-01-08T15:36:57.875672Z node 1 :TX_DATASHARD 
DEBUG: datashard__init.cpp:102: TDataShard::TTxInitRestored::Execute 2026-01-08T15:36:57.875781Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:142: DataShard 72075186224037888 persisting started state actor id [1:901:2765] in generation 1 2026-01-08T15:36:57.886610Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:151: TDataShard::TTxInitRestored::Complete 2026-01-08T15:36:57.928969Z node 1 :TX_DATASHARD INFO: datashard.cpp:418: Switched to work state WaitScheme tabletId 72075186224037888 2026-01-08T15:36:57.929212Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:458: 72075186224037888 not sending time cast registration request in state WaitScheme: missing processing params 2026-01-08T15:36:57.929346Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1259: Change sender created: at tablet: 72075186224037888, actorId: [1:903:2775] 2026-01-08T15:36:57.929394Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1264: Trying to activate change sender: at tablet: 72075186224037888 2026-01-08T15:36:57.929437Z node 1 :TX_DATASHARD INFO: datashard.cpp:1281: Cannot activate change sender: at tablet: 72075186224037888, state: WaitScheme 2026-01-08T15:36:57.929491Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2026-01-08T15:36:57.930066Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 72075186224037888 2026-01-08T15:36:57.930195Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 72075186224037888 2026-01-08T15:36:57.930299Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2026-01-08T15:36:57.930360Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 0 2026-01-08T15:36:57.930404Z node 1 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186224037888 TxInFly 0 2026-01-08T15:36:57.930443Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2026-01-08T15:36:57.930873Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3740: Server connected at leader tablet# 72075186224037888, clientId# [1:882:2762], serverId# [1:887:2766], sessionId# [0:0:0] 2026-01-08T15:36:57.931035Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037888 2026-01-08T15:36:57.931340Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:133: Propose scheme transaction at tablet 72075186224037888 txId 281474976715657 ssId 72057594046644480 seqNo 2:1 2026-01-08T15:36:57.931454Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:221: Prepared scheme transaction txId 281474976715657 at tablet 72075186224037888 2026-01-08T15:36:57.933298Z node 1 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:180: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2026-01-08T15:36:57.944074Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 72075186224037888 2026-01-08T15:36:57.944212Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:468: 72075186224037888 not sending time cast registration request in state WaitScheme 2026-01-08T15:36:58.061250Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3740: Server connected at leader tablet# 72075186224037888, clientId# [1:917:2783], serverId# 
[1:919:2785], sessionId# [0:0:0] 2026-01-08T15:36:58.065261Z node 1 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:69: Planned transaction txId 281474976715657 at step 2000 at tablet 72075186224037888 { Transactions { TxId: 281474976715657 AckTo { RawX1: 0 RawX2: 0 } } Step: 2000 MediatorID: 72057594046382081 TabletID: 72075186224037888 } 2026-01-08T15:36:58.065357Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2026-01-08T15:36:58.065586Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2026-01-08T15:36:58.065628Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 1 2026-01-08T15:36:58.065681Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:333: Found ready operation [2000:281474976715657] in PlanQueue unit at 72075186224037888 2026-01-08T15:36:58.065900Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:634: LoadTxDetails at 72075186224037888 loaded tx from db 2000:281474976715657 keys extracted: 0 2026-01-08T15:36:58.066050Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 (dry run) active 1 active planned 1 immediate 0 planned 1 2026-01-08T15:36:58.066813Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2026-01-08T15:36:58.066890Z node 1 :TX_DATASHARD INFO: create_table_unit.cpp:69: Trying to CREATE TABLE at 72075186224037888 tableId# [OwnerId: 72057594046644480, LocalPathId: 38] schema version# 1 2026-01-08T15:36:58.075838Z node 1 :TX_DATASHARD INFO: datashard.cpp:476: Send registration request to time cast Ready tabletId 72075186224037888 mediators count is 1 coordinators count is 1 buckets per mediator 2 2026-01-08T15:36:58.076266Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 (dry run) active 0 active planned 0 immediate 0 planned 0 2026-01-08T15:36:58.077988Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3772: Got TEvMediatorTimecast::TEvRegisterTabletResult at 72075186224037888 time 0 2026-01-08T15:36:58.078031Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2026-01-08T15:36:58.078676Z node 1 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:98: Sending '{TEvPlanStepAccepted TabletId# 72075186224037888 step# 2000} 2026-01-08T15:36:58.078744Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2026-01-08T15:36:58.079565Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2026-01-08T15:36:58.079599Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1264: Trying to activate change sender: at tablet: 72075186224037888 2026-01-08T15:36:58.079653Z node 1 :TX_DATASHARD INFO: datashard.cpp:1302: Change sender activated: at tablet: 72075186224037888 2026-01-08T15:36:58.079702Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:810: Complete [2000 : 281474976715657] from 72075186224037888 at tablet 72075186224037888 send result to client [1:400:2399], exec latency: 0 ms, propose latency: 0 ms 2026-01-08T15:36:58.079741Z node 1 :TX_DATASHARD INFO: datashard.cpp:1599: 72075186224037888 Sending notify to schemeshard 72057594046644480 txId 281474976715657 state Ready TxInFly 0 2026-01-08T15:36:58.079800Z node 1 :TX_DATASHARD DEBUG: 
cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2026-01-08T15:36:58.091226Z node 1 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:180: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2026-01-08T15:36:58.093049Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:2965: Handle TEvSchemaChangedResult 281474976715657 datashard 72075186224037888 state Ready 2026-01-08T15:36:58.093133Z node 1 :TX_DATASHARD DEBUG: datashard__schema_changed.cpp:22: 72075186224037888 Got TEvSchemaChangedResult from SS at 72075186224037888 2026-01-08T15:36:58.093837Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3790: Got TEvMediatorTimecast::TEvSubscribeReadStepResult at 72075186224037888 coordinator 72057594046316545 last step 0 next step 2000 2026-01-08T15:36:58.102731Z node 1 :KQP_WORKLOAD_SERV ... ain_path_id.cpp:180: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037890 2026-01-08T15:37:04.249409Z node 2 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:180: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037891 2026-01-08T15:37:04.249460Z node 2 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:180: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037892 2026-01-08T15:37:04.287373Z node 2 :TX_DATASHARD INFO: datashard.cpp:374: TDataShard::OnActivateExecutor: tablet 72075186224037894 actor [2:1452:3222] 2026-01-08T15:37:04.287604Z node 2 :TX_DATASHARD DEBUG: datashard__init.cpp:661: TxInitSchema.Execute 2026-01-08T15:37:04.296230Z node 2 :TX_DATASHARD DEBUG: datashard__init.cpp:727: TxInitSchema.Complete 2026-01-08T15:37:04.296374Z node 2 :TX_DATASHARD DEBUG: datashard__init.cpp:50: TDataShard::TTxInit::Execute 2026-01-08T15:37:04.297721Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:1324: LoadChangeRecords: QueueSize: 0, at tablet: 72075186224037894 2026-01-08T15:37:04.297798Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:1380: LoadLockChangeRecords at tablet: 72075186224037894 2026-01-08T15:37:04.297845Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:1429: LoadChangeRecordCommits at tablet: 72075186224037894 2026-01-08T15:37:04.298178Z node 2 :TX_DATASHARD DEBUG: datashard__init.cpp:94: TDataShard::TTxInit::Complete 2026-01-08T15:37:04.298344Z node 2 :TX_DATASHARD DEBUG: datashard__init.cpp:102: TDataShard::TTxInitRestored::Execute 2026-01-08T15:37:04.298412Z node 2 :TX_DATASHARD DEBUG: datashard__init.cpp:142: DataShard 72075186224037894 persisting started state actor id [2:1468:3222] in generation 1 2026-01-08T15:37:04.319593Z node 2 :TX_DATASHARD DEBUG: datashard__init.cpp:151: TDataShard::TTxInitRestored::Complete 2026-01-08T15:37:04.319722Z node 2 :TX_DATASHARD INFO: datashard.cpp:418: Switched to work state WaitScheme tabletId 72075186224037894 2026-01-08T15:37:04.319836Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:458: 72075186224037894 not sending time cast registration request in state WaitScheme: missing processing params 2026-01-08T15:37:04.319907Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:1259: Change sender created: at tablet: 72075186224037894, actorId: [2:1470:3232] 2026-01-08T15:37:04.319941Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:1264: Trying to activate change sender: at tablet: 72075186224037894 2026-01-08T15:37:04.319973Z node 2 :TX_DATASHARD INFO: datashard.cpp:1281: Cannot activate change 
sender: at tablet: 72075186224037894, state: WaitScheme 2026-01-08T15:37:04.320008Z node 2 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037894 2026-01-08T15:37:04.320472Z node 2 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 72075186224037894 2026-01-08T15:37:04.320577Z node 2 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 72075186224037894 2026-01-08T15:37:04.320976Z node 2 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037894 2026-01-08T15:37:04.321019Z node 2 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037894 active 0 active planned 0 immediate 0 planned 0 2026-01-08T15:37:04.321054Z node 2 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186224037894 TxInFly 0 2026-01-08T15:37:04.321104Z node 2 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037894 2026-01-08T15:37:04.321232Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:3740: Server connected at leader tablet# 72075186224037894, clientId# [2:1451:3221], serverId# [2:1455:3223], sessionId# [0:0:0] 2026-01-08T15:37:04.321380Z node 2 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037894 2026-01-08T15:37:04.321586Z node 2 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:133: Propose scheme transaction at tablet 72075186224037894 txId 281474976715663 ssId 72057594046644480 seqNo 2:7 2026-01-08T15:37:04.321662Z node 2 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:221: Prepared scheme transaction txId 281474976715663 at tablet 72075186224037894 2026-01-08T15:37:04.322094Z node 2 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:180: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037894 2026-01-08T15:37:04.332809Z node 2 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 72075186224037894 2026-01-08T15:37:04.332922Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:468: 72075186224037894 not sending time cast registration request in state WaitScheme 2026-01-08T15:37:04.461372Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:3740: Server connected at leader tablet# 72075186224037894, clientId# [2:1476:3238], serverId# [2:1478:3240], sessionId# [0:0:0] 2026-01-08T15:37:04.462497Z node 2 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:69: Planned transaction txId 281474976715663 at step 5000 at tablet 72075186224037894 { Transactions { TxId: 281474976715663 AckTo { RawX1: 0 RawX2: 0 } } Step: 5000 MediatorID: 72057594046382081 TabletID: 72075186224037894 } 2026-01-08T15:37:04.462555Z node 2 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037894 2026-01-08T15:37:04.463407Z node 2 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037894 2026-01-08T15:37:04.463461Z node 2 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037894 active 0 active planned 0 immediate 0 planned 1 2026-01-08T15:37:04.463506Z node 2 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:333: Found ready operation [5000:281474976715663] in PlanQueue unit at 72075186224037894 2026-01-08T15:37:04.463808Z node 2 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:634: LoadTxDetails 
at 72075186224037894 loaded tx from db 5000:281474976715663 keys extracted: 0 2026-01-08T15:37:04.463941Z node 2 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037894 (dry run) active 1 active planned 1 immediate 0 planned 1 2026-01-08T15:37:04.464441Z node 2 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037894 2026-01-08T15:37:04.464506Z node 2 :TX_DATASHARD INFO: create_table_unit.cpp:69: Trying to CREATE TABLE at 72075186224037894 tableId# [OwnerId: 72057594046644480, LocalPathId: 44] schema version# 1 2026-01-08T15:37:04.464940Z node 2 :TX_DATASHARD INFO: datashard.cpp:476: Send registration request to time cast Ready tabletId 72075186224037894 mediators count is 1 coordinators count is 1 buckets per mediator 2 2026-01-08T15:37:04.465334Z node 2 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037894 (dry run) active 0 active planned 0 immediate 0 planned 0 2026-01-08T15:37:04.466923Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:3772: Got TEvMediatorTimecast::TEvRegisterTabletResult at 72075186224037894 time 4500 2026-01-08T15:37:04.466965Z node 2 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037894 2026-01-08T15:37:04.468142Z node 2 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:98: Sending '{TEvPlanStepAccepted TabletId# 72075186224037894 step# 5000} 2026-01-08T15:37:04.468205Z node 2 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037894 2026-01-08T15:37:04.470249Z node 2 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:180: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037893 2026-01-08T15:37:04.470381Z node 2 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:180: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2026-01-08T15:37:04.470479Z node 2 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:180: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037894 2026-01-08T15:37:04.470535Z node 2 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:180: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037889 2026-01-08T15:37:04.470635Z node 2 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:180: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037890 2026-01-08T15:37:04.470695Z node 2 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:180: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037891 2026-01-08T15:37:04.470758Z node 2 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:180: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037892 2026-01-08T15:37:04.471019Z node 2 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037894 2026-01-08T15:37:04.471063Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:1264: Trying to activate change sender: at tablet: 72075186224037894 2026-01-08T15:37:04.471109Z node 2 :TX_DATASHARD INFO: datashard.cpp:1302: Change sender activated: at tablet: 72075186224037894 2026-01-08T15:37:04.471172Z node 2 :TX_DATASHARD DEBUG: 
datashard.cpp:810: Complete [5000 : 281474976715663] from 72075186224037894 at tablet 72075186224037894 send result to client [2:399:2398], exec latency: 0 ms, propose latency: 0 ms 2026-01-08T15:37:04.471246Z node 2 :TX_DATASHARD INFO: datashard.cpp:1599: 72075186224037894 Sending notify to schemeshard 72057594046644480 txId 281474976715663 state Ready TxInFly 0 2026-01-08T15:37:04.471327Z node 2 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037894 2026-01-08T15:37:04.471854Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:3790: Got TEvMediatorTimecast::TEvSubscribeReadStepResult at 72075186224037894 coordinator 72057594046316545 last step 0 next step 5000 2026-01-08T15:37:04.478180Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:2965: Handle TEvSchemaChangedResult 281474976715663 datashard 72075186224037894 state Ready 2026-01-08T15:37:04.478270Z node 2 :TX_DATASHARD DEBUG: datashard__schema_changed.cpp:22: 72075186224037894 Got TEvSchemaChangedResult from SS at 72075186224037894 2026-01-08T15:37:04.484172Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:3740: Server connected at leader tablet# 72075186224037894, clientId# [2:1506:3262], serverId# [2:1507:3263], sessionId# [0:0:0] 2026-01-08T15:37:04.484391Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:3758: Server disconnected at leader tablet# 72075186224037894, clientId# [2:1506:3262], serverId# [2:1507:3263], sessionId# [0:0:0] 2026-01-08T15:37:04.485547Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:3740: Server connected at leader tablet# 72075186224037894, clientId# [2:1511:3267], serverId# [2:1512:3268], sessionId# [0:0:0] 2026-01-08T15:37:04.485696Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:3758: Server disconnected at leader tablet# 72075186224037894, clientId# [2:1511:3267], serverId# [2:1512:3268], sessionId# [0:0:0] 2026-01-08T15:37:04.487872Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:3740: Server connected at leader tablet# 72075186224037894, clientId# [2:1516:3272], serverId# [2:1517:3273], sessionId# [0:0:0] 2026-01-08T15:37:04.488055Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:3758: Server disconnected at leader tablet# 72075186224037894, clientId# [2:1516:3272], serverId# [2:1517:3273], sessionId# [0:0:0] |88.6%| [TM] {BAZEL_UPLOAD} ydb/core/tx/datashard/ut_erase_rows/unittest >> ReadSessionImplTest::ReconnectOnTmpError [GOOD] >> ReadSessionImplTest::ReconnectOnTmpErrorAndThenTimeout [GOOD] >> ReadSessionImplTest::ReconnectOnTimeout [GOOD] >> ReadSessionImplTest::ReconnectOnTimeoutAndThenCreate [GOOD] >> ReadSessionImplTest::ReconnectsAfterFailure [GOOD] >> ReadSessionImplTest::ProperlyOrdersDecompressedData [GOOD] >> ReadSessionImplTest::SimpleDataHandlers >> BasicStatistics::TwoNodes [GOOD] >> ReadSessionImplTest::PacksBatches_DecompressesOneMessagePerTime [GOOD] >> ReadSessionImplTest::PartitionStreamStatus [GOOD] >> ReadSessionImplTest::PartitionStreamCallbacks [GOOD] |88.6%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/kesus/proxy/ut/ydb-core-kesus-proxy-ut |88.6%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/kesus/proxy/ut/ydb-core-kesus-proxy-ut >> ReadSessionImplTest::SimpleDataHandlersWithGracefulRelease [GOOD] >> ReadSessionImplTest::SimpleDataHandlers [GOOD] >> TestSetCloudPermissions::CanSetPermissionsWithoutGizmoResourse [GOOD] >> ReadSessionImplTest::SimpleDataHandlersWithGracefulReleaseWithCommit >> ReadSessionImplTest::SimpleDataHandlersWithCommit >> ReadSessionImplTest::SimpleDataHandlersWithGracefulReleaseWithCommit [GOOD] >> 
ReadSessionImplTest::SimpleDataHandlersWithCommit [GOOD] >> PgCatalog::PgRoles [GOOD] >> PgCatalog::PgTables |88.6%| [TA] $(B)/ydb/core/mind/bscontroller/ut_bscontroller/test-results/unittest/{meta.json ... results_accumulator.log} |88.6%| [TA] {RESULT} $(B)/ydb/core/tx/scheme_board/ut_cache/test-results/unittest/{meta.json ... results_accumulator.log} |88.6%| [LD] {RESULT} $(B)/ydb/core/kesus/proxy/ut/ydb-core-kesus-proxy-ut ------- [TM] {asan, default-linux-x86_64, release} ydb/public/sdk/cpp/src/client/persqueue_public/ut/unittest >> ReadSessionImplTest::PartitionStreamCallbacks [GOOD] Test command err: 2026-01-08T15:37:04.356201Z :DEBUG: [db] [sessionid] [cluster] In Reconnect, ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2026-01-08T15:37:04.356242Z :DEBUG: [db] [sessionid] [cluster] New values: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2026-01-08T15:37:04.356273Z :DEBUG: [db] [sessionid] [cluster] Reconnecting session to cluster cluster in 0.000000s 2026-01-08T15:37:04.356638Z :DEBUG: [db] [sessionid] [cluster] Successfully connected. Initializing session 2026-01-08T15:37:04.357142Z :INFO: [db] [sessionid] [cluster] Server session id: 123-session-id-321 2026-01-08T15:37:04.367286Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2026-01-08T15:37:04.367693Z :INFO: [db] [sessionid] [cluster] Confirm partition stream create. Partition stream id: 1. Cluster: "TestCluster". Topic: "TestTopic". Partition: 1. Read offset: (NULL) Message data size: 11 Compressed message data size: 31 2026-01-08T15:37:04.368639Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 Post function 2026-01-08T15:37:04.368832Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 Post function Getting new event 2026-01-08T15:37:04.369076Z :DEBUG: Decompression task done. Partition/PartitionSessionId: 1 (5-8) 2026-01-08T15:37:04.369238Z :DEBUG: Decompression task done. Partition/PartitionSessionId: 1 (1-4) 2026-01-08T15:37:04.369317Z :DEBUG: Take Data. Partition 1. Read: {0, 0} (1-1) 2026-01-08T15:37:04.369343Z :DEBUG: Take Data. Partition 1. Read: {0, 1} (2-2) 2026-01-08T15:37:04.369366Z :DEBUG: [db] [sessionid] [cluster] The application data is transferred to the client. Number of messages 2, size 22 bytes DataReceived { PartitionStreamId: 1 PartitionId: 1 Message { Data: ..11 bytes.. Partition stream id: 1 Cluster: "TestCluster". Topic: "TestTopic" Partition: 1 PartitionKey: "" Information: { Offset: 1 SeqNo: 42 MessageGroupId: "src_id" CreateTime: 1970-01-01T00:00:00.042000Z WriteTime: 1970-01-01T00:02:03.000000Z Ip: "127.0.0.1" UncompressedSize: 0 Meta: { "k1": "v1", "k": "v" } } } Message { Data: ..11 bytes.. Partition stream id: 1 Cluster: "TestCluster". Topic: "TestTopic" Partition: 1 PartitionKey: "" Information: { Offset: 2 SeqNo: 43 MessageGroupId: "src_id" CreateTime: 1970-01-01T00:00:00.042000Z WriteTime: 1970-01-01T00:02:03.000000Z Ip: "127.0.0.1" UncompressedSize: 0 Meta: { "k1": "v1", "k": "v" } } } } 2026-01-08T15:37:04.369457Z :DEBUG: [db] [sessionid] [cluster] Commit offsets [1, 3). Partition stream id: 1 Getting new event 2026-01-08T15:37:04.369484Z :DEBUG: Take Data. Partition 1. Read: {0, 2} (3-3) 2026-01-08T15:37:04.369510Z :DEBUG: Take Data. Partition 1. Read: {1, 0} (4-4) 2026-01-08T15:37:04.369523Z :DEBUG: [db] [sessionid] [cluster] The application data is transferred to the client. 
Number of messages 2, size 22 bytes DataReceived { PartitionStreamId: 1 PartitionId: 1 Message { Data: ..11 bytes.. Partition stream id: 1 Cluster: "TestCluster". Topic: "TestTopic" Partition: 1 PartitionKey: "" Information: { Offset: 3 SeqNo: 44 MessageGroupId: "src_id" CreateTime: 1970-01-01T00:00:00.042000Z WriteTime: 1970-01-01T00:02:03.000000Z Ip: "127.0.0.1" UncompressedSize: 0 Meta: { "k1": "v1", "k": "v" } } } Message { Data: ..11 bytes.. Partition stream id: 1 Cluster: "TestCluster". Topic: "TestTopic" Partition: 1 PartitionKey: "" Information: { Offset: 4 SeqNo: 45 MessageGroupId: "src_id_2" CreateTime: 1970-01-01T00:00:00.042000Z WriteTime: 1970-01-01T00:05:21.000000Z Ip: "1.0.0.127" UncompressedSize: 0 Meta: { "v1": "k1", "v": "k" } } } } 2026-01-08T15:37:04.369592Z :DEBUG: [db] [sessionid] [cluster] Commit offsets [3, 5). Partition stream id: 1 Getting new event 2026-01-08T15:37:04.369673Z :DEBUG: Take Data. Partition 1. Read: {0, 0} (5-5) 2026-01-08T15:37:04.369687Z :DEBUG: Take Data. Partition 1. Read: {0, 1} (6-6) 2026-01-08T15:37:04.369698Z :DEBUG: [db] [sessionid] [cluster] The application data is transferred to the client. Number of messages 2, size 22 bytes DataReceived { PartitionStreamId: 1 PartitionId: 1 Message { Data: ..11 bytes.. Partition stream id: 1 Cluster: "TestCluster". Topic: "TestTopic" Partition: 1 PartitionKey: "" Information: { Offset: 5 SeqNo: 46 MessageGroupId: "src_id" CreateTime: 1970-01-01T00:00:00.042000Z WriteTime: 1970-01-01T00:02:03.000000Z Ip: "127.0.0.1" UncompressedSize: 0 Meta: { "k1": "v1", "k": "v" } } } Message { Data: ..11 bytes.. Partition stream id: 1 Cluster: "TestCluster". Topic: "TestTopic" Partition: 1 PartitionKey: "" Information: { Offset: 6 SeqNo: 47 MessageGroupId: "src_id" CreateTime: 1970-01-01T00:00:00.042000Z WriteTime: 1970-01-01T00:02:03.000000Z Ip: "127.0.0.1" UncompressedSize: 0 Meta: { "k1": "v1", "k": "v" } } } } 2026-01-08T15:37:04.369753Z :DEBUG: [db] [sessionid] [cluster] Commit offsets [5, 7). Partition stream id: 1 Getting new event 2026-01-08T15:37:04.369770Z :DEBUG: Take Data. Partition 1. Read: {0, 2} (7-7) 2026-01-08T15:37:04.369789Z :DEBUG: Take Data. Partition 1. Read: {1, 0} (8-8) 2026-01-08T15:37:04.369807Z :DEBUG: [db] [sessionid] [cluster] The application data is transferred to the client. Number of messages 2, size 22 bytes DataReceived { PartitionStreamId: 1 PartitionId: 1 Message { Data: ..11 bytes.. Partition stream id: 1 Cluster: "TestCluster". Topic: "TestTopic" Partition: 1 PartitionKey: "" Information: { Offset: 7 SeqNo: 48 MessageGroupId: "src_id" CreateTime: 1970-01-01T00:00:00.042000Z WriteTime: 1970-01-01T00:02:03.000000Z Ip: "127.0.0.1" UncompressedSize: 0 Meta: { "k1": "v1", "k": "v" } } } Message { Data: ..11 bytes.. Partition stream id: 1 Cluster: "TestCluster". Topic: "TestTopic" Partition: 1 PartitionKey: "" Information: { Offset: 8 SeqNo: 49 MessageGroupId: "src_id_2" CreateTime: 1970-01-01T00:00:00.042000Z WriteTime: 1970-01-01T00:05:21.000000Z Ip: "1.0.0.127" UncompressedSize: 0 Meta: { "v1": "k1", "v": "k" } } } } 2026-01-08T15:37:04.369865Z :DEBUG: [db] [sessionid] [cluster] Commit offsets [7, 9). 
Partition stream id: 1 2026-01-08T15:37:04.370999Z :DEBUG: [db] [sessionid] [cluster] In Reconnect, ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2026-01-08T15:37:04.371020Z :DEBUG: [db] [sessionid] [cluster] New values: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2026-01-08T15:37:04.371042Z :DEBUG: [db] [sessionid] [cluster] Reconnecting session to cluster cluster in 0.000000s 2026-01-08T15:37:04.371332Z :DEBUG: [db] [sessionid] [cluster] Successfully connected. Initializing session 2026-01-08T15:37:04.371744Z :INFO: [db] [sessionid] [cluster] Server session id: 123-session-id-321 2026-01-08T15:37:04.371880Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2026-01-08T15:37:04.372064Z :INFO: [db] [sessionid] [cluster] Confirm partition stream create. Partition stream id: 1. Cluster: "TestCluster". Topic: "TestTopic". Partition: 1. Read offset: (NULL) Message data size: 10 Compressed message data size: 30 2026-01-08T15:37:04.372741Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 Post function 2026-01-08T15:37:04.372892Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 Post function Getting new event 2026-01-08T15:37:04.373188Z :DEBUG: Decompression task done. Partition/PartitionSessionId: 1 (5-8) 2026-01-08T15:37:04.373425Z :DEBUG: Decompression task done. Partition/PartitionSessionId: 1 (1-4) 2026-01-08T15:37:04.373512Z :DEBUG: Take Data. Partition 1. Read: {0, 0} (1-1) 2026-01-08T15:37:04.373541Z :DEBUG: Take Data. Partition 1. Read: {0, 1} (2-2) 2026-01-08T15:37:04.373570Z :DEBUG: [db] [sessionid] [cluster] The application data is transferred to the client. Number of messages 2, size 20 bytes DataReceived { PartitionStreamId: 1 PartitionId: 1 Message { Data: ..10 bytes.. Partition stream id: 1 Cluster: "TestCluster". Topic: "TestTopic" Partition: 1 PartitionKey: "" Information: { Offset: 1 SeqNo: 42 MessageGroupId: "src_id" CreateTime: 1970-01-01T00:00:00.042000Z WriteTime: 1970-01-01T00:02:03.000000Z Ip: "127.0.0.1" UncompressedSize: 0 Meta: { "k1": "v1", "k": "v" } } } Message { Data: ..10 bytes.. Partition stream id: 1 Cluster: "TestCluster". Topic: "TestTopic" Partition: 1 PartitionKey: "" Information: { Offset: 2 SeqNo: 43 MessageGroupId: "src_id" CreateTime: 1970-01-01T00:00:00.042000Z WriteTime: 1970-01-01T00:02:03.000000Z Ip: "127.0.0.1" UncompressedSize: 0 Meta: { "k1": "v1", "k": "v" } } } } 2026-01-08T15:37:04.373679Z :DEBUG: [db] [sessionid] [cluster] Commit offsets [1, 3). Partition stream id: 1 Getting new event 2026-01-08T15:37:04.373705Z :DEBUG: Take Data. Partition 1. Read: {0, 2} (3-3) 2026-01-08T15:37:04.373718Z :DEBUG: Take Data. Partition 1. Read: {1, 0} (4-4) 2026-01-08T15:37:04.373737Z :DEBUG: [db] [sessionid] [cluster] The application data is transferred to the client. Number of messages 2, size 20 bytes DataReceived { PartitionStreamId: 1 PartitionId: 1 Message { Data: ..10 bytes.. Partition stream id: 1 Cluster: "TestCluster". Topic: "TestTopic" Partition: 1 PartitionKey: "" Information: { Offset: 3 SeqNo: 44 MessageGroupId: "src_id" CreateTime: 1970-01-01T00:00:00.042000Z WriteTime: 1970-01-01T00:02:03.000000Z Ip: "127.0.0.1" UncompressedSize: 0 Meta: { "k1": "v1", "k": "v" } } } Message { Data: ..10 bytes.. Partition stream id: 1 Cluster: "TestCluster". 
Topic: "TestTopic" Partition: 1 PartitionKey: "" Information: { Offset: 4 SeqNo: 45 MessageGroupId: "src_id_2" CreateTime: 1970-01-01T00:00:00.042000Z WriteTime: 1970-01-01T00:05:21.000000Z Ip: "1.0.0.127" UncompressedSize: 0 Meta: { "v1": "k1", "v": "k" } } } } 2026-01-08T15:37:04.373830Z :DEBUG: [db] [sessionid] [cluster] Commit offsets [3, 5). Partition stream id: 1 Getting new event 2026-01-08T15:37:04.373874Z :DEBUG: Take Data. Partition 1. Read: {0, 0} (5-5) 2026-01-08T15:37:04.373885Z :DEBUG: Take Data. Partition 1. Read: {0, 1} (6-6) 2026-01-08T15:37:04.373895Z :DEBUG: [db] [sessionid] [cluster] The application data is transferred to the client. Number of messages 2, size 20 bytes DataReceived { PartitionStreamId: 1 PartitionId: 1 Message { Data: ..10 bytes.. Partition stream id: 1 Cluster: "TestCluster". Topic: "TestTopic" Partition: 1 PartitionKey: "" Information: { Offset: 5 SeqNo: 46 MessageGroupId: "src_id" CreateTime: 1970-01-01T00:00:00.042000Z WriteTime: 1970-01-01T00:02:03.000000Z Ip: "127.0.0.1" UncompressedSize: 0 Meta: { "k1": "v1", "k": "v" } } } Message { Data: ..10 bytes.. Partition stream id: 1 Cluster: "TestCluster". Topic: "TestTopic" Partition: 1 PartitionKey: "" Information: { Offset: 6 SeqNo: 47 MessageGroupId: "src_id" CreateTime: 1970-01-01T00:00:00.042000Z WriteTime: 1970-01-01T00:02:03.000000Z Ip: "127.0.0.1" UncompressedSize: 0 Meta: { "k1": "v1", "k": "v" } } } } 2026-01-08T15:37:04.373931Z :DEBUG: [db] [sessionid] [cluster] Commit offsets [5, 7). Partition stream id: 1 Getting new event 2026-01-08T15:37:04.373947Z :DEBUG: Take Data. Partition 1. Read: {0, 2} (7-7) 2026-01-08T15:37:04.373958Z :DEBUG: Take Data. Partition 1. Read: {1, 0} (8-8) 2026-01-08T15:37:04.373971Z :DEBUG: [db] [sessionid] [cluster] The application data is transferred to the client. Number of messages 2, size 20 bytes DataReceived { PartitionStreamId: 1 PartitionId: 1 Message { Data: ..10 bytes.. Partition stream id: 1 Cluster: "TestCluster". Topic: "TestTopic" Partition: 1 PartitionKey: "" Information: { Offset: 7 SeqNo: 48 MessageGroupId: "src_id" CreateTime: 1970-01-01T00:00:00.042000Z WriteTime: 1970-01-01T00:02:03.000000Z Ip: "127.0.0.1" UncompressedSize: 0 Meta: { "k1": "v1", "k": "v" } } } Message { Data: ..10 bytes.. Partition stream id: 1 Cluster: "TestCluster". Topic: "TestTopic" Partition: 1 PartitionKey: "" Information: { Offset: 8 SeqNo: 49 MessageGroupId: "src_id_2" CreateTime: 1970-01-01T00 ... tream id: 1 Cluster: "TestCluster". Topic: "TestTopic" Partition: 1 PartitionKey: "" Information: { Offset: 190 SeqNo: 231 MessageGroupId: "src_id_2" CreateTime: 1970-01-01T00:00:00.042000Z WriteTime: 1970-01-01T00:05:21.000000Z Ip: "1.0.0.127" UncompressedSize: 0 Meta: { "v1": "k1", "v": "k" } } } Message { Data: ..1000000 bytes.. Partition stream id: 1 Cluster: "TestCluster". Topic: "TestTopic" Partition: 1 PartitionKey: "" Information: { Offset: 191 SeqNo: 232 MessageGroupId: "src_id_2" CreateTime: 1970-01-01T00:00:00.042000Z WriteTime: 1970-01-01T00:05:21.000000Z Ip: "1.0.0.127" UncompressedSize: 0 Meta: { "v1": "k1", "v": "k" } } } Message { Data: ..1000000 bytes.. Partition stream id: 1 Cluster: "TestCluster". Topic: "TestTopic" Partition: 1 PartitionKey: "" Information: { Offset: 192 SeqNo: 233 MessageGroupId: "src_id_2" CreateTime: 1970-01-01T00:00:00.042000Z WriteTime: 1970-01-01T00:05:21.000000Z Ip: "1.0.0.127" UncompressedSize: 0 Meta: { "v1": "k1", "v": "k" } } } Message { Data: ..1000000 bytes.. Partition stream id: 1 Cluster: "TestCluster". 
Topic: "TestTopic" Partition: 1 PartitionKey: "" Information: { Offset: 193 SeqNo: 234 MessageGroupId: "src_id_2" CreateTime: 1970-01-01T00:00:00.042000Z WriteTime: 1970-01-01T00:05:21.000000Z Ip: "1.0.0.127" UncompressedSize: 0 Meta: { "v1": "k1", "v": "k" } } } Message { Data: ..1000000 bytes.. Partition stream id: 1 Cluster: "TestCluster". Topic: "TestTopic" Partition: 1 PartitionKey: "" Information: { Offset: 194 SeqNo: 235 MessageGroupId: "src_id_2" CreateTime: 1970-01-01T00:00:00.042000Z WriteTime: 1970-01-01T00:05:21.000000Z Ip: "1.0.0.127" UncompressedSize: 0 Meta: { "v1": "k1", "v": "k" } } } Message { Data: ..1000000 bytes.. Partition stream id: 1 Cluster: "TestCluster". Topic: "TestTopic" Partition: 1 PartitionKey: "" Information: { Offset: 195 SeqNo: 236 MessageGroupId: "src_id_2" CreateTime: 1970-01-01T00:00:00.042000Z WriteTime: 1970-01-01T00:05:21.000000Z Ip: "1.0.0.127" UncompressedSize: 0 Meta: { "v1": "k1", "v": "k" } } } Message { Data: ..1000000 bytes.. Partition stream id: 1 Cluster: "TestCluster". Topic: "TestTopic" Partition: 1 PartitionKey: "" Information: { Offset: 196 SeqNo: 237 MessageGroupId: "src_id_2" CreateTime: 1970-01-01T00:00:00.042000Z WriteTime: 1970-01-01T00:05:21.000000Z Ip: "1.0.0.127" UncompressedSize: 0 Meta: { "v1": "k1", "v": "k" } } } Message { Data: ..1000000 bytes.. Partition stream id: 1 Cluster: "TestCluster". Topic: "TestTopic" Partition: 1 PartitionKey: "" Information: { Offset: 197 SeqNo: 238 MessageGroupId: "src_id_2" CreateTime: 1970-01-01T00:00:00.042000Z WriteTime: 1970-01-01T00:05:21.000000Z Ip: "1.0.0.127" UncompressedSize: 0 Meta: { "v1": "k1", "v": "k" } } } Message { Data: ..1000000 bytes.. Partition stream id: 1 Cluster: "TestCluster". Topic: "TestTopic" Partition: 1 PartitionKey: "" Information: { Offset: 198 SeqNo: 239 MessageGroupId: "src_id_2" CreateTime: 1970-01-01T00:00:00.042000Z WriteTime: 1970-01-01T00:05:21.000000Z Ip: "1.0.0.127" UncompressedSize: 0 Meta: { "v1": "k1", "v": "k" } } } Message { Data: ..1000000 bytes.. Partition stream id: 1 Cluster: "TestCluster". Topic: "TestTopic" Partition: 1 PartitionKey: "" Information: { Offset: 199 SeqNo: 240 MessageGroupId: "src_id_2" CreateTime: 1970-01-01T00:00:00.042000Z WriteTime: 1970-01-01T00:05:21.000000Z Ip: "1.0.0.127" UncompressedSize: 0 Meta: { "v1": "k1", "v": "k" } } } Message { Data: ..1000000 bytes.. Partition stream id: 1 Cluster: "TestCluster". Topic: "TestTopic" Partition: 1 PartitionKey: "" Information: { Offset: 200 SeqNo: 241 MessageGroupId: "src_id_2" CreateTime: 1970-01-01T00:00:00.042000Z WriteTime: 1970-01-01T00:05:21.000000Z Ip: "1.0.0.127" UncompressedSize: 0 Meta: { "v1": "k1", "v": "k" } } } } 2026-01-08T15:37:06.234243Z :DEBUG: [db] [sessionid] [cluster] Commit offsets [1, 201). Partition stream id: 1 2026-01-08T15:37:06.314821Z :DEBUG: [db] [sessionid] [cluster] In Reconnect, ReadSizeBudget = 5, ReadSizeServerDelta = 0 2026-01-08T15:37:06.314899Z :DEBUG: [db] [sessionid] [cluster] New values: ReadSizeBudget = 5, ReadSizeServerDelta = 0 2026-01-08T15:37:06.314953Z :DEBUG: [db] [sessionid] [cluster] Reconnecting session to cluster cluster in 0.000000s 2026-01-08T15:37:06.315274Z :DEBUG: [db] [sessionid] [cluster] Successfully connected. 
Initializing session 2026-01-08T15:37:06.315794Z :INFO: [db] [sessionid] [cluster] Server session id: 123-session-id-321 2026-01-08T15:37:06.316014Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 5, ReadSizeServerDelta = 0 2026-01-08T15:37:06.316275Z :INFO: [db] [sessionid] [cluster] Confirm partition stream create. Partition stream id: 1. Cluster: "TestCluster". Topic: "TestTopic". Partition: 1. Read offset: (NULL) Message data size: 1000000 Compressed message data size: 3028 Post function Getting new event 2026-01-08T15:37:06.421853Z :DEBUG: Decompression task done. Partition/PartitionSessionId: 1 (1-10) 2026-01-08T15:37:06.422895Z :DEBUG: Take Data. Partition 1. Read: {0, 0} (1-1) 2026-01-08T15:37:06.424742Z :DEBUG: Take Data. Partition 1. Read: {0, 1} (2-2) 2026-01-08T15:37:06.427598Z :DEBUG: Take Data. Partition 1. Read: {0, 2} (3-3) 2026-01-08T15:37:06.428610Z :DEBUG: Take Data. Partition 1. Read: {0, 3} (4-4) 2026-01-08T15:37:06.439287Z :DEBUG: Take Data. Partition 1. Read: {0, 4} (5-5) 2026-01-08T15:37:06.440168Z :DEBUG: Take Data. Partition 1. Read: {0, 5} (6-6) 2026-01-08T15:37:06.440989Z :DEBUG: Take Data. Partition 1. Read: {1, 0} (7-7) 2026-01-08T15:37:06.441843Z :DEBUG: Take Data. Partition 1. Read: {1, 1} (8-8) 2026-01-08T15:37:06.450697Z :DEBUG: Take Data. Partition 1. Read: {1, 2} (9-9) 2026-01-08T15:37:06.451565Z :DEBUG: Take Data. Partition 1. Read: {1, 3} (10-10) 2026-01-08T15:37:06.451648Z :DEBUG: [db] [sessionid] [cluster] The application data is transferred to the client. Number of messages 10, size 10000000 bytes 2026-01-08T15:37:06.451820Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 5, ReadSizeServerDelta = 0 DataReceived { PartitionStreamId: 1 PartitionId: 1 Message { Data: ..1000000 bytes.. Partition stream id: 1 Cluster: "TestCluster". Topic: "TestTopic" Partition: 1 PartitionKey: "" Information: { Offset: 1 SeqNo: 42 MessageGroupId: "src_id" CreateTime: 1970-01-01T00:00:00.042000Z WriteTime: 1970-01-01T00:02:03.000000Z Ip: "127.0.0.1" UncompressedSize: 0 Meta: { "k1": "v1", "k": "v" } } } Message { Data: ..1000000 bytes.. Partition stream id: 1 Cluster: "TestCluster". Topic: "TestTopic" Partition: 1 PartitionKey: "" Information: { Offset: 2 SeqNo: 43 MessageGroupId: "src_id" CreateTime: 1970-01-01T00:00:00.042000Z WriteTime: 1970-01-01T00:02:03.000000Z Ip: "127.0.0.1" UncompressedSize: 0 Meta: { "k1": "v1", "k": "v" } } } Message { Data: ..1000000 bytes.. Partition stream id: 1 Cluster: "TestCluster". Topic: "TestTopic" Partition: 1 PartitionKey: "" Information: { Offset: 3 SeqNo: 44 MessageGroupId: "src_id" CreateTime: 1970-01-01T00:00:00.042000Z WriteTime: 1970-01-01T00:02:03.000000Z Ip: "127.0.0.1" UncompressedSize: 0 Meta: { "k1": "v1", "k": "v" } } } Message { Data: ..1000000 bytes.. Partition stream id: 1 Cluster: "TestCluster". Topic: "TestTopic" Partition: 1 PartitionKey: "" Information: { Offset: 4 SeqNo: 45 MessageGroupId: "src_id" CreateTime: 1970-01-01T00:00:00.042000Z WriteTime: 1970-01-01T00:02:03.000000Z Ip: "127.0.0.1" UncompressedSize: 0 Meta: { "k1": "v1", "k": "v" } } } Message { Data: ..1000000 bytes.. Partition stream id: 1 Cluster: "TestCluster". Topic: "TestTopic" Partition: 1 PartitionKey: "" Information: { Offset: 5 SeqNo: 46 MessageGroupId: "src_id" CreateTime: 1970-01-01T00:00:00.042000Z WriteTime: 1970-01-01T00:02:03.000000Z Ip: "127.0.0.1" UncompressedSize: 0 Meta: { "k1": "v1", "k": "v" } } } Message { Data: ..1000000 bytes.. 
Partition stream id: 1 Cluster: "TestCluster". Topic: "TestTopic" Partition: 1 PartitionKey: "" Information: { Offset: 6 SeqNo: 47 MessageGroupId: "src_id" CreateTime: 1970-01-01T00:00:00.042000Z WriteTime: 1970-01-01T00:02:03.000000Z Ip: "127.0.0.1" UncompressedSize: 0 Meta: { "k1": "v1", "k": "v" } } } Message { Data: ..1000000 bytes.. Partition stream id: 1 Cluster: "TestCluster". Topic: "TestTopic" Partition: 1 PartitionKey: "" Information: { Offset: 7 SeqNo: 48 MessageGroupId: "src_id_2" CreateTime: 1970-01-01T00:00:00.042000Z WriteTime: 1970-01-01T00:05:21.000000Z Ip: "1.0.0.127" UncompressedSize: 0 Meta: { "v1": "k1", "v": "k" } } } Message { Data: ..1000000 bytes.. Partition stream id: 1 Cluster: "TestCluster". Topic: "TestTopic" Partition: 1 PartitionKey: "" Information: { Offset: 8 SeqNo: 49 MessageGroupId: "src_id_2" CreateTime: 1970-01-01T00:00:00.042000Z WriteTime: 1970-01-01T00:05:21.000000Z Ip: "1.0.0.127" UncompressedSize: 0 Meta: { "v1": "k1", "v": "k" } } } Message { Data: ..1000000 bytes.. Partition stream id: 1 Cluster: "TestCluster". Topic: "TestTopic" Partition: 1 PartitionKey: "" Information: { Offset: 9 SeqNo: 50 MessageGroupId: "src_id_2" CreateTime: 1970-01-01T00:00:00.042000Z WriteTime: 1970-01-01T00:05:21.000000Z Ip: "1.0.0.127" UncompressedSize: 0 Meta: { "v1": "k1", "v": "k" } } } Message { Data: ..1000000 bytes.. Partition stream id: 1 Cluster: "TestCluster". Topic: "TestTopic" Partition: 1 PartitionKey: "" Information: { Offset: 10 SeqNo: 51 MessageGroupId: "src_id_2" CreateTime: 1970-01-01T00:00:00.042000Z WriteTime: 1970-01-01T00:05:21.000000Z Ip: "1.0.0.127" UncompressedSize: 0 Meta: { "v1": "k1", "v": "k" } } } } 2026-01-08T15:37:06.455543Z :DEBUG: [db] [sessionid] [cluster] Commit offsets [1, 11). Partition stream id: 1 2026-01-08T15:37:06.463940Z :DEBUG: [db] [sessionid] [cluster] In Reconnect, ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2026-01-08T15:37:06.463964Z :DEBUG: [db] [sessionid] [cluster] New values: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2026-01-08T15:37:06.463984Z :DEBUG: [db] [sessionid] [cluster] Reconnecting session to cluster cluster in 0.000000s 2026-01-08T15:37:06.464368Z :DEBUG: [db] [sessionid] [cluster] Successfully connected. Initializing session 2026-01-08T15:37:06.464799Z :INFO: [db] [sessionid] [cluster] Server session id: 123-session-id-321 2026-01-08T15:37:06.464941Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2026-01-08T15:37:06.465176Z :INFO: [db] [sessionid] [cluster] Confirm partition stream create. Partition stream id: 1. Cluster: "TestCluster". Topic: "TestTopic". Partition: 1. Read offset: (NULL) 2026-01-08T15:37:06.465555Z :DEBUG: [db] [sessionid] [cluster] Requesting status for partition stream id: 1 2026-01-08T15:37:06.466736Z :DEBUG: [db] [sessionid] [cluster] In Reconnect, ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2026-01-08T15:37:06.466776Z :DEBUG: [db] [sessionid] [cluster] New values: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2026-01-08T15:37:06.466816Z :DEBUG: [db] [sessionid] [cluster] Reconnecting session to cluster cluster in 0.000000s 2026-01-08T15:37:06.467067Z :DEBUG: [db] [sessionid] [cluster] Successfully connected. 
Initializing session 2026-01-08T15:37:06.467486Z :INFO: [db] [sessionid] [cluster] Server session id: 123-session-id-321 2026-01-08T15:37:06.467786Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2026-01-08T15:37:06.468289Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2026-01-08T15:37:06.468430Z :DEBUG: Decompression task done. Partition/PartitionSessionId: 1 (1-1) 2026-01-08T15:37:06.468578Z :DEBUG: Take Data. Partition 1. Read: {0, 0} (1-1) 2026-01-08T15:37:06.468628Z :DEBUG: [db] [sessionid] [cluster] The application data is transferred to the client. Number of messages 1, size 8 bytes 2026-01-08T15:37:06.468754Z :INFO: [db] [sessionid] [cluster] Confirm partition stream destroy. Partition stream id: 1. Cluster: "TestCluster". Topic: "TestTopic". Partition: 1 |88.6%| [TM] {BAZEL_UPLOAD} ydb/public/sdk/cpp/src/client/persqueue_public/ut/unittest >> BasicUsage::WriteAndReadSomeMessagesWithSyncCompression >> TConsoleTests::TestMergeConfig [GOOD] >> TConsoleTests::TestCreateSubSubDomain >> KqpPg::PgUpdateCompoundKey-useSink [GOOD] |88.6%| [TA] {BAZEL_UPLOAD} $(B)/ydb/core/mind/bscontroller/ut_bscontroller/test-results/unittest/{meta.json ... results_accumulator.log} >> BasicUsage::WriteAndReadSomeMessagesWithAsyncCompression ------- [TM] {asan, default-linux-x86_64, release} ydb/core/grpc_services/grpc_request_check_actor_ut/unittest >> TestSetCloudPermissions::CanSetPermissionsWithoutGizmoResourse [GOOD] Test command err: 2026-01-08T15:37:03.365137Z node 1 :METADATA_PROVIDER WARN: log.cpp:841: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7593015232578856280:2067];send_to=[0:7307199536658146131:7762515]; 2026-01-08T15:37:03.365203Z node 1 :METADATA_PROVIDER ERROR: log.cpp:841: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2026-01-08T15:37:03.553573Z node 1 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: LookupError, path: Root/.metadata/script_executions 2026-01-08T15:37:03.563221Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2026-01-08T15:37:03.563390Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2026-01-08T15:37:03.609694Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2026-01-08T15:37:03.670929Z node 1 :IMPORT WARN: schemeshard_import.cpp:440: Table profiles were not loaded 2026-01-08T15:37:03.673258Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1374: Notification cookie mismatch for subscription [1:7593015232578856253:2081] 1767886623363209 != 1767886623363212 2026-01-08T15:37:03.714957Z node 1 :TICKET_PARSER TRACE: ticket_parser_impl.h:519: Ticket **** (8E120919) asking for AccessServiceBulkAuthorization( ydb.schemas.getMetadata ydb.clusters.get ydb.clusters.manage ydb.databases.list ydb.clusters.monitor ydb.streams.write ydb.tables.write ydb.databases.create ydb.databases.connect ydb.tables.select) 2026-01-08T15:37:03.715037Z node 1 :GRPC_CLIENT DEBUG: grpc_service_client.h:83: [7d598b9003d0] Connect to grpc://localhost:17406 2026-01-08T15:37:03.718479Z node 1 
:GRPC_CLIENT DEBUG: grpc_service_client.h:122: [7d598b9003d0] Request BulkAuthorizeRequest { iam_token: "**** (8E120919)" actions { items { resource_path { id: "database12345" type: "ydb.database" } resource_path { id: "folder12345" type: "resource-manager.folder" } permission: "ydb.schemas.getMetadata" } items { resource_path { id: "cluster.folder98765" type: "resource-manager.folder" } permission: "ydb.clusters.get" } items { resource_path { id: "cluster.folder98765" type: "resource-manager.folder" } permission: "ydb.clusters.manage" } items { resource_path { id: "database12345" type: "ydb.databa...(truncated) } 2026-01-08T15:37:03.731747Z node 1 :GRPC_CLIENT DEBUG: grpc_service_client.h:109: [7d598b9003d0] Response BulkAuthorizeResponse { subject { user_account { id: "user1" } } } 2026-01-08T15:37:03.732115Z node 1 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1875: Ticket **** (8E120919) () has now valid token of user1@as 2026-01-08T15:37:03.733696Z node 1 :GRPC_PROXY_NO_CONNECT_ACCESS DEBUG: grpc_request_check_actor.h:718: Skip check permission connect db, user is a admin, database: /Root/db, user: user1@as, from ip: ------- [TM] {asan, default-linux-x86_64, release} ydb/public/sdk/cpp/src/client/persqueue_public/ut/unittest >> ReadSessionImplTest::SimpleDataHandlersWithCommit [GOOD] Test command err: 2026-01-08T15:37:06.372916Z :DEBUG: [db] [sessionid] [cluster] In Reconnect, ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2026-01-08T15:37:06.373013Z :DEBUG: [db] [sessionid] [cluster] New values: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2026-01-08T15:37:06.373049Z :DEBUG: [db] [sessionid] [cluster] Reconnecting session to cluster cluster in 0.000000s 2026-01-08T15:37:06.373552Z :ERROR: [db] [sessionid] [cluster] Got error. Status: INTERNAL_ERROR. Description: 2026-01-08T15:37:06.373617Z :DEBUG: [db] [sessionid] [cluster] In Reconnect, ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2026-01-08T15:37:06.373649Z :DEBUG: [db] [sessionid] [cluster] New values: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2026-01-08T15:37:06.374870Z :DEBUG: [db] [sessionid] [cluster] Reconnecting session to cluster cluster in 0.008603s 2026-01-08T15:37:06.375489Z :DEBUG: [db] [sessionid] [cluster] Successfully connected. Initializing session 2026-01-08T15:37:06.376063Z :INFO: [db] [sessionid] [cluster] Server session id: session id 2026-01-08T15:37:06.376191Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2026-01-08T15:37:06.377725Z :DEBUG: [db] [sessionid] [cluster] In Reconnect, ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2026-01-08T15:37:06.377745Z :DEBUG: [db] [sessionid] [cluster] New values: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2026-01-08T15:37:06.377778Z :DEBUG: [db] [sessionid] [cluster] Reconnecting session to cluster cluster in 0.000000s 2026-01-08T15:37:06.378136Z :ERROR: [db] [sessionid] [cluster] Got error. Status: INTERNAL_ERROR. Description: 2026-01-08T15:37:06.378196Z :DEBUG: [db] [sessionid] [cluster] In Reconnect, ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2026-01-08T15:37:06.378225Z :DEBUG: [db] [sessionid] [cluster] New values: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2026-01-08T15:37:06.378359Z :DEBUG: [db] [sessionid] [cluster] Reconnecting session to cluster cluster in 0.007722s 2026-01-08T15:37:06.378692Z :DEBUG: [db] [sessionid] [cluster] Successfully connected. 
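The BulkAuthorizeRequest above batches several permission checks into one round trip: each item pairs a permission with a resource path (database, folder, cluster). A toy mirror of that request shape, with hypothetical types; this is not the real access-service proto API:

```cpp
#include <iostream>
#include <string>
#include <vector>

// Hypothetical mirror of the BulkAuthorizeRequest items seen in the log.
struct TResource {
    std::string Id;
    std::string Type;
};

struct TAuthItem {
    std::vector<TResource> Path;  // e.g. database + containing folder
    std::string Permission;
};

int main() {
    const std::vector<TResource> dbPath = {
        {"database12345", "ydb.database"},
        {"folder12345", "resource-manager.folder"},
    };
    std::vector<TAuthItem> items;
    // One item per permission, all checked in a single bulk request.
    for (const char* perm : {"ydb.schemas.getMetadata",
                             "ydb.databases.connect",
                             "ydb.tables.select"}) {
        items.push_back({dbPath, perm});
    }
    for (const auto& item : items) {
        std::cout << item.Path.front().Id << " <- " << item.Permission << "\n";
    }
}
```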
Initializing session 2026-01-08T15:37:06.379044Z :INFO: [db] [sessionid] [cluster] Server session id: session id 2026-01-08T15:37:06.379145Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2026-01-08T15:37:06.380067Z :DEBUG: [db] [sessionid] [cluster] In Reconnect, ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2026-01-08T15:37:06.380092Z :DEBUG: [db] [sessionid] [cluster] New values: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2026-01-08T15:37:06.380112Z :DEBUG: [db] [sessionid] [cluster] Reconnecting session to cluster cluster in 0.000000s 2026-01-08T15:37:06.380489Z :ERROR: [db] [sessionid] [cluster] Got error. Status: TIMEOUT. Description:
: Error: Failed to establish connection to server. Attempts done: 1 2026-01-08T15:37:06.380550Z :DEBUG: [db] [sessionid] [cluster] In Reconnect, ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2026-01-08T15:37:06.380588Z :DEBUG: [db] [sessionid] [cluster] New values: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2026-01-08T15:37:06.380661Z :DEBUG: [db] [sessionid] [cluster] Reconnecting session to cluster cluster in 0.244970s 2026-01-08T15:37:06.381178Z :DEBUG: [db] [sessionid] [cluster] Successfully connected. Initializing session 2026-01-08T15:37:06.381548Z :INFO: [db] [sessionid] [cluster] Server session id: session id 2026-01-08T15:37:06.381641Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2026-01-08T15:37:06.382340Z :DEBUG: [db] [sessionid] [cluster] In Reconnect, ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2026-01-08T15:37:06.382362Z :DEBUG: [db] [sessionid] [cluster] New values: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2026-01-08T15:37:06.382376Z :DEBUG: [db] [sessionid] [cluster] Reconnecting session to cluster cluster in 0.000000s 2026-01-08T15:37:06.382685Z :ERROR: [db] [sessionid] [cluster] Got error. Status: TIMEOUT. Description:
: Error: Failed to establish connection to server. Attempts done: 1 2026-01-08T15:37:06.382730Z :DEBUG: [db] [sessionid] [cluster] In Reconnect, ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2026-01-08T15:37:06.382749Z :DEBUG: [db] [sessionid] [cluster] New values: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2026-01-08T15:37:06.382811Z :DEBUG: [db] [sessionid] [cluster] Reconnecting session to cluster cluster in 0.231632s 2026-01-08T15:37:06.383242Z :DEBUG: [db] [sessionid] [cluster] Successfully connected. Initializing session 2026-01-08T15:37:06.383780Z :INFO: [db] [sessionid] [cluster] Server session id: session id 2026-01-08T15:37:06.383921Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2026-01-08T15:37:06.385338Z :DEBUG: [db] [sessionid] [cluster] In Reconnect, ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2026-01-08T15:37:06.385368Z :DEBUG: [db] [sessionid] [cluster] New values: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2026-01-08T15:37:06.385389Z :DEBUG: [db] [sessionid] [cluster] Reconnecting session to cluster cluster in 0.000000s 2026-01-08T15:37:06.385666Z :DEBUG: [db] [sessionid] [cluster] Successfully connected. Initializing session 2026-01-08T15:37:06.386066Z :INFO: [db] [sessionid] [cluster] Server session id: 123-session-id-321 2026-01-08T15:37:06.397580Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2026-01-08T15:37:06.398035Z :ERROR: [db] [sessionid] [cluster] Got error. Status: TRANSPORT_UNAVAILABLE. Description:
: Error: GRpc error: (14): 2026-01-08T15:37:06.398087Z :DEBUG: [db] [sessionid] [cluster] In Reconnect, ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2026-01-08T15:37:06.398116Z :DEBUG: [db] [sessionid] [cluster] New values: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2026-01-08T15:37:06.398206Z :DEBUG: [db] [sessionid] [cluster] Reconnecting session to cluster cluster in 0.193419s 2026-01-08T15:37:06.398773Z :DEBUG: [db] [sessionid] [cluster] Abort session to cluster 2026-01-08T15:37:06.400113Z :DEBUG: [db] [sessionid] [cluster] In Reconnect, ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2026-01-08T15:37:06.400143Z :DEBUG: [db] [sessionid] [cluster] New values: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2026-01-08T15:37:06.400183Z :DEBUG: [db] [sessionid] [cluster] Reconnecting session to cluster cluster in 0.000000s 2026-01-08T15:37:06.400563Z :DEBUG: [db] [sessionid] [cluster] Successfully connected. Initializing session 2026-01-08T15:37:06.401028Z :INFO: [db] [sessionid] [cluster] Server session id: 123-session-id-321 2026-01-08T15:37:06.401301Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2026-01-08T15:37:06.401589Z :INFO: [db] [sessionid] [cluster] Confirm partition stream create. Partition stream id: 1. Cluster: "TestCluster". Topic: "TestTopic". Partition: 1. Read offset: (NULL) 2026-01-08T15:37:06.402374Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 Post function 2026-01-08T15:37:06.402872Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 Post function 2026-01-08T15:37:06.403099Z :DEBUG: Decompression task done. Partition/PartitionSessionId: 1 (2-2) 2026-01-08T15:37:06.403224Z :DEBUG: Decompression task done. Partition/PartitionSessionId: 1 (1-1) 2026-01-08T15:37:06.403377Z :DEBUG: Take Data. Partition 1. Read: {0, 0} (1-1) 2026-01-08T15:37:06.403413Z :DEBUG: Take Data. Partition 1. Read: {0, 0} (2-2) 2026-01-08T15:37:06.403470Z :DEBUG: [db] [sessionid] [cluster] The application data is transferred to the client. Number of messages 1, size 8 bytes 2026-01-08T15:37:06.403525Z :DEBUG: [db] [sessionid] [cluster] The application data is transferred to the client. Number of messages 1, size 8 bytes 2026-01-08T15:37:06.405615Z :DEBUG: [db] [sessionid] [cluster] In Reconnect, ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2026-01-08T15:37:06.405644Z :DEBUG: [db] [sessionid] [cluster] New values: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2026-01-08T15:37:06.405669Z :DEBUG: [db] [sessionid] [cluster] Reconnecting session to cluster cluster in 0.000000s 2026-01-08T15:37:06.405990Z :DEBUG: [db] [sessionid] [cluster] Successfully connected. Initializing session 2026-01-08T15:37:06.406677Z :INFO: [db] [sessionid] [cluster] Server session id: 123-session-id-321 2026-01-08T15:37:06.406827Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2026-01-08T15:37:06.407751Z :INFO: [db] [sessionid] [cluster] Confirm partition stream create. Partition stream id: 1. Cluster: "TestCluster". Topic: "TestTopic". Partition: 1. Read offset: (NULL) 2026-01-08T15:37:06.508897Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2026-01-08T15:37:06.509166Z :DEBUG: Decompression task done. 
Partition/PartitionSessionId: 1 (1-2) 2026-01-08T15:37:06.509245Z :DEBUG: Take Data. Partition 1. Read: {0, 0} (1-1) 2026-01-08T15:37:06.509306Z :DEBUG: Take Data. Partition 1. Read: {1, 0} (2-2) 2026-01-08T15:37:06.509376Z :DEBUG: [db] [sessionid] [cluster] The application data is transferred to the client. Number of messages 2, size 6 bytes 2026-01-08T15:37:06.609766Z :INFO: [db] [sessionid] [cluster] Confirm partition stream destroy. Partition stream id: 1. Cluster: "TestCluster". Topic: "TestTopic". Partition: 1 2026-01-08T15:37:06.612017Z :DEBUG: [db] [sessionid] [cluster] Abort session to cluster 2026-01-08T15:37:06.613344Z :DEBUG: [db] [sessionid] [cluster] In Reconnect, ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2026-01-08T15:37:06.613369Z :DEBUG: [db] [sessionid] [cluster] New values: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2026-01-08T15:37:06.613393Z :DEBUG: [db] [sessionid] [cluster] Reconnecting session to cluster cluster in 0.000000s 2026-01-08T15:37:06.613768Z :DEBUG: [db] [sessionid] [cluster] Successfully connected. Initializing session 2026-01-08T15:37:06.614267Z :INFO: [db] [sessionid] [cluster] Server session id: 123-session-id-321 2026-01-08T15:37:06.614489Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2026-01-08T15:37:06.616093Z :INFO: [db] [sessionid] [cluster] Confirm partition stream create. Partition stream id: 1. Cluster: "TestCluster". Topic: "TestTopic". Partition: 1. Read offset: (NULL) 2026-01-08T15:37:06.719380Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2026-01-08T15:37:06.719595Z :DEBUG: Decompression task done. Partition/PartitionSessionId: 1 (1-2) 2026-01-08T15:37:06.719683Z :DEBUG: Take Data. Partition 1. Read: {0, 0} (1-1) 2026-01-08T15:37:06.719725Z :DEBUG: Take Data. Partition 1. Read: {1, 0} (2-2) 2026-01-08T15:37:06.719841Z :DEBUG: [db] [sessionid] [cluster] Commit offsets [1, 3). Partition stream id: 1 2026-01-08T15:37:06.719945Z :DEBUG: [db] [sessionid] [cluster] The application data is transferred to the client. Number of messages 2, size 6 bytes 2026-01-08T15:37:06.723697Z :INFO: [db] [sessionid] [cluster] Confirm partition stream destroy. Partition stream id: 1. Cluster: "TestCluster". Topic: "TestTopic". Partition: 1 2026-01-08T15:37:06.723754Z :DEBUG: [db] [sessionid] [cluster] Committed response: cookies { assign_id: 1 partition_cookie: 1 } 2026-01-08T15:37:06.723913Z :DEBUG: [db] [sessionid] [cluster] Abort session to cluster ------- [TM] {asan, default-linux-x86_64, release} ydb/public/sdk/cpp/src/client/persqueue_public/ut/unittest >> ReadSessionImplTest::SimpleDataHandlersWithGracefulReleaseWithCommit [GOOD] Test command err: 2026-01-08T15:37:06.144988Z :DEBUG: [db] [sessionid] [cluster] In Reconnect, ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2026-01-08T15:37:06.145019Z :DEBUG: [db] [sessionid] [cluster] New values: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2026-01-08T15:37:06.145050Z :DEBUG: [db] [sessionid] [cluster] Reconnecting session to cluster cluster in 0.000000s 2026-01-08T15:37:06.145485Z :DEBUG: [db] [sessionid] [cluster] Successfully connected. 
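Each read request in this output is followed by "ReadSizeBudget = …, ReadSizeServerDelta = …", which suggests byte-based flow control: the client grants the server a budget for in-flight read data and tops the grant back up as batches are handed to the application. A sketch under those assumed semantics (the field names come from the log; the arithmetic is a guess, and the SDK's real accounting may differ):

```cpp
#include <cstdint>
#include <iostream>

// Assumed semantics: Budget is the total in-flight byte allowance,
// ServerDelta is how much of that allowance the server currently holds.
struct TReadBudget {
    int64_t Budget = 52428800;  // 50 MiB, the value seen in the log
    int64_t ServerDelta = 0;

    // Before a read request, grant the server whatever part of the
    // budget it does not already hold.
    int64_t GrantForReadRequest() {
        const int64_t grant = Budget - ServerDelta;
        ServerDelta += grant;
        return grant;
    }

    // When a batch is handed to the application, those bytes of the
    // server-side grant are consumed and become grantable again.
    void OnDataDelivered(int64_t bytes) {
        ServerDelta -= bytes;
    }
};

int main() {
    TReadBudget b;
    std::cout << b.GrantForReadRequest() << "\n";  // 52428800
    b.OnDataDelivered(22);                         // a 22-byte batch delivered
    std::cout << b.GrantForReadRequest() << "\n";  // 22 re-granted
}
```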
Initializing session 2026-01-08T15:37:06.145990Z :INFO: [db] [sessionid] [cluster] Server session id: session id 2026-01-08T15:37:06.146068Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2026-01-08T15:37:06.147055Z :DEBUG: [db] [sessionid] [cluster] In Reconnect, ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2026-01-08T15:37:06.147081Z :DEBUG: [db] [sessionid] [cluster] New values: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2026-01-08T15:37:06.147101Z :DEBUG: [db] [sessionid] [cluster] Reconnecting session to cluster cluster in 0.000000s 2026-01-08T15:37:06.147405Z :DEBUG: [db] [sessionid] [cluster] Successfully connected. Initializing session 2026-01-08T15:37:06.147730Z :INFO: [db] [sessionid] [cluster] Server session id: session id 2026-01-08T15:37:06.147784Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2026-01-08T15:37:06.150207Z :DEBUG: [db] [sessionid] [cluster] In Reconnect, ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2026-01-08T15:37:06.150230Z :DEBUG: [db] [sessionid] [cluster] New values: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2026-01-08T15:37:06.150253Z :DEBUG: [db] [sessionid] [cluster] Reconnecting session to cluster cluster in 0.000000s 2026-01-08T15:37:06.150657Z :ERROR: [db] [sessionid] [cluster] Got error. Status: INTERNAL_ERROR. Description: 2026-01-08T15:37:06.150710Z :DEBUG: [db] [sessionid] [cluster] In Reconnect, ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2026-01-08T15:37:06.150740Z :DEBUG: [db] [sessionid] [cluster] New values: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2026-01-08T15:37:06.150903Z :INFO: [db] [sessionid] [cluster] Closing session to cluster: SessionClosed { Status: INTERNAL_ERROR Issues: "
: Error: Failed to establish connection to server "" ( cluster cluster). Attempts done: 1 " } 2026-01-08T15:37:06.152060Z :DEBUG: [db] [sessionid] [cluster] In Reconnect, ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2026-01-08T15:37:06.152096Z :DEBUG: [db] [sessionid] [cluster] New values: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2026-01-08T15:37:06.152115Z :DEBUG: [db] [sessionid] [cluster] Reconnecting session to cluster cluster in 0.000000s 2026-01-08T15:37:06.152526Z :ERROR: [db] [sessionid] [cluster] Got error. Status: TIMEOUT. Description:
: Error: Failed to establish connection to server. Attempts done: 1 2026-01-08T15:37:06.152566Z :DEBUG: [db] [sessionid] [cluster] In Reconnect, ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2026-01-08T15:37:06.152600Z :DEBUG: [db] [sessionid] [cluster] New values: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2026-01-08T15:37:06.152687Z :INFO: [db] [sessionid] [cluster] Closing session to cluster: SessionClosed { Status: TIMEOUT Issues: "
: Error: Failed to establish connection to server. Attempts done: 1 " } 2026-01-08T15:37:06.153877Z :DEBUG: [db] [sessionid] [cluster] In Reconnect, ReadSizeBudget = 2500, ReadSizeServerDelta = 0 2026-01-08T15:37:06.153907Z :DEBUG: [db] [sessionid] [cluster] New values: ReadSizeBudget = 2500, ReadSizeServerDelta = 0 2026-01-08T15:37:06.153936Z :DEBUG: [db] [sessionid] [cluster] Reconnecting session to cluster cluster in 0.000000s 2026-01-08T15:37:06.154200Z :DEBUG: [db] [sessionid] [cluster] Successfully connected. Initializing session 2026-01-08T15:37:06.154728Z :INFO: [db] [sessionid] [cluster] Server session id: 123-session-id-321 2026-01-08T15:37:06.166307Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 2500, ReadSizeServerDelta = 0 2026-01-08T15:37:06.167534Z :INFO: [db] [sessionid] [cluster] Confirm partition stream create. Partition stream id: 1. Cluster: "TestCluster". Topic: "TestTopic". Partition: 1. Read offset: (NULL) 2026-01-08T15:37:06.167911Z :INFO: [db] [sessionid] [cluster] Confirm partition stream create. Partition stream id: 2. Cluster: "TestCluster". Topic: "TestTopic". Partition: 2. Read offset: (NULL) 2026-01-08T15:37:06.171808Z :DEBUG: Decompression task done. Partition/PartitionSessionId: 1 (1-50) 2026-01-08T15:37:06.172073Z :DEBUG: Take Data. Partition 1. Read: {0, 0} (1-1) 2026-01-08T15:37:06.172114Z :DEBUG: Take Data. Partition 1. Read: {0, 1} (2-2) 2026-01-08T15:37:06.172152Z :DEBUG: Take Data. Partition 1. Read: {0, 2} (3-3) 2026-01-08T15:37:06.172175Z :DEBUG: Take Data. Partition 1. Read: {0, 3} (4-4) 2026-01-08T15:37:06.172204Z :DEBUG: Take Data. Partition 1. Read: {0, 4} (5-5) 2026-01-08T15:37:06.172225Z :DEBUG: Take Data. Partition 1. Read: {0, 5} (6-6) 2026-01-08T15:37:06.172250Z :DEBUG: Take Data. Partition 1. Read: {0, 6} (7-7) 2026-01-08T15:37:06.172269Z :DEBUG: Take Data. Partition 1. Read: {0, 7} (8-8) 2026-01-08T15:37:06.172301Z :DEBUG: Take Data. Partition 1. Read: {0, 8} (9-9) 2026-01-08T15:37:06.172319Z :DEBUG: Take Data. Partition 1. Read: {0, 9} (10-10) 2026-01-08T15:37:06.172337Z :DEBUG: Take Data. Partition 1. Read: {0, 10} (11-11) 2026-01-08T15:37:06.172370Z :DEBUG: Take Data. Partition 1. Read: {0, 11} (12-12) 2026-01-08T15:37:06.172388Z :DEBUG: Take Data. Partition 1. Read: {0, 12} (13-13) 2026-01-08T15:37:06.172405Z :DEBUG: Take Data. Partition 1. Read: {0, 13} (14-14) 2026-01-08T15:37:06.172422Z :DEBUG: Take Data. Partition 1. Read: {0, 14} (15-15) 2026-01-08T15:37:06.172442Z :DEBUG: Take Data. Partition 1. Read: {0, 15} (16-16) 2026-01-08T15:37:06.172503Z :DEBUG: Take Data. Partition 1. Read: {0, 16} (17-17) 2026-01-08T15:37:06.172522Z :DEBUG: Take Data. Partition 1. Read: {0, 17} (18-18) 2026-01-08T15:37:06.172539Z :DEBUG: Take Data. Partition 1. Read: {0, 18} (19-19) 2026-01-08T15:37:06.172569Z :DEBUG: Take Data. Partition 1. Read: {0, 19} (20-20) 2026-01-08T15:37:06.172590Z :DEBUG: Take Data. Partition 1. Read: {0, 20} (21-21) 2026-01-08T15:37:06.172607Z :DEBUG: Take Data. Partition 1. Read: {0, 21} (22-22) 2026-01-08T15:37:06.172628Z :DEBUG: Take Data. Partition 1. Read: {1, 0} (23-23) 2026-01-08T15:37:06.172646Z :DEBUG: Take Data. Partition 1. Read: {1, 1} (24-24) 2026-01-08T15:37:06.172664Z :DEBUG: Take Data. Partition 1. Read: {1, 2} (25-25) 2026-01-08T15:37:06.172692Z :DEBUG: Take Data. Partition 1. Read: {1, 3} (26-26) 2026-01-08T15:37:06.172714Z :DEBUG: Take Data. Partition 1. Read: {1, 4} (27-27) 2026-01-08T15:37:06.172746Z :DEBUG: Take Data. Partition 1. 
Read: {1, 5} (28-28) 2026-01-08T15:37:06.172763Z :DEBUG: Take Data. Partition 1. Read: {1, 6} (29-29) 2026-01-08T15:37:06.172781Z :DEBUG: Take Data. Partition 1. Read: {1, 7} (30-30) 2026-01-08T15:37:06.172797Z :DEBUG: Take Data. Partition 1. Read: {1, 8} (31-31) 2026-01-08T15:37:06.172815Z :DEBUG: Take Data. Partition 1. Read: {1, 9} (32-32) 2026-01-08T15:37:06.172884Z :DEBUG: Take Data. Partition 1. Read: {1, 10} (33-33) 2026-01-08T15:37:06.172909Z :DEBUG: Take Data. Partition 1. Read: {1, 11} (34-34) 2026-01-08T15:37:06.172937Z :DEBUG: Take Data. Partition 1. Read: {1, 12} (35-35) 2026-01-08T15:37:06.172959Z :DEBUG: Take Data. Partition 1. Read: {1, 13} (36-36) 2026-01-08T15:37:06.172977Z :DEBUG: Take Data. Partition 1. Read: {1, 14} (37-37) 2026-01-08T15:37:06.172994Z :DEBUG: Take Data. Partition 1. Read: {1, 15} (38-38) 2026-01-08T15:37:06.173014Z :DEBUG: Take Data. Partition 1. Read: {1, 16} (39-39) 2026-01-08T15:37:06.173032Z :DEBUG: Take Data. Partition 1. Read: {1, 17} (40-40) 2026-01-08T15:37:06.173048Z :DEBUG: Take Data. Partition 1. Read: {1, 18} (41-41) 2026-01-08T15:37:06.173070Z :DEBUG: Take Data. Partition 1. Read: {1, 19} (42-42) 2026-01-08T15:37:06.173086Z :DEBUG: Take Data. Partition 1. Read: {1, 20} (43-43) 2026-01-08T15:37:06.173117Z :DEBUG: Take Data. Partition 1. Read: {1, 21} (44-44) 2026-01-08T15:37:06.173134Z :DEBUG: Take Data. Partition 1. Read: {1, 22} (45-45) 2026-01-08T15:37:06.173151Z :DEBUG: Take Data. Partition 1. Read: {1, 23} (46-46) 2026-01-08T15:37:06.173168Z :DEBUG: Take Data. Partition 1. Read: {1, 24} (47-47) 2026-01-08T15:37:06.173191Z :DEBUG: Take Data. Partition 1. Read: {1, 25} (48-48) 2026-01-08T15:37:06.173207Z :DEBUG: Take Data. Partition 1. Read: {1, 26} (49-49) 2026-01-08T15:37:06.173225Z :DEBUG: Take Data. Partition 1. Read: {1, 27} (50-50) 2026-01-08T15:37:06.173296Z :DEBUG: [db] [sessionid] [cluster] The application data is transferred to the client. Number of messages 50, size 5000 bytes 2026-01-08T15:37:06.175812Z :DEBUG: Decompression task done. Partition/PartitionSessionId: 2 (51-100) 2026-01-08T15:37:06.176006Z :DEBUG: Take Data. Partition 2. Read: {0, 0} (51-51) 2026-01-08T15:37:06.176041Z :DEBUG: Take Data. Partition 2. Read: {0, 1} (52-52) 2026-01-08T15:37:06.176062Z :DEBUG: Take Data. Partition 2. Read: {0, 2} (53-53) 2026-01-08T15:37:06.176081Z :DEBUG: Take Data. Partition 2. Read: {0, 3} (54-54) 2026-01-08T15:37:06.176104Z :DEBUG: Take Data. Partition 2. Read: {0, 4} (55-55) 2026-01-08T15:37:06.176122Z :DEBUG: Take Data. Partition 2. Read: {0, 5} (56-56) 2026-01-08T15:37:06.176142Z :DEBUG: Take Data. Partition 2. Read: {0, 6} (57-57) 2026-01-08T15:37:06.176162Z :DEBUG: Take Data. Partition 2. Read: {0, 7} (58-58) 2026-01-08T15:37:06.176194Z :DEBUG: Take Data. Partition 2. Read: {0, 8} (59-59) 2026-01-08T15:37:06.176213Z :DEBUG: Take Data. Partition 2. Read: {0, 9} (60-60) 2026-01-08T15:37:06.176231Z :DEBUG: Take Data. Partition 2. Read: {0, 10} (61-61) 2026-01-08T15:37:06.176269Z :DEBUG: Take Data. Partition 2. Read: {0, 11} (62-62) 2026-01-08T15:37:06.176290Z :DEBUG: Take Data. Partition 2. Read: {0, 12} (63-63) 2026-01-08T15:37:06.176308Z :DEBUG: Take Data. Partition 2. Read: {0, 13} (64-64) 2026-01-08T15:37:06.176331Z :DEBUG: Take Data. Partition 2. Read: {0, 14} (65-65) 2026-01-08T15:37:06.176360Z :DEBUG: Take Data. Partition 2. Read: {0, 15} (66-66) 2026-01-08T15:37:06.176413Z :DEBUG: Take Data. Partition 2. Read: {0, 16} (67-67) 2026-01-08T15:37:06.176434Z :DEBUG: Take Data. Partition 2. 
Read: {0, 17} (68-68) 2026-01-08T15:37:06.176480Z :DEBUG: Take Data. Partition 2. Read: {0, 18} (69-69) 2026-01-08T15:37:06.176500Z :DEBUG: Take Data. Partition 2. Read: {0, 19} (70-70) 2026-01-08T15:37:06.176518Z :DEBUG: Take Data. Partition 2. Read: {0, 20} (71-71) 2026-01-08T15:37:06.176535Z :DEBUG: Take Data. Partition 2. Read: {0, 21} (72-72) 2026-01-08T15:37:06.176552Z :DEBUG: Take Data. Partition 2. Read: {1, 0} (73-73) 2026-01-08T15:37:06.176570Z :DEBUG: Take Data. Partition 2. Read: {1, 1} (74-74) 2026-01-08T15:37:06.176601Z :DEBUG: Take Data. Partition 2. Read: {1, 2} (75-75) 2026-01-08T15:37:06.176618Z :DEBUG: Take Data. Partition 2. Read: {1, 3} (76-76) 2026-01-08T15:37:06.176634Z :DEBUG: Take Data. Partition 2. Read: {1, 4} (77-77) 2026-01-08T15:37:06.176664Z :DEBUG: Take Data. Partition 2. Read: {1, 5} (78-78) 2026-01-08T15:37:06.176684Z :DEBUG: Take Data. Partition 2. Read: {1, 6} (79-79) 2026-01-08T15:37:06.176701Z :DEBUG: Take Data. Partition 2. Read: {1, 7} (80-80) 2026-01-08T15:37:06.176722Z :DEBUG: Take Data. Partition 2. Read: {1, 8} (81-81) 2026-01-08T15:37:06.176749Z :DEBUG: Take Data. Partition 2. Read: {1, 9} (82-82) 2026-01-08T15:37:06.176834Z :DEBUG: Take Data. Partition 2. Read: {1, 10} (83-83) 2026-01-08T15:37:06.176857Z :DEBUG: Take Data. Partition 2. Read: {1, 11} (84-84) 2026-01-08T15:37:06.176874Z :DEBUG: Take Data. Partition 2. Read: {1, 12} (85-85) 2026-01-08T15:37:06.176890Z :DEBUG: Take Data. Partition 2. Read: {1, 13} (86-86) 2026-01-08T15:37:06.176907Z :DEBUG: Take Data. Partition 2. Read: {1, 14} (87-87) 2026-01-08T15:37:06.176924Z :DEBUG: Take Data. Partition 2. Read: {1, 15} (88-88) 2026-01-08T15:37:06.176941Z :DEBUG: Take Data. Partition 2. Read: {1, 16} (89-89) 2026-01-08T15:37:06.176957Z :DEBUG: Take Data. Partition 2. Read: {1, 17} (90-90) 2026-01-08T15:37:06.176989Z :DEBUG: Take Data. Partition 2. Read: {1, 18} (91-91) 2026-01-08T15:37:06.177011Z :DEBUG: Take Data. Partition 2. Read: {1, 19} (92-92) 2026-01-08T15:37:06.177028Z :DEBUG: Take Data. Partition 2. Read: {1, 20} (93-93) 2026-01-08T15:37:06.177046Z :DEBUG: Take Data. Partition 2. Read: {1, 21} (94-94) 2026-01-08T15:37:06.177063Z :DEBUG: Take Data. Partition 2. Read: {1, 22} (95-95) 2026-01-08T15:37:06.177080Z :DEBUG: Take Data. Partition 2. Read: {1, 23} (96-96) 2026-01-08T15:37:06.177101Z :DEBUG: Take Data. Partition 2. Read: {1, 24} (97-97) 2026-01-08T15:37:06.177128Z :DEBUG: Take Data. Partition 2. Read: {1, 25} (98-98) 2026-01-08T15:37:06.177148Z :DEBUG: Take Data. Partition 2. Read: {1, 26} (99-99) 2026-01-08T15:37:06.177164Z :DEBUG: Take Data. Partition 2. Read: {1, 27} (100-100) 2026-01-08T15:37:06.177220Z :DEBUG: [db] [sessionid] [cluster] The application data is transferred to the client. Number of messages 50, size 5000 bytes 2026-01-08T15:37:06.177365Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 2500, ReadSizeServerDelta = 0 2026-01-08T15:37:06.178836Z :DEBUG: [db] [sessionid] [cluster] In Reconnect, ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2026-01-08T15:37:06.178866Z :DEBUG: [db] [sessionid] [cluster] New values: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2026-01-08T15:37:06.178894Z :DEBUG: [db] [sessionid] [cluster] Reconnecting session to cluster cluster in 0.000000s 2026-01-08T15:37:06.180590Z :DEBUG: [db] [sessionid] [cluster] Successfully connected. 
Initializing session 2026-01-08T15:37:06.181174Z :INFO: [db] [sessionid] [cluster] Server session id: 123-session-id-321 2026-01-08T15:37:06.181400Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2026-01-08T15:37:06.181804Z :INFO: [db] [sessionid] [cluster] Confirm partition stream create. Partition stream id: 1. Cluster: "TestCluster". Topic: "TestTopic". Partition: 1. Read offset: (NULL) 2026-01-08T15:37:06.283375Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2026-01-08T15:37:06.283660Z :DEBUG: Decompression task done. Partition/PartitionSessionId: 1 (1-2) 2026-01-08T15:37:06.283734Z :DEBUG: Take Data. Partition 1. Read: {0, 0} (1-1) 2026-01-08T15:37:06.283799Z :DEBUG: Take Data. Partition 1. Read: {1, 0} (2-2) 2026-01-08T15:37:06.283870Z :DEBUG: [db] [sessionid] [cluster] The application data is transferred to the client. Number of messages 2, size 6 bytes 2026-01-08T15:37:06.484441Z :DEBUG: [db] [sessionid] [cluster] Commit offsets [1, 3). Partition stream id: 1 2026-01-08T15:37:06.584958Z :DEBUG: [db] [sessionid] [cluster] Committed response: cookies { assign_id: 1 partition_cookie: 1 } 2026-01-08T15:37:06.585127Z :INFO: [db] [sessionid] [cluster] Confirm partition stream destroy. Partition stream id: 1. Cluster: "TestCluster". Topic: "TestTopic". Partition: 1 2026-01-08T15:37:06.585299Z :DEBUG: [db] [sessionid] [cluster] Abort session to cluster 2026-01-08T15:37:06.586369Z :DEBUG: [db] [sessionid] [cluster] In Reconnect, ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2026-01-08T15:37:06.586400Z :DEBUG: [db] [sessionid] [cluster] New values: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2026-01-08T15:37:06.586422Z :DEBUG: [db] [sessionid] [cluster] Reconnecting session to cluster cluster in 0.000000s 2026-01-08T15:37:06.586846Z :DEBUG: [db] [sessionid] [cluster] Successfully connected. Initializing session 2026-01-08T15:37:06.587439Z :INFO: [db] [sessionid] [cluster] Server session id: 123-session-id-321 2026-01-08T15:37:06.587655Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2026-01-08T15:37:06.588158Z :INFO: [db] [sessionid] [cluster] Confirm partition stream create. Partition stream id: 1. Cluster: "TestCluster". Topic: "TestTopic". Partition: 1. Read offset: (NULL) 2026-01-08T15:37:06.688896Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2026-01-08T15:37:06.689202Z :DEBUG: Decompression task done. Partition/PartitionSessionId: 1 (1-2) 2026-01-08T15:37:06.689273Z :DEBUG: Take Data. Partition 1. Read: {0, 0} (1-1) 2026-01-08T15:37:06.689348Z :DEBUG: Take Data. Partition 1. Read: {1, 0} (2-2) 2026-01-08T15:37:06.689432Z :DEBUG: [db] [sessionid] [cluster] Commit offsets [1, 3). Partition stream id: 1 2026-01-08T15:37:06.689543Z :DEBUG: [db] [sessionid] [cluster] The application data is transferred to the client. Number of messages 2, size 6 bytes 2026-01-08T15:37:06.689639Z :DEBUG: [db] [sessionid] [cluster] Committed response: cookies { assign_id: 1 partition_cookie: 1 } 2026-01-08T15:37:06.689715Z :INFO: [db] [sessionid] [cluster] Confirm partition stream destroy. Partition stream id: 1. Cluster: "TestCluster". Topic: "TestTopic". 
Partition: 1 2026-01-08T15:37:06.689816Z :DEBUG: [db] [sessionid] [cluster] Abort session to cluster >> EraseRowsTests::ConditionalEraseRowsShouldEraseOnUint64MilliSeconds [GOOD] >> EraseRowsTests::ConditionalEraseRowsShouldEraseOnUint64MicroSeconds |88.6%| [TM] {BAZEL_UPLOAD} ydb/core/grpc_services/grpc_request_check_actor_ut/unittest |88.6%| [TM] {BAZEL_UPLOAD} ydb/public/sdk/cpp/src/client/persqueue_public/ut/unittest |88.6%| [TM] {BAZEL_UPLOAD} ydb/public/sdk/cpp/src/client/persqueue_public/ut/unittest >> PersQueueSdkReadSessionTest::ReadSessionWithAbort >> ReadSessionImplTest::ForcefulDestroyPartitionStream [GOOD] >> ReadSessionImplTest::DestroyPartitionStreamRequest |88.6%| [TA] {RESULT} $(B)/ydb/core/mind/bscontroller/ut_bscontroller/test-results/unittest/{meta.json ... results_accumulator.log} ------- [TM] {asan, default-linux-x86_64, release} ydb/core/statistics/service/ut/unittest >> BasicStatistics::TwoNodes [GOOD] Test command err: ... waiting for SysViewsRoster update finished 2026-01-08T15:36:17.400355Z node 1 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:125: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, got undelivered to scheme cache: ActorUnknown 2026-01-08T15:36:17.524965Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:125: [WorkloadService] [Service] Failed to discover tenant nodes 2026-01-08T15:36:17.535210Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:529:2407], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2026-01-08T15:36:17.535637Z node 1 :METADATA_PROVIDER ERROR: log.cpp:841: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2026-01-08T15:36:17.535840Z node 1 :METADATA_PROVIDER ERROR: log.cpp:841: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2026-01-08T15:36:18.042697Z node 1 :IMPORT WARN: schemeshard_import.cpp:440: Table profiles were not loaded 2026-01-08T15:36:18.161912Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2026-01-08T15:36:18.162060Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2026-01-08T15:36:18.226749Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2026-01-08T15:36:18.360890Z node 1 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: PathErrorUnknown, path: Root/.metadata/script_executions ... waiting for SysViewsRoster update finished (done) 2026-01-08T15:36:19.299646Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2026-01-08T15:36:19.300703Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2026-01-08T15:36:19.300772Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2026-01-08T15:36:19.300808Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2026-01-08T15:36:19.301357Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2026-01-08T15:36:19.392803Z node 1 :TX_CONVEYOR ERROR: log.cpp:841: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2026-01-08T15:36:20.019672Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateExtSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_extsubdomain.cpp:194) 2026-01-08T15:36:27.391462Z node 2 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:125: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, got undelivered to scheme cache: ActorUnknown 2026-01-08T15:36:27.391829Z node 3 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:125: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, got undelivered to scheme cache: ActorUnknown 2026-01-08T15:36:27.402700Z node 2 :STATISTICS INFO: service_impl.cpp:234: Subscribed for config changes on node 2 2026-01-08T15:36:27.402787Z node 3 :STATISTICS INFO: 
service_impl.cpp:234: Subscribed for config changes on node 3 2026-01-08T15:36:27.409670Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:125: [WorkloadService] [Service] Failed to discover tenant nodes 2026-01-08T15:36:27.493155Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2026-01-08T15:36:27.493310Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2026-01-08T15:36:27.493897Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2026-01-08T15:36:27.493979Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2026-01-08T15:36:27.526110Z node 1 :HIVE WARN: hive_impl.cpp:814: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 2 Cookie 2 2026-01-08T15:36:27.526251Z node 1 :HIVE WARN: hive_impl.cpp:814: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 3 Cookie 3 2026-01-08T15:36:27.529033Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Connecting -> Connected 2026-01-08T15:36:27.529467Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2026-01-08T15:36:27.807709Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2026-01-08T15:36:27.807842Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2026-01-08T15:36:27.809388Z node 2 :HIVE WARN: tx__create_tablet.cpp:348: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2026-01-08T15:36:27.810121Z node 2 :HIVE WARN: tx__create_tablet.cpp:348: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2026-01-08T15:36:27.813027Z node 2 :HIVE WARN: tx__create_tablet.cpp:348: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2026-01-08T15:36:27.815164Z node 2 :HIVE WARN: tx__create_tablet.cpp:348: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2026-01-08T15:36:27.815297Z node 2 :HIVE WARN: tx__create_tablet.cpp:348: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2026-01-08T15:36:27.815381Z node 2 :HIVE WARN: tx__create_tablet.cpp:348: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2026-01-08T15:36:27.815637Z node 2 :HIVE WARN: tx__create_tablet.cpp:348: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2026-01-08T15:36:27.815812Z node 2 :HIVE WARN: tx__create_tablet.cpp:348: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2026-01-08T15:36:27.816066Z node 2 :HIVE WARN: tx__create_tablet.cpp:348: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2026-01-08T15:36:27.839162Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2026-01-08T15:36:28.053591Z node 2 :IMPORT WARN: schemeshard_import.cpp:440: Table profiles were not loaded 2026-01-08T15:36:28.197756Z node 2 :STATISTICS INFO: aggregator_impl.cpp:43: [72075186224037894] OnActivateExecutor 2026-01-08T15:36:28.197869Z node 2 :STATISTICS DEBUG: tx_init_schema.cpp:13: [72075186224037894] TTxInitSchema::Execute 
2026-01-08T15:36:28.298845Z node 2 :STATISTICS DEBUG: tx_init_schema.cpp:34: [72075186224037894] TTxInitSchema::Complete 2026-01-08T15:36:28.300655Z node 2 :STATISTICS DEBUG: tx_init.cpp:18: [72075186224037894] TTxInit::Execute 2026-01-08T15:36:28.300880Z node 2 :STATISTICS DEBUG: tx_init.cpp:124: [72075186224037894] Loaded BaseStatistics: schemeshard count# 0 2026-01-08T15:36:28.300944Z node 2 :STATISTICS DEBUG: tx_init.cpp:149: [72075186224037894] Loaded ColumnStatistics: column count# 0 2026-01-08T15:36:28.301003Z node 2 :STATISTICS DEBUG: tx_init.cpp:188: [72075186224037894] Loaded ScheduleTraversals: table count# 0 2026-01-08T15:36:28.301068Z node 2 :STATISTICS DEBUG: tx_init.cpp:224: [72075186224037894] Loaded ForceTraversalOperations: table count# 0 2026-01-08T15:36:28.301133Z node 2 :STATISTICS DEBUG: tx_init.cpp:273: [72075186224037894] Loaded ForceTraversalTables: table count# 0 2026-01-08T15:36:28.301190Z node 2 :STATISTICS DEBUG: tx_init.cpp:280: [72075186224037894] TTxInit::Complete 2026-01-08T15:36:28.307617Z node 2 :STATISTICS INFO: aggregator_impl.cpp:69: [72075186224037894] Subscribed for config changes 2026-01-08T15:36:28.333120Z node 2 :STATISTICS DEBUG: schemeshard_impl.cpp:8331: ResolveSA(), StatisticsAggregatorId=72075186224037894, at schemeshard: 72075186224037897 2026-01-08T15:36:28.333243Z node 2 :STATISTICS DEBUG: schemeshard_impl.cpp:8361: ConnectToSA(), pipe client id: [2:2530:2587], at schemeshard: 72075186224037897, StatisticsAggregatorId: 72075186224037894, at schemeshard: 72075186224037897 2026-01-08T15:36:28.396907Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:97: [72075186224037894] EvServerConnected, pipe server id = [2:2572:2623] 2026-01-08T15:36:28.398783Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:224: [72075186224037894] EvConnectSchemeShard, pipe server id = [2:2572:2623], schemeshard id = 72075186224037897 2026-01-08T15:36:28.546226Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:97: [72075186224037894] EvServerConnected, pipe server id = [2:2664:2663] 2026-01-08T15:36:28.548219Z node 2 :STATISTICS DEBUG: tx_configure.cpp:21: [72075186224037894] TTxConfigure::Execute: database# /Root/Database 2026-01-08T15:36:28.576013Z node 2 :STATISTICS DEBUG: table_creator.cpp:191: Table _statistics updater. SelfId: [2:2669:2668] Owner: [2:2668:2667]. Describe result: PathErrorUnknown 2026-01-08T15:36:28.576100Z node 2 :STATISTICS NOTICE: table_creator.cpp:211: Table _statistics updater. SelfId: [2:2669:2668] Owner: [2:2668:2667]. Creating table 2026-01-08T15:36:28.576204Z node 2 :STATISTICS DEBUG: table_creator.cpp:112: Table _statistics updater. SelfId: [2:2669:2668] Owner: [2:2668:2667]. Created ESchemeOpCreateTable transaction for path: /Root/Database/.metadata/_statistics 2026-01-08T15:36:28.595346Z node 2 :TX_PROXY_SCHEME_CACHE WARN: cache.cpp:307: Path does not belong to the specified domain: self# [2:2724:2692], domain# [OwnerId: 72057594046644480, LocalPathId: 38], path's domain# [OwnerId: 72057594046644480, LocalPathId: 1] 2026-01-08T15:36:28.600196Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976725657:1, at schemeshard: 72075186224037897, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:36:28.663881Z node 2 :STATISTICS DEBUG: table_creator.cpp:229: Table _statistics updater. SelfId: [2:2669:2668] Owner: [2:2668:2667]. 
TEvProposeTransactionStatus: { Status: 53 TxId: 281474976725657 SchemeShardStatus: 1 SchemeShardTabletId: 72075186224037897 PathId: 33 } 2026-01-08T15:36:28.664083Z node 2 :STATISTICS DEBUG: table_creator.cpp:304: Table _statistics updater. SelfId: [2:2669:2668] Owner: [2:2668:2667]. Subscribe on create table tx: 281474976725657 2026-01-08T15:36:28.696123Z node 2 :STATISTICS DEBUG: table_creator.cpp:329: Table _statistics updater. SelfId: [2:2669:2668] Owner: [2:2668:2667]. Subscribe on tx: 281474976725657 registered 2026-01-08T15:36:29.324432Z node 2 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: PathErrorUnknown, path: Root/Databa ... ] RequestId[ 23 ], ReplyToActorId[ [2:4323:3319]], StatType[ 0 ], StatRequestsCount[ 1 ] 2026-01-08T15:36:51.745622Z node 2 :STATISTICS DEBUG: service_impl.cpp:790: [TStatService::TEvNavigateKeySetResult] RequestId[ 23 ] 2026-01-08T15:36:51.745650Z node 2 :STATISTICS DEBUG: service_impl.cpp:1285: ReplySuccess(), request id = 23, ReplyToActorId = [2:4323:3319], StatRequests.size() = 1 2026-01-08T15:36:52.725038Z node 2 :STATISTICS DEBUG: service_impl.cpp:773: [TStatService::TEvGetStatistics] RequestId[ 24 ], ReplyToActorId[ [2:4354:3329]], StatType[ 0 ], StatRequestsCount[ 1 ] 2026-01-08T15:36:52.725389Z node 2 :STATISTICS DEBUG: service_impl.cpp:790: [TStatService::TEvNavigateKeySetResult] RequestId[ 24 ] 2026-01-08T15:36:52.725436Z node 2 :STATISTICS DEBUG: service_impl.cpp:1285: ReplySuccess(), request id = 24, ReplyToActorId = [2:4354:3329], StatRequests.size() = 1 2026-01-08T15:36:53.194049Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:354: [72075186224037894] PropagateStatistics(), node count = 2, schemeshard count = 1 2026-01-08T15:36:53.194192Z node 2 :STATISTICS DEBUG: service_impl.cpp:943: EvPropagateStatistics, node id: 2 cookie: 5 2026-01-08T15:36:53.194304Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:258: [72075186224037894] EvPropagateStatisticsResponse, cookie: 5 2026-01-08T15:36:53.194532Z node 1 :STATISTICS DEBUG: service_impl.cpp:943: EvPropagateStatistics, node id: 1 cookie: 0 2026-01-08T15:36:53.788470Z node 2 :STATISTICS DEBUG: service_impl.cpp:773: [TStatService::TEvGetStatistics] RequestId[ 25 ], ReplyToActorId[ [2:4385:3339]], StatType[ 0 ], StatRequestsCount[ 1 ] 2026-01-08T15:36:53.788797Z node 2 :STATISTICS DEBUG: service_impl.cpp:790: [TStatService::TEvNavigateKeySetResult] RequestId[ 25 ] 2026-01-08T15:36:53.788841Z node 2 :STATISTICS DEBUG: service_impl.cpp:1285: ReplySuccess(), request id = 25, ReplyToActorId = [2:4385:3339], StatRequests.size() = 1 2026-01-08T15:36:54.885103Z node 2 :STATISTICS DEBUG: service_impl.cpp:773: [TStatService::TEvGetStatistics] RequestId[ 26 ], ReplyToActorId[ [2:4420:3351]], StatType[ 0 ], StatRequestsCount[ 1 ] 2026-01-08T15:36:54.885423Z node 2 :STATISTICS DEBUG: service_impl.cpp:790: [TStatService::TEvNavigateKeySetResult] RequestId[ 26 ] 2026-01-08T15:36:54.885461Z node 2 :STATISTICS DEBUG: service_impl.cpp:1285: ReplySuccess(), request id = 26, ReplyToActorId = [2:4420:3351], StatRequests.size() = 1 2026-01-08T15:36:55.974304Z node 2 :STATISTICS DEBUG: service_impl.cpp:773: [TStatService::TEvGetStatistics] RequestId[ 27 ], ReplyToActorId[ [2:4455:3361]], StatType[ 0 ], StatRequestsCount[ 1 ] 2026-01-08T15:36:55.974690Z node 2 :STATISTICS DEBUG: service_impl.cpp:790: [TStatService::TEvNavigateKeySetResult] RequestId[ 27 ] 2026-01-08T15:36:55.974743Z node 2 :STATISTICS 
DEBUG: service_impl.cpp:1285: ReplySuccess(), request id = 27, ReplyToActorId = [2:4455:3361], StatRequests.size() = 1 2026-01-08T15:36:56.591408Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:309: [72075186224037894] EvPropagateTimeout 2026-01-08T15:36:56.761775Z node 1 :STATISTICS DEBUG: schemeshard_impl.cpp:8331: ResolveSA(), StatisticsAggregatorId=18446744073709551615, at schemeshard: 72057594046644480 2026-01-08T15:36:56.761876Z node 1 :STATISTICS DEBUG: schemeshard_impl.cpp:8343: ConnectToSA(), no StatisticsAggregatorId, at schemeshard: 72057594046644480 2026-01-08T15:36:56.761923Z node 1 :STATISTICS DEBUG: schemeshard_impl.cpp:8374: SendBaseStatsToSA(), no StatisticsAggregatorId, at schemeshard: 72057594046644480 2026-01-08T15:36:56.761978Z node 1 :STATISTICS DEBUG: schemeshard_impl.cpp:8300: Schedule next SendBaseStatsToSA in 30.000000s, at schemeshard: 72057594046644480 2026-01-08T15:36:57.350903Z node 2 :STATISTICS DEBUG: service_impl.cpp:773: [TStatService::TEvGetStatistics] RequestId[ 28 ], ReplyToActorId[ [2:4498:3376]], StatType[ 0 ], StatRequestsCount[ 1 ] 2026-01-08T15:36:57.351205Z node 2 :STATISTICS DEBUG: service_impl.cpp:790: [TStatService::TEvNavigateKeySetResult] RequestId[ 28 ] 2026-01-08T15:36:57.351242Z node 2 :STATISTICS DEBUG: service_impl.cpp:1285: ReplySuccess(), request id = 28, ReplyToActorId = [2:4498:3376], StatRequests.size() = 1 2026-01-08T15:36:58.432833Z node 2 :STATISTICS DEBUG: service_impl.cpp:773: [TStatService::TEvGetStatistics] RequestId[ 29 ], ReplyToActorId[ [2:4533:3388]], StatType[ 0 ], StatRequestsCount[ 1 ] 2026-01-08T15:36:58.433106Z node 2 :STATISTICS DEBUG: service_impl.cpp:790: [TStatService::TEvNavigateKeySetResult] RequestId[ 29 ] 2026-01-08T15:36:58.433159Z node 2 :STATISTICS DEBUG: service_impl.cpp:1285: ReplySuccess(), request id = 29, ReplyToActorId = [2:4533:3388], StatRequests.size() = 1 2026-01-08T15:36:59.058968Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:354: [72075186224037894] PropagateStatistics(), node count = 2, schemeshard count = 1 2026-01-08T15:36:59.059118Z node 2 :STATISTICS DEBUG: service_impl.cpp:943: EvPropagateStatistics, node id: 2 cookie: 6 2026-01-08T15:36:59.059204Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:258: [72075186224037894] EvPropagateStatisticsResponse, cookie: 6 2026-01-08T15:36:59.059367Z node 1 :STATISTICS DEBUG: service_impl.cpp:943: EvPropagateStatistics, node id: 1 cookie: 0 2026-01-08T15:36:59.113421Z node 2 :STATISTICS DEBUG: schemeshard_impl.cpp:8463: SendBaseStatsToSA(), path count: 2, paths with incomplete stats: 0, at schemeshard: 72075186224037897 2026-01-08T15:36:59.113485Z node 2 :STATISTICS DEBUG: schemeshard_impl.cpp:8300: Schedule next SendBaseStatsToSA in 5.000000s, at schemeshard: 72075186224037897 2026-01-08T15:36:59.113659Z node 2 :STATISTICS DEBUG: tx_schemeshard_stats.cpp:27: [72075186224037894] TTxSchemeShardStats::Execute: schemeshard id: 72075186224037897, stats byte size: 49, entries count: 2, are all stats full: 1 2026-01-08T15:36:59.126288Z node 2 :STATISTICS DEBUG: tx_schemeshard_stats.cpp:133: [72075186224037894] TTxSchemeShardStats::Complete 2026-01-08T15:36:59.755977Z node 2 :STATISTICS DEBUG: service_impl.cpp:773: [TStatService::TEvGetStatistics] RequestId[ 30 ], ReplyToActorId[ [2:4566:3400]], StatType[ 0 ], StatRequestsCount[ 1 ] 2026-01-08T15:36:59.756188Z node 2 :STATISTICS DEBUG: service_impl.cpp:790: [TStatService::TEvNavigateKeySetResult] RequestId[ 30 ] 2026-01-08T15:36:59.756216Z node 2 :STATISTICS DEBUG: service_impl.cpp:1285: ReplySuccess(), 
request id = 30, ReplyToActorId = [2:4566:3400], StatRequests.size() = 1 2026-01-08T15:37:00.865243Z node 2 :STATISTICS DEBUG: service_impl.cpp:773: [TStatService::TEvGetStatistics] RequestId[ 31 ], ReplyToActorId[ [2:4599:3412]], StatType[ 0 ], StatRequestsCount[ 1 ] 2026-01-08T15:37:00.865531Z node 2 :STATISTICS DEBUG: service_impl.cpp:790: [TStatService::TEvNavigateKeySetResult] RequestId[ 31 ] 2026-01-08T15:37:00.865577Z node 2 :STATISTICS DEBUG: service_impl.cpp:1285: ReplySuccess(), request id = 31, ReplyToActorId = [2:4599:3412], StatRequests.size() = 1 2026-01-08T15:37:01.889631Z node 2 :STATISTICS DEBUG: service_impl.cpp:773: [TStatService::TEvGetStatistics] RequestId[ 32 ], ReplyToActorId[ [2:4632:3422]], StatType[ 0 ], StatRequestsCount[ 1 ] 2026-01-08T15:37:01.889857Z node 2 :STATISTICS DEBUG: service_impl.cpp:790: [TStatService::TEvNavigateKeySetResult] RequestId[ 32 ] 2026-01-08T15:37:01.889888Z node 2 :STATISTICS DEBUG: service_impl.cpp:1285: ReplySuccess(), request id = 32, ReplyToActorId = [2:4632:3422], StatRequests.size() = 1 2026-01-08T15:37:02.393455Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:309: [72075186224037894] EvPropagateTimeout 2026-01-08T15:37:03.007733Z node 2 :STATISTICS DEBUG: service_impl.cpp:773: [TStatService::TEvGetStatistics] RequestId[ 33 ], ReplyToActorId[ [2:4669:3434]], StatType[ 0 ], StatRequestsCount[ 1 ] 2026-01-08T15:37:03.008001Z node 2 :STATISTICS DEBUG: service_impl.cpp:790: [TStatService::TEvNavigateKeySetResult] RequestId[ 33 ] 2026-01-08T15:37:03.008043Z node 2 :STATISTICS DEBUG: service_impl.cpp:1285: ReplySuccess(), request id = 33, ReplyToActorId = [2:4669:3434], StatRequests.size() = 1 2026-01-08T15:37:04.050302Z node 2 :STATISTICS DEBUG: service_impl.cpp:773: [TStatService::TEvGetStatistics] RequestId[ 34 ], ReplyToActorId[ [2:4704:3446]], StatType[ 0 ], StatRequestsCount[ 1 ] 2026-01-08T15:37:04.050630Z node 2 :STATISTICS DEBUG: service_impl.cpp:790: [TStatService::TEvNavigateKeySetResult] RequestId[ 34 ] 2026-01-08T15:37:04.050674Z node 2 :STATISTICS DEBUG: service_impl.cpp:1285: ReplySuccess(), request id = 34, ReplyToActorId = [2:4704:3446], StatRequests.size() = 1 2026-01-08T15:37:04.545200Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:354: [72075186224037894] PropagateStatistics(), node count = 2, schemeshard count = 1 2026-01-08T15:37:04.545624Z node 1 :STATISTICS DEBUG: service_impl.cpp:943: EvPropagateStatistics, node id: 1 cookie: 7 2026-01-08T15:37:04.545984Z node 2 :STATISTICS DEBUG: service_impl.cpp:943: EvPropagateStatistics, node id: 2 cookie: 0 2026-01-08T15:37:04.546080Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:258: [72075186224037894] EvPropagateStatisticsResponse, cookie: 7 2026-01-08T15:37:04.592369Z node 2 :STATISTICS DEBUG: schemeshard_impl.cpp:8463: SendBaseStatsToSA(), path count: 2, paths with incomplete stats: 0, at schemeshard: 72075186224037897 2026-01-08T15:37:04.592441Z node 2 :STATISTICS DEBUG: schemeshard_impl.cpp:8300: Schedule next SendBaseStatsToSA in 5.000000s, at schemeshard: 72075186224037897 2026-01-08T15:37:04.592667Z node 2 :STATISTICS DEBUG: tx_schemeshard_stats.cpp:27: [72075186224037894] TTxSchemeShardStats::Execute: schemeshard id: 72075186224037897, stats byte size: 49, entries count: 2, are all stats full: 1 2026-01-08T15:37:04.606220Z node 2 :STATISTICS DEBUG: tx_schemeshard_stats.cpp:133: [72075186224037894] TTxSchemeShardStats::Complete 2026-01-08T15:37:05.166927Z node 2 :STATISTICS DEBUG: service_impl.cpp:773: [TStatService::TEvGetStatistics] RequestId[ 35 ], 
ReplyToActorId[ [2:4739:3460]], StatType[ 0 ], StatRequestsCount[ 1 ] 2026-01-08T15:37:05.167164Z node 2 :STATISTICS DEBUG: service_impl.cpp:790: [TStatService::TEvNavigateKeySetResult] RequestId[ 35 ] 2026-01-08T15:37:05.167198Z node 2 :STATISTICS DEBUG: service_impl.cpp:1285: ReplySuccess(), request id = 35, ReplyToActorId = [2:4739:3460], StatRequests.size() = 1 2026-01-08T15:37:05.167555Z node 3 :STATISTICS DEBUG: service_impl.cpp:773: [TStatService::TEvGetStatistics] RequestId[ 1 ], ReplyToActorId[ [3:4741:2581]], StatType[ 0 ], StatRequestsCount[ 1 ] 2026-01-08T15:37:05.170021Z node 3 :STATISTICS DEBUG: service_impl.cpp:790: [TStatService::TEvNavigateKeySetResult] RequestId[ 1 ] 2026-01-08T15:37:05.170096Z node 3 :STATISTICS DEBUG: service_impl.cpp:1244: ConnectToSA(), pipe client id = [3:4751:2585] 2026-01-08T15:37:05.170142Z node 3 :STATISTICS DEBUG: service_impl.cpp:1273: SyncNode(), pipe client id = [3:4751:2585] 2026-01-08T15:37:05.173766Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:97: [72075186224037894] EvServerConnected, pipe server id = [2:4759:3462] 2026-01-08T15:37:05.174411Z node 3 :STATISTICS DEBUG: service_impl.cpp:1092: EvClientConnected, node id = 3, client id = [3:4751:2585], server id = [2:4759:3462], tablet id = 72075186224037894, status = OK 2026-01-08T15:37:05.174659Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:144: [72075186224037894] EvConnectNode, pipe server id = [2:4759:3462], node id = 3, have schemeshards count = 0, need schemeshards count = 1 2026-01-08T15:37:05.174710Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:338: [72075186224037894] SendStatisticsToNode(), node id = 3, schemeshard count = 1 2026-01-08T15:37:05.174848Z node 3 :STATISTICS DEBUG: service_impl.cpp:943: EvPropagateStatistics, node id: 3 cookie: 18446744073709551615 2026-01-08T15:37:05.174936Z node 3 :STATISTICS DEBUG: service_impl.cpp:1285: ReplySuccess(), request id = 1, ReplyToActorId = [3:4741:2581], StatRequests.size() = 1 2026-01-08T15:37:05.175108Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:258: [72075186224037894] EvPropagateStatisticsResponse, cookie: 18446744073709551615 |88.6%| [TM] {BAZEL_UPLOAD} ydb/core/statistics/service/ut/unittest >> ReadSessionImplTest::DestroyPartitionStreamRequest [GOOD] >> ReadSessionImplTest::DecompressZstd [GOOD] >> ReadSessionImplTest::DecompressRawEmptyMessage [GOOD] >> ReadSessionImplTest::DecompressZstdEmptyMessage [GOOD] >> ReadSessionImplTest::DecompressWithSynchronousExecutor [GOOD] >> ReadSessionImplTest::HoleBetweenOffsets [GOOD] >> ReadSessionImplTest::LOGBROKER_7702 [GOOD] |88.7%| [TA] $(B)/ydb/core/grpc_services/grpc_request_check_actor_ut/test-results/unittest/{meta.json ... results_accumulator.log} |88.7%| [TA] {RESULT} $(B)/ydb/core/grpc_services/grpc_request_check_actor_ut/test-results/unittest/{meta.json ... results_accumulator.log} |88.7%| [TA] {BAZEL_UPLOAD} $(B)/ydb/core/grpc_services/grpc_request_check_actor_ut/test-results/unittest/{meta.json ... 
results_accumulator.log} >> KqpQueryPerf::UpdateOn-QueryService+UseSink >> DistributedEraseTests::DistributedEraseTxShouldFailOnVariousErrors [GOOD] >> EraseRowsTests::ConditionalEraseRowsShouldErase >> DistributedEraseTests::ConditionalEraseRowsAsyncIndex [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/public/sdk/cpp/src/client/persqueue_public/ut/unittest >> ReadSessionImplTest::LOGBROKER_7702 [GOOD] Test command err: 2026-01-08T15:37:08.233494Z :DEBUG: [db] [sessionid] [cluster] In Reconnect, ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2026-01-08T15:37:08.233526Z :DEBUG: [db] [sessionid] [cluster] New values: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2026-01-08T15:37:08.233548Z :DEBUG: [db] [sessionid] [cluster] Reconnecting session to cluster cluster in 0.000000s 2026-01-08T15:37:08.234030Z :DEBUG: [db] [sessionid] [cluster] Successfully connected. Initializing session 2026-01-08T15:37:08.234931Z :INFO: [db] [sessionid] [cluster] Server session id: 123-session-id-321 2026-01-08T15:37:08.249080Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2026-01-08T15:37:08.250057Z :INFO: [db] [sessionid] [cluster] Confirm partition stream create. Partition stream id: 1. Cluster: "TestCluster". Topic: "TestTopic". Partition: 1. Read offset: (NULL) 2026-01-08T15:37:08.252125Z :DEBUG: [db] [sessionid] [cluster] In Reconnect, ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2026-01-08T15:37:08.252152Z :DEBUG: [db] [sessionid] [cluster] New values: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2026-01-08T15:37:08.252173Z :DEBUG: [db] [sessionid] [cluster] Reconnecting session to cluster cluster in 0.000000s 2026-01-08T15:37:08.254253Z :DEBUG: [db] [sessionid] [cluster] Successfully connected. Initializing session 2026-01-08T15:37:08.255050Z :INFO: [db] [sessionid] [cluster] Server session id: 123-session-id-321 2026-01-08T15:37:08.255265Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2026-01-08T15:37:08.255563Z :INFO: [db] [sessionid] [cluster] Confirm partition stream create. Partition stream id: 1. Cluster: "TestCluster". Topic: "TestTopic". Partition: 1. Read offset: (NULL) 2026-01-08T15:37:08.255933Z :INFO: [db] [sessionid] [cluster] Confirm partition stream destroy. Partition stream id: 1. Cluster: "TestCluster". Topic: "TestTopic". Partition: 1 2026-01-08T15:37:08.257085Z :DEBUG: [db] [sessionid] [cluster] In Reconnect, ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2026-01-08T15:37:08.257116Z :DEBUG: [db] [sessionid] [cluster] New values: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2026-01-08T15:37:08.257140Z :DEBUG: [db] [sessionid] [cluster] Reconnecting session to cluster cluster in 0.000000s 2026-01-08T15:37:08.257551Z :DEBUG: [db] [sessionid] [cluster] Successfully connected. Initializing session 2026-01-08T15:37:08.258433Z :INFO: [db] [sessionid] [cluster] Server session id: 123-session-id-321 2026-01-08T15:37:08.258643Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2026-01-08T15:37:08.258844Z :INFO: [db] [sessionid] [cluster] Confirm partition stream create. Partition stream id: 1. Cluster: "TestCluster". Topic: "TestTopic". Partition: 1. 
Read offset: (NULL) 2026-01-08T15:37:08.260227Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2026-01-08T15:37:08.262176Z :DEBUG: Decompression task done. Partition/PartitionSessionId: 1 (1-1) 2026-01-08T15:37:08.262345Z :DEBUG: Take Data. Partition 1. Read: {0, 0} (1-1) 2026-01-08T15:37:08.262419Z :DEBUG: [db] [sessionid] [cluster] The application data is transferred to the client. Number of messages 1, size 3 bytes 2026-01-08T15:37:08.263802Z :DEBUG: [db] [sessionid] [cluster] In Reconnect, ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2026-01-08T15:37:08.263836Z :DEBUG: [db] [sessionid] [cluster] New values: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2026-01-08T15:37:08.263862Z :DEBUG: [db] [sessionid] [cluster] Reconnecting session to cluster cluster in 0.000000s 2026-01-08T15:37:08.264166Z :DEBUG: [db] [sessionid] [cluster] Successfully connected. Initializing session 2026-01-08T15:37:08.264910Z :INFO: [db] [sessionid] [cluster] Server session id: 123-session-id-321 2026-01-08T15:37:08.265087Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2026-01-08T15:37:08.265290Z :INFO: [db] [sessionid] [cluster] Confirm partition stream create. Partition stream id: 1. Cluster: "TestCluster". Topic: "TestTopic". Partition: 1. Read offset: (NULL) 2026-01-08T15:37:08.265771Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2026-01-08T15:37:08.265912Z :DEBUG: Decompression task done. Partition/PartitionSessionId: 1 (1-1) 2026-01-08T15:37:08.266030Z :DEBUG: Take Data. Partition 1. Read: {0, 0} (1-1) 2026-01-08T15:37:08.266083Z :DEBUG: [db] [sessionid] [cluster] The application data is transferred to the client. Number of messages 1, size 0 bytes 2026-01-08T15:37:08.273148Z :DEBUG: [db] [sessionid] [cluster] In Reconnect, ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2026-01-08T15:37:08.273217Z :DEBUG: [db] [sessionid] [cluster] New values: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2026-01-08T15:37:08.273253Z :DEBUG: [db] [sessionid] [cluster] Reconnecting session to cluster cluster in 0.000000s 2026-01-08T15:37:08.273550Z :DEBUG: [db] [sessionid] [cluster] Successfully connected. Initializing session 2026-01-08T15:37:08.274244Z :INFO: [db] [sessionid] [cluster] Server session id: 123-session-id-321 2026-01-08T15:37:08.274442Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2026-01-08T15:37:08.274666Z :INFO: [db] [sessionid] [cluster] Confirm partition stream create. Partition stream id: 1. Cluster: "TestCluster". Topic: "TestTopic". Partition: 1. Read offset: (NULL) 2026-01-08T15:37:08.275086Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2026-01-08T15:37:08.275204Z :DEBUG: Decompression task done. Partition/PartitionSessionId: 1 (1-1) 2026-01-08T15:37:08.275295Z :DEBUG: Take Data. Partition 1. Read: {0, 0} (1-1) 2026-01-08T15:37:08.275336Z :DEBUG: [db] [sessionid] [cluster] The application data is transferred to the client. 
Number of messages 1, size 0 bytes 2026-01-08T15:37:08.276348Z :DEBUG: [db] [sessionid] [cluster] In Reconnect, ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2026-01-08T15:37:08.276375Z :DEBUG: [db] [sessionid] [cluster] New values: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2026-01-08T15:37:08.276392Z :DEBUG: [db] [sessionid] [cluster] Reconnecting session to cluster cluster in 0.000000s 2026-01-08T15:37:08.276698Z :DEBUG: [db] [sessionid] [cluster] Successfully connected. Initializing session 2026-01-08T15:37:08.277294Z :INFO: [db] [sessionid] [cluster] Server session id: 123-session-id-321 2026-01-08T15:37:08.277424Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2026-01-08T15:37:08.277604Z :INFO: [db] [sessionid] [cluster] Confirm partition stream create. Partition stream id: 1. Cluster: "TestCluster". Topic: "TestTopic". Partition: 1. Read offset: (NULL) 2026-01-08T15:37:08.278161Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2026-01-08T15:37:08.278442Z :DEBUG: Decompression task done. Partition/PartitionSessionId: 1 (1-1) 2026-01-08T15:37:08.278551Z :DEBUG: Take Data. Partition 1. Read: {0, 0} (1-1) 2026-01-08T15:37:08.278596Z :DEBUG: [db] [sessionid] [cluster] The application data is transferred to the client. Number of messages 1, size 0 bytes 2026-01-08T15:37:08.279610Z :DEBUG: [db] [sessionid] [cluster] In Reconnect, ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2026-01-08T15:37:08.279651Z :DEBUG: [db] [sessionid] [cluster] New values: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2026-01-08T15:37:08.279679Z :DEBUG: [db] [sessionid] [cluster] Reconnecting session to cluster cluster in 0.000000s 2026-01-08T15:37:08.280003Z :DEBUG: [db] [sessionid] [cluster] Successfully connected. Initializing session 2026-01-08T15:37:08.280681Z :INFO: [db] [sessionid] [cluster] Server session id: 123-session-id-321 2026-01-08T15:37:08.280834Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2026-01-08T15:37:08.281029Z :INFO: [db] [sessionid] [cluster] Confirm partition stream create. Partition stream id: 1. Cluster: "TestCluster". Topic: "TestTopic". Partition: 1. Read offset: (NULL) 2026-01-08T15:37:08.282767Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2026-01-08T15:37:08.283395Z :DEBUG: Decompression task done. Partition/PartitionSessionId: 1 (1-1) 2026-01-08T15:37:08.283491Z :DEBUG: Take Data. Partition 1. Read: {0, 0} (1-1) 2026-01-08T15:37:08.283536Z :DEBUG: [db] [sessionid] [cluster] The application data is transferred to the client. Number of messages 1, size 3 bytes 2026-01-08T15:37:08.284873Z :DEBUG: [db] [sessionid] [cluster] In Reconnect, ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2026-01-08T15:37:08.284914Z :DEBUG: [db] [sessionid] [cluster] New values: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2026-01-08T15:37:08.284948Z :DEBUG: [db] [sessionid] [cluster] Reconnecting session to cluster cluster in 0.000000s 2026-01-08T15:37:08.285423Z :DEBUG: [db] [sessionid] [cluster] Successfully connected. 
Initializing session 2026-01-08T15:37:08.285908Z :INFO: [db] [sessionid] [cluster] Server session id: 123-session-id-321 2026-01-08T15:37:08.286023Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2026-01-08T15:37:08.286183Z :INFO: [db] [sessionid] [cluster] Confirm partition stream create. Partition stream id: 1. Cluster: "TestCluster". Topic: "TestTopic". Partition: 1. Read offset: (NULL) 2026-01-08T15:37:08.287378Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 Post function 2026-01-08T15:37:08.288297Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 Post function 2026-01-08T15:37:08.290981Z :DEBUG: Decompression task done. Partition/PartitionSessionId: 1 (10-11) 2026-01-08T15:37:08.291187Z :DEBUG: Decompression task done. Partition/PartitionSessionId: 1 (1-2) 2026-01-08T15:37:08.291326Z :DEBUG: Take Data. Partition 1. Read: {0, 0} (1-1) 2026-01-08T15:37:08.291368Z :DEBUG: Take Data. Partition 1. Read: {0, 1} (2-2) 2026-01-08T15:37:08.291399Z :DEBUG: Take Data. Partition 1. Read: {0, 0} (10-10) 2026-01-08T15:37:08.291421Z :DEBUG: Take Data. Partition 1. Read: {0, 1} (11-11) 2026-01-08T15:37:08.291462Z :DEBUG: [db] [sessionid] [cluster] The application data is transferred to the client. Number of messages 2, size 16 bytes 2026-01-08T15:37:08.291486Z :DEBUG: [db] [sessionid] [cluster] The application data is transferred to the client. Number of messages 2, size 16 bytes got data event: DataReceived { PartitionStreamId: 1 PartitionId: 1 Message { Data: ..8 bytes.. Partition stream id: 1 Cluster: "TestCluster". Topic: "TestTopic" Partition: 1 PartitionKey: "" Information: { Offset: 1 SeqNo: 1 MessageGroupId: "src_id" CreateTime: 1970-01-01T00:00:00.042000Z WriteTime: 1970-01-01T00:00:00.042000Z Ip: "::1" UncompressedSize: 0 Meta: { } } } Message { Data: ..8 bytes.. Partition stream id: 1 Cluster: "TestCluster". Topic: "TestTopic" Partition: 1 PartitionKey: "" Information: { Offset: 2 SeqNo: 1 MessageGroupId: "src_id" CreateTime: 1970-01-01T00:00:00.042000Z WriteTime: 1970-01-01T00:00:00.042000Z Ip: "::1" UncompressedSize: 0 Meta: { } } } Message { Data: ..8 bytes.. Partition stream id: 1 Cluster: "TestCluster". Topic: "TestTopic" Partition: 1 PartitionKey: "" Information: { Offset: 10 SeqNo: 1 MessageGroupId: "src_id" CreateTime: 1970-01-01T00:00:00.042000Z WriteTime: 1970-01-01T00:00:00.042000Z Ip: "::1" UncompressedSize: 0 Meta: { } } } Message { Data: ..8 bytes.. Partition stream id: 1 Cluster: "TestCluster". Topic: "TestTopic" Partition: 1 PartitionKey: "" Information: { Offset: 11 SeqNo: 1 MessageGroupId: "src_id" CreateTime: 1970-01-01T00:00:00.042000Z WriteTime: 1970-01-01T00:00:00.042000Z Ip: "::1" UncompressedSize: 0 Meta: { } } } } 2026-01-08T15:37:08.291697Z :DEBUG: [db] [sessionid] [cluster] Commit offsets [1, 3). Partition stream id: 1 Got commit req { cookies { assign_id: 1 partition_cookie: 1 } } 2026-01-08T15:37:08.291876Z :DEBUG: [db] [sessionid] [cluster] Commit offsets [10, 12). 
Partition stream id: 1 Got commit req { cookies { assign_id: 1 partition_cookie: 2 } } |88.8%| [TM] {BAZEL_UPLOAD} ydb/public/sdk/cpp/src/client/persqueue_public/ut/unittest >> EraseRowsTests::ConditionalEraseRowsShouldEraseOnUint32 [GOOD] >> EraseRowsTests::ConditionalEraseRowsShouldEraseOnTimestamp64 >> KqpQueryPerf::KvRead+QueryService |88.8%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/blobstorage/ut_vdisk_io/ydb-core-blobstorage-ut_vdisk_io |88.8%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/blobstorage/ut_vdisk_io/ydb-core-blobstorage-ut_vdisk_io |88.8%| [LD] {RESULT} $(B)/ydb/core/blobstorage/ut_vdisk_io/ydb-core-blobstorage-ut_vdisk_io >> DistributedEraseTests::ConditionalEraseRowsShouldEraseOnUint32 [GOOD] >> DistributedEraseTests::ConditionalEraseRowsShouldFailOnSchemeTx >> TBSV::ShouldLimitBlockStoreVolumeDropRate [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/pg/unittest >> KqpPg::PgUpdateCompoundKey-useSink [GOOD] Test command err: Trying to start YDB, gRPC: 14107, MsgBus: 9430 2026-01-08T15:31:27.846731Z node 1 :METADATA_PROVIDER WARN: log.cpp:841: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7593013789116842054:2147];send_to=[0:7307199536658146131:7762515]; 2026-01-08T15:31:27.846790Z node 1 :METADATA_PROVIDER ERROR: log.cpp:841: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2026-01-08T15:31:27.985185Z node 1 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: LookupError, path: Root/.metadata/script_executions 2026-01-08T15:31:28.198468Z node 1 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: LookupError, path: Root/.metadata/script_executions 2026-01-08T15:31:28.382455Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1374: Notification cookie mismatch for subscription [1:7593013789116841943:2081] 1767886287790044 != 1767886287790047 2026-01-08T15:31:28.389300Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2026-01-08T15:31:28.389442Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2026-01-08T15:31:28.401540Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2026-01-08T15:31:28.425072Z node 1 :IMPORT WARN: schemeshard_import.cpp:440: Table profiles were not loaded 2026-01-08T15:31:28.685141Z node 1 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: PathErrorUnknown, path: Root/.metadata/script_executions 2026-01-08T15:31:28.862911Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2026-01-08T15:31:28.862952Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2026-01-08T15:31:28.862960Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2026-01-08T15:31:28.863066Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 
2026-01-08T15:31:28.863132Z node 1 :TX_CONVEYOR ERROR: log.cpp:841: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2026-01-08T15:31:29.635916Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2026-01-08T15:31:29.643532Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:123: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 16 2026-01-08T15:31:32.846487Z node 1 :METADATA_PROVIDER ERROR: log.cpp:841: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7593013789116842054:2147];send_to=[0:7307199536658146131:7762515]; 2026-01-08T15:31:32.853222Z node 1 :METADATA_PROVIDER ERROR: log.cpp:841: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2026-01-08T15:31:33.121549Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:31:33.341605Z node 1 :READ_TABLE_API WARN: rpc_read_table.cpp:116: ForgetAction occurred, send TEvPoisonPill --!syntax_pg INSERT INTO Pg1000_b (key, value) VALUES ( '0'::int2, ARRAY ['false'::bool, 'false'::bool] ); 2026-01-08T15:31:33.389271Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7593013814886646716:2336], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:31:33.389368Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:263: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:31:33.389839Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7593013814886646727:2338], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:31:33.389880Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7593013814886646729:2340], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:31:33.389912Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:263: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:31:33.395021Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710660:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2026-01-08T15:31:33.412013Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7593013814886646732:2341], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710660 completed, doublechecking } 2026-01-08T15:31:33.547343Z node 1 :TX_PROXY ERROR: schemereq.cpp:624: Actor# [1:7593013814886646784:2605] txid# 281474976710661, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 42], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } --!syntax_pg INSERT INTO Pg1000_b (key, value) VALUES ( '1'::int2, ARRAY ['true'::bool, 'true'::bool] ); 18 2026-01-08T15:31:34.146948Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:31:34.237883Z node 1 :READ_TABLE_API WARN: rpc_read_table.cpp:116: ForgetAction occurred, send TEvPoisonPill --!syntax_pg INSERT INTO Pg1002_b (key, value) VALUES ( '0'::int2, ARRAY ['0'::"char", '0'::"char"] ); --!syntax_pg INSERT INTO Pg1002_b (key, value) VALUES ( '1'::int2, ARRAY ['1'::"char", '1'::"char"] ); --!syntax_pg INSERT INTO Pg1002_b (key, value) VALUES ( '2'::int2, ARRAY ['2'::"char", '2'::"char"] ); 21 2026-01-08T15:31:34.957953Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710671:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) --!syntax_pg INSERT INTO Pg1005_b (key, value) VALUES ( '0'::int2, ARRAY ['0'::int2, '0'::int2] ); --!syntax_pg INSERT INTO Pg1005_b (key, value) VALUES ( '1'::int2, ARRAY ['1'::int2, '1'::int2] ); --!syntax_pg INSERT INTO Pg1005_b (key, value) VALUES ( '2'::int2, ARRAY ['2'::int2, '2'::int2] ); 23 2026-01-08T15:31:36.004156Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710676:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) --!syntax_pg INSERT INTO Pg1007_b (key, value) VALUES ( '0'::int2, ARRAY ['0'::int4, '0'::int4] ); --!syntax_pg INSERT INTO Pg1007_b (key, value) VALUES ( '1'::int2, ARRAY ['1'::int4, '1'::int4] ); --!syntax_pg INSERT INTO Pg1007_b (key, value) VALUES ( '2'::int2, ARRAY ['2'::int4, '2'::int4] ); 20 2026-01-08T15:31:36.735613Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710681:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:31:36.818586Z node 1 :READ_TABLE_API WARN: rpc_read_table.cpp:116: ForgetAction occurred, send TEvPoisonPill --!syntax_pg INSERT INTO Pg1016_b (key, value) VALUES ( '0'::int2, ARRAY ['0'::int8, '0'::int8] ); --!syntax_pg INSERT INTO Pg1016_b (key, value) VALUES ( '1'::int2, ARRAY ['1'::int8, '1'::int8] ); --!syntax_pg INSERT INTO Pg1016_b (key, value) VALUES ( '2'::int2, ARRAY ['2'::int8, '2'::int8] ); 700 2026-01-08T15:31:37.573962Z node 1 
:FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710687:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:31:37.645917Z node 1 :READ_TABLE_API WARN: rpc_read_table.cpp:116: ForgetAction occurred, send TEvPoisonPill --!syntax_pg INSERT INTO Pg1021_b (key, value) VALUES ( '0'::int2, ARRAY ['0.5'::float4, '0.5'::float4] ); --!syntax_pg INSERT INTO Pg1021_b (key, value) VALUES ( '1'::int2, ARRAY ['1.5'::float4, '1.5'::float4] ); --!syntax_pg INSERT INTO Pg1021_b (key, value) VALUES ( '2'::int2, ARRAY ['2.5'::float4, '2.5'::float4] ); 701 2026-01-08T15:31:38.223647Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710693:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:31:38.298075Z node 1 :READ_TABLE_API WARN: rpc_read_table.cpp:116: ForgetAction occurred, send TEvPoisonPill --!syntax_pg INSERT INTO Pg ... D WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715659:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2026-01-08T15:36:55.750684Z node 9 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [9:7593015197396496886:2343], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715659 completed, doublechecking } 2026-01-08T15:36:55.844401Z node 9 :TX_PROXY ERROR: schemereq.cpp:624: Actor# [9:7593015197396496941:2601] txid# 281474976715660, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 42], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2026-01-08T15:36:56.201050Z node 9 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:625: Compilation failed, self: [9:7593015201691464281:2357], status: GENERIC_ERROR, issues:
: Error: Type annotation, code: 1030
:1:1: Error: At function: KiWriteTable!
:1:1: Error: Cannot update primary key column: key1
:1:1: Error: Cannot update primary key column: key2 2026-01-08T15:36:56.201714Z node 9 :KQP_SESSION WARN: {KQPSA@kqp_session_actor.cpp:2679} SessionId: ydb://session/3?node_id=9&id=NGQ4YTViNjctNGJlNWU1MDktZGUxMjAxMTEtNDE1YWExNDQ=, ActorId: [9:7593015201691464274:2353], ActorState: ExecuteState, LegacyTraceId: 01kef41xk4ddcxs2m8spp857s7, ReplyQueryCompileError, remove tx status# GENERIC_ERROR issues# { message: "Type annotation" issue_code: 1030 severity: 1 issues { position { row: 1 column: 1 } message: "At function: KiWriteTable!" end_position { row: 1 column: 1 } severity: 1 issues { position { row: 1 column: 1 } message: "Cannot update primary key column: key1" end_position { row: 1 column: 1 } severity: 1 } issues { position { row: 1 column: 1 } message: "Cannot update primary key column: key2" end_position { row: 1 column: 1 } severity: 1 } } } tx_id# trace_id# 2026-01-08T15:36:56.251824Z node 9 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) Trying to start YDB, gRPC: 5861, MsgBus: 9607 2026-01-08T15:36:58.645432Z node 10 :METADATA_PROVIDER WARN: log.cpp:841: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[10:7593015208051328146:2071];send_to=[0:7307199536658146131:7762515]; 2026-01-08T15:36:58.645513Z node 10 :METADATA_PROVIDER ERROR: log.cpp:841: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2026-01-08T15:36:58.664121Z node 10 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: LookupError, path: Root/.metadata/script_executions 2026-01-08T15:36:58.760667Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2026-01-08T15:36:58.760763Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2026-01-08T15:36:58.762749Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Connecting -> Connected 2026-01-08T15:36:58.782857Z node 10 :GRPC_SERVER WARN: grpc_request_proxy.cpp:575: SchemeBoardDelete /Root Strong=0 2026-01-08T15:36:58.785687Z node 10 :GRPC_SERVER WARN: grpc_request_proxy.cpp:575: SchemeBoardDelete /Root Strong=0 2026-01-08T15:36:58.832142Z node 10 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2026-01-08T15:36:58.832182Z node 10 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2026-01-08T15:36:58.832194Z node 10 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2026-01-08T15:36:58.832329Z node 10 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2026-01-08T15:36:58.843147Z node 10 :IMPORT WARN: schemeshard_import.cpp:440: Table profiles were not loaded 2026-01-08T15:36:58.888846Z node 10 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: 
PathErrorUnknown, path: Root/.metadata/script_executions 2026-01-08T15:36:59.633179Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2026-01-08T15:36:59.744106Z node 10 :TX_CONVEYOR ERROR: log.cpp:841: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2026-01-08T15:36:59.744550Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:123: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2026-01-08T15:37:03.645617Z node 10 :METADATA_PROVIDER ERROR: log.cpp:841: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[10:7593015208051328146:2071];send_to=[0:7307199536658146131:7762515]; 2026-01-08T15:37:03.645754Z node 10 :METADATA_PROVIDER ERROR: log.cpp:841: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2026-01-08T15:37:03.971748Z node 10 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [10:7593015229526165480:2326], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:37:03.971905Z node 10 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:263: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:37:03.972375Z node 10 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [10:7593015229526165489:2327], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:37:03.972462Z node 10 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:263: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:37:04.004266Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) 2026-01-08T15:37:04.082704Z node 10 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [10:7593015233821132882:2338], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:37:04.082830Z node 10 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:263: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:37:04.082902Z node 10 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [10:7593015233821132887:2341], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:37:04.083220Z node 10 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [10:7593015233821132889:2342], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:37:04.083292Z node 10 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:263: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2026-01-08T15:37:04.089980Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710659:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2026-01-08T15:37:04.102381Z node 10 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [10:7593015233821132890:2343], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710659 completed, doublechecking } 2026-01-08T15:37:04.181897Z node 10 :TX_PROXY ERROR: schemereq.cpp:624: Actor# [10:7593015233821132942:2597] txid# 281474976710660, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 42], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2026-01-08T15:37:04.801116Z node 10 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:625: Compilation failed, self: [10:7593015233821133006:2364], status: GENERIC_ERROR, issues:
: Error: Type annotation, code: 1030
:1:1: Error: At function: KiWriteTable!
:1:1: Error: Cannot update primary key column: key1
:1:1: Error: Cannot update primary key column: key2 2026-01-08T15:37:04.801731Z node 10 :KQP_SESSION WARN: {KQPSA@kqp_session_actor.cpp:2679} SessionId: ydb://session/3?node_id=10&id=YzQ0NmRlOWQtZWNlNTliM2QtMjczZDYzZDctNTRkNDhkZGQ=, ActorId: [10:7593015233821132999:2360], ActorState: ExecuteState, LegacyTraceId: 01kef425zv7jvpx5n84hw7ntyg, ReplyQueryCompileError, remove tx status# GENERIC_ERROR issues# { message: "Type annotation" issue_code: 1030 severity: 1 issues { position { row: 1 column: 1 } message: "At function: KiWriteTable!" end_position { row: 1 column: 1 } severity: 1 issues { position { row: 1 column: 1 } message: "Cannot update primary key column: key1" end_position { row: 1 column: 1 } severity: 1 } issues { position { row: 1 column: 1 } message: "Cannot update primary key column: key2" end_position { row: 1 column: 1 } severity: 1 } } } tx_id# trace_id# 2026-01-08T15:37:04.812222Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689) |88.8%| [TM] {BAZEL_UPLOAD} ydb/core/kqp/ut/pg/unittest >> KqpQueryPerf::MultiDeleteFromTable-QueryService-UseSink >> TExportToS3Tests::CompletedExportEndTime [GOOD] >> KqpQueryPerf::IndexUpsert+QueryService-UseSink >> KqpQueryPerf::IdxLookupJoinThreeWay+QueryService >> TExportToS3Tests::DisableAutoDropping >> TConsoleTests::TestAlterStorageUnitsOfSharedTenant [GOOD] >> TConsoleTests::TestAlterServerlessTenant ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/datashard/ut_erase_rows/unittest >> DistributedEraseTests::ConditionalEraseRowsAsyncIndex [GOOD] Test command err: ... waiting for SysViewsRoster update finished 2026-01-08T15:36:47.055057Z node 1 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:125: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, got undelivered to scheme cache: ActorUnknown 2026-01-08T15:36:47.165915Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:114:2161], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests }
2026-01-08T15:36:47.166247Z node 1 :METADATA_PROVIDER ERROR: log.cpp:841: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError;
2026-01-08T15:36:47.166419Z node 1 :METADATA_PROVIDER ERROR: log.cpp:841: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError;
2026-01-08T15:36:47.457370Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected
2026-01-08T15:36:47.457533Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting
2026-01-08T15:36:47.547284Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1374: Notification cookie mismatch for subscription [1:34:2081] 1767886604223144 != 1767886604223148
2026-01-08T15:36:47.563456Z node 1 :IMPORT WARN: schemeshard_import.cpp:440: Table profiles were not loaded
2026-01-08T15:36:47.613528Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected
2026-01-08T15:36:47.718065Z node 1 :KQP_PROXY WARN: kqp_finalize_script_service.cpp:137: [ScriptExecutions] [TKqpFinalizeScriptService] Failed to check script execution tables existence, scheme status: PathErrorUnknown, path: Root/.metadata/script_executions
... waiting for SysViewsRoster update finished (done)
2026-01-08T15:36:47.997493Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311)
2026-01-08T15:36:48.012020Z node 1 :TX_CONVEYOR ERROR: log.cpp:841: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup;
2026-01-08T15:36:48.123210Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:183: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:689)
2026-01-08T15:36:48.180955Z node 1 :TX_DATASHARD INFO: datashard.cpp:374: TDataShard::OnActivateExecutor: tablet 72075186224037888 actor [1:908:2781]
2026-01-08T15:36:48.181231Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:661: TxInitSchema.Execute
2026-01-08T15:36:48.227202Z node 1 :TX_DATASHARD INFO: datashard.cpp:374: TDataShard::OnActivateExecutor: tablet 72075186224037889 actor [1:911:2783]
2026-01-08T15:36:48.227455Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:661: TxInitSchema.Execute
2026-01-08T15:36:48.235494Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:727: TxInitSchema.Complete
2026-01-08T15:36:48.235800Z node 1 :TX_DATASHARD INFO: datashard.cpp:374: TDataShard::OnActivateExecutor: tablet 72075186224037890 actor [1:915:2787]
2026-01-08T15:36:48.235959Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:661: TxInitSchema.Execute
2026-01-08T15:36:48.243865Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:50: TDataShard::TTxInit::Execute
2026-01-08T15:36:48.245464Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1324: LoadChangeRecords: QueueSize: 0, at tablet: 72075186224037888
2026-01-08T15:36:48.245553Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1380: LoadLockChangeRecords at tablet: 72075186224037888
2026-01-08T15:36:48.245610Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1429: LoadChangeRecordCommits at tablet: 72075186224037888
2026-01-08T15:36:48.245944Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:94: TDataShard::TTxInit::Complete
2026-01-08T15:36:48.246291Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:102: TDataShard::TTxInitRestored::Execute
2026-01-08T15:36:48.246394Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:142: DataShard 72075186224037888 persisting started state actor id [1:955:2781] in generation 1
2026-01-08T15:36:48.246944Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:727: TxInitSchema.Complete
2026-01-08T15:36:48.247032Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:50: TDataShard::TTxInit::Execute
2026-01-08T15:36:48.248454Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1324: LoadChangeRecords: QueueSize: 0, at tablet: 72075186224037889
2026-01-08T15:36:48.248511Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1380: LoadLockChangeRecords at tablet: 72075186224037889
2026-01-08T15:36:48.248556Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1429: LoadChangeRecordCommits at tablet: 72075186224037889
2026-01-08T15:36:48.248807Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:94: TDataShard::TTxInit::Complete
2026-01-08T15:36:48.248930Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:102: TDataShard::TTxInitRestored::Execute
2026-01-08T15:36:48.248981Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:142: DataShard 72075186224037889 persisting started state actor id [1:956:2783] in generation 1
2026-01-08T15:36:48.249389Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:727: TxInitSchema.Complete
2026-01-08T15:36:48.249444Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:50: TDataShard::TTxInit::Execute
2026-01-08T15:36:48.250683Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1324: LoadChangeRecords: QueueSize: 0, at tablet: 72075186224037890
2026-01-08T15:36:48.250773Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1380: LoadLockChangeRecords at tablet: 72075186224037890
2026-01-08T15:36:48.250827Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1429: LoadChangeRecordCommits at tablet: 72075186224037890
2026-01-08T15:36:48.251081Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:94: TDataShard::TTxInit::Complete
2026-01-08T15:36:48.251173Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:102: TDataShard::TTxInitRestored::Execute
2026-01-08T15:36:48.251235Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:142: DataShard 72075186224037890 persisting started state actor id [1:957:2787] in generation 1
2026-01-08T15:36:48.262159Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:151: TDataShard::TTxInitRestored::Complete
2026-01-08T15:36:48.295728Z node 1 :TX_DATASHARD INFO: datashard.cpp:418: Switched to work state WaitScheme tabletId 72075186224037888
2026-01-08T15:36:48.295923Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:458: 72075186224037888 not sending time cast registration request in state WaitScheme: missing processing params
2026-01-08T15:36:48.296045Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1259: Change sender created: at tablet: 72075186224037888, actorId: [1:961:2813]
2026-01-08T15:36:48.296080Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1264: Trying to activate change sender: at tablet: 72075186224037888
2026-01-08T15:36:48.296109Z node 1 :TX_DATASHARD INFO: datashard.cpp:1281: Cannot activate change sender: at tablet: 72075186224037888, state: WaitScheme
2026-01-08T15:36:48.296133Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888
2026-01-08T15:36:48.296405Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:151: TDataShard::TTxInitRestored::Complete
2026-01-08T15:36:48.296431Z node 1 :TX_DATASHARD INFO: datashard.cpp:418: Switched to work state WaitScheme tabletId 72075186224037889
2026-01-08T15:36:48.296490Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:458: 72075186224037889 not sending time cast registration request in state WaitScheme: missing processing params
2026-01-08T15:36:48.296546Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1259: Change sender created: at tablet: 72075186224037889, actorId: [1:962:2814]
2026-01-08T15:36:48.296564Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1264: Trying to activate change sender: at tablet: 72075186224037889
2026-01-08T15:36:48.296579Z node 1 :TX_DATASHARD INFO: datashard.cpp:1281: Cannot activate change sender: at tablet: 72075186224037889, state: WaitScheme
2026-01-08T15:36:48.296594Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037889
2026-01-08T15:36:48.296896Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:151: TDataShard::TTxInitRestored::Complete
2026-01-08T15:36:48.296924Z node 1 :TX_DATASHARD INFO: datashard.cpp:418: Switched to work state WaitScheme tabletId 72075186224037890
2026-01-08T15:36:48.296971Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:458: 72075186224037890 not sending time cast registration request in state WaitScheme: missing processing params
2026-01-08T15:36:48.297006Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1259: Change sender created: at tablet: 72075186224037890, actorId: [1:963:2815]
2026-01-08T15:36:48.297029Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1264: Trying to activate change sender: at tablet: 72075186224037890
2026-01-08T15:36:48.297048Z node 1 :TX_DATASHARD INFO: datashard.cpp:1281: Cannot activate change sender: at tablet: 72075186224037890, state: WaitScheme
2026-01-08T15:36:48.297063Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037890
2026-01-08T15:36:48.297188Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 72075186224037888
2026-01-08T15:36:48.297281Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 72075186224037888
2026-01-08T15:36:48.297492Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888
2026-01-08T15:36:48.297551Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 0
2026-01-08T15:36:48.297592Z node 1 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186224037888 TxInFly 0
2026-01-08T15:36:48.297634Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888
2026-01-08T15:36:48.297703Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 72075186224037889
2026-01-08T15:36:48.297769Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 72075186224037889
2026-01-08T15:36:48.297912Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3740: Server connected at leader tablet# 72075186224037888, clientId# [1:898:2776], serverId# [1:912:2784], sessionId# [0:0:0]
2026-01-08T15:36:48.297964Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037889
2026-01-08T15:36:48.297987Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037889 active 0 active planned 0 immediate 0 planned 0
2026-01-08T15:36:48.298007Z node 1 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186224037889 TxInFly 0
2026-01-08T15:36:48.298031Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037889
2026-01-08T15:36:48.298059Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 72075186224037890
2026-01-08T15: ... datashard.cpp:4020: Send RS 2 at 72075186224037891 from 72075186224037891 to 72075186224037893 txId 281474976715666
2026-01-08T15:37:08.230390Z node 3 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037891
2026-01-08T15:37:08.230472Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:810: Complete [3500 : 281474976715666] from 72075186224037891 at tablet 72075186224037891 send result to client [3:1603:3237], exec latency: 0 ms, propose latency: 0 ms
2026-01-08T15:37:08.230609Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:1179: EnqueueChangeRecords: at tablet: 72075186224037891, records: { Order: 4 PathId: [OwnerId: 72057594046644480, LocalPathId: 50] BodySize: 28 TableId: [OwnerId: 72057594046644480, LocalPathId: 47] SchemaVersion: 1 }, { Order: 5 PathId: [OwnerId: 72057594046644480, LocalPathId: 50] BodySize: 28 TableId: [OwnerId: 72057594046644480, LocalPathId: 47] SchemaVersion: 1 }, { Order: 6 PathId: [OwnerId: 72057594046644480, LocalPathId: 50] BodySize: 28 TableId: [OwnerId: 72057594046644480, LocalPathId: 47] SchemaVersion: 1 }
2026-01-08T15:37:08.230681Z node 3 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037891
2026-01-08T15:37:08.231033Z node 3 :TX_DATASHARD DEBUG: datashard_distributed_erase.cpp:981: [DistEraser] [3:1603:3237] HandlePlan TEvDataShard::TEvProposeTransactionResult: txId# 281474976715666, shard# 72075186224037891, status# 2
2026-01-08T15:37:08.231265Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:3389: Receive RS at 72075186224037893 source 72075186224037891 dest 72075186224037893 producer 72075186224037891 txId 281474976715666
2026-01-08T15:37:08.231373Z node 3 :TX_DATASHARD DEBUG: datashard__readset.cpp:15: TTxReadSet::Execute at 72075186224037893 got read set: {TEvReadSet step# 3500 txid# 281474976715666 TabletSource# 72075186224037891 TabletDest# 72075186224037893 SetTabletProducer# 72075186224037891 ReadSet.Size()# 19 Seqno# 2 Flags# 0}
2026-01-08T15:37:08.231651Z node 3 :TX_DATASHARD INFO: datashard_change_sending.cpp:215: TTxRequestChangeRecords Execute: at tablet# 72075186224037891
2026-01-08T15:37:08.232010Z node 3 :TX_DATASHARD DEBUG: datashard_change_sending.cpp:235: Send 3 change records: to# [3:1410:3118], at tablet# 72075186224037891
2026-01-08T15:37:08.232100Z node 3 :TX_DATASHARD INFO: datashard_change_sending.cpp:260: TTxRequestChangeRecords Complete: sent# 3, forgotten# 0, left# 0, at tablet# 72075186224037891
2026-01-08T15:37:08.232201Z node 3 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:98: Sending '{TEvPlanStepAccepted TabletId# 72075186224037893 step# 3500}
2026-01-08T15:37:08.232259Z node 3 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037893
2026-01-08T15:37:08.232307Z node 3 :TX_DATASHARD DEBUG: datashard__readset.cpp:91: TTxReadSet::Complete at 72075186224037893
2026-01-08T15:37:08.232609Z node 3 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037893
2026-01-08T15:37:08.232655Z node 3 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037893 active 1 active planned 1 immediate 0 planned 1
2026-01-08T15:37:08.232706Z node 3 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:307: Found ready candidate operation [3500:281474976715666] at 72075186224037893 for LoadAndWaitInRS
2026-01-08T15:37:08.233104Z node 3 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037893 (dry run) active 0 active planned 0 immediate 0 planned 0
2026-01-08T15:37:08.233456Z node 3 :TX_DATASHARD DEBUG: datashard_change_receiving.cpp:468: Handle TEvChangeExchange::TEvApplyRecords: origin# 72075186224037891, generation# 1, at tablet# 72075186224037892
2026-01-08T15:37:08.245546Z node 3 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037893
2026-01-08T15:37:08.245656Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:810: Complete [3500 : 281474976715666] from 72075186224037893 at tablet 72075186224037893 send result to client [3:1603:3237], exec latency: 0 ms, propose latency: 1 ms
2026-01-08T15:37:08.245750Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:564: Send delayed Ack RS Ack at 72075186224037893 {TEvReadSet step# 3500 txid# 281474976715666 TabletSource# 72075186224037891 TabletDest# 72075186224037893 SetTabletConsumer# 72075186224037893 Flags# 0 Seqno# 2}
2026-01-08T15:37:08.245807Z node 3 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037893
2026-01-08T15:37:08.245945Z node 3 :TX_DATASHARD DEBUG: datashard_distributed_erase.cpp:981: [DistEraser] [3:1603:3237] HandlePlan TEvDataShard::TEvProposeTransactionResult: txId# 281474976715666, shard# 72075186224037893, status# 2
2026-01-08T15:37:08.245990Z node 3 :TX_DATASHARD DEBUG: datashard_distributed_erase.cpp:165: [DistEraser] [3:1603:3237] Reply: txId# 281474976715666, status# OK, error#
2026-01-08T15:37:08.246205Z node 3 :TX_DATASHARD DEBUG: datashard_outreadset.cpp:150: Receive RS Ack at 72075186224037891 source 72075186224037891 dest 72075186224037893 consumer 72075186224037893 txId 281474976715666
2026-01-08T15:37:08.246631Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:4486: FullScan complete at 72075186224037891
2026-01-08T15:37:08.246680Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:4502: Conditional erase complete: cookie: 4, at: 72075186224037891
2026-01-08T15:37:08.246849Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:3758: Server disconnected at leader tablet# 72075186224037891, clientId# [3:1597:3232], serverId# [3:1598:3233], sessionId# [0:0:0]
2026-01-08T15:37:08.246943Z node 3 :TX_DATASHARD INFO: datashard_change_sending.cpp:310: TTxRemoveChangeRecords Execute: records# 3, at tablet# 72075186224037891
2026-01-08T15:37:08.246971Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:1096: RemoveChangeRecord: order: 4, at tablet: 72075186224037891
2026-01-08T15:37:08.247097Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:1096: RemoveChangeRecord: order: 5, at tablet: 72075186224037891
2026-01-08T15:37:08.247127Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:1096: RemoveChangeRecord: order: 6, at tablet: 72075186224037891
2026-01-08T15:37:08.247256Z node 3 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037891
2026-01-08T15:37:08.247291Z node 3 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037891 active 0 active planned 0 immediate 0 planned 0
2026-01-08T15:37:08.247341Z node 3 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186224037891 TxInFly 0
2026-01-08T15:37:08.248630Z node 3 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037893
2026-01-08T15:37:08.248979Z node 3 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 72075186224037893
2026-01-08T15:37:08.249207Z node 3 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037893
2026-01-08T15:37:08.249250Z node 3 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037893 active 1 active planned 0 immediate 1 planned 0
2026-01-08T15:37:08.249321Z node 3 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:307: Found ready candidate operation [0:281474976715667] at 72075186224037893 for WaitForStreamClearance
2026-01-08T15:37:08.249560Z node 3 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037893 (dry run) active 1 active planned 0 immediate 1 planned 0
2026-01-08T15:37:08.249632Z node 3 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037893
2026-01-08T15:37:08.250542Z node 3 :TX_DATASHARD DEBUG: read_table_scan.cpp:514: Got quota for read table scan ShardId: 72075186224037893, TxId: 281474976715667, MessageQuota: 1
2026-01-08T15:37:08.250721Z node 3 :TX_DATASHARD DEBUG: read_table_scan.cpp:718: Finish scan ShardId: 72075186224037893, TxId: 281474976715667, MessageQuota: 1
2026-01-08T15:37:08.302134Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:4486: FullScan complete at 72075186224037893
2026-01-08T15:37:08.302237Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:4492: Found op: cookie: 281474976715667, at: 72075186224037893
2026-01-08T15:37:08.302425Z node 3 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037893
2026-01-08T15:37:08.302473Z node 3 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037893 active 1 active planned 0 immediate 1 planned 0
2026-01-08T15:37:08.302527Z node 3 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:307: Found ready candidate operation [0:281474976715667] at 72075186224037893 for ReadTableScan
2026-01-08T15:37:08.302706Z node 3 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037893 (dry run) active 0 active planned 0 immediate 0 planned 0
2026-01-08T15:37:08.302789Z node 3 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037893
2026-01-08T15:37:08.302850Z node 3 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037893
2026-01-08T15:37:08.304429Z node 3 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037892
2026-01-08T15:37:08.304795Z node 3 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 72075186224037892
2026-01-08T15:37:08.304986Z node 3 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037892
2026-01-08T15:37:08.305031Z node 3 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037892 active 1 active planned 0 immediate 1 planned 0
2026-01-08T15:37:08.305076Z node 3 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:307: Found ready candidate operation [0:281474976715668] at 72075186224037892 for WaitForStreamClearance
2026-01-08T15:37:08.305301Z node 3 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037892 (dry run) active 1 active planned 0 immediate 1 planned 0
2026-01-08T15:37:08.305361Z node 3 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037892
2026-01-08T15:37:08.305975Z node 3 :TX_DATASHARD DEBUG: read_table_scan.cpp:514: Got quota for read table scan ShardId: 72075186224037892, TxId: 281474976715668, MessageQuota: 1
2026-01-08T15:37:08.306138Z node 3 :TX_DATASHARD DEBUG: read_table_scan.cpp:718: Finish scan ShardId: 72075186224037892, TxId: 281474976715668, MessageQuota: 1
2026-01-08T15:37:08.313298Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:4486: FullScan complete at 72075186224037892
2026-01-08T15:37:08.313386Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:4492: Found op: cookie: 281474976715668, at: 72075186224037892
2026-01-08T15:37:08.313664Z node 3 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037892
2026-01-08T15:37:08.313714Z node 3 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037892 active 1 active planned 0 immediate 1 planned 0
2026-01-08T15:37:08.313770Z node 3 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:307: Found ready candidate operation [0:281474976715668] at 72075186224037892 for ReadTableScan
2026-01-08T15:37:08.313929Z node 3 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037892 (dry run) active 0 active planned 0 immediate 0 planned 0
2026-01-08T15:37:08.314015Z node 3 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037892
2026-01-08T15:37:08.314069Z node 3 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037892
------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_bsvolume/unittest >> TBSV::ShouldLimitBlockStoreVolumeDropRate [GOOD]
Test command err:
Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2143]
IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2143]
Leader for TabletID 72057594046678944 is [1:129:2154] sender: [1:130:2058] recipient: [1:112:2143]
2026-01-08T15:36:55.272516Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7938: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1
2026-01-08T15:36:55.272606Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7966: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10
2026-01-08T15:36:55.272646Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7852: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s
2026-01-08T15:36:55.272684Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7868: OperationsProcessing config: using default configuration
2026-01-08T15:36:55.272742Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7874: OperationsProcessing config: type TxMergeTablePartition, limit 10000
2026-01-08T15:36:55.272785Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7874: OperationsProcessing config: type TxSplitTablePartition, limit 10000
2026-01-08T15:36:55.272853Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7998: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10
2026-01-08T15:36:55.272917Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_shred_manager.cpp:39: [RootShredManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, ShredInterval# 604800.000000s, ShredBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false
2026-01-08T15:36:55.273775Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:8069: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources#
2026-01-08T15:36:55.274080Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute
2026-01-08T15:36:55.367379Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7755: Cannot subscribe to console configs
2026-01-08T15:36:55.367438Z node 1 :IMPORT WARN: schemeshard_import.cpp:440: Table profiles were not loaded
2026-01-08T15:36:55.378407Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete
2026-01-08T15:36:55.378719Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute
2026-01-08T15:36:55.378923Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944
2026-01-08T15:36:55.401191Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete
2026-01-08T15:36:55.401631Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:464: Clear TempDirsState with owners number: 0
2026-01-08T15:36:55.402369Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1383: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944
2026-01-08T15:36:55.404335Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944
2026-01-08T15:36:55.414784Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:160: TTxInitRoot DoComplete, at schemeshard: 72057594046678944
2026-01-08T15:36:55.415057Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_shred_manager.cpp:84: [RootShredManager] Stop
2026-01-08T15:36:55.416423Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944
2026-01-08T15:36:55.416508Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944
2026-01-08T15:36:55.416627Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute
2026-01-08T15:36:55.416677Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1]
2026-01-08T15:36:55.416721Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:213: TTxServerlessStorageBilling.Complete
2026-01-08T15:36:55.416909Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:7111: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944
2026-01-08T15:36:55.572526Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot" OperationType: ESchemeOpMkDir MkDir { Name: ".sys" } Internal: true FailOnExist: false } TxId: 281474976710657 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000"
2026-01-08T15:36:55.573585Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "auth_permissions" Type: EAuthPermissions } } TxId: 281474976710658 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000"
2026-01-08T15:36:55.573712Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "auth_effective_permissions" Type: EAuthEffectivePermissions } } TxId: 281474976710659 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000"
2026-01-08T15:36:55.573798Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "auth_users" Type: EAuthUsers } } TxId: 281474976710660 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000"
2026-01-08T15:36:55.573868Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "pg_tables" Type: EPgTables } } TxId: 281474976710661 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000"
2026-01-08T15:36:55.573928Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "ds_pdisks" Type: EPDisks } } TxId: 281474976710662 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000"
2026-01-08T15:36:55.573995Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "auth_group_members" Type: EAuthGroupMembers } } TxId: 281474976710663 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000"
2026-01-08T15:36:55.574058Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "top_partitions_one_minute" Type: ETopPartitionsByCpuOneMinute } } TxId: 281474976710664 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000"
2026-01-08T15:36:55.574143Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "top_queries_by_request_units_one_hour" Type: ETopQueriesByRequestUnitsOneHour } } TxId: 281474976710665 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000"
2026-01-08T15:36:55.574230Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "ds_vslots" Type: EVSlots } } TxId: 281474976710666 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000"
2026-01-08T15:36:55.574366Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "top_partitions_by_tli_one_hour" Type: ETopPartitionsByTliOneHour } } TxId: 281474976710667 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000"
2026-01-08T15:36:55.574472Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "nodes" Type: ENodes } } TxId: 281474976710668 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000"
2026-01-08T15:36:55.574562Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "top_queries_by_cpu_time_one_hour" Type: ETopQueriesByCpuTimeOneHour } } TxId: 281474976710669 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000"
2026-01-08T15:36:55.574670Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "top_queries_by_read_bytes_one_hour" Type: ETopQueriesByReadBytesOneHour } } TxId: 281474976710670 Owner: "metadata@system" UserToken: "\n\017metadata@system\022\000"
2026-01-08T15:36:55.574748Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_sysviews_update.cpp:127: SysViewsRosterUpdate# [1:214:2215] at schemeshard: 72057594046678944 Send TEvModifySchemeTransaction: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView Internal: true FailOnExist: false CreateSysView { Name: "top_queries_by_read_bytes_one_minute" Type: ETopQueriesByR ... :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:685: TTxOperationPlanStep Execute, stepId: 5000064, transactions count in step: 1, at schemeshard: 72057594046678944
2026-01-08T15:37:09.295678Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:689: TTxOperationPlanStep Execute, message: Transactions { TxId: 129 Coordinator: 72057594046316545 AckTo { RawX1: 136 RawX2: 4294969454 } } Step: 5000064 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944
2026-01-08T15:37:09.295754Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_drop_bsv.cpp:40: TDropBlockStoreVolume TPropose, operationId: 129:0 HandleReply TEvOperationPlan, step: 5000064, at schemeshard: 72057594046678944
2026-01-08T15:37:09.295933Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:611: DecrementPathDbRefCount reason for pathId [OwnerId: 72057594046678944, LocalPathId: 49] was 2
2026-01-08T15:37:09.296092Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:928: Part operation is done id#129:0 progress is 1/1
2026-01-08T15:37:09.296139Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1714: TOperation IsReadyToDone TxId: 129 ready parts: 1/1
2026-01-08T15:37:09.296187Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:928: Part operation is done id#129:0 progress is 1/1
2026-01-08T15:37:09.296233Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1714: TOperation IsReadyToDone TxId: 129 ready parts: 1/1
2026-01-08T15:37:09.296303Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:600: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 2
2026-01-08T15:37:09.296372Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:600: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 49] was 1
2026-01-08T15:37:09.296424Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1679: TOperation IsReadyToNotify, TxId: 129, ready parts: 1/1, is published: false
2026-01-08T15:37:09.296487Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1714: TOperation IsReadyToDone TxId: 129 ready parts: 1/1
2026-01-08T15:37:09.296533Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:995: Operation and all the parts is done, operation id: 129:0
2026-01-08T15:37:09.296572Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5577: RemoveTx for txid 129:0
2026-01-08T15:37:09.296725Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:611: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 49] was 2
2026-01-08T15:37:09.296777Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:1004: Publication still in progress, tx: 129, publications: 2, subscribers: 0
2026-01-08T15:37:09.296836Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:1011: Publication details: tx: 129, [OwnerId: 72057594046678944, LocalPathId: 1], 57
2026-01-08T15:37:09.296877Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:1011: Publication details: tx: 129, [OwnerId: 72057594046678944, LocalPathId: 49], 18446744073709551615
2026-01-08T15:37:09.299059Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:176: Deleted shardIdx 72057594046678944:24
2026-01-08T15:37:09.299124Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:182: Close pipe to deleted shardIdx 72057594046678944:24 tabletId 72075186233409569
2026-01-08T15:37:09.299842Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:176: Deleted shardIdx 72057594046678944:23
2026-01-08T15:37:09.299903Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:182: Close pipe to deleted shardIdx 72057594046678944:23 tabletId 72075186233409568
2026-01-08T15:37:09.300729Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_shard_deleter.cpp:20: SendDeleteRequests, shardsToDelete 2, to hive 72057594037968897, at schemeshard 72057594046678944
2026-01-08T15:37:09.300839Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_shard_deleter.cpp:47: Free shard 72057594046678944:24 hive 72057594037968897 at ss 72057594046678944
2026-01-08T15:37:09.300878Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_shard_deleter.cpp:47: Free shard 72057594046678944:23 hive 72057594037968897 at ss 72057594046678944
2026-01-08T15:37:09.301086Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944
2026-01-08T15:37:09.301127Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 129, path id: [OwnerId: 72057594046678944, LocalPathId: 1]
2026-01-08T15:37:09.301321Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 129, path id: [OwnerId: 72057594046678944, LocalPathId: 49]
2026-01-08T15:37:09.301533Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944
2026-01-08T15:37:09.301577Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:210:2211], at schemeshard: 72057594046678944, txId: 129, path id: 1
2026-01-08T15:37:09.301618Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:210:2211], at schemeshard: 72057594046678944, txId: 129, path id: 49
FAKE_COORDINATOR: Erasing txId 129
2026-01-08T15:37:09.302173Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6274: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 49 Version: 18446744073709551615 PathOwnerId: 72057594046678944, cookie: 129
2026-01-08T15:37:09.302286Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 49 Version: 18446744073709551615 PathOwnerId: 72057594046678944, cookie: 129
2026-01-08T15:37:09.302355Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 2, at schemeshard: 72057594046678944, txId: 129
2026-01-08T15:37:09.302405Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 129, pathId: [OwnerId: 72057594046678944, LocalPathId: 49], version: 18446744073709551615
2026-01-08T15:37:09.302451Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:611: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 49] was 1
2026-01-08T15:37:09.302927Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:123: TTxCleanDroppedPaths Execute, 1 paths in candidate queue, at schemeshard: 72057594046678944
2026-01-08T15:37:09.302994Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:137: TTxCleanDroppedPaths: PersistRemovePath for PathId# [OwnerId: 72057594046678944, LocalPathId: 49], at schemeshard: 72057594046678944
2026-01-08T15:37:09.390666Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:611: DecrementPathDbRefCount reason remove path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 3
2026-01-08T15:37:09.391283Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6274: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 57 PathOwnerId: 72057594046678944, cookie: 129
2026-01-08T15:37:09.391391Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 57 PathOwnerId: 72057594046678944, cookie: 129
2026-01-08T15:37:09.391426Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046678944, txId: 129
2026-01-08T15:37:09.391461Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 129, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], version: 57
2026-01-08T15:37:09.391522Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:611: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 2
2026-01-08T15:37:09.391618Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 129, subscribers: 0
2026-01-08T15:37:09.391877Z node 1 :HIVE INFO: tablet_helpers.cpp:1510: [72057594037968897] TEvDeleteTablet, msg: ShardOwnerId: 72057594046678944 ShardLocalIdx: 24 TxId_Deprecated: 0
FAKEHIVE 72057594037968897 TEvDeleteTablet ShardOwnerId: 72057594046678944 ShardLocalIdx: 24 TxId_Deprecated: 0
2026-01-08T15:37:09.392210Z node 1 :HIVE INFO: tablet_helpers.cpp:1510: [72057594037968897] TEvDeleteTablet, msg: ShardOwnerId: 72057594046678944 ShardLocalIdx: 23 TxId_Deprecated: 0
FAKEHIVE 72057594037968897 TEvDeleteTablet ShardOwnerId: 72057594046678944 ShardLocalIdx: 23 TxId_Deprecated: 0
2026-01-08T15:37:09.392482Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6378: Free tablet reply, message: Status: OK Origin: 72057594037968897 TxId_Deprecated: 0 ShardOwnerId: 72057594046678944 ShardLocalIdx: 24, at schemeshard: 72057594046678944
2026-01-08T15:37:09.392708Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6378: Free tablet reply, message: Status: OK Origin: 72057594037968897 TxId_Deprecated: 0 ShardOwnerId: 72057594046678944 ShardLocalIdx: 23, at schemeshard: 72057594046678944
2026-01-08T15:37:09.398865Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 129
2026-01-08T15:37:09.401238Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:160: TTxCleanDroppedPaths Complete, done PersistRemovePath for 1 paths, skipped 0, left 0 candidates, at schemeshard: 72057594046678944
2026-01-08T15:37:09.401392Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 129
2026-01-08T15:37:09.401746Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:176: Deleted shardIdx 72057594046678944:24
2026-01-08T15:37:09.403060Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:176: Deleted shardIdx 72057594046678944:23
TestModificationResult got TxId: 129, wait until txId: 129
TestWaitNotification wait txId: 129
2026-01-08T15:37:09.403917Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:213: tests -- TTxNotificationSubscriber for txId 129: send EvNotifyTxCompletion
2026-01-08T15:37:09.403974Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:259: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 129
2026-01-08T15:37:09.404718Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:123: NotifyTxCompletion, unknown transaction, txId: 129, at schemeshard: 72057594046678944
2026-01-08T15:37:09.404868Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:230: tests -- TTxNotificationSubscriber for txId 129: got EvNotifyTxCompletionResult
2026-01-08T15:37:09.404921Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:239: tests -- TTxNotificationSubscriber for txId 129: satisfy waiter [1:2077:3946]
TestWaitNotification: OK eventTxId 129
|88.8%| [TM] {BAZEL_UPLOAD} ydb/core/tx/datashard/ut_erase_rows/unittest
>> DistributedEraseTests::ConditionalEraseRowsShouldSuccessOnShardedIndex [GOOD]
>> DistributedEraseTests::ConditionalEraseRowsShouldNotEraseModifiedRows
|88.8%| [TM] {BAZEL_UPLOAD} ydb/core/tx/schemeshard/ut_bsvolume/unittest
>> KqpQueryPerf::IndexLookupJoin+EnableStreamLookup-QueryService